1 /*
2 * Copyright 2015 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "src/gpu/ganesh/vk/GrVkGpu.h"
9
10 #include "include/gpu/GrBackendSemaphore.h"
11 #include "include/gpu/GrBackendSurface.h"
12 #include "include/gpu/GrContextOptions.h"
13 #include "include/gpu/GrDirectContext.h"
14 #include "include/private/base/SkTo.h"
15 #include "src/core/SkCompressedDataUtils.h"
16 #include "src/core/SkConvertPixels.h"
17 #include "src/core/SkMipmap.h"
18 #include "src/core/SkTraceEvent.h"
19 #include "src/gpu/ganesh/GrBackendUtils.h"
20 #include "src/gpu/ganesh/GrDataUtils.h"
21 #include "src/gpu/ganesh/GrDirectContextPriv.h"
22 #include "src/gpu/ganesh/GrGeometryProcessor.h"
23 #include "src/gpu/ganesh/GrGpuResourceCacheAccess.h"
24 #include "src/gpu/ganesh/GrNativeRect.h"
25 #include "src/gpu/ganesh/GrPipeline.h"
26 #include "src/gpu/ganesh/GrRenderTarget.h"
27 #include "src/gpu/ganesh/GrResourceProvider.h"
28 #include "src/gpu/ganesh/GrTexture.h"
29 #include "src/gpu/ganesh/GrThreadSafePipelineBuilder.h"
30 #include "src/gpu/ganesh/SkGr.h"
31 #include "src/gpu/ganesh/vk/GrVkBuffer.h"
32 #include "src/gpu/ganesh/vk/GrVkCommandBuffer.h"
33 #include "src/gpu/ganesh/vk/GrVkCommandPool.h"
34 #include "src/gpu/ganesh/vk/GrVkFramebuffer.h"
35 #include "src/gpu/ganesh/vk/GrVkImage.h"
36 #include "src/gpu/ganesh/vk/GrVkOpsRenderPass.h"
37 #include "src/gpu/ganesh/vk/GrVkPipeline.h"
38 #include "src/gpu/ganesh/vk/GrVkPipelineState.h"
39 #include "src/gpu/ganesh/vk/GrVkRenderPass.h"
40 #include "src/gpu/ganesh/vk/GrVkResourceProvider.h"
41 #include "src/gpu/ganesh/vk/GrVkSemaphore.h"
42 #include "src/gpu/ganesh/vk/GrVkTexture.h"
43 #include "src/gpu/ganesh/vk/GrVkTextureRenderTarget.h"
44 #include "src/gpu/vk/VulkanAMDMemoryAllocator.h"
45 #include "src/gpu/vk/VulkanInterface.h"
46 #include "src/gpu/vk/VulkanMemory.h"
47 #include "src/gpu/vk/VulkanUtilsPriv.h"
48 #include "src/image/SkImage_Gpu.h"
49 #include "src/image/SkSurface_Gpu.h"
50
51 #include "include/gpu/vk/GrVkTypes.h"
52 #include "include/gpu/vk/VulkanExtensions.h"
53
54 #include <utility>
55
56 using namespace skia_private;
57
58 #define VK_CALL(X) GR_VK_CALL(this->vkInterface(), X)
59 #define VK_CALL_RET(RET, X) GR_VK_CALL_RESULT(this, RET, X)
60
61 sk_sp<GrGpu> GrVkGpu::Make(const GrVkBackendContext& backendContext,
62 const GrContextOptions& options, GrDirectContext* direct) {
63 if (backendContext.fInstance == VK_NULL_HANDLE ||
64 backendContext.fPhysicalDevice == VK_NULL_HANDLE ||
65 backendContext.fDevice == VK_NULL_HANDLE ||
66 backendContext.fQueue == VK_NULL_HANDLE) {
67 SK_ABORT_IN_ANDROID_FRAMEWORK("Backend context invalid: %s%s%s%s",
68 backendContext.fInstance == VK_NULL_HANDLE ? "[instance null]" : "",
69 backendContext.fPhysicalDevice == VK_NULL_HANDLE ? "[physical device null]" : "",
70 backendContext.fDevice == VK_NULL_HANDLE ? "[device null]" : "",
71 backendContext.fQueue == VK_NULL_HANDLE ? "[queue null]" : ""
72 );
73 return nullptr;
74 }
75 if (!backendContext.fGetProc) {
76 SK_ABORT_IN_ANDROID_FRAMEWORK("Backend context's fGetProc is null");
77 return nullptr;
78 }
79
80 PFN_vkEnumerateInstanceVersion localEnumerateInstanceVersion =
81 reinterpret_cast<PFN_vkEnumerateInstanceVersion>(
82 backendContext.fGetProc("vkEnumerateInstanceVersion",
83 VK_NULL_HANDLE, VK_NULL_HANDLE));
84 uint32_t instanceVersion = 0;
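// vkEnumerateInstanceVersion only exists on Vulkan 1.1+ loaders; if the proc is missing we must be on a 1.0 instance.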
85 if (!localEnumerateInstanceVersion) {
86 instanceVersion = VK_MAKE_VERSION(1, 0, 0);
87 } else {
88 VkResult err = localEnumerateInstanceVersion(&instanceVersion);
89 if (err) {
90 SK_ABORT_IN_ANDROID_FRAMEWORK("Failed to enumerate instance version. Err: %d\n", err);
91 SkDebugf("Failed to enumerate instance version. Err: %d\n", err);
92 return nullptr;
93 }
94 }
95
96 PFN_vkGetPhysicalDeviceProperties localGetPhysicalDeviceProperties =
97 reinterpret_cast<PFN_vkGetPhysicalDeviceProperties>(
98 backendContext.fGetProc("vkGetPhysicalDeviceProperties",
99 backendContext.fInstance,
100 VK_NULL_HANDLE));
101
102 if (!localGetPhysicalDeviceProperties) {
103 SK_ABORT_IN_ANDROID_FRAMEWORK("Failed to get local vkGetPhysicalDeviceProperties proc");
104 return nullptr;
105 }
106 VkPhysicalDeviceProperties physDeviceProperties;
107 localGetPhysicalDeviceProperties(backendContext.fPhysicalDevice, &physDeviceProperties);
108 uint32_t physDevVersion = physDeviceProperties.apiVersion;
109
110 uint32_t apiVersion = backendContext.fMaxAPIVersion ? backendContext.fMaxAPIVersion
111 : instanceVersion;
112
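// Clamp both the instance and physical-device versions to the client's stated max API version so we never use entry points newer than what the client enabled.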
113 instanceVersion = std::min(instanceVersion, apiVersion);
114 physDevVersion = std::min(physDevVersion, apiVersion);
115
116 sk_sp<const skgpu::VulkanInterface> interface;
117
118 if (backendContext.fVkExtensions) {
119 interface.reset(new skgpu::VulkanInterface(backendContext.fGetProc,
120 backendContext.fInstance,
121 backendContext.fDevice,
122 instanceVersion,
123 physDevVersion,
124 backendContext.fVkExtensions));
125 if (!interface->validate(instanceVersion, physDevVersion, backendContext.fVkExtensions)) {
126 SK_ABORT_IN_ANDROID_FRAMEWORK("Failed to validate VulkanInterface (with " \
127 "given extensions)");
128 return nullptr;
129 }
130 } else {
131 skgpu::VulkanExtensions extensions;
132 // The only extension flag that may affect the Vulkan backend is the swapchain extension. We
133 // need to know if this is enabled to know if we can transition to a present layout when
134 // flushing a surface.
135 if (backendContext.fExtensions & kKHR_swapchain_GrVkExtensionFlag) {
136 const char* swapChainExtName = VK_KHR_SWAPCHAIN_EXTENSION_NAME;
137 extensions.init(backendContext.fGetProc, backendContext.fInstance,
138 backendContext.fPhysicalDevice, 0, nullptr, 1, &swapChainExtName);
139 }
140 interface.reset(new skgpu::VulkanInterface(backendContext.fGetProc,
141 backendContext.fInstance,
142 backendContext.fDevice,
143 instanceVersion,
144 physDevVersion,
145 &extensions));
146 if (!interface->validate(instanceVersion, physDevVersion, &extensions)) {
147 if (backendContext.fExtensions & kKHR_swapchain_GrVkExtensionFlag) {
148 SK_ABORT_IN_ANDROID_FRAMEWORK("Failed to validate VulkanInterface (with " \
149 "only swapchain extension)");
150 }
151 else {
152 SK_ABORT_IN_ANDROID_FRAMEWORK("Failed to validate VulkanInterface (with " \
153 "no extensions)");
154 }
155
156 return nullptr;
157 }
158 }
159
160 sk_sp<GrVkCaps> caps;
161 if (backendContext.fDeviceFeatures2) {
162 caps.reset(new GrVkCaps(options, interface.get(), backendContext.fPhysicalDevice,
163 *backendContext.fDeviceFeatures2, instanceVersion, physDevVersion,
164 *backendContext.fVkExtensions, backendContext.fProtectedContext));
165 if (!caps) {
166 SK_ABORT_IN_ANDROID_FRAMEWORK("Failed to initialize GrVkCaps (with " \
167 "given VkPhysicalDeviceFeatures2 (extensible))");
168 }
169 } else if (backendContext.fDeviceFeatures) {
170 VkPhysicalDeviceFeatures2 features2;
171 features2.pNext = nullptr;
172 features2.features = *backendContext.fDeviceFeatures;
173 caps.reset(new GrVkCaps(options, interface.get(), backendContext.fPhysicalDevice,
174 features2, instanceVersion, physDevVersion,
175 *backendContext.fVkExtensions, backendContext.fProtectedContext));
176 if (!caps) {
177 SK_ABORT_IN_ANDROID_FRAMEWORK("Failed to initialize GrVkCaps (with " \
178 "given VkPhysicalDeviceFeatures)");
179 }
180 } else {
181 VkPhysicalDeviceFeatures2 features;
182 memset(&features, 0, sizeof(VkPhysicalDeviceFeatures2));
183 features.pNext = nullptr;
184 if (backendContext.fFeatures & kGeometryShader_GrVkFeatureFlag) {
185 features.features.geometryShader = true;
186 }
187 if (backendContext.fFeatures & kDualSrcBlend_GrVkFeatureFlag) {
188 features.features.dualSrcBlend = true;
189 }
190 if (backendContext.fFeatures & kSampleRateShading_GrVkFeatureFlag) {
191 features.features.sampleRateShading = true;
192 }
193 skgpu::VulkanExtensions extensions;
194 // The only extension flag that may affect the Vulkan backend is the swapchain extension. We
195 // need to know if this is enabled to know if we can transition to a present layout when
196 // flushing a surface.
197 if (backendContext.fExtensions & kKHR_swapchain_GrVkExtensionFlag) {
198 const char* swapChainExtName = VK_KHR_SWAPCHAIN_EXTENSION_NAME;
199 extensions.init(backendContext.fGetProc, backendContext.fInstance,
200 backendContext.fPhysicalDevice, 0, nullptr, 1, &swapChainExtName);
201 }
202 caps.reset(new GrVkCaps(options, interface.get(), backendContext.fPhysicalDevice,
203 features, instanceVersion, physDevVersion, extensions,
204 backendContext.fProtectedContext));
205 if (!caps) {
206 SK_ABORT_IN_ANDROID_FRAMEWORK("Failed to initialize GrVkCaps (with " \
207 "minimal set of features and extensions)");
208 }
209 }
210
211 if (!caps) {
212 return nullptr;
213 }
214
215 sk_sp<skgpu::VulkanMemoryAllocator> memoryAllocator = backendContext.fMemoryAllocator;
216 if (!memoryAllocator) {
217 // We were not given a memory allocator at creation
218 bool mustUseCoherentHostVisibleMemory = caps->mustUseCoherentHostVisibleMemory();
219 memoryAllocator = skgpu::VulkanAMDMemoryAllocator::Make(backendContext.fInstance,
220 backendContext.fPhysicalDevice,
221 backendContext.fDevice,
222 physDevVersion,
223 backendContext.fVkExtensions,
224 interface,
225 mustUseCoherentHostVisibleMemory,
226 /*threadSafe=*/false);
227 }
228 if (!memoryAllocator) {
229 SK_ABORT_IN_ANDROID_FRAMEWORK("No supplied vulkan memory allocator and unable to create " \
230 "one internally.");
231 SkDEBUGFAIL("No supplied vulkan memory allocator and unable to create one internally.");
232 return nullptr;
233 }
234
235 sk_sp<GrVkGpu> vkGpu(new GrVkGpu(direct, backendContext, std::move(caps), interface,
236 instanceVersion, physDevVersion,
237 std::move(memoryAllocator)));
238 if (backendContext.fProtectedContext == GrProtected::kYes &&
239 !vkGpu->vkCaps().supportsProtectedMemory()) {
240 SK_ABORT_IN_ANDROID_FRAMEWORK("Backend content is in a protected context, but protected " \
241 "memory is not supported by current GrVkCaps");
242 return nullptr;
243 }
244 return std::move(vkGpu);
245 }
246
247 ////////////////////////////////////////////////////////////////////////////////
248
249 GrVkGpu::GrVkGpu(GrDirectContext* direct,
250 const GrVkBackendContext& backendContext,
251 sk_sp<GrVkCaps> caps,
252 sk_sp<const skgpu::VulkanInterface> interface,
253 uint32_t instanceVersion,
254 uint32_t physicalDeviceVersion,
255 sk_sp<skgpu::VulkanMemoryAllocator> memoryAllocator)
256 : INHERITED(direct)
257 , fInterface(std::move(interface))
258 , fMemoryAllocator(std::move(memoryAllocator))
259 , fVkCaps(std::move(caps))
260 , fPhysicalDevice(backendContext.fPhysicalDevice)
261 , fDevice(backendContext.fDevice)
262 , fQueue(backendContext.fQueue)
263 , fQueueIndex(backendContext.fGraphicsQueueIndex)
264 , fResourceProvider(this)
265 , fStagingBufferManager(this)
266 , fDisconnected(false)
267 , fProtectedContext(backendContext.fProtectedContext) {
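// Ownership of the VkInstance and VkDevice always stays with the client; GrVkGpu never destroys them.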
268 SkASSERT(!backendContext.fOwnsInstanceAndDevice);
269 SkASSERT(fMemoryAllocator);
270
271 this->initCapsAndCompiler(fVkCaps);
272
273 VK_CALL(GetPhysicalDeviceProperties(backendContext.fPhysicalDevice, &fPhysDevProps));
274 VK_CALL(GetPhysicalDeviceMemoryProperties(backendContext.fPhysicalDevice, &fPhysDevMemProps));
275
276 fResourceProvider.init();
277
278 fMainCmdPool = fResourceProvider.findOrCreateCommandPool();
279 if (fMainCmdPool) {
280 fMainCmdBuffer = fMainCmdPool->getPrimaryCommandBuffer();
281 SkASSERT(this->currentCommandBuffer());
282 this->currentCommandBuffer()->begin(this);
283 }
284 }
285
286 void GrVkGpu::destroyResources() {
287 if (fMainCmdPool) {
288 fMainCmdPool->getPrimaryCommandBuffer()->end(this, /*abandoningBuffer=*/true);
289 fMainCmdPool->close();
290 }
291
292 // wait for all commands to finish
293 this->finishOutstandingGpuWork();
294
295 if (fMainCmdPool) {
296 fMainCmdPool->unref();
297 fMainCmdPool = nullptr;
298 }
299
300 for (int i = 0; i < fSemaphoresToWaitOn.size(); ++i) {
301 fSemaphoresToWaitOn[i]->unref();
302 }
303 fSemaphoresToWaitOn.clear();
304
305 for (int i = 0; i < fSemaphoresToSignal.size(); ++i) {
306 fSemaphoresToSignal[i]->unref();
307 }
308 fSemaphoresToSignal.clear();
309
310 fStagingBufferManager.reset();
311
312 fMSAALoadManager.destroyResources(this);
313
314 // must call this just before we destroy the command pool and VkDevice
315 fResourceProvider.destroyResources();
316 }
317
318 GrVkGpu::~GrVkGpu() {
319 if (!fDisconnected) {
320 this->destroyResources();
321 }
322 // We don't delete the memory allocator until the very end of the GrVkGpu lifetime so that
323 // clients can continue to delete backend textures even after a context has been abandoned.
324 fMemoryAllocator.reset();
325 }
326
327
328 void GrVkGpu::disconnect(DisconnectType type) {
329 INHERITED::disconnect(type);
330 if (!fDisconnected) {
331 this->destroyResources();
332
333 fSemaphoresToWaitOn.clear();
334 fSemaphoresToSignal.clear();
335 fMainCmdBuffer = nullptr;
336 fDisconnected = true;
337 }
338 }
339
340 GrThreadSafePipelineBuilder* GrVkGpu::pipelineBuilder() {
341 return fResourceProvider.pipelineStateCache();
342 }
343
344 sk_sp<GrThreadSafePipelineBuilder> GrVkGpu::refPipelineBuilder() {
345 return fResourceProvider.refPipelineStateCache();
346 }
347
348 ///////////////////////////////////////////////////////////////////////////////
349
350 GrOpsRenderPass* GrVkGpu::onGetOpsRenderPass(
351 GrRenderTarget* rt,
352 bool useMSAASurface,
353 GrAttachment* stencil,
354 GrSurfaceOrigin origin,
355 const SkIRect& bounds,
356 const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
357 const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
358 const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
359 GrXferBarrierFlags renderPassXferBarriers) {
360 if (!fCachedOpsRenderPass) {
361 fCachedOpsRenderPass = std::make_unique<GrVkOpsRenderPass>(this);
362 }
363
364 // For the given render target and requested render pass features we need to find a compatible
365 // framebuffer to use for the render pass. Technically it is the underlying VkRenderPass that
366 // is compatible, but that is part of the framebuffer that we get here.
367 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
368
369 SkASSERT(!useMSAASurface ||
370 rt->numSamples() > 1 ||
371 (this->vkCaps().supportsDiscardableMSAAForDMSAA() &&
372 vkRT->resolveAttachment() &&
373 vkRT->resolveAttachment()->supportsInputAttachmentUsage()));
374
375 // Convert the GrXferBarrierFlags into render pass self dependency flags
376 GrVkRenderPass::SelfDependencyFlags selfDepFlags = GrVkRenderPass::SelfDependencyFlags::kNone;
377 if (renderPassXferBarriers & GrXferBarrierFlags::kBlend) {
378 selfDepFlags |= GrVkRenderPass::SelfDependencyFlags::kForNonCoherentAdvBlend;
379 }
380 if (renderPassXferBarriers & GrXferBarrierFlags::kTexture) {
381 selfDepFlags |= GrVkRenderPass::SelfDependencyFlags::kForInputAttachment;
382 }
383
384 // Figure out if we need a resolve attachment for this render pass. A resolve attachment is
385 // needed if we are using msaa to draw with a discardable msaa attachment. If we are in this
386 // case we also need to update the color load/store ops since we don't want to ever load or
387 // store the msaa color attachment, but may need to for the resolve attachment.
388 GrOpsRenderPass::LoadAndStoreInfo localColorInfo = colorInfo;
389 bool withResolve = false;
390 GrVkRenderPass::LoadFromResolve loadFromResolve = GrVkRenderPass::LoadFromResolve::kNo;
391 GrOpsRenderPass::LoadAndStoreInfo resolveInfo{GrLoadOp::kLoad, GrStoreOp::kStore, {}};
392 if (useMSAASurface && this->vkCaps().renderTargetSupportsDiscardableMSAA(vkRT)) {
393 withResolve = true;
394 localColorInfo.fStoreOp = GrStoreOp::kDiscard;
395 if (colorInfo.fLoadOp == GrLoadOp::kLoad) {
396 loadFromResolve = GrVkRenderPass::LoadFromResolve::kLoad;
397 localColorInfo.fLoadOp = GrLoadOp::kDiscard;
398 } else {
399 resolveInfo.fLoadOp = GrLoadOp::kDiscard;
400 }
401 }
402
403 // Get the framebuffer to use for the render pass
404 sk_sp<GrVkFramebuffer> framebuffer;
405 if (vkRT->wrapsSecondaryCommandBuffer()) {
406 framebuffer = vkRT->externalFramebuffer();
407 } else {
408 auto fb = vkRT->getFramebuffer(withResolve, SkToBool(stencil), selfDepFlags,
409 loadFromResolve);
410 framebuffer = sk_ref_sp(fb);
411 }
412 if (!framebuffer) {
413 return nullptr;
414 }
415
416 if (!fCachedOpsRenderPass->set(rt, std::move(framebuffer), origin, bounds, localColorInfo,
417 stencilInfo, resolveInfo, selfDepFlags, loadFromResolve,
418 sampledProxies)) {
419 return nullptr;
420 }
421 return fCachedOpsRenderPass.get();
422 }
423
424 bool GrVkGpu::submitCommandBuffer(SyncQueue sync) {
425 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
426 if (!this->currentCommandBuffer()) {
427 return false;
428 }
429 SkASSERT(!fCachedOpsRenderPass || !fCachedOpsRenderPass->isActive());
430
431 if (!this->currentCommandBuffer()->hasWork() && kForce_SyncQueue != sync &&
432 !fSemaphoresToSignal.size() && !fSemaphoresToWaitOn.size()) {
433 // We may have added finished procs during the flush call. Since there is no actual work
434 // we are not submitting the command buffer and may never come back around to submit it.
435 // Thus we call all current finished procs manually, since the work has technically
436 // finished.
437 this->currentCommandBuffer()->callFinishedProcs();
438 SkASSERT(fDrawables.empty());
439 fResourceProvider.checkCommandBuffers();
440 return true;
441 }
442
443 fMainCmdBuffer->end(this);
444 SkASSERT(fMainCmdPool);
445 fMainCmdPool->close();
446 bool didSubmit = fMainCmdBuffer->submitToQueue(this, fQueue, fSemaphoresToSignal,
447 fSemaphoresToWaitOn);
448
449 if (didSubmit && sync == kForce_SyncQueue) {
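// A forced sync blocks the CPU here until the submitted work has finished executing on the GPU.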
450 fMainCmdBuffer->forceSync(this);
451 }
452
453 // We must delete any drawables that had to wait until submit to destroy.
454 fDrawables.clear();
455
456 // If we didn't submit the command buffer then we did not wait on any semaphores. We will
457 // continue to hold onto these semaphores and wait on them during the next command buffer
458 // submission.
459 if (didSubmit) {
460 for (int i = 0; i < fSemaphoresToWaitOn.size(); ++i) {
461 fSemaphoresToWaitOn[i]->unref();
462 }
463 fSemaphoresToWaitOn.clear();
464 }
465
466 // Even if we did not submit the command buffer, we drop all the signal semaphores since we will
467 // not try to recover the work that wasn't submitted and instead just drop it all. The client
468 // will be notified that the semaphores were not submitted so that they will not try to wait on
469 // them.
470 for (int i = 0; i < fSemaphoresToSignal.size(); ++i) {
471 fSemaphoresToSignal[i]->unref();
472 }
473 fSemaphoresToSignal.clear();
474
475 // Release old command pool and create a new one
476 fMainCmdPool->unref();
477 fMainCmdPool = fResourceProvider.findOrCreateCommandPool();
478 if (fMainCmdPool) {
479 fMainCmdBuffer = fMainCmdPool->getPrimaryCommandBuffer();
480 SkASSERT(fMainCmdBuffer);
481 fMainCmdBuffer->begin(this);
482 } else {
483 fMainCmdBuffer = nullptr;
484 }
485 // We must wait to call checkCommandBuffers until after we get a new command buffer. The
486 // checkCommandBuffers may trigger a releaseProc which may cause us to insert a barrier for a
487 // released GrVkImage. That barrier needs to be put into a new command buffer and not the old
488 // one that was just submitted.
489 fResourceProvider.checkCommandBuffers();
490 return didSubmit;
491 }
492
493 ///////////////////////////////////////////////////////////////////////////////
494 sk_sp<GrGpuBuffer> GrVkGpu::onCreateBuffer(size_t size,
495 GrGpuBufferType type,
496 GrAccessPattern accessPattern) {
497 #ifdef SK_DEBUG
498 switch (type) {
499 case GrGpuBufferType::kVertex:
500 case GrGpuBufferType::kIndex:
501 case GrGpuBufferType::kDrawIndirect:
502 SkASSERT(accessPattern == kDynamic_GrAccessPattern ||
503 accessPattern == kStatic_GrAccessPattern);
504 break;
505 case GrGpuBufferType::kXferCpuToGpu:
506 SkASSERT(accessPattern == kDynamic_GrAccessPattern);
507 break;
508 case GrGpuBufferType::kXferGpuToCpu:
509 SkASSERT(accessPattern == kDynamic_GrAccessPattern ||
510 accessPattern == kStream_GrAccessPattern);
511 break;
512 case GrGpuBufferType::kUniform:
513 SkASSERT(accessPattern == kDynamic_GrAccessPattern);
514 break;
515 }
516 #endif
517 return GrVkBuffer::Make(this, size, type, accessPattern);
518 }
519
520 bool GrVkGpu::onWritePixels(GrSurface* surface,
521 SkIRect rect,
522 GrColorType surfaceColorType,
523 GrColorType srcColorType,
524 const GrMipLevel texels[],
525 int mipLevelCount,
526 bool prepForTexSampling) {
527 GrVkTexture* texture = static_cast<GrVkTexture*>(surface->asTexture());
528 if (!texture) {
529 return false;
530 }
531 GrVkImage* texImage = texture->textureImage();
532
533 // Make sure we have at least the base level
534 if (!mipLevelCount || !texels[0].fPixels) {
535 return false;
536 }
537
538 SkASSERT(!skgpu::VkFormatIsCompressed(texImage->imageFormat()));
539 bool success = false;
540 bool linearTiling = texImage->isLinearTiled();
541 if (linearTiling) {
542 if (mipLevelCount > 1) {
543 SkDebugf("Can't upload mipmap data to linear tiled texture");
544 return false;
545 }
546 if (VK_IMAGE_LAYOUT_PREINITIALIZED != texImage->currentLayout()) {
547 // Need to change the layout to general in order to perform a host write
548 texImage->setImageLayout(this,
549 VK_IMAGE_LAYOUT_GENERAL,
550 VK_ACCESS_HOST_WRITE_BIT,
551 VK_PIPELINE_STAGE_HOST_BIT,
552 false);
553 if (!this->submitCommandBuffer(kForce_SyncQueue)) {
554 return false;
555 }
556 }
557 success = this->uploadTexDataLinear(texImage,
558 rect,
559 srcColorType,
560 texels[0].fPixels,
561 texels[0].fRowBytes);
562 } else {
563 SkASSERT(mipLevelCount <= (int)texImage->mipLevels());
564 success = this->uploadTexDataOptimal(texImage,
565 rect,
566 srcColorType,
567 texels,
568 mipLevelCount);
569 if (1 == mipLevelCount) {
570 texture->markMipmapsDirty();
571 }
572 }
573
574 if (prepForTexSampling) {
575 texImage->setImageLayout(this,
576 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
577 VK_ACCESS_SHADER_READ_BIT,
578 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
579 false);
580 }
581
582 return success;
583 }
584
585 // When we update vertex/index buffers via transfers we assume that they may have been used
586 // previously in draws and will be used again in draws afterwards. So we put a barrier before and
587 // after. If we had a mechanism for gathering the buffers that will be used in a GrVkOpsRenderPass
588 // *before* we begin a subpass we could do this lazily and non-redundantly by tracking the "last
589 // usage" on the GrVkBuffer. Then Pass 1 draw, xfer, xfer, xfer, Pass 2 draw would insert just two
590 // barriers: one before the first xfer and one before Pass 2. Currently, we'd use six barriers.
591 // Pass false as "after" before the transfer and true after the transfer.
592 static void add_transfer_dst_buffer_mem_barrier(GrVkGpu* gpu,
593 GrVkBuffer* dst,
594 size_t offset,
595 size_t size,
596 bool after) {
597 if (dst->intendedType() != GrGpuBufferType::kIndex &&
598 dst->intendedType() != GrGpuBufferType::kVertex) {
599 return;
600 }
601
602 VkAccessFlags srcAccessMask = dst->intendedType() == GrGpuBufferType::kIndex
603 ? VK_ACCESS_INDEX_READ_BIT
604 : VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
605 VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
606
607 VkPipelineStageFlagBits srcPipelineStageFlags = VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
608 VkPipelineStageFlagBits dstPipelineStageFlags = VK_PIPELINE_STAGE_TRANSFER_BIT;
609
610 if (after) {
611 using std::swap;
612 swap(srcAccessMask, dstAccessMask );
613 swap(srcPipelineStageFlags, dstPipelineStageFlags);
614 }
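// For the "after" barrier the masks and stages are swapped: the transfer write must complete before any later index/vertex reads, rather than prior reads completing before the write.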
615
616 VkBufferMemoryBarrier bufferMemoryBarrier = {
617 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // sType
618 nullptr, // pNext
619 srcAccessMask, // srcAccessMask
620 dstAccessMask, // dstAccessMask
621 VK_QUEUE_FAMILY_IGNORED, // srcQueueFamilyIndex
622 VK_QUEUE_FAMILY_IGNORED, // dstQueueFamilyIndex
623 dst->vkBuffer(), // buffer
624 offset, // offset
625 size, // size
626 };
627
628 gpu->addBufferMemoryBarrier(srcPipelineStageFlags,
629 dstPipelineStageFlags,
630 /*byRegion=*/false,
631 &bufferMemoryBarrier);
632 }
633
634 bool GrVkGpu::onTransferFromBufferToBuffer(sk_sp<GrGpuBuffer> src,
635 size_t srcOffset,
636 sk_sp<GrGpuBuffer> dst,
637 size_t dstOffset,
638 size_t size) {
639 if (!this->currentCommandBuffer()) {
640 return false;
641 }
642
643 VkBufferCopy copyRegion;
644 copyRegion.srcOffset = srcOffset;
645 copyRegion.dstOffset = dstOffset;
646 copyRegion.size = size;
647
648 add_transfer_dst_buffer_mem_barrier(this,
649 static_cast<GrVkBuffer*>(dst.get()),
650 dstOffset,
651 size,
652 /*after=*/false);
653 this->currentCommandBuffer()->copyBuffer(this, std::move(src), dst, 1, &copyRegion);
654 add_transfer_dst_buffer_mem_barrier(this,
655 static_cast<GrVkBuffer*>(dst.get()),
656 dstOffset,
657 size,
658 /*after=*/true);
659
660 return true;
661 }
662
663 bool GrVkGpu::onTransferPixelsTo(GrTexture* texture,
664 SkIRect rect,
665 GrColorType surfaceColorType,
666 GrColorType bufferColorType,
667 sk_sp<GrGpuBuffer> transferBuffer,
668 size_t bufferOffset,
669 size_t rowBytes) {
670 if (!this->currentCommandBuffer()) {
671 return false;
672 }
673
674 size_t bpp = GrColorTypeBytesPerPixel(bufferColorType);
675 if (GrBackendFormatBytesPerPixel(texture->backendFormat()) != bpp) {
676 return false;
677 }
678
679 // Vulkan only supports offsets that are both 4-byte aligned and aligned to a pixel.
680 if ((bufferOffset & 0x3) || (bufferOffset % bpp)) {
681 return false;
682 }
683 GrVkTexture* tex = static_cast<GrVkTexture*>(texture);
684 if (!tex) {
685 return false;
686 }
687 GrVkImage* vkImage = tex->textureImage();
688 VkFormat format = vkImage->imageFormat();
689
690 // Can't transfer compressed data
691 SkASSERT(!skgpu::VkFormatIsCompressed(format));
692
693 if (!transferBuffer) {
694 return false;
695 }
696
697 if (bufferColorType != this->vkCaps().transferColorType(format, surfaceColorType)) {
698 return false;
699 }
700 SkASSERT(skgpu::VkFormatBytesPerBlock(format) == GrColorTypeBytesPerPixel(bufferColorType));
701
702 SkASSERT(SkIRect::MakeSize(texture->dimensions()).contains(rect));
703
704 // Set up copy region
705 VkBufferImageCopy region;
706 memset(&region, 0, sizeof(VkBufferImageCopy));
707 region.bufferOffset = bufferOffset;
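// bufferRowLength is measured in texels, not bytes, so convert the caller's rowBytes using the bytes-per-pixel.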
708 region.bufferRowLength = (uint32_t)(rowBytes/bpp);
709 region.bufferImageHeight = 0;
710 region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
711 region.imageOffset = { rect.left(), rect.top(), 0 };
712 region.imageExtent = { (uint32_t)rect.width(), (uint32_t)rect.height(), 1 };
713
714 // Change layout of our target so it can be copied to
715 vkImage->setImageLayout(this,
716 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
717 VK_ACCESS_TRANSFER_WRITE_BIT,
718 VK_PIPELINE_STAGE_TRANSFER_BIT,
719 false);
720
721 const GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(transferBuffer.get());
722
723 // Copy the buffer to the image.
724 this->currentCommandBuffer()->copyBufferToImage(this,
725 vkBuffer->vkBuffer(),
726 vkImage,
727 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
728 1,
729 &region);
730 this->currentCommandBuffer()->addGrBuffer(std::move(transferBuffer));
731
732 tex->markMipmapsDirty();
733 return true;
734 }
735
736 bool GrVkGpu::onTransferPixelsFrom(GrSurface* surface,
737 SkIRect rect,
738 GrColorType surfaceColorType,
739 GrColorType bufferColorType,
740 sk_sp<GrGpuBuffer> transferBuffer,
741 size_t offset) {
742 if (!this->currentCommandBuffer()) {
743 return false;
744 }
745 SkASSERT(surface);
746 SkASSERT(transferBuffer);
747 if (fProtectedContext == GrProtected::kYes) {
748 return false;
749 }
750
751 GrVkImage* srcImage;
752 if (GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(surface->asRenderTarget())) {
753 // Reading from render targets that wrap a secondary command buffer is not allowed since
754 // it would require us to know the VkImage, which we don't have, and would require us to
755 // stop and start the VkRenderPass, which we don't have access to.
756 if (rt->wrapsSecondaryCommandBuffer()) {
757 return false;
758 }
759 if (!rt->nonMSAAAttachment()) {
760 return false;
761 }
762 srcImage = rt->nonMSAAAttachment();
763 } else {
764 SkASSERT(surface->asTexture());
765 srcImage = static_cast<GrVkTexture*>(surface->asTexture())->textureImage();
766 }
767
768 VkFormat format = srcImage->imageFormat();
769 if (bufferColorType != this->vkCaps().transferColorType(format, surfaceColorType)) {
770 return false;
771 }
772 SkASSERT(skgpu::VkFormatBytesPerBlock(format) == GrColorTypeBytesPerPixel(bufferColorType));
773
774 // Set up copy region
775 VkBufferImageCopy region;
776 memset(&region, 0, sizeof(VkBufferImageCopy));
777 region.bufferOffset = offset;
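// Setting bufferRowLength to the copy width means rows are tightly packed in the destination buffer.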
778 region.bufferRowLength = rect.width();
779 region.bufferImageHeight = 0;
780 region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
781 region.imageOffset = {rect.left(), rect.top(), 0};
782 region.imageExtent = {(uint32_t)rect.width(), (uint32_t)rect.height(), 1};
783
784 srcImage->setImageLayout(this,
785 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
786 VK_ACCESS_TRANSFER_READ_BIT,
787 VK_PIPELINE_STAGE_TRANSFER_BIT,
788 false);
789
790 this->currentCommandBuffer()->copyImageToBuffer(this, srcImage,
791 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
792 transferBuffer, 1, &region);
793
794 GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(transferBuffer.get());
795 // Make sure the copy to buffer has finished.
796 vkBuffer->addMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT,
797 VK_ACCESS_HOST_READ_BIT,
798 VK_PIPELINE_STAGE_TRANSFER_BIT,
799 VK_PIPELINE_STAGE_HOST_BIT,
800 false);
801 return true;
802 }
803
804 void GrVkGpu::resolveImage(GrSurface* dst, GrVkRenderTarget* src, const SkIRect& srcRect,
805 const SkIPoint& dstPoint) {
806 if (!this->currentCommandBuffer()) {
807 return;
808 }
809
810 SkASSERT(dst);
811 SkASSERT(src && src->colorAttachment() && src->colorAttachment()->numSamples() > 1);
812
813 VkImageResolve resolveInfo;
814 resolveInfo.srcSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
815 resolveInfo.srcOffset = {srcRect.fLeft, srcRect.fTop, 0};
816 resolveInfo.dstSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
817 resolveInfo.dstOffset = {dstPoint.fX, dstPoint.fY, 0};
818 resolveInfo.extent = {(uint32_t)srcRect.width(), (uint32_t)srcRect.height(), 1};
819
820 GrVkImage* dstImage;
821 GrRenderTarget* dstRT = dst->asRenderTarget();
822 GrTexture* dstTex = dst->asTexture();
823 if (dstTex) {
824 dstImage = static_cast<GrVkTexture*>(dstTex)->textureImage();
825 } else {
826 SkASSERT(dst->asRenderTarget());
827 dstImage = static_cast<GrVkRenderTarget*>(dstRT)->nonMSAAAttachment();
828 }
829 SkASSERT(dstImage);
830
831 dstImage->setImageLayout(this,
832 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
833 VK_ACCESS_TRANSFER_WRITE_BIT,
834 VK_PIPELINE_STAGE_TRANSFER_BIT,
835 false);
836
837 src->colorAttachment()->setImageLayout(this,
838 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
839 VK_ACCESS_TRANSFER_READ_BIT,
840 VK_PIPELINE_STAGE_TRANSFER_BIT,
841 false);
842 this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(src->colorAttachment()));
843 this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(dst));
844 this->currentCommandBuffer()->resolveImage(this, *src->colorAttachment(), *dstImage, 1,
845 &resolveInfo);
846 }
847
848 void GrVkGpu::onResolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect) {
849 SkASSERT(target->numSamples() > 1);
850 GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(target);
851 SkASSERT(rt->colorAttachmentView() && rt->resolveAttachmentView());
852
853 if (this->vkCaps().renderTargetSupportsDiscardableMSAA(rt)) {
854 // We would have resolved the RT during the render pass.
855 return;
856 }
857
858 this->resolveImage(target, rt, resolveRect,
859 SkIPoint::Make(resolveRect.x(), resolveRect.y()));
860 }
861
862 bool GrVkGpu::uploadTexDataLinear(GrVkImage* texImage,
863 SkIRect rect,
864 GrColorType dataColorType,
865 const void* data,
866 size_t rowBytes) {
867 SkASSERT(data);
868 SkASSERT(texImage->isLinearTiled());
869
870 SkASSERT(SkIRect::MakeSize(texImage->dimensions()).contains(rect));
871
872 size_t bpp = GrColorTypeBytesPerPixel(dataColorType);
873 size_t trimRowBytes = rect.width() * bpp;
874
875 SkASSERT(VK_IMAGE_LAYOUT_PREINITIALIZED == texImage->currentLayout() ||
876 VK_IMAGE_LAYOUT_GENERAL == texImage->currentLayout());
877 const VkImageSubresource subres = {
878 VK_IMAGE_ASPECT_COLOR_BIT,
879 0, // mipLevel
880 0, // arraySlice
881 };
882 VkSubresourceLayout layout;
883
884 const skgpu::VulkanInterface* interface = this->vkInterface();
885
886 GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice,
887 texImage->image(),
888 &subres,
889 &layout));
890
891 const skgpu::VulkanAlloc& alloc = texImage->alloc();
892 if (VK_NULL_HANDLE == alloc.fMemory) {
893 return false;
894 }
895 VkDeviceSize offset = rect.top()*layout.rowPitch + rect.left()*bpp;
896 VkDeviceSize size = rect.height()*layout.rowPitch;
897 SkASSERT(size + offset <= alloc.fSize);
898 auto checkResult = [this](VkResult result) {
899 return this->checkVkResult(result);
900 };
901 auto allocator = this->memoryAllocator();
902 void* mapPtr = skgpu::VulkanMemory::MapAlloc(allocator, alloc, checkResult);
903 if (!mapPtr) {
904 return false;
905 }
906 mapPtr = reinterpret_cast<char*>(mapPtr) + offset;
907
908 SkRectMemcpy(mapPtr,
909 static_cast<size_t>(layout.rowPitch),
910 data,
911 rowBytes,
912 trimRowBytes,
913 rect.height());
914
915 skgpu::VulkanMemory::FlushMappedAlloc(allocator, alloc, offset, size, checkResult);
916 skgpu::VulkanMemory::UnmapAlloc(allocator, alloc);
917
918 return true;
919 }
920
921 // This fills in the 'regions' vector in preparation for copying a buffer to an image.
922 // 'individualMipOffsets' is filled in as a side-effect.
923 static size_t fill_in_compressed_regions(GrStagingBufferManager* stagingBufferManager,
924 SkTArray<VkBufferImageCopy>* regions,
925 SkTArray<size_t>* individualMipOffsets,
926 GrStagingBufferManager::Slice* slice,
927 SkImage::CompressionType compression,
928 VkFormat vkFormat,
929 SkISize dimensions,
930 GrMipmapped mipmapped) {
931 SkASSERT(compression != SkImage::CompressionType::kNone);
932 int numMipLevels = 1;
933 if (mipmapped == GrMipmapped::kYes) {
934 numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
935 }
936
937 regions->reserve_back(numMipLevels);
938 individualMipOffsets->reserve_back(numMipLevels);
939
940 size_t bytesPerBlock = skgpu::VkFormatBytesPerBlock(vkFormat);
941
942 size_t bufferSize = SkCompressedDataSize(compression,
943 dimensions,
944 individualMipOffsets,
945 mipmapped == GrMipmapped::kYes);
946 SkASSERT(individualMipOffsets->size() == numMipLevels);
947
948 // Get a staging buffer slice to hold our mip data.
949 // Vulkan requires offsets in the buffer to be aligned to a multiple of both the texel size and 4.
950 size_t alignment = bytesPerBlock;
951 switch (alignment & 0b11) {
952 case 0: break; // alignment is already a multiple of 4.
953 case 2: alignment *= 2; break; // alignment is a multiple of 2 but not 4.
954 default: alignment *= 4; break; // alignment is not a multiple of 2.
955 }
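// 'alignment' is now the smallest multiple of bytesPerBlock that is also a multiple of 4, satisfying Vulkan's copy-offset requirements.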
956 *slice = stagingBufferManager->allocateStagingBufferSlice(bufferSize, alignment);
957 if (!slice->fBuffer) {
958 return 0;
959 }
960
961 for (int i = 0; i < numMipLevels; ++i) {
962 VkBufferImageCopy& region = regions->push_back();
963 memset(&region, 0, sizeof(VkBufferImageCopy));
964 region.bufferOffset = slice->fOffset + (*individualMipOffsets)[i];
965 SkISize revisedDimensions = GrCompressedDimensions(compression, dimensions);
966 region.bufferRowLength = revisedDimensions.width();
967 region.bufferImageHeight = revisedDimensions.height();
968 region.imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, SkToU32(i), 0, 1};
969 region.imageOffset = {0, 0, 0};
970 region.imageExtent = {SkToU32(dimensions.width()),
971 SkToU32(dimensions.height()), 1};
972
973 dimensions = {std::max(1, dimensions.width() /2),
974 std::max(1, dimensions.height()/2)};
975 }
976
977 return bufferSize;
978 }
979
980 bool GrVkGpu::uploadTexDataOptimal(GrVkImage* texImage,
981 SkIRect rect,
982 GrColorType dataColorType,
983 const GrMipLevel texels[],
984 int mipLevelCount) {
985 if (!this->currentCommandBuffer()) {
986 return false;
987 }
988
989 SkASSERT(!texImage->isLinearTiled());
990 // The assumption is either that we have no mipmaps, or that our rect is the entire texture
991 SkASSERT(mipLevelCount == 1 || rect == SkIRect::MakeSize(texImage->dimensions()));
992
993 // We assume that if the texture has mip levels, we either upload to all the levels or just the
994 // first.
995 SkASSERT(mipLevelCount == 1 || mipLevelCount == (int)texImage->mipLevels());
996
997 SkASSERT(!rect.isEmpty());
998
999 SkASSERT(this->vkCaps().surfaceSupportsWritePixels(texImage));
1000
1001 SkASSERT(this->vkCaps().isVkFormatTexturable(texImage->imageFormat()));
1002 size_t bpp = GrColorTypeBytesPerPixel(dataColorType);
1003
1004 // texels is const.
1005 // But we may need to adjust the fPixels ptr based on the copyRect, or fRowBytes.
1006 // Because of this we need to make a non-const shallow copy of texels.
1007 AutoTArray<GrMipLevel> texelsShallowCopy(mipLevelCount);
1008 std::copy_n(texels, mipLevelCount, texelsShallowCopy.get());
1009
1010 SkTArray<size_t> individualMipOffsets;
1011 size_t combinedBufferSize;
1012 if (mipLevelCount > 1) {
1013 combinedBufferSize = GrComputeTightCombinedBufferSize(bpp,
1014 rect.size(),
1015 &individualMipOffsets,
1016 mipLevelCount);
1017 } else {
1018 SkASSERT(texelsShallowCopy[0].fPixels && texelsShallowCopy[0].fRowBytes);
1019 combinedBufferSize = rect.width()*rect.height()*bpp;
1020 individualMipOffsets.push_back(0);
1021 }
1022 SkASSERT(combinedBufferSize);
1023
1024 // Get a staging buffer slice to hold our mip data.
1025 // Vulkan requires offsets in the buffer to be aligned to a multiple of both the texel size and 4.
1026 size_t alignment = bpp;
1027 switch (alignment & 0b11) {
1028 case 0: break; // alignment is already a multiple of 4.
1029 case 2: alignment *= 2; break; // alignment is a multiple of 2 but not 4.
1030 default: alignment *= 4; break; // alignment is not a multiple of 2.
1031 }
1032 GrStagingBufferManager::Slice slice =
1033 fStagingBufferManager.allocateStagingBufferSlice(combinedBufferSize, alignment);
1034 if (!slice.fBuffer) {
1035 return false;
1036 }
1037
1038 int uploadLeft = rect.left();
1039 int uploadTop = rect.top();
1040
1041 char* buffer = (char*) slice.fOffsetMapPtr;
1042 SkTArray<VkBufferImageCopy> regions(mipLevelCount);
1043
1044 int currentWidth = rect.width();
1045 int currentHeight = rect.height();
1046 for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) {
1047 if (texelsShallowCopy[currentMipLevel].fPixels) {
1048 const size_t trimRowBytes = currentWidth * bpp;
1049 const size_t rowBytes = texelsShallowCopy[currentMipLevel].fRowBytes;
1050
1051 // copy data into the buffer, skipping the trailing bytes
1052 char* dst = buffer + individualMipOffsets[currentMipLevel];
1053 const char* src = (const char*)texelsShallowCopy[currentMipLevel].fPixels;
1054 SkRectMemcpy(dst, trimRowBytes, src, rowBytes, trimRowBytes, currentHeight);
1055
1056 VkBufferImageCopy& region = regions.push_back();
1057 memset(&region, 0, sizeof(VkBufferImageCopy));
1058 region.bufferOffset = slice.fOffset + individualMipOffsets[currentMipLevel];
1059 region.bufferRowLength = currentWidth;
1060 region.bufferImageHeight = currentHeight;
1061 region.imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, SkToU32(currentMipLevel), 0, 1};
1062 region.imageOffset = {uploadLeft, uploadTop, 0};
1063 region.imageExtent = {(uint32_t)currentWidth, (uint32_t)currentHeight, 1};
1064 }
1065
1066 currentWidth = std::max(1, currentWidth/2);
1067 currentHeight = std::max(1, currentHeight/2);
1068 }
1069
1070 // Change layout of our target so it can be copied to
1071 texImage->setImageLayout(this,
1072 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1073 VK_ACCESS_TRANSFER_WRITE_BIT,
1074 VK_PIPELINE_STAGE_TRANSFER_BIT,
1075 false);
1076
1077 // Copy the buffer to the image. This call takes the raw VkBuffer instead of a GrGpuBuffer
1078 // because we don't need the command buffer to ref the buffer here. The reason is that
1079 // the buffer is coming from the staging manager and the staging manager will make sure the
1080 // command buffer has a ref on the buffer. This avoids having to add and remove a ref for every
1081 // upload in the frame.
1082 GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(slice.fBuffer);
1083 this->currentCommandBuffer()->copyBufferToImage(this,
1084 vkBuffer->vkBuffer(),
1085 texImage,
1086 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1087 regions.size(),
1088 regions.begin());
1089 return true;
1090 }
1091
1092 // It's probably possible to roll this into uploadTexDataOptimal,
1093 // but for now it's easier to maintain as a separate entity.
1094 bool GrVkGpu::uploadTexDataCompressed(GrVkImage* uploadTexture,
1095 SkImage::CompressionType compression, VkFormat vkFormat,
1096 SkISize dimensions, GrMipmapped mipmapped,
1097 const void* data, size_t dataSize) {
1098 if (!this->currentCommandBuffer()) {
1099 return false;
1100 }
1101 SkASSERT(data);
1102 SkASSERT(!uploadTexture->isLinearTiled());
1103 // For now the assumption is that our rect is the entire texture.
1104 // Compressed textures are read-only so this should be a reasonable assumption.
1105 SkASSERT(dimensions.fWidth == uploadTexture->width() &&
1106 dimensions.fHeight == uploadTexture->height());
1107
1108 if (dimensions.fWidth == 0 || dimensions.fHeight == 0) {
1109 return false;
1110 }
1111
1112 SkASSERT(uploadTexture->imageFormat() == vkFormat);
1113 SkASSERT(this->vkCaps().isVkFormatTexturable(vkFormat));
1114
1115
1116 GrStagingBufferManager::Slice slice;
1117 SkTArray<VkBufferImageCopy> regions;
1118 SkTArray<size_t> individualMipOffsets;
1119 SkDEBUGCODE(size_t combinedBufferSize =) fill_in_compressed_regions(&fStagingBufferManager,
1120 ®ions,
1121 &individualMipOffsets,
1122 &slice,
1123 compression,
1124 vkFormat,
1125 dimensions,
1126 mipmapped);
1127 if (!slice.fBuffer) {
1128 return false;
1129 }
1130 SkASSERT(dataSize == combinedBufferSize);
1131
1132 {
1133 char* buffer = (char*)slice.fOffsetMapPtr;
1134 memcpy(buffer, data, dataSize);
1135 }
1136
1137 // Change layout of our target so it can be copied to
1138 uploadTexture->setImageLayout(this,
1139 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1140 VK_ACCESS_TRANSFER_WRITE_BIT,
1141 VK_PIPELINE_STAGE_TRANSFER_BIT,
1142 false);
1143
1144 // Copy the buffer to the image. This call takes the raw VkBuffer instead of a GrGpuBuffer
1145 // because we don't need the command buffer to ref the buffer here. The reason is that
1146 // the buffer is coming from the staging manager and the staging manager will make sure the
1147 // command buffer has a ref on the buffer. This avoids having to add and remove a ref for every
1148 // upload in the frame.
1149 GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(slice.fBuffer);
1150 this->currentCommandBuffer()->copyBufferToImage(this,
1151 vkBuffer->vkBuffer(),
1152 uploadTexture,
1153 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1154 regions.size(),
1155 regions.begin());
1156
1157 return true;
1158 }
1159
1160 ////////////////////////////////////////////////////////////////////////////////
1161 // TODO: make this take a GrMipmapped
1162 sk_sp<GrTexture> GrVkGpu::onCreateTexture(SkISize dimensions,
1163 const GrBackendFormat& format,
1164 GrRenderable renderable,
1165 int renderTargetSampleCnt,
1166 skgpu::Budgeted budgeted,
1167 GrProtected isProtected,
1168 int mipLevelCount,
1169 uint32_t levelClearMask,
1170 std::string_view label) {
1171 VkFormat pixelFormat;
1172 SkAssertResult(format.asVkFormat(&pixelFormat));
1173 SkASSERT(!skgpu::VkFormatIsCompressed(pixelFormat));
1174 SkASSERT(mipLevelCount > 0);
1175
1176 GrMipmapStatus mipmapStatus =
1177 mipLevelCount > 1 ? GrMipmapStatus::kDirty : GrMipmapStatus::kNotAllocated;
1178
1179 sk_sp<GrVkTexture> tex;
1180 if (renderable == GrRenderable::kYes) {
1181 tex = GrVkTextureRenderTarget::MakeNewTextureRenderTarget(
1182 this, budgeted, dimensions, pixelFormat, mipLevelCount, renderTargetSampleCnt,
1183 mipmapStatus, isProtected, label);
1184 } else {
1185 tex = GrVkTexture::MakeNewTexture(this, budgeted, dimensions, pixelFormat,
1186 mipLevelCount, isProtected, mipmapStatus, label);
1187 }
1188
1189 if (!tex) {
1190 return nullptr;
1191 }
1192
1193 if (levelClearMask) {
1194 if (!this->currentCommandBuffer()) {
1195 return nullptr;
1196 }
1197 SkSTArray<1, VkImageSubresourceRange> ranges;
1198 bool inRange = false;
1199 GrVkImage* texImage = tex->textureImage();
1200 for (uint32_t i = 0; i < texImage->mipLevels(); ++i) {
1201 if (levelClearMask & (1U << i)) {
1202 if (inRange) {
1203 ranges.back().levelCount++;
1204 } else {
1205 auto& range = ranges.push_back();
1206 range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
1207 range.baseArrayLayer = 0;
1208 range.baseMipLevel = i;
1209 range.layerCount = 1;
1210 range.levelCount = 1;
1211 inRange = true;
1212 }
1213 } else if (inRange) {
1214 inRange = false;
1215 }
1216 }
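// The loop above coalesced runs of consecutive levels in levelClearMask into as few VkImageSubresourceRanges as possible.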
1217 SkASSERT(!ranges.empty());
1218 static constexpr VkClearColorValue kZeroClearColor = {};
1219 texImage->setImageLayout(this, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1220 VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, false);
1221 this->currentCommandBuffer()->clearColorImage(this, texImage, &kZeroClearColor,
1222 ranges.size(), ranges.begin());
1223 }
1224 return std::move(tex);
1225 }
1226
1227 sk_sp<GrTexture> GrVkGpu::onCreateCompressedTexture(SkISize dimensions,
1228 const GrBackendFormat& format,
1229 skgpu::Budgeted budgeted,
1230 GrMipmapped mipmapped,
1231 GrProtected isProtected,
1232 const void* data,
1233 size_t dataSize) {
1234 VkFormat pixelFormat;
1235 SkAssertResult(format.asVkFormat(&pixelFormat));
1236 SkASSERT(skgpu::VkFormatIsCompressed(pixelFormat));
1237
1238 int numMipLevels = 1;
1239 if (mipmapped == GrMipmapped::kYes) {
1240 numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height())+1;
1241 }
1242
1243 GrMipmapStatus mipmapStatus = (mipmapped == GrMipmapped::kYes) ? GrMipmapStatus::kValid
1244 : GrMipmapStatus::kNotAllocated;
1245
1246 auto tex = GrVkTexture::MakeNewTexture(this,
1247 budgeted,
1248 dimensions,
1249 pixelFormat,
1250 numMipLevels,
1251 isProtected,
1252 mipmapStatus,
1253 /*label=*/"VkGpu_CreateCompressedTexture");
1254 if (!tex) {
1255 return nullptr;
1256 }
1257
1258 SkImage::CompressionType compression = GrBackendFormatToCompressionType(format);
1259 if (!this->uploadTexDataCompressed(tex->textureImage(), compression, pixelFormat,
1260 dimensions, mipmapped, data, dataSize)) {
1261 return nullptr;
1262 }
1263
1264 return std::move(tex);
1265 }
1266
1267 ////////////////////////////////////////////////////////////////////////////////
1268
1269 bool GrVkGpu::updateBuffer(sk_sp<GrVkBuffer> buffer, const void* src,
1270 VkDeviceSize offset, VkDeviceSize size) {
1271 if (!this->currentCommandBuffer()) {
1272 return false;
1273 }
1274 add_transfer_dst_buffer_mem_barrier(this,
1275 static_cast<GrVkBuffer*>(buffer.get()),
1276 offset,
1277 size,
1278 /*after=*/false);
1279 this->currentCommandBuffer()->updateBuffer(this, buffer, offset, size, src);
1280 add_transfer_dst_buffer_mem_barrier(this,
1281 static_cast<GrVkBuffer*>(buffer.get()),
1282 offset,
1283 size,
1284 /*after=*/true);
1285
1286 return true;
1287 }
1288
1289 bool GrVkGpu::zeroBuffer(sk_sp<GrGpuBuffer> buffer) {
1290 if (!this->currentCommandBuffer()) {
1291 return false;
1292 }
1293
1294 add_transfer_dst_buffer_mem_barrier(this,
1295 static_cast<GrVkBuffer*>(buffer.get()),
1296 /*offset=*/0,
1297 buffer->size(),
1298 /*after=*/false);
1299 this->currentCommandBuffer()->fillBuffer(this,
1300 buffer,
1301 /*offset=*/0,
1302 buffer->size(),
1303 /*data=*/0);
1304 add_transfer_dst_buffer_mem_barrier(this,
1305 static_cast<GrVkBuffer*>(buffer.get()),
1306 /*offset=*/0,
1307 buffer->size(),
1308 /*after=*/true);
1309
1310 return true;
1311 }
1312
1313 ////////////////////////////////////////////////////////////////////////////////
1314
1315 static bool check_image_info(const GrVkCaps& caps,
1316 const GrVkImageInfo& info,
1317 bool needsAllocation,
1318 uint32_t graphicsQueueIndex) {
1319 if (VK_NULL_HANDLE == info.fImage) {
1320 return false;
1321 }
1322
1323 if (VK_NULL_HANDLE == info.fAlloc.fMemory && needsAllocation) {
1324 return false;
1325 }
1326
1327 if (info.fImageLayout == VK_IMAGE_LAYOUT_PRESENT_SRC_KHR && !caps.supportsSwapchain()) {
1328 return false;
1329 }
1330
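// An image currently owned by a specific queue family is only accepted if it uses exclusive sharing and that family is our graphics queue.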
1331 if (info.fCurrentQueueFamily != VK_QUEUE_FAMILY_IGNORED &&
1332 info.fCurrentQueueFamily != VK_QUEUE_FAMILY_EXTERNAL &&
1333 info.fCurrentQueueFamily != VK_QUEUE_FAMILY_FOREIGN_EXT) {
1334 if (info.fSharingMode == VK_SHARING_MODE_EXCLUSIVE) {
1335 if (info.fCurrentQueueFamily != graphicsQueueIndex) {
1336 return false;
1337 }
1338 } else {
1339 return false;
1340 }
1341 }
1342
1343 if (info.fYcbcrConversionInfo.isValid()) {
1344 if (!caps.supportsYcbcrConversion()) {
1345 return false;
1346 }
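// Images with an external format (such as imported Android hardware buffers) get their usage from the external allocator, so skip the usage-flag checks below.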
1347 if (info.fYcbcrConversionInfo.fExternalFormat != 0) {
1348 return true;
1349 }
1350 }
1351
1352 // We currently require everything to be made with transfer bits set
1353 if (!SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_SRC_BIT) ||
1354 !SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_TRANSFER_DST_BIT)) {
1355 return false;
1356 }
1357
1358 return true;
1359 }
1360
1361 static bool check_tex_image_info(const GrVkCaps& caps, const GrVkImageInfo& info) {
1362 // We don't support directly importing multisampled textures for sampling from shaders.
1363 if (info.fSampleCount != 1) {
1364 return false;
1365 }
1366
1367 if (info.fYcbcrConversionInfo.isValid() && info.fYcbcrConversionInfo.fExternalFormat != 0) {
1368 return true;
1369 }
1370 if (info.fImageTiling == VK_IMAGE_TILING_OPTIMAL) {
1371 if (!caps.isVkFormatTexturable(info.fFormat)) {
1372 return false;
1373 }
1374 } else if (info.fImageTiling == VK_IMAGE_TILING_LINEAR) {
1375 if (!caps.isVkFormatTexturableLinearly(info.fFormat)) {
1376 return false;
1377 }
1378 } else if (info.fImageTiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) {
1379 if (!caps.supportsDRMFormatModifiers()) {
1380 return false;
1381 }
1382 // To be technically correct we should query the Vulkan support for VkFormat and
1383 // drmFormatModifier pairs to confirm the required feature support is there. However, we
1384 // currently don't have our caps and format tables set up to do this efficiently. So
1385 // instead we just rely on the client's passed-in VkImageUsageFlags and assume they were set
1386 // up using valid features (checked below). In practice this should all be safe because
1387 // currently we are setting all drm format modifier textures to have a
1388 // GrTextureType::kExternal so we just really need to be able to read these video VkImages in
1389 // a shader. The video decoder isn't going to give us VkImages that don't support being
1390 // sampled.
1391 } else {
1392 SkUNREACHABLE;
1393 }
1394
1395 // We currently require all textures to be made with sample support
1396 if (!SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_SAMPLED_BIT)) {
1397 return false;
1398 }
1399
1400 return true;
1401 }
1402
1403 static bool check_rt_image_info(const GrVkCaps& caps, const GrVkImageInfo& info, bool resolveOnly) {
1404 if (!caps.isFormatRenderable(info.fFormat, info.fSampleCount)) {
1405 return false;
1406 }
1407 if (!resolveOnly && !SkToBool(info.fImageUsageFlags & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT)) {
1408 return false;
1409 }
1410 return true;
1411 }
1412
1413 sk_sp<GrTexture> GrVkGpu::onWrapBackendTexture(const GrBackendTexture& backendTex,
1414 GrWrapOwnership ownership,
1415 GrWrapCacheable cacheable,
1416 GrIOType ioType) {
1417 GrVkImageInfo imageInfo;
1418 if (!backendTex.getVkImageInfo(&imageInfo)) {
1419 return nullptr;
1420 }
1421
1422 if (!check_image_info(this->vkCaps(), imageInfo, kAdopt_GrWrapOwnership == ownership,
1423 this->queueIndex())) {
1424 return nullptr;
1425 }
1426
1427 if (!check_tex_image_info(this->vkCaps(), imageInfo)) {
1428 return nullptr;
1429 }
1430
1431 if (backendTex.isProtected() && (fProtectedContext == GrProtected::kNo)) {
1432 return nullptr;
1433 }
1434
1435 sk_sp<skgpu::MutableTextureStateRef> mutableState = backendTex.getMutableState();
1436 SkASSERT(mutableState);
1437 return GrVkTexture::MakeWrappedTexture(this, backendTex.dimensions(), ownership, cacheable,
1438 ioType, imageInfo, std::move(mutableState));
1439 }
1440
1441 sk_sp<GrTexture> GrVkGpu::onWrapCompressedBackendTexture(const GrBackendTexture& beTex,
1442 GrWrapOwnership ownership,
1443 GrWrapCacheable cacheable) {
1444 return this->onWrapBackendTexture(beTex, ownership, cacheable, kRead_GrIOType);
1445 }
1446
1447 sk_sp<GrTexture> GrVkGpu::onWrapRenderableBackendTexture(const GrBackendTexture& backendTex,
1448 int sampleCnt,
1449 GrWrapOwnership ownership,
1450 GrWrapCacheable cacheable) {
1451 GrVkImageInfo imageInfo;
1452 if (!backendTex.getVkImageInfo(&imageInfo)) {
1453 return nullptr;
1454 }
1455
1456 if (!check_image_info(this->vkCaps(), imageInfo, kAdopt_GrWrapOwnership == ownership,
1457 this->queueIndex())) {
1458 return nullptr;
1459 }
1460
1461 if (!check_tex_image_info(this->vkCaps(), imageInfo)) {
1462 return nullptr;
1463 }
1464 // If sampleCnt is > 1 we will create an intermediate MSAA VkImage and then resolve into
1465 // the wrapped VkImage.
1466 bool resolveOnly = sampleCnt > 1;
1467 if (!check_rt_image_info(this->vkCaps(), imageInfo, resolveOnly)) {
1468 return nullptr;
1469 }
1470
1471 if (backendTex.isProtected() && (fProtectedContext == GrProtected::kNo)) {
1472 return nullptr;
1473 }
1474
1475 sampleCnt = this->vkCaps().getRenderTargetSampleCount(sampleCnt, imageInfo.fFormat);
1476
1477 sk_sp<skgpu::MutableTextureStateRef> mutableState = backendTex.getMutableState();
1478 SkASSERT(mutableState);
1479
1480 return GrVkTextureRenderTarget::MakeWrappedTextureRenderTarget(this, backendTex.dimensions(),
1481 sampleCnt, ownership, cacheable,
1482 imageInfo,
1483 std::move(mutableState));
1484 }
1485
1486 sk_sp<GrRenderTarget> GrVkGpu::onWrapBackendRenderTarget(const GrBackendRenderTarget& backendRT) {
1487 GrVkImageInfo info;
1488 if (!backendRT.getVkImageInfo(&info)) {
1489 return nullptr;
1490 }
1491
1492 if (!check_image_info(this->vkCaps(), info, false, this->queueIndex())) {
1493 return nullptr;
1494 }
1495
1496 // We will always render directly to this VkImage.
1497 static bool kResolveOnly = false;
1498 if (!check_rt_image_info(this->vkCaps(), info, kResolveOnly)) {
1499 return nullptr;
1500 }
1501
1502 if (backendRT.isProtected() && (fProtectedContext == GrProtected::kNo)) {
1503 return nullptr;
1504 }
1505
1506 sk_sp<skgpu::MutableTextureStateRef> mutableState = backendRT.getMutableState();
1507 SkASSERT(mutableState);
1508
1509 sk_sp<GrVkRenderTarget> tgt = GrVkRenderTarget::MakeWrappedRenderTarget(
1510 this, backendRT.dimensions(), backendRT.sampleCnt(), info, std::move(mutableState));
1511
1512 // We don't allow the client to supply a premade stencil buffer. We always create one if needed.
1513 SkASSERT(!backendRT.stencilBits());
1514 if (tgt) {
1515 SkASSERT(tgt->canAttemptStencilAttachment(tgt->numSamples() > 1));
1516 }
1517
1518 return std::move(tgt);
1519 }
1520
1521 sk_sp<GrRenderTarget> GrVkGpu::onWrapVulkanSecondaryCBAsRenderTarget(
1522 const SkImageInfo& imageInfo, const GrVkDrawableInfo& vkInfo) {
1523 int maxSize = this->caps()->maxTextureSize();
1524 if (imageInfo.width() > maxSize || imageInfo.height() > maxSize) {
1525 return nullptr;
1526 }
1527
1528 GrBackendFormat backendFormat = GrBackendFormat::MakeVk(vkInfo.fFormat);
1529 if (!backendFormat.isValid()) {
1530 return nullptr;
1531 }
1532 int sampleCnt = this->vkCaps().getRenderTargetSampleCount(1, vkInfo.fFormat);
1533 if (!sampleCnt) {
1534 return nullptr;
1535 }
1536
1537 return GrVkRenderTarget::MakeSecondaryCBRenderTarget(this, imageInfo.dimensions(), vkInfo);
1538 }
1539
1540 bool GrVkGpu::loadMSAAFromResolve(GrVkCommandBuffer* commandBuffer,
1541 const GrVkRenderPass& renderPass,
1542 GrAttachment* dst,
1543 GrVkImage* src,
1544 const SkIRect& srcRect) {
1545 return fMSAALoadManager.loadMSAAFromResolve(this, commandBuffer, renderPass, dst, src, srcRect);
1546 }
1547
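// Regenerates the full mip chain on the GPU by repeatedly halving and blitting: level N-1 is
// transitioned to TRANSFER_SRC and blitted (with linear filtering) into level N, which was left
// in TRANSFER_DST by the initial whole-image layout change.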
1548 bool GrVkGpu::onRegenerateMipMapLevels(GrTexture* tex) {
1549 if (!this->currentCommandBuffer()) {
1550 return false;
1551 }
1552 auto* vkTex = static_cast<GrVkTexture*>(tex)->textureImage();
1553 // don't do anything for linearly tiled textures (can't have mipmaps)
1554 if (vkTex->isLinearTiled()) {
1555 SkDebugf("Trying to create mipmap for linear tiled texture");
1556 return false;
1557 }
1558 SkASSERT(tex->textureType() == GrTextureType::k2D);
1559
1560 // determine if we can blit to and from this format
1561 const GrVkCaps& caps = this->vkCaps();
1562 if (!caps.formatCanBeDstofBlit(vkTex->imageFormat(), false) ||
1563 !caps.formatCanBeSrcofBlit(vkTex->imageFormat(), false) ||
1564 !caps.mipmapSupport()) {
1565 return false;
1566 }
1567
1568 int width = tex->width();
1569 int height = tex->height();
1570 VkImageBlit blitRegion;
1571 memset(&blitRegion, 0, sizeof(VkImageBlit));
1572
1573 // SkMipmap doesn't include the base level in the level count so we have to add 1
1574 uint32_t levelCount = SkMipmap::ComputeLevelCount(tex->width(), tex->height()) + 1;
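    // For example, a 16x15 texture has mip dimensions 8x7, 4x3, 2x1, 1x1, so ComputeLevelCount
    // returns 4 and levelCount is 5 once the base level is included.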
1575 SkASSERT(levelCount == vkTex->mipLevels());
1576
1577 // change layout of the layers so we can write to them.
1578 vkTex->setImageLayout(this, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_ACCESS_TRANSFER_WRITE_BIT,
1579 VK_PIPELINE_STAGE_TRANSFER_BIT, false);
1580
1581 // setup memory barrier
1582 SkASSERT(GrVkFormatIsSupported(vkTex->imageFormat()));
1583 VkImageMemoryBarrier imageMemoryBarrier = {
1584 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType
1585 nullptr, // pNext
1586 VK_ACCESS_TRANSFER_WRITE_BIT, // srcAccessMask
1587 VK_ACCESS_TRANSFER_READ_BIT, // dstAccessMask
1588 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // oldLayout
1589 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, // newLayout
1590 VK_QUEUE_FAMILY_IGNORED, // srcQueueFamilyIndex
1591 VK_QUEUE_FAMILY_IGNORED, // dstQueueFamilyIndex
1592 vkTex->image(), // image
1593 {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1} // subresourceRange
1594 };
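    // The barrier above is reused on every loop iteration below; only baseMipLevel changes. It
    // transitions level (mipLevel - 1) from TRANSFER_DST to TRANSFER_SRC so that level can serve
    // as the source of the next blit.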
1595
1596 // Blit the miplevels
1597 uint32_t mipLevel = 1;
1598 while (mipLevel < levelCount) {
1599 int prevWidth = width;
1600 int prevHeight = height;
1601 width = std::max(1, width / 2);
1602 height = std::max(1, height / 2);
1603
1604 imageMemoryBarrier.subresourceRange.baseMipLevel = mipLevel - 1;
1605 this->addImageMemoryBarrier(vkTex->resource(), VK_PIPELINE_STAGE_TRANSFER_BIT,
1606 VK_PIPELINE_STAGE_TRANSFER_BIT, false, &imageMemoryBarrier);
1607
1608 blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel - 1, 0, 1 };
1609 blitRegion.srcOffsets[0] = { 0, 0, 0 };
1610 blitRegion.srcOffsets[1] = { prevWidth, prevHeight, 1 };
1611 blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel, 0, 1 };
1612 blitRegion.dstOffsets[0] = { 0, 0, 0 };
1613 blitRegion.dstOffsets[1] = { width, height, 1 };
1614 this->currentCommandBuffer()->blitImage(this,
1615 vkTex->resource(),
1616 vkTex->image(),
1617 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
1618 vkTex->resource(),
1619 vkTex->image(),
1620 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1621 1,
1622 &blitRegion,
1623 VK_FILTER_LINEAR);
1624 ++mipLevel;
1625 }
1626 if (levelCount > 1) {
1627 // This barrier logically is not needed, but it changes the final level to the same layout
1628 // as all the others, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL. This makes tracking of the
1629 // layouts and future layout changes easier. The alternative here would be to track layout
1630         // and memory accesses per layer, which doesn't seem worth it.
1631 imageMemoryBarrier.subresourceRange.baseMipLevel = mipLevel - 1;
1632 this->addImageMemoryBarrier(vkTex->resource(), VK_PIPELINE_STAGE_TRANSFER_BIT,
1633 VK_PIPELINE_STAGE_TRANSFER_BIT, false, &imageMemoryBarrier);
1634 vkTex->updateImageLayout(VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
1635 }
1636 return true;
1637 }
1638
1639 ////////////////////////////////////////////////////////////////////////////////
1640
1641 sk_sp<GrAttachment> GrVkGpu::makeStencilAttachment(const GrBackendFormat& /*colorFormat*/,
1642 SkISize dimensions, int numStencilSamples) {
1643 VkFormat sFmt = this->vkCaps().preferredStencilFormat();
1644
1645 fStats.incStencilAttachmentCreates();
1646 return GrVkImage::MakeStencil(this, dimensions, numStencilSamples, sFmt);
1647 }
1648
1649 sk_sp<GrAttachment> GrVkGpu::makeMSAAAttachment(SkISize dimensions,
1650 const GrBackendFormat& format,
1651 int numSamples,
1652 GrProtected isProtected,
1653 GrMemoryless memoryless) {
1654 VkFormat pixelFormat;
1655 SkAssertResult(format.asVkFormat(&pixelFormat));
1656 SkASSERT(!skgpu::VkFormatIsCompressed(pixelFormat));
1657 SkASSERT(this->vkCaps().isFormatRenderable(pixelFormat, numSamples));
1658
1659 fStats.incMSAAAttachmentCreates();
1660 return GrVkImage::MakeMSAA(this, dimensions, numSamples, pixelFormat, isProtected, memoryless);
1661 }
1662
1663 ////////////////////////////////////////////////////////////////////////////////
1664
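// Copies uncompressed per-level pixel data into a mapped staging allocation, tightly packing
// each mip level at its offset from individualMipOffsets. SkRectMemcpy handles sources whose
// rowBytes are larger than the tight row size.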
1665 bool copy_src_data(char* mapPtr,
1666 VkFormat vkFormat,
1667 const SkTArray<size_t>& individualMipOffsets,
1668 const GrPixmap srcData[],
1669 int numMipLevels) {
1670 SkASSERT(srcData && numMipLevels);
1671 SkASSERT(!skgpu::VkFormatIsCompressed(vkFormat));
1672 SkASSERT(individualMipOffsets.size() == numMipLevels);
1673 SkASSERT(mapPtr);
1674
1675 size_t bytesPerPixel = skgpu::VkFormatBytesPerBlock(vkFormat);
1676
1677 for (int level = 0; level < numMipLevels; ++level) {
1678 const size_t trimRB = srcData[level].info().width() * bytesPerPixel;
1679
1680 SkRectMemcpy(mapPtr + individualMipOffsets[level], trimRB,
1681 srcData[level].addr(), srcData[level].rowBytes(),
1682 trimRB, srcData[level].height());
1683 }
1684 return true;
1685 }
1686
1687 bool GrVkGpu::createVkImageForBackendSurface(VkFormat vkFormat,
1688 SkISize dimensions,
1689 int sampleCnt,
1690 GrTexturable texturable,
1691 GrRenderable renderable,
1692 GrMipmapped mipmapped,
1693 GrVkImageInfo* info,
1694 GrProtected isProtected) {
1695 SkASSERT(texturable == GrTexturable::kYes || renderable == GrRenderable::kYes);
1696
1697 if (fProtectedContext != isProtected) {
1698 return false;
1699 }
1700
1701 if (texturable == GrTexturable::kYes && !fVkCaps->isVkFormatTexturable(vkFormat)) {
1702 return false;
1703 }
1704
1705 // MSAA images are only currently used by createTestingOnlyBackendRenderTarget.
1706 if (sampleCnt > 1 && (texturable == GrTexturable::kYes || renderable == GrRenderable::kNo)) {
1707 return false;
1708 }
1709
1710 if (renderable == GrRenderable::kYes) {
1711 sampleCnt = fVkCaps->getRenderTargetSampleCount(sampleCnt, vkFormat);
1712 if (!sampleCnt) {
1713 return false;
1714 }
1715 }
1716
1717
1718 int numMipLevels = 1;
1719 if (mipmapped == GrMipmapped::kYes) {
1720 numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
1721 }
1722
1723 VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
1724 VK_IMAGE_USAGE_TRANSFER_DST_BIT;
1725 if (texturable == GrTexturable::kYes) {
1726 usageFlags |= VK_IMAGE_USAGE_SAMPLED_BIT;
1727 }
1728 if (renderable == GrRenderable::kYes) {
1729 usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
1730 // We always make our render targets support being used as input attachments
1731 usageFlags |= VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
1732 }
1733
1734 GrVkImage::ImageDesc imageDesc;
1735 imageDesc.fImageType = VK_IMAGE_TYPE_2D;
1736 imageDesc.fFormat = vkFormat;
1737 imageDesc.fWidth = dimensions.width();
1738 imageDesc.fHeight = dimensions.height();
1739 imageDesc.fLevels = numMipLevels;
1740 imageDesc.fSamples = sampleCnt;
1741 imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
1742 imageDesc.fUsageFlags = usageFlags;
1743 imageDesc.fMemProps = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
1744 imageDesc.fIsProtected = fProtectedContext;
1745
1746 if (!GrVkImage::InitImageInfo(this, imageDesc, info)) {
1747 SkDebugf("Failed to init image info\n");
1748 return false;
1749 }
1750
1751 return true;
1752 }
1753
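// Clears every mip level of a client-created backend texture to the given color by wrapping it
// in a borrowed GrVkTexture, transitioning it to TRANSFER_DST, issuing vkCmdClearColorImage, and
// then transitioning it to SHADER_READ_ONLY, the layout Ganesh expects for sampling.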
1754 bool GrVkGpu::onClearBackendTexture(const GrBackendTexture& backendTexture,
1755 sk_sp<skgpu::RefCntedCallback> finishedCallback,
1756 std::array<float, 4> color) {
1757 GrVkImageInfo info;
1758 SkAssertResult(backendTexture.getVkImageInfo(&info));
1759
1760 sk_sp<skgpu::MutableTextureStateRef> mutableState = backendTexture.getMutableState();
1761 SkASSERT(mutableState);
1762 sk_sp<GrVkTexture> texture =
1763 GrVkTexture::MakeWrappedTexture(this, backendTexture.dimensions(),
1764 kBorrow_GrWrapOwnership, GrWrapCacheable::kNo,
1765 kRW_GrIOType, info, std::move(mutableState));
1766 if (!texture) {
1767 return false;
1768 }
1769 GrVkImage* texImage = texture->textureImage();
1770
1771 GrVkPrimaryCommandBuffer* cmdBuffer = this->currentCommandBuffer();
1772 if (!cmdBuffer) {
1773 return false;
1774 }
1775
1776 texImage->setImageLayout(this,
1777 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1778 VK_ACCESS_TRANSFER_WRITE_BIT,
1779 VK_PIPELINE_STAGE_TRANSFER_BIT,
1780 false);
1781
1782 // CmdClearColorImage doesn't work for compressed formats
1783 SkASSERT(!skgpu::VkFormatIsCompressed(info.fFormat));
1784
1785 VkClearColorValue vkColor;
1786 // If we ever support SINT or UINT formats this needs to be updated to use the int32 and
1787 // uint32 union members in those cases.
1788 vkColor.float32[0] = color[0];
1789 vkColor.float32[1] = color[1];
1790 vkColor.float32[2] = color[2];
1791 vkColor.float32[3] = color[3];
1792 VkImageSubresourceRange range;
1793 range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
1794 range.baseArrayLayer = 0;
1795 range.baseMipLevel = 0;
1796 range.layerCount = 1;
1797 range.levelCount = info.fLevelCount;
1798 cmdBuffer->clearColorImage(this, texImage, &vkColor, 1, &range);
1799
1800     // Change the image layout to shader read since, if we use this texture as a borrowed
1801     // texture within Ganesh, we require its layout to be set to shader-read.
1802 texImage->setImageLayout(this, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
1803 VK_ACCESS_SHADER_READ_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
1804 false);
1805
1806 if (finishedCallback) {
1807 this->addFinishedCallback(std::move(finishedCallback));
1808 }
1809 return true;
1810 }
1811
1812 GrBackendTexture GrVkGpu::onCreateBackendTexture(SkISize dimensions,
1813 const GrBackendFormat& format,
1814 GrRenderable renderable,
1815 GrMipmapped mipmapped,
1816 GrProtected isProtected,
1817 std::string_view label) {
1818 const GrVkCaps& caps = this->vkCaps();
1819
1820 if (fProtectedContext != isProtected) {
1821 return {};
1822 }
1823
1824 VkFormat vkFormat;
1825 if (!format.asVkFormat(&vkFormat)) {
1826 return {};
1827 }
1828
1829 // TODO: move the texturability check up to GrGpu::createBackendTexture and just assert here
1830 if (!caps.isVkFormatTexturable(vkFormat)) {
1831 return {};
1832 }
1833
1834 if (skgpu::VkFormatNeedsYcbcrSampler(vkFormat)) {
1835 return {};
1836 }
1837
1838 GrVkImageInfo info;
1839 if (!this->createVkImageForBackendSurface(vkFormat, dimensions, 1, GrTexturable::kYes,
1840 renderable, mipmapped, &info, isProtected)) {
1841 return {};
1842 }
1843
1844 return GrBackendTexture(dimensions.width(), dimensions.height(), info);
1845 }
1846
1847 GrBackendTexture GrVkGpu::onCreateCompressedBackendTexture(
1848 SkISize dimensions, const GrBackendFormat& format, GrMipmapped mipmapped,
1849 GrProtected isProtected) {
1850 return this->onCreateBackendTexture(dimensions,
1851 format,
1852 GrRenderable::kNo,
1853 mipmapped,
1854 isProtected,
1855 /*label=*/"VkGpu_CreateCompressedBackendTexture");
1856 }
1857
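// Uploads compressed texture data through a staging buffer: per-level VkBufferImageCopy regions
// and offsets come from fill_in_compressed_regions, the payload is memcpy'd into the staging
// slice, and a single copyBufferToImage transfers all levels before the image is returned to
// SHADER_READ_ONLY layout.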
1858 bool GrVkGpu::onUpdateCompressedBackendTexture(const GrBackendTexture& backendTexture,
1859 sk_sp<skgpu::RefCntedCallback> finishedCallback,
1860 const void* data,
1861 size_t size) {
1862 GrVkImageInfo info;
1863 SkAssertResult(backendTexture.getVkImageInfo(&info));
1864
1865 sk_sp<skgpu::MutableTextureStateRef> mutableState = backendTexture.getMutableState();
1866 SkASSERT(mutableState);
1867 sk_sp<GrVkTexture> texture = GrVkTexture::MakeWrappedTexture(this,
1868 backendTexture.dimensions(),
1869 kBorrow_GrWrapOwnership,
1870 GrWrapCacheable::kNo,
1871 kRW_GrIOType,
1872 info,
1873 std::move(mutableState));
1874 if (!texture) {
1875 return false;
1876 }
1877
1878 GrVkPrimaryCommandBuffer* cmdBuffer = this->currentCommandBuffer();
1879 if (!cmdBuffer) {
1880 return false;
1881 }
1882 GrVkImage* image = texture->textureImage();
1883 image->setImageLayout(this,
1884 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1885 VK_ACCESS_TRANSFER_WRITE_BIT,
1886 VK_PIPELINE_STAGE_TRANSFER_BIT,
1887 false);
1888
1889 SkImage::CompressionType compression =
1890 GrBackendFormatToCompressionType(backendTexture.getBackendFormat());
1891
1892 SkTArray<VkBufferImageCopy> regions;
1893 SkTArray<size_t> individualMipOffsets;
1894 GrStagingBufferManager::Slice slice;
1895
1896 fill_in_compressed_regions(&fStagingBufferManager,
1897                                &regions,
1898 &individualMipOffsets,
1899 &slice,
1900 compression,
1901 info.fFormat,
1902 backendTexture.dimensions(),
1903 backendTexture.fMipmapped);
1904
1905 if (!slice.fBuffer) {
1906 return false;
1907 }
1908
1909 memcpy(slice.fOffsetMapPtr, data, size);
1910
1911 cmdBuffer->addGrSurface(texture);
1912 // Copy the buffer to the image. This call takes the raw VkBuffer instead of a GrGpuBuffer
1913     // because we don't need the command buffer to ref the buffer here. The reason is that
1914 // the buffer is coming from the staging manager and the staging manager will make sure the
1915 // command buffer has a ref on the buffer. This avoids having to add and remove a ref for
1916 // every upload in the frame.
1917 cmdBuffer->copyBufferToImage(this,
1918 static_cast<GrVkBuffer*>(slice.fBuffer)->vkBuffer(),
1919 image,
1920 image->currentLayout(),
1921 regions.size(),
1922 regions.begin());
1923
1924     // Change the image layout to shader read since, if we use this texture as a borrowed
1925     // texture within Ganesh, we require its layout to be set to shader-read.
1926 image->setImageLayout(this,
1927 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
1928 VK_ACCESS_SHADER_READ_BIT,
1929 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
1930 false);
1931
1932 if (finishedCallback) {
1933 this->addFinishedCallback(std::move(finishedCallback));
1934 }
1935 return true;
1936 }
1937
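// Applies a client-requested layout/queue-family combination to a wrapped image. A requested
// layout of VK_IMAGE_LAYOUT_UNDEFINED is treated as "keep the current layout", and the transition
// is skipped entirely when both the old and new owners are special queue families (external or
// foreign), since that combination is illegal.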
1938 void set_layout_and_queue_from_mutable_state(GrVkGpu* gpu, GrVkImage* image,
1939 const skgpu::VulkanMutableTextureState& newState) {
1940     // Even though internally we use these helpers for getting src access flags and stages, they
1941 // can also be used for general dst flags since we don't know exactly what the client
1942 // plans on using the image for.
1943 VkImageLayout newLayout = newState.getImageLayout();
1944 if (newLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
1945 newLayout = image->currentLayout();
1946 }
1947 VkPipelineStageFlags dstStage = GrVkImage::LayoutToPipelineSrcStageFlags(newLayout);
1948 VkAccessFlags dstAccess = GrVkImage::LayoutToSrcAccessMask(newLayout);
1949
1950 uint32_t currentQueueFamilyIndex = image->currentQueueFamilyIndex();
1951 uint32_t newQueueFamilyIndex = newState.getQueueFamilyIndex();
1952 auto isSpecialQueue = [](uint32_t queueFamilyIndex) {
1953 return queueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL ||
1954 queueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT;
1955 };
1956 if (isSpecialQueue(currentQueueFamilyIndex) && isSpecialQueue(newQueueFamilyIndex)) {
1957 // It is illegal to have both the new and old queue be special queue families (i.e. external
1958 // or foreign).
1959 return;
1960 }
1961
1962 image->setImageLayoutAndQueueIndex(gpu, newLayout, dstAccess, dstStage, false,
1963 newQueueFamilyIndex);
1964 }
1965
1966 bool GrVkGpu::setBackendSurfaceState(GrVkImageInfo info,
1967 sk_sp<skgpu::MutableTextureStateRef> currentState,
1968 SkISize dimensions,
1969 const skgpu::VulkanMutableTextureState& newState,
1970 skgpu::MutableTextureState* previousState,
1971 sk_sp<skgpu::RefCntedCallback> finishedCallback) {
1972 sk_sp<GrVkImage> texture = GrVkImage::MakeWrapped(this,
1973 dimensions,
1974 info,
1975 std::move(currentState),
1976 GrVkImage::UsageFlags::kColorAttachment,
1977 kBorrow_GrWrapOwnership,
1978 GrWrapCacheable::kNo,
1979 "VkGpu_SetBackendSurfaceState",
1980 /*forSecondaryCB=*/false);
1981 SkASSERT(texture);
1982 if (!texture) {
1983 return false;
1984 }
1985 if (previousState) {
1986 previousState->setVulkanState(texture->currentLayout(),
1987 texture->currentQueueFamilyIndex());
1988 }
1989 set_layout_and_queue_from_mutable_state(this, texture.get(), newState);
1990 if (finishedCallback) {
1991 this->addFinishedCallback(std::move(finishedCallback));
1992 }
1993 return true;
1994 }
1995
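// A minimal, hypothetical usage sketch (not part of this file): clients typically reach this
// entry point through GrDirectContext::setBackendTextureState, e.g. to hand a wrapped texture
// off to an external queue. The snippet assumes a skgpu::MutableTextureState constructor taking
// a VkImageLayout and queue family index:
//
//     skgpu::MutableTextureState newState(VK_IMAGE_LAYOUT_GENERAL, externalQueueFamilyIndex);
//     direct->setBackendTextureState(backendTex, newState,
//                                    /*previousState=*/nullptr,
//                                    /*finishedProc=*/nullptr, /*finishedContext=*/nullptr);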
1996 bool GrVkGpu::setBackendTextureState(const GrBackendTexture& backendTexture,
1997 const skgpu::MutableTextureState& newState,
1998 skgpu::MutableTextureState* previousState,
1999 sk_sp<skgpu::RefCntedCallback> finishedCallback) {
2000 GrVkImageInfo info;
2001     SkAssertResult(backendTexture.getVkImageInfo(&info));
2002     sk_sp<skgpu::MutableTextureStateRef> currentState = backendTexture.getMutableState();
2003     SkASSERT(currentState);
2004     SkASSERT(newState.isValid() && newState.fBackend == skgpu::BackendApi::kVulkan);
2005     return this->setBackendSurfaceState(info, std::move(currentState), backendTexture.dimensions(),
2006 newState.fVkState, previousState,
2007 std::move(finishedCallback));
2008 }
2009
2010 bool GrVkGpu::setBackendRenderTargetState(const GrBackendRenderTarget& backendRenderTarget,
2011 const skgpu::MutableTextureState& newState,
2012 skgpu::MutableTextureState* previousState,
2013 sk_sp<skgpu::RefCntedCallback> finishedCallback) {
2014 GrVkImageInfo info;
2015 SkAssertResult(backendRenderTarget.getVkImageInfo(&info));
2016 sk_sp<skgpu::MutableTextureStateRef> currentState = backendRenderTarget.getMutableState();
2017 SkASSERT(currentState);
2018 SkASSERT(newState.fBackend == skgpu::BackendApi::kVulkan);
2019 return this->setBackendSurfaceState(info, std::move(currentState),
2020 backendRenderTarget.dimensions(), newState.fVkState,
2021 previousState, std::move(finishedCallback));
2022 }
2023
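// Inserts an image memory barrier on the render target's color attachment. For blend barriers it
// makes prior color-attachment writes visible to non-coherent advanced-blend reads; for texture
// barriers it makes them visible to input-attachment reads in the fragment shader.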
2024 void GrVkGpu::xferBarrier(GrRenderTarget* rt, GrXferBarrierType barrierType) {
2025 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
2026 VkPipelineStageFlags dstStage;
2027 VkAccessFlags dstAccess;
2028 if (barrierType == kBlend_GrXferBarrierType) {
2029 dstStage = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
2030 dstAccess = VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT;
2031 } else {
2032 SkASSERT(barrierType == kTexture_GrXferBarrierType);
2033 dstStage = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
2034 dstAccess = VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
2035 }
2036 GrVkImage* image = vkRT->colorAttachment();
2037 VkImageMemoryBarrier barrier;
2038 barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
2039 barrier.pNext = nullptr;
2040 barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
2041 barrier.dstAccessMask = dstAccess;
2042 barrier.oldLayout = image->currentLayout();
2043 barrier.newLayout = barrier.oldLayout;
2044 barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
2045 barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
2046 barrier.image = image->image();
2047 barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, image->mipLevels(), 0, 1};
2048 this->addImageMemoryBarrier(image->resource(),
2049 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
2050 dstStage, true, &barrier);
2051 }
2052
2053 void GrVkGpu::deleteBackendTexture(const GrBackendTexture& tex) {
2054 SkASSERT(GrBackendApi::kVulkan == tex.fBackend);
2055
2056 GrVkImageInfo info;
2057 if (tex.getVkImageInfo(&info)) {
2058 GrVkImage::DestroyImageInfo(this, const_cast<GrVkImageInfo*>(&info));
2059 }
2060 }
2061
2062 bool GrVkGpu::compile(const GrProgramDesc& desc, const GrProgramInfo& programInfo) {
2063 GrVkRenderPass::AttachmentsDescriptor attachmentsDescriptor;
2064 GrVkRenderPass::AttachmentFlags attachmentFlags;
2065 GrVkRenderTarget::ReconstructAttachmentsDescriptor(this->vkCaps(), programInfo,
2066 &attachmentsDescriptor, &attachmentFlags);
2067
2068 GrVkRenderPass::SelfDependencyFlags selfDepFlags = GrVkRenderPass::SelfDependencyFlags::kNone;
2069 if (programInfo.renderPassBarriers() & GrXferBarrierFlags::kBlend) {
2070 selfDepFlags |= GrVkRenderPass::SelfDependencyFlags::kForNonCoherentAdvBlend;
2071 }
2072 if (programInfo.renderPassBarriers() & GrXferBarrierFlags::kTexture) {
2073 selfDepFlags |= GrVkRenderPass::SelfDependencyFlags::kForInputAttachment;
2074 }
2075
2076 GrVkRenderPass::LoadFromResolve loadFromResolve = GrVkRenderPass::LoadFromResolve::kNo;
2077 if (this->vkCaps().programInfoWillUseDiscardableMSAA(programInfo) &&
2078 programInfo.colorLoadOp() == GrLoadOp::kLoad) {
2079 loadFromResolve = GrVkRenderPass::LoadFromResolve::kLoad;
2080 }
2081 sk_sp<const GrVkRenderPass> renderPass(this->resourceProvider().findCompatibleRenderPass(
2082 &attachmentsDescriptor, attachmentFlags, selfDepFlags, loadFromResolve));
2083 if (!renderPass) {
2084 return false;
2085 }
2086
2087 GrThreadSafePipelineBuilder::Stats::ProgramCacheResult stat;
2088
2089 auto pipelineState = this->resourceProvider().findOrCreateCompatiblePipelineState(
2090 desc,
2091 programInfo,
2092 renderPass->vkRenderPass(),
2093 &stat);
2094 if (!pipelineState) {
2095 return false;
2096 }
2097
2098 return stat != GrThreadSafePipelineBuilder::Stats::ProgramCacheResult::kHit;
2099 }
2100
2101 #if GR_TEST_UTILS
2102 bool GrVkGpu::isTestingOnlyBackendTexture(const GrBackendTexture& tex) const {
2103 SkASSERT(GrBackendApi::kVulkan == tex.fBackend);
2104
2105 GrVkImageInfo backend;
2106 if (!tex.getVkImageInfo(&backend)) {
2107 return false;
2108 }
2109
2110 if (backend.fImage && backend.fAlloc.fMemory) {
2111 VkMemoryRequirements req;
2112 memset(&req, 0, sizeof(req));
2113 GR_VK_CALL(this->vkInterface(), GetImageMemoryRequirements(fDevice,
2114 backend.fImage,
2115 &req));
2116 // TODO: find a better check
2117 // This will probably fail with a different driver
2118 return (req.size > 0) && (req.size <= 8192 * 8192);
2119 }
2120
2121 return false;
2122 }
2123
2124 GrBackendRenderTarget GrVkGpu::createTestingOnlyBackendRenderTarget(SkISize dimensions,
2125 GrColorType ct,
2126 int sampleCnt,
2127 GrProtected isProtected) {
2128 if (dimensions.width() > this->caps()->maxRenderTargetSize() ||
2129 dimensions.height() > this->caps()->maxRenderTargetSize()) {
2130 return {};
2131 }
2132
2133 VkFormat vkFormat = this->vkCaps().getFormatFromColorType(ct);
2134
2135 GrVkImageInfo info;
2136 if (!this->createVkImageForBackendSurface(vkFormat, dimensions, sampleCnt, GrTexturable::kNo,
2137 GrRenderable::kYes, GrMipmapped::kNo, &info,
2138 isProtected)) {
2139 return {};
2140 }
2141 return GrBackendRenderTarget(dimensions.width(), dimensions.height(), 0, info);
2142 }
2143
2144 void GrVkGpu::deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget& rt) {
2145 SkASSERT(GrBackendApi::kVulkan == rt.fBackend);
2146
2147 GrVkImageInfo info;
2148 if (rt.getVkImageInfo(&info)) {
2149 // something in the command buffer may still be using this, so force submit
2150 SkAssertResult(this->submitCommandBuffer(kForce_SyncQueue));
2151 GrVkImage::DestroyImageInfo(this, const_cast<GrVkImageInfo*>(&info));
2152 }
2153 }
2154 #endif
2155
2156 ////////////////////////////////////////////////////////////////////////////////
2157
2158 void GrVkGpu::addBufferMemoryBarrier(const GrManagedResource* resource,
2159 VkPipelineStageFlags srcStageMask,
2160 VkPipelineStageFlags dstStageMask,
2161 bool byRegion,
2162 VkBufferMemoryBarrier* barrier) const {
2163 if (!this->currentCommandBuffer()) {
2164 return;
2165 }
2166 SkASSERT(resource);
2167 this->currentCommandBuffer()->pipelineBarrier(this,
2168 resource,
2169 srcStageMask,
2170 dstStageMask,
2171 byRegion,
2172 GrVkCommandBuffer::kBufferMemory_BarrierType,
2173 barrier);
2174 }
2175 void GrVkGpu::addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
2176 VkPipelineStageFlags dstStageMask,
2177 bool byRegion,
2178 VkBufferMemoryBarrier* barrier) const {
2179 if (!this->currentCommandBuffer()) {
2180 return;
2181 }
2182     // We don't pass a resource in to the command buffer here. The command buffer would only use
2183     // it to hold a ref, but every place where we add a buffer memory barrier we are doing some other
2184 // command with the buffer on the command buffer. Thus those other commands will already cause
2185 // the command buffer to be holding a ref to the buffer.
2186 this->currentCommandBuffer()->pipelineBarrier(this,
2187 /*resource=*/nullptr,
2188 srcStageMask,
2189 dstStageMask,
2190 byRegion,
2191 GrVkCommandBuffer::kBufferMemory_BarrierType,
2192 barrier);
2193 }
2194
2195 void GrVkGpu::addImageMemoryBarrier(const GrManagedResource* resource,
2196 VkPipelineStageFlags srcStageMask,
2197 VkPipelineStageFlags dstStageMask,
2198 bool byRegion,
2199 VkImageMemoryBarrier* barrier) const {
2200 // If we are in the middle of destroying or abandoning the context we may hit a release proc
2201 // that triggers the destruction of a GrVkImage. This could cause us to try and transfer the
2202     // VkImage back to the original queue. In this state we don't submit any more work and we may not
2203 // have a current command buffer. Thus we won't do the queue transfer.
2204 if (!this->currentCommandBuffer()) {
2205 return;
2206 }
2207 SkASSERT(resource);
2208 this->currentCommandBuffer()->pipelineBarrier(this,
2209 resource,
2210 srcStageMask,
2211 dstStageMask,
2212 byRegion,
2213 GrVkCommandBuffer::kImageMemory_BarrierType,
2214 barrier);
2215 }
2216
2217 void GrVkGpu::prepareSurfacesForBackendAccessAndStateUpdates(
2218 SkSpan<GrSurfaceProxy*> proxies,
2219 SkSurface::BackendSurfaceAccess access,
2220 const skgpu::MutableTextureState* newState) {
2221 // Submit the current command buffer to the Queue. Whether we inserted semaphores or not does
2222     // not affect what we do here.
2223 if (!proxies.empty() && (access == SkSurface::BackendSurfaceAccess::kPresent || newState)) {
2224 // We currently don't support passing in new surface state for multiple proxies here. The
2225 // only time we have multiple proxies is if we are flushing a yuv SkImage which won't have
2226         // state updates anyway. Additionally, if we have a newState then we must not have any
2227         // BackendSurfaceAccess.
2228 SkASSERT(!newState || proxies.size() == 1);
2229 SkASSERT(!newState || access == SkSurface::BackendSurfaceAccess::kNoAccess);
2230 GrVkImage* image;
2231 for (GrSurfaceProxy* proxy : proxies) {
2232 SkASSERT(proxy->isInstantiated());
2233 if (GrTexture* tex = proxy->peekTexture()) {
2234 image = static_cast<GrVkTexture*>(tex)->textureImage();
2235 } else {
2236 GrRenderTarget* rt = proxy->peekRenderTarget();
2237 SkASSERT(rt);
2238 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
2239 image = vkRT->externalAttachment();
2240 }
2241 if (newState) {
2242 const skgpu::VulkanMutableTextureState& newInfo = newState->fVkState;
2243 set_layout_and_queue_from_mutable_state(this, image, newInfo);
2244 } else {
2245 SkASSERT(access == SkSurface::BackendSurfaceAccess::kPresent);
2246 image->prepareForPresent(this);
2247 }
2248 }
2249 }
2250 }
2251
2252 void GrVkGpu::addFinishedProc(GrGpuFinishedProc finishedProc,
2253 GrGpuFinishedContext finishedContext) {
2254 SkASSERT(finishedProc);
2255 this->addFinishedCallback(skgpu::RefCntedCallback::Make(finishedProc, finishedContext));
2256 }
2257
2258 void GrVkGpu::addFinishedCallback(sk_sp<skgpu::RefCntedCallback> finishedCallback) {
2259 SkASSERT(finishedCallback);
2260 fResourceProvider.addFinishedProcToActiveCommandBuffers(std::move(finishedCallback));
2261 }
2262
2263 void GrVkGpu::takeOwnershipOfBuffer(sk_sp<GrGpuBuffer> buffer) {
2264 this->currentCommandBuffer()->addGrBuffer(std::move(buffer));
2265 }
2266
2267 bool GrVkGpu::onSubmitToGpu(bool syncCpu) {
2268 if (syncCpu) {
2269 return this->submitCommandBuffer(kForce_SyncQueue);
2270 } else {
2271 return this->submitCommandBuffer(kSkip_SyncQueue);
2272 }
2273 }
2274
2275 void GrVkGpu::finishOutstandingGpuWork() {
2276 VK_CALL(QueueWaitIdle(fQueue));
2277
2278 if (this->vkCaps().mustSyncCommandBuffersWithQueue()) {
2279 fResourceProvider.forceSyncAllCommandBuffers();
2280 }
2281 }
2282
2283 void GrVkGpu::onReportSubmitHistograms() {
2284 #if SK_HISTOGRAMS_ENABLED
2285 uint64_t allocatedMemory = 0, usedMemory = 0;
2286 std::tie(allocatedMemory, usedMemory) = fMemoryAllocator->totalAllocatedAndUsedMemory();
2287 SkASSERT(usedMemory <= allocatedMemory);
2288 if (allocatedMemory > 0) {
2289 SK_HISTOGRAM_PERCENTAGE("VulkanMemoryAllocator.PercentUsed",
2290 (usedMemory * 100) / allocatedMemory);
2291 }
2292     // allocatedMemory is in bytes and needs to be reported in kilobytes. SK_HISTOGRAM_MEMORY_KB
2293 // supports samples up to around 500MB which should support the amounts of memory we allocate.
2294 SK_HISTOGRAM_MEMORY_KB("VulkanMemoryAllocator.AmountAllocated", allocatedMemory >> 10);
2295 #endif // SK_HISTOGRAMS_ENABLED
2296 }
2297
2298 void GrVkGpu::copySurfaceAsCopyImage(GrSurface* dst,
2299 GrSurface* src,
2300 GrVkImage* dstImage,
2301 GrVkImage* srcImage,
2302 const SkIRect& srcRect,
2303 const SkIPoint& dstPoint) {
2304 if (!this->currentCommandBuffer()) {
2305 return;
2306 }
2307
2308 #ifdef SK_DEBUG
2309 int dstSampleCnt = dstImage->numSamples();
2310 int srcSampleCnt = srcImage->numSamples();
2311 bool dstHasYcbcr = dstImage->ycbcrConversionInfo().isValid();
2312 bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid();
2313 VkFormat dstFormat = dstImage->imageFormat();
2314 VkFormat srcFormat;
2315     SkAssertResult(src->backendFormat().asVkFormat(&srcFormat));
2316 SkASSERT(this->vkCaps().canCopyImage(dstFormat, dstSampleCnt, dstHasYcbcr,
2317 srcFormat, srcSampleCnt, srcHasYcbcr));
2318 #endif
2319 if (src->isProtected() && !dst->isProtected()) {
2320 SkDebugf("Can't copy from protected memory to non-protected");
2321 return;
2322 }
2323
2324     // These flags are for flushing/invalidating caches; for the dst image it doesn't matter if
2325     // the cache is flushed since it is only being written to.
2326 dstImage->setImageLayout(this,
2327 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2328 VK_ACCESS_TRANSFER_WRITE_BIT,
2329 VK_PIPELINE_STAGE_TRANSFER_BIT,
2330 false);
2331
2332 srcImage->setImageLayout(this,
2333 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2334 VK_ACCESS_TRANSFER_READ_BIT,
2335 VK_PIPELINE_STAGE_TRANSFER_BIT,
2336 false);
2337
2338 VkImageCopy copyRegion;
2339     memset(&copyRegion, 0, sizeof(VkImageCopy));
2340 copyRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
2341 copyRegion.srcOffset = { srcRect.fLeft, srcRect.fTop, 0 };
2342 copyRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
2343 copyRegion.dstOffset = { dstPoint.fX, dstPoint.fY, 0 };
2344 copyRegion.extent = { (uint32_t)srcRect.width(), (uint32_t)srcRect.height(), 1 };
2345
2346 this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(src));
2347 this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(dst));
2348 this->currentCommandBuffer()->copyImage(this,
2349 srcImage,
2350 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2351 dstImage,
2352 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2353 1,
2354                                             &copyRegion);
2355
2356 SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
2357 srcRect.width(), srcRect.height());
2358 // The rect is already in device space so we pass in kTopLeft so no flip is done.
2359 this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
2360 }
2361
2362 void GrVkGpu::copySurfaceAsBlit(GrSurface* dst,
2363 GrSurface* src,
2364 GrVkImage* dstImage,
2365 GrVkImage* srcImage,
2366 const SkIRect& srcRect,
2367 const SkIRect& dstRect,
2368 GrSamplerState::Filter filter) {
2369 if (!this->currentCommandBuffer()) {
2370 return;
2371 }
2372
2373 #ifdef SK_DEBUG
2374 int dstSampleCnt = dstImage->numSamples();
2375 int srcSampleCnt = srcImage->numSamples();
2376 bool dstHasYcbcr = dstImage->ycbcrConversionInfo().isValid();
2377 bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid();
2378 VkFormat dstFormat = dstImage->imageFormat();
2379 VkFormat srcFormat;
2380     SkAssertResult(src->backendFormat().asVkFormat(&srcFormat));
2381 SkASSERT(this->vkCaps().canCopyAsBlit(dstFormat,
2382 dstSampleCnt,
2383 dstImage->isLinearTiled(),
2384 dstHasYcbcr,
2385 srcFormat,
2386 srcSampleCnt,
2387 srcImage->isLinearTiled(),
2388 srcHasYcbcr));
2389
2390 #endif
2391 if (src->isProtected() && !dst->isProtected()) {
2392 SkDebugf("Can't copy from protected memory to non-protected");
2393 return;
2394 }
2395
2396 dstImage->setImageLayout(this,
2397 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2398 VK_ACCESS_TRANSFER_WRITE_BIT,
2399 VK_PIPELINE_STAGE_TRANSFER_BIT,
2400 false);
2401
2402 srcImage->setImageLayout(this,
2403 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2404 VK_ACCESS_TRANSFER_READ_BIT,
2405 VK_PIPELINE_STAGE_TRANSFER_BIT,
2406 false);
2407
2408 VkImageBlit blitRegion;
2409 memset(&blitRegion, 0, sizeof(VkImageBlit));
2410 blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
2411 blitRegion.srcOffsets[0] = { srcRect.fLeft, srcRect.fTop, 0 };
2412 blitRegion.srcOffsets[1] = { srcRect.fRight, srcRect.fBottom, 1 };
2413 blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
2414 blitRegion.dstOffsets[0] = { dstRect.fLeft, dstRect.fTop, 0 };
2415 blitRegion.dstOffsets[1] = { dstRect.fRight, dstRect.fBottom, 1 };
2416
2417 this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(src));
2418 this->currentCommandBuffer()->addGrSurface(sk_ref_sp<const GrSurface>(dst));
2419 this->currentCommandBuffer()->blitImage(this,
2420 *srcImage,
2421 *dstImage,
2422 1,
2423 &blitRegion,
2424 filter == GrSamplerState::Filter::kNearest ?
2425 VK_FILTER_NEAREST : VK_FILTER_LINEAR);
2426
2427 // The rect is already in device space so we pass in kTopLeft so no flip is done.
2428 this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
2429 }
2430
2431 void GrVkGpu::copySurfaceAsResolve(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
2432 const SkIPoint& dstPoint) {
2433 if (src->isProtected() && !dst->isProtected()) {
2434 SkDebugf("Can't copy from protected memory to non-protected");
2435 return;
2436 }
2437 GrVkRenderTarget* srcRT = static_cast<GrVkRenderTarget*>(src->asRenderTarget());
2438 this->resolveImage(dst, srcRT, srcRect, dstPoint);
2439 SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
2440 srcRect.width(), srcRect.height());
2441 // The rect is already in device space so we pass in kTopLeft so no flip is done.
2442 this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
2443 }
2444
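// Chooses the cheapest available copy path: when the rects are the same size, prefer a resolve
// (for MSAA sources) or vkCmdCopyImage; otherwise, or when neither is supported, fall back to
// vkCmdBlitImage, which can also scale and filter.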
2445 bool GrVkGpu::onCopySurface(GrSurface* dst, const SkIRect& dstRect,
2446 GrSurface* src, const SkIRect& srcRect,
2447 GrSamplerState::Filter filter) {
2448 #ifdef SK_DEBUG
2449 if (GrVkRenderTarget* srcRT = static_cast<GrVkRenderTarget*>(src->asRenderTarget())) {
2450 SkASSERT(!srcRT->wrapsSecondaryCommandBuffer());
2451 }
2452 if (GrVkRenderTarget* dstRT = static_cast<GrVkRenderTarget*>(dst->asRenderTarget())) {
2453 SkASSERT(!dstRT->wrapsSecondaryCommandBuffer());
2454 }
2455 #endif
2456 if (src->isProtected() && !dst->isProtected()) {
2457 SkDebugf("Can't copy from protected memory to non-protected");
2458 return false;
2459 }
2460
2461 GrVkImage* dstImage;
2462 GrVkImage* srcImage;
2463 GrRenderTarget* dstRT = dst->asRenderTarget();
2464 if (dstRT) {
2465 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(dstRT);
2466 if (vkRT->wrapsSecondaryCommandBuffer()) {
2467 return false;
2468 }
2469 // This will technically return true for single sample rts that used DMSAA in which case we
2470 // don't have to pick the resolve attachment. But in that case the resolve and color
2471 // attachments will be the same anyways.
2472 if (this->vkCaps().renderTargetSupportsDiscardableMSAA(vkRT)) {
2473 dstImage = vkRT->resolveAttachment();
2474 } else {
2475 dstImage = vkRT->colorAttachment();
2476 }
2477 } else if (dst->asTexture()) {
2478 dstImage = static_cast<GrVkTexture*>(dst->asTexture())->textureImage();
2479 } else {
2480         // The surface is a GrAttachment already
2481 dstImage = static_cast<GrVkImage*>(dst);
2482 }
2483 GrRenderTarget* srcRT = src->asRenderTarget();
2484 if (srcRT) {
2485 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(srcRT);
2486 // This will technically return true for single sample rts that used DMSAA in which case we
2487 // don't have to pick the resolve attachment. But in that case the resolve and color
2488 // attachments will be the same anyways.
2489 if (this->vkCaps().renderTargetSupportsDiscardableMSAA(vkRT)) {
2490 srcImage = vkRT->resolveAttachment();
2491 } else {
2492 srcImage = vkRT->colorAttachment();
2493 }
2494 } else if (src->asTexture()) {
2495 SkASSERT(src->asTexture());
2496 srcImage = static_cast<GrVkTexture*>(src->asTexture())->textureImage();
2497 } else {
2498         // The surface is a GrAttachment already
2499 srcImage = static_cast<GrVkImage*>(src);
2500 }
2501
2502 VkFormat dstFormat = dstImage->imageFormat();
2503 VkFormat srcFormat = srcImage->imageFormat();
2504
2505 int dstSampleCnt = dstImage->numSamples();
2506 int srcSampleCnt = srcImage->numSamples();
2507
2508 bool dstHasYcbcr = dstImage->ycbcrConversionInfo().isValid();
2509 bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid();
2510
2511 if (srcRect.size() == dstRect.size()) {
2512 // Prefer resolves or copy-image commands when there is no scaling
2513 const SkIPoint dstPoint = dstRect.topLeft();
2514 if (this->vkCaps().canCopyAsResolve(dstFormat, dstSampleCnt, dstHasYcbcr,
2515 srcFormat, srcSampleCnt, srcHasYcbcr)) {
2516 this->copySurfaceAsResolve(dst, src, srcRect, dstPoint);
2517 return true;
2518 }
2519
2520 if (this->vkCaps().canCopyImage(dstFormat, dstSampleCnt, dstHasYcbcr,
2521 srcFormat, srcSampleCnt, srcHasYcbcr)) {
2522 this->copySurfaceAsCopyImage(dst, src, dstImage, srcImage, srcRect, dstPoint);
2523 return true;
2524 }
2525 }
2526
2527 if (this->vkCaps().canCopyAsBlit(dstFormat,
2528 dstSampleCnt,
2529 dstImage->isLinearTiled(),
2530 dstHasYcbcr,
2531 srcFormat,
2532 srcSampleCnt,
2533 srcImage->isLinearTiled(),
2534 srcHasYcbcr)) {
2535 this->copySurfaceAsBlit(dst, src, dstImage, srcImage, srcRect, dstRect, filter);
2536 return true;
2537 }
2538
2539 return false;
2540 }
2541
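// Reads pixels back through a host-visible transfer buffer: the image is copied into the buffer,
// the command buffer is submitted with a forced CPU sync, and the tightly packed rows are then
// copied into the caller's memory. Mapping the image directly is not attempted, presumably
// because the images involved are normally optimally tiled.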
2542 bool GrVkGpu::onReadPixels(GrSurface* surface,
2543 SkIRect rect,
2544 GrColorType surfaceColorType,
2545 GrColorType dstColorType,
2546 void* buffer,
2547 size_t rowBytes) {
2548 if (surface->isProtected()) {
2549 return false;
2550 }
2551
2552 if (!this->currentCommandBuffer()) {
2553 return false;
2554 }
2555
2556 GrVkImage* image = nullptr;
2557 GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(surface->asRenderTarget());
2558 if (rt) {
2559         // Reading from render targets that wrap a secondary command buffer is not allowed since
2560         // it would require us to know the VkImage, which we don't have, and would also require us
2561         // to stop and restart the VkRenderPass, which we don't have access to.
2562 if (rt->wrapsSecondaryCommandBuffer()) {
2563 return false;
2564 }
2565 image = rt->nonMSAAAttachment();
2566 } else {
2567 image = static_cast<GrVkTexture*>(surface->asTexture())->textureImage();
2568 }
2569
2570 if (!image) {
2571 return false;
2572 }
2573
2574 if (dstColorType == GrColorType::kUnknown ||
2575 dstColorType != this->vkCaps().transferColorType(image->imageFormat(), surfaceColorType)) {
2576 return false;
2577 }
2578
2579     // Change the layout of our target so it can be used as a copy source
2580 image->setImageLayout(this,
2581 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2582 VK_ACCESS_TRANSFER_READ_BIT,
2583 VK_PIPELINE_STAGE_TRANSFER_BIT,
2584 false);
2585
2586 size_t bpp = GrColorTypeBytesPerPixel(dstColorType);
2587 if (skgpu::VkFormatBytesPerBlock(image->imageFormat()) != bpp) {
2588 return false;
2589 }
2590 size_t tightRowBytes = bpp*rect.width();
2591
2592 VkBufferImageCopy region;
2593     memset(&region, 0, sizeof(VkBufferImageCopy));
2594 VkOffset3D offset = { rect.left(), rect.top(), 0 };
2595 region.imageOffset = offset;
2596 region.imageExtent = { (uint32_t)rect.width(), (uint32_t)rect.height(), 1 };
2597
2598 size_t transBufferRowBytes = bpp * region.imageExtent.width;
2599 size_t imageRows = region.imageExtent.height;
2600 GrResourceProvider* resourceProvider = this->getContext()->priv().resourceProvider();
2601 sk_sp<GrGpuBuffer> transferBuffer = resourceProvider->createBuffer(
2602 transBufferRowBytes * imageRows,
2603 GrGpuBufferType::kXferGpuToCpu,
2604 kDynamic_GrAccessPattern,
2605 GrResourceProvider::ZeroInit::kNo);
2606
2607 if (!transferBuffer) {
2608 return false;
2609 }
2610
2611 GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(transferBuffer.get());
2612
2613 // Copy the image to a buffer so we can map it to cpu memory
2614 region.bufferOffset = 0;
2615 region.bufferRowLength = 0; // Forces RowLength to be width. We handle the rowBytes below.
2616 region.bufferImageHeight = 0; // Forces height to be tightly packed. Only useful for 3d images.
2617 region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
2618
2619 this->currentCommandBuffer()->copyImageToBuffer(this,
2620 image,
2621 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2622 transferBuffer,
2623 1,
2624                                                      &region);
2625
2626 // make sure the copy to buffer has finished
2627 vkBuffer->addMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT,
2628 VK_ACCESS_HOST_READ_BIT,
2629 VK_PIPELINE_STAGE_TRANSFER_BIT,
2630 VK_PIPELINE_STAGE_HOST_BIT,
2631 false);
2632
2633 // We need to submit the current command buffer to the Queue and make sure it finishes before
2634 // we can copy the data out of the buffer.
2635 if (!this->submitCommandBuffer(kForce_SyncQueue)) {
2636 return false;
2637 }
2638 void* mappedMemory = transferBuffer->map();
2639 if (!mappedMemory) {
2640 return false;
2641 }
2642
2643 SkRectMemcpy(buffer, rowBytes, mappedMemory, transBufferRowBytes, tightRowBytes, rect.height());
2644
2645 transferBuffer->unmap();
2646 return true;
2647 }
2648
2649 bool GrVkGpu::beginRenderPass(const GrVkRenderPass* renderPass,
2650 sk_sp<const GrVkFramebuffer> framebuffer,
2651 const VkClearValue* colorClear,
2652 const GrSurface* target,
2653 const SkIRect& renderPassBounds,
2654 bool forSecondaryCB) {
2655 if (!this->currentCommandBuffer()) {
2656 return false;
2657 }
2658 SkASSERT (!framebuffer->isExternal());
2659
2660 #ifdef SK_DEBUG
2661 uint32_t index;
2662 bool result = renderPass->colorAttachmentIndex(&index);
2663 SkASSERT(result && 0 == index);
2664 result = renderPass->stencilAttachmentIndex(&index);
2665 if (result) {
2666 SkASSERT(1 == index);
2667 }
2668 #endif
2669 VkClearValue clears[3];
2670 int stencilIndex = renderPass->hasResolveAttachment() ? 2 : 1;
2671 clears[0].color = colorClear->color;
2672 clears[stencilIndex].depthStencil.depth = 0.0f;
2673 clears[stencilIndex].depthStencil.stencil = 0;
2674
2675 return this->currentCommandBuffer()->beginRenderPass(
2676 this, renderPass, std::move(framebuffer), clears, target, renderPassBounds, forSecondaryCB);
2677 }
2678
2679 void GrVkGpu::endRenderPass(GrRenderTarget* target, GrSurfaceOrigin origin,
2680 const SkIRect& bounds) {
2681 // We had a command buffer when we started the render pass, we should have one now as well.
2682 SkASSERT(this->currentCommandBuffer());
2683 this->currentCommandBuffer()->endRenderPass(this);
2684 this->didWriteToSurface(target, origin, &bounds);
2685 }
2686
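// Translates a VkResult into success/failure while latching fatal conditions: device loss marks
// the GrVkGpu as lost and out-of-memory results set the OOM flag, so callers further up the
// stack can react appropriately.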
2687 bool GrVkGpu::checkVkResult(VkResult result) {
2688 switch (result) {
2689 case VK_SUCCESS:
2690 return true;
2691 case VK_ERROR_DEVICE_LOST:
2692 fDeviceIsLost = true;
2693 return false;
2694 case VK_ERROR_OUT_OF_DEVICE_MEMORY:
2695 case VK_ERROR_OUT_OF_HOST_MEMORY:
2696 this->setOOMed();
2697 return false;
2698 default:
2699 return false;
2700 }
2701 }
2702
2703 void GrVkGpu::submitSecondaryCommandBuffer(std::unique_ptr<GrVkSecondaryCommandBuffer> buffer) {
2704 if (!this->currentCommandBuffer()) {
2705 return;
2706 }
2707 this->currentCommandBuffer()->executeCommands(this, std::move(buffer));
2708 }
2709
2710 void GrVkGpu::submit(GrOpsRenderPass* renderPass) {
2711 SkASSERT(fCachedOpsRenderPass.get() == renderPass);
2712
2713 fCachedOpsRenderPass->submit();
2714 fCachedOpsRenderPass->reset();
2715 }
2716
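// Creates a fence and signals it via an empty queue submission, so a later waitFence() call
// effectively tests whether all work submitted to the queue before this point has completed.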
2717 GrFence SK_WARN_UNUSED_RESULT GrVkGpu::insertFence() {
2718 VkFenceCreateInfo createInfo;
2719 memset(&createInfo, 0, sizeof(VkFenceCreateInfo));
2720 createInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
2721 createInfo.pNext = nullptr;
2722 createInfo.flags = 0;
2723 VkFence fence = VK_NULL_HANDLE;
2724 VkResult result;
2725
2726 VK_CALL_RET(result, CreateFence(this->device(), &createInfo, nullptr, &fence));
2727 if (result != VK_SUCCESS) {
2728 return 0;
2729 }
2730 VK_CALL_RET(result, QueueSubmit(this->queue(), 0, nullptr, fence));
2731 if (result != VK_SUCCESS) {
2732 VK_CALL(DestroyFence(this->device(), fence, nullptr));
2733 return 0;
2734 }
2735
2736 static_assert(sizeof(GrFence) >= sizeof(VkFence));
2737 return (GrFence)fence;
2738 }
2739
2740 bool GrVkGpu::waitFence(GrFence fence) {
2741 SkASSERT(VK_NULL_HANDLE != (VkFence)fence);
2742
2743 VkResult result;
2744 VK_CALL_RET(result, WaitForFences(this->device(), 1, (VkFence*)&fence, VK_TRUE, 0));
2745 return (VK_SUCCESS == result);
2746 }
2747
2748 void GrVkGpu::deleteFence(GrFence fence) {
2749 VK_CALL(DestroyFence(this->device(), (VkFence)fence, nullptr));
2750 }
2751
2752 std::unique_ptr<GrSemaphore> SK_WARN_UNUSED_RESULT GrVkGpu::makeSemaphore(bool isOwned) {
2753 return GrVkSemaphore::Make(this, isOwned);
2754 }
2755
2756 std::unique_ptr<GrSemaphore> GrVkGpu::wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
2757 GrSemaphoreWrapType wrapType,
2758 GrWrapOwnership ownership) {
2759 return GrVkSemaphore::MakeWrapped(this, semaphore.vkSemaphore(), wrapType, ownership);
2760 }
2761
2762 void GrVkGpu::insertSemaphore(GrSemaphore* semaphore) {
2763 SkASSERT(semaphore);
2764
2765 GrVkSemaphore* vkSem = static_cast<GrVkSemaphore*>(semaphore);
2766
2767 GrVkSemaphore::Resource* resource = vkSem->getResource();
2768 if (resource->shouldSignal()) {
2769 resource->ref();
2770 fSemaphoresToSignal.push_back(resource);
2771 }
2772 }
2773
2774 void GrVkGpu::waitSemaphore(GrSemaphore* semaphore) {
2775 SkASSERT(semaphore);
2776
2777 GrVkSemaphore* vkSem = static_cast<GrVkSemaphore*>(semaphore);
2778
2779 GrVkSemaphore::Resource* resource = vkSem->getResource();
2780 if (resource->shouldWait()) {
2781 resource->ref();
2782 fSemaphoresToWaitOn.push_back(resource);
2783 }
2784 }
2785
2786 std::unique_ptr<GrSemaphore> GrVkGpu::prepareTextureForCrossContextUsage(GrTexture* texture) {
2787 SkASSERT(texture);
2788 GrVkImage* vkTexture = static_cast<GrVkTexture*>(texture)->textureImage();
2789 vkTexture->setImageLayout(this,
2790 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
2791 VK_ACCESS_SHADER_READ_BIT,
2792 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
2793 false);
2794 // TODO: should we have a way to notify the caller that this has failed? Currently if the submit
2795 // fails (caused by DEVICE_LOST) this will just cause us to fail the next use of the gpu.
2796 // Eventually we will abandon the whole GPU if this fails.
2797 this->submitToGpu(false);
2798
2799 // The image layout change serves as a barrier, so no semaphore is needed.
2800 // If we ever decide we need to return a semaphore here, we need to make sure GrVkSemaphore is
2801 // thread safe so that only the first thread that tries to use the semaphore actually submits
2802 // it. This additionally would also require thread safety in command buffer submissions to
2803 // queues in general.
2804 return nullptr;
2805 }
2806
2807 void GrVkGpu::addDrawable(std::unique_ptr<SkDrawable::GpuDrawHandler> drawable) {
2808 fDrawables.emplace_back(std::move(drawable));
2809 }
2810
2811 void GrVkGpu::storeVkPipelineCacheData() {
2812 if (this->getContext()->priv().getPersistentCache()) {
2813 this->resourceProvider().storePipelineCacheData();
2814 }
2815 }
2816