1 /*
2 * Copyright 2022 Google LLC
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "src/gpu/graphite/vk/VulkanCommandBuffer.h"
9
10 #include "include/gpu/MutableTextureState.h"
11 #include "include/gpu/graphite/BackendSemaphore.h"
12 #include "include/gpu/graphite/vk/VulkanGraphiteTypes.h"
13 #include "include/gpu/vk/VulkanMutableTextureState.h"
14 #include "include/private/base/SkTArray.h"
15 #include "src/gpu/DataUtils.h"
16 #include "src/gpu/graphite/ContextUtils.h"
17 #include "src/gpu/graphite/DescriptorData.h"
18 #include "src/gpu/graphite/Log.h"
19 #include "src/gpu/graphite/RenderPassDesc.h"
20 #include "src/gpu/graphite/Surface_Graphite.h"
21 #include "src/gpu/graphite/TextureProxy.h"
22 #include "src/gpu/graphite/UniformManager.h"
23 #include "src/gpu/graphite/vk/VulkanBuffer.h"
24 #include "src/gpu/graphite/vk/VulkanCaps.h"
25 #include "src/gpu/graphite/vk/VulkanDescriptorSet.h"
26 #include "src/gpu/graphite/vk/VulkanFramebuffer.h"
27 #include "src/gpu/graphite/vk/VulkanGraphiteUtils.h"
28 #include "src/gpu/graphite/vk/VulkanRenderPass.h"
29 #include "src/gpu/graphite/vk/VulkanSampler.h"
30 #include "src/gpu/graphite/vk/VulkanSharedContext.h"
31 #include "src/gpu/graphite/vk/VulkanTexture.h"
32 #include "src/gpu/vk/VulkanUtilsPriv.h"
33
34 using namespace skia_private;
35
36 namespace skgpu::graphite {
37
38 class VulkanDescriptorSet;
39
40 std::unique_ptr<VulkanCommandBuffer> VulkanCommandBuffer::Make(
41 const VulkanSharedContext* sharedContext,
42 VulkanResourceProvider* resourceProvider,
43 Protected isProtected) {
44 // Create VkCommandPool
45 VkCommandPoolCreateFlags cmdPoolCreateFlags = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT;
46 if (isProtected == Protected::kYes) {
47 cmdPoolCreateFlags |= VK_COMMAND_POOL_CREATE_PROTECTED_BIT;
48 }
49
50 const VkCommandPoolCreateInfo cmdPoolInfo = {
51 VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, // sType
52 nullptr, // pNext
53 cmdPoolCreateFlags, // CmdPoolCreateFlags
54 sharedContext->queueIndex(), // queueFamilyIndex
55 };
56 VkResult result;
57 VkCommandPool pool;
58 VULKAN_CALL_RESULT(sharedContext,
59 result,
60 CreateCommandPool(sharedContext->device(), &cmdPoolInfo, nullptr, &pool));
61 if (result != VK_SUCCESS) {
62 return nullptr;
63 }
64
65 const VkCommandBufferAllocateInfo cmdInfo = {
66 VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, // sType
67 nullptr, // pNext
68 pool, // commandPool
69 VK_COMMAND_BUFFER_LEVEL_PRIMARY, // level
70 1 // bufferCount
71 };
72
73 VkCommandBuffer primaryCmdBuffer;
74 VULKAN_CALL_RESULT(
75 sharedContext,
76 result,
77 AllocateCommandBuffers(sharedContext->device(), &cmdInfo, &primaryCmdBuffer));
78 if (result != VK_SUCCESS) {
79 VULKAN_CALL(sharedContext->interface(),
80 DestroyCommandPool(sharedContext->device(), pool, nullptr));
81 return nullptr;
82 }
83
84 return std::unique_ptr<VulkanCommandBuffer>(new VulkanCommandBuffer(pool,
85 primaryCmdBuffer,
86 sharedContext,
87 resourceProvider,
88 isProtected));
89 }
90
91 VulkanCommandBuffer::VulkanCommandBuffer(VkCommandPool pool,
92 VkCommandBuffer primaryCommandBuffer,
93 const VulkanSharedContext* sharedContext,
94 VulkanResourceProvider* resourceProvider,
95 Protected isProtected)
96 : CommandBuffer(isProtected)
97 , fPool(pool)
98 , fPrimaryCommandBuffer(primaryCommandBuffer)
99 , fSharedContext(sharedContext)
100 , fResourceProvider(resourceProvider) {
101 // When making a new command buffer, we automatically begin the command buffer
102 this->begin();
103 }
104
105 VulkanCommandBuffer::~VulkanCommandBuffer() {
106 if (fActive) {
107 // Need to end command buffer before deleting it
108 VULKAN_CALL(fSharedContext->interface(), EndCommandBuffer(fPrimaryCommandBuffer));
109 fActive = false;
110 }
111
112 if (VK_NULL_HANDLE != fSubmitFence) {
113 VULKAN_CALL(fSharedContext->interface(),
114 DestroyFence(fSharedContext->device(), fSubmitFence, nullptr));
115 }
116 // This should delete any command buffers as well.
117 VULKAN_CALL(fSharedContext->interface(),
118 DestroyCommandPool(fSharedContext->device(), fPool, nullptr));
119 }
120
121 void VulkanCommandBuffer::onResetCommandBuffer() {
122 SkASSERT(!fActive);
123 VULKAN_CALL_ERRCHECK(fSharedContext, ResetCommandPool(fSharedContext->device(), fPool, 0));
124 fActiveGraphicsPipeline = nullptr;
125 fBindUniformBuffers = true;
126 fBoundIndexBuffer = VK_NULL_HANDLE;
127 fBoundIndexBufferOffset = 0;
128 fBoundIndirectBuffer = VK_NULL_HANDLE;
129 fBoundIndirectBufferOffset = 0;
130 fTextureSamplerDescSetToBind = VK_NULL_HANDLE;
131 fNumTextureSamplers = 0;
132 fUniformBuffersToBind.fill({});
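// -1 is a sentinel outside the [0, 1] range Skia uses for blend constants, so the first
// setBlendConstants() call after a reset always re-records them on the command buffer.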
133 for (int i = 0; i < 4; ++i) {
134 fCachedBlendConstant[i] = -1.0;
135 }
136 for (auto& boundInputBuffer : fBoundInputBuffers) {
137 boundInputBuffer = VK_NULL_HANDLE;
138 }
139 for (auto& boundInputOffset : fBoundInputBufferOffsets) {
140 boundInputOffset = 0;
141 }
142 }
143
144 bool VulkanCommandBuffer::setNewCommandBufferResources() {
145 this->begin();
146 return true;
147 }
148
149 void VulkanCommandBuffer::begin() {
150 SkASSERT(!fActive);
151 VkCommandBufferBeginInfo cmdBufferBeginInfo;
152 memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
153 cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
154 cmdBufferBeginInfo.pNext = nullptr;
155 cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
156 cmdBufferBeginInfo.pInheritanceInfo = nullptr;
157
158 VULKAN_CALL_ERRCHECK(fSharedContext,
159 BeginCommandBuffer(fPrimaryCommandBuffer, &cmdBufferBeginInfo));
160 fActive = true;
161 }
162
163 void VulkanCommandBuffer::end() {
164 SkASSERT(fActive);
165 SkASSERT(!fActiveRenderPass);
166
167 this->submitPipelineBarriers();
168
169 VULKAN_CALL_ERRCHECK(fSharedContext, EndCommandBuffer(fPrimaryCommandBuffer));
170
171 fActive = false;
172 }
173
174 void VulkanCommandBuffer::addWaitSemaphores(size_t numWaitSemaphores,
175 const BackendSemaphore* waitSemaphores) {
176 if (!waitSemaphores) {
177 SkASSERT(numWaitSemaphores == 0);
178 return;
179 }
180
181 for (size_t i = 0; i < numWaitSemaphores; ++i) {
182 auto& semaphore = waitSemaphores[i];
183 if (semaphore.isValid() && semaphore.backend() == BackendApi::kVulkan) {
184 fWaitSemaphores.push_back(BackendSemaphores::GetVkSemaphore(semaphore));
185 }
186 }
187 }
188
189 void VulkanCommandBuffer::addSignalSemaphores(size_t numSignalSemaphores,
190 const BackendSemaphore* signalSemaphores) {
191 if (!signalSemaphores) {
192 SkASSERT(numSignalSemaphores == 0);
193 return;
194 }
195
196 for (size_t i = 0; i < numSignalSemaphores; ++i) {
197 auto& semaphore = signalSemaphores[i];
198 if (semaphore.isValid() && semaphore.backend() == BackendApi::kVulkan) {
199 fSignalSemaphores.push_back(BackendSemaphores::GetVkSemaphore(semaphore));
200 }
201 }
202 }
203
204 void VulkanCommandBuffer::prepareSurfaceForStateUpdate(SkSurface* targetSurface,
205 const MutableTextureState* newState) {
206 TextureProxy* textureProxy = static_cast<Surface*>(targetSurface)->backingTextureProxy();
207 VulkanTexture* texture = static_cast<VulkanTexture*>(textureProxy->texture());
208
209 // Even though internally we use these helpers for getting src access flags and stages, they
210 // can also be used for general dst flags since we don't know exactly what the client
211 // plans on using the image for.
212 VkImageLayout newLayout = skgpu::MutableTextureStates::GetVkImageLayout(newState);
213 if (newLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
214 newLayout = texture->currentLayout();
215 }
216 VkPipelineStageFlags dstStage = VulkanTexture::LayoutToPipelineSrcStageFlags(newLayout);
217 VkAccessFlags dstAccess = VulkanTexture::LayoutToSrcAccessMask(newLayout);
218
219 uint32_t currentQueueFamilyIndex = texture->currentQueueFamilyIndex();
220 uint32_t newQueueFamilyIndex = skgpu::MutableTextureStates::GetVkQueueFamilyIndex(newState);
221 auto isSpecialQueue = [](uint32_t queueFamilyIndex) {
222 return queueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL ||
223 queueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT;
224 };
225 if (isSpecialQueue(currentQueueFamilyIndex) && isSpecialQueue(newQueueFamilyIndex)) {
226 // It is illegal to have both the new and old queue be special queue families (i.e. external
227 // or foreign).
228 return;
229 }
230
231 this->trackCommandBufferResource(sk_ref_sp(texture));
232
233 texture->setImageLayoutAndQueueIndex(this,
234 newLayout,
235 dstAccess,
236 dstStage,
237 false,
238 newQueueFamilyIndex);
239 }
240
241 static VkResult submit_to_queue(const VulkanSharedContext* sharedContext,
242 VkQueue queue,
243 VkFence fence,
244 uint32_t waitCount,
245 const VkSemaphore* waitSemaphores,
246 const VkPipelineStageFlags* waitStages,
247 uint32_t commandBufferCount,
248 const VkCommandBuffer* commandBuffers,
249 uint32_t signalCount,
250 const VkSemaphore* signalSemaphores,
251 Protected protectedContext) {
252 VkProtectedSubmitInfo protectedSubmitInfo;
253 if (protectedContext == Protected::kYes) {
254 memset(&protectedSubmitInfo, 0, sizeof(VkProtectedSubmitInfo));
255 protectedSubmitInfo.sType = VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO;
256 protectedSubmitInfo.pNext = nullptr;
257 protectedSubmitInfo.protectedSubmit = VK_TRUE;
258 }
259
260 VkSubmitInfo submitInfo;
261 memset(&submitInfo, 0, sizeof(VkSubmitInfo));
262 submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
263 submitInfo.pNext = protectedContext == Protected::kYes ? &protectedSubmitInfo : nullptr;
264 submitInfo.waitSemaphoreCount = waitCount;
265 submitInfo.pWaitSemaphores = waitSemaphores;
266 submitInfo.pWaitDstStageMask = waitStages;
267 submitInfo.commandBufferCount = commandBufferCount;
268 submitInfo.pCommandBuffers = commandBuffers;
269 submitInfo.signalSemaphoreCount = signalCount;
270 submitInfo.pSignalSemaphores = signalSemaphores;
271 VkResult result;
272 VULKAN_CALL_RESULT(sharedContext, result, QueueSubmit(queue, 1, &submitInfo, fence));
273 return result;
274 }
275
276 bool VulkanCommandBuffer::submit(VkQueue queue) {
277 this->end();
278
279 auto device = fSharedContext->device();
280 VkResult err;
281
282 if (fSubmitFence == VK_NULL_HANDLE) {
283 VkFenceCreateInfo fenceInfo;
284 memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
285 fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
286 VULKAN_CALL_RESULT(
287 fSharedContext, err, CreateFence(device, &fenceInfo, nullptr, &fSubmitFence));
288 if (err) {
289 fSubmitFence = VK_NULL_HANDLE;
290 return false;
291 }
292 } else {
293 // This cannot return DEVICE_LOST so we assert we succeeded.
294 VULKAN_CALL_RESULT(fSharedContext, err, ResetFences(device, 1, &fSubmitFence));
295 SkASSERT(err == VK_SUCCESS);
296 }
297
298 SkASSERT(fSubmitFence != VK_NULL_HANDLE);
299 int waitCount = fWaitSemaphores.size();
300 TArray<VkPipelineStageFlags> vkWaitStages(waitCount);
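// Without tracking exactly where the waited-on resources are first consumed, conservatively
// block the earliest stages that might read them: fragment shading and transfers.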
301 for (int i = 0; i < waitCount; ++i) {
302 vkWaitStages.push_back(VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
303 VK_PIPELINE_STAGE_TRANSFER_BIT);
304 }
305
306 VkResult submitResult = submit_to_queue(fSharedContext,
307 queue,
308 fSubmitFence,
309 waitCount,
310 fWaitSemaphores.data(),
311 vkWaitStages.data(),
312 /*commandBufferCount*/ 1,
313 &fPrimaryCommandBuffer,
314 fSignalSemaphores.size(),
315 fSignalSemaphores.data(),
316 this->isProtected());
317 fWaitSemaphores.clear();
318 fSignalSemaphores.clear();
319 if (submitResult != VK_SUCCESS) {
320 // If we failed to submit because of a device lost, we still need to wait for the fence to
321 // signal before deleting. However, there is an ARM bug (b/359822580) where the driver early
322 // outs on the fence wait if in a device lost state and thus we can't wait on it. Instead,
323 // we just wait on the queue to finish. We're already in a state that's going to cause us to
324 // restart the whole device, so waiting on the queue shouldn't have any performance impact.
325 if (submitResult == VK_ERROR_DEVICE_LOST) {
326 VULKAN_CALL(fSharedContext->interface(), QueueWaitIdle(queue));
327 } else {
328 SkASSERT(submitResult == VK_ERROR_OUT_OF_HOST_MEMORY ||
329 submitResult == VK_ERROR_OUT_OF_DEVICE_MEMORY);
330 }
331
332 VULKAN_CALL(fSharedContext->interface(), DestroyFence(device, fSubmitFence, nullptr));
333 fSubmitFence = VK_NULL_HANDLE;
334 return false;
335 }
336 return true;
337 }
338
339 bool VulkanCommandBuffer::isFinished() {
340 SkASSERT(!fActive);
341 if (VK_NULL_HANDLE == fSubmitFence) {
342 return true;
343 }
344
345 VkResult err;
346 VULKAN_CALL_RESULT_NOCHECK(fSharedContext->interface(), err,
347 GetFenceStatus(fSharedContext->device(), fSubmitFence));
348 switch (err) {
349 case VK_SUCCESS:
350 case VK_ERROR_DEVICE_LOST:
351 return true;
352
353 case VK_NOT_READY:
354 return false;
355
356 default:
357 SKGPU_LOG_F("Error calling vkGetFenceStatus. Error: %d", err);
358 SK_ABORT("Got an invalid fence status");
359 return false;
360 }
361 }
362
363 void VulkanCommandBuffer::waitUntilFinished() {
364 if (fSubmitFence == VK_NULL_HANDLE) {
365 return;
366 }
367 VULKAN_CALL_ERRCHECK(fSharedContext,
368 WaitForFences(fSharedContext->device(),
369 1,
370 &fSubmitFence,
371 /*waitAll=*/true,
372 /*timeout=*/UINT64_MAX));
373 }
374
375 void VulkanCommandBuffer::pushConstants(const PushConstantInfo& pushConstantInfo,
376 VkPipelineLayout compatibleLayout) {
377 // size must be within limits. Vulkan spec dictates each device supports at least 128 bytes
378 SkASSERT(pushConstantInfo.fSize < 128);
379 // offset and size must be a multiple of 4
380 SkASSERT(!SkToBool(pushConstantInfo.fOffset & 0x3));
381 SkASSERT(!SkToBool(pushConstantInfo.fSize & 0x3));
382
383 VULKAN_CALL(fSharedContext->interface(),
384 CmdPushConstants(fPrimaryCommandBuffer,
385 compatibleLayout,
386 pushConstantInfo.fShaderStageFlagBits,
387 pushConstantInfo.fOffset,
388 pushConstantInfo.fSize,
389 pushConstantInfo.fValues));
390 }
391
392 bool VulkanCommandBuffer::onAddRenderPass(const RenderPassDesc& renderPassDesc,
393 SkIRect renderPassBounds,
394 const Texture* colorTexture,
395 const Texture* resolveTexture,
396 const Texture* depthStencilTexture,
397 SkIRect viewport,
398 const DrawPassList& drawPasses) {
399 for (const auto& drawPass : drawPasses) {
400 // Our current implementation of setting texture image layouts does not allow layout changes
401 // once we have already begun a render pass, so prior to any other commands, set the layout
402 // of all sampled textures from the drawpass so they can be sampled from the shader.
403 const skia_private::TArray<sk_sp<TextureProxy>>& sampledTextureProxies =
404 drawPass->sampledTextures();
405 for (const sk_sp<TextureProxy>& textureProxy : sampledTextureProxies) {
406 VulkanTexture* vulkanTexture = const_cast<VulkanTexture*>(
407 static_cast<const VulkanTexture*>(
408 textureProxy->texture()));
409 vulkanTexture->setImageLayout(this,
410 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
411 VK_ACCESS_SHADER_READ_BIT,
412 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
413 false);
414 }
415 }
416 if (fDstCopy.first) {
417 VulkanTexture* vulkanTexture =
418 const_cast<VulkanTexture*>(static_cast<const VulkanTexture*>(fDstCopy.first));
419 vulkanTexture->setImageLayout(this,
420 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
421 VK_ACCESS_SHADER_READ_BIT,
422 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
423 false);
424 }
425
426 this->setViewport(viewport);
427
428 if (!this->beginRenderPass(renderPassDesc,
429 renderPassBounds,
430 colorTexture,
431 resolveTexture,
432 depthStencilTexture)) {
433 return false;
434 }
435
436 // After loading msaa from resolve if needed, update intrinsic push constant values. Neither the
437 // dst copy bounds nor the rtAdjust components of the intrinsic constants change throughout the
438 // course of a RenderPass, so we can simply calculate & update the push constants once per RP.
439 {
440 // TODO(b/374997389): Somehow convey & enforce Layout::kStd430 for push constants.
441 UniformManager intrinsicValues{Layout::kStd140};
442 CollectIntrinsicUniforms(
443 fSharedContext->caps(), viewport, fDstReadBounds, &intrinsicValues);
444 SkSpan<const char> bytes = intrinsicValues.finish();
445 SkASSERT(bytes.size_bytes() == VulkanResourceProvider::kIntrinsicConstantSize);
446
447 PushConstantInfo pushConstantInfo;
448 pushConstantInfo.fOffset = 0;
449 pushConstantInfo.fSize = VulkanResourceProvider::kIntrinsicConstantSize;
450 pushConstantInfo.fShaderStageFlagBits =
451 VulkanResourceProvider::kIntrinsicConstantStageFlags;
452 pushConstantInfo.fValues = bytes.data();
453
454 // Use the mock pipeline layout (which has compatible push constant parameters with real
455 // pipeline layouts) to update push constants even if we do not have a pipeline bound yet.
456 this->pushConstants(pushConstantInfo, fResourceProvider->mockPushConstantPipelineLayout());
457 }
458
459 for (const auto& drawPass : drawPasses) {
460 this->addDrawPass(drawPass.get());
461 }
462
463 this->endRenderPass();
464 return true;
465 }
466
467 bool VulkanCommandBuffer::updateAndBindInputAttachment(const VulkanTexture& texture,
468 const int setIdx) {
469 // Fetch a descriptor set that contains one input attachment (we do not support using more than
470 // one per set at this time).
471 STArray<1, DescriptorData> inputDesc = {VulkanGraphicsPipeline::kInputAttachmentDescriptor};
472 sk_sp<VulkanDescriptorSet> set = fResourceProvider->findOrCreateDescriptorSet(
473 SkSpan<DescriptorData>{&inputDesc.front(), inputDesc.size()});
474 if (!set) {
475 return false;
476 }
477
478 // Update and write to the descriptor given the provided texture, binding it afterwards.
479 VkDescriptorImageInfo textureInfo;
480 memset(&textureInfo, 0, sizeof(VkDescriptorImageInfo));
481 textureInfo.sampler = VK_NULL_HANDLE;
482 textureInfo.imageView =
483 texture.getImageView(VulkanImageView::Usage::kAttachment)->imageView();
484 textureInfo.imageLayout = texture.currentLayout();
485
486 VkWriteDescriptorSet writeInfo;
487 memset(&writeInfo, 0, sizeof(VkWriteDescriptorSet));
488 writeInfo.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
489 writeInfo.pNext = nullptr;
490 writeInfo.dstSet = *set->descriptorSet();
491 writeInfo.dstBinding = 0;
492 writeInfo.dstArrayElement = 0;
493 writeInfo.descriptorCount = 1;
494 writeInfo.descriptorType = DsTypeEnumToVkDs(DescriptorType::kInputAttachment);
495 writeInfo.pImageInfo = &textureInfo;
496 writeInfo.pBufferInfo = nullptr;
497 writeInfo.pTexelBufferView = nullptr;
498
499 VULKAN_CALL(fSharedContext->interface(),
500 UpdateDescriptorSets(fSharedContext->device(),
501 /*descriptorWriteCount=*/1,
502 &writeInfo,
503 /*descriptorCopyCount=*/0,
504 /*pDescriptorCopies=*/nullptr));
505
506 VULKAN_CALL(fSharedContext->interface(),
507 CmdBindDescriptorSets(fPrimaryCommandBuffer,
508 VK_PIPELINE_BIND_POINT_GRAPHICS,
509 fActiveGraphicsPipeline->layout(),
510 setIdx,
511 /*setCount=*/1,
512 set->descriptorSet(),
513 /*dynamicOffsetCount=*/0,
514 /*dynamicOffsets=*/nullptr));
515
516 this->trackResource(std::move(set));
517 return true;
518 }
519
520 bool VulkanCommandBuffer::loadMSAAFromResolve(const RenderPassDesc& renderPassDesc,
521 VulkanTexture& resolveTexture,
522 SkISize dstDimensions,
523 const SkIRect nativeDrawBounds) {
524 sk_sp<VulkanGraphicsPipeline> loadPipeline =
525 fResourceProvider->findOrCreateLoadMSAAPipeline(renderPassDesc);
526 if (!loadPipeline) {
527 SKGPU_LOG_E("Unable to create pipeline to load resolve texture into MSAA attachment");
528 return false;
529 }
530
531 // Update and bind uniform descriptor set
532 int w = nativeDrawBounds.width();
533 int h = nativeDrawBounds.height();
534
535 // dst rect edges in NDC (-1 to 1)
536 int dw = dstDimensions.width();
537 int dh = dstDimensions.height();
538 float dx0 = 2.f * nativeDrawBounds.fLeft / dw - 1.f;
539 float dx1 = 2.f * (nativeDrawBounds.fLeft + w) / dw - 1.f;
540 float dy0 = 2.f * nativeDrawBounds.fTop / dh - 1.f;
541 float dy1 = 2.f * (nativeDrawBounds.fTop + h) / dh - 1.f;
542 float uniData[] = {dx1 - dx0, dy1 - dy0, dx0, dy0}; // posXform
543 SkASSERT(sizeof(uniData) == VulkanResourceProvider::kLoadMSAAPushConstantSize);
544
545 this->bindGraphicsPipeline(loadPipeline.get());
546
547 PushConstantInfo loadMsaaPushConstantInfo;
548 loadMsaaPushConstantInfo.fOffset = 0;
549 loadMsaaPushConstantInfo.fSize = VulkanResourceProvider::kLoadMSAAPushConstantSize;
550 loadMsaaPushConstantInfo.fShaderStageFlagBits =
551 VulkanResourceProvider::kLoadMSAAPushConstantStageFlags;
552 loadMsaaPushConstantInfo.fValues = uniData;
553 this->pushConstants(loadMsaaPushConstantInfo, loadPipeline->layout());
554
555 // Make sure we do not attempt to bind uniform or texture/sampler descriptors because we do
556 // not use them for loading MSAA from resolve.
557 fBindUniformBuffers = false;
558 fBindTextureSamplers = false;
559
560 this->setScissor(SkIRect::MakeXYWH(0, 0, dstDimensions.width(), dstDimensions.height()));
561
562 if (!this->updateAndBindInputAttachment(
563 resolveTexture, VulkanGraphicsPipeline::kLoadMsaaFromResolveInputDescSetIndex)) {
564 SKGPU_LOG_E("Unable to update and bind an input attachment descriptor for loading MSAA "
565 "from resolve");
566 return false;
567 }
568
569 this->draw(PrimitiveType::kTriangleStrip, /*baseVertex=*/0, /*vertexCount=*/4);
570 this->nextSubpass();
571
572 // If we loaded the resolve attachment, then we would have set the image layout to be
573 // VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL so that it could be used at the start as an
574 // input attachment. However, when we switched to the main subpass it will transition the
575 // layout internally to VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL. Thus we need to update our
576 // tracking of the layout to match the new layout.
577 resolveTexture.updateImageLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
578
579 // After using a distinct descriptor set layout for loading MSAA from resolve, we will need to
580 // (re-)bind any descriptor sets.
581 fBindUniformBuffers = true;
582 fBindTextureSamplers = true;
583 return true;
584 }
585
586 namespace {
587 void setup_texture_layouts(VulkanCommandBuffer* cmdBuf,
588 VulkanTexture* colorTexture,
589 VulkanTexture* resolveTexture,
590 VulkanTexture* depthStencilTexture,
591 bool loadMSAAFromResolve) {
592 if (colorTexture) {
593 colorTexture->setImageLayout(cmdBuf,
594 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
595 VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
596 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
597 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
598 /*byRegion=*/false);
599 if (resolveTexture) {
600 if (loadMSAAFromResolve) {
601 // When loading MSAA from resolve, the texture is used in the first subpass as an
602 // input attachment. Subsequent subpass(es) need the resolve texture to provide read
603 // access to the color attachment (for use cases such as blending), so add access
604 // and pipeline stage flags for both usages.
605 resolveTexture->setImageLayout(cmdBuf,
606 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
607 VK_ACCESS_INPUT_ATTACHMENT_READ_BIT |
608 VK_ACCESS_COLOR_ATTACHMENT_READ_BIT,
609 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
610 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
611 /*byRegion=*/false);
612 } else {
613 resolveTexture->setImageLayout(cmdBuf,
614 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
615 VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
616 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
617 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
618 /*byRegion=*/false);
619 }
620 }
621 }
622 if (depthStencilTexture) {
623 depthStencilTexture->setImageLayout(cmdBuf,
624 VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
625 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
626 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
627 VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
628 /*byRegion=*/false);
629 }
630 }
631
632 void gather_clear_values(
633 STArray<VulkanRenderPass::kMaxExpectedAttachmentCount, VkClearValue>& clearValues,
634 const RenderPassDesc& renderPassDesc,
635 VulkanTexture* colorTexture,
636 VulkanTexture* depthStencilTexture,
637 int depthStencilAttachmentIdx) {
638 clearValues.push_back_n(VulkanRenderPass::kMaxExpectedAttachmentCount);
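// Pre-size to the maximum attachment count so each clear value can be written at its
// attachment index below, even when some attachments are not present.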
639 if (colorTexture) {
640 VkClearValue& colorAttachmentClear =
641 clearValues.at(VulkanRenderPass::kColorAttachmentIdx);
642 memset(&colorAttachmentClear, 0, sizeof(VkClearValue));
643 colorAttachmentClear.color = {{renderPassDesc.fClearColor[0],
644 renderPassDesc.fClearColor[1],
645 renderPassDesc.fClearColor[2],
646 renderPassDesc.fClearColor[3]}};
647 }
648 // Resolve texture does not have a clear value
649 if (depthStencilTexture) {
650 VkClearValue& depthStencilAttachmentClear = clearValues.at(depthStencilAttachmentIdx);
651 memset(&depthStencilAttachmentClear, 0, sizeof(VkClearValue));
652 depthStencilAttachmentClear.depthStencil = {renderPassDesc.fClearDepth,
653 renderPassDesc.fClearStencil};
654 }
655 }
656
657 // The RenderArea bounds we pass into BeginRenderPass must have a start x value that is a multiple
658 // of the granularity. The width must also be a multiple of the granularity or equal to the width
659 // of the entire attachment. Similar requirements apply to the y and height components.
660 VkRect2D get_render_area(const SkIRect& srcBounds,
661 const VkExtent2D& granularity,
662 int maxWidth,
663 int maxHeight) {
664 SkIRect dstBounds;
665 // Adjust Width
666 if (granularity.width == 0 || granularity.width == 1) {
667 dstBounds.fLeft = srcBounds.fLeft;
668 dstBounds.fRight = srcBounds.fRight;
669 } else {
670 // Start with the right side of rect so we know if we end up going past the maxWidth.
671 int rightAdj = srcBounds.fRight % granularity.width;
672 if (rightAdj != 0) {
673 rightAdj = granularity.width - rightAdj;
674 }
675 dstBounds.fRight = srcBounds.fRight + rightAdj;
676 if (dstBounds.fRight > maxWidth) {
677 dstBounds.fRight = maxWidth;
678 dstBounds.fLeft = 0;
679 } else {
680 dstBounds.fLeft = srcBounds.fLeft - srcBounds.fLeft % granularity.width;
681 }
682 }
683
684 if (granularity.height == 0 || granularity.height == 1) {
685 dstBounds.fTop = srcBounds.fTop;
686 dstBounds.fBottom = srcBounds.fBottom;
687 } else {
688 // Start with the bottom side of rect so we know if we end up going past the maxHeight.
689 int bottomAdj = srcBounds.fBottom % granularity.height;
690 if (bottomAdj != 0) {
691 bottomAdj = granularity.height - bottomAdj;
692 }
693 dstBounds.fBottom = srcBounds.fBottom + bottomAdj;
694 if (dstBounds.fBottom > maxHeight) {
695 dstBounds.fBottom = maxHeight;
696 dstBounds.fTop = 0;
697 } else {
698 dstBounds.fTop = srcBounds.fTop - srcBounds.fTop % granularity.height;
699 }
700 }
701
702 VkRect2D renderArea;
703 renderArea.offset = { dstBounds.fLeft , dstBounds.fTop };
704 renderArea.extent = { (uint32_t)dstBounds.width(), (uint32_t)dstBounds.height() };
705 return renderArea;
706 }
707
708 } // anonymous namespace
709
710 bool VulkanCommandBuffer::beginRenderPass(const RenderPassDesc& renderPassDesc,
711 SkIRect renderPassBounds,
712 const Texture* colorTexture,
713 const Texture* resolveTexture,
714 const Texture* depthStencilTexture) {
715 // TODO: Check that Textures match RenderPassDesc
716 VulkanTexture* vulkanColorTexture =
717 const_cast<VulkanTexture*>(static_cast<const VulkanTexture*>(colorTexture));
718 VulkanTexture* vulkanResolveTexture =
719 const_cast<VulkanTexture*>(static_cast<const VulkanTexture*>(resolveTexture));
720 VulkanTexture* vulkanDepthStencilTexture =
721 const_cast<VulkanTexture*>(static_cast<const VulkanTexture*>(depthStencilTexture));
722
723 SkASSERT(resolveTexture ? renderPassDesc.fColorResolveAttachment.fStoreOp == StoreOp::kStore
724 : true);
725
726 // Determine if we need to load MSAA from resolve, and if so, make certain that key conditions
727 // are met before proceeding.
728 bool loadMSAAFromResolve = renderPassDesc.fColorResolveAttachment.fTextureInfo.isValid() &&
729 renderPassDesc.fColorResolveAttachment.fLoadOp == LoadOp::kLoad;
730 if (loadMSAAFromResolve && (!vulkanResolveTexture || !vulkanColorTexture ||
731 !vulkanResolveTexture->supportsInputAttachmentUsage())) {
732 SKGPU_LOG_E("Cannot begin render pass. In order to load MSAA from resolve, the color "
733 "attachment must have input attachment usage and both the color and resolve "
734 "attachments must be valid.");
735 return false;
736 }
737
738 // Before beginning a renderpass, set all textures to the appropriate image layout.
739 setup_texture_layouts(this,
740 vulkanColorTexture,
741 vulkanResolveTexture,
742 vulkanDepthStencilTexture,
743 loadMSAAFromResolve);
744
745 static constexpr int kMaxNumAttachments = 3;
746
747 // Gather clear values needed for RenderPassBeginInfo. Indexed by attachment number.
748 STArray<kMaxNumAttachments, VkClearValue> clearValues;
749 // The depth/stencil attachment can be at attachment index 1 or 2 depending on whether there is
750 // a resolve texture attachment for this renderpass.
751 int depthStencilAttachmentIndex = resolveTexture ? 2 : 1;
752 gather_clear_values(clearValues,
753 renderPassDesc,
754 vulkanColorTexture,
755 vulkanDepthStencilTexture,
756 depthStencilAttachmentIndex);
757
758 sk_sp<VulkanRenderPass> vulkanRenderPass =
759 fResourceProvider->findOrCreateRenderPass(renderPassDesc, /*compatibleOnly=*/false);
760 if (!vulkanRenderPass) {
761 SKGPU_LOG_W("Could not create Vulkan RenderPass");
762 return false;
763 }
764 this->submitPipelineBarriers();
765 this->trackResource(vulkanRenderPass);
766
767 int frameBufferWidth = 0;
768 int frameBufferHeight = 0;
769 if (colorTexture) {
770 frameBufferWidth = colorTexture->dimensions().width();
771 frameBufferHeight = colorTexture->dimensions().height();
772 } else if (depthStencilTexture) {
773 frameBufferWidth = depthStencilTexture->dimensions().width();
774 frameBufferHeight = depthStencilTexture->dimensions().height();
775 }
776 sk_sp<VulkanFramebuffer> framebuffer =
777 fResourceProvider->createFramebuffer(fSharedContext,
778 vulkanColorTexture,
779 vulkanResolveTexture,
780 vulkanDepthStencilTexture,
781 renderPassDesc,
782 *vulkanRenderPass,
783 frameBufferWidth,
784 frameBufferHeight);
785 if (!framebuffer) {
786 SKGPU_LOG_W("Could not create Vulkan Framebuffer");
787 return false;
788 }
789
790 VkExtent2D granularity;
791 // Get granularity for this render pass
792 VULKAN_CALL(fSharedContext->interface(),
793 GetRenderAreaGranularity(fSharedContext->device(),
794 vulkanRenderPass->renderPass(),
795 &granularity));
796
797 bool useFullBounds = loadMSAAFromResolve &&
798 fSharedContext->vulkanCaps().mustLoadFullImageForMSAA();
799
800 VkRect2D renderArea = get_render_area(useFullBounds ? SkIRect::MakeWH(frameBufferWidth,
801 frameBufferHeight)
802 : renderPassBounds,
803 granularity,
804 frameBufferWidth,
805 frameBufferHeight);
806
807 VkRenderPassBeginInfo beginInfo;
808 memset(&beginInfo, 0, sizeof(VkRenderPassBeginInfo));
809 beginInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
810 beginInfo.pNext = nullptr;
811 beginInfo.renderPass = vulkanRenderPass->renderPass();
812 beginInfo.framebuffer = framebuffer->framebuffer();
813 beginInfo.renderArea = renderArea;
814 beginInfo.clearValueCount = clearValues.size();
815 beginInfo.pClearValues = clearValues.begin();
816
817 // Submit pipeline barriers to ensure any image layout transitions are recorded prior to
818 // beginning the render pass.
819 this->submitPipelineBarriers();
820 // TODO: If we add support for secondary command buffers, dynamically determine subpass contents
821 VULKAN_CALL(fSharedContext->interface(),
822 CmdBeginRenderPass(fPrimaryCommandBuffer,
823 &beginInfo,
824 VK_SUBPASS_CONTENTS_INLINE));
825 fActiveRenderPass = true;
826
827 SkIRect nativeBounds = SkIRect::MakeXYWH(renderArea.offset.x,
828 renderArea.offset.y,
829 renderArea.extent.width,
830 renderArea.extent.height);
831 if (loadMSAAFromResolve && !this->loadMSAAFromResolve(renderPassDesc,
832 *vulkanResolveTexture,
833 vulkanColorTexture->dimensions(),
834 nativeBounds)) {
835 SKGPU_LOG_E("Failed to load MSAA from resolve");
836 this->endRenderPass();
837 return false;
838 }
839
840 // Once we have an active render pass, the command buffer should hold on to a frame buffer ref.
841 this->trackResource(std::move(framebuffer));
842 return true;
843 }
844
845 void VulkanCommandBuffer::endRenderPass() {
846 SkASSERT(fActive);
847 VULKAN_CALL(fSharedContext->interface(), CmdEndRenderPass(fPrimaryCommandBuffer));
848 fActiveRenderPass = false;
849 }
850
851 void VulkanCommandBuffer::addDrawPass(const DrawPass* drawPass) {
852 drawPass->addResourceRefs(this);
853 for (auto [type, cmdPtr] : drawPass->commands()) {
854 switch (type) {
855 case DrawPassCommands::Type::kBindGraphicsPipeline: {
856 auto bgp = static_cast<DrawPassCommands::BindGraphicsPipeline*>(cmdPtr);
857 this->bindGraphicsPipeline(drawPass->getPipeline(bgp->fPipelineIndex));
858 break;
859 }
860 case DrawPassCommands::Type::kSetBlendConstants: {
861 auto sbc = static_cast<DrawPassCommands::SetBlendConstants*>(cmdPtr);
862 this->setBlendConstants(sbc->fBlendConstants);
863 break;
864 }
865 case DrawPassCommands::Type::kBindUniformBuffer: {
866 auto bub = static_cast<DrawPassCommands::BindUniformBuffer*>(cmdPtr);
867 this->recordBufferBindingInfo(bub->fInfo, bub->fSlot);
868 break;
869 }
870 case DrawPassCommands::Type::kBindDrawBuffers: {
871 auto bdb = static_cast<DrawPassCommands::BindDrawBuffers*>(cmdPtr);
872 this->bindDrawBuffers(
873 bdb->fVertices, bdb->fInstances, bdb->fIndices, bdb->fIndirect);
874 break;
875 }
876 case DrawPassCommands::Type::kBindTexturesAndSamplers: {
877 auto bts = static_cast<DrawPassCommands::BindTexturesAndSamplers*>(cmdPtr);
878 this->recordTextureAndSamplerDescSet(drawPass, bts);
879 break;
880 }
881 case DrawPassCommands::Type::kSetScissor: {
882 auto ss = static_cast<DrawPassCommands::SetScissor*>(cmdPtr);
883 this->setScissor(ss->fScissor);
884 break;
885 }
886 case DrawPassCommands::Type::kDraw: {
887 auto draw = static_cast<DrawPassCommands::Draw*>(cmdPtr);
888 this->draw(draw->fType, draw->fBaseVertex, draw->fVertexCount);
889 break;
890 }
891 case DrawPassCommands::Type::kDrawIndexed: {
892 auto draw = static_cast<DrawPassCommands::DrawIndexed*>(cmdPtr);
893 this->drawIndexed(
894 draw->fType, draw->fBaseIndex, draw->fIndexCount, draw->fBaseVertex);
895 break;
896 }
897 case DrawPassCommands::Type::kDrawInstanced: {
898 auto draw = static_cast<DrawPassCommands::DrawInstanced*>(cmdPtr);
899 this->drawInstanced(draw->fType,
900 draw->fBaseVertex,
901 draw->fVertexCount,
902 draw->fBaseInstance,
903 draw->fInstanceCount);
904 break;
905 }
906 case DrawPassCommands::Type::kDrawIndexedInstanced: {
907 auto draw = static_cast<DrawPassCommands::DrawIndexedInstanced*>(cmdPtr);
908 this->drawIndexedInstanced(draw->fType,
909 draw->fBaseIndex,
910 draw->fIndexCount,
911 draw->fBaseVertex,
912 draw->fBaseInstance,
913 draw->fInstanceCount);
914 break;
915 }
916 case DrawPassCommands::Type::kDrawIndirect: {
917 auto draw = static_cast<DrawPassCommands::DrawIndirect*>(cmdPtr);
918 this->drawIndirect(draw->fType);
919 break;
920 }
921 case DrawPassCommands::Type::kDrawIndexedIndirect: {
922 auto draw = static_cast<DrawPassCommands::DrawIndexedIndirect*>(cmdPtr);
923 this->drawIndexedIndirect(draw->fType);
924 break;
925 }
926 case DrawPassCommands::Type::kAddBarrier: {
927 auto barrierCmd = static_cast<DrawPassCommands::AddBarrier*>(cmdPtr);
928 this->addBarrier(barrierCmd->fType);
929 break;
930 }
931 }
932 }
933 }
934
935 void VulkanCommandBuffer::bindGraphicsPipeline(const GraphicsPipeline* graphicsPipeline) {
936 SkASSERT(fActiveRenderPass);
937 fActiveGraphicsPipeline = static_cast<const VulkanGraphicsPipeline*>(graphicsPipeline);
938 VULKAN_CALL(fSharedContext->interface(), CmdBindPipeline(fPrimaryCommandBuffer,
939 VK_PIPELINE_BIND_POINT_GRAPHICS,
940 fActiveGraphicsPipeline->pipeline()));
941 // TODO(b/293924877): Compare pipeline layouts. If 2 pipelines have the same pipeline layout,
942 // then descriptor sets do not need to be re-bound. For now, simply force a re-binding of
943 // descriptor sets with any new bindGraphicsPipeline DrawPassCommand.
944 fBindUniformBuffers = true;
945
946 if (graphicsPipeline->dstReadStrategy() == DstReadStrategy::kTextureCopy &&
947 graphicsPipeline->numFragTexturesAndSamplers() == 1) {
948 // The only texture-sampler that the pipeline declares must be the dstCopy, which means
949 // there are no other textures that will trigger BindTextureAndSampler commands in a
950 // DrawPass (e.g. solid-color + dst-read-requiring blend). Configure the texture binding
951 // up front in this case.
952 this->recordTextureAndSamplerDescSet(/*drawPass=*/nullptr, /*command=*/nullptr);
953 }
954 }
955
956 void VulkanCommandBuffer::setBlendConstants(float* blendConstants) {
957 SkASSERT(fActive);
958 if (0 != memcmp(blendConstants, fCachedBlendConstant, 4 * sizeof(float))) {
959 VULKAN_CALL(fSharedContext->interface(),
960 CmdSetBlendConstants(fPrimaryCommandBuffer, blendConstants));
961 memcpy(fCachedBlendConstant, blendConstants, 4 * sizeof(float));
962 }
963 }
964
965 void VulkanCommandBuffer::addBarrier(BarrierType type) {
966 // TODO(b/383769988): Implement.
967 }
968
969 void VulkanCommandBuffer::recordBufferBindingInfo(const BindBufferInfo& info, UniformSlot slot) {
970 unsigned int bufferIndex = 0;
971 switch (slot) {
972 case UniformSlot::kRenderStep:
973 bufferIndex = VulkanGraphicsPipeline::kRenderStepUniformBufferIndex;
974 break;
975 case UniformSlot::kPaint:
976 bufferIndex = VulkanGraphicsPipeline::kPaintUniformBufferIndex;
977 break;
978 case UniformSlot::kGradient:
979 bufferIndex = VulkanGraphicsPipeline::kGradientBufferIndex;
980 break;
981 default:
982 SkASSERT(false);
983 }
984
985 fUniformBuffersToBind[bufferIndex] = info;
986 fBindUniformBuffers = true;
987 }
988
989 void VulkanCommandBuffer::syncDescriptorSets() {
990 if (fBindUniformBuffers) {
991 this->bindUniformBuffers();
992 // Changes to descriptor sets in lower slot numbers disrupt later set bindings. Currently,
993 // the descriptor set which houses uniform buffers is at a lower slot than the texture /
994 // sampler set, so rebinding uniform buffers necessitates re-binding any texture/samplers.
995 fBindTextureSamplers = true;
996 }
997 if (fBindTextureSamplers) {
998 this->bindTextureSamplers();
999 }
1000 }
1001
1002 void VulkanCommandBuffer::bindUniformBuffers() {
1003 fBindUniformBuffers = false;
1004
1005 // Define a container with size reserved for up to kNumUniformBuffers descriptors. Only add
1006 // DescriptorData for uniforms that actually are used and need to be bound.
1007 STArray<VulkanGraphicsPipeline::kNumUniformBuffers, DescriptorData> descriptors;
1008
1009 // Up to kNumUniformBuffers can be used and require rebinding depending upon render pass info.
1010 DescriptorType uniformBufferType =
1011 fSharedContext->caps()->storageBufferSupport() ? DescriptorType::kStorageBuffer
1012 : DescriptorType::kUniformBuffer;
1013 if (fActiveGraphicsPipeline->hasStepUniforms() &&
1014 fUniformBuffersToBind[VulkanGraphicsPipeline::kRenderStepUniformBufferIndex].fBuffer) {
1015 descriptors.push_back({
1016 uniformBufferType,
1017 /*count=*/1,
1018 VulkanGraphicsPipeline::kRenderStepUniformBufferIndex,
1019 PipelineStageFlags::kVertexShader | PipelineStageFlags::kFragmentShader });
1020 }
1021 if (fActiveGraphicsPipeline->hasPaintUniforms() &&
1022 fUniformBuffersToBind[VulkanGraphicsPipeline::kPaintUniformBufferIndex].fBuffer) {
1023 descriptors.push_back({ uniformBufferType, /*count=*/1,
1024 VulkanGraphicsPipeline::kPaintUniformBufferIndex,
1025 PipelineStageFlags::kFragmentShader });
1026 }
1027 if (fActiveGraphicsPipeline->hasGradientBuffer() &&
1028 fUniformBuffersToBind[VulkanGraphicsPipeline::kGradientBufferIndex].fBuffer) {
1029 SkASSERT(fSharedContext->caps()->gradientBufferSupport() &&
1030 fSharedContext->caps()->storageBufferSupport());
1031 descriptors.push_back({ DescriptorType::kStorageBuffer, /*count=*/1,
1032 VulkanGraphicsPipeline::kGradientBufferIndex,
1033 PipelineStageFlags::kFragmentShader });
1034 }
1035
1036 // If no uniforms are used, we can go ahead and return since no descriptors need to be bound.
1037 if (descriptors.empty()) {
1038 return;
1039 }
1040
1041 skia_private::AutoSTMalloc<VulkanGraphicsPipeline::kNumUniformBuffers, uint32_t>
1042 dynamicOffsets(descriptors.size());
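// One dynamic offset per descriptor pushed above, in the same order; each offset selects
// this draw's slice within the corresponding bound uniform/storage buffer.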
1043 for (int i = 0; i < descriptors.size(); i++) {
1044 int descriptorBindingIndex = descriptors[i].fBindingIndex;
1045 SkASSERT(static_cast<unsigned long>(descriptorBindingIndex) < fUniformBuffersToBind.size());
1046 const auto& bindInfo = fUniformBuffersToBind[descriptorBindingIndex];
1047 #ifdef SK_DEBUG
1048 if (descriptors[i].fPipelineStageFlags & PipelineStageFlags::kVertexShader) {
1049 SkASSERT(bindInfo.fBuffer->isProtected() == Protected::kNo);
1050 }
1051 #endif
1052 dynamicOffsets[i] = bindInfo.fOffset;
1053 }
1054
1055 sk_sp<VulkanDescriptorSet> descSet = fResourceProvider->findOrCreateUniformBuffersDescriptorSet(
1056 descriptors, fUniformBuffersToBind);
1057 if (!descSet) {
1058 SKGPU_LOG_E("Unable to find or create uniform descriptor set");
1059 return;
1060 }
1061
1062 VULKAN_CALL(fSharedContext->interface(),
1063 CmdBindDescriptorSets(fPrimaryCommandBuffer,
1064 VK_PIPELINE_BIND_POINT_GRAPHICS,
1065 fActiveGraphicsPipeline->layout(),
1066 VulkanGraphicsPipeline::kUniformBufferDescSetIndex,
1067 /*setCount=*/1,
1068 descSet->descriptorSet(),
1069 descriptors.size(),
1070 dynamicOffsets.get()));
1071 this->trackResource(std::move(descSet));
1072 }
1073
1074 void VulkanCommandBuffer::bindDrawBuffers(const BindBufferInfo& vertices,
1075 const BindBufferInfo& instances,
1076 const BindBufferInfo& indices,
1077 const BindBufferInfo& indirect) {
1078 this->bindVertexBuffers(vertices.fBuffer,
1079 vertices.fOffset,
1080 instances.fBuffer,
1081 instances.fOffset);
1082 this->bindIndexBuffer(indices.fBuffer, indices.fOffset);
1083 this->bindIndirectBuffer(indirect.fBuffer, indirect.fOffset);
1084 }
1085
1086 void VulkanCommandBuffer::bindVertexBuffers(const Buffer* vertexBuffer,
1087 size_t vertexOffset,
1088 const Buffer* instanceBuffer,
1089 size_t instanceOffset) {
1090 this->bindInputBuffer(vertexBuffer, vertexOffset,
1091 VulkanGraphicsPipeline::kVertexBufferIndex);
1092 this->bindInputBuffer(instanceBuffer, instanceOffset,
1093 VulkanGraphicsPipeline::kInstanceBufferIndex);
1094 }
1095
1096 void VulkanCommandBuffer::bindInputBuffer(const Buffer* buffer, VkDeviceSize offset,
1097 uint32_t binding) {
1098 if (buffer) {
1099 SkASSERT(buffer->isProtected() == Protected::kNo);
1100 VkBuffer vkBuffer = static_cast<const VulkanBuffer*>(buffer)->vkBuffer();
1101 SkASSERT(vkBuffer != VK_NULL_HANDLE);
1102 if (vkBuffer != fBoundInputBuffers[binding] ||
1103 offset != fBoundInputBufferOffsets[binding]) {
1104 VULKAN_CALL(fSharedContext->interface(),
1105 CmdBindVertexBuffers(fPrimaryCommandBuffer,
1106 binding,
1107 /*bindingCount=*/1,
1108 &vkBuffer,
1109 &offset));
1110 fBoundInputBuffers[binding] = vkBuffer;
1111 fBoundInputBufferOffsets[binding] = offset;
1112 }
1113 }
1114 }
1115
1116 void VulkanCommandBuffer::bindIndexBuffer(const Buffer* indexBuffer, size_t offset) {
1117 if (indexBuffer) {
1118 SkASSERT(indexBuffer->isProtected() == Protected::kNo);
1119 VkBuffer vkBuffer = static_cast<const VulkanBuffer*>(indexBuffer)->vkBuffer();
1120 SkASSERT(vkBuffer != VK_NULL_HANDLE);
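// Graphite currently emits 16-bit index data, so the index type is hard-coded to
// VK_INDEX_TYPE_UINT16; only the buffer handle and offset vary between binds.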
1121 if (vkBuffer != fBoundIndexBuffer || offset != fBoundIndexBufferOffset) {
1122 VULKAN_CALL(fSharedContext->interface(), CmdBindIndexBuffer(fPrimaryCommandBuffer,
1123 vkBuffer,
1124 offset,
1125 VK_INDEX_TYPE_UINT16));
1126 fBoundIndexBuffer = vkBuffer;
1127 fBoundIndexBufferOffset = offset;
1128 }
1129 } else {
1130 fBoundIndexBuffer = VK_NULL_HANDLE;
1131 fBoundIndexBufferOffset = 0;
1132 }
1133 }
1134
1135 void VulkanCommandBuffer::bindIndirectBuffer(const Buffer* indirectBuffer, size_t offset) {
1136 // Indirect buffers are not bound via the command buffer, but specified in the draw cmd.
1137 if (indirectBuffer) {
1138 SkASSERT(indirectBuffer->isProtected() == Protected::kNo);
1139 fBoundIndirectBuffer = static_cast<const VulkanBuffer*>(indirectBuffer)->vkBuffer();
1140 fBoundIndirectBufferOffset = offset;
1141 } else {
1142 fBoundIndirectBuffer = VK_NULL_HANDLE;
1143 fBoundIndirectBufferOffset = 0;
1144 }
1145 }
1146
1147 void VulkanCommandBuffer::recordTextureAndSamplerDescSet(
1148 const DrawPass* drawPass, const DrawPassCommands::BindTexturesAndSamplers* command) {
1149 SkASSERT(SkToBool(drawPass) == SkToBool(command));
1150 SkASSERT(fActiveGraphicsPipeline);
1151 // Add one extra texture for dst copies, which is not included in the command itself.
1152 int numTexSamplers = command ? command->fNumTexSamplers : 0;
1153 if (fActiveGraphicsPipeline->dstReadStrategy() == DstReadStrategy::kTextureCopy) {
1154 numTexSamplers++;
1155 }
1156
1157 if (numTexSamplers == 0) {
1158 fNumTextureSamplers = 0;
1159 fTextureSamplerDescSetToBind = VK_NULL_HANDLE;
1160 fBindTextureSamplers = false;
1161 return;
1162 }
1163
1164 sk_sp<VulkanDescriptorSet> set;
1165 const VulkanTexture* singleTexture = nullptr;
1166 const Sampler* singleSampler = nullptr;
1167 if (numTexSamplers == 1) {
1168 if (fActiveGraphicsPipeline->dstReadStrategy() == DstReadStrategy::kTextureCopy) {
1169 singleTexture = static_cast<const VulkanTexture*>(fDstCopy.first);
1170 singleSampler = static_cast<const VulkanSampler*>(fDstCopy.second);
1171 } else {
1172 SkASSERT(command);
1173 singleTexture = static_cast<const VulkanTexture*>(
1174 drawPass->getTexture(command->fTextureIndices[0]));
1175 singleSampler = drawPass->getSampler(command->fSamplerIndices[0]);
1176 }
1177 SkASSERT(singleTexture && singleSampler);
1178 set = singleTexture->getCachedSingleTextureDescriptorSet(singleSampler);
1179 }
1180
1181 if (!set) {
1182 // Query resource provider to obtain a descriptor set for the texture/samplers
1183 TArray<DescriptorData> descriptors(numTexSamplers);
1184 if (command) {
1185 for (int i = 0; i < command->fNumTexSamplers; i++) {
1186 auto sampler = static_cast<const VulkanSampler*>(
1187 drawPass->getSampler(command->fSamplerIndices[i]));
1188
1189 const Sampler* immutableSampler = (sampler && sampler->ycbcrConversion()) ? sampler
1190 : nullptr;
1191 descriptors.push_back({DescriptorType::kCombinedTextureSampler,
1192 /*count=*/1,
1193 /*bindingIdx=*/i,
1194 PipelineStageFlags::kFragmentShader,
1195 immutableSampler});
1196 }
1197 }
1198 // If required the dst copy texture+sampler is the last one in the descriptor set
1199 if (fActiveGraphicsPipeline->dstReadStrategy() == DstReadStrategy::kTextureCopy) {
1200 descriptors.push_back({DescriptorType::kCombinedTextureSampler,
1201 /*count=*/1,
1202 /*bindingIdx=*/numTexSamplers-1,
1203 PipelineStageFlags::kFragmentShader,
1204 /*immutableSampler=*/nullptr});
1205 }
1206 SkASSERT(descriptors.size() == numTexSamplers);
1207 set = fResourceProvider->findOrCreateDescriptorSet(
1208 SkSpan<DescriptorData>{&descriptors.front(), descriptors.size()});
1209
1210 if (!set) {
1211 SKGPU_LOG_E("Unable to find or create descriptor set");
1212 fNumTextureSamplers = 0;
1213 fTextureSamplerDescSetToBind = VK_NULL_HANDLE;
1214 fBindTextureSamplers = false;
1215 return;
1216 }
1217 // Populate the descriptor set with texture/sampler descriptors
1218 TArray<VkWriteDescriptorSet> writeDescriptorSets(numTexSamplers);
1219 TArray<VkDescriptorImageInfo> descriptorImageInfos(numTexSamplers);
1220 auto appendTextureSampler = [&](const VulkanTexture* texture,
1221 const VulkanSampler* sampler) {
1222 if (!texture || !sampler) {
1223 // TODO(b/294198324): Investigate the root cause for null texture or samplers on
1224 // Ubuntu QuadP400 GPU
1225 SKGPU_LOG_E("Texture and sampler must not be null");
1226 fNumTextureSamplers = 0;
1227 fTextureSamplerDescSetToBind = VK_NULL_HANDLE;
1228 fBindTextureSamplers = false;
1229 return false;
1230 }
1231
1232 VkDescriptorImageInfo& textureInfo = descriptorImageInfos.push_back();
1233 memset(&textureInfo, 0, sizeof(VkDescriptorImageInfo));
1234 textureInfo.sampler = sampler->ycbcrConversion() ? VK_NULL_HANDLE
1235 : sampler->vkSampler();
1236 textureInfo.imageView =
1237 texture->getImageView(VulkanImageView::Usage::kShaderInput)->imageView();
1238 textureInfo.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
1239
1240 VkWriteDescriptorSet& writeInfo = writeDescriptorSets.push_back();
1241 memset(&writeInfo, 0, sizeof(VkWriteDescriptorSet));
1242 writeInfo.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
1243 writeInfo.pNext = nullptr;
1244 writeInfo.dstSet = *set->descriptorSet();
1245 writeInfo.dstBinding = writeDescriptorSets.size() - 1;
1246 writeInfo.dstArrayElement = 0;
1247 writeInfo.descriptorCount = 1;
1248 writeInfo.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
1249 writeInfo.pImageInfo = &textureInfo;
1250 writeInfo.pBufferInfo = nullptr;
1251 writeInfo.pTexelBufferView = nullptr;
1252
1253 return true;
1254 };
1255
1256 if (command) {
1257 for (int i = 0; i < command->fNumTexSamplers; ++i) {
1258 auto texture = static_cast<const VulkanTexture*>(
1259 drawPass->getTexture(command->fTextureIndices[i]));
1260 auto sampler = static_cast<const VulkanSampler*>(
1261 drawPass->getSampler(command->fSamplerIndices[i]));
1262 if (!appendTextureSampler(texture, sampler)) {
1263 return;
1264 }
1265 }
1266 }
1267 if (fActiveGraphicsPipeline->dstReadStrategy() == DstReadStrategy::kTextureCopy) {
1268 auto texture = static_cast<const VulkanTexture*>(fDstCopy.first);
1269 auto sampler = static_cast<const VulkanSampler*>(fDstCopy.second);
1270 if (!appendTextureSampler(texture, sampler)) {
1271 return;
1272 }
1273 }
1274
1275 SkASSERT(writeDescriptorSets.size() == numTexSamplers &&
1276 descriptorImageInfos.size() == numTexSamplers);
1277 VULKAN_CALL(fSharedContext->interface(),
1278 UpdateDescriptorSets(fSharedContext->device(),
1279 numTexSamplers,
1280 &writeDescriptorSets[0],
1281 /*descriptorCopyCount=*/0,
1282 /*pDescriptorCopies=*/nullptr));
1283
1284 if (numTexSamplers == 1) {
1285 SkASSERT(singleTexture && singleSampler);
1286 singleTexture->addCachedSingleTextureDescriptorSet(set, sk_ref_sp(singleSampler));
1287 }
1288 }
1289
1290 // Store the updated descriptor set to be actually bound later on. This avoids binding and
1291 // potentially having to re-bind in cases where earlier descriptor sets change while going
1292 // through drawpass commands.
1293 fTextureSamplerDescSetToBind = *set->descriptorSet();
1294 fBindTextureSamplers = true;
1295 fNumTextureSamplers = numTexSamplers;
1296 this->trackResource(std::move(set));
1297 }
1298
1299 void VulkanCommandBuffer::bindTextureSamplers() {
1300 fBindTextureSamplers = false;
1301 if (fTextureSamplerDescSetToBind != VK_NULL_HANDLE &&
1302 fActiveGraphicsPipeline->numFragTexturesAndSamplers() == fNumTextureSamplers) {
1303 VULKAN_CALL(fSharedContext->interface(),
1304 CmdBindDescriptorSets(fPrimaryCommandBuffer,
1305 VK_PIPELINE_BIND_POINT_GRAPHICS,
1306 fActiveGraphicsPipeline->layout(),
1307 VulkanGraphicsPipeline::kTextureBindDescSetIndex,
1308 /*setCount=*/1,
1309 &fTextureSamplerDescSetToBind,
1310 /*dynamicOffsetCount=*/0,
1311 /*dynamicOffsets=*/nullptr));
1312 }
1313 }
1314
1315 void VulkanCommandBuffer::setScissor(const Scissor& scissor) {
1316 this->setScissor(scissor.getRect(fReplayTranslation, fRenderPassBounds));
1317 }
1318
1319 void VulkanCommandBuffer::setScissor(const SkIRect& rect) {
1320 VkRect2D scissor = {
1321 {rect.x(), rect.y()},
1322 {static_cast<unsigned int>(rect.width()), static_cast<unsigned int>(rect.height())}};
1323 VULKAN_CALL(fSharedContext->interface(),
1324 CmdSetScissor(fPrimaryCommandBuffer,
1325 /*firstScissor=*/0,
1326 /*scissorCount=*/1,
1327 &scissor));
1328 }
1329
1330 void VulkanCommandBuffer::draw(PrimitiveType,
1331 unsigned int baseVertex,
1332 unsigned int vertexCount) {
1333 SkASSERT(fActiveRenderPass);
1334 this->syncDescriptorSets();
1335 // TODO: set primitive type via dynamic state if available
1336 VULKAN_CALL(fSharedContext->interface(),
1337 CmdDraw(fPrimaryCommandBuffer,
1338 vertexCount,
1339 /*instanceCount=*/1,
1340 baseVertex,
1341 /*firstInstance=*/0));
1342 }
1343
1344 void VulkanCommandBuffer::drawIndexed(PrimitiveType,
1345 unsigned int baseIndex,
1346 unsigned int indexCount,
1347 unsigned int baseVertex) {
1348 SkASSERT(fActiveRenderPass);
1349 this->syncDescriptorSets();
1350 // TODO: set primitive type via dynamic state if available
1351 VULKAN_CALL(fSharedContext->interface(),
1352 CmdDrawIndexed(fPrimaryCommandBuffer,
1353 indexCount,
1354 /*instanceCount=*/1,
1355 baseIndex,
1356 baseVertex,
1357 /*firstInstance=*/0));
1358 }
1359
1360 void VulkanCommandBuffer::drawInstanced(PrimitiveType,
1361 unsigned int baseVertex,
1362 unsigned int vertexCount,
1363 unsigned int baseInstance,
1364 unsigned int instanceCount) {
1365 SkASSERT(fActiveRenderPass);
1366 this->syncDescriptorSets();
1367 // TODO: set primitive type via dynamic state if available
1368 VULKAN_CALL(fSharedContext->interface(),
1369 CmdDraw(fPrimaryCommandBuffer,
1370 vertexCount,
1371 instanceCount,
1372 baseVertex,
1373 baseInstance));
1374 }
1375
1376 void VulkanCommandBuffer::drawIndexedInstanced(PrimitiveType,
1377 unsigned int baseIndex,
1378 unsigned int indexCount,
1379 unsigned int baseVertex,
1380 unsigned int baseInstance,
1381 unsigned int instanceCount) {
1382 SkASSERT(fActiveRenderPass);
1383 this->syncDescriptorSets();
1384 // TODO: set primitive type via dynamic state if available
1385 VULKAN_CALL(fSharedContext->interface(),
1386 CmdDrawIndexed(fPrimaryCommandBuffer,
1387 indexCount,
1388 instanceCount,
1389 baseIndex,
1390 baseVertex,
1391 baseInstance));
1392 }
1393
drawIndirect(PrimitiveType)1394 void VulkanCommandBuffer::drawIndirect(PrimitiveType) {
1395 SkASSERT(fActiveRenderPass);
1396 this->syncDescriptorSets();
1397 // TODO: set primitive type via dynamic state if available
1398 // Currently we can only support doing one indirect draw operation at a time,
1399 // so stride is irrelevant.
1400 VULKAN_CALL(fSharedContext->interface(),
1401 CmdDrawIndirect(fPrimaryCommandBuffer,
1402 fBoundIndirectBuffer,
1403 fBoundIndirectBufferOffset,
1404 /*drawCount=*/1,
1405 /*stride=*/0));
1406 }
1407
drawIndexedIndirect(PrimitiveType)1408 void VulkanCommandBuffer::drawIndexedIndirect(PrimitiveType) {
1409 SkASSERT(fActiveRenderPass);
1410 this->syncDescriptorSets();
1411 // TODO: set primitive type via dynamic state if available
1412 // Currently we can only support doing one indirect draw operation at a time,
1413 // so stride is irrelevant.
1414 VULKAN_CALL(fSharedContext->interface(),
1415 CmdDrawIndexedIndirect(fPrimaryCommandBuffer,
1416 fBoundIndirectBuffer,
1417 fBoundIndirectBufferOffset,
1418 /*drawCount=*/1,
1419 /*stride=*/0));
1420 }
1421
onAddComputePass(DispatchGroupSpan)1422 bool VulkanCommandBuffer::onAddComputePass(DispatchGroupSpan) { return false; }
1423
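// Records a buffer-to-buffer copy. Both buffers must have been created with the appropriate
// transfer usage bits; access barriers are batched via setBufferAccess() and flushed with
// submitPipelineBarriers() before the copy is recorded.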
bool VulkanCommandBuffer::onCopyBufferToBuffer(const Buffer* srcBuffer,
                                               size_t srcOffset,
                                               const Buffer* dstBuffer,
                                               size_t dstOffset,
                                               size_t size) {
    auto vkSrcBuffer = static_cast<const VulkanBuffer*>(srcBuffer);
    auto vkDstBuffer = static_cast<const VulkanBuffer*>(dstBuffer);

    SkASSERT(vkSrcBuffer->bufferUsageFlags() & VK_BUFFER_USAGE_TRANSFER_SRC_BIT);
    SkASSERT(vkDstBuffer->bufferUsageFlags() & VK_BUFFER_USAGE_TRANSFER_DST_BIT);

    vkSrcBuffer->setBufferAccess(this, VK_ACCESS_TRANSFER_READ_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT);
    vkDstBuffer->setBufferAccess(
            this, VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT);

    VkBufferCopy region;
    memset(&region, 0, sizeof(VkBufferCopy));
    region.srcOffset = srcOffset;
    region.dstOffset = dstOffset;
    region.size = size;

    this->submitPipelineBarriers();

    VULKAN_CALL(fSharedContext->interface(),
                CmdCopyBuffer(fPrimaryCommandBuffer,
                              vkSrcBuffer->vkBuffer(),
                              vkDstBuffer->vkBuffer(),
                              /*regionCount=*/1,
                              &region));

    // TODO (b/394121386): We don't currently have a list of tracked buffers that are used in a
    // RenderPass in order to put in any needed barriers (like we do for textures). If we did have
    // one, then we would add the needed barriers for those buffers at the start of a render pass.
    // Until we have such a system, we need to do something hacky here and insert a barrier under
    // the assumption that the buffer will be read after this copy's write. The only buffer types
    // we allow as the dst of a transfer are vertex and index buffers, so we check the dst buffer's
    // usage flags for either of those and set the corresponding access flag.
    VkAccessFlags dstAccess = 0;
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
    VkBufferUsageFlags bufferUsageFlags = vkDstBuffer->bufferUsageFlags();
    if (bufferUsageFlags & VK_BUFFER_USAGE_VERTEX_BUFFER_BIT) {
        dstAccess = VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
    } else if (bufferUsageFlags & VK_BUFFER_USAGE_INDEX_BUFFER_BIT) {
        dstAccess = VK_ACCESS_INDEX_READ_BIT;
    } else {
        SkDEBUGFAIL("Trying to copy to non vertex or index buffer\n");
        return false;
    }
    vkDstBuffer->setBufferAccess(this, dstAccess, dstStageMask);

    return true;
}

bool VulkanCommandBuffer::onCopyTextureToBuffer(const Texture* texture,
                                                SkIRect srcRect,
                                                const Buffer* buffer,
                                                size_t bufferOffset,
                                                size_t bufferRowBytes) {
    const VulkanTexture* srcTexture = static_cast<const VulkanTexture*>(texture);
    auto dstBuffer = static_cast<const VulkanBuffer*>(buffer);
    SkASSERT(dstBuffer->bufferUsageFlags() & VK_BUFFER_USAGE_TRANSFER_DST_BIT);

    size_t bytesPerBlock = VkFormatBytesPerBlock(srcTexture->vulkanTextureInfo().fFormat);

    // Set up copy region
    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));
    region.bufferOffset = bufferOffset;
    // Vulkan expects bufferRowLength in texels, not bytes.
    region.bufferRowLength = (uint32_t)(bufferRowBytes/bytesPerBlock);
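    // For example (illustration only): for VK_FORMAT_R8G8B8A8_UNORM, bytesPerBlock is 4, so
    // bufferRowBytes = 1024 yields a bufferRowLength of 256 texels.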
    region.bufferImageHeight = 0; // Tightly packed
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, /*mipLevel=*/0, 0, 1 };
    region.imageOffset = { srcRect.left(), srcRect.top(), /*z=*/0 };
    region.imageExtent = { (uint32_t)srcRect.width(), (uint32_t)srcRect.height(), /*depth=*/1 };

    // Enable editing of the source texture so we can change its layout so it can be copied from.
    const_cast<VulkanTexture*>(srcTexture)->setImageLayout(this,
                                                           VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                                           VK_ACCESS_TRANSFER_READ_BIT,
                                                           VK_PIPELINE_STAGE_TRANSFER_BIT,
                                                           false);
    // Set current access mask for buffer
    const_cast<VulkanBuffer*>(dstBuffer)->setBufferAccess(this,
                                                          VK_ACCESS_TRANSFER_WRITE_BIT,
                                                          VK_PIPELINE_STAGE_TRANSFER_BIT);

    this->submitPipelineBarriers();

    VULKAN_CALL(fSharedContext->interface(),
                CmdCopyImageToBuffer(fPrimaryCommandBuffer,
                                     srcTexture->vkImage(),
                                     VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                     dstBuffer->vkBuffer(),
                                     /*regionCount=*/1,
                                     &region));
    return true;
}

bool VulkanCommandBuffer::onCopyBufferToTexture(const Buffer* buffer,
                                                const Texture* texture,
                                                const BufferTextureCopyData* copyData,
                                                int count) {
    auto srcBuffer = static_cast<const VulkanBuffer*>(buffer);
    SkASSERT(srcBuffer->bufferUsageFlags() & VK_BUFFER_USAGE_TRANSFER_SRC_BIT);
    const VulkanTexture* dstTexture = static_cast<const VulkanTexture*>(texture);

    TextureFormat format = TextureInfoPriv::ViewFormat(dstTexture->textureInfo());
    size_t bytesPerBlock = TextureFormatBytesPerBlock(format);
    SkISize oneBlockDims = CompressedDimensions(TextureFormatCompressionType(format), {1, 1});

    // Set up copy regions.
    TArray<VkBufferImageCopy> regions(count);
    for (int i = 0; i < count; ++i) {
        VkBufferImageCopy& region = regions.push_back();
        memset(&region, 0, sizeof(VkBufferImageCopy));
        region.bufferOffset = copyData[i].fBufferOffset;
        // copyData provides the row length in bytes, but Vulkan expects bufferRowLength in texels.
        // For compressed formats this is the number of logical pixels, not the number of blocks.
        region.bufferRowLength =
                (uint32_t)((copyData[i].fBufferRowBytes/bytesPerBlock) * oneBlockDims.fWidth);
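        // For example (illustration only): ETC2 RGB8 uses 8-byte 4x4 blocks, so
        // fBufferRowBytes = 512 gives 64 blocks per row, i.e. a bufferRowLength of 256 texels.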
        region.bufferImageHeight = 0; // Tightly packed
        region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, copyData[i].fMipLevel, 0, 1 };
        region.imageOffset = { copyData[i].fRect.left(),
                               copyData[i].fRect.top(),
                               /*z=*/0 };
        region.imageExtent = { (uint32_t)copyData[i].fRect.width(),
                               (uint32_t)copyData[i].fRect.height(),
                               /*depth=*/1 };
    }

    // Enable editing of the destination texture so we can change its layout so it can be copied to.
    const_cast<VulkanTexture*>(dstTexture)->setImageLayout(this,
                                                           VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                                           VK_ACCESS_TRANSFER_WRITE_BIT,
                                                           VK_PIPELINE_STAGE_TRANSFER_BIT,
                                                           false);

    this->submitPipelineBarriers();

    VULKAN_CALL(fSharedContext->interface(),
                CmdCopyBufferToImage(fPrimaryCommandBuffer,
                                     srcBuffer->vkBuffer(),
                                     dstTexture->vkImage(),
                                     VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                     regions.size(),
                                     regions.begin()));
    return true;
}

bool VulkanCommandBuffer::onCopyTextureToTexture(const Texture* src,
                                                 SkIRect srcRect,
                                                 const Texture* dst,
                                                 SkIPoint dstPoint,
                                                 int mipLevel) {
    const VulkanTexture* srcTexture = static_cast<const VulkanTexture*>(src);
    const VulkanTexture* dstTexture = static_cast<const VulkanTexture*>(dst);

    VkImageCopy copyRegion;
    memset(&copyRegion, 0, sizeof(VkImageCopy));
    copyRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    copyRegion.srcOffset = { srcRect.fLeft, srcRect.fTop, 0 };
    copyRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, (uint32_t)mipLevel, 0, 1 };
    copyRegion.dstOffset = { dstPoint.fX, dstPoint.fY, 0 };
    copyRegion.extent = { (uint32_t)srcRect.width(), (uint32_t)srcRect.height(), 1 };

    // Enable editing of the src texture so we can change its layout so it can be copied from.
    const_cast<VulkanTexture*>(srcTexture)->setImageLayout(this,
                                                           VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                                           VK_ACCESS_TRANSFER_READ_BIT,
                                                           VK_PIPELINE_STAGE_TRANSFER_BIT,
                                                           false);
    // Enable editing of the destination texture so we can change its layout so it can be copied to.
    const_cast<VulkanTexture*>(dstTexture)->setImageLayout(this,
                                                           VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                                           VK_ACCESS_TRANSFER_WRITE_BIT,
                                                           VK_PIPELINE_STAGE_TRANSFER_BIT,
                                                           false);

    this->submitPipelineBarriers();

    VULKAN_CALL(fSharedContext->interface(),
                CmdCopyImage(fPrimaryCommandBuffer,
                             srcTexture->vkImage(),
                             VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                             dstTexture->vkImage(),
                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                             /*regionCount=*/1,
                             &copyRegion));

    return true;
}

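// Records a host-read barrier so that CPU reads of the buffer observe all prior GPU writes once
// the submitted work has completed.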
bool VulkanCommandBuffer::onSynchronizeBufferToCpu(const Buffer* buffer, bool* outDidResultInWork) {
    static_cast<const VulkanBuffer*>(buffer)->setBufferAccess(this,
                                                              VK_ACCESS_HOST_READ_BIT,
                                                              VK_PIPELINE_STAGE_HOST_BIT);

    *outDidResultInWork = true;
    return true;
}

bool VulkanCommandBuffer::onClearBuffer(const Buffer*, size_t offset, size_t size) {
    return false;
}

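// The helpers below do not record barriers immediately; they forward to pipelineBarrier(), which
// batches barriers and later flushes them as a single vkCmdPipelineBarrier call via
// submitPipelineBarriers().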
void VulkanCommandBuffer::addBufferMemoryBarrier(const Resource* resource,
                                                 VkPipelineStageFlags srcStageMask,
                                                 VkPipelineStageFlags dstStageMask,
                                                 VkBufferMemoryBarrier* barrier) {
    SkASSERT(resource);
    this->pipelineBarrier(resource,
                          srcStageMask,
                          dstStageMask,
                          /*byRegion=*/false,
                          kBufferMemory_BarrierType,
                          barrier);
}

void VulkanCommandBuffer::addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
                                                 VkPipelineStageFlags dstStageMask,
                                                 VkBufferMemoryBarrier* barrier) {
    // We don't pass a resource to the command buffer here. The command buffer only uses it to
    // hold a ref, and every place where we add a buffer memory barrier we are also issuing some
    // other command with that buffer on the command buffer. Those other commands will already
    // cause the command buffer to hold a ref to the buffer.
    this->pipelineBarrier(/*resource=*/nullptr,
                          srcStageMask,
                          dstStageMask,
                          /*byRegion=*/false,
                          kBufferMemory_BarrierType,
                          barrier);
}

void VulkanCommandBuffer::addImageMemoryBarrier(const Resource* resource,
                                                VkPipelineStageFlags srcStageMask,
                                                VkPipelineStageFlags dstStageMask,
                                                bool byRegion,
                                                VkImageMemoryBarrier* barrier) {
    SkASSERT(resource);
    this->pipelineBarrier(resource,
                          srcStageMask,
                          dstStageMask,
                          byRegion,
                          kImageMemory_BarrierType,
                          barrier);
}

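// Accumulates a buffer or image memory barrier into the pending batch and ORs its stage masks
// into the batch-wide source/destination masks. Inside an active render pass the batch is flushed
// immediately (as a subpass self-dependency); otherwise it is flushed later by
// submitPipelineBarriers().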
void VulkanCommandBuffer::pipelineBarrier(const Resource* resource,
                                          VkPipelineStageFlags srcStageMask,
                                          VkPipelineStageFlags dstStageMask,
                                          bool byRegion,
                                          PipelineBarrierType barrierType,
                                          void* barrier) {
    // TODO: Do we need to handle wrapped command buffers?
    // SkASSERT(!this->isWrapped());
    SkASSERT(fActive);
#ifdef SK_DEBUG
    // Image barriers can appear inside of render passes, but that requires additional subpass
    // support: a subpass needs a self-dependency in order to allow barriers inside of it. We can
    // never have buffer barriers inside of a render pass. For now we just assert that we are not
    // in a render pass unless the barrier qualifies as a subpass (self-dependency) barrier.
    bool isValidSubpassBarrier = false;
    if (barrierType == kImageMemory_BarrierType) {
        VkImageMemoryBarrier* imgBarrier = static_cast<VkImageMemoryBarrier*>(barrier);
        isValidSubpassBarrier = (imgBarrier->newLayout == imgBarrier->oldLayout) &&
                                (imgBarrier->srcQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED) &&
                                (imgBarrier->dstQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED) &&
                                byRegion;
    }
    SkASSERT(!fActiveRenderPass || isValidSubpassBarrier);
#endif

    if (barrierType == kBufferMemory_BarrierType) {
        const VkBufferMemoryBarrier* barrierPtr = static_cast<VkBufferMemoryBarrier*>(barrier);
        fBufferBarriers.push_back(*barrierPtr);
    } else {
        SkASSERT(barrierType == kImageMemory_BarrierType);
        const VkImageMemoryBarrier* barrierPtr = static_cast<VkImageMemoryBarrier*>(barrier);
        // We need to check whether the new barrier covers part of the same subresource range as a
        // barrier already in the current batch. If it does, we must flush the current batch first,
        // because the Vulkan spec does not define a specific ordering for barriers submitted in
        // the same batch.
        // TODO: Look at whether we can gain anything by merging barriers together instead of
        // submitting the old ones.
        for (int i = 0; i < fImageBarriers.size(); ++i) {
            VkImageMemoryBarrier& currentBarrier = fImageBarriers[i];
            if (barrierPtr->image == currentBarrier.image) {
                const VkImageSubresourceRange newRange = barrierPtr->subresourceRange;
                const VkImageSubresourceRange oldRange = currentBarrier.subresourceRange;
                SkASSERT(newRange.aspectMask == oldRange.aspectMask);
                SkASSERT(newRange.baseArrayLayer == oldRange.baseArrayLayer);
                SkASSERT(newRange.layerCount == oldRange.layerCount);
                uint32_t newStart = newRange.baseMipLevel;
                uint32_t newEnd = newRange.baseMipLevel + newRange.levelCount - 1;
                uint32_t oldStart = oldRange.baseMipLevel;
                uint32_t oldEnd = oldRange.baseMipLevel + oldRange.levelCount - 1;
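                // The closed mip ranges [newStart, newEnd] and [oldStart, oldEnd] overlap iff
                // max(newStart, oldStart) <= min(newEnd, oldEnd). For example (illustration
                // only): mips [2, 4] and [3, 5] overlap since max(2, 3) = 3 <= min(4, 5) = 4.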
                if (std::max(newStart, oldStart) <= std::min(newEnd, oldEnd)) {
                    this->submitPipelineBarriers();
                    break;
                }
            }
        }
        fImageBarriers.push_back(*barrierPtr);
    }
    fBarriersByRegion |= byRegion;
    fSrcStageMask = fSrcStageMask | srcStageMask;
    fDstStageMask = fDstStageMask | dstStageMask;

    if (fActiveRenderPass) {
        this->submitPipelineBarriers(true);
    }
}

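// Flushes all batched buffer and image barriers with a single vkCmdPipelineBarrier call and then
// resets the batched state (barrier lists, stage masks, and the by-region flag).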
void VulkanCommandBuffer::submitPipelineBarriers(bool forSelfDependency) {
    SkASSERT(fActive);

    // TODO: Do we need to handle SecondaryCommandBuffers as well?

    // Currently we never submit a pipeline barrier without at least one buffer or image barrier.
    if (!fBufferBarriers.empty() || !fImageBarriers.empty()) {
        // Image barriers can appear inside of render passes, but that requires additional subpass
        // support: a subpass needs a self-dependency in order to allow barriers inside of it. We
        // can never have buffer barriers inside of a render pass. For now we just assert that we
        // are not in a render pass unless this flush is for a subpass self-dependency.
        SkASSERT(!fActiveRenderPass || forSelfDependency);
        // TODO: Do we need to handle wrapped CommandBuffers?
        // SkASSERT(!this->isWrapped());
        SkASSERT(fSrcStageMask && fDstStageMask);

        VkDependencyFlags dependencyFlags = fBarriersByRegion ? VK_DEPENDENCY_BY_REGION_BIT : 0;
        VULKAN_CALL(fSharedContext->interface(),
                    CmdPipelineBarrier(fPrimaryCommandBuffer, fSrcStageMask, fDstStageMask,
                                       dependencyFlags,
                                       /*memoryBarrierCount=*/0, /*pMemoryBarrier=*/nullptr,
                                       fBufferBarriers.size(), fBufferBarriers.begin(),
                                       fImageBarriers.size(), fImageBarriers.begin()));
        fBufferBarriers.clear();
        fImageBarriers.clear();
        fBarriersByRegion = false;
        fSrcStageMask = 0;
        fDstStageMask = 0;
    }
    SkASSERT(fBufferBarriers.empty());
    SkASSERT(fImageBarriers.empty());
    SkASSERT(!fBarriersByRegion);
    SkASSERT(!fSrcStageMask);
    SkASSERT(!fDstStageMask);
}

void VulkanCommandBuffer::nextSubpass() {
    // TODO: Use VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS if we add secondary cmd buffers
    VULKAN_CALL(fSharedContext->interface(),
                CmdNextSubpass(fPrimaryCommandBuffer, VK_SUBPASS_CONTENTS_INLINE));
}

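// Converts the SkIRect viewport into a VkViewport with the full [0, 1] depth range and records it
// as dynamic viewport state.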
void VulkanCommandBuffer::setViewport(SkIRect viewport) {
    VkViewport vkViewport = {
        (float) viewport.fLeft,
        (float) viewport.fTop,
        (float) viewport.width(),
        (float) viewport.height(),
        0.0f, // minDepth
        1.0f, // maxDepth
    };
    VULKAN_CALL(fSharedContext->interface(),
                CmdSetViewport(fPrimaryCommandBuffer,
                               /*firstViewport=*/0,
                               /*viewportCount=*/1,
                               &vkViewport));
}

} // namespace skgpu::graphite