/*
 * Copyright 2022 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/graphite/vk/VulkanCommandBuffer.h"

#include "include/gpu/MutableTextureState.h"
#include "include/gpu/graphite/BackendSemaphore.h"
#include "include/gpu/vk/VulkanMutableTextureState.h"
#include "include/private/base/SkTArray.h"
#include "src/gpu/DataUtils.h"
#include "src/gpu/graphite/DescriptorData.h"
#include "src/gpu/graphite/Log.h"
#include "src/gpu/graphite/RenderPassDesc.h"
#include "src/gpu/graphite/Surface_Graphite.h"
#include "src/gpu/graphite/TextureProxy.h"
#include "src/gpu/graphite/vk/VulkanBuffer.h"
#include "src/gpu/graphite/vk/VulkanDescriptorSet.h"
#include "src/gpu/graphite/vk/VulkanFramebuffer.h"
#include "src/gpu/graphite/vk/VulkanGraphiteUtilsPriv.h"
#include "src/gpu/graphite/vk/VulkanRenderPass.h"
#include "src/gpu/graphite/vk/VulkanResourceProvider.h"
#include "src/gpu/graphite/vk/VulkanSampler.h"
#include "src/gpu/graphite/vk/VulkanSharedContext.h"
#include "src/gpu/graphite/vk/VulkanTexture.h"
#include "src/gpu/vk/VulkanUtilsPriv.h"

using namespace skia_private;

namespace skgpu::graphite {

class VulkanDescriptorSet;

std::unique_ptr<VulkanCommandBuffer> VulkanCommandBuffer::Make(
        const VulkanSharedContext* sharedContext,
        VulkanResourceProvider* resourceProvider) {
    // Create VkCommandPool
    VkCommandPoolCreateFlags cmdPoolCreateFlags = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT;
    if (sharedContext->isProtected() == Protected::kYes) {
        cmdPoolCreateFlags |= VK_COMMAND_POOL_CREATE_PROTECTED_BIT;
    }

    const VkCommandPoolCreateInfo cmdPoolInfo = {
            VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,  // sType
            nullptr,                                     // pNext
            cmdPoolCreateFlags,                          // CmdPoolCreateFlags
            sharedContext->queueIndex(),                 // queueFamilyIndex
    };
    VkResult result;
    VkCommandPool pool;
    VULKAN_CALL_RESULT(sharedContext,
                       result,
                       CreateCommandPool(sharedContext->device(), &cmdPoolInfo, nullptr, &pool));
    if (result != VK_SUCCESS) {
        return nullptr;
    }

    const VkCommandBufferAllocateInfo cmdInfo = {
            VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,  // sType
            nullptr,                                         // pNext
            pool,                                            // commandPool
            VK_COMMAND_BUFFER_LEVEL_PRIMARY,                 // level
            1                                                // bufferCount
    };

    VkCommandBuffer primaryCmdBuffer;
    VULKAN_CALL_RESULT(
            sharedContext,
            result,
            AllocateCommandBuffers(sharedContext->device(), &cmdInfo, &primaryCmdBuffer));
    if (result != VK_SUCCESS) {
        VULKAN_CALL(sharedContext->interface(),
                    DestroyCommandPool(sharedContext->device(), pool, nullptr));
        return nullptr;
    }

    return std::unique_ptr<VulkanCommandBuffer>(new VulkanCommandBuffer(pool,
                                                                        primaryCmdBuffer,
                                                                        sharedContext,
                                                                        resourceProvider));
}

VulkanCommandBuffer::VulkanCommandBuffer(VkCommandPool pool,
                                         VkCommandBuffer primaryCommandBuffer,
                                         const VulkanSharedContext* sharedContext,
                                         VulkanResourceProvider* resourceProvider)
        : fPool(pool)
        , fPrimaryCommandBuffer(primaryCommandBuffer)
        , fSharedContext(sharedContext)
        , fResourceProvider(resourceProvider) {
    // When making a new command buffer, we automatically begin the command buffer
    this->begin();
}

VulkanCommandBuffer::~VulkanCommandBuffer() {
    if (fActive) {
        // Need to end command buffer before deleting it
        VULKAN_CALL(fSharedContext->interface(), EndCommandBuffer(fPrimaryCommandBuffer));
        fActive = false;
    }

    if (VK_NULL_HANDLE != fSubmitFence) {
        VULKAN_CALL(fSharedContext->interface(),
                    DestroyFence(fSharedContext->device(), fSubmitFence, nullptr));
    }
    // This should delete any command buffers as well.
    VULKAN_CALL(fSharedContext->interface(),
                DestroyCommandPool(fSharedContext->device(), fPool, nullptr));
}

void VulkanCommandBuffer::onResetCommandBuffer() {
    SkASSERT(!fActive);
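    // Resetting the command pool implicitly resets every command buffer allocated from it.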
    VULKAN_CALL_ERRCHECK(fSharedContext, ResetCommandPool(fSharedContext->device(), fPool, 0));
    fActiveGraphicsPipeline = nullptr;
    fBindUniformBuffers = true;
    fBoundIndexBuffer = VK_NULL_HANDLE;
    fBoundIndexBufferOffset = 0;
    fBoundIndirectBuffer = VK_NULL_HANDLE;
    fBoundIndirectBufferOffset = 0;
    fTextureSamplerDescSetToBind = VK_NULL_HANDLE;
    fNumTextureSamplers = 0;
    fUniformBuffersToBind.fill({});
    for (int i = 0; i < 4; ++i) {
        fCachedBlendConstant[i] = -1.0;
    }
    for (auto& boundInputBuffer : fBoundInputBuffers) {
        boundInputBuffer = VK_NULL_HANDLE;
    }
    for (auto& boundInputOffset : fBoundInputBufferOffsets) {
        boundInputOffset = 0;
    }
}

bool VulkanCommandBuffer::setNewCommandBufferResources() {
    this->begin();
    return true;
}

void VulkanCommandBuffer::begin() {
    SkASSERT(!fActive);
    VkCommandBufferBeginInfo cmdBufferBeginInfo;
    memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
    cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    cmdBufferBeginInfo.pNext = nullptr;
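    // Each recording is submitted exactly once before the pool is reset, so mark the buffer as
    // one-time submit.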
    cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
    cmdBufferBeginInfo.pInheritanceInfo = nullptr;

    VULKAN_CALL_ERRCHECK(fSharedContext,
                         BeginCommandBuffer(fPrimaryCommandBuffer, &cmdBufferBeginInfo));
    fActive = true;
}

void VulkanCommandBuffer::end() {
    SkASSERT(fActive);
    SkASSERT(!fActiveRenderPass);

    this->submitPipelineBarriers();

    VULKAN_CALL_ERRCHECK(fSharedContext, EndCommandBuffer(fPrimaryCommandBuffer));

    fActive = false;
}

void VulkanCommandBuffer::addWaitSemaphores(size_t numWaitSemaphores,
                                            const BackendSemaphore* waitSemaphores) {
    if (!waitSemaphores) {
        SkASSERT(numWaitSemaphores == 0);
        return;
    }

    for (size_t i = 0; i < numWaitSemaphores; ++i) {
        auto& semaphore = waitSemaphores[i];
        if (semaphore.isValid() && semaphore.backend() == BackendApi::kVulkan) {
            fWaitSemaphores.push_back(semaphore.getVkSemaphore());
        }
    }
}

void VulkanCommandBuffer::addSignalSemaphores(size_t numSignalSemaphores,
                                              const BackendSemaphore* signalSemaphores) {
    if (!signalSemaphores) {
        SkASSERT(numSignalSemaphores == 0);
        return;
    }

    for (size_t i = 0; i < numSignalSemaphores; ++i) {
        auto& semaphore = signalSemaphores[i];
        if (semaphore.isValid() && semaphore.backend() == BackendApi::kVulkan) {
            fSignalSemaphores.push_back(semaphore.getVkSemaphore());
        }
    }
}

void VulkanCommandBuffer::prepareSurfaceForStateUpdate(SkSurface* targetSurface,
                                                       const MutableTextureState* newState) {
    TextureProxy* textureProxy = static_cast<Surface*>(targetSurface)->backingTextureProxy();
    VulkanTexture* texture = static_cast<VulkanTexture*>(textureProxy->texture());

    // Even though these helpers are named for getting src access flags and stages, they can also
    // be used for general dst flags since we don't know exactly what the client plans on using
    // the image for.
    VkImageLayout newLayout = skgpu::MutableTextureStates::GetVkImageLayout(newState);
    if (newLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
        newLayout = texture->currentLayout();
    }
    VkPipelineStageFlags dstStage = VulkanTexture::LayoutToPipelineSrcStageFlags(newLayout);
    VkAccessFlags dstAccess = VulkanTexture::LayoutToSrcAccessMask(newLayout);

    uint32_t currentQueueFamilyIndex = texture->currentQueueFamilyIndex();
    uint32_t newQueueFamilyIndex = skgpu::MutableTextureStates::GetVkQueueFamilyIndex(newState);
    auto isSpecialQueue = [](uint32_t queueFamilyIndex) {
        return queueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL ||
               queueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT;
    };
    if (isSpecialQueue(currentQueueFamilyIndex) && isSpecialQueue(newQueueFamilyIndex)) {
        // It is illegal to have both the new and old queue be special queue families (i.e.
        // external or foreign).
        return;
    }

    texture->setImageLayoutAndQueueIndex(this,
                                         newLayout,
                                         dstAccess,
                                         dstStage,
                                         false,
                                         newQueueFamilyIndex);
}

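// Helper that fills out a VkSubmitInfo and calls vkQueueSubmit, chaining a VkProtectedSubmitInfo
// onto pNext when submitting in a protected context.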
static bool submit_to_queue(const VulkanSharedContext* sharedContext,
                            VkQueue queue,
                            VkFence fence,
                            uint32_t waitCount,
                            const VkSemaphore* waitSemaphores,
                            const VkPipelineStageFlags* waitStages,
                            uint32_t commandBufferCount,
                            const VkCommandBuffer* commandBuffers,
                            uint32_t signalCount,
                            const VkSemaphore* signalSemaphores,
                            Protected protectedContext) {
    VkProtectedSubmitInfo protectedSubmitInfo;
    if (protectedContext == Protected::kYes) {
        memset(&protectedSubmitInfo, 0, sizeof(VkProtectedSubmitInfo));
        protectedSubmitInfo.sType = VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO;
        protectedSubmitInfo.pNext = nullptr;
        protectedSubmitInfo.protectedSubmit = VK_TRUE;
    }

    VkSubmitInfo submitInfo;
    memset(&submitInfo, 0, sizeof(VkSubmitInfo));
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.pNext = protectedContext == Protected::kYes ? &protectedSubmitInfo : nullptr;
    submitInfo.waitSemaphoreCount = waitCount;
    submitInfo.pWaitSemaphores = waitSemaphores;
    submitInfo.pWaitDstStageMask = waitStages;
    submitInfo.commandBufferCount = commandBufferCount;
    submitInfo.pCommandBuffers = commandBuffers;
    submitInfo.signalSemaphoreCount = signalCount;
    submitInfo.pSignalSemaphores = signalSemaphores;
    VkResult result;
    VULKAN_CALL_RESULT(sharedContext, result, QueueSubmit(queue, 1, &submitInfo, fence));
    return result == VK_SUCCESS;
}

bool VulkanCommandBuffer::submit(VkQueue queue) {
    this->end();

    auto device = fSharedContext->device();
    VkResult err;

    if (fSubmitFence == VK_NULL_HANDLE) {
        VkFenceCreateInfo fenceInfo;
        memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
        fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
        VULKAN_CALL_RESULT(
                fSharedContext, err, CreateFence(device, &fenceInfo, nullptr, &fSubmitFence));
        if (err) {
            fSubmitFence = VK_NULL_HANDLE;
            return false;
        }
    } else {
        // This cannot return DEVICE_LOST so we assert we succeeded.
        VULKAN_CALL_RESULT(fSharedContext, err, ResetFences(device, 1, &fSubmitFence));
        SkASSERT(err == VK_SUCCESS);
    }

    SkASSERT(fSubmitFence != VK_NULL_HANDLE);
    int waitCount = fWaitSemaphores.size();
    TArray<VkPipelineStageFlags> vkWaitStages(waitCount);
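    // Have every wait semaphore gate the fragment shader and transfer stages, where sampled
    // reads and copies of imported resources occur.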
    for (int i = 0; i < waitCount; ++i) {
        vkWaitStages.push_back(VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
                               VK_PIPELINE_STAGE_TRANSFER_BIT);
    }

    bool submitted = submit_to_queue(fSharedContext,
                                     queue,
                                     fSubmitFence,
                                     waitCount,
                                     fWaitSemaphores.data(),
                                     vkWaitStages.data(),
                                     /*commandBufferCount=*/1,
                                     &fPrimaryCommandBuffer,
                                     fSignalSemaphores.size(),
                                     fSignalSemaphores.data(),
                                     fSharedContext->isProtected());
    fWaitSemaphores.clear();
    fSignalSemaphores.clear();
    if (!submitted) {
        // Destroy the fence or else we will try to wait forever for it to finish.
        VULKAN_CALL(fSharedContext->interface(), DestroyFence(device, fSubmitFence, nullptr));
        fSubmitFence = VK_NULL_HANDLE;
        return false;
    }
    return true;
}

bool VulkanCommandBuffer::isFinished() {
    SkASSERT(!fActive);
    if (VK_NULL_HANDLE == fSubmitFence) {
        return true;
    }

    VkResult err;
    VULKAN_CALL_RESULT_NOCHECK(fSharedContext->interface(), err,
                               GetFenceStatus(fSharedContext->device(), fSubmitFence));
    switch (err) {
        case VK_SUCCESS:
        case VK_ERROR_DEVICE_LOST:
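            // A lost device will never signal its fence, so treat the submission as finished.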
            return true;

        case VK_NOT_READY:
            return false;

        default:
            SKGPU_LOG_F("Error calling vkGetFenceStatus. Error: %d", err);
            SK_ABORT("Got an invalid fence status");
            return false;
    }
}

void VulkanCommandBuffer::waitUntilFinished() {
    if (fSubmitFence == VK_NULL_HANDLE) {
        return;
    }
    VULKAN_CALL_ERRCHECK(fSharedContext,
                         WaitForFences(fSharedContext->device(),
                                       1,
                                       &fSubmitFence,
                                       /*waitAll=*/true,
                                       /*timeout=*/UINT64_MAX));
}

void VulkanCommandBuffer::updateRtAdjustUniform(const SkRect& viewport) {
    SkASSERT(fActive && !fActiveRenderPass);

    // Vulkan's framebuffer space has (0, 0) at the top left. This agrees with Skia's device
    // coords. However, in NDC (-1, -1) is the bottom left. So we flip the origin here (assuming
    // all surfaces we have are TopLeft origin). We then store the adjustment values as a uniform.
    const float x = viewport.x() - fReplayTranslation.x();
    const float y = viewport.y() - fReplayTranslation.y();
    float invTwoW = 2.f / viewport.width();
    float invTwoH = 2.f / viewport.height();
    const float rtAdjust[4] = {invTwoW, invTwoH, -1.f - x * invTwoW, -1.f - y * invTwoH};

    sk_sp<Buffer> intrinsicUniformBuffer = fResourceProvider->refIntrinsicConstantBuffer();
    const VulkanBuffer* intrinsicVulkanBuffer =
            static_cast<VulkanBuffer*>(intrinsicUniformBuffer.get());
    SkASSERT(intrinsicVulkanBuffer);

    fUniformBuffersToBind[VulkanGraphicsPipeline::kIntrinsicUniformBufferIndex] = {
            {intrinsicUniformBuffer.get(), /*offset=*/0},
            VulkanResourceProvider::kIntrinsicConstantSize};

    this->updateBuffer(intrinsicVulkanBuffer,
                       &rtAdjust,
                       VulkanResourceProvider::kIntrinsicConstantSize);

    // Ensure the buffer update is completed and made visible before reading
    intrinsicVulkanBuffer->setBufferAccess(this, VK_ACCESS_UNIFORM_READ_BIT,
                                           VK_PIPELINE_STAGE_VERTEX_SHADER_BIT);
    this->trackResource(std::move(intrinsicUniformBuffer));
}

bool VulkanCommandBuffer::onAddRenderPass(const RenderPassDesc& renderPassDesc,
                                          const Texture* colorTexture,
                                          const Texture* resolveTexture,
                                          const Texture* depthStencilTexture,
                                          SkRect viewport,
                                          const DrawPassList& drawPasses) {
    for (const auto& drawPass : drawPasses) {
        // Our current implementation of setting texture image layouts does not allow layout
        // changes once we have already begun a render pass, so prior to any other commands, set
        // the layout of all sampled textures from the drawpass so they can be sampled from the
        // shader.
        const skia_private::TArray<sk_sp<TextureProxy>>& sampledTextureProxies =
                drawPass->sampledTextures();
        for (const sk_sp<TextureProxy>& textureProxy : sampledTextureProxies) {
            VulkanTexture* vulkanTexture = const_cast<VulkanTexture*>(
                    static_cast<const VulkanTexture*>(
                            textureProxy->texture()));
            vulkanTexture->setImageLayout(this,
                                          VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                                          VK_ACCESS_SHADER_READ_BIT,
                                          VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                                          false);
            this->submitPipelineBarriers();
        }
    }

    this->updateRtAdjustUniform(viewport);
    this->setViewport(viewport);

    if (!this->beginRenderPass(renderPassDesc, colorTexture, resolveTexture, depthStencilTexture)) {
        return false;
    }

    for (const auto& drawPass : drawPasses) {
        this->addDrawPass(drawPass.get());
    }

    this->endRenderPass();
    return true;
}

bool VulkanCommandBuffer::updateLoadMSAAVertexBuffer() {
    const Buffer* vertexBuffer = fResourceProvider->loadMSAAVertexBuffer();
    if (!vertexBuffer) {
        return false;
    }
    const VulkanBuffer* vulkanVertexBuffer = static_cast<const VulkanBuffer*>(vertexBuffer);
    SkASSERT(vulkanVertexBuffer->bufferUsageFlags() & VK_BUFFER_USAGE_VERTEX_BUFFER_BIT);

    // Determine vertices in NDC. TODO: When only wanting to draw a portion of the resolve
    // texture, these values will need to be dynamically determined. For now, simply span the
    // range of NDC since we want to reference the entire resolve texture.
    static constexpr float kVertices[8] = {  1.f,  1.f,
                                             1.f, -1.f,
                                            -1.f,  1.f,
                                            -1.f, -1.f };
    this->updateBuffer(vulkanVertexBuffer,
                       &kVertices,
                       VulkanResourceProvider::kLoadMSAAVertexBufferSize);

    // Ensure the buffer update is completed and made visible before reading
    vulkanVertexBuffer->setBufferAccess(this, VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
                                        VK_PIPELINE_STAGE_VERTEX_INPUT_BIT);

    return true;
}

bool VulkanCommandBuffer::updateAndBindLoadMSAAInputAttachment(
        const VulkanTexture& resolveTexture) {
    // Fetch a descriptor set that contains one input attachment
    STArray<1, DescriptorData> inputDescriptors =
            {VulkanGraphicsPipeline::kInputAttachmentDescriptor};
    sk_sp<VulkanDescriptorSet> set = fResourceProvider->findOrCreateDescriptorSet(
            SkSpan<DescriptorData>{&inputDescriptors.front(), inputDescriptors.size()});
    if (!set) {
        return false;
    }

    VkDescriptorImageInfo textureInfo;
    memset(&textureInfo, 0, sizeof(VkDescriptorImageInfo));
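    // Input attachments are read by fixed pixel location rather than sampled, so no sampler is
    // required.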
    textureInfo.sampler = VK_NULL_HANDLE;
    textureInfo.imageView =
            resolveTexture.getImageView(VulkanImageView::Usage::kAttachment)->imageView();
    textureInfo.imageLayout = resolveTexture.currentLayout();

    VkWriteDescriptorSet writeInfo;
    memset(&writeInfo, 0, sizeof(VkWriteDescriptorSet));
    writeInfo.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    writeInfo.pNext = nullptr;
    writeInfo.dstSet = *set->descriptorSet();
    writeInfo.dstBinding = VulkanGraphicsPipeline::kInputAttachmentBindingIndex;
    writeInfo.dstArrayElement = 0;
    writeInfo.descriptorCount = 1;
    writeInfo.descriptorType = DsTypeEnumToVkDs(DescriptorType::kInputAttachment);
    writeInfo.pImageInfo = &textureInfo;
    writeInfo.pBufferInfo = nullptr;
    writeInfo.pTexelBufferView = nullptr;

    VULKAN_CALL(fSharedContext->interface(),
                UpdateDescriptorSets(fSharedContext->device(),
                                     /*descriptorWriteCount=*/1,
                                     &writeInfo,
                                     /*descriptorCopyCount=*/0,
                                     /*pDescriptorCopies=*/nullptr));

    VULKAN_CALL(fSharedContext->interface(),
                CmdBindDescriptorSets(fPrimaryCommandBuffer,
                                      VK_PIPELINE_BIND_POINT_GRAPHICS,
                                      fActiveGraphicsPipeline->layout(),
                                      VulkanGraphicsPipeline::kInputAttachmentDescSetIndex,
                                      /*setCount=*/1,
                                      set->descriptorSet(),
                                      /*dynamicOffsetCount=*/0,
                                      /*dynamicOffsets=*/nullptr));

    this->trackResource(std::move(set));
    return true;
}

bool VulkanCommandBuffer::loadMSAAFromResolve(const RenderPassDesc& renderPassDesc,
                                              VulkanTexture& resolveTexture,
                                              SkISize dstDimensions) {
    sk_sp<VulkanGraphicsPipeline> loadPipeline =
            fResourceProvider->findOrCreateLoadMSAAPipeline(renderPassDesc);
    if (!loadPipeline) {
        SKGPU_LOG_E("Unable to create pipeline to load resolve texture into MSAA attachment");
        return false;
    }

    this->bindGraphicsPipeline(loadPipeline.get());
    // Make sure we do not attempt to bind uniform or texture/sampler descriptors because we do
    // not use them for loading MSAA from resolve.
    fBindUniformBuffers = false;
    fBindTextureSamplers = false;

    this->setScissor(/*left=*/0, /*top=*/0, dstDimensions.width(), dstDimensions.height());

    if (!this->updateAndBindLoadMSAAInputAttachment(resolveTexture)) {
        SKGPU_LOG_E("Unable to update and bind an input attachment descriptor for loading MSAA "
                    "from resolve");
        return false;
    }

    SkASSERT(fResourceProvider->loadMSAAVertexBuffer());
    this->bindVertexBuffers(fResourceProvider->loadMSAAVertexBuffer(),
                            /*vertexOffset=*/0,
                            /*instanceBuffer=*/nullptr,
                            /*instanceOffset=*/0);

    this->draw(PrimitiveType::kTriangleStrip, /*baseVertex=*/0, /*vertexCount=*/4);
    this->nextSubpass();

    // If we loaded the resolve attachment, then we would have set the image layout to be
    // VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL so that it could be used at the start as an
    // input attachment. However, when we switched to the main subpass it will transition the
    // layout internally to VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL. Thus we need to update our
    // tracking of the layout to match the new layout.
    resolveTexture.updateImageLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);

    // After using a distinct descriptor set layout for loading MSAA from resolve, we will need
    // to (re-)bind any descriptor sets.
    fBindUniformBuffers = true;
    fBindTextureSamplers = true;
    return true;
}

namespace {
void setup_texture_layouts(VulkanCommandBuffer* cmdBuf,
                           VulkanTexture* colorTexture,
                           VulkanTexture* resolveTexture,
                           VulkanTexture* depthStencilTexture,
                           bool loadMSAAFromResolve) {
    if (colorTexture) {
        colorTexture->setImageLayout(cmdBuf,
                                     VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                                     VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
                                             VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                                     VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                                     /*byRegion=*/false);
        if (resolveTexture) {
            if (loadMSAAFromResolve) {
                // When loading MSAA from resolve, the texture is used in the first subpass as an
                // input attachment. Subsequent subpass(es) need the resolve texture to provide
                // read access to the color attachment (for use cases such as blending), so add
                // access and pipeline stage flags for both usages.
                resolveTexture->setImageLayout(cmdBuf,
                                               VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                                               VK_ACCESS_INPUT_ATTACHMENT_READ_BIT |
                                                       VK_ACCESS_COLOR_ATTACHMENT_READ_BIT,
                                               VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
                                                       VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                                               /*byRegion=*/false);
            } else {
                resolveTexture->setImageLayout(cmdBuf,
                                               VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                                               VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
                                                       VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                                               VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                                               /*byRegion=*/false);
            }
        }
    }
    if (depthStencilTexture) {
        depthStencilTexture->setImageLayout(cmdBuf,
                                            VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
                                            VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
                                                    VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
                                            VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
                                            /*byRegion=*/false);
    }
}

void track_attachments(VulkanCommandBuffer* cmdBuf,
                       VulkanTexture* colorTexture,
                       VulkanTexture* resolveTexture,
                       VulkanTexture* depthStencilTexture) {
    if (colorTexture) {
        cmdBuf->trackResource(sk_ref_sp(colorTexture));
    }
    if (resolveTexture) {
        cmdBuf->trackResource(sk_ref_sp(resolveTexture));
    }
    if (depthStencilTexture) {
        cmdBuf->trackResource(sk_ref_sp(depthStencilTexture));
    }
}

void gather_attachment_views(skia_private::TArray<VkImageView>& attachmentViews,
                             VulkanTexture* colorTexture,
                             VulkanTexture* resolveTexture,
                             VulkanTexture* depthStencilTexture) {
    if (colorTexture) {
        VkImageView& colorAttachmentView = attachmentViews.push_back();
        colorAttachmentView =
                colorTexture->getImageView(VulkanImageView::Usage::kAttachment)->imageView();

        if (resolveTexture) {
            VkImageView& resolveView = attachmentViews.push_back();
            resolveView =
                    resolveTexture->getImageView(VulkanImageView::Usage::kAttachment)->imageView();
        }
    }

    if (depthStencilTexture) {
        VkImageView& stencilView = attachmentViews.push_back();
        stencilView =
                depthStencilTexture->getImageView(VulkanImageView::Usage::kAttachment)->imageView();
    }
}

void gather_clear_values(
        STArray<VulkanRenderPass::kMaxExpectedAttachmentCount, VkClearValue>& clearValues,
        const RenderPassDesc& renderPassDesc,
        VulkanTexture* colorTexture,
        VulkanTexture* depthStencilTexture,
        int depthStencilAttachmentIdx) {
    clearValues.push_back_n(VulkanRenderPass::kMaxExpectedAttachmentCount);
    if (colorTexture) {
        VkClearValue& colorAttachmentClear =
                clearValues.at(VulkanRenderPass::kColorAttachmentIdx);
        memset(&colorAttachmentClear, 0, sizeof(VkClearValue));
        colorAttachmentClear.color = {{renderPassDesc.fClearColor[0],
                                       renderPassDesc.fClearColor[1],
                                       renderPassDesc.fClearColor[2],
                                       renderPassDesc.fClearColor[3]}};
    }
    // Resolve texture does not have a clear value
    if (depthStencilTexture) {
        VkClearValue& depthStencilAttachmentClear = clearValues.at(depthStencilAttachmentIdx);
        memset(&depthStencilAttachmentClear, 0, sizeof(VkClearValue));
        depthStencilAttachmentClear.depthStencil = {renderPassDesc.fClearDepth,
                                                    renderPassDesc.fClearStencil};
    }
}

} // anonymous namespace

bool VulkanCommandBuffer::beginRenderPass(const RenderPassDesc& renderPassDesc,
                                          const Texture* colorTexture,
                                          const Texture* resolveTexture,
                                          const Texture* depthStencilTexture) {
    // TODO: Check that Textures match RenderPassDesc
    VulkanTexture* vulkanColorTexture =
            const_cast<VulkanTexture*>(static_cast<const VulkanTexture*>(colorTexture));
    VulkanTexture* vulkanResolveTexture =
            const_cast<VulkanTexture*>(static_cast<const VulkanTexture*>(resolveTexture));
    VulkanTexture* vulkanDepthStencilTexture =
            const_cast<VulkanTexture*>(static_cast<const VulkanTexture*>(depthStencilTexture));

    SkASSERT(resolveTexture ? renderPassDesc.fColorResolveAttachment.fStoreOp == StoreOp::kStore
                            : true);

    // Determine if we need to load MSAA from resolve, and if so, make certain that key
    // conditions are met before proceeding.
    bool loadMSAAFromResolve = renderPassDesc.fColorResolveAttachment.fTextureInfo.isValid() &&
                               renderPassDesc.fColorResolveAttachment.fLoadOp == LoadOp::kLoad;
    if (loadMSAAFromResolve && (!vulkanResolveTexture || !vulkanColorTexture ||
                                !vulkanResolveTexture->supportsInputAttachmentUsage())) {
        SKGPU_LOG_E("Cannot begin render pass. In order to load MSAA from resolve, the resolve "
                    "attachment must have input attachment usage and both the color and resolve "
                    "attachments must be valid.");
        return false;
    }

    track_attachments(this, vulkanColorTexture, vulkanResolveTexture, vulkanDepthStencilTexture);

    // Before beginning a renderpass, set all textures to the appropriate image layout.
    setup_texture_layouts(this,
                          vulkanColorTexture,
                          vulkanResolveTexture,
                          vulkanDepthStencilTexture,
                          loadMSAAFromResolve);

    static constexpr int kMaxNumAttachments = 3;
    // Gather attachment views needed for framebuffer creation.
    skia_private::TArray<VkImageView> attachmentViews;
    gather_attachment_views(
            attachmentViews, vulkanColorTexture, vulkanResolveTexture, vulkanDepthStencilTexture);

    // Gather clear values needed for RenderPassBeginInfo. Indexed by attachment number.
    STArray<kMaxNumAttachments, VkClearValue> clearValues;
    // The depth/stencil attachment can be at attachment index 1 or 2 depending on whether there
    // is a resolve texture attachment for this renderpass.
    int depthStencilAttachmentIndex = resolveTexture ? 2 : 1;
    gather_clear_values(clearValues,
                        renderPassDesc,
                        vulkanColorTexture,
                        vulkanDepthStencilTexture,
                        depthStencilAttachmentIndex);

    sk_sp<VulkanRenderPass> vulkanRenderPass =
            fResourceProvider->findOrCreateRenderPass(renderPassDesc, /*compatibleOnly=*/false);
    if (!vulkanRenderPass) {
        SKGPU_LOG_W("Could not create Vulkan RenderPass");
        return false;
    }
    this->submitPipelineBarriers();
    this->trackResource(vulkanRenderPass);

    int frameBufferWidth = 0;
    int frameBufferHeight = 0;
    // TODO: Get frame buffer render area from RenderPassDesc. Account for granularity if it
    // wasn't already. For now, simply set the render area to be the entire frame buffer.
    if (colorTexture) {
        frameBufferWidth = colorTexture->dimensions().width();
        frameBufferHeight = colorTexture->dimensions().height();
    } else if (depthStencilTexture) {
        frameBufferWidth = depthStencilTexture->dimensions().width();
        frameBufferHeight = depthStencilTexture->dimensions().height();
    }
    sk_sp<VulkanFramebuffer> framebuffer = fResourceProvider->createFramebuffer(fSharedContext,
                                                                                attachmentViews,
                                                                                *vulkanRenderPass,
                                                                                frameBufferWidth,
                                                                                frameBufferHeight);
    if (!framebuffer) {
        SKGPU_LOG_W("Could not create Vulkan Framebuffer");
        return false;
    }

    VkRenderPassBeginInfo beginInfo;
    memset(&beginInfo, 0, sizeof(VkRenderPassBeginInfo));
    beginInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
    beginInfo.pNext = nullptr;
    beginInfo.renderPass = vulkanRenderPass->renderPass();
    beginInfo.framebuffer = framebuffer->framebuffer();
    beginInfo.renderArea = {{ 0, 0 },
                            { (unsigned int) frameBufferWidth, (unsigned int) frameBufferHeight }};
    beginInfo.clearValueCount = clearValues.size();
    beginInfo.pClearValues = clearValues.begin();

    // If loading MSAA from resolve, we need to update and bind a vertex buffer with NDC
    // coordinates. This entails some necessary preparations that must be performed while there
    // is not an active renderpass.
    if (loadMSAAFromResolve) {
        // We manually load the contents of the resolve texture into the MSAA attachment as a
        // draw, so the MSAA attachment's load op should be LoadOp::kDiscard.
        SkASSERT(renderPassDesc.fColorAttachment.fLoadOp == LoadOp::kDiscard);
        SkASSERT(!fActiveRenderPass);
        SkASSERT(resolveTexture);

        if (!this->updateLoadMSAAVertexBuffer()) {
            SKGPU_LOG_E("Failed to update vertex buffer for loading MSAA from resolve");
            return false;
        }
    }

    // Submit pipeline barriers to ensure any image layout transitions are recorded prior to
    // beginning the render pass.
    this->submitPipelineBarriers();
    // TODO: If we add support for secondary command buffers, dynamically determine subpass
    // contents.
    VULKAN_CALL(fSharedContext->interface(),
                CmdBeginRenderPass(fPrimaryCommandBuffer,
                                   &beginInfo,
                                   VK_SUBPASS_CONTENTS_INLINE));
    fActiveRenderPass = true;

    if (loadMSAAFromResolve && !this->loadMSAAFromResolve(renderPassDesc,
                                                          *vulkanResolveTexture,
                                                          vulkanColorTexture->dimensions())) {
        SKGPU_LOG_E("Failed to load MSAA from resolve");
        this->endRenderPass();
        return false;
    }

    // Once we have an active render pass, the command buffer should hold on to a frame buffer
    // ref.
    this->trackResource(std::move(framebuffer));
    return true;
}

void VulkanCommandBuffer::endRenderPass() {
    SkASSERT(fActive);
    VULKAN_CALL(fSharedContext->interface(), CmdEndRenderPass(fPrimaryCommandBuffer));
    fActiveRenderPass = false;
}

void VulkanCommandBuffer::addDrawPass(const DrawPass* drawPass) {
    drawPass->addResourceRefs(this);
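    // Replay the recorded commands in order, forwarding each one to the matching bind/draw
    // method on this command buffer.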
    for (auto [type, cmdPtr] : drawPass->commands()) {
        switch (type) {
            case DrawPassCommands::Type::kBindGraphicsPipeline: {
                auto bgp = static_cast<DrawPassCommands::BindGraphicsPipeline*>(cmdPtr);
                this->bindGraphicsPipeline(drawPass->getPipeline(bgp->fPipelineIndex));
                break;
            }
            case DrawPassCommands::Type::kSetBlendConstants: {
                auto sbc = static_cast<DrawPassCommands::SetBlendConstants*>(cmdPtr);
                this->setBlendConstants(sbc->fBlendConstants);
                break;
            }
            case DrawPassCommands::Type::kBindUniformBuffer: {
                auto bub = static_cast<DrawPassCommands::BindUniformBuffer*>(cmdPtr);
                this->recordBufferBindingInfo(bub->fInfo, bub->fSlot);
                break;
            }
            case DrawPassCommands::Type::kBindDrawBuffers: {
                auto bdb = static_cast<DrawPassCommands::BindDrawBuffers*>(cmdPtr);
                this->bindDrawBuffers(
                        bdb->fVertices, bdb->fInstances, bdb->fIndices, bdb->fIndirect);
                break;
            }
            case DrawPassCommands::Type::kBindTexturesAndSamplers: {
                auto bts = static_cast<DrawPassCommands::BindTexturesAndSamplers*>(cmdPtr);
                this->recordTextureAndSamplerDescSet(*drawPass, *bts);
                break;
            }
            case DrawPassCommands::Type::kSetScissor: {
                auto ss = static_cast<DrawPassCommands::SetScissor*>(cmdPtr);
                const SkIRect& rect = ss->fScissor;
                this->setScissor(rect.fLeft, rect.fTop, rect.width(), rect.height());
                break;
            }
            case DrawPassCommands::Type::kDraw: {
                auto draw = static_cast<DrawPassCommands::Draw*>(cmdPtr);
                this->draw(draw->fType, draw->fBaseVertex, draw->fVertexCount);
                break;
            }
            case DrawPassCommands::Type::kDrawIndexed: {
                auto draw = static_cast<DrawPassCommands::DrawIndexed*>(cmdPtr);
                this->drawIndexed(
                        draw->fType, draw->fBaseIndex, draw->fIndexCount, draw->fBaseVertex);
                break;
            }
            case DrawPassCommands::Type::kDrawInstanced: {
                auto draw = static_cast<DrawPassCommands::DrawInstanced*>(cmdPtr);
                this->drawInstanced(draw->fType,
                                    draw->fBaseVertex,
                                    draw->fVertexCount,
                                    draw->fBaseInstance,
                                    draw->fInstanceCount);
                break;
            }
            case DrawPassCommands::Type::kDrawIndexedInstanced: {
                auto draw = static_cast<DrawPassCommands::DrawIndexedInstanced*>(cmdPtr);
                this->drawIndexedInstanced(draw->fType,
                                           draw->fBaseIndex,
                                           draw->fIndexCount,
                                           draw->fBaseVertex,
                                           draw->fBaseInstance,
                                           draw->fInstanceCount);
                break;
            }
            case DrawPassCommands::Type::kDrawIndirect: {
                auto draw = static_cast<DrawPassCommands::DrawIndirect*>(cmdPtr);
                this->drawIndirect(draw->fType);
                break;
            }
            case DrawPassCommands::Type::kDrawIndexedIndirect: {
                auto draw = static_cast<DrawPassCommands::DrawIndexedIndirect*>(cmdPtr);
                this->drawIndexedIndirect(draw->fType);
                break;
            }
        }
    }
}

void VulkanCommandBuffer::bindGraphicsPipeline(const GraphicsPipeline* graphicsPipeline) {
    fActiveGraphicsPipeline = static_cast<const VulkanGraphicsPipeline*>(graphicsPipeline);
    SkASSERT(fActiveRenderPass);
    VULKAN_CALL(fSharedContext->interface(), CmdBindPipeline(fPrimaryCommandBuffer,
                                                             VK_PIPELINE_BIND_POINT_GRAPHICS,
                                                             fActiveGraphicsPipeline->pipeline()));
    // TODO(b/293924877): Compare pipeline layouts. If 2 pipelines have the same pipeline layout,
    // then descriptor sets do not need to be re-bound. For now, simply force a re-binding of
    // descriptor sets with any new bindGraphicsPipeline DrawPassCommand.
    fBindUniformBuffers = true;
}

void VulkanCommandBuffer::setBlendConstants(float* blendConstants) {
    SkASSERT(fActive);
    if (0 != memcmp(blendConstants, fCachedBlendConstant, 4 * sizeof(float))) {
        VULKAN_CALL(fSharedContext->interface(),
                    CmdSetBlendConstants(fPrimaryCommandBuffer, blendConstants));
        memcpy(fCachedBlendConstant, blendConstants, 4 * sizeof(float));
    }
}

void VulkanCommandBuffer::recordBufferBindingInfo(const BindUniformBufferInfo& info,
                                                  UniformSlot slot) {
    unsigned int bufferIndex = 0;
    switch (slot) {
        case UniformSlot::kRenderStep:
            bufferIndex = VulkanGraphicsPipeline::kRenderStepUniformBufferIndex;
            break;
        case UniformSlot::kPaint:
            bufferIndex = VulkanGraphicsPipeline::kPaintUniformBufferIndex;
            break;
        default:
            SkASSERT(false);
    }

    fUniformBuffersToBind[bufferIndex] = info;
    fBindUniformBuffers = true;
}

void VulkanCommandBuffer::syncDescriptorSets() {
    if (fBindUniformBuffers) {
        this->bindUniformBuffers();
        // Changes to descriptor sets in lower slot numbers disrupt later set bindings.
        // Currently, the descriptor set which houses uniform buffers is at a lower slot than the
        // texture/sampler set, so rebinding uniform buffers necessitates re-binding any
        // texture/samplers.
        fBindTextureSamplers = true;
    }
    if (fBindTextureSamplers) {
        this->bindTextureSamplers();
    }
}

void VulkanCommandBuffer::bindUniformBuffers() {
    fBindUniformBuffers = false;

    // We always bind at least one uniform buffer descriptor for intrinsic uniforms, but can bind
    // up to three (one for render step uniforms, one for paint uniforms).
    STArray<VulkanGraphicsPipeline::kNumUniformBuffers, DescriptorData> descriptors;
    descriptors.push_back(VulkanGraphicsPipeline::kIntrinsicUniformBufferDescriptor);
    if (fActiveGraphicsPipeline->hasStepUniforms() &&
        fUniformBuffersToBind[VulkanGraphicsPipeline::kRenderStepUniformBufferIndex].fBuffer) {
        descriptors.push_back(VulkanGraphicsPipeline::kRenderStepUniformDescriptor);
    }
    if (fActiveGraphicsPipeline->hasFragmentUniforms() &&
        fUniformBuffersToBind[VulkanGraphicsPipeline::kPaintUniformBufferIndex].fBuffer) {
        descriptors.push_back(VulkanGraphicsPipeline::kPaintUniformDescriptor);
    }

    sk_sp<VulkanDescriptorSet> descSet = fResourceProvider->findOrCreateUniformBuffersDescriptorSet(
            descriptors, fUniformBuffersToBind);
    if (!descSet) {
        SKGPU_LOG_E("Unable to find or create uniform descriptor set");
        return;
    }
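    // The uniform buffers are bound with dynamic offsets, so gather the offset of each binding
    // within its backing buffer to pass alongside the descriptor set.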
    skia_private::AutoSTMalloc<3, uint32_t> dynamicOffsets(descriptors.size());
    for (int i = 0; i < descriptors.size(); i++) {
        int descriptorBindingIndex = descriptors[i].fBindingIndex;
        SkASSERT(static_cast<unsigned long>(descriptorBindingIndex) <
                 fUniformBuffersToBind.size());
        const auto& bindInfo = fUniformBuffersToBind[descriptorBindingIndex];
        dynamicOffsets[i] = bindInfo.fOffset;
    }

    VULKAN_CALL(fSharedContext->interface(),
                CmdBindDescriptorSets(fPrimaryCommandBuffer,
                                      VK_PIPELINE_BIND_POINT_GRAPHICS,
                                      fActiveGraphicsPipeline->layout(),
                                      VulkanGraphicsPipeline::kUniformBufferDescSetIndex,
                                      /*setCount=*/1,
                                      descSet->descriptorSet(),
                                      descriptors.size(),
                                      dynamicOffsets.get()));
    this->trackResource(std::move(descSet));
}

void VulkanCommandBuffer::bindDrawBuffers(const BindBufferInfo& vertices,
                                          const BindBufferInfo& instances,
                                          const BindBufferInfo& indices,
                                          const BindBufferInfo& indirect) {
    this->bindVertexBuffers(vertices.fBuffer,
                            vertices.fOffset,
                            instances.fBuffer,
                            instances.fOffset);
    this->bindIndexBuffer(indices.fBuffer, indices.fOffset);
    this->bindIndirectBuffer(indirect.fBuffer, indirect.fOffset);
}

void VulkanCommandBuffer::bindVertexBuffers(const Buffer* vertexBuffer,
                                            size_t vertexOffset,
                                            const Buffer* instanceBuffer,
                                            size_t instanceOffset) {
    this->bindInputBuffer(vertexBuffer, vertexOffset,
                          VulkanGraphicsPipeline::kVertexBufferIndex);
    this->bindInputBuffer(instanceBuffer, instanceOffset,
                          VulkanGraphicsPipeline::kInstanceBufferIndex);
}

void VulkanCommandBuffer::bindInputBuffer(const Buffer* buffer, VkDeviceSize offset,
                                          uint32_t binding) {
    if (buffer) {
        VkBuffer vkBuffer = static_cast<const VulkanBuffer*>(buffer)->vkBuffer();
        SkASSERT(vkBuffer != VK_NULL_HANDLE);
        if (vkBuffer != fBoundInputBuffers[binding] ||
            offset != fBoundInputBufferOffsets[binding]) {
            VULKAN_CALL(fSharedContext->interface(),
                        CmdBindVertexBuffers(fPrimaryCommandBuffer,
                                             binding,
                                             /*bindingCount=*/1,
                                             &vkBuffer,
                                             &offset));
            fBoundInputBuffers[binding] = vkBuffer;
            fBoundInputBufferOffsets[binding] = offset;
            this->trackResource(sk_ref_sp(buffer));
        }
    }
}

void VulkanCommandBuffer::bindIndexBuffer(const Buffer* indexBuffer, size_t offset) {
    if (indexBuffer) {
        VkBuffer vkBuffer = static_cast<const VulkanBuffer*>(indexBuffer)->vkBuffer();
        SkASSERT(vkBuffer != VK_NULL_HANDLE);
        if (vkBuffer != fBoundIndexBuffer || offset != fBoundIndexBufferOffset) {
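            // Graphite's index data is 16-bit, so the index type is always VK_INDEX_TYPE_UINT16.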
            VULKAN_CALL(fSharedContext->interface(), CmdBindIndexBuffer(fPrimaryCommandBuffer,
                                                                        vkBuffer,
                                                                        offset,
                                                                        VK_INDEX_TYPE_UINT16));
            fBoundIndexBuffer = vkBuffer;
            fBoundIndexBufferOffset = offset;
            this->trackResource(sk_ref_sp(indexBuffer));
        }
    } else {
        fBoundIndexBuffer = VK_NULL_HANDLE;
        fBoundIndexBufferOffset = 0;
    }
}

void VulkanCommandBuffer::bindIndirectBuffer(const Buffer* indirectBuffer, size_t offset) {
    // Indirect buffers are not bound via the command buffer, but specified in the draw cmd.
    if (indirectBuffer) {
        fBoundIndirectBuffer = static_cast<const VulkanBuffer*>(indirectBuffer)->vkBuffer();
        fBoundIndirectBufferOffset = offset;
        this->trackResource(sk_ref_sp(indirectBuffer));
    } else {
        fBoundIndirectBuffer = VK_NULL_HANDLE;
        fBoundIndirectBufferOffset = 0;
    }
}

void VulkanCommandBuffer::recordTextureAndSamplerDescSet(
        const DrawPass& drawPass, const DrawPassCommands::BindTexturesAndSamplers& command) {
    if (command.fNumTexSamplers == 0) {
        fNumTextureSamplers = 0;
        fTextureSamplerDescSetToBind = VK_NULL_HANDLE;
        fBindTextureSamplers = false;
        return;
    }
    // Query resource provider to obtain a descriptor set for the texture/samplers
    TArray<DescriptorData> descriptors(command.fNumTexSamplers);
    for (int i = 0; i < command.fNumTexSamplers; i++) {
        descriptors.push_back({DescriptorType::kCombinedTextureSampler,
                               /*count=*/1,
                               /*bindingIdx=*/i,
                               PipelineStageFlags::kFragmentShader});
    }
    sk_sp<VulkanDescriptorSet> set = fResourceProvider->findOrCreateDescriptorSet(
            SkSpan<DescriptorData>{&descriptors.front(), descriptors.size()});

    if (!set) {
        SKGPU_LOG_E("Unable to find or create descriptor set");
        fNumTextureSamplers = 0;
        fTextureSamplerDescSetToBind = VK_NULL_HANDLE;
        fBindTextureSamplers = false;
        return;
    }
    // Populate the descriptor set with texture/sampler descriptors
    TArray<VkWriteDescriptorSet> writeDescriptorSets(command.fNumTexSamplers);
    TArray<VkDescriptorImageInfo> descriptorImageInfos(command.fNumTexSamplers);
    for (int i = 0; i < command.fNumTexSamplers; ++i) {
        auto texture = const_cast<VulkanTexture*>(static_cast<const VulkanTexture*>(
                drawPass.getTexture(command.fTextureIndices[i])));
        auto sampler = static_cast<const VulkanSampler*>(
                drawPass.getSampler(command.fSamplerIndices[i]));
        if (!texture || !sampler) {
            // TODO(b/294198324): Investigate the root cause for null texture or samplers on
            // Ubuntu QuadP400 GPU
            SKGPU_LOG_E("Texture and sampler must not be null");
            fNumTextureSamplers = 0;
            fTextureSamplerDescSetToBind = VK_NULL_HANDLE;
            fBindTextureSamplers = false;
            return;
        }

        VkDescriptorImageInfo& textureInfo = descriptorImageInfos.push_back();
        memset(&textureInfo, 0, sizeof(VkDescriptorImageInfo));
        textureInfo.sampler = sampler->vkSampler();
        textureInfo.imageView =
                texture->getImageView(VulkanImageView::Usage::kShaderInput)->imageView();
        textureInfo.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;

        VkWriteDescriptorSet& writeInfo = writeDescriptorSets.push_back();
        memset(&writeInfo, 0, sizeof(VkWriteDescriptorSet));
        writeInfo.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
        writeInfo.pNext = nullptr;
        writeInfo.dstSet = *set->descriptorSet();
        writeInfo.dstBinding = i;
        writeInfo.dstArrayElement = 0;
        writeInfo.descriptorCount = 1;
        writeInfo.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
        writeInfo.pImageInfo = &textureInfo;
        writeInfo.pBufferInfo = nullptr;
        writeInfo.pTexelBufferView = nullptr;
    }

    VULKAN_CALL(fSharedContext->interface(), UpdateDescriptorSets(fSharedContext->device(),
                                                                  command.fNumTexSamplers,
                                                                  &writeDescriptorSets[0],
                                                                  /*descriptorCopyCount=*/0,
                                                                  /*pDescriptorCopies=*/nullptr));

    // Store the updated descriptor set to be actually bound later on. This avoids binding and
    // potentially having to re-bind in cases where earlier descriptor sets change while going
    // through drawpass commands.
    fTextureSamplerDescSetToBind = *set->descriptorSet();
    fBindTextureSamplers = true;
    fNumTextureSamplers = command.fNumTexSamplers;
    this->trackResource(std::move(set));
}

void VulkanCommandBuffer::bindTextureSamplers() {
    fBindTextureSamplers = false;
    if (fTextureSamplerDescSetToBind != VK_NULL_HANDLE &&
        fActiveGraphicsPipeline->numTextureSamplers() == fNumTextureSamplers) {
        VULKAN_CALL(fSharedContext->interface(),
                    CmdBindDescriptorSets(fPrimaryCommandBuffer,
                                          VK_PIPELINE_BIND_POINT_GRAPHICS,
                                          fActiveGraphicsPipeline->layout(),
                                          VulkanGraphicsPipeline::kTextureBindDescSetIndex,
                                          /*setCount=*/1,
                                          &fTextureSamplerDescSetToBind,
                                          /*dynamicOffsetCount=*/0,
                                          /*dynamicOffsets=*/nullptr));
    }
}

void VulkanCommandBuffer::setScissor(unsigned int left, unsigned int top, unsigned int width,
                                     unsigned int height) {
    VkRect2D scissor = {
            {(int32_t)left, (int32_t)top},
            {width, height}
    };
    VULKAN_CALL(fSharedContext->interface(),
                CmdSetScissor(fPrimaryCommandBuffer,
                              /*firstScissor=*/0,
                              /*scissorCount=*/1,
                              &scissor));
}

void VulkanCommandBuffer::draw(PrimitiveType,
                               unsigned int baseVertex,
                               unsigned int vertexCount) {
    SkASSERT(fActiveRenderPass);
    this->syncDescriptorSets();
    // TODO: set primitive type via dynamic state if available
    VULKAN_CALL(fSharedContext->interface(),
                CmdDraw(fPrimaryCommandBuffer,
                        vertexCount,
                        /*instanceCount=*/1,
                        baseVertex,
                        /*firstInstance=*/0));
}

void VulkanCommandBuffer::drawIndexed(PrimitiveType,
                                      unsigned int baseIndex,
                                      unsigned int indexCount,
                                      unsigned int baseVertex) {
    SkASSERT(fActiveRenderPass);
    this->syncDescriptorSets();
    // TODO: set primitive type via dynamic state if available
    VULKAN_CALL(fSharedContext->interface(),
                CmdDrawIndexed(fPrimaryCommandBuffer,
                               indexCount,
                               /*instanceCount=*/1,
                               baseIndex,
                               baseVertex,
                               /*firstInstance=*/0));
}

void VulkanCommandBuffer::drawInstanced(PrimitiveType,
                                        unsigned int baseVertex,
                                        unsigned int vertexCount,
                                        unsigned int baseInstance,
                                        unsigned int instanceCount) {
    SkASSERT(fActiveRenderPass);
    this->syncDescriptorSets();
    // TODO: set primitive type via dynamic state if available
    VULKAN_CALL(fSharedContext->interface(),
                CmdDraw(fPrimaryCommandBuffer,
                        vertexCount,
                        instanceCount,
                        baseVertex,
                        baseInstance));
}

void VulkanCommandBuffer::drawIndexedInstanced(PrimitiveType,
                                               unsigned int baseIndex,
                                               unsigned int indexCount,
                                               unsigned int baseVertex,
                                               unsigned int baseInstance,
                                               unsigned int instanceCount) {
    SkASSERT(fActiveRenderPass);
    this->syncDescriptorSets();
    // TODO: set primitive type via dynamic state if available
    VULKAN_CALL(fSharedContext->interface(),
                CmdDrawIndexed(fPrimaryCommandBuffer,
                               indexCount,
                               instanceCount,
                               baseIndex,
                               baseVertex,
                               baseInstance));
}

void VulkanCommandBuffer::drawIndirect(PrimitiveType) {
    SkASSERT(fActiveRenderPass);
    this->syncDescriptorSets();
    // TODO: set primitive type via dynamic state if available
    // Currently we can only support doing one indirect draw operation at a time,
    // so stride is irrelevant.
    VULKAN_CALL(fSharedContext->interface(),
                CmdDrawIndirect(fPrimaryCommandBuffer,
                                fBoundIndirectBuffer,
                                fBoundIndirectBufferOffset,
                                /*drawCount=*/1,
                                /*stride=*/0));
}

void VulkanCommandBuffer::drawIndexedIndirect(PrimitiveType) {
    SkASSERT(fActiveRenderPass);
    this->syncDescriptorSets();
    // TODO: set primitive type via dynamic state if available
    // Currently we can only support doing one indirect draw operation at a time,
    // so stride is irrelevant.
    VULKAN_CALL(fSharedContext->interface(),
                CmdDrawIndexedIndirect(fPrimaryCommandBuffer,
                                       fBoundIndirectBuffer,
                                       fBoundIndirectBufferOffset,
                                       /*drawCount=*/1,
                                       /*stride=*/0));
}

bool VulkanCommandBuffer::onAddComputePass(DispatchGroupSpan) { return false; }

bool VulkanCommandBuffer::onCopyBufferToBuffer(const Buffer* srcBuffer,
                                               size_t srcOffset,
                                               const Buffer* dstBuffer,
                                               size_t dstOffset,
                                               size_t size) {
    auto vkSrcBuffer = static_cast<const VulkanBuffer*>(srcBuffer);
    auto vkDstBuffer = static_cast<const VulkanBuffer*>(dstBuffer);

    SkASSERT(vkSrcBuffer->bufferUsageFlags() & VK_BUFFER_USAGE_TRANSFER_SRC_BIT);
    SkASSERT(vkDstBuffer->bufferUsageFlags() & VK_BUFFER_USAGE_TRANSFER_DST_BIT);

    VkBufferCopy region;
    memset(&region, 0, sizeof(VkBufferCopy));
    region.srcOffset = srcOffset;
    region.dstOffset = dstOffset;
    region.size = size;

    this->submitPipelineBarriers();

    VULKAN_CALL(fSharedContext->interface(),
                CmdCopyBuffer(fPrimaryCommandBuffer,
                              vkSrcBuffer->vkBuffer(),
                              vkDstBuffer->vkBuffer(),
                              /*regionCount=*/1,
                              &region));

    return true;
}

bool VulkanCommandBuffer::onCopyTextureToBuffer(const Texture* texture,
                                                SkIRect srcRect,
                                                const Buffer* buffer,
                                                size_t bufferOffset,
                                                size_t bufferRowBytes) {
    const VulkanTexture* srcTexture = static_cast<const VulkanTexture*>(texture);
    auto dstBuffer = static_cast<const VulkanBuffer*>(buffer);
    SkASSERT(dstBuffer->bufferUsageFlags() & VK_BUFFER_USAGE_TRANSFER_DST_BIT);

    // Obtain the VkFormat of the source texture so we can determine bytes per block.
    VulkanTextureInfo srcTextureInfo;
    texture->textureInfo().getVulkanTextureInfo(&srcTextureInfo);
    size_t bytesPerBlock = VkFormatBytesPerBlock(srcTextureInfo.fFormat);

    // Set up copy region
    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));
    region.bufferOffset = bufferOffset;
    // Vulkan expects bufferRowLength in texels, not bytes.
    region.bufferRowLength = (uint32_t)(bufferRowBytes/bytesPerBlock);
    region.bufferImageHeight = 0;  // Tightly packed
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, /*mipLevel=*/0, 0, 1 };
    region.imageOffset = { srcRect.left(), srcRect.top(), /*z=*/0 };
    region.imageExtent = { (uint32_t)srcRect.width(), (uint32_t)srcRect.height(), /*depth=*/1 };

    // Enable editing of the source texture so we can change its layout so it can be copied from.
    const_cast<VulkanTexture*>(srcTexture)->setImageLayout(this,
                                                           VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                                           VK_ACCESS_TRANSFER_READ_BIT,
                                                           VK_PIPELINE_STAGE_TRANSFER_BIT,
                                                           false);
    // Set current access mask for buffer
    const_cast<VulkanBuffer*>(dstBuffer)->setBufferAccess(this,
                                                          VK_ACCESS_TRANSFER_WRITE_BIT,
                                                          VK_PIPELINE_STAGE_TRANSFER_BIT);

    this->submitPipelineBarriers();

    VULKAN_CALL(fSharedContext->interface(),
                CmdCopyImageToBuffer(fPrimaryCommandBuffer,
                                     srcTexture->vkImage(),
                                     VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                     dstBuffer->vkBuffer(),
                                     /*regionCount=*/1,
                                     &region));
    return true;
}
1327
onCopyBufferToTexture(const Buffer * buffer,const Texture * texture,const BufferTextureCopyData * copyData,int count)1328 bool VulkanCommandBuffer::onCopyBufferToTexture(const Buffer* buffer,
1329 const Texture* texture,
1330 const BufferTextureCopyData* copyData,
1331 int count) {
1332 auto srcBuffer = static_cast<const VulkanBuffer*>(buffer);
1333 SkASSERT(srcBuffer->bufferUsageFlags() & VK_BUFFER_USAGE_TRANSFER_SRC_BIT);
1334 const VulkanTexture* dstTexture = static_cast<const VulkanTexture*>(texture);
1335
1336 // Obtain the VkFormat of the destination texture so we can determine bytes per block.
1337 VulkanTextureInfo dstTextureInfo;
1338 dstTexture->textureInfo().getVulkanTextureInfo(&dstTextureInfo);
1339 size_t bytesPerBlock = VkFormatBytesPerBlock(dstTextureInfo.fFormat);
1340 SkISize oneBlockDims = CompressedDimensions(dstTexture->textureInfo().compressionType(),
1341 {1, 1});
1342
1343 // Set up copy regions.
1344 TArray<VkBufferImageCopy> regions(count);
1345 for (int i = 0; i < count; ++i) {
1346 VkBufferImageCopy& region = regions.push_back();
1347 memset(®ion, 0, sizeof(VkBufferImageCopy));
1348 region.bufferOffset = copyData[i].fBufferOffset;
1349 // copyData provides row length in bytes, but Vulkan expects bufferRowLength in texels.
1350 // For compressed this is the number of logical pixels not the number of blocks.
1351 region.bufferRowLength =
1352 (uint32_t)((copyData[i].fBufferRowBytes/bytesPerBlock) * oneBlockDims.fWidth);
1353 region.bufferImageHeight = 0; // Tightly packed
1354 region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, copyData[i].fMipLevel, 0, 1 };
1355 region.imageOffset = { copyData[i].fRect.left(),
1356 copyData[i].fRect.top(),
1357 /*z=*/0 };
1358 region.imageExtent = { (uint32_t)copyData[i].fRect.width(),
1359 (uint32_t)copyData[i].fRect.height(),
1360 /*depth=*/1 };
1361 }
1362
    // Enable editing of the destination texture so we can change its layout, allowing it to be
    // copied to.
    const_cast<VulkanTexture*>(dstTexture)->setImageLayout(this,
                                                           VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                                           VK_ACCESS_TRANSFER_WRITE_BIT,
                                                           VK_PIPELINE_STAGE_TRANSFER_BIT,
                                                           false);

    this->submitPipelineBarriers();

    VULKAN_CALL(fSharedContext->interface(),
                CmdCopyBufferToImage(fPrimaryCommandBuffer,
                                     srcBuffer->vkBuffer(),
                                     dstTexture->vkImage(),
                                     VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                     regions.size(),
                                     regions.begin()));
    return true;
}

bool VulkanCommandBuffer::onCopyTextureToTexture(const Texture* src,
                                                 SkIRect srcRect,
                                                 const Texture* dst,
                                                 SkIPoint dstPoint,
                                                 int mipLevel) {
    const VulkanTexture* srcTexture = static_cast<const VulkanTexture*>(src);
    const VulkanTexture* dstTexture = static_cast<const VulkanTexture*>(dst);

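    // The copy reads from mip level 0 of the source and writes to the requested mip level of the
    // destination, one array layer in each.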
    VkImageCopy copyRegion;
    memset(&copyRegion, 0, sizeof(VkImageCopy));
    copyRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    copyRegion.srcOffset = { srcRect.fLeft, srcRect.fTop, 0 };
    copyRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, (uint32_t)mipLevel, 0, 1 };
    copyRegion.dstOffset = { dstPoint.fX, dstPoint.fY, 0 };
    copyRegion.extent = { (uint32_t)srcRect.width(), (uint32_t)srcRect.height(), 1 };

    // Enable editing of the src texture so we can change its layout, allowing it to be copied
    // from.
    const_cast<VulkanTexture*>(srcTexture)->setImageLayout(this,
                                                           VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                                           VK_ACCESS_TRANSFER_READ_BIT,
                                                           VK_PIPELINE_STAGE_TRANSFER_BIT,
                                                           false);
    // Enable editing of the destination texture so we can change its layout, allowing it to be
    // copied to.
    const_cast<VulkanTexture*>(dstTexture)->setImageLayout(this,
                                                           VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                                           VK_ACCESS_TRANSFER_WRITE_BIT,
                                                           VK_PIPELINE_STAGE_TRANSFER_BIT,
                                                           false);

    this->submitPipelineBarriers();

    VULKAN_CALL(fSharedContext->interface(),
                CmdCopyImage(fPrimaryCommandBuffer,
                             srcTexture->vkImage(),
                             VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                             dstTexture->vkImage(),
                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                             /*regionCount=*/1,
                             &copyRegion));

    return true;
}

bool VulkanCommandBuffer::onSynchronizeBufferToCpu(const Buffer* buffer, bool* outDidResultInWork) {
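    // A barrier to VK_PIPELINE_STAGE_HOST_BIT with VK_ACCESS_HOST_READ_BIT makes prior GPU writes
    // to this buffer available and visible to the host. The CPU can then safely read the mapped
    // memory once the fence for this submission has signaled.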
    static_cast<const VulkanBuffer*>(buffer)->setBufferAccess(this,
                                                              VK_ACCESS_HOST_READ_BIT,
                                                              VK_PIPELINE_STAGE_HOST_BIT);

    *outDidResultInWork = true;
    return true;
}

bool VulkanCommandBuffer::onClearBuffer(const Buffer*, size_t offset, size_t size) {
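    // Clearing a buffer from the command buffer is not implemented yet; returning false signals
    // that no clear was recorded.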
    return false;
}

void VulkanCommandBuffer::addBufferMemoryBarrier(const Resource* resource,
                                                 VkPipelineStageFlags srcStageMask,
                                                 VkPipelineStageFlags dstStageMask,
                                                 VkBufferMemoryBarrier* barrier) {
    SkASSERT(resource);
    this->pipelineBarrier(resource,
                          srcStageMask,
                          dstStageMask,
                          /*byRegion=*/false,
                          kBufferMemory_BarrierType,
                          barrier);
}

void VulkanCommandBuffer::addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
                                                 VkPipelineStageFlags dstStageMask,
                                                 VkBufferMemoryBarrier* barrier) {
    // We don't pass a resource to the command buffer here. The command buffer would only use it
    // to hold a ref, and everywhere we add a buffer memory barrier we are also recording some
    // other command with the buffer on this command buffer. Those other commands already cause
    // the command buffer to hold a ref to the buffer.
    this->pipelineBarrier(/*resource=*/nullptr,
                          srcStageMask,
                          dstStageMask,
                          /*byRegion=*/false,
                          kBufferMemory_BarrierType,
                          barrier);
}

void VulkanCommandBuffer::addImageMemoryBarrier(const Resource* resource,
                                                VkPipelineStageFlags srcStageMask,
                                                VkPipelineStageFlags dstStageMask,
                                                bool byRegion,
                                                VkImageMemoryBarrier* barrier) {
    SkASSERT(resource);
    this->pipelineBarrier(resource,
                          srcStageMask,
                          dstStageMask,
                          byRegion,
                          kImageMemory_BarrierType,
                          barrier);
}

void VulkanCommandBuffer::pipelineBarrier(const Resource* resource,
                                          VkPipelineStageFlags srcStageMask,
                                          VkPipelineStageFlags dstStageMask,
                                          bool byRegion,
                                          BarrierType barrierType,
                                          void* barrier) {
    // TODO: Do we need to handle wrapped command buffers?
    // SkASSERT(!this->isWrapped());
    SkASSERT(fActive);
#ifdef SK_DEBUG
    // For images we can have barriers inside of render passes, but they require us to add more
    // support in subpasses, which need self-dependencies to allow barriers inside them. Also, we
    // can never have buffer barriers inside of a render pass. For now we will just assert that we
    // are not in a render pass.
    bool isValidSubpassBarrier = false;
    if (barrierType == kImageMemory_BarrierType) {
        VkImageMemoryBarrier* imgBarrier = static_cast<VkImageMemoryBarrier*>(barrier);
        isValidSubpassBarrier = (imgBarrier->newLayout == imgBarrier->oldLayout) &&
                                (imgBarrier->srcQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED) &&
                                (imgBarrier->dstQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED) &&
                                byRegion;
    }
    SkASSERT(!fActiveRenderPass || isValidSubpassBarrier);
#endif

    if (barrierType == kBufferMemory_BarrierType) {
        const VkBufferMemoryBarrier* barrierPtr = static_cast<VkBufferMemoryBarrier*>(barrier);
        fBufferBarriers.push_back(*barrierPtr);
    } else {
        SkASSERT(barrierType == kImageMemory_BarrierType);
        const VkImageMemoryBarrier* barrierPtr = static_cast<VkImageMemoryBarrier*>(barrier);
        // We need to check whether we are adding a pipeline barrier that covers part of the same
        // subresource range as a barrier already in the current batch. If it does, we must submit
        // the current batch first, because the Vulkan spec does not define a specific ordering for
        // barriers submitted in the same batch.
        // TODO: See if we can gain anything by merging barriers together instead of submitting
        // the old ones.
        for (int i = 0; i < fImageBarriers.size(); ++i) {
            VkImageMemoryBarrier& currentBarrier = fImageBarriers[i];
            if (barrierPtr->image == currentBarrier.image) {
                const VkImageSubresourceRange newRange = barrierPtr->subresourceRange;
                const VkImageSubresourceRange oldRange = currentBarrier.subresourceRange;
                SkASSERT(newRange.aspectMask == oldRange.aspectMask);
                SkASSERT(newRange.baseArrayLayer == oldRange.baseArrayLayer);
                SkASSERT(newRange.layerCount == oldRange.layerCount);
                uint32_t newStart = newRange.baseMipLevel;
                uint32_t newEnd = newRange.baseMipLevel + newRange.levelCount - 1;
                uint32_t oldStart = oldRange.baseMipLevel;
                uint32_t oldEnd = oldRange.baseMipLevel + oldRange.levelCount - 1;
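                // Two closed mip ranges [start, end] overlap iff max(starts) <= min(ends),
                // e.g. levels [0,2] and [2,4] overlap at level 2.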
                if (std::max(newStart, oldStart) <= std::min(newEnd, oldEnd)) {
                    this->submitPipelineBarriers();
                    break;
                }
            }
        }
        fImageBarriers.push_back(*barrierPtr);
    }
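    // Barriers are batched until submitPipelineBarriers(), so accumulate the by-region flag and
    // union the stage masks across everything currently in the batch.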
    fBarriersByRegion |= byRegion;
    fSrcStageMask = fSrcStageMask | srcStageMask;
    fDstStageMask = fDstStageMask | dstStageMask;

    if (resource) {
        this->trackResource(sk_ref_sp(resource));
    }
    if (fActiveRenderPass) {
        this->submitPipelineBarriers(true);
    }
}

void VulkanCommandBuffer::submitPipelineBarriers(bool forSelfDependency) {
    SkASSERT(fActive);

    // TODO: Do we need to handle SecondaryCommandBuffers as well?

    // Currently we never submit a pipeline barrier without at least one buffer or image barrier.
    if (fBufferBarriers.size() || fImageBarriers.size()) {
        // For images we can have barriers inside of render passes, but they require us to add
        // more support in subpasses, which need self-dependencies to allow barriers inside them.
        // Also, we can never have buffer barriers inside of a render pass. For now we will just
        // assert that we are not in a render pass.
        SkASSERT(!fActiveRenderPass || forSelfDependency);
        // TODO: Do we need to handle wrapped CommandBuffers?
        // SkASSERT(!this->isWrapped());
        SkASSERT(fSrcStageMask && fDstStageMask);

        VkDependencyFlags dependencyFlags = fBarriersByRegion ? VK_DEPENDENCY_BY_REGION_BIT : 0;
        VULKAN_CALL(fSharedContext->interface(),
                    CmdPipelineBarrier(fPrimaryCommandBuffer, fSrcStageMask, fDstStageMask,
                                       dependencyFlags,
                                       /*memoryBarrierCount=*/0, /*pMemoryBarrier=*/nullptr,
                                       fBufferBarriers.size(), fBufferBarriers.begin(),
                                       fImageBarriers.size(), fImageBarriers.begin()));
        fBufferBarriers.clear();
        fImageBarriers.clear();
        fBarriersByRegion = false;
        fSrcStageMask = 0;
        fDstStageMask = 0;
    }
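    // Whether or not anything was recorded above, all batched barrier state must be empty now.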
    SkASSERT(!fBufferBarriers.size());
    SkASSERT(!fImageBarriers.size());
    SkASSERT(!fBarriersByRegion);
    SkASSERT(!fSrcStageMask);
    SkASSERT(!fDstStageMask);
}

void VulkanCommandBuffer::updateBuffer(const VulkanBuffer* buffer,
                                       const void* data,
                                       size_t dataSize,
                                       size_t dstOffset) {
    // vkCmdUpdateBuffer can only be called outside of a render pass.
    SkASSERT(fActive && !fActiveRenderPass);
    if (!buffer || buffer->vkBuffer() == VK_NULL_HANDLE) {
        SKGPU_LOG_W("VulkanCommandBuffer::updateBuffer requires a valid VulkanBuffer pointer "
                    "backed by a valid VkBuffer handle");
        return;
    }

    // Per the spec, vkCmdUpdateBuffer is treated as a "transfer" operation for the purposes of
    // synchronization barriers. Ensure this write operation occurs after any previous read
    // operations and without clobbering any other write operations on the same memory in the
    // cache.
    buffer->setBufferAccess(this, VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT);
    this->submitPipelineBarriers();
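    // Note that the Vulkan spec requires dstOffset and dataSize to be multiples of 4 and dataSize
    // to be at most 65536 bytes for vkCmdUpdateBuffer; larger updates must go through a staging
    // buffer instead.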

    VULKAN_CALL(fSharedContext->interface(), CmdUpdateBuffer(fPrimaryCommandBuffer,
                                                             buffer->vkBuffer(),
                                                             dstOffset,
                                                             dataSize,
                                                             data));
}

void VulkanCommandBuffer::nextSubpass() {
    // TODO: Use VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS if we add secondary cmd buffers
    VULKAN_CALL(fSharedContext->interface(),
                CmdNextSubpass(fPrimaryCommandBuffer, VK_SUBPASS_CONTENTS_INLINE));
}

void VulkanCommandBuffer::setViewport(const SkRect& viewport) {
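    // Vulkan's framebuffer space is y-down, which matches Skia's device space, so the rect maps
    // directly to a VkViewport with positive height and the standard [0, 1] depth range.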
    VkViewport vkViewport = {
        viewport.fLeft,
        viewport.fTop,
        viewport.width(),
        viewport.height(),
        0.0f, // minDepth
        1.0f, // maxDepth
    };
    VULKAN_CALL(fSharedContext->interface(),
                CmdSetViewport(fPrimaryCommandBuffer,
                               /*firstViewport=*/0,
                               /*viewportCount=*/1,
                               &vkViewport));
}

} // namespace skgpu::graphite