1 /*
2 * Copyright 2022 Google LLC
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "src/gpu/graphite/vk/VulkanCommandBuffer.h"
9
10 #include "include/gpu/MutableTextureState.h"
11 #include "include/gpu/graphite/BackendSemaphore.h"
12 #include "include/gpu/graphite/vk/VulkanGraphiteTypes.h"
13 #include "include/gpu/vk/VulkanMutableTextureState.h"
14 #include "include/private/base/SkTArray.h"
15 #include "src/gpu/DataUtils.h"
16 #include "src/gpu/graphite/ContextUtils.h"
17 #include "src/gpu/graphite/DescriptorData.h"
18 #include "src/gpu/graphite/Log.h"
19 #include "src/gpu/graphite/RenderPassDesc.h"
20 #include "src/gpu/graphite/Surface_Graphite.h"
21 #include "src/gpu/graphite/TextureProxy.h"
22 #include "src/gpu/graphite/UniformManager.h"
23 #include "src/gpu/graphite/vk/VulkanBuffer.h"
24 #include "src/gpu/graphite/vk/VulkanCaps.h"
25 #include "src/gpu/graphite/vk/VulkanDescriptorSet.h"
26 #include "src/gpu/graphite/vk/VulkanFramebuffer.h"
27 #include "src/gpu/graphite/vk/VulkanGraphiteUtilsPriv.h"
28 #include "src/gpu/graphite/vk/VulkanRenderPass.h"
29 #include "src/gpu/graphite/vk/VulkanSampler.h"
30 #include "src/gpu/graphite/vk/VulkanSharedContext.h"
31 #include "src/gpu/graphite/vk/VulkanTexture.h"
32 #include "src/gpu/vk/VulkanUtilsPriv.h"
33
34 using namespace skia_private;
35
36 namespace skgpu::graphite {
37
38 class VulkanDescriptorSet;
39
40 std::unique_ptr<VulkanCommandBuffer> VulkanCommandBuffer::Make(
41 const VulkanSharedContext* sharedContext,
42 VulkanResourceProvider* resourceProvider,
43 Protected isProtected) {
44 // Create VkCommandPool
45 VkCommandPoolCreateFlags cmdPoolCreateFlags = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT;
46 if (isProtected == Protected::kYes) {
47 cmdPoolCreateFlags |= VK_COMMAND_POOL_CREATE_PROTECTED_BIT;
48 }
49
50 const VkCommandPoolCreateInfo cmdPoolInfo = {
51 VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, // sType
52 nullptr, // pNext
53 cmdPoolCreateFlags, // CmdPoolCreateFlags
54 sharedContext->queueIndex(), // queueFamilyIndex
55 };
56 VkResult result;
57 VkCommandPool pool;
58 VULKAN_CALL_RESULT(sharedContext,
59 result,
60 CreateCommandPool(sharedContext->device(), &cmdPoolInfo, nullptr, &pool));
61 if (result != VK_SUCCESS) {
62 return nullptr;
63 }
64
65 const VkCommandBufferAllocateInfo cmdInfo = {
66 VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, // sType
67 nullptr, // pNext
68 pool, // commandPool
69 VK_COMMAND_BUFFER_LEVEL_PRIMARY, // level
70 1 // bufferCount
71 };
72
73 VkCommandBuffer primaryCmdBuffer;
74 VULKAN_CALL_RESULT(
75 sharedContext,
76 result,
77 AllocateCommandBuffers(sharedContext->device(), &cmdInfo, &primaryCmdBuffer));
78 if (result != VK_SUCCESS) {
79 VULKAN_CALL(sharedContext->interface(),
80 DestroyCommandPool(sharedContext->device(), pool, nullptr));
81 return nullptr;
82 }
83
84 return std::unique_ptr<VulkanCommandBuffer>(new VulkanCommandBuffer(pool,
85 primaryCmdBuffer,
86 sharedContext,
87 resourceProvider,
88 isProtected));
89 }
90
91 VulkanCommandBuffer::VulkanCommandBuffer(VkCommandPool pool,
92 VkCommandBuffer primaryCommandBuffer,
93 const VulkanSharedContext* sharedContext,
94 VulkanResourceProvider* resourceProvider,
95 Protected isProtected)
96 : CommandBuffer(isProtected)
97 , fPool(pool)
98 , fPrimaryCommandBuffer(primaryCommandBuffer)
99 , fSharedContext(sharedContext)
100 , fResourceProvider(resourceProvider) {
101 // When making a new command buffer, we automatically begin the command buffer
102 this->begin();
103 }
104
105 VulkanCommandBuffer::~VulkanCommandBuffer() {
106 if (fActive) {
107 // Need to end command buffer before deleting it
108 VULKAN_CALL(fSharedContext->interface(), EndCommandBuffer(fPrimaryCommandBuffer));
109 fActive = false;
110 }
111
112 if (VK_NULL_HANDLE != fSubmitFence) {
113 VULKAN_CALL(fSharedContext->interface(),
114 DestroyFence(fSharedContext->device(), fSubmitFence, nullptr));
115 }
116 // This should delete any command buffers as well.
117 VULKAN_CALL(fSharedContext->interface(),
118 DestroyCommandPool(fSharedContext->device(), fPool, nullptr));
119 }
120
121 void VulkanCommandBuffer::onResetCommandBuffer() {
122 SkASSERT(!fActive);
123 VULKAN_CALL_ERRCHECK(fSharedContext, ResetCommandPool(fSharedContext->device(), fPool, 0));
124 fActiveGraphicsPipeline = nullptr;
125 fBindUniformBuffers = true;
126 fBoundIndexBuffer = VK_NULL_HANDLE;
127 fBoundIndexBufferOffset = 0;
128 fBoundIndirectBuffer = VK_NULL_HANDLE;
129 fBoundIndirectBufferOffset = 0;
130 fTextureSamplerDescSetToBind = VK_NULL_HANDLE;
131 fNumTextureSamplers = 0;
132 fUniformBuffersToBind.fill({});
133 for (int i = 0; i < 4; ++i) {
134 fCachedBlendConstant[i] = -1.0;
135 }
136 for (auto& boundInputBuffer : fBoundInputBuffers) {
137 boundInputBuffer = VK_NULL_HANDLE;
138 }
139 for (auto& boundInputOffset : fBoundInputBufferOffsets) {
140 boundInputOffset = 0;
141 }
142 }
143
144 bool VulkanCommandBuffer::setNewCommandBufferResources() {
145 this->begin();
146 return true;
147 }
148
149 void VulkanCommandBuffer::begin() {
150 SkASSERT(!fActive);
151 VkCommandBufferBeginInfo cmdBufferBeginInfo;
152 memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
153 cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
154 cmdBufferBeginInfo.pNext = nullptr;
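// VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT: each recording is submitted once and the
// buffer is re-recorded from scratch (via a pool reset) before being reused.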
155 cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
156 cmdBufferBeginInfo.pInheritanceInfo = nullptr;
157
158 VULKAN_CALL_ERRCHECK(fSharedContext,
159 BeginCommandBuffer(fPrimaryCommandBuffer, &cmdBufferBeginInfo));
160 fActive = true;
161 }
162
163 void VulkanCommandBuffer::end() {
164 SkASSERT(fActive);
165 SkASSERT(!fActiveRenderPass);
166
167 this->submitPipelineBarriers();
168
169 VULKAN_CALL_ERRCHECK(fSharedContext, EndCommandBuffer(fPrimaryCommandBuffer));
170
171 fActive = false;
172 }
173
174 void VulkanCommandBuffer::addWaitSemaphores(size_t numWaitSemaphores,
175 const BackendSemaphore* waitSemaphores) {
176 if (!waitSemaphores) {
177 SkASSERT(numWaitSemaphores == 0);
178 return;
179 }
180
181 for (size_t i = 0; i < numWaitSemaphores; ++i) {
182 auto& semaphore = waitSemaphores[i];
183 if (semaphore.isValid() && semaphore.backend() == BackendApi::kVulkan) {
184 fWaitSemaphores.push_back(BackendSemaphores::GetVkSemaphore(semaphore));
185 }
186 }
187 }
188
189 void VulkanCommandBuffer::addSignalSemaphores(size_t numSignalSemaphores,
190 const BackendSemaphore* signalSemaphores) {
191 if (!signalSemaphores) {
192 SkASSERT(numSignalSemaphores == 0);
193 return;
194 }
195
196 for (size_t i = 0; i < numSignalSemaphores; ++i) {
197 auto& semaphore = signalSemaphores[i];
198 if (semaphore.isValid() && semaphore.backend() == BackendApi::kVulkan) {
199 fSignalSemaphores.push_back(BackendSemaphores::GetVkSemaphore(semaphore));
200 }
201 }
202 }
203
204 void VulkanCommandBuffer::prepareSurfaceForStateUpdate(SkSurface* targetSurface,
205 const MutableTextureState* newState) {
206 TextureProxy* textureProxy = static_cast<Surface*>(targetSurface)->backingTextureProxy();
207 VulkanTexture* texture = static_cast<VulkanTexture*>(textureProxy->texture());
208
209 // Even though these helpers are named for getting src access flags and stages, they can
210 // also be used for general dst flags since we don't know exactly what the client
211 // plans on using the image for.
212 VkImageLayout newLayout = skgpu::MutableTextureStates::GetVkImageLayout(newState);
213 if (newLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
214 newLayout = texture->currentLayout();
215 }
216 VkPipelineStageFlags dstStage = VulkanTexture::LayoutToPipelineSrcStageFlags(newLayout);
217 VkAccessFlags dstAccess = VulkanTexture::LayoutToSrcAccessMask(newLayout);
218
219 uint32_t currentQueueFamilyIndex = texture->currentQueueFamilyIndex();
220 uint32_t newQueueFamilyIndex = skgpu::MutableTextureStates::GetVkQueueFamilyIndex(newState);
221 auto isSpecialQueue = [](uint32_t queueFamilyIndex) {
222 return queueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL ||
223 queueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT;
224 };
225 if (isSpecialQueue(currentQueueFamilyIndex) && isSpecialQueue(newQueueFamilyIndex)) {
226 // It is illegal to have both the new and old queue be special queue families (i.e. external
227 // or foreign).
228 return;
229 }
230
231 this->trackCommandBufferResource(sk_ref_sp(texture));
232
233 texture->setImageLayoutAndQueueIndex(this,
234 newLayout,
235 dstAccess,
236 dstStage,
237 false,
238 newQueueFamilyIndex);
239 }
240
241 static VkResult submit_to_queue(const VulkanSharedContext* sharedContext,
242 VkQueue queue,
243 VkFence fence,
244 uint32_t waitCount,
245 const VkSemaphore* waitSemaphores,
246 const VkPipelineStageFlags* waitStages,
247 uint32_t commandBufferCount,
248 const VkCommandBuffer* commandBuffers,
249 uint32_t signalCount,
250 const VkSemaphore* signalSemaphores,
251 Protected protectedContext) {
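// For protected contexts, chain a VkProtectedSubmitInfo onto the VkSubmitInfo below so the
// batch executes as a protected queue submission.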
252 VkProtectedSubmitInfo protectedSubmitInfo;
253 if (protectedContext == Protected::kYes) {
254 memset(&protectedSubmitInfo, 0, sizeof(VkProtectedSubmitInfo));
255 protectedSubmitInfo.sType = VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO;
256 protectedSubmitInfo.pNext = nullptr;
257 protectedSubmitInfo.protectedSubmit = VK_TRUE;
258 }
259
260 VkSubmitInfo submitInfo;
261 memset(&submitInfo, 0, sizeof(VkSubmitInfo));
262 submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
263 submitInfo.pNext = protectedContext == Protected::kYes ? &protectedSubmitInfo : nullptr;
264 submitInfo.waitSemaphoreCount = waitCount;
265 submitInfo.pWaitSemaphores = waitSemaphores;
266 submitInfo.pWaitDstStageMask = waitStages;
267 submitInfo.commandBufferCount = commandBufferCount;
268 submitInfo.pCommandBuffers = commandBuffers;
269 submitInfo.signalSemaphoreCount = signalCount;
270 submitInfo.pSignalSemaphores = signalSemaphores;
271 VkResult result;
272 VULKAN_CALL_RESULT(sharedContext, result, QueueSubmit(queue, 1, &submitInfo, fence));
273 return result;
274 }
275
276 bool VulkanCommandBuffer::submit(VkQueue queue) {
277 this->end();
278
279 auto device = fSharedContext->device();
280 VkResult err;
281
282 if (fSubmitFence == VK_NULL_HANDLE) {
283 VkFenceCreateInfo fenceInfo;
284 memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
285 fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
286 VULKAN_CALL_RESULT(
287 fSharedContext, err, CreateFence(device, &fenceInfo, nullptr, &fSubmitFence));
288 if (err) {
289 fSubmitFence = VK_NULL_HANDLE;
290 return false;
291 }
292 } else {
293 // This cannot return DEVICE_LOST so we assert we succeeded.
294 VULKAN_CALL_RESULT(fSharedContext, err, ResetFences(device, 1, &fSubmitFence));
295 SkASSERT(err == VK_SUCCESS);
296 }
297
298 SkASSERT(fSubmitFence != VK_NULL_HANDLE);
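// One wait-stage mask is supplied per wait semaphore; the semaphore waits gate the fragment
// shader and transfer work recorded in this submission.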
299 int waitCount = fWaitSemaphores.size();
300 TArray<VkPipelineStageFlags> vkWaitStages(waitCount);
301 for (int i = 0; i < waitCount; ++i) {
302 vkWaitStages.push_back(VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
303 VK_PIPELINE_STAGE_TRANSFER_BIT);
304 }
305
306 VkResult submitResult = submit_to_queue(fSharedContext,
307 queue,
308 fSubmitFence,
309 waitCount,
310 fWaitSemaphores.data(),
311 vkWaitStages.data(),
312 /*commandBufferCount*/ 1,
313 &fPrimaryCommandBuffer,
314 fSignalSemaphores.size(),
315 fSignalSemaphores.data(),
316 this->isProtected());
317 fWaitSemaphores.clear();
318 fSignalSemaphores.clear();
319 if (submitResult != VK_SUCCESS) {
320 // If we failed to submit because of a device lost, we still need to wait for the fence to
321 // signal before deleting. However, there is an ARM bug (b/359822580) where the driver early
322 // outs on the fence wait if in a device lost state and thus we can't wait on it. Instead,
323 // we just wait on the queue to finish. We're already in a state that's going to cause us to
324 // restart the whole device, so waiting on the queue shouldn't have any performance impact.
325 if (submitResult == VK_ERROR_DEVICE_LOST) {
326 VULKAN_CALL(fSharedContext->interface(), QueueWaitIdle(queue));
327 } else {
328 SkASSERT(submitResult == VK_ERROR_OUT_OF_HOST_MEMORY ||
329 submitResult == VK_ERROR_OUT_OF_DEVICE_MEMORY);
330 }
331
332 VULKAN_CALL(fSharedContext->interface(), DestroyFence(device, fSubmitFence, nullptr));
333 fSubmitFence = VK_NULL_HANDLE;
334 return false;
335 }
336 return true;
337 }
338
339 bool VulkanCommandBuffer::isFinished() {
340 SkASSERT(!fActive);
341 if (VK_NULL_HANDLE == fSubmitFence) {
342 return true;
343 }
344
345 VkResult err;
346 VULKAN_CALL_RESULT_NOCHECK(fSharedContext->interface(), err,
347 GetFenceStatus(fSharedContext->device(), fSubmitFence));
348 switch (err) {
349 case VK_SUCCESS:
350 case VK_ERROR_DEVICE_LOST:
351 return true;
352
353 case VK_NOT_READY:
354 return false;
355
356 default:
357 SKGPU_LOG_F("Error calling vkGetFenceStatus. Error: %d", err);
358 SK_ABORT("Got an invalid fence status");
359 return false;
360 }
361 }
362
363 void VulkanCommandBuffer::waitUntilFinished() {
364 if (fSubmitFence == VK_NULL_HANDLE) {
365 return;
366 }
367 VULKAN_CALL_ERRCHECK(fSharedContext,
368 WaitForFences(fSharedContext->device(),
369 1,
370 &fSubmitFence,
371 /*waitAll=*/true,
372 /*timeout=*/UINT64_MAX));
373 }
374
375 void VulkanCommandBuffer::pushConstants(const PushConstantInfo& pushConstantInfo,
376 VkPipelineLayout compatibleLayout) {
377 // size must be within limits. Vulkan spec dictates each device supports at least 128 bytes
378 SkASSERT(pushConstantInfo.fSize <= 128);
379 // offset and size must be a multiple of 4
380 SkASSERT(!SkToBool(pushConstantInfo.fOffset & 0x3));
381 SkASSERT(!SkToBool(pushConstantInfo.fSize & 0x3));
382
383 VULKAN_CALL(fSharedContext->interface(),
384 CmdPushConstants(fPrimaryCommandBuffer,
385 compatibleLayout,
386 pushConstantInfo.fShaderStageFlagBits,
387 pushConstantInfo.fOffset,
388 pushConstantInfo.fSize,
389 pushConstantInfo.fValues));
390 }
391
392 bool VulkanCommandBuffer::onAddRenderPass(const RenderPassDesc& renderPassDesc,
393 SkIRect renderPassBounds,
394 const Texture* colorTexture,
395 const Texture* resolveTexture,
396 const Texture* depthStencilTexture,
397 SkIRect viewport,
398 const DrawPassList& drawPasses) {
399 for (const auto& drawPass : drawPasses) {
400 // Our current implementation of setting texture image layouts does not allow layout changes
401 // once we have already begun a render pass, so prior to any other commands, set the layout
402 // of all sampled textures from the drawpass so they can be sampled from the shader.
403 const skia_private::TArray<sk_sp<TextureProxy>>& sampledTextureProxies =
404 drawPass->sampledTextures();
405 for (const sk_sp<TextureProxy>& textureProxy : sampledTextureProxies) {
406 VulkanTexture* vulkanTexture = const_cast<VulkanTexture*>(
407 static_cast<const VulkanTexture*>(
408 textureProxy->texture()));
409 vulkanTexture->setImageLayout(this,
410 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
411 VK_ACCESS_SHADER_READ_BIT,
412 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
413 false);
414 }
415 }
416 if (fDstCopy.first) {
417 VulkanTexture* vulkanTexture =
418 const_cast<VulkanTexture*>(static_cast<const VulkanTexture*>(fDstCopy.first));
419 vulkanTexture->setImageLayout(this,
420 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
421 VK_ACCESS_SHADER_READ_BIT,
422 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
423 false);
424 }
425
426 this->setViewport(viewport);
427
428 if (!this->beginRenderPass(renderPassDesc,
429 renderPassBounds,
430 colorTexture,
431 resolveTexture,
432 depthStencilTexture)) {
433 return false;
434 }
435
436 // After loading msaa from resolve if needed, update intrinsic push constant values. Neither the
437 // dst copy bounds nor the rtAdjust components of the intrinsic constants change throughout the
438 // course of a RenderPass, so we can simply calculate & update the push constants once per RP.
439 {
440 // TODO(b/374997389): Somehow convey & enforce Layout::kStd430 for push constants.
441 UniformManager intrinsicValues{Layout::kStd140};
442 CollectIntrinsicUniforms(
443 fSharedContext->caps(), viewport, fDstCopyBounds, &intrinsicValues);
444 SkSpan<const char> bytes = intrinsicValues.finish();
445 SkASSERT(bytes.size_bytes() == VulkanResourceProvider::kIntrinsicConstantSize);
446
447 PushConstantInfo pushConstantInfo;
448 pushConstantInfo.fOffset = 0;
449 pushConstantInfo.fSize = VulkanResourceProvider::kIntrinsicConstantSize;
450 pushConstantInfo.fShaderStageFlagBits =
451 VulkanResourceProvider::kIntrinsicConstantStageFlags;
452 pushConstantInfo.fValues = bytes.data();
453
454 // Use the mock pipeline layout (which has compatible push constant parameters with real
455 // pipeline layouts) to update push constants even if we do not have a pipeline bound yet.
456 this->pushConstants(pushConstantInfo, fResourceProvider->mockPushConstantPipelineLayout());
457 }
458
459 for (const auto& drawPass : drawPasses) {
460 this->addDrawPass(drawPass.get());
461 }
462
463 this->endRenderPass();
464 return true;
465 }
466
467 bool VulkanCommandBuffer::updateAndBindLoadMSAAInputAttachment(const VulkanTexture& resolveTexture)
468 {
469 // Fetch a descriptor set that contains one input attachment
470 STArray<1, DescriptorData> inputDescriptors =
471 {VulkanGraphicsPipeline::kInputAttachmentDescriptor};
472 sk_sp<VulkanDescriptorSet> set = fResourceProvider->findOrCreateDescriptorSet(
473 SkSpan<DescriptorData>{&inputDescriptors.front(), inputDescriptors.size()});
474 if (!set) {
475 return false;
476 }
477
478 VkDescriptorImageInfo textureInfo;
479 memset(&textureInfo, 0, sizeof(VkDescriptorImageInfo));
480 textureInfo.sampler = VK_NULL_HANDLE;
481 textureInfo.imageView =
482 resolveTexture.getImageView(VulkanImageView::Usage::kAttachment)->imageView();
483 textureInfo.imageLayout = resolveTexture.currentLayout();
484
485 VkWriteDescriptorSet writeInfo;
486 memset(&writeInfo, 0, sizeof(VkWriteDescriptorSet));
487 writeInfo.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
488 writeInfo.pNext = nullptr;
489 writeInfo.dstSet = *set->descriptorSet();
490 writeInfo.dstBinding = 0;
491 writeInfo.dstArrayElement = 0;
492 writeInfo.descriptorCount = 1;
493 writeInfo.descriptorType = DsTypeEnumToVkDs(DescriptorType::kInputAttachment);
494 writeInfo.pImageInfo = &textureInfo;
495 writeInfo.pBufferInfo = nullptr;
496 writeInfo.pTexelBufferView = nullptr;
497
498 VULKAN_CALL(fSharedContext->interface(),
499 UpdateDescriptorSets(fSharedContext->device(),
500 /*descriptorWriteCount=*/1,
501 &writeInfo,
502 /*descriptorCopyCount=*/0,
503 /*pDescriptorCopies=*/nullptr));
504
505 VULKAN_CALL(fSharedContext->interface(),
506 CmdBindDescriptorSets(fPrimaryCommandBuffer,
507 VK_PIPELINE_BIND_POINT_GRAPHICS,
508 fActiveGraphicsPipeline->layout(),
509 VulkanGraphicsPipeline::kInputAttachmentDescSetIndex,
510 /*setCount=*/1,
511 set->descriptorSet(),
512 /*dynamicOffsetCount=*/0,
513 /*dynamicOffsets=*/nullptr));
514
515 this->trackResource(std::move(set));
516 return true;
517 }
518
519 bool VulkanCommandBuffer::loadMSAAFromResolve(const RenderPassDesc& renderPassDesc,
520 VulkanTexture& resolveTexture,
521 SkISize dstDimensions,
522 const SkIRect nativeDrawBounds) {
523 sk_sp<VulkanGraphicsPipeline> loadPipeline =
524 fResourceProvider->findOrCreateLoadMSAAPipeline(renderPassDesc);
525 if (!loadPipeline) {
526 SKGPU_LOG_E("Unable to create pipeline to load resolve texture into MSAA attachment");
527 return false;
528 }
529
530 // Update and bind uniform descriptor set
531 int w = nativeDrawBounds.width();
532 int h = nativeDrawBounds.height();
533
534 // dst rect edges in NDC (-1 to 1)
535 int dw = dstDimensions.width();
536 int dh = dstDimensions.height();
537 float dx0 = 2.f * nativeDrawBounds.fLeft / dw - 1.f;
538 float dx1 = 2.f * (nativeDrawBounds.fLeft + w) / dw - 1.f;
539 float dy0 = 2.f * nativeDrawBounds.fTop / dh - 1.f;
540 float dy1 = 2.f * (nativeDrawBounds.fTop + h) / dh - 1.f;
541 float uniData[] = {dx1 - dx0, dy1 - dy0, dx0, dy0}; // posXform
542 SkASSERT(sizeof(uniData) == VulkanResourceProvider::kLoadMSAAPushConstantSize);
543
544 this->bindGraphicsPipeline(loadPipeline.get());
545
546 PushConstantInfo loadMsaaPushConstantInfo;
547 loadMsaaPushConstantInfo.fOffset = 0;
548 loadMsaaPushConstantInfo.fSize = VulkanResourceProvider::kLoadMSAAPushConstantSize;
549 loadMsaaPushConstantInfo.fShaderStageFlagBits =
550 VulkanResourceProvider::kLoadMSAAPushConstantStageFlags;
551 loadMsaaPushConstantInfo.fValues = uniData;
552 this->pushConstants(loadMsaaPushConstantInfo, loadPipeline->layout());
553
554 // Make sure we do not attempt to bind uniform or texture/sampler descriptors because we do
555 // not use them for loading MSAA from resolve.
556 fBindUniformBuffers = false;
557 fBindTextureSamplers = false;
558
559 this->setScissor(SkIRect::MakeXYWH(0, 0, dstDimensions.width(), dstDimensions.height()));
560
561 if (!this->updateAndBindLoadMSAAInputAttachment(resolveTexture)) {
562 SKGPU_LOG_E("Unable to update and bind an input attachment descriptor for loading MSAA "
563 "from resolve");
564 return false;
565 }
566
567 this->draw(PrimitiveType::kTriangleStrip, /*baseVertex=*/0, /*vertexCount=*/4);
568 this->nextSubpass();
569
570 // If we loaded the resolve attachment, then we would have set the image layout to be
571 // VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL so that it could be used at the start as an
572 // input attachment. However, when we switched to the main subpass it will transition the
573 // layout internally to VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL. Thus we need to update our
574 // tracking of the layout to match the new layout.
575 resolveTexture.updateImageLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
576
577 // After using a distinct descriptor set layout for loading MSAA from resolve, we will need to
578 // (re-)bind any descriptor sets.
579 fBindUniformBuffers = true;
580 fBindTextureSamplers = true;
581 return true;
582 }
583
584 namespace {
585 void setup_texture_layouts(VulkanCommandBuffer* cmdBuf,
586 VulkanTexture* colorTexture,
587 VulkanTexture* resolveTexture,
588 VulkanTexture* depthStencilTexture,
589 bool loadMSAAFromResolve) {
590 if (colorTexture) {
591 colorTexture->setImageLayout(cmdBuf,
592 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
593 VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
594 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
595 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
596 /*byRegion=*/false);
597 if (resolveTexture) {
598 if (loadMSAAFromResolve) {
599 // When loading MSAA from resolve, the texture is used in the first subpass as an
600 // input attachment. Subsequent subpass(es) need the resolve texture to provide read
601 // access to the color attachment (for use cases such as blending), so add access
602 // and pipeline stage flags for both usages.
603 resolveTexture->setImageLayout(cmdBuf,
604 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
605 VK_ACCESS_INPUT_ATTACHMENT_READ_BIT |
606 VK_ACCESS_COLOR_ATTACHMENT_READ_BIT,
607 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
608 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
609 /*byRegion=*/false);
610 } else {
611 resolveTexture->setImageLayout(cmdBuf,
612 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
613 VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
614 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
615 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
616 /*byRegion=*/false);
617 }
618 }
619 }
620 if (depthStencilTexture) {
621 depthStencilTexture->setImageLayout(cmdBuf,
622 VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
623 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
624 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
625 VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
626 /*byRegion=*/false);
627 }
628 }
629
630 void gather_attachment_views(skia_private::TArray<VkImageView>& attachmentViews,
631 VulkanTexture* colorTexture,
632 VulkanTexture* resolveTexture,
633 VulkanTexture* depthStencilTexture) {
634 if (colorTexture) {
635 VkImageView& colorAttachmentView = attachmentViews.push_back();
636 colorAttachmentView =
637 colorTexture->getImageView(VulkanImageView::Usage::kAttachment)->imageView();
638
639 if (resolveTexture) {
640 VkImageView& resolveView = attachmentViews.push_back();
641 resolveView =
642 resolveTexture->getImageView(VulkanImageView::Usage::kAttachment)->imageView();
643 }
644 }
645
646 if (depthStencilTexture) {
647 VkImageView& stencilView = attachmentViews.push_back();
648 stencilView =
649 depthStencilTexture->getImageView(VulkanImageView::Usage::kAttachment)->imageView();
650 }
651 }
652
653 void gather_clear_values(
654 STArray<VulkanRenderPass::kMaxExpectedAttachmentCount, VkClearValue>& clearValues,
655 const RenderPassDesc& renderPassDesc,
656 VulkanTexture* colorTexture,
657 VulkanTexture* depthStencilTexture,
658 int depthStencilAttachmentIdx) {
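// Clear values are indexed by attachment slot, so reserve an entry for every possible
// attachment even if some slots end up unused.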
659 clearValues.push_back_n(VulkanRenderPass::kMaxExpectedAttachmentCount);
660 if (colorTexture) {
661 VkClearValue& colorAttachmentClear =
662 clearValues.at(VulkanRenderPass::kColorAttachmentIdx);
663 memset(&colorAttachmentClear, 0, sizeof(VkClearValue));
664 colorAttachmentClear.color = {{renderPassDesc.fClearColor[0],
665 renderPassDesc.fClearColor[1],
666 renderPassDesc.fClearColor[2],
667 renderPassDesc.fClearColor[3]}};
668 }
669 // Resolve texture does not have a clear value
670 if (depthStencilTexture) {
671 VkClearValue& depthStencilAttachmentClear = clearValues.at(depthStencilAttachmentIdx);
672 memset(&depthStencilAttachmentClear, 0, sizeof(VkClearValue));
673 depthStencilAttachmentClear.depthStencil = {renderPassDesc.fClearDepth,
674 renderPassDesc.fClearStencil};
675 }
676 }
677
678 // The RenderArea bounds we pass into BeginRenderPass must have a start x value that is a multiple
679 // of the granularity. The width must also be a multiple of the granularity or equal to the width
680 // of the entire attachment. Similar requirements apply to the y and height components.
681 VkRect2D get_render_area(const SkIRect& srcBounds,
682 const VkExtent2D& granularity,
683 int maxWidth,
684 int maxHeight) {
685 SkIRect dstBounds;
686 // Adjust Width
687 if (granularity.width == 0 || granularity.width == 1) {
688 dstBounds.fLeft = srcBounds.fLeft;
689 dstBounds.fRight = srcBounds.fRight;
690 } else {
691 // Start with the right side of rect so we know if we end up going past the maxWidth.
692 int rightAdj = srcBounds.fRight % granularity.width;
693 if (rightAdj != 0) {
694 rightAdj = granularity.width - rightAdj;
695 }
696 dstBounds.fRight = srcBounds.fRight + rightAdj;
697 if (dstBounds.fRight > maxWidth) {
698 dstBounds.fRight = maxWidth;
699 dstBounds.fLeft = 0;
700 } else {
701 dstBounds.fLeft = srcBounds.fLeft - srcBounds.fLeft % granularity.width;
702 }
703 }
704
705 if (granularity.height == 0 || granularity.height == 1) {
706 dstBounds.fTop = srcBounds.fTop;
707 dstBounds.fBottom = srcBounds.fBottom;
708 } else {
709 // Start with the bottom side of rect so we know if we end up going past the maxHeight.
710 int bottomAdj = srcBounds.fBottom % granularity.height;
711 if (bottomAdj != 0) {
712 bottomAdj = granularity.height - bottomAdj;
713 }
714 dstBounds.fBottom = srcBounds.fBottom + bottomAdj;
715 if (dstBounds.fBottom > maxHeight) {
716 dstBounds.fBottom = maxHeight;
717 dstBounds.fTop = 0;
718 } else {
719 dstBounds.fTop = srcBounds.fTop - srcBounds.fTop % granularity.height;
720 }
721 }
722
723 VkRect2D renderArea;
724 renderArea.offset = { dstBounds.fLeft , dstBounds.fTop };
725 renderArea.extent = { (uint32_t)dstBounds.width(), (uint32_t)dstBounds.height() };
726 return renderArea;
727 }
728
729 } // anonymous namespace
730
731 bool VulkanCommandBuffer::beginRenderPass(const RenderPassDesc& renderPassDesc,
732 SkIRect renderPassBounds,
733 const Texture* colorTexture,
734 const Texture* resolveTexture,
735 const Texture* depthStencilTexture) {
736 // TODO: Check that Textures match RenderPassDesc
737 VulkanTexture* vulkanColorTexture =
738 const_cast<VulkanTexture*>(static_cast<const VulkanTexture*>(colorTexture));
739 VulkanTexture* vulkanResolveTexture =
740 const_cast<VulkanTexture*>(static_cast<const VulkanTexture*>(resolveTexture));
741 VulkanTexture* vulkanDepthStencilTexture =
742 const_cast<VulkanTexture*>(static_cast<const VulkanTexture*>(depthStencilTexture));
743
744 SkASSERT(resolveTexture ? renderPassDesc.fColorResolveAttachment.fStoreOp == StoreOp::kStore
745 : true);
746
747 // Determine if we need to load MSAA from resolve, and if so, make certain that key conditions
748 // are met before proceeding.
749 bool loadMSAAFromResolve = renderPassDesc.fColorResolveAttachment.fTextureInfo.isValid() &&
750 renderPassDesc.fColorResolveAttachment.fLoadOp == LoadOp::kLoad;
751 if (loadMSAAFromResolve && (!vulkanResolveTexture || !vulkanColorTexture ||
752 !vulkanResolveTexture->supportsInputAttachmentUsage())) {
753 SKGPU_LOG_E("Cannot begin render pass. In order to load MSAA from resolve, the color "
754 "attachment must have input attachment usage and both the color and resolve "
755 "attachments must be valid.");
756 return false;
757 }
758
759 // Before beginning a renderpass, set all textures to the appropriate image layout.
760 setup_texture_layouts(this,
761 vulkanColorTexture,
762 vulkanResolveTexture,
763 vulkanDepthStencilTexture,
764 loadMSAAFromResolve);
765
766 static constexpr int kMaxNumAttachments = 3;
767 // Gather attachment views needed for framebuffer creation.
768 skia_private::TArray<VkImageView> attachmentViews;
769 gather_attachment_views(
770 attachmentViews, vulkanColorTexture, vulkanResolveTexture, vulkanDepthStencilTexture);
771
772 // Gather clear values needed for RenderPassBeginInfo. Indexed by attachment number.
773 STArray<kMaxNumAttachments, VkClearValue> clearValues;
774 // The depth/stencil attachment can be at attachment index 1 or 2 depending on whether there is
775 // a resolve texture attachment for this renderpass.
776 int depthStencilAttachmentIndex = resolveTexture ? 2 : 1;
777 gather_clear_values(clearValues,
778 renderPassDesc,
779 vulkanColorTexture,
780 vulkanDepthStencilTexture,
781 depthStencilAttachmentIndex);
782
783 sk_sp<VulkanRenderPass> vulkanRenderPass =
784 fResourceProvider->findOrCreateRenderPass(renderPassDesc, /*compatibleOnly=*/false);
785 if (!vulkanRenderPass) {
786 SKGPU_LOG_W("Could not create Vulkan RenderPass");
787 return false;
788 }
789 this->submitPipelineBarriers();
790 this->trackResource(vulkanRenderPass);
791
792 int frameBufferWidth = 0;
793 int frameBufferHeight = 0;
794 if (colorTexture) {
795 frameBufferWidth = colorTexture->dimensions().width();
796 frameBufferHeight = colorTexture->dimensions().height();
797 } else if (depthStencilTexture) {
798 frameBufferWidth = depthStencilTexture->dimensions().width();
799 frameBufferHeight = depthStencilTexture->dimensions().height();
800 }
801 sk_sp<VulkanFramebuffer> framebuffer = fResourceProvider->createFramebuffer(fSharedContext,
802 attachmentViews,
803 *vulkanRenderPass,
804 frameBufferWidth,
805 frameBufferHeight);
806 if (!framebuffer) {
807 SKGPU_LOG_W("Could not create Vulkan Framebuffer");
808 return false;
809 }
810
811 VkExtent2D granularity;
812 // Get granularity for this render pass
813 VULKAN_CALL(fSharedContext->interface(),
814 GetRenderAreaGranularity(fSharedContext->device(),
815 vulkanRenderPass->renderPass(),
816 &granularity));
817
818 bool useFullBounds = loadMSAAFromResolve &&
819 fSharedContext->vulkanCaps().mustLoadFullImageForMSAA();
820
821 VkRect2D renderArea = get_render_area(useFullBounds ? SkIRect::MakeWH(frameBufferWidth,
822 frameBufferHeight)
823 : renderPassBounds,
824 granularity,
825 frameBufferWidth,
826 frameBufferHeight);
827
828 VkRenderPassBeginInfo beginInfo;
829 memset(&beginInfo, 0, sizeof(VkRenderPassBeginInfo));
830 beginInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
831 beginInfo.pNext = nullptr;
832 beginInfo.renderPass = vulkanRenderPass->renderPass();
833 beginInfo.framebuffer = framebuffer->framebuffer();
834 beginInfo.renderArea = renderArea;
835 beginInfo.clearValueCount = clearValues.size();
836 beginInfo.pClearValues = clearValues.begin();
837
838 // Submit pipeline barriers to ensure any image layout transitions are recorded prior to
839 // beginning the render pass.
840 this->submitPipelineBarriers();
841 // TODO: If we add support for secondary command buffers, dynamically determine subpass contents
842 VULKAN_CALL(fSharedContext->interface(),
843 CmdBeginRenderPass(fPrimaryCommandBuffer,
844 &beginInfo,
845 VK_SUBPASS_CONTENTS_INLINE));
846 fActiveRenderPass = true;
847
848 SkIRect nativeBounds = SkIRect::MakeXYWH(renderArea.offset.x,
849 renderArea.offset.y,
850 renderArea.extent.width,
851 renderArea.extent.height);
852 if (loadMSAAFromResolve && !this->loadMSAAFromResolve(renderPassDesc,
853 *vulkanResolveTexture,
854 vulkanColorTexture->dimensions(),
855 nativeBounds)) {
856 SKGPU_LOG_E("Failed to load MSAA from resolve");
857 this->endRenderPass();
858 return false;
859 }
860
861 // Once we have an active render pass, the command buffer should hold on to a frame buffer ref.
862 this->trackResource(std::move(framebuffer));
863 return true;
864 }
865
866 void VulkanCommandBuffer::endRenderPass() {
867 SkASSERT(fActive);
868 VULKAN_CALL(fSharedContext->interface(), CmdEndRenderPass(fPrimaryCommandBuffer));
869 fActiveRenderPass = false;
870 }
871
872 void VulkanCommandBuffer::addDrawPass(const DrawPass* drawPass) {
873 drawPass->addResourceRefs(this);
874 for (auto [type, cmdPtr] : drawPass->commands()) {
875 switch (type) {
876 case DrawPassCommands::Type::kBindGraphicsPipeline: {
877 auto bgp = static_cast<DrawPassCommands::BindGraphicsPipeline*>(cmdPtr);
878 this->bindGraphicsPipeline(drawPass->getPipeline(bgp->fPipelineIndex));
879 break;
880 }
881 case DrawPassCommands::Type::kSetBlendConstants: {
882 auto sbc = static_cast<DrawPassCommands::SetBlendConstants*>(cmdPtr);
883 this->setBlendConstants(sbc->fBlendConstants);
884 break;
885 }
886 case DrawPassCommands::Type::kBindUniformBuffer: {
887 auto bub = static_cast<DrawPassCommands::BindUniformBuffer*>(cmdPtr);
888 this->recordBufferBindingInfo(bub->fInfo, bub->fSlot);
889 break;
890 }
891 case DrawPassCommands::Type::kBindDrawBuffers: {
892 auto bdb = static_cast<DrawPassCommands::BindDrawBuffers*>(cmdPtr);
893 this->bindDrawBuffers(
894 bdb->fVertices, bdb->fInstances, bdb->fIndices, bdb->fIndirect);
895 break;
896 }
897 case DrawPassCommands::Type::kBindTexturesAndSamplers: {
898 auto bts = static_cast<DrawPassCommands::BindTexturesAndSamplers*>(cmdPtr);
899 this->recordTextureAndSamplerDescSet(drawPass, bts);
900 break;
901 }
902 case DrawPassCommands::Type::kSetScissor: {
903 auto ss = static_cast<DrawPassCommands::SetScissor*>(cmdPtr);
904 this->setScissor(ss->fScissor);
905 break;
906 }
907 case DrawPassCommands::Type::kDraw: {
908 auto draw = static_cast<DrawPassCommands::Draw*>(cmdPtr);
909 this->draw(draw->fType, draw->fBaseVertex, draw->fVertexCount);
910 break;
911 }
912 case DrawPassCommands::Type::kDrawIndexed: {
913 auto draw = static_cast<DrawPassCommands::DrawIndexed*>(cmdPtr);
914 this->drawIndexed(
915 draw->fType, draw->fBaseIndex, draw->fIndexCount, draw->fBaseVertex);
916 break;
917 }
918 case DrawPassCommands::Type::kDrawInstanced: {
919 auto draw = static_cast<DrawPassCommands::DrawInstanced*>(cmdPtr);
920 this->drawInstanced(draw->fType,
921 draw->fBaseVertex,
922 draw->fVertexCount,
923 draw->fBaseInstance,
924 draw->fInstanceCount);
925 break;
926 }
927 case DrawPassCommands::Type::kDrawIndexedInstanced: {
928 auto draw = static_cast<DrawPassCommands::DrawIndexedInstanced*>(cmdPtr);
929 this->drawIndexedInstanced(draw->fType,
930 draw->fBaseIndex,
931 draw->fIndexCount,
932 draw->fBaseVertex,
933 draw->fBaseInstance,
934 draw->fInstanceCount);
935 break;
936 }
937 case DrawPassCommands::Type::kDrawIndirect: {
938 auto draw = static_cast<DrawPassCommands::DrawIndirect*>(cmdPtr);
939 this->drawIndirect(draw->fType);
940 break;
941 }
942 case DrawPassCommands::Type::kDrawIndexedIndirect: {
943 auto draw = static_cast<DrawPassCommands::DrawIndexedIndirect*>(cmdPtr);
944 this->drawIndexedIndirect(draw->fType);
945 break;
946 }
947 }
948 }
949 }
950
951 void VulkanCommandBuffer::bindGraphicsPipeline(const GraphicsPipeline* graphicsPipeline) {
952 SkASSERT(fActiveRenderPass);
953 fActiveGraphicsPipeline = static_cast<const VulkanGraphicsPipeline*>(graphicsPipeline);
954 VULKAN_CALL(fSharedContext->interface(), CmdBindPipeline(fPrimaryCommandBuffer,
955 VK_PIPELINE_BIND_POINT_GRAPHICS,
956 fActiveGraphicsPipeline->pipeline()));
957 // TODO(b/293924877): Compare pipeline layouts. If 2 pipelines have the same pipeline layout,
958 // then descriptor sets do not need to be re-bound. For now, simply force a re-binding of
959 // descriptor sets with any new bindGraphicsPipeline DrawPassCommand.
960 fBindUniformBuffers = true;
961
962 if (graphicsPipeline->dstReadRequirement() == DstReadRequirement::kTextureCopy &&
963 graphicsPipeline->numFragTexturesAndSamplers() == 1) {
964 // The only texture-sampler that the pipeline declares must be the dstCopy, which means
965 // there are no other textures that will trigger BindTextureAndSampler commands in a
966 // DrawPass (e.g. solid-color + dst-read-requiring blend). Configure the texture binding
967 // up front in this case.
968 this->recordTextureAndSamplerDescSet(/*drawPass=*/nullptr, /*command=*/nullptr);
969 }
970 }
971
972 void VulkanCommandBuffer::setBlendConstants(float* blendConstants) {
973 SkASSERT(fActive);
974 if (0 != memcmp(blendConstants, fCachedBlendConstant, 4 * sizeof(float))) {
975 VULKAN_CALL(fSharedContext->interface(),
976 CmdSetBlendConstants(fPrimaryCommandBuffer, blendConstants));
977 memcpy(fCachedBlendConstant, blendConstants, 4 * sizeof(float));
978 }
979 }
980
981 void VulkanCommandBuffer::recordBufferBindingInfo(const BindBufferInfo& info, UniformSlot slot) {
982 unsigned int bufferIndex = 0;
983 switch (slot) {
984 case UniformSlot::kRenderStep:
985 bufferIndex = VulkanGraphicsPipeline::kRenderStepUniformBufferIndex;
986 break;
987 case UniformSlot::kPaint:
988 bufferIndex = VulkanGraphicsPipeline::kPaintUniformBufferIndex;
989 break;
990 case UniformSlot::kGradient:
991 bufferIndex = VulkanGraphicsPipeline::kGradientBufferIndex;
992 break;
993 default:
994 SkASSERT(false);
995 }
996
997 fUniformBuffersToBind[bufferIndex] = info;
998 fBindUniformBuffers = true;
999 }
1000
1001 void VulkanCommandBuffer::syncDescriptorSets() {
1002 if (fBindUniformBuffers) {
1003 this->bindUniformBuffers();
1004 // Changes to descriptor sets in lower slot numbers disrupt later set bindings. Currently,
1005 // the descriptor set which houses uniform buffers is at a lower slot than the texture /
1006 // sampler set, so rebinding uniform buffers necessitates re-binding any texture/samplers.
1007 fBindTextureSamplers = true;
1008 }
1009 if (fBindTextureSamplers) {
1010 this->bindTextureSamplers();
1011 }
1012 }
1013
1014 void VulkanCommandBuffer::bindUniformBuffers() {
1015 fBindUniformBuffers = false;
1016
1017 // Define a container with size reserved for up to kNumUniformBuffers descriptors. Only add
1018 // DescriptorData for uniforms that actually are used and need to be bound.
1019 STArray<VulkanGraphicsPipeline::kNumUniformBuffers, DescriptorData> descriptors;
1020
1021 // Up to kNumUniformBuffers can be used and require rebinding depending upon render pass info.
1022 DescriptorType uniformBufferType =
1023 fSharedContext->caps()->storageBufferSupport() ? DescriptorType::kStorageBuffer
1024 : DescriptorType::kUniformBuffer;
1025 if (fActiveGraphicsPipeline->hasStepUniforms() &&
1026 fUniformBuffersToBind[VulkanGraphicsPipeline::kRenderStepUniformBufferIndex].fBuffer) {
1027 descriptors.push_back({
1028 uniformBufferType,
1029 /*count=*/1,
1030 VulkanGraphicsPipeline::kRenderStepUniformBufferIndex,
1031 PipelineStageFlags::kVertexShader | PipelineStageFlags::kFragmentShader });
1032 }
1033 if (fActiveGraphicsPipeline->hasPaintUniforms() &&
1034 fUniformBuffersToBind[VulkanGraphicsPipeline::kPaintUniformBufferIndex].fBuffer) {
1035 descriptors.push_back({ uniformBufferType, /*count=*/1,
1036 VulkanGraphicsPipeline::kPaintUniformBufferIndex,
1037 PipelineStageFlags::kFragmentShader });
1038 }
1039 if (fActiveGraphicsPipeline->hasGradientBuffer() &&
1040 fUniformBuffersToBind[VulkanGraphicsPipeline::kGradientBufferIndex].fBuffer) {
1041 SkASSERT(fSharedContext->caps()->gradientBufferSupport() &&
1042 fSharedContext->caps()->storageBufferSupport());
1043 descriptors.push_back({ DescriptorType::kStorageBuffer, /*count=*/1,
1044 VulkanGraphicsPipeline::kGradientBufferIndex,
1045 PipelineStageFlags::kFragmentShader });
1046 }
1047
1048 // If no uniforms are used, we can go ahead and return since no descriptors need to be bound.
1049 if (descriptors.empty()) {
1050 return;
1051 }
1052
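// Collect one dynamic offset per descriptor, in the same order the descriptors were added
// above; these are handed to CmdBindDescriptorSets along with the descriptor set.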
1053 skia_private::AutoSTMalloc<VulkanGraphicsPipeline::kNumUniformBuffers, uint32_t>
1054 dynamicOffsets(descriptors.size());
1055 for (int i = 0; i < descriptors.size(); i++) {
1056 int descriptorBindingIndex = descriptors[i].fBindingIndex;
1057 SkASSERT(static_cast<unsigned long>(descriptorBindingIndex) < fUniformBuffersToBind.size());
1058 const auto& bindInfo = fUniformBuffersToBind[descriptorBindingIndex];
1059 #ifdef SK_DEBUG
1060 if (descriptors[i].fPipelineStageFlags & PipelineStageFlags::kVertexShader) {
1061 SkASSERT(bindInfo.fBuffer->isProtected() == Protected::kNo);
1062 }
1063 #endif
1064 dynamicOffsets[i] = bindInfo.fOffset;
1065 }
1066
1067 sk_sp<VulkanDescriptorSet> descSet = fResourceProvider->findOrCreateUniformBuffersDescriptorSet(
1068 descriptors, fUniformBuffersToBind);
1069 if (!descSet) {
1070 SKGPU_LOG_E("Unable to find or create uniform descriptor set");
1071 return;
1072 }
1073
1074 VULKAN_CALL(fSharedContext->interface(),
1075 CmdBindDescriptorSets(fPrimaryCommandBuffer,
1076 VK_PIPELINE_BIND_POINT_GRAPHICS,
1077 fActiveGraphicsPipeline->layout(),
1078 VulkanGraphicsPipeline::kUniformBufferDescSetIndex,
1079 /*setCount=*/1,
1080 descSet->descriptorSet(),
1081 descriptors.size(),
1082 dynamicOffsets.get()));
1083 this->trackResource(std::move(descSet));
1084 }
1085
1086 void VulkanCommandBuffer::bindDrawBuffers(const BindBufferInfo& vertices,
1087 const BindBufferInfo& instances,
1088 const BindBufferInfo& indices,
1089 const BindBufferInfo& indirect) {
1090 this->bindVertexBuffers(vertices.fBuffer,
1091 vertices.fOffset,
1092 instances.fBuffer,
1093 instances.fOffset);
1094 this->bindIndexBuffer(indices.fBuffer, indices.fOffset);
1095 this->bindIndirectBuffer(indirect.fBuffer, indirect.fOffset);
1096 }
1097
1098 void VulkanCommandBuffer::bindVertexBuffers(const Buffer* vertexBuffer,
1099 size_t vertexOffset,
1100 const Buffer* instanceBuffer,
1101 size_t instanceOffset) {
1102 this->bindInputBuffer(vertexBuffer, vertexOffset,
1103 VulkanGraphicsPipeline::kVertexBufferIndex);
1104 this->bindInputBuffer(instanceBuffer, instanceOffset,
1105 VulkanGraphicsPipeline::kInstanceBufferIndex);
1106 }
1107
1108 void VulkanCommandBuffer::bindInputBuffer(const Buffer* buffer, VkDeviceSize offset,
1109 uint32_t binding) {
1110 if (buffer) {
1111 SkASSERT(buffer->isProtected() == Protected::kNo);
1112 VkBuffer vkBuffer = static_cast<const VulkanBuffer*>(buffer)->vkBuffer();
1113 SkASSERT(vkBuffer != VK_NULL_HANDLE);
1114 if (vkBuffer != fBoundInputBuffers[binding] ||
1115 offset != fBoundInputBufferOffsets[binding]) {
1116 VULKAN_CALL(fSharedContext->interface(),
1117 CmdBindVertexBuffers(fPrimaryCommandBuffer,
1118 binding,
1119 /*bindingCount=*/1,
1120 &vkBuffer,
1121 &offset));
1122 fBoundInputBuffers[binding] = vkBuffer;
1123 fBoundInputBufferOffsets[binding] = offset;
1124 }
1125 }
1126 }
1127
1128 void VulkanCommandBuffer::bindIndexBuffer(const Buffer* indexBuffer, size_t offset) {
1129 if (indexBuffer) {
1130 SkASSERT(indexBuffer->isProtected() == Protected::kNo);
1131 VkBuffer vkBuffer = static_cast<const VulkanBuffer*>(indexBuffer)->vkBuffer();
1132 SkASSERT(vkBuffer != VK_NULL_HANDLE);
1133 if (vkBuffer != fBoundIndexBuffer || offset != fBoundIndexBufferOffset) {
1134 VULKAN_CALL(fSharedContext->interface(), CmdBindIndexBuffer(fPrimaryCommandBuffer,
1135 vkBuffer,
1136 offset,
1137 VK_INDEX_TYPE_UINT16));
1138 fBoundIndexBuffer = vkBuffer;
1139 fBoundIndexBufferOffset = offset;
1140 }
1141 } else {
1142 fBoundIndexBuffer = VK_NULL_HANDLE;
1143 fBoundIndexBufferOffset = 0;
1144 }
1145 }
1146
1147 void VulkanCommandBuffer::bindIndirectBuffer(const Buffer* indirectBuffer, size_t offset) {
1148 // Indirect buffers are not bound via the command buffer, but specified in the draw cmd.
1149 if (indirectBuffer) {
1150 SkASSERT(indirectBuffer->isProtected() == Protected::kNo);
1151 fBoundIndirectBuffer = static_cast<const VulkanBuffer*>(indirectBuffer)->vkBuffer();
1152 fBoundIndirectBufferOffset = offset;
1153 } else {
1154 fBoundIndirectBuffer = VK_NULL_HANDLE;
1155 fBoundIndirectBufferOffset = 0;
1156 }
1157 }
1158
1159 void VulkanCommandBuffer::recordTextureAndSamplerDescSet(
1160 const DrawPass* drawPass, const DrawPassCommands::BindTexturesAndSamplers* command) {
1161 SkASSERT(SkToBool(drawPass) == SkToBool(command));
1162 SkASSERT(fActiveGraphicsPipeline);
1163 // Add one extra texture for dst copies, which is not included in the command itself.
1164 int numTexSamplers = command ? command->fNumTexSamplers : 0;
1165 if (fActiveGraphicsPipeline->dstReadRequirement() == DstReadRequirement::kTextureCopy) {
1166 numTexSamplers++;
1167 }
1168
1169 if (numTexSamplers == 0) {
1170 fNumTextureSamplers = 0;
1171 fTextureSamplerDescSetToBind = VK_NULL_HANDLE;
1172 fBindTextureSamplers = false;
1173 return;
1174 }
1175
1176 // Query resource provider to obtain a descriptor set for the texture/samplers
1177 TArray<DescriptorData> descriptors(numTexSamplers);
1178 if (command) {
1179 for (int i = 0; i < command->fNumTexSamplers; i++) {
1180 auto sampler = static_cast<const VulkanSampler*>(
1181 drawPass->getSampler(command->fSamplerIndices[i]));
1182
1183 const Sampler* immutableSampler = (sampler && sampler->ycbcrConversion()) ? sampler
1184 : nullptr;
1185 descriptors.push_back({DescriptorType::kCombinedTextureSampler,
1186 /*count=*/1,
1187 /*bindingIdx=*/i,
1188 PipelineStageFlags::kFragmentShader,
1189 immutableSampler});
1190 }
1191 }
1192 // If required the dst copy texture+sampler is the last one in the descriptor set
1193 if (fActiveGraphicsPipeline->dstReadRequirement() == DstReadRequirement::kTextureCopy) {
1194 descriptors.push_back({DescriptorType::kCombinedTextureSampler,
1195 /*count=*/1,
1196 /*bindingIdx=*/numTexSamplers-1,
1197 PipelineStageFlags::kFragmentShader,
1198 /*immutableSampler=*/nullptr});
1199 }
1200 SkASSERT(descriptors.size() == numTexSamplers);
1201 sk_sp<VulkanDescriptorSet> set = fResourceProvider->findOrCreateDescriptorSet(
1202 SkSpan<DescriptorData>{&descriptors.front(), descriptors.size()});
1203
1204 if (!set) {
1205 SKGPU_LOG_E("Unable to find or create descriptor set");
1206 fNumTextureSamplers = 0;
1207 fTextureSamplerDescSetToBind = VK_NULL_HANDLE;
1208 fBindTextureSamplers = false;
1209 return;
1210 }
1211 // Populate the descriptor set with texture/sampler descriptors
1212 TArray<VkWriteDescriptorSet> writeDescriptorSets(numTexSamplers);
1213 TArray<VkDescriptorImageInfo> descriptorImageInfos(numTexSamplers);
1214 auto appendTextureSampler = [&](const VulkanTexture* texture, const VulkanSampler* sampler) {
1215 if (!texture || !sampler) {
1216 // TODO(b/294198324): Investigate the root cause for null texture or samplers on
1217 // Ubuntu QuadP400 GPU
1218 SKGPU_LOG_E("Texture and sampler must not be null");
1219 fNumTextureSamplers = 0;
1220 fTextureSamplerDescSetToBind = VK_NULL_HANDLE;
1221 fBindTextureSamplers = false;
1222 return false;
1223 }
1224
1225 VkDescriptorImageInfo& textureInfo = descriptorImageInfos.push_back();
1226 memset(&textureInfo, 0, sizeof(VkDescriptorImageInfo));
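// With a YCbCr conversion the sampler is immutable and baked into the descriptor set
// layout, so no sampler handle is written into the descriptor here.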
1227 textureInfo.sampler = sampler->ycbcrConversion() ? VK_NULL_HANDLE : sampler->vkSampler();
1228 textureInfo.imageView =
1229 texture->getImageView(VulkanImageView::Usage::kShaderInput)->imageView();
1230 textureInfo.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
1231
1232 VkWriteDescriptorSet& writeInfo = writeDescriptorSets.push_back();
1233 memset(&writeInfo, 0, sizeof(VkWriteDescriptorSet));
1234 writeInfo.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
1235 writeInfo.pNext = nullptr;
1236 writeInfo.dstSet = *set->descriptorSet();
1237 writeInfo.dstBinding = writeDescriptorSets.size() - 1;
1238 writeInfo.dstArrayElement = 0;
1239 writeInfo.descriptorCount = 1;
1240 writeInfo.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
1241 writeInfo.pImageInfo = &textureInfo;
1242 writeInfo.pBufferInfo = nullptr;
1243 writeInfo.pTexelBufferView = nullptr;
1244
1245 return true;
1246 };
1247
1248 if (command) {
1249 for (int i = 0; i < command->fNumTexSamplers; ++i) {
1250 auto texture = static_cast<const VulkanTexture*>(
1251 drawPass->getTexture(command->fTextureIndices[i]));
1252 auto sampler = static_cast<const VulkanSampler*>(
1253 drawPass->getSampler(command->fSamplerIndices[i]));
1254 if (!appendTextureSampler(texture, sampler)) {
1255 return;
1256 }
1257 }
1258 }
1259 if (fActiveGraphicsPipeline->dstReadRequirement() == DstReadRequirement::kTextureCopy) {
1260 auto texture = static_cast<const VulkanTexture*>(fDstCopy.first);
1261 auto sampler = static_cast<const VulkanSampler*>(fDstCopy.second);
1262 if (!appendTextureSampler(texture, sampler)) {
1263 return;
1264 }
1265 }
1266
1267 SkASSERT(writeDescriptorSets.size() == numTexSamplers &&
1268 descriptorImageInfos.size() == numTexSamplers);
1269 VULKAN_CALL(fSharedContext->interface(), UpdateDescriptorSets(fSharedContext->device(),
1270 numTexSamplers,
1271 &writeDescriptorSets[0],
1272 /*descriptorCopyCount=*/0,
1273 /*pDescriptorCopies=*/nullptr));
1274
1275 // Store the updated descriptor set to be actually bound later on. This avoids binding and
1276 // potentially having to re-bind in cases where earlier descriptor sets change while going
1277 // through drawpass commands.
1278 fTextureSamplerDescSetToBind = *set->descriptorSet();
1279 fBindTextureSamplers = true;
1280 fNumTextureSamplers = numTexSamplers;
1281 this->trackResource(std::move(set));
1282 }
1283
1284 void VulkanCommandBuffer::bindTextureSamplers() {
1285 fBindTextureSamplers = false;
1286 if (fTextureSamplerDescSetToBind != VK_NULL_HANDLE &&
1287 fActiveGraphicsPipeline->numFragTexturesAndSamplers() == fNumTextureSamplers) {
1288 VULKAN_CALL(fSharedContext->interface(),
1289 CmdBindDescriptorSets(fPrimaryCommandBuffer,
1290 VK_PIPELINE_BIND_POINT_GRAPHICS,
1291 fActiveGraphicsPipeline->layout(),
1292 VulkanGraphicsPipeline::kTextureBindDescSetIndex,
1293 /*setCount=*/1,
1294 &fTextureSamplerDescSetToBind,
1295 /*dynamicOffsetCount=*/0,
1296 /*dynamicOffsets=*/nullptr));
1297 }
1298 }
1299
1300 void VulkanCommandBuffer::setScissor(const Scissor& scissor) {
1301 this->setScissor(scissor.getRect(fReplayTranslation, fReplayClip));
1302 }
1303
1304 void VulkanCommandBuffer::setScissor(const SkIRect& rect) {
1305 VkRect2D scissor = {
1306 {rect.x(), rect.y()},
1307 {static_cast<unsigned int>(rect.width()), static_cast<unsigned int>(rect.height())}};
1308 VULKAN_CALL(fSharedContext->interface(),
1309 CmdSetScissor(fPrimaryCommandBuffer,
1310 /*firstScissor=*/0,
1311 /*scissorCount=*/1,
1312 &scissor));
1313 }
1314
1315 void VulkanCommandBuffer::draw(PrimitiveType,
1316 unsigned int baseVertex,
1317 unsigned int vertexCount) {
1318 SkASSERT(fActiveRenderPass);
1319 this->syncDescriptorSets();
1320 // TODO: set primitive type via dynamic state if available
1321 VULKAN_CALL(fSharedContext->interface(),
1322 CmdDraw(fPrimaryCommandBuffer,
1323 vertexCount,
1324 /*instanceCount=*/1,
1325 baseVertex,
1326 /*firstInstance=*/0));
1327 }
1328
1329 void VulkanCommandBuffer::drawIndexed(PrimitiveType,
1330 unsigned int baseIndex,
1331 unsigned int indexCount,
1332 unsigned int baseVertex) {
1333 SkASSERT(fActiveRenderPass);
1334 this->syncDescriptorSets();
1335 // TODO: set primitive type via dynamic state if available
1336 VULKAN_CALL(fSharedContext->interface(),
1337 CmdDrawIndexed(fPrimaryCommandBuffer,
1338 indexCount,
1339 /*instanceCount=*/1,
1340 baseIndex,
1341 baseVertex,
1342 /*firstInstance=*/0));
1343 }
1344
1345 void VulkanCommandBuffer::drawInstanced(PrimitiveType,
1346 unsigned int baseVertex,
1347 unsigned int vertexCount,
1348 unsigned int baseInstance,
1349 unsigned int instanceCount) {
1350 SkASSERT(fActiveRenderPass);
1351 this->syncDescriptorSets();
1352 // TODO: set primitive type via dynamic state if available
1353 VULKAN_CALL(fSharedContext->interface(),
1354 CmdDraw(fPrimaryCommandBuffer,
1355 vertexCount,
1356 instanceCount,
1357 baseVertex,
1358 baseInstance));
1359 }
1360
1361 void VulkanCommandBuffer::drawIndexedInstanced(PrimitiveType,
1362 unsigned int baseIndex,
1363 unsigned int indexCount,
1364 unsigned int baseVertex,
1365 unsigned int baseInstance,
1366 unsigned int instanceCount) {
1367 SkASSERT(fActiveRenderPass);
1368 this->syncDescriptorSets();
1369 // TODO: set primitive type via dynamic state if available
1370 VULKAN_CALL(fSharedContext->interface(),
1371 CmdDrawIndexed(fPrimaryCommandBuffer,
1372 indexCount,
1373 instanceCount,
1374 baseIndex,
1375 baseVertex,
1376 baseInstance));
1377 }
1378
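// Note: the indirect draws read their parameters from the currently bound indirect buffer. Since
// drawCount is always 1, the buffer is expected to hold a single VkDrawIndirectCommand (or
// VkDrawIndexedIndirectCommand) at fBoundIndirectBufferOffset, and the stride argument is unused.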
void VulkanCommandBuffer::drawIndirect(PrimitiveType) {
    SkASSERT(fActiveRenderPass);
    this->syncDescriptorSets();
    // TODO: set primitive type via dynamic state if available
    // Currently we only support one indirect draw operation at a time, so stride is irrelevant.
    VULKAN_CALL(fSharedContext->interface(),
                CmdDrawIndirect(fPrimaryCommandBuffer,
                                fBoundIndirectBuffer,
                                fBoundIndirectBufferOffset,
                                /*drawCount=*/1,
                                /*stride=*/0));
}

void VulkanCommandBuffer::drawIndexedIndirect(PrimitiveType) {
    SkASSERT(fActiveRenderPass);
    this->syncDescriptorSets();
    // TODO: set primitive type via dynamic state if available
    // Currently we only support one indirect draw operation at a time, so stride is irrelevant.
    VULKAN_CALL(fSharedContext->interface(),
                CmdDrawIndexedIndirect(fPrimaryCommandBuffer,
                                       fBoundIndirectBuffer,
                                       fBoundIndirectBufferOffset,
                                       /*drawCount=*/1,
                                       /*stride=*/0));
}

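// Note: compute passes are not yet supported by this command buffer, so adding one reports
// failure.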
bool VulkanCommandBuffer::onAddComputePass(DispatchGroupSpan) { return false; }

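// Note: buffer-to-buffer copies only flush any already-queued pipeline barriers before recording
// the copy; both buffers must have been created with the appropriate TRANSFER usage bits, which is
// asserted below.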
bool VulkanCommandBuffer::onCopyBufferToBuffer(const Buffer* srcBuffer,
                                               size_t srcOffset,
                                               const Buffer* dstBuffer,
                                               size_t dstOffset,
                                               size_t size) {
    auto vkSrcBuffer = static_cast<const VulkanBuffer*>(srcBuffer);
    auto vkDstBuffer = static_cast<const VulkanBuffer*>(dstBuffer);

    SkASSERT(vkSrcBuffer->bufferUsageFlags() & VK_BUFFER_USAGE_TRANSFER_SRC_BIT);
    SkASSERT(vkDstBuffer->bufferUsageFlags() & VK_BUFFER_USAGE_TRANSFER_DST_BIT);

    VkBufferCopy region;
    memset(&region, 0, sizeof(VkBufferCopy));
    region.srcOffset = srcOffset;
    region.dstOffset = dstOffset;
    region.size = size;

    this->submitPipelineBarriers();

    VULKAN_CALL(fSharedContext->interface(),
                CmdCopyBuffer(fPrimaryCommandBuffer,
                              vkSrcBuffer->vkBuffer(),
                              vkDstBuffer->vkBuffer(),
                              /*regionCount=*/1,
                              &region));

    return true;
}

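// Note: VkBufferImageCopy::bufferRowLength is specified in texels, so the row-bytes pitch supplied
// by the caller has to be divided by the format's bytes per block. For example, with a 4-byte
// format such as VK_FORMAT_R8G8B8A8_UNORM, a bufferRowBytes of 1024 maps to a bufferRowLength of
// 1024 / 4 = 256 texels.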
bool VulkanCommandBuffer::onCopyTextureToBuffer(const Texture* texture,
                                                SkIRect srcRect,
                                                const Buffer* buffer,
                                                size_t bufferOffset,
                                                size_t bufferRowBytes) {
    const VulkanTexture* srcTexture = static_cast<const VulkanTexture*>(texture);
    auto dstBuffer = static_cast<const VulkanBuffer*>(buffer);
    SkASSERT(dstBuffer->bufferUsageFlags() & VK_BUFFER_USAGE_TRANSFER_DST_BIT);

    // Obtain the VkFormat of the source texture so we can determine bytes per block.
    VulkanTextureInfo srcTextureInfo;
    SkAssertResult(TextureInfos::GetVulkanTextureInfo(texture->textureInfo(), &srcTextureInfo));
    size_t bytesPerBlock = VkFormatBytesPerBlock(srcTextureInfo.fFormat);

    // Set up copy region
    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));
    region.bufferOffset = bufferOffset;
    // Vulkan expects bufferRowLength in texels, not bytes.
    region.bufferRowLength = (uint32_t)(bufferRowBytes/bytesPerBlock);
    region.bufferImageHeight = 0; // Tightly packed
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, /*mipLevel=*/0, 0, 1 };
    region.imageOffset = { srcRect.left(), srcRect.top(), /*z=*/0 };
    region.imageExtent = { (uint32_t)srcRect.width(), (uint32_t)srcRect.height(), /*depth=*/1 };

    // Enable editing of the source texture so we can transition it to a layout it can be
    // copied from.
    const_cast<VulkanTexture*>(srcTexture)->setImageLayout(this,
                                                           VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                                           VK_ACCESS_TRANSFER_READ_BIT,
                                                           VK_PIPELINE_STAGE_TRANSFER_BIT,
                                                           false);
    // Set the current access mask for the destination buffer.
    const_cast<VulkanBuffer*>(dstBuffer)->setBufferAccess(this,
                                                          VK_ACCESS_TRANSFER_WRITE_BIT,
                                                          VK_PIPELINE_STAGE_TRANSFER_BIT);

    this->submitPipelineBarriers();

    VULKAN_CALL(fSharedContext->interface(),
                CmdCopyImageToBuffer(fPrimaryCommandBuffer,
                                     srcTexture->vkImage(),
                                     VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                     dstBuffer->vkBuffer(),
                                     /*regionCount=*/1,
                                     &region));
    return true;
}

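// Note: for compressed formats the bufferRowLength conversion goes bytes -> blocks -> texels. As
// an illustrative example (assuming a 4x4-block format that uses 8 bytes per block, such as
// ETC2 RGB8), a fBufferRowBytes of 256 is 256 / 8 = 32 blocks per row, which becomes a
// bufferRowLength of 32 * 4 = 128 texels.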
bool VulkanCommandBuffer::onCopyBufferToTexture(const Buffer* buffer,
                                                const Texture* texture,
                                                const BufferTextureCopyData* copyData,
                                                int count) {
    auto srcBuffer = static_cast<const VulkanBuffer*>(buffer);
    SkASSERT(srcBuffer->bufferUsageFlags() & VK_BUFFER_USAGE_TRANSFER_SRC_BIT);
    const VulkanTexture* dstTexture = static_cast<const VulkanTexture*>(texture);

    // Obtain the VkFormat of the destination texture so we can determine bytes per block.
    VulkanTextureInfo dstTextureInfo;
    SkAssertResult(TextureInfos::GetVulkanTextureInfo(dstTexture->textureInfo(), &dstTextureInfo));
    size_t bytesPerBlock = VkFormatBytesPerBlock(dstTextureInfo.fFormat);
    SkISize oneBlockDims = CompressedDimensions(dstTexture->textureInfo().compressionType(),
                                                {1, 1});

    // Set up copy regions.
    TArray<VkBufferImageCopy> regions(count);
    for (int i = 0; i < count; ++i) {
        VkBufferImageCopy& region = regions.push_back();
        memset(&region, 0, sizeof(VkBufferImageCopy));
        region.bufferOffset = copyData[i].fBufferOffset;
        // copyData provides row length in bytes, but Vulkan expects bufferRowLength in texels.
        // For compressed formats this is the number of logical pixels, not the number of blocks.
        region.bufferRowLength =
                (uint32_t)((copyData[i].fBufferRowBytes/bytesPerBlock) * oneBlockDims.fWidth);
        region.bufferImageHeight = 0; // Tightly packed
        region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, copyData[i].fMipLevel, 0, 1 };
        region.imageOffset = { copyData[i].fRect.left(),
                               copyData[i].fRect.top(),
                               /*z=*/0 };
        region.imageExtent = { (uint32_t)copyData[i].fRect.width(),
                               (uint32_t)copyData[i].fRect.height(),
                               /*depth=*/1 };
    }

    // Enable editing of the destination texture so we can transition it to a layout it can be
    // copied to.
    const_cast<VulkanTexture*>(dstTexture)->setImageLayout(this,
                                                           VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                                           VK_ACCESS_TRANSFER_WRITE_BIT,
                                                           VK_PIPELINE_STAGE_TRANSFER_BIT,
                                                           false);

    this->submitPipelineBarriers();

    VULKAN_CALL(fSharedContext->interface(),
                CmdCopyBufferToImage(fPrimaryCommandBuffer,
                                     srcBuffer->vkBuffer(),
                                     dstTexture->vkImage(),
                                     VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                     regions.size(),
                                     regions.begin()));
    return true;
}

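// Note: the image-to-image copy always reads from mip level 0 of the source, while the destination
// subresource is the caller-provided mipLevel; both images are transitioned to their respective
// TRANSFER_SRC/TRANSFER_DST optimal layouts before the copy is recorded.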
bool VulkanCommandBuffer::onCopyTextureToTexture(const Texture* src,
                                                 SkIRect srcRect,
                                                 const Texture* dst,
                                                 SkIPoint dstPoint,
                                                 int mipLevel) {
    const VulkanTexture* srcTexture = static_cast<const VulkanTexture*>(src);
    const VulkanTexture* dstTexture = static_cast<const VulkanTexture*>(dst);

    VkImageCopy copyRegion;
    memset(&copyRegion, 0, sizeof(VkImageCopy));
    copyRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    copyRegion.srcOffset = { srcRect.fLeft, srcRect.fTop, 0 };
    copyRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, (uint32_t)mipLevel, 0, 1 };
    copyRegion.dstOffset = { dstPoint.fX, dstPoint.fY, 0 };
    copyRegion.extent = { (uint32_t)srcRect.width(), (uint32_t)srcRect.height(), 1 };

    // Enable editing of the src texture so we can transition it to a layout it can be copied from.
    const_cast<VulkanTexture*>(srcTexture)->setImageLayout(this,
                                                           VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                                           VK_ACCESS_TRANSFER_READ_BIT,
                                                           VK_PIPELINE_STAGE_TRANSFER_BIT,
                                                           false);
    // Enable editing of the dst texture so we can transition it to a layout it can be copied to.
    const_cast<VulkanTexture*>(dstTexture)->setImageLayout(this,
                                                           VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                                           VK_ACCESS_TRANSFER_WRITE_BIT,
                                                           VK_PIPELINE_STAGE_TRANSFER_BIT,
                                                           false);

    this->submitPipelineBarriers();

    VULKAN_CALL(fSharedContext->interface(),
                CmdCopyImage(fPrimaryCommandBuffer,
                             srcTexture->vkImage(),
                             VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                             dstTexture->vkImage(),
                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                             /*regionCount=*/1,
                             &copyRegion));

    return true;
}

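// Note: synchronizing a buffer to the CPU only records a barrier that makes prior GPU writes
// visible to host reads (VK_ACCESS_HOST_READ_BIT at the host pipeline stage); the actual readback
// happens after the command buffer has been submitted and has finished executing.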
bool VulkanCommandBuffer::onSynchronizeBufferToCpu(const Buffer* buffer, bool* outDidResultInWork) {
    static_cast<const VulkanBuffer*>(buffer)->setBufferAccess(this,
                                                              VK_ACCESS_HOST_READ_BIT,
                                                              VK_PIPELINE_STAGE_HOST_BIT);

    *outDidResultInWork = true;
    return true;
}

bool VulkanCommandBuffer::onClearBuffer(const Buffer*, size_t offset, size_t size) {
    return false;
}

void VulkanCommandBuffer::addBufferMemoryBarrier(const Resource* resource,
                                                 VkPipelineStageFlags srcStageMask,
                                                 VkPipelineStageFlags dstStageMask,
                                                 VkBufferMemoryBarrier* barrier) {
    SkASSERT(resource);
    this->pipelineBarrier(resource,
                          srcStageMask,
                          dstStageMask,
                          /*byRegion=*/false,
                          kBufferMemory_BarrierType,
                          barrier);
}

void VulkanCommandBuffer::addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
                                                 VkPipelineStageFlags dstStageMask,
                                                 VkBufferMemoryBarrier* barrier) {
    // We don't pass a resource to the command buffer here. The command buffer would only use it to
    // hold a ref, and every place where we add a buffer memory barrier we are also issuing some
    // other command with that buffer. Those other commands already cause the command buffer to
    // hold a ref to the buffer.
    this->pipelineBarrier(/*resource=*/nullptr,
                          srcStageMask,
                          dstStageMask,
                          /*byRegion=*/false,
                          kBufferMemory_BarrierType,
                          barrier);
}

void VulkanCommandBuffer::addImageMemoryBarrier(const Resource* resource,
                                                VkPipelineStageFlags srcStageMask,
                                                VkPipelineStageFlags dstStageMask,
                                                bool byRegion,
                                                VkImageMemoryBarrier* barrier) {
    SkASSERT(resource);
    this->pipelineBarrier(resource,
                          srcStageMask,
                          dstStageMask,
                          byRegion,
                          kImageMemory_BarrierType,
                          barrier);
}

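// Note: pipelineBarrier() does not record a barrier immediately. Buffer and image barriers are
// accumulated in fBufferBarriers / fImageBarriers along with merged src/dst stage masks, and the
// whole batch is emitted by submitPipelineBarriers() (immediately when inside a render pass, where
// only self-dependency image barriers are legal).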
void VulkanCommandBuffer::pipelineBarrier(const Resource* resource,
                                          VkPipelineStageFlags srcStageMask,
                                          VkPipelineStageFlags dstStageMask,
                                          bool byRegion,
                                          BarrierType barrierType,
                                          void* barrier) {
    // TODO: Do we need to handle wrapped command buffers?
    // SkASSERT(!this->isWrapped());
    SkASSERT(fActive);
#ifdef SK_DEBUG
    // Image barriers can appear inside a render pass, but only as subpass self-dependencies, which
    // requires additional support. Buffer barriers are never allowed inside a render pass. For
    // now, assert that any barrier recorded within an active render pass is a valid
    // self-dependency image barrier.
    bool isValidSubpassBarrier = false;
    if (barrierType == kImageMemory_BarrierType) {
        VkImageMemoryBarrier* imgBarrier = static_cast<VkImageMemoryBarrier*>(barrier);
        isValidSubpassBarrier = (imgBarrier->newLayout == imgBarrier->oldLayout) &&
                                (imgBarrier->srcQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED) &&
                                (imgBarrier->dstQueueFamilyIndex == VK_QUEUE_FAMILY_IGNORED) &&
                                byRegion;
    }
    SkASSERT(!fActiveRenderPass || isValidSubpassBarrier);
#endif

    if (barrierType == kBufferMemory_BarrierType) {
        const VkBufferMemoryBarrier* barrierPtr = static_cast<VkBufferMemoryBarrier*>(barrier);
        fBufferBarriers.push_back(*barrierPtr);
    } else {
        SkASSERT(barrierType == kImageMemory_BarrierType);
        const VkImageMemoryBarrier* barrierPtr = static_cast<VkImageMemoryBarrier*>(barrier);
        // We need to check whether the new image barrier covers part of the same subresource range
        // as a barrier already in the current batch. If it does, we must submit the existing batch
        // first, because the Vulkan spec does not define an ordering for barriers submitted within
        // the same batch.
        // TODO: Look if we can gain anything by merging barriers together instead of submitting
        // the old ones.
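        // Overlap example: if a queued barrier covers mip levels [0, 3] and the new barrier covers
        // [2, 5], then max(2, 0) = 2 <= min(5, 3) = 3, so the ranges intersect and the queued
        // batch is flushed before the new barrier is added.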
        for (int i = 0; i < fImageBarriers.size(); ++i) {
            VkImageMemoryBarrier& currentBarrier = fImageBarriers[i];
            if (barrierPtr->image == currentBarrier.image) {
                const VkImageSubresourceRange newRange = barrierPtr->subresourceRange;
                const VkImageSubresourceRange oldRange = currentBarrier.subresourceRange;
                SkASSERT(newRange.aspectMask == oldRange.aspectMask);
                SkASSERT(newRange.baseArrayLayer == oldRange.baseArrayLayer);
                SkASSERT(newRange.layerCount == oldRange.layerCount);
                uint32_t newStart = newRange.baseMipLevel;
                uint32_t newEnd = newRange.baseMipLevel + newRange.levelCount - 1;
                uint32_t oldStart = oldRange.baseMipLevel;
                uint32_t oldEnd = oldRange.baseMipLevel + oldRange.levelCount - 1;
                if (std::max(newStart, oldStart) <= std::min(newEnd, oldEnd)) {
                    this->submitPipelineBarriers();
                    break;
                }
            }
        }
        fImageBarriers.push_back(*barrierPtr);
    }
    fBarriersByRegion |= byRegion;
    fSrcStageMask = fSrcStageMask | srcStageMask;
    fDstStageMask = fDstStageMask | dstStageMask;

    if (fActiveRenderPass) {
        this->submitPipelineBarriers(true);
    }
}

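// Note: this flushes the accumulated buffer and image barriers with a single vkCmdPipelineBarrier
// call using the merged stage masks, then resets the batching state so the next barrier starts a
// fresh batch. It is a no-op when no barriers are pending.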
void VulkanCommandBuffer::submitPipelineBarriers(bool forSelfDependency) {
    SkASSERT(fActive);

    // TODO: Do we need to handle SecondaryCommandBuffers as well?

    // Currently we never submit a pipeline barrier without at least one buffer or image barrier.
    if (!fBufferBarriers.empty() || !fImageBarriers.empty()) {
        // Image barriers can appear inside a render pass, but only as subpass self-dependencies,
        // which requires additional support. Buffer barriers are never allowed inside a render
        // pass. For now, assert that barriers submitted within an active render pass are only for
        // self-dependencies.
        SkASSERT(!fActiveRenderPass || forSelfDependency);
        // TODO: Do we need to handle wrapped CommandBuffers?
        // SkASSERT(!this->isWrapped());
        SkASSERT(fSrcStageMask && fDstStageMask);

        VkDependencyFlags dependencyFlags = fBarriersByRegion ? VK_DEPENDENCY_BY_REGION_BIT : 0;
        VULKAN_CALL(fSharedContext->interface(),
                    CmdPipelineBarrier(fPrimaryCommandBuffer, fSrcStageMask, fDstStageMask,
                                       dependencyFlags,
                                       /*memoryBarrierCount=*/0, /*pMemoryBarrier=*/nullptr,
                                       fBufferBarriers.size(), fBufferBarriers.begin(),
                                       fImageBarriers.size(), fImageBarriers.begin()));
        fBufferBarriers.clear();
        fImageBarriers.clear();
        fBarriersByRegion = false;
        fSrcStageMask = 0;
        fDstStageMask = 0;
    }
    SkASSERT(fBufferBarriers.empty());
    SkASSERT(fImageBarriers.empty());
    SkASSERT(!fBarriersByRegion);
    SkASSERT(!fSrcStageMask);
    SkASSERT(!fDstStageMask);
}

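// Note: per the Vulkan spec, vkCmdUpdateBuffer is intended for small updates: dataSize must be a
// multiple of 4 and no larger than 65536 bytes, dstOffset must be a multiple of 4, and the command
// must be recorded outside of a render pass. Larger uploads are typically done with a
// staging-buffer copy instead.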
void VulkanCommandBuffer::updateBuffer(const VulkanBuffer* buffer,
                                       const void* data,
                                       size_t dataSize,
                                       size_t dstOffset) {
    // vkCmdUpdateBuffer can only be called outside of a render pass.
    SkASSERT(fActive && !fActiveRenderPass);
    if (!buffer || buffer->vkBuffer() == VK_NULL_HANDLE) {
        SKGPU_LOG_W("VulkanCommandBuffer::updateBuffer requires a valid VulkanBuffer pointer "
                    "backed by a valid VkBuffer handle");
        return;
    }

    // Per the spec, vkCmdUpdateBuffer is treated as a "transfer" operation for the purposes of
    // synchronization barriers. Ensure this write occurs after any previous read operations and
    // without clobbering any other write operations to the same memory in the cache.
    buffer->setBufferAccess(this, VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT);
    this->submitPipelineBarriers();

    VULKAN_CALL(fSharedContext->interface(), CmdUpdateBuffer(fPrimaryCommandBuffer,
                                                             buffer->vkBuffer(),
                                                             dstOffset,
                                                             dataSize,
                                                             data));
}

void VulkanCommandBuffer::nextSubpass() {
    // TODO: Use VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS if we add secondary cmd buffers
    VULKAN_CALL(fSharedContext->interface(),
                CmdNextSubpass(fPrimaryCommandBuffer, VK_SUBPASS_CONTENTS_INLINE));
}

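// Note: the viewport is recorded with the standard [0, 1] depth range; only the origin and extent
// vary with the supplied rect.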
void VulkanCommandBuffer::setViewport(SkIRect viewport) {
    VkViewport vkViewport = {
            (float) viewport.fLeft,
            (float) viewport.fTop,
            (float) viewport.width(),
            (float) viewport.height(),
            0.0f, // minDepth
            1.0f, // maxDepth
    };
    VULKAN_CALL(fSharedContext->interface(),
                CmdSetViewport(fPrimaryCommandBuffer,
                               /*firstViewport=*/0,
                               /*viewportCount=*/1,
                               &vkViewport));
}

}  // namespace skgpu::graphite