/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "node_context_pool_manager_vk.h"

#include <cstdint>
#include <vulkan/vulkan_core.h>

#include <base/containers/fixed_string.h>
#include <base/math/mathf.h>
#include <base/util/compile_time_hashes.h>
#include <render/device/pipeline_state_desc.h>
#include <render/namespace.h>

#include "device/device.h"
#include "device/gpu_resource_handle_util.h"
#include "device/gpu_resource_manager.h"
#include "nodecontext/node_context_pool_manager.h"
#include "nodecontext/render_command_list.h"
#include "util/log.h"
#include "vulkan/device_vk.h"
#include "vulkan/gpu_image_vk.h"
#include "vulkan/gpu_resource_util_vk.h"
#include "vulkan/pipeline_create_functions_vk.h"
#include "vulkan/validate_vk.h"

using namespace BASE_NS;

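// hash specializations so the BASE_NS hashing utilities (HashCombine/HashRange) can be
// used with these render pass related types when generating the cache keys below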
template<>
uint64_t BASE_NS::hash(const RENDER_NS::ImageLayout& val)
{
    return static_cast<uint64_t>(val);
}
template<>
uint64_t BASE_NS::hash(const RENDER_NS::RenderPassSubpassDesc& subpass)
{
    uint64_t seed = 0;
    HashRange(seed, subpass.inputAttachmentIndices, subpass.inputAttachmentIndices + subpass.inputAttachmentCount);
    HashRange(seed, subpass.colorAttachmentIndices, subpass.colorAttachmentIndices + subpass.colorAttachmentCount);
    HashRange(
        seed, subpass.resolveAttachmentIndices, subpass.resolveAttachmentIndices + subpass.resolveAttachmentCount);
    if (subpass.depthAttachmentCount) {
        HashCombine(seed, static_cast<uint64_t>(subpass.depthAttachmentIndex));
    }
    if (subpass.viewMask > 1U) {
        HashCombine(seed, subpass.viewMask);
    }
    return seed;
}

RENDER_BEGIN_NAMESPACE()
namespace {
struct FBSize {
    uint32_t width { 0 };
    uint32_t height { 0 };
    uint32_t layers { 1 };
};

inline void HashRenderPassCompatibility(uint64_t& hash, const RenderPassDesc& renderPassDesc,
    const LowLevelRenderPassCompatibilityDescVk& renderPassCompatibilityDesc, const RenderPassSubpassDesc& subpasses,
    const RenderPassAttachmentResourceStates& inputResourceStates)
{
    for (uint32_t idx = 0; idx < renderPassDesc.attachmentCount; ++idx) {
        const LowLevelRenderPassCompatibilityDescVk::Attachment& atCompatibilityDesc =
            renderPassCompatibilityDesc.attachments[idx];
        HashCombine(hash, (static_cast<uint64_t>(atCompatibilityDesc.format) << 32ULL) |
                              (static_cast<uint64_t>(atCompatibilityDesc.sampleCountFlags)));
        // render passes need to have matching stage masks (often creates a different hash on the first frame)
        // soft reset in the render graph tries to prevent too many render passes
        HashCombine(hash, static_cast<uint64_t>(inputResourceStates.states[idx].pipelineStageFlags));
        if (subpasses.viewMask > 1U) {
            // with the multi-view extension, the render pass is updated for all mips
            HashCombine(hash, (static_cast<uint64_t>(renderPassDesc.attachments[idx].layer) << 32ULL) |
                                  (static_cast<uint64_t>(renderPassDesc.attachments[idx].mipLevel)));
        }
    }
    // NOTE: subpass resource states are not hashed
    HashRange(hash, &subpasses, &subpasses + renderPassDesc.subpassCount);
}

inline void HashRenderPassLayouts(
    uint64_t& hash, const RenderPassDesc& renderPassDesc, const RenderPassImageLayouts& renderPassImageLayouts)
{
    for (uint32_t idx = 0; idx < renderPassDesc.attachmentCount; ++idx) {
        HashCombine(hash, (static_cast<uint64_t>(renderPassImageLayouts.attachmentInitialLayouts[idx]) << 32ULL) |
                              (static_cast<uint64_t>(renderPassImageLayouts.attachmentFinalLayouts[idx])));
    }
}

inline void HashFramebuffer(
    uint64_t& hash, const RenderPassDesc& renderPassDesc, const GpuResourceManager& gpuResourceMgr)
{
    for (uint32_t idx = 0; idx < renderPassDesc.attachmentCount; ++idx) {
        const RenderPassDesc::AttachmentDesc& atDesc = renderPassDesc.attachments[idx];
        // generation counters and hashing with handles are not enough
        // the reason is that e.g. shallow handles can point to different GPU handles
        // and have a counter of zero in their index
        // with handle re-use this can lead to situations where the gen counter is zero
        // NOTE: we hash with our own gpuHandle and the vulkan image (in case the vulkan image id is re-used)
        const RenderHandle clientHandle = renderPassDesc.attachmentHandles[idx];
        const EngineResourceHandle gpuHandle = gpuResourceMgr.GetGpuHandle(clientHandle);
        uint64_t imageId = clientHandle.id;
        if (const GpuImageVk* image = gpuResourceMgr.GetImage<GpuImageVk>(clientHandle); image) {
            imageId = VulkanHandleCast<uint64_t>(image->GetPlatformData().image);
        }
        const uint64_t perAttachmentData[2U] { gpuHandle.id, imageId };
        HashCombine(hash, FNV1aHash(perAttachmentData),
            ((static_cast<uint64_t>(atDesc.layer) << 32ULL) | static_cast<uint64_t>(atDesc.mipLevel)));
    }
}

inline void HashRenderPassOps(uint64_t& hash, const RenderPassDesc& renderPassDesc)
{
    for (uint32_t idx = 0; idx < renderPassDesc.attachmentCount; ++idx) {
        const auto& attachRef = renderPassDesc.attachments[idx];
        const uint64_t opHash = (static_cast<uint64_t>(attachRef.loadOp) << 48ULL) |
                                (static_cast<uint64_t>(attachRef.storeOp) << 32ULL) |
                                (static_cast<uint64_t>(attachRef.stencilLoadOp) << 16ULL) |
                                (static_cast<uint64_t>(attachRef.stencilStoreOp));
        HashCombine(hash, opHash);
    }
}

struct RenderPassHashes {
    uint64_t renderPassCompatibilityHash { 0 };
    uint64_t renderPassHash { 0 };  // continued hashing from compatibility
    uint64_t frameBufferHash { 0 }; // only framebuffer related hash
};

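// builds the three cache keys for a render pass begin: the compatibility hash (attachment
// formats, sample counts, stage flags, and subpass setup), the full render pass hash
// (compatibility plus image layouts and load/store ops), and the framebuffer hash
// (compatibility plus per-attachment image identity, layer, and mip level)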
inline RenderPassHashes HashBeginRenderPass(const RenderCommandBeginRenderPass& beginRenderPass,
    const LowLevelRenderPassCompatibilityDescVk& renderPassCompatibilityDesc, const GpuResourceManager& gpuResourceMgr)
{
    RenderPassHashes rpHashes;

    const auto& renderPassDesc = beginRenderPass.renderPassDesc;

    PLUGIN_ASSERT(renderPassDesc.subpassCount > 0);
    HashRenderPassCompatibility(rpHashes.renderPassCompatibilityHash, renderPassDesc, renderPassCompatibilityDesc,
        beginRenderPass.subpasses[0], beginRenderPass.inputResourceStates);

    rpHashes.renderPassHash = rpHashes.renderPassCompatibilityHash; // for starting point
    HashRenderPassLayouts(rpHashes.renderPassHash, renderPassDesc, beginRenderPass.imageLayouts);
    HashRenderPassOps(rpHashes.renderPassHash, renderPassDesc);

    rpHashes.frameBufferHash = rpHashes.renderPassCompatibilityHash; // depends on the compatible render pass
    HashFramebuffer(rpHashes.frameBufferHash, renderPassDesc, gpuResourceMgr);

    return rpHashes;
}

VkFramebuffer CreateFramebuffer(const GpuResourceManager& gpuResourceMgr, const RenderPassDesc& renderPassDesc,
    const LowLevelRenderPassDataVk& renderPassData, const VkDevice device)
{
    const uint32_t attachmentCount = renderPassDesc.attachmentCount;
    PLUGIN_ASSERT(attachmentCount <= PipelineStateConstants::MAX_RENDER_PASS_ATTACHMENT_COUNT);

    // the size is taken from the render pass data
    // there might be e.g. fragment shading rate images whose size differs
    FBSize size { renderPassData.framebufferSize.width, renderPassData.framebufferSize.height, 1u };
    VkImageView imageViews[PipelineStateConstants::MAX_RENDER_PASS_ATTACHMENT_COUNT] = {};
    uint32_t viewIndex = 0;

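    // pick the most specific image view per attachment: the base view by default, an
    // all-layer mip view for multi-view, or a dedicated mip/layer view when requested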
    bool validImageViews = true;
    for (uint32_t idx = 0; idx < attachmentCount; ++idx) {
        const RenderHandle handle = renderPassDesc.attachmentHandles[idx];
        const RenderPassDesc::AttachmentDesc& attachmentDesc = renderPassDesc.attachments[idx];
        if (const GpuImageVk* image = gpuResourceMgr.GetImage<GpuImageVk>(handle); image) {
            const GpuImagePlatformDataVk& plat = image->GetPlatformData();
            const GpuImagePlatformDataViewsVk& imagePlat = image->GetPlatformDataViews();
            imageViews[viewIndex] = plat.imageViewBase;
            if ((renderPassData.viewMask > 1u) && (plat.arrayLayers > 1u)) {
                // multi-view: select the view containing all the layers, but keep the framebuffer layer count at 1
                if ((!imagePlat.mipImageAllLayerViews.empty()) &&
                    (attachmentDesc.mipLevel < static_cast<uint32_t>(imagePlat.mipImageAllLayerViews.size()))) {
                    imageViews[viewIndex] = imagePlat.mipImageAllLayerViews[attachmentDesc.mipLevel];
                } else {
                    imageViews[viewIndex] = plat.imageView;
                }
                size.layers = 1u;
            } else if ((attachmentDesc.mipLevel >= 1) && (attachmentDesc.mipLevel < imagePlat.mipImageViews.size())) {
                imageViews[viewIndex] = imagePlat.mipImageViews[attachmentDesc.mipLevel];
            } else if ((attachmentDesc.layer >= 1) && (attachmentDesc.layer < imagePlat.layerImageViews.size())) {
                imageViews[viewIndex] = imagePlat.layerImageViews[attachmentDesc.layer];
            }
            viewIndex++;
        }
        if (!imageViews[idx]) {
            validImageViews = false;
        }
    }
#if (RENDER_VALIDATION_ENABLED == 1)
    if (!validImageViews || (viewIndex != attachmentCount)) {
        PLUGIN_LOG_E("RENDER_VALIDATION: invalid image attachment in FBO creation");
    }
#endif
    VkFramebuffer framebuffer = VK_NULL_HANDLE;
    if (validImageViews && (viewIndex == attachmentCount)) {
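        // the framebuffer is created against the compatibility render pass; in Vulkan a
        // framebuffer may be used with any render pass compatible with the one it was created for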
        const VkFramebufferCreateInfo framebufferCreateInfo {
            VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, // sType
            nullptr,                                   // pNext
            VkFramebufferCreateFlags { 0 },            // flags
            renderPassData.renderPassCompatibility,    // renderPass
            attachmentCount,                           // attachmentCount
            imageViews,                                // pAttachments
            size.width,                                // width
            size.height,                               // height
            size.layers,                               // layers
        };

        VALIDATE_VK_RESULT(vkCreateFramebuffer(device, // device
            &framebufferCreateInfo,                    // pCreateInfo
            nullptr,                                   // pAllocator
            &framebuffer));                            // pFramebuffer
    }

    return framebuffer;
}

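// creates the command pool for one buffering slot together with its single pre-allocated
// command buffer (primary or secondary level) and an associated semaphore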
ContextCommandPoolVk CreateContextCommandPool(
    const VkDevice device, const VkCommandBufferLevel cmdBufferLevel, const uint32_t queueFamilyIndex)
{
    constexpr VkCommandPoolCreateFlags commandPoolCreateFlags { 0u };
    const VkCommandPoolCreateInfo commandPoolCreateInfo {
        VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, // sType
        nullptr,                                    // pNext
        commandPoolCreateFlags,                     // flags
        queueFamilyIndex,                           // queueFamilyIndex
    };
    constexpr VkSemaphoreCreateFlags semaphoreCreateFlags { 0 };
    constexpr VkSemaphoreCreateInfo semaphoreCreateInfo {
        VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO, // sType
        nullptr,                                 // pNext
        semaphoreCreateFlags,                    // flags
    };

    ContextCommandPoolVk ctxPool;
    VALIDATE_VK_RESULT(vkCreateCommandPool(device, // device
        &commandPoolCreateInfo,                    // pCreateInfo
        nullptr,                                   // pAllocator
        &ctxPool.commandPool));                    // pCommandPool

    // pre-create command buffers and semaphores
    const VkCommandBufferAllocateInfo commandBufferAllocateInfo {
        VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, // sType
        nullptr,                                        // pNext
        ctxPool.commandPool,                            // commandPool
        cmdBufferLevel,                                 // level
        1,                                              // commandBufferCount
    };

    VALIDATE_VK_RESULT(vkAllocateCommandBuffers(device, // device
        &commandBufferAllocateInfo,                     // pAllocateInfo
        &ctxPool.commandBuffer.commandBuffer));         // pCommandBuffers

    VALIDATE_VK_RESULT(vkCreateSemaphore(device, // device
        &semaphoreCreateInfo,                    // pCreateInfo
        nullptr,                                 // pAllocator
        &ctxPool.commandBuffer.semaphore));      // pSemaphore

    return ctxPool;
}
} // namespace

NodeContextPoolManagerVk::NodeContextPoolManagerVk(
    Device& device, GpuResourceManager& gpuResourceManager, const GpuQueue& gpuQueue)
    : NodeContextPoolManager(), device_ { device }, gpuResourceMgr_ { gpuResourceManager }, gpuQueue_(gpuQueue)
{
    const auto& deviceVk = static_cast<const DeviceVk&>(device_);
    const VkDevice vkDevice = static_cast<const DevicePlatformDataVk&>(device_.GetPlatformData()).device;

    const LowLevelGpuQueueVk lowLevelGpuQueue = deviceVk.GetGpuQueue(gpuQueue);
    const uint32_t bufferingCount = device_.GetCommandBufferingCount();
    if (bufferingCount > 0) {
        // prepare and create command buffers
        commandPools_.resize(bufferingCount);
        commandSecondaryPools_.resize(bufferingCount);
        const uint32_t queueFamilyIndex = lowLevelGpuQueue.queueInfo.queueFamilyIndex;
        for (uint32_t frameIdx = 0; frameIdx < commandPools_.size(); ++frameIdx) {
            commandPools_[frameIdx] = CreateContextCommandPool(
                vkDevice, VkCommandBufferLevel::VK_COMMAND_BUFFER_LEVEL_PRIMARY, queueFamilyIndex);
            commandSecondaryPools_[frameIdx] = CreateContextCommandPool(
                vkDevice, VkCommandBufferLevel::VK_COMMAND_BUFFER_LEVEL_SECONDARY, queueFamilyIndex);
        }
        // NOTE: cmd buffers tagged in first beginFrame
    }
}

NodeContextPoolManagerVk::~NodeContextPoolManagerVk()
{
    const VkDevice device = ((const DevicePlatformDataVk&)device_.GetPlatformData()).device;

    auto DestroyContextCommandPool = [](const auto& device, const auto& commandPools) {
        for (auto& cmdPoolRef : commandPools) {
            if (cmdPoolRef.commandBuffer.semaphore) {
                vkDestroySemaphore(device,              // device
                    cmdPoolRef.commandBuffer.semaphore, // semaphore
                    nullptr);                           // pAllocator
            }
            if (cmdPoolRef.commandPool) {
                vkDestroyCommandPool(device, // device
                    cmdPoolRef.commandPool,  // commandPool
                    nullptr);                // pAllocator
            }
        }
    };
    DestroyContextCommandPool(device, commandPools_);
    DestroyContextCommandPool(device, commandSecondaryPools_);

    for (auto& ref : framebufferCache_.hashToElement) {
        if (ref.second.frameBuffer != VK_NULL_HANDLE) {
            vkDestroyFramebuffer(device, // device
                ref.second.frameBuffer,  // framebuffer
                nullptr);                // pAllocator
        }
    }
    for (auto& ref : renderPassCache_.hashToElement) {
        if (ref.second.renderPass != VK_NULL_HANDLE) {
            renderPassCreator_.DestroyRenderPass(device, ref.second.renderPass);
        }
    }
    for (auto& ref : renderPassCompatibilityCache_.hashToElement) {
        if (ref.second.renderPass != VK_NULL_HANDLE) {
            renderPassCreator_.DestroyRenderPass(device, ref.second.renderPass);
        }
    }
}

void NodeContextPoolManagerVk::BeginFrame()
{
#if (RENDER_VALIDATION_ENABLED == 1)
    frameIndexFront_ = device_.GetFrameCount();
#endif
}

void NodeContextPoolManagerVk::BeginBackendFrame()
{
    const uint64_t frameCount = device_.GetFrameCount();

#if (RENDER_VALIDATION_ENABLED == 1)
    PLUGIN_ASSERT(frameIndexBack_ != frameCount); // prevent multiple calls per frame
    frameIndexBack_ = frameCount;
    PLUGIN_ASSERT(frameIndexFront_ == frameIndexBack_);
#endif
#if (RENDER_VULKAN_VALIDATION_ENABLED == 1)
    if (firstFrame_) {
        firstFrame_ = false;
        for (const auto& cmdPoolRef : commandPools_) {
            GpuResourceUtil::DebugObjectNameVk(device_, VK_OBJECT_TYPE_COMMAND_BUFFER,
                VulkanHandleCast<uint64_t>(cmdPoolRef.commandBuffer.commandBuffer), debugName_ + "_cmd_buf");
        }
        // deferred creation
        for (const auto& cmdPoolRef : commandSecondaryPools_) {
            GpuResourceUtil::DebugObjectNameVk(device_, VK_OBJECT_TYPE_COMMAND_BUFFER,
                VulkanHandleCast<uint64_t>(cmdPoolRef.commandBuffer.commandBuffer), debugName_ + "_secondary_cmd_buf");
        }
    }
#endif

    bufferingIndex_ = (bufferingIndex_ + 1) % (uint32_t)commandPools_.size();

    constexpr uint64_t additionalFrameCount { 2u };
    const auto minAge = device_.GetCommandBufferingCount() + additionalFrameCount;
    const auto ageLimit = (frameCount < minAge) ? 0 : (frameCount - minAge);

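    // trim cached framebuffers and render passes that have not been used within the
    // buffering window (plus a small safety margin), so the caches do not grow unbounded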
    const VkDevice device = ((const DevicePlatformDataVk&)device_.GetPlatformData()).device;
    {
        auto& cache = framebufferCache_.hashToElement;
        for (auto iter = cache.begin(); iter != cache.end();) {
            if (iter->second.frameUseIndex < ageLimit) {
                if (iter->second.frameBuffer) {
                    vkDestroyFramebuffer(device, iter->second.frameBuffer, nullptr);
                }
                iter = cache.erase(iter);
            } else {
                ++iter;
            }
        }
    }
    {
        auto& cache = renderPassCache_.hashToElement;
        for (auto iter = cache.begin(); iter != cache.end();) {
            if (iter->second.frameUseIndex < ageLimit) {
                if (iter->second.renderPass) {
                    renderPassCreator_.DestroyRenderPass(device, iter->second.renderPass);
                }
                iter = cache.erase(iter);
            } else {
                ++iter;
            }
        }
    }
}

const ContextCommandPoolVk& NodeContextPoolManagerVk::GetContextCommandPool() const
{
#if (RENDER_VALIDATION_ENABLED == 1)
    PLUGIN_ASSERT(frameIndexFront_ == frameIndexBack_);
#endif
    return commandPools_[bufferingIndex_];
}

const ContextCommandPoolVk& NodeContextPoolManagerVk::GetContextSecondaryCommandPool() const
{
#if (RENDER_VALIDATION_ENABLED == 1)
    PLUGIN_ASSERT(frameIndexFront_ == frameIndexBack_);
#endif
    PLUGIN_ASSERT(bufferingIndex_ < static_cast<uint32_t>(commandSecondaryPools_.size()));
    return commandSecondaryPools_[bufferingIndex_];
}

LowLevelRenderPassDataVk NodeContextPoolManagerVk::GetRenderPassData(
    const RenderCommandBeginRenderPass& beginRenderPass)
{
    LowLevelRenderPassDataVk renderPassData;
    renderPassData.subpassIndex = beginRenderPass.subpassStartIndex;

    PLUGIN_ASSERT(renderPassData.subpassIndex < static_cast<uint32_t>(beginRenderPass.subpasses.size()));
    const auto& deviceVk = (const DeviceVk&)device_;
    if (deviceVk.GetCommonDeviceExtensions().multiView) {
        renderPassData.viewMask = beginRenderPass.subpasses[renderPassData.subpassIndex].viewMask;
    }

    // collect render pass attachment compatibility info and default viewport/scissor
    for (uint32_t idx = 0; idx < beginRenderPass.renderPassDesc.attachmentCount; ++idx) {
        const RenderHandle imageHandle = beginRenderPass.renderPassDesc.attachmentHandles[idx];
        if (const auto* image = gpuResourceMgr_.GetImage<const GpuImageVk>(imageHandle); image) {
            const auto& platData = image->GetPlatformData();
            renderPassData.renderPassCompatibilityDesc.attachments[idx] = { platData.format, platData.samples,
                platData.aspectFlags };
            if (idx == 0) {
                uint32_t maxFbWidth = platData.extent.width;
                uint32_t maxFbHeight = platData.extent.height;
                const auto& attachmentRef = beginRenderPass.renderPassDesc.attachments[idx];
                if ((attachmentRef.mipLevel >= 1) && (attachmentRef.mipLevel < platData.mipLevels)) {
                    maxFbWidth = Math::max(1u, maxFbWidth >> attachmentRef.mipLevel);
                    maxFbHeight = Math::max(1u, maxFbHeight >> attachmentRef.mipLevel);
                }
                renderPassData.viewport = { 0.0f, 0.0f, static_cast<float>(maxFbWidth), static_cast<float>(maxFbHeight),
                    0.0f, 1.0f };
                renderPassData.scissor = { { 0, 0 }, { maxFbWidth, maxFbHeight } };
                renderPassData.framebufferSize = { maxFbWidth, maxFbHeight };

                // currently the swapchain check and surface rotation are only supported for a single target
                if (RenderHandleUtil::IsSwapchain(imageHandle)) {
                    renderPassData.isSwapchain = true;
                    renderPassData.surfaceTransformFlags = deviceVk.GetSurfaceTransformFlags(imageHandle);
                }
            }
        }
    }

    {
        const RenderPassHashes rpHashes =
            HashBeginRenderPass(beginRenderPass, renderPassData.renderPassCompatibilityDesc, gpuResourceMgr_);
        renderPassData.renderPassCompatibilityHash = rpHashes.renderPassCompatibilityHash;
        renderPassData.renderPassHash = rpHashes.renderPassHash;
        renderPassData.frameBufferHash = rpHashes.frameBufferHash;
    }

    const VkDevice device = ((const DevicePlatformDataVk&)device_.GetPlatformData()).device;
    const uint64_t frameCount = device_.GetFrameCount();

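    // fetch or lazily create the compatibility render pass, framebuffer, and full render pass
    // from their hash-keyed caches; framebuffer and render pass entries record the frame they
    // were last used in so BeginBackendFrame can trim stale ones, while compatibility render
    // passes live for the lifetime of the pool manager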
    {
        auto& cache = renderPassCompatibilityCache_;
        if (const auto iter = cache.hashToElement.find(renderPassData.renderPassCompatibilityHash);
            iter != cache.hashToElement.cend()) {
            renderPassData.renderPassCompatibility = iter->second.renderPass;
        } else { // new
            renderPassData.renderPassCompatibility =
                renderPassCreator_.CreateRenderPassCompatibility(deviceVk, beginRenderPass, renderPassData);
            cache.hashToElement[renderPassData.renderPassCompatibilityHash] = { 0,
                renderPassData.renderPassCompatibility };
#if (RENDER_VULKAN_VALIDATION_ENABLED == 1)
            GpuResourceUtil::DebugObjectNameVk(device_, VK_OBJECT_TYPE_RENDER_PASS,
                VulkanHandleCast<uint64_t>(renderPassData.renderPassCompatibility), debugName_ + "_rp_compatibility");
#endif
        }
    }

    {
        auto& cache = framebufferCache_;
        if (auto iter = cache.hashToElement.find(renderPassData.frameBufferHash); iter != cache.hashToElement.cend()) {
            iter->second.frameUseIndex = frameCount;
            renderPassData.framebuffer = iter->second.frameBuffer;
        } else { // new
            renderPassData.framebuffer =
                CreateFramebuffer(gpuResourceMgr_, beginRenderPass.renderPassDesc, renderPassData, device);
            cache.hashToElement[renderPassData.frameBufferHash] = { frameCount, renderPassData.framebuffer };
#if (RENDER_VULKAN_VALIDATION_ENABLED == 1)
            GpuResourceUtil::DebugObjectNameVk(device_, VK_OBJECT_TYPE_FRAMEBUFFER,
                VulkanHandleCast<uint64_t>(renderPassData.framebuffer),
                debugName_ + "_fbo_" + to_string(renderPassData.framebufferSize.width) + "_" +
                    to_string(renderPassData.framebufferSize.height));
#endif
        }
    }

    {
        auto& cache = renderPassCache_;
        if (const auto iter = cache.hashToElement.find(renderPassData.renderPassHash);
            iter != cache.hashToElement.cend()) {
            iter->second.frameUseIndex = frameCount;
            renderPassData.renderPass = iter->second.renderPass;
        } else { // new
            renderPassData.renderPass = renderPassCreator_.CreateRenderPass(deviceVk, beginRenderPass, renderPassData);
            cache.hashToElement[renderPassData.renderPassHash] = { frameCount, renderPassData.renderPass };
#if (RENDER_VULKAN_VALIDATION_ENABLED == 1)
            GpuResourceUtil::DebugObjectNameVk(device_, VK_OBJECT_TYPE_RENDER_PASS,
                VulkanHandleCast<uint64_t>(renderPassData.renderPass), debugName_ + "_rp");
#endif
        }
    }

    return renderPassData;
}

#if ((RENDER_VALIDATION_ENABLED == 1) || (RENDER_VULKAN_VALIDATION_ENABLED == 1))
void NodeContextPoolManagerVk::SetValidationDebugName(const string_view debugName)
{
    debugName_ = debugName;
}
#endif
RENDER_END_NAMESPACE()