/*
 * Copyright (c) 2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "node_context_pool_manager_vk.h"

#include <cstdint>
#include <vulkan/vulkan_core.h>

#include <base/containers/fixed_string.h>
#include <base/math/mathf.h>
#include <base/util/compile_time_hashes.h>
#include <render/device/pipeline_state_desc.h>
#include <render/namespace.h>

#include "device/device.h"
#include "device/gpu_resource_handle_util.h"
#include "device/gpu_resource_manager.h"
#include "nodecontext/node_context_pool_manager.h"
#include "nodecontext/render_command_list.h"
#include "util/log.h"
#include "vulkan/device_vk.h"
#include "vulkan/gpu_image_vk.h"
#include "vulkan/gpu_resource_util_vk.h"
#include "vulkan/pipeline_create_functions_vk.h"
#include "vulkan/validate_vk.h"

using namespace BASE_NS;

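// BASE_NS::hash specializations below allow image layouts and subpass descriptions
// to take part in the render pass / framebuffer cache keys used by this pool manager.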
template<>
uint64_t BASE_NS::hash(const RENDER_NS::ImageLayout& val)
{
    return static_cast<uint64_t>(val);
}
template<>
uint64_t BASE_NS::hash(const RENDER_NS::RenderPassSubpassDesc& subpass)
{
    uint64_t seed = 0;
    HashRange(seed, subpass.inputAttachmentIndices, subpass.inputAttachmentIndices + subpass.inputAttachmentCount);
    HashRange(seed, subpass.colorAttachmentIndices, subpass.colorAttachmentIndices + subpass.colorAttachmentCount);
    HashRange(
        seed, subpass.resolveAttachmentIndices, subpass.resolveAttachmentIndices + subpass.resolveAttachmentCount);
    if (subpass.depthAttachmentCount) {
        HashCombine(seed, static_cast<uint64_t>(subpass.depthAttachmentIndex));
    }
    if (subpass.viewMask > 1U) {
        HashCombine(seed, subpass.viewMask);
    }
    return seed;
}

RENDER_BEGIN_NAMESPACE()
namespace {
struct FBSize {
    uint32_t width { 0 };
    uint32_t height { 0 };
    uint32_t layers { 1 };
};

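// Hashes the attributes that determine Vulkan render pass compatibility (attachment
// formats, sample counts, and subpass structure); pipeline stage masks and, for
// multi-view, the selected layer/mip are folded in as well (see the notes inside).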
inline void HashRenderPassCompatibility(uint64_t& hash, const RenderPassDesc& renderPassDesc,
    const LowLevelRenderPassCompatibilityDescVk& renderPassCompatibilityDesc, const RenderPassSubpassDesc& subpasses,
    const RenderPassAttachmentResourceStates& inputResourceStates)
{
    for (uint32_t idx = 0; idx < renderPassDesc.attachmentCount; ++idx) {
        const LowLevelRenderPassCompatibilityDescVk::Attachment& atCompatibilityDesc =
            renderPassCompatibilityDesc.attachments[idx];
        HashCombine(hash, static_cast<uint64_t>(atCompatibilityDesc.format),
            static_cast<uint64_t>(atCompatibilityDesc.sampleCountFlags));
        // the render pass needs to have matching stage masks (often creates a different hash on the first frame)
        // the soft reset in the render graph tries to prevent creating too many render passes
        HashCombine(hash, static_cast<uint64_t>(inputResourceStates.states[idx].pipelineStageFlags));
        if (subpasses.viewMask > 1U) {
            // with the multi-view extension, the render pass is updated for all mips
            HashCombine(hash, (static_cast<uint64_t>(renderPassDesc.attachments[idx].layer) << 32ULL) |
                (static_cast<uint64_t>(renderPassDesc.attachments[idx].mipLevel)));
        }
    }
    // NOTE: subpass resource states are not hashed
    HashRange(hash, &subpasses, &subpasses + renderPassDesc.subpassCount);
}

inline void HashRenderPassLayouts(
    uint64_t& hash, const RenderPassDesc& renderPassDesc, const RenderPassImageLayouts& renderPassImageLayouts)
{
    for (uint32_t idx = 0; idx < renderPassDesc.attachmentCount; ++idx) {
        HashCombine(hash, renderPassImageLayouts.attachmentInitialLayouts[idx],
            renderPassImageLayouts.attachmentFinalLayouts[idx]);
    }
}

inline void HashFramebuffer(
    uint64_t& hash, const RenderPassDesc& renderPassDesc, const GpuResourceManager& gpuResourceMgr)
{
    for (uint32_t idx = 0; idx < renderPassDesc.attachmentCount; ++idx) {
        const RenderPassDesc::AttachmentDesc& atDesc = renderPassDesc.attachments[idx];
        // generation counters and hashing with handles is not enough
        // the reason is that e.g. shallow handles can point to different GPU handles
        // and have a counter of zero in their index
        // with handle re-use this can lead to situations where the gen counter is zero
        // NOTE: we hash with our own gpuHandle and the vulkan image (in case the vulkan image id is re-used)
        const RenderHandle clientHandle = renderPassDesc.attachmentHandles[idx];
        const EngineResourceHandle gpuHandle = gpuResourceMgr.GetGpuHandle(clientHandle);
        uint64_t imageId = clientHandle.id;
        if (const GpuImageVk* image = gpuResourceMgr.GetImage<GpuImageVk>(clientHandle); image) {
            imageId = VulkanHandleCast<uint64_t>(image->GetPlatformData().image);
        }
        HashCombine(
            hash, gpuHandle.id, imageId, static_cast<uint64_t>(atDesc.layer), static_cast<uint64_t>(atDesc.mipLevel));
    }
}

inline void HashRenderPassOps(uint64_t& hash, const RenderPassDesc& renderPassDesc)
{
    for (uint32_t idx = 0; idx < renderPassDesc.attachmentCount; ++idx) {
        const auto& attachRef = renderPassDesc.attachments[idx];
        const uint64_t opHash = (static_cast<uint64_t>(attachRef.loadOp) << 48ULL) |
            (static_cast<uint64_t>(attachRef.storeOp) << 32ULL) |
            (static_cast<uint64_t>(attachRef.stencilLoadOp) << 16ULL) |
            (static_cast<uint64_t>(attachRef.stencilStoreOp));
        HashCombine(hash, opHash);
    }
}

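// Three hashes are produced per BeginRenderPass: a compatibility hash (key of the
// compatibility render pass cache), a full render pass hash that additionally covers
// image layouts and load/store ops, and a framebuffer hash that additionally covers
// the attached images and their layer/mip selection.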
struct RenderPassHashes {
    uint64_t renderPassCompatibilityHash { 0 };
    uint64_t renderPassHash { 0 }; // continued hashing from compatibility
    uint64_t frameBufferHash { 0 }; // only framebuffer related hash
};

inline RenderPassHashes HashBeginRenderPass(const RenderCommandBeginRenderPass& beginRenderPass,
    const LowLevelRenderPassCompatibilityDescVk& renderPassCompatibilityDesc, const GpuResourceManager& gpuResourceMgr)
{
    RenderPassHashes rpHashes;

    const auto& renderPassDesc = beginRenderPass.renderPassDesc;

    PLUGIN_ASSERT(renderPassDesc.subpassCount > 0);
    HashRenderPassCompatibility(rpHashes.renderPassCompatibilityHash, renderPassDesc, renderPassCompatibilityDesc,
        beginRenderPass.subpasses[0], beginRenderPass.inputResourceStates);

    rpHashes.renderPassHash = rpHashes.renderPassCompatibilityHash; // for starting point
    HashRenderPassLayouts(rpHashes.renderPassHash, renderPassDesc, beginRenderPass.imageLayouts);
    HashRenderPassOps(rpHashes.renderPassHash, renderPassDesc);

    rpHashes.frameBufferHash = rpHashes.renderPassCompatibilityHash; // depends on the compatible render pass
    HashFramebuffer(rpHashes.frameBufferHash, renderPassDesc, gpuResourceMgr);

    return rpHashes;
}

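// Creates a VkFramebuffer against the compatibility render pass. The image view per
// attachment is the base view by default, an all-layer mip view for multi-view targets,
// or a per-mip / per-layer view when a specific mip level or array layer is requested.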
VkFramebuffer CreateFramebuffer(const GpuResourceManager& gpuResourceMgr, const RenderPassDesc& renderPassDesc,
    const LowLevelRenderPassDataVk& renderPassData, const VkDevice device)
{
    const uint32_t attachmentCount = renderPassDesc.attachmentCount;
    PLUGIN_ASSERT(attachmentCount <= PipelineStateConstants::MAX_RENDER_PASS_ATTACHMENT_COUNT);

    // the size is taken from the render pass data
    // there might e.g. be fragment shading rate images whose sizes differ
    FBSize size { renderPassData.framebufferSize.width, renderPassData.framebufferSize.height, 1u };
    VkImageView imageViews[PipelineStateConstants::MAX_RENDER_PASS_ATTACHMENT_COUNT] = {};
    uint32_t viewIndex = 0;

    bool validImageViews = true;
    for (uint32_t idx = 0; idx < attachmentCount; ++idx) {
        const RenderHandle handle = renderPassDesc.attachmentHandles[idx];
        const RenderPassDesc::AttachmentDesc& attachmentDesc = renderPassDesc.attachments[idx];
        if (const GpuImageVk* image = gpuResourceMgr.GetImage<GpuImageVk>(handle); image) {
            const GpuImagePlatformDataVk& plat = image->GetPlatformData();
            const GpuImagePlatformDataViewsVk& imagePlat = image->GetPlatformDataViews();
            imageViews[viewIndex] = plat.imageViewBase;
            if ((renderPassData.viewMask > 1u) && (plat.arrayLayers > 1u)) {
                // multi-view: select the view containing all the layers, but keep the framebuffer layer count at 1
                if ((!imagePlat.mipImageAllLayerViews.empty()) &&
                    (attachmentDesc.mipLevel < static_cast<uint32_t>(imagePlat.mipImageAllLayerViews.size()))) {
                    imageViews[viewIndex] = imagePlat.mipImageAllLayerViews[attachmentDesc.mipLevel];
                } else {
                    imageViews[viewIndex] = plat.imageView;
                }
                size.layers = 1u;
            } else if ((attachmentDesc.mipLevel >= 1) && (attachmentDesc.mipLevel < imagePlat.mipImageViews.size())) {
                imageViews[viewIndex] = imagePlat.mipImageViews[attachmentDesc.mipLevel];
            } else if ((attachmentDesc.layer >= 1) && (attachmentDesc.layer < imagePlat.layerImageViews.size())) {
                imageViews[viewIndex] = imagePlat.layerImageViews[attachmentDesc.layer];
            }
            viewIndex++;
        }
        if (!imageViews[idx]) {
            validImageViews = false;
        }
    }
#if (RENDER_VALIDATION_ENABLED == 1)
    if (!validImageViews || (viewIndex != attachmentCount)) {
        PLUGIN_LOG_E("RENDER_VALIDATION: invalid image attachment in FBO creation");
    }
#endif
    VkFramebuffer framebuffer = VK_NULL_HANDLE;
    if (validImageViews && (viewIndex == attachmentCount)) {
        const VkFramebufferCreateInfo framebufferCreateInfo {
            VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, // sType
            nullptr, // pNext
            VkFramebufferCreateFlags { 0 }, // flags
            renderPassData.renderPassCompatibility, // renderPass
            attachmentCount, // attachmentCount
            imageViews, // pAttachments
            size.width, // width
            size.height, // height
            size.layers, // layers
        };

        VALIDATE_VK_RESULT(vkCreateFramebuffer(device, // device
            &framebufferCreateInfo, // pCreateInfo
            nullptr, // pAllocator
            &framebuffer)); // pFramebuffer
    }

    return framebuffer;
}

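// Creates a command pool with a single pre-allocated command buffer of the requested
// level and an associated semaphore.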
ContextCommandPoolVk CreateContextCommandPool(
    const VkDevice device, const VkCommandBufferLevel cmdBufferLevel, const uint32_t queueFamilyIndex)
{
    constexpr VkCommandPoolCreateFlags commandPoolCreateFlags { 0u };
    const VkCommandPoolCreateInfo commandPoolCreateInfo {
        VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, // sType
        nullptr, // pNext
        commandPoolCreateFlags, // flags
        queueFamilyIndex, // queueFamilyIndex
    };
    constexpr VkSemaphoreCreateFlags semaphoreCreateFlags { 0 };
    constexpr VkSemaphoreCreateInfo semaphoreCreateInfo {
        VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO, // sType
        nullptr, // pNext
        semaphoreCreateFlags, // flags
    };

    ContextCommandPoolVk ctxPool;
    VALIDATE_VK_RESULT(vkCreateCommandPool(device, // device
        &commandPoolCreateInfo, // pCreateInfo
        nullptr, // pAllocator
        &ctxPool.commandPool)); // pCommandPool

    // pre-create command buffers and semaphores
    const VkCommandBufferAllocateInfo commandBufferAllocateInfo {
        VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, // sType
        nullptr, // pNext
        ctxPool.commandPool, // commandPool
        cmdBufferLevel, // level
        1, // commandBufferCount
    };

    VALIDATE_VK_RESULT(vkAllocateCommandBuffers(device, // device
        &commandBufferAllocateInfo, // pAllocateInfo
        &ctxPool.commandBuffer.commandBuffer)); // pCommandBuffers

    VALIDATE_VK_RESULT(vkCreateSemaphore(device, // device
        &semaphoreCreateInfo, // pCreateInfo
        nullptr, // pAllocator
        &ctxPool.commandBuffer.semaphore)); // pSemaphore

    return ctxPool;
}
} // namespace

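// One primary and one secondary command pool are created per buffering slot
// (GetCommandBufferingCount()), so a command buffer from an earlier frame can still
// be in flight while the next one is recorded.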
NodeContextPoolManagerVk::NodeContextPoolManagerVk(
    Device& device, GpuResourceManager& gpuResourceManager, const GpuQueue& gpuQueue)
    : NodeContextPoolManager(), device_ { device }, gpuResourceMgr_ { gpuResourceManager }, gpuQueue_(gpuQueue)
{
    const auto& deviceVk = static_cast<const DeviceVk&>(device_);
    const VkDevice vkDevice = static_cast<const DevicePlatformDataVk&>(device_.GetPlatformData()).device;

    const LowLevelGpuQueueVk lowLevelGpuQueue = deviceVk.GetGpuQueue(gpuQueue);
    const uint32_t bufferingCount = device_.GetCommandBufferingCount();
    if (bufferingCount > 0) {
        // prepare and create command buffers
        commandPools_.resize(bufferingCount);
        commandSecondaryPools_.resize(bufferingCount);
        const uint32_t queueFamilyIndex = lowLevelGpuQueue.queueInfo.queueFamilyIndex;
        for (uint32_t frameIdx = 0; frameIdx < commandPools_.size(); ++frameIdx) {
            commandPools_[frameIdx] = CreateContextCommandPool(
                vkDevice, VkCommandBufferLevel::VK_COMMAND_BUFFER_LEVEL_PRIMARY, queueFamilyIndex);
            commandSecondaryPools_[frameIdx] = CreateContextCommandPool(
                vkDevice, VkCommandBufferLevel::VK_COMMAND_BUFFER_LEVEL_SECONDARY, queueFamilyIndex);
        }
        // NOTE: cmd buffers tagged in first beginFrame
    }
}

NodeContextPoolManagerVk::~NodeContextPoolManagerVk()
{
    const VkDevice device = ((const DevicePlatformDataVk&)device_.GetPlatformData()).device;

    auto DestroyContextCommandPool = [](const auto& device, const auto& commandPools) {
        for (auto& cmdPoolRef : commandPools) {
            if (cmdPoolRef.commandBuffer.semaphore) {
                vkDestroySemaphore(device, // device
                    cmdPoolRef.commandBuffer.semaphore, // semaphore
                    nullptr); // pAllocator
            }
            if (cmdPoolRef.commandPool) {
                vkDestroyCommandPool(device, // device
                    cmdPoolRef.commandPool, // commandPool
                    nullptr); // pAllocator
            }
        }
    };
    DestroyContextCommandPool(device, commandPools_);
    DestroyContextCommandPool(device, commandSecondaryPools_);

    for (auto& ref : framebufferCache_.hashToElement) {
        if (ref.second.frameBuffer != VK_NULL_HANDLE) {
            vkDestroyFramebuffer(device, // device
                ref.second.frameBuffer, // framebuffer
                nullptr); // pAllocator
        }
    }
    for (auto& ref : renderPassCache_.hashToElement) {
        if (ref.second.renderPass != VK_NULL_HANDLE) {
            renderPassCreator_.DestroyRenderPass(device, ref.second.renderPass);
        }
    }
    for (auto& ref : renderPassCompatibilityCache_.hashToElement) {
        if (ref.second.renderPass != VK_NULL_HANDLE) {
            renderPassCreator_.DestroyRenderPass(device, ref.second.renderPass);
        }
    }
}

void NodeContextPoolManagerVk::BeginFrame()
{
#if (RENDER_VALIDATION_ENABLED == 1)
    frameIndexFront_ = device_.GetFrameCount();
#endif
}

void NodeContextPoolManagerVk::BeginBackendFrame()
{
    const uint64_t frameCount = device_.GetFrameCount();

#if (RENDER_VALIDATION_ENABLED == 1)
    PLUGIN_ASSERT(frameIndexBack_ != frameCount); // prevent multiple calls per frame
    frameIndexBack_ = frameCount;
    PLUGIN_ASSERT(frameIndexFront_ == frameIndexBack_);
#endif
#if (RENDER_VULKAN_VALIDATION_ENABLED == 1)
    if (firstFrame_) {
        firstFrame_ = false;
        for (const auto& cmdPoolRef : commandPools_) {
            GpuResourceUtil::DebugObjectNameVk(device_, VK_OBJECT_TYPE_COMMAND_BUFFER,
                VulkanHandleCast<uint64_t>(cmdPoolRef.commandBuffer.commandBuffer), debugName_ + "_cmd_buf");
        }
        // TODO: deferred creation
        for (const auto& cmdPoolRef : commandSecondaryPools_) {
            GpuResourceUtil::DebugObjectNameVk(device_, VK_OBJECT_TYPE_COMMAND_BUFFER,
                VulkanHandleCast<uint64_t>(cmdPoolRef.commandBuffer.commandBuffer), debugName_ + "_secondary_cmd_buf");
        }
    }
#endif

    bufferingIndex_ = (bufferingIndex_ + 1) % (uint32_t)commandPools_.size();

    constexpr uint64_t additionalFrameCount { 2u };
    const auto minAge = device_.GetCommandBufferingCount() + additionalFrameCount;
    const auto ageLimit = (frameCount < minAge) ? 0 : (frameCount - minAge);

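    // Prune cached framebuffers and render passes that have not been used within the
    // last bufferingCount + additionalFrameCount frames; their Vulkan objects are
    // destroyed and the cache entries erased.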
    const VkDevice device = ((const DevicePlatformDataVk&)device_.GetPlatformData()).device;
    {
        auto& cache = framebufferCache_.hashToElement;
        for (auto iter = cache.begin(); iter != cache.end();) {
            if (iter->second.frameUseIndex < ageLimit) {
                if (iter->second.frameBuffer) {
                    vkDestroyFramebuffer(device, iter->second.frameBuffer, nullptr);
                }
                iter = cache.erase(iter);
            } else {
                ++iter;
            }
        }
    }
    {
        auto& cache = renderPassCache_.hashToElement;
        for (auto iter = cache.begin(); iter != cache.end();) {
            if (iter->second.frameUseIndex < ageLimit) {
                if (iter->second.renderPass) {
                    renderPassCreator_.DestroyRenderPass(device, iter->second.renderPass);
                }
                iter = cache.erase(iter);
            } else {
                ++iter;
            }
        }
    }
}

const ContextCommandPoolVk& NodeContextPoolManagerVk::GetContextCommandPool() const
{
#if (RENDER_VALIDATION_ENABLED == 1)
    PLUGIN_ASSERT(frameIndexFront_ == frameIndexBack_);
#endif
    return commandPools_[bufferingIndex_];
}

const ContextCommandPoolVk& NodeContextPoolManagerVk::GetContextSecondaryCommandPool() const
{
#if (RENDER_VALIDATION_ENABLED == 1)
    PLUGIN_ASSERT(frameIndexFront_ == frameIndexBack_);
#endif
    PLUGIN_ASSERT(bufferingIndex_ < static_cast<uint32_t>(commandSecondaryPools_.size()));
    return commandSecondaryPools_[bufferingIndex_];
}

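// Resolves the low level render pass data for a BeginRenderPass command: collects
// attachment compatibility info and the default viewport/scissor from attachment 0,
// then looks up (or creates and caches) the compatibility render pass, the
// framebuffer, and the exact render pass.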
LowLevelRenderPassDataVk NodeContextPoolManagerVk::GetRenderPassData(
    const RenderCommandBeginRenderPass& beginRenderPass)
{
    LowLevelRenderPassDataVk renderPassData;
    renderPassData.subpassIndex = beginRenderPass.subpassStartIndex;

    PLUGIN_ASSERT(renderPassData.subpassIndex < static_cast<uint32_t>(beginRenderPass.subpasses.size()));
    const auto& deviceVk = (const DeviceVk&)device_;
    if (deviceVk.GetCommonDeviceExtensions().multiView) {
        renderPassData.viewMask = beginRenderPass.subpasses[renderPassData.subpassIndex].viewMask;
    }

    // collect render pass attachment compatibility info and default viewport/scissor
    for (uint32_t idx = 0; idx < beginRenderPass.renderPassDesc.attachmentCount; ++idx) {
        const RenderHandle imageHandle = beginRenderPass.renderPassDesc.attachmentHandles[idx];
        if (const auto* image = gpuResourceMgr_.GetImage<const GpuImageVk>(imageHandle); image) {
            const auto& platData = image->GetPlatformData();
            renderPassData.renderPassCompatibilityDesc.attachments[idx] = { platData.format, platData.samples,
                platData.aspectFlags };
            if (idx == 0) {
                uint32_t maxFbWidth = platData.extent.width;
                uint32_t maxFbHeight = platData.extent.height;
                const auto& attachmentRef = beginRenderPass.renderPassDesc.attachments[idx];
                if ((attachmentRef.mipLevel >= 1) && (attachmentRef.mipLevel < platData.mipLevels)) {
                    maxFbWidth = Math::max(1u, maxFbWidth >> attachmentRef.mipLevel);
                    maxFbHeight = Math::max(1u, maxFbHeight >> attachmentRef.mipLevel);
                }
                renderPassData.viewport = { 0.0f, 0.0f, static_cast<float>(maxFbWidth),
                    static_cast<float>(maxFbHeight), 0.0f, 1.0f };
                renderPassData.scissor = { { 0, 0 }, { maxFbWidth, maxFbHeight } };
                renderPassData.framebufferSize = { maxFbWidth, maxFbHeight };

                // currently the swapchain check and surface rotation are only supported for a single render target
                if (RenderHandleUtil::IsSwapchain(imageHandle)) {
                    renderPassData.isSwapchain = true;
                    renderPassData.surfaceTransformFlags = deviceVk.GetSurfaceTransformFlags(imageHandle);
                }
            }
        }
    }

    {
        const RenderPassHashes rpHashes =
            HashBeginRenderPass(beginRenderPass, renderPassData.renderPassCompatibilityDesc, gpuResourceMgr_);
        renderPassData.renderPassCompatibilityHash = rpHashes.renderPassCompatibilityHash;
        renderPassData.renderPassHash = rpHashes.renderPassHash;
        renderPassData.frameBufferHash = rpHashes.frameBufferHash;
    }

    const VkDevice device = ((const DevicePlatformDataVk&)device_.GetPlatformData()).device;
    const uint64_t frameCount = device_.GetFrameCount();

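    // Cache lookups: the compatibility render pass is resolved first since the framebuffer
    // is created against it. The framebuffer and render pass caches refresh frameUseIndex
    // on hits so BeginBackendFrame() can age out unused entries; compatibility render
    // passes are kept until the pool manager is destroyed.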
    {
        auto& cache = renderPassCompatibilityCache_;
        if (const auto iter = cache.hashToElement.find(renderPassData.renderPassCompatibilityHash);
            iter != cache.hashToElement.cend()) {
            renderPassData.renderPassCompatibility = iter->second.renderPass;
        } else { // new
            renderPassData.renderPassCompatibility =
                renderPassCreator_.CreateRenderPassCompatibility(deviceVk, beginRenderPass, renderPassData);
            cache.hashToElement[renderPassData.renderPassCompatibilityHash] = { 0,
                renderPassData.renderPassCompatibility };
#if (RENDER_VULKAN_VALIDATION_ENABLED == 1)
            GpuResourceUtil::DebugObjectNameVk(device_, VK_OBJECT_TYPE_RENDER_PASS,
                VulkanHandleCast<uint64_t>(renderPassData.renderPassCompatibility), debugName_ + "_rp_compatibility");
#endif
        }
    }

    {
        auto& cache = framebufferCache_;
        if (auto iter = cache.hashToElement.find(renderPassData.frameBufferHash); iter != cache.hashToElement.cend()) {
            iter->second.frameUseIndex = frameCount;
            renderPassData.framebuffer = iter->second.frameBuffer;
        } else { // new
            renderPassData.framebuffer =
                CreateFramebuffer(gpuResourceMgr_, beginRenderPass.renderPassDesc, renderPassData, device);
            cache.hashToElement[renderPassData.frameBufferHash] = { frameCount, renderPassData.framebuffer };
#if (RENDER_VULKAN_VALIDATION_ENABLED == 1)
            GpuResourceUtil::DebugObjectNameVk(device_, VK_OBJECT_TYPE_FRAMEBUFFER,
                VulkanHandleCast<uint64_t>(renderPassData.framebuffer),
                debugName_ + "_fbo_" + to_string(renderPassData.framebufferSize.width) + "_" +
                    to_string(renderPassData.framebufferSize.height));
#endif
        }
    }

    {
        auto& cache = renderPassCache_;
        if (const auto iter = cache.hashToElement.find(renderPassData.renderPassHash);
            iter != cache.hashToElement.cend()) {
            iter->second.frameUseIndex = frameCount;
            renderPassData.renderPass = iter->second.renderPass;
        } else { // new
            renderPassData.renderPass = renderPassCreator_.CreateRenderPass(deviceVk, beginRenderPass, renderPassData);
            cache.hashToElement[renderPassData.renderPassHash] = { frameCount, renderPassData.renderPass };
#if (RENDER_VULKAN_VALIDATION_ENABLED == 1)
            GpuResourceUtil::DebugObjectNameVk(device_, VK_OBJECT_TYPE_RENDER_PASS,
                VulkanHandleCast<uint64_t>(renderPassData.renderPass), debugName_ + "_rp");
#endif
        }
    }

    return renderPassData;
}

#if ((RENDER_VALIDATION_ENABLED == 1) || (RENDER_VULKAN_VALIDATION_ENABLED == 1))
void NodeContextPoolManagerVk::SetValidationDebugName(const string_view debugName)
{
    debugName_ = debugName;
}
#endif
RENDER_END_NAMESPACE()