/*
 * Copyright (C) 2023 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "gpu_memory_allocator_vk.h"

#include <vulkan/vulkan_core.h>

// Including the external library header <vk_mem_alloc.h> triggers MSVC warning C4701
// (potentially uninitialized local variable); disable it around the include.
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4701)
#endif
#define VMA_IMPLEMENTATION
#include <VulkanMemoryAllocator/src/vk_mem_alloc.h>
#ifdef _MSC_VER
#pragma warning(pop)
#endif

#include <base/containers/vector.h>
#include <base/util/formats.h>

#if (RENDER_PERF_ENABLED == 1)
#include <core/implementation_uids.h>
#include <core/perf/intf_performance_data_manager.h>
#endif

#include <render/device/gpu_resource_desc.h>
#include <render/namespace.h>

#include "util/log.h"
#include "vulkan/validate_vk.h"

using namespace BASE_NS;

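// Hash specializations for the GPU resource descriptors. Only the fields that influence custom memory
// pool selection (engine creation flags / attachment usage bits, usage flags, memory properties, image
// type) are hashed, so descriptors that can share a pool produce the same key for
// GetBufferPool() / GetImagePool().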
template<>
uint64_t BASE_NS::hash(const RENDER_NS::GpuBufferDesc& desc)
{
    const uint64_t importantEngineCreationFlags =
        (desc.engineCreationFlags &
            RENDER_NS::EngineBufferCreationFlagBits::CORE_ENGINE_BUFFER_CREATION_SINGLE_SHOT_STAGING) |
        (desc.engineCreationFlags &
            RENDER_NS::EngineBufferCreationFlagBits::CORE_ENGINE_BUFFER_CREATION_DYNAMIC_RING_BUFFER);

    uint64_t seed = importantEngineCreationFlags;
    HashCombine(seed, (uint64_t)desc.usageFlags, (uint64_t)desc.memoryPropertyFlags);
    return seed;
}

template<>
uint64_t BASE_NS::hash(const RENDER_NS::GpuImageDesc& desc)
{
    const uint64_t importantImageUsageFlags =
        (desc.usageFlags & RENDER_NS::CORE_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) |
        (desc.usageFlags & RENDER_NS::CORE_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT);

    uint64_t seed = importantImageUsageFlags;
    HashCombine(seed, (uint64_t)desc.imageType, (uint64_t)desc.memoryPropertyFlags);
    return seed;
}

RENDER_BEGIN_NAMESPACE()
namespace {
#if (RENDER_PERF_ENABLED == 1)
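// Aggregates the VMA heap budgets into total block and allocation byte counts and reports them to the
// core performance data manager under "VMA Memory".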
void LogStats(VmaAllocator aAllocator)
{
    if (auto* inst = CORE_NS::GetInstance<CORE_NS::IPerformanceDataManagerFactory>(CORE_NS::UID_PERFORMANCE_FACTORY);
        inst) {
        CORE_NS::IPerformanceDataManager* pdm = inst->Get("Memory");

        VmaBudget budgets[VK_MAX_MEMORY_HEAPS] {};
        vmaGetHeapBudgets(aAllocator, budgets);
        VmaStatistics stats {};
        for (const VmaBudget& budget : budgets) {
            stats.blockBytes += budget.statistics.blockBytes;
            stats.allocationBytes += budget.statistics.allocationBytes;
        }
        pdm->UpdateData("VMA Memory", "AllGraphicsMemory", int64_t(stats.blockBytes));
        pdm->UpdateData("VMA Memory", "GraphicsMemoryInUse", int64_t(stats.allocationBytes));
        pdm->UpdateData("VMA Memory", "GraphicsMemoryNotInUse", int64_t(stats.blockBytes - stats.allocationBytes));
    }
}
#endif

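// Collects the Vulkan entry points VMA needs. Core 1.0 functions come from the statically linked loader;
// the "get memory requirements 2" / "bind memory 2" / "memory properties 2" variants are fetched at
// runtime so the same code works whether they come from Vulkan 1.1 or the corresponding KHR extensions.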
VmaVulkanFunctions GetVmaVulkanFunctions(VkInstance instance, VkDevice device)
{
    VmaVulkanFunctions funs {};
    funs.vkGetInstanceProcAddr = vkGetInstanceProcAddr;
    funs.vkGetDeviceProcAddr = vkGetDeviceProcAddr;
    funs.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties;
    funs.vkGetPhysicalDeviceMemoryProperties = vkGetPhysicalDeviceMemoryProperties;
    funs.vkAllocateMemory = vkAllocateMemory;
    funs.vkFreeMemory = vkFreeMemory;
    funs.vkMapMemory = vkMapMemory;
    funs.vkUnmapMemory = vkUnmapMemory;
    funs.vkFlushMappedMemoryRanges = vkFlushMappedMemoryRanges;
    funs.vkInvalidateMappedMemoryRanges = vkInvalidateMappedMemoryRanges;
    funs.vkBindBufferMemory = vkBindBufferMemory;
    funs.vkBindImageMemory = vkBindImageMemory;
    funs.vkGetBufferMemoryRequirements = vkGetBufferMemoryRequirements;
    funs.vkGetImageMemoryRequirements = vkGetImageMemoryRequirements;
    funs.vkCreateBuffer = vkCreateBuffer;
    funs.vkDestroyBuffer = vkDestroyBuffer;
    funs.vkCreateImage = vkCreateImage;
    funs.vkDestroyImage = vkDestroyImage;
    funs.vkCmdCopyBuffer = vkCmdCopyBuffer;
#if VK_VERSION_1_1
    // NOTE: on some platforms the Vulkan library only exports the 1.0 entry points. To avoid per-platform
    // variation, always fetch these function pointers at runtime.
    funs.vkGetBufferMemoryRequirements2KHR =
        (PFN_vkGetBufferMemoryRequirements2)vkGetDeviceProcAddr(device, "vkGetBufferMemoryRequirements2");
    funs.vkGetImageMemoryRequirements2KHR =
        (PFN_vkGetImageMemoryRequirements2)vkGetDeviceProcAddr(device, "vkGetImageMemoryRequirements2");
    funs.vkBindBufferMemory2KHR = (PFN_vkBindBufferMemory2)vkGetDeviceProcAddr(device, "vkBindBufferMemory2");
    funs.vkBindImageMemory2KHR = (PFN_vkBindImageMemory2)vkGetDeviceProcAddr(device, "vkBindImageMemory2");
    funs.vkGetPhysicalDeviceMemoryProperties2KHR = (PFN_vkGetPhysicalDeviceMemoryProperties2)vkGetInstanceProcAddr(
        instance, "vkGetPhysicalDeviceMemoryProperties2");
#else
    // VK_KHR_get_memory_requirements2
    funs.vkGetBufferMemoryRequirements2KHR =
        (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(device, "vkGetBufferMemoryRequirements2KHR");
    funs.vkGetImageMemoryRequirements2KHR =
        (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(device, "vkGetImageMemoryRequirements2KHR");
    // VK_KHR_bind_memory2
    funs.vkBindBufferMemory2KHR = (PFN_vkBindBufferMemory2KHR)vkGetDeviceProcAddr(device, "vkBindBufferMemory2KHR");
    funs.vkBindImageMemory2KHR = (PFN_vkBindImageMemory2KHR)vkGetDeviceProcAddr(device, "vkBindImageMemory2KHR");
    // VK_KHR_get_physical_device_properties2
    funs.vkGetPhysicalDeviceMemoryProperties2KHR = (PFN_vkGetPhysicalDeviceMemoryProperties2KHR)vkGetInstanceProcAddr(
        instance, "vkGetPhysicalDeviceMemoryProperties2KHR");
#endif
    return funs;
}
} // namespace

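// Creates the VMA allocator (externally synchronized by the engine) and any custom memory pools
// requested in the create info.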
PlatformGpuMemoryAllocator::PlatformGpuMemoryAllocator(VkInstance instance, VkPhysicalDevice physicalDevice,
    VkDevice device, const GpuMemoryAllocatorCreateInfo& createInfo)
{
    {
        // 0 -> 1.0
        uint32_t vulkanApiVersion = 0;
        // synchronized by the engine
        VmaAllocatorCreateFlags vmaCreateFlags =
            VmaAllocatorCreateFlagBits::VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT;
#if (RENDER_VULKAN_RT_ENABLED == 1)
        vmaCreateFlags |= VmaAllocatorCreateFlagBits::VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT;
        vulkanApiVersion = VK_API_VERSION_1_2;
#endif
        VmaVulkanFunctions funs = GetVmaVulkanFunctions(instance, device);
        VmaAllocatorCreateInfo allocatorInfo = {};
        allocatorInfo.vulkanApiVersion = vulkanApiVersion;
        allocatorInfo.flags = vmaCreateFlags;
        allocatorInfo.preferredLargeHeapBlockSize = createInfo.preferredLargeHeapBlockSize;
        allocatorInfo.instance = instance;
        allocatorInfo.physicalDevice = physicalDevice;
        allocatorInfo.device = device;
        allocatorInfo.pVulkanFunctions = &funs;
        vmaCreateAllocator(&allocatorInfo, &allocator_);
    }

    // custom pools
    for (const auto& ref : createInfo.customPools) {
        PLUGIN_ASSERT(ref.resourceType == MemoryAllocatorResourceType::GPU_BUFFER ||
                      ref.resourceType == MemoryAllocatorResourceType::GPU_IMAGE);

        if (ref.resourceType == MemoryAllocatorResourceType::GPU_BUFFER) {
            CreatePoolForBuffers(ref);
        } else if (ref.resourceType == MemoryAllocatorResourceType::GPU_IMAGE) {
            CreatePoolForImages(ref);
        }
    }
}

PlatformGpuMemoryAllocator::~PlatformGpuMemoryAllocator()
{
#if (RENDER_PERF_ENABLED == 1)
    PLUGIN_ASSERT(memoryDebugStruct_.buffer == 0);
    PLUGIN_ASSERT(memoryDebugStruct_.image == 0);
#endif

    for (auto ref : customGpuBufferPools_) {
        if (ref != VK_NULL_HANDLE) {
            vmaDestroyPool(allocator_, // allocator
                ref);                  // pool
        }
    }
    for (auto ref : customGpuImagePools_) {
        if (ref != VK_NULL_HANDLE) {
            vmaDestroyPool(allocator_, // allocator
                ref);                  // pool
        }
    }

    vmaDestroyAllocator(allocator_);
}

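// Allocates a buffer through VMA. If a custom pool was requested and it runs out of device memory
// (e.g. the pool block size is exceeded), the allocation is retried from the default pools.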
void PlatformGpuMemoryAllocator::CreateBuffer(const VkBufferCreateInfo& bufferCreateInfo,
    const VmaAllocationCreateInfo& allocationCreateInfo, VkBuffer& buffer, VmaAllocation& allocation,
    VmaAllocationInfo& allocationInfo)
{
    VkResult result = vmaCreateBuffer(allocator_, // allocator
        &bufferCreateInfo,                        // pBufferCreateInfo
        &allocationCreateInfo,                    // pAllocationCreateInfo
        &buffer,                                  // pBuffer
        &allocation,                              // pAllocation
        &allocationInfo);                         // pAllocationInfo
    if (result != VK_SUCCESS) {
        if ((result == VK_ERROR_OUT_OF_DEVICE_MEMORY) && (allocationCreateInfo.pool != VK_NULL_HANDLE)) {
            PLUGIN_LOG_D(
                "Out of buffer memory with custom memory pool (i.e. block size might be exceeded), bytesize: %u, "
                "falling back to default memory pool",
                (uint32_t)bufferCreateInfo.size);
            VmaAllocationCreateInfo fallBackAllocationCreateInfo = allocationCreateInfo;
            fallBackAllocationCreateInfo.pool = VK_NULL_HANDLE;
            result = vmaCreateBuffer(
                allocator_, &bufferCreateInfo, &fallBackAllocationCreateInfo, &buffer, &allocation, &allocationInfo);
        }
        if (result != VK_SUCCESS) {
            PLUGIN_LOG_E("VKResult not VK_SUCCESS in buffer memory allocation(result : %i), allocation bytesize : %u",
                (int32_t)result, (uint32_t)bufferCreateInfo.size);
            PLUGIN_ASSERT(false);
        }
    }

#if (RENDER_PERF_ENABLED == 1)
    memoryDebugStruct_.buffer += (uint64_t)allocation->GetSize();
    LogStats(allocator_);
#endif
}

void PlatformGpuMemoryAllocator::DestroyBuffer(VkBuffer buffer, VmaAllocation allocation)
{
#if (RENDER_PERF_ENABLED == 1)
    const uint64_t byteSize = (uint64_t)allocation->GetSize();
#endif

    vmaDestroyBuffer(allocator_, // allocator
        buffer,                  // buffer
        allocation);             // allocation

#if (RENDER_PERF_ENABLED == 1)
    memoryDebugStruct_.buffer -= byteSize;
    LogStats(allocator_);
#endif
}

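// Allocates an image through VMA with the same custom-pool-to-default-pool fallback as CreateBuffer().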
void PlatformGpuMemoryAllocator::CreateImage(const VkImageCreateInfo& imageCreateInfo,
    const VmaAllocationCreateInfo& allocationCreateInfo, VkImage& image, VmaAllocation& allocation,
    VmaAllocationInfo& allocationInfo)
{
    VkResult result = vmaCreateImage(allocator_, // allocator
        &imageCreateInfo,                        // pImageCreateInfo
        &allocationCreateInfo,                   // pAllocationCreateInfo
        &image,                                  // pImage
        &allocation,                             // pAllocation
        &allocationInfo);                        // pAllocationInfo
    if ((result == VK_ERROR_OUT_OF_DEVICE_MEMORY) && (allocationCreateInfo.pool != VK_NULL_HANDLE)) {
        PLUGIN_LOG_D("Out of memory for image with custom memory pool (i.e. block size might be exceeded), falling "
                     "back to default memory pool");
        VmaAllocationCreateInfo fallBackAllocationCreateInfo = allocationCreateInfo;
        fallBackAllocationCreateInfo.pool = VK_NULL_HANDLE;
        result = vmaCreateImage(
            allocator_, &imageCreateInfo, &fallBackAllocationCreateInfo, &image, &allocation, &allocationInfo);
    }
    if (result != VK_SUCCESS) {
        PLUGIN_LOG_E("VKResult not VK_SUCCESS in image memory allocation(result : %i)", (int32_t)result);
        PLUGIN_ASSERT(false);
    }

#if (RENDER_PERF_ENABLED == 1)
    memoryDebugStruct_.image += (uint64_t)allocation->GetSize();
    LogStats(allocator_);
#endif
}

void PlatformGpuMemoryAllocator::DestroyImage(VkImage image, VmaAllocation allocation)
{
#if (RENDER_PERF_ENABLED == 1)
    const uint64_t byteSize = (uint64_t)allocation->GetSize();
#endif

    vmaDestroyImage(allocator_, // allocator
        image,                  // image
        allocation);            // allocation

#if (RENDER_PERF_ENABLED == 1)
    memoryDebugStruct_.image -= byteSize;
    LogStats(allocator_);
#endif
}

uint32_t PlatformGpuMemoryAllocator::GetMemoryTypeProperties(const uint32_t memoryType)
{
    VkMemoryPropertyFlags memPropertyFlags = 0;
    vmaGetMemoryTypeProperties(allocator_, memoryType, &memPropertyFlags);
    return (uint32_t)memPropertyFlags;
}

void PlatformGpuMemoryAllocator::FlushAllocation(
    const VmaAllocation& allocation, const VkDeviceSize offset, const VkDeviceSize byteSize)
{
    vmaFlushAllocation(allocator_, allocation, offset, byteSize);
}

void PlatformGpuMemoryAllocator::InvalidateAllocation(
    const VmaAllocation& allocation, const VkDeviceSize offset, const VkDeviceSize byteSize)
{
    vmaInvalidateAllocation(allocator_, allocation, offset, byteSize);
}

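// Custom pool lookup by descriptor hash; returns VK_NULL_HANDLE when no matching custom pool exists,
// in which case VMA falls back to its default pools.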
VmaPool PlatformGpuMemoryAllocator::GetBufferPool(const GpuBufferDesc& desc) const
{
    VmaPool result = VK_NULL_HANDLE;
    const uint64_t hash = BASE_NS::hash(desc);
    if (const auto iter = hashToGpuBufferPoolIndex_.find(hash); iter != hashToGpuBufferPoolIndex_.cend()) {
        PLUGIN_ASSERT(iter->second < (uint32_t)customGpuBufferPools_.size());
        result = customGpuBufferPools_[iter->second];
    }
    return result;
}

VmaPool PlatformGpuMemoryAllocator::GetImagePool(const GpuImageDesc& desc) const
{
    VmaPool result = VK_NULL_HANDLE;
    const uint64_t hash = BASE_NS::hash(desc);
    if (const auto iter = hashToGpuImagePoolIndex_.find(hash); iter != hashToGpuImagePoolIndex_.cend()) {
        PLUGIN_ASSERT(iter->second < (uint32_t)customGpuImagePools_.size());
        result = customGpuImagePools_[iter->second];
    }
    return result;
}

void* PlatformGpuMemoryAllocator::MapMemory(VmaAllocation allocation)
{
    void* data { nullptr };
    VALIDATE_VK_RESULT(vmaMapMemory(allocator_, allocation, &data));
    return data;
}

void PlatformGpuMemoryAllocator::UnmapMemory(VmaAllocation allocation)
{
    vmaUnmapMemory(allocator_, allocation);
}

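// Creates a custom VMA pool for buffers matching the given descriptor. A reference VkBufferCreateInfo
// with a nominal size is used only to resolve the memory type index; the pool is then registered under
// the descriptor hash so GetBufferPool() can find it.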
void PlatformGpuMemoryAllocator::CreatePoolForBuffers(const GpuMemoryAllocatorCustomPool& customPool)
{
    PLUGIN_ASSERT(customPool.resourceType == MemoryAllocatorResourceType::GPU_BUFFER);

    const auto& buf = customPool.gpuResourceDesc.buffer;

    VmaAllocationCreateInfo allocationCreateInfo = {};
    allocationCreateInfo.preferredFlags = (VkMemoryPropertyFlags)buf.memoryPropertyFlags;
    allocationCreateInfo.requiredFlags = allocationCreateInfo.preferredFlags;
    if (allocationCreateInfo.preferredFlags ==
        ((VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) | (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT))) {
        allocationCreateInfo.flags = VmaAllocationCreateFlagBits::VMA_ALLOCATION_CREATE_MAPPED_BIT;
    }

    VkBufferCreateInfo bufferCreateInfo = {};
    bufferCreateInfo.sType = VkStructureType::VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    bufferCreateInfo.usage = VkBufferUsageFlagBits(buf.usageFlags);
    bufferCreateInfo.sharingMode = VkSharingMode::VK_SHARING_MODE_EXCLUSIVE;
    bufferCreateInfo.size = 1024u; // default reference size

    uint32_t memoryTypeIndex = 0;
    VALIDATE_VK_RESULT(vmaFindMemoryTypeIndexForBufferInfo(allocator_, // allocator
        &bufferCreateInfo,                                             // pBufferCreateInfo
        &allocationCreateInfo,                                         // pAllocationCreateInfo
        &memoryTypeIndex));                                            // pMemoryTypeIndex

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.flags = (customPool.linearAllocationAlgorithm)
                               ? VmaPoolCreateFlagBits::VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT
                               : (VkFlags)0;
    poolCreateInfo.memoryTypeIndex = memoryTypeIndex;
    poolCreateInfo.blockSize = customPool.blockSize;

    const uint64_t hash = BASE_NS::hash(buf);
    const size_t hashCount = hashToGpuBufferPoolIndex_.count(hash);

    PLUGIN_ASSERT_MSG(hashCount == 0, "similar buffer pool already created");
    if (hashCount == 0) {
        VmaPool pool = VK_NULL_HANDLE;
        VALIDATE_VK_RESULT(vmaCreatePool(allocator_, // allocator
            &poolCreateInfo,                         // pCreateInfo
            &pool));                                 // pPool

        customGpuBufferPools_.push_back(pool);
        hashToGpuBufferPoolIndex_[hash] = (uint32_t)customGpuBufferPools_.size() - 1;
#if (RENDER_PERF_ENABLED == 1)
        customGpuBufferPoolNames_.push_back(customPool.name);
#endif
    }
}

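// Creates a custom VMA pool for images matching the given descriptor. A reference VkImageCreateInfo
// (R8G8B8A8_UNORM, nominal extent) is used only to resolve the memory type index; the pool is registered
// under the descriptor hash for GetImagePool().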
void PlatformGpuMemoryAllocator::CreatePoolForImages(const GpuMemoryAllocatorCustomPool& customPool)
{
    PLUGIN_ASSERT(customPool.resourceType == MemoryAllocatorResourceType::GPU_IMAGE);

    const auto& img = customPool.gpuResourceDesc.image;

    VmaAllocationCreateInfo allocationCreateInfo = {};
    allocationCreateInfo.preferredFlags = (VkMemoryPropertyFlags)img.memoryPropertyFlags;
    allocationCreateInfo.requiredFlags = allocationCreateInfo.preferredFlags;

    VkImageCreateInfo imageCreateInfo = {};
    imageCreateInfo.sType = VkStructureType::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    imageCreateInfo.usage = VkImageUsageFlagBits(img.usageFlags);
    imageCreateInfo.imageType = VkImageType(img.imageType);
    imageCreateInfo.format = VkFormat::VK_FORMAT_R8G8B8A8_UNORM;
    imageCreateInfo.sharingMode = VkSharingMode::VK_SHARING_MODE_EXCLUSIVE;
    imageCreateInfo.extent = VkExtent3D { 1024u, 1024u, 1 }; // default reference size
    imageCreateInfo.arrayLayers = 1;
    imageCreateInfo.mipLevels = 1;
    imageCreateInfo.samples = VkSampleCountFlagBits::VK_SAMPLE_COUNT_1_BIT;

    uint32_t memoryTypeIndex = 0;
    VALIDATE_VK_RESULT(vmaFindMemoryTypeIndexForImageInfo(allocator_, // allocator
        &imageCreateInfo,                                             // pImageCreateInfo
        &allocationCreateInfo,                                        // pAllocationCreateInfo
        &memoryTypeIndex));                                           // pMemoryTypeIndex

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.flags = (customPool.linearAllocationAlgorithm)
                               ? VmaPoolCreateFlagBits::VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT
                               : (VkFlags)0;
    poolCreateInfo.memoryTypeIndex = memoryTypeIndex;

    const uint64_t hash = BASE_NS::hash(img);
    const size_t hashCount = hashToGpuImagePoolIndex_.count(hash);

    PLUGIN_ASSERT_MSG(hashCount == 0, "similar image pool already created");
    if (hashCount == 0) {
        VmaPool pool = VK_NULL_HANDLE;
        VALIDATE_VK_RESULT(vmaCreatePool(allocator_, // allocator
            &poolCreateInfo,                         // pCreateInfo
            &pool));                                 // pPool

        customGpuImagePools_.push_back(pool);
        hashToGpuImagePoolIndex_[hash] = (uint32_t)customGpuImagePools_.size() - 1;
#if (RENDER_PERF_ENABLED == 1)
        customGpuImagePoolNames_.push_back(customPool.name);
#endif
    }
}

#if (RENDER_PERF_ENABLED == 1)
string PlatformGpuMemoryAllocator::GetBufferPoolDebugName(const GpuBufferDesc& desc) const
{
    const size_t hash = BASE_NS::hash(desc);
    if (const auto iter = hashToGpuBufferPoolIndex_.find(hash); iter != hashToGpuBufferPoolIndex_.cend()) {
        PLUGIN_ASSERT(iter->second < (uint32_t)customGpuBufferPools_.size());
        return customGpuBufferPoolNames_[iter->second];
    }
    return {};
}

string PlatformGpuMemoryAllocator::GetImagePoolDebugName(const GpuImageDesc& desc) const
{
    const size_t hash = BASE_NS::hash(desc);
    if (const auto iter = hashToGpuImagePoolIndex_.find(hash); iter != hashToGpuImagePoolIndex_.cend()) {
        PLUGIN_ASSERT(iter->second < (uint32_t)customGpuImagePools_.size());
        return customGpuImagePoolNames_[iter->second];
    }
    return {};
}
#endif
RENDER_END_NAMESPACE()