/* Copyright (c) 2018-2019 The Khronos Group Inc.
 * Copyright (c) 2018-2019 Valve Corporation
 * Copyright (c) 2018-2019 LunarG, Inc.
 * Copyright (C) 2018-2019 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

#ifndef VULKAN_GPU_VALIDATION_H
#define VULKAN_GPU_VALIDATION_H

// Class to encapsulate Vulkan Device Memory allocations.
// It allocates device memory in large chunks for efficiency and to avoid
// hitting the device limit of the number of allocations.
// This manager handles only fixed-sized blocks of "data_size" bytes.
// The interface allows the caller to "get" and "put back" blocks.
// The manager allocates and frees chunks as needed.

class CoreChecks;
typedef CoreChecks layer_data;

class GpuDeviceMemoryManager {
  public:
    GpuDeviceMemoryManager(layer_data *dev_data, uint32_t data_size);
    ~GpuDeviceMemoryManager();

    uint32_t GetBlockSize() { return block_size_; }

    VkResult GetBlock(GpuDeviceMemoryBlock *block);
    void PutBackBlock(VkBuffer buffer, VkDeviceMemory memory, uint32_t offset);
    void PutBackBlock(GpuDeviceMemoryBlock &block);
    void FreeAllBlocks();

  private:
    // Define allocation granularity of Vulkan resources.
    // Things like device memory and descriptors are allocated in "chunks".
    // This number should be chosen to try to avoid too many chunk allocations
    // and chunk allocations that are too large.
    static const uint32_t kItemsPerChunk = 512;

    struct MemoryChunk {
        VkBuffer buffer;
        VkDeviceMemory memory;
        std::vector<uint32_t> available_offsets;
    };

    layer_data *dev_data_;
    uint32_t record_size_;
    uint32_t block_size_;
    uint32_t blocks_per_chunk_;
    uint32_t chunk_size_;
    std::list<MemoryChunk> chunk_list_;

    bool MemoryTypeFromProperties(uint32_t typeBits, VkFlags requirements_mask, uint32_t *typeIndex);
    VkResult AllocMemoryChunk(MemoryChunk &chunk);
    void FreeMemoryChunk(MemoryChunk &chunk);
};

// Class to encapsulate Descriptor Set allocation. This manager creates and destroys Descriptor Pools
// as needed to satisfy requests for descriptor sets.
class GpuDescriptorSetManager {
  public:
    GpuDescriptorSetManager(layer_data *dev_data);
    ~GpuDescriptorSetManager();

    VkResult GetDescriptorSets(uint32_t count, VkDescriptorPool *pool, std::vector<VkDescriptorSet> *desc_sets);
    void PutBackDescriptorSet(VkDescriptorPool desc_pool, VkDescriptorSet desc_set);
    void DestroyDescriptorPools();

  private:
    static const uint32_t kItemsPerChunk = 512;
    struct PoolTracker {
        uint32_t size;
        uint32_t used;
    };

    layer_data *dev_data_;
    std::unordered_map<VkDescriptorPool, struct PoolTracker> desc_pool_map_;
};

using mutex_t = std::mutex;
using lock_guard_t = std::lock_guard<mutex_t>;
using unique_lock_t = std::unique_lock<mutex_t>;

#endif  // VULKAN_GPU_VALIDATION_H
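
// A minimal usage sketch of the two managers above, kept in comments so the header
// stays unchanged for the compiler. It assumes GpuDeviceMemoryBlock (declared
// elsewhere in the layer) carries the buffer/memory/offset triple implied by
// PutBackBlock(VkBuffer, VkDeviceMemory, uint32_t); "dev_data" and the record size
// passed to the constructor are placeholders.
//
//     GpuDeviceMemoryManager mem_mgr(dev_data, /*data_size=*/sizeof(uint32_t) * 256);
//     GpuDeviceMemoryBlock block = {};
//     if (mem_mgr.GetBlock(&block) == VK_SUCCESS) {
//         // ... bind block.buffer at block.offset for the GPU-assisted validation pass,
//         // record the draw/dispatch, read back results, then return the block ...
//         mem_mgr.PutBackBlock(block);
//     }
//
//     GpuDescriptorSetManager desc_mgr(dev_data);
//     VkDescriptorPool pool = VK_NULL_HANDLE;
//     std::vector<VkDescriptorSet> desc_sets;
//     if (desc_mgr.GetDescriptorSets(1, &pool, &desc_sets) == VK_SUCCESS) {
//         // ... update desc_sets[0] and bind it into the command buffer ...
//         desc_mgr.PutBackDescriptorSet(pool, desc_sets[0]);
//     }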