/* Copyright (c) 2015-2019 The Khronos Group Inc.
 * Copyright (c) 2015-2019 Valve Corporation
 * Copyright (c) 2015-2019 LunarG, Inc.
 * Copyright (C) 2015-2019 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Courtney Goeltzenleuchter <courtneygo@google.com>
 * Author: Tobin Ehlis <tobine@google.com>
 * Author: Chris Forbes <chrisf@ijw.co.nz>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Dave Houlton <daveh@lunarg.com>
 */
#ifndef CORE_VALIDATION_TYPES_H_
#define CORE_VALIDATION_TYPES_H_

#include "hash_vk_types.h"
#include "vk_safe_struct.h"
#include "vulkan/vulkan.h"
#include "vk_layer_logging.h"
#include "vk_object_types.h"
#include "vk_extension_helper.h"
#include "vk_typemap_helper.h"
#include "convert_to_renderpass2.h"
#include <atomic>
#include <functional>
#include <list>
#include <map>
#include <memory>
#include <set>
#include <string.h>
#include <unordered_map>
#include <unordered_set>
#include <vector>

#ifdef VK_USE_PLATFORM_ANDROID_KHR
#include "android_ndk_types.h"
#endif  // VK_USE_PLATFORM_ANDROID_KHR

class CoreChecks;
typedef CoreChecks layer_data;
typedef CoreChecks instance_layer_data;

// Fwd declarations -- including descriptor_set.h creates an ugly include loop
namespace cvdescriptorset {
class DescriptorSetLayoutDef;
class DescriptorSetLayout;
class DescriptorSet;
}  // namespace cvdescriptorset

struct GLOBAL_CB_NODE;

enum CALL_STATE {
    UNCALLED,       // Function has not been called
    QUERY_COUNT,    // Function called once to query a count
    QUERY_DETAILS,  // Function called w/ a count to query details
};
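
// Illustrative sketch (not part of the layer): CALL_STATE mirrors the Vulkan
// "two-call idiom", in which an enumeration entry point is first called with a
// null pointer to query a count (UNCALLED -> QUERY_COUNT) and then called
// again with storage for that many elements (QUERY_COUNT -> QUERY_DETAILS):
//
//     uint32_t count = 0;
//     vkGetSwapchainImagesKHR(device, swapchain, &count, nullptr);        // QUERY_COUNT
//     std::vector<VkImage> images(count);
//     vkGetSwapchainImagesKHR(device, swapchain, &count, images.data());  // QUERY_DETAILS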

class BASE_NODE {
   public:
    // Track when object is being used by an in-flight command buffer
    std::atomic_int in_use;
    // Track command buffers that this object is bound to
    //  binding initialized when cmd referencing object is bound to command buffer
    //  binding removed when command buffer is reset or destroyed
    // When an object is destroyed, any bound cbs are set to INVALID
    std::unordered_set<GLOBAL_CB_NODE *> cb_bindings;

    BASE_NODE() { in_use.store(0); }
};

// Track command pools and their command buffers
struct COMMAND_POOL_NODE : public BASE_NODE {
    VkCommandPoolCreateFlags createFlags;
    uint32_t queueFamilyIndex;
    // Cmd buffers allocated from this pool
    std::unordered_set<VkCommandBuffer> commandBuffers;
};

// Utilities for barriers and the command pool
template <typename Barrier>
static bool IsTransferOp(const Barrier *barrier) {
    return barrier->srcQueueFamilyIndex != barrier->dstQueueFamilyIndex;
}

template <typename Barrier, bool assume_transfer = false>
static bool TempIsReleaseOp(const COMMAND_POOL_NODE *pool, const Barrier *barrier) {
    return (assume_transfer || IsTransferOp(barrier)) && (pool->queueFamilyIndex == barrier->srcQueueFamilyIndex);
}

template <typename Barrier, bool assume_transfer = false>
static bool IsAcquireOp(const COMMAND_POOL_NODE *pool, const Barrier *barrier) {
    return (assume_transfer || IsTransferOp(barrier)) && (pool->queueFamilyIndex == barrier->dstQueueFamilyIndex);
}

inline bool IsSpecial(const uint32_t queue_family_index) {
    return (queue_family_index == VK_QUEUE_FAMILY_EXTERNAL_KHR) || (queue_family_index == VK_QUEUE_FAMILY_FOREIGN_EXT);
}
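
// Illustrative sketch (handles and family indices are assumptions): a barrier
// whose source and destination queue families differ is a queue family
// ownership (QFO) transfer, and the pool recording it determines whether it is
// the release or the acquire half:
//
//     VkImageMemoryBarrier barrier = {};
//     barrier.srcQueueFamilyIndex = 0;  // e.g. graphics family
//     barrier.dstQueueFamilyIndex = 1;  // e.g. transfer family
//     // For a COMMAND_POOL_NODE with queueFamilyIndex == 0:
//     //   IsTransferOp(&barrier)           -> true
//     //   TempIsReleaseOp(pool, &barrier)  -> true   (recorded on the releasing family)
//     //   IsAcquireOp(pool, &barrier)      -> false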

// Generic wrapper for vulkan objects
struct VK_OBJECT {
    uint64_t handle;
    VulkanObjectType type;
};

inline bool operator==(VK_OBJECT a, VK_OBJECT b) NOEXCEPT { return a.handle == b.handle && a.type == b.type; }

namespace std {
template <>
struct hash<VK_OBJECT> {
    size_t operator()(VK_OBJECT obj) const NOEXCEPT { return hash<uint64_t>()(obj.handle) ^ hash<uint32_t>()(obj.type); }
};
}  // namespace std

// Flags describing requirements imposed by the pipeline on a descriptor. These
// can't be checked at pipeline creation time as they depend on the Image or
// ImageView bound.
enum descriptor_req {
    DESCRIPTOR_REQ_VIEW_TYPE_1D = 1 << VK_IMAGE_VIEW_TYPE_1D,
    DESCRIPTOR_REQ_VIEW_TYPE_1D_ARRAY = 1 << VK_IMAGE_VIEW_TYPE_1D_ARRAY,
    DESCRIPTOR_REQ_VIEW_TYPE_2D = 1 << VK_IMAGE_VIEW_TYPE_2D,
    DESCRIPTOR_REQ_VIEW_TYPE_2D_ARRAY = 1 << VK_IMAGE_VIEW_TYPE_2D_ARRAY,
    DESCRIPTOR_REQ_VIEW_TYPE_3D = 1 << VK_IMAGE_VIEW_TYPE_3D,
    DESCRIPTOR_REQ_VIEW_TYPE_CUBE = 1 << VK_IMAGE_VIEW_TYPE_CUBE,
    DESCRIPTOR_REQ_VIEW_TYPE_CUBE_ARRAY = 1 << VK_IMAGE_VIEW_TYPE_CUBE_ARRAY,

    DESCRIPTOR_REQ_ALL_VIEW_TYPE_BITS = (1 << (VK_IMAGE_VIEW_TYPE_END_RANGE + 1)) - 1,

    DESCRIPTOR_REQ_SINGLE_SAMPLE = 2 << VK_IMAGE_VIEW_TYPE_END_RANGE,
    DESCRIPTOR_REQ_MULTI_SAMPLE = DESCRIPTOR_REQ_SINGLE_SAMPLE << 1,

    DESCRIPTOR_REQ_COMPONENT_TYPE_FLOAT = DESCRIPTOR_REQ_MULTI_SAMPLE << 1,
    DESCRIPTOR_REQ_COMPONENT_TYPE_SINT = DESCRIPTOR_REQ_COMPONENT_TYPE_FLOAT << 1,
    DESCRIPTOR_REQ_COMPONENT_TYPE_UINT = DESCRIPTOR_REQ_COMPONENT_TYPE_SINT << 1,
};
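
// Illustrative sketch: a shader that samples a single-sampled, float-component
// 2D texture would impose the combined requirement
//
//     auto req = descriptor_req(DESCRIPTOR_REQ_VIEW_TYPE_2D | DESCRIPTOR_REQ_SINGLE_SAMPLE |
//                               DESCRIPTOR_REQ_COMPONENT_TYPE_FLOAT);
//
// which is checked at draw time against the VkImageView actually bound to the
// descriptor, since the view type, sample count, and format are unknown at
// pipeline creation.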

struct DESCRIPTOR_POOL_STATE : BASE_NODE {
    VkDescriptorPool pool;
    uint32_t maxSets;        // Max descriptor sets allowed in this pool
    uint32_t availableSets;  // Available descriptor sets in this pool

    safe_VkDescriptorPoolCreateInfo createInfo;
    std::unordered_set<cvdescriptorset::DescriptorSet *> sets;  // Collection of all sets in this pool
    std::map<uint32_t, uint32_t> maxDescriptorTypeCount;        // Max # of descriptors of each type in this pool
    std::map<uint32_t, uint32_t> availableDescriptorTypeCount;  // Available # of descriptors of each type in this pool

    DESCRIPTOR_POOL_STATE(const VkDescriptorPool pool, const VkDescriptorPoolCreateInfo *pCreateInfo)
        : pool(pool),
          maxSets(pCreateInfo->maxSets),
          availableSets(pCreateInfo->maxSets),
          createInfo(pCreateInfo),
          maxDescriptorTypeCount(),
          availableDescriptorTypeCount() {
        // Collect maximums per descriptor type.
        for (uint32_t i = 0; i < createInfo.poolSizeCount; ++i) {
            uint32_t typeIndex = static_cast<uint32_t>(createInfo.pPoolSizes[i].type);
            // The same descriptor type can appear several times, so accumulate the counts
            maxDescriptorTypeCount[typeIndex] += createInfo.pPoolSizes[i].descriptorCount;
            availableDescriptorTypeCount[typeIndex] = maxDescriptorTypeCount[typeIndex];
        }
    }
};

// Generic memory binding struct to track the memory an object is bound to
struct MEM_BINDING {
    VkDeviceMemory mem;
    VkDeviceSize offset;
    VkDeviceSize size;
};

struct BufferBinding {
    VkBuffer buffer;
    VkDeviceSize size;
    VkDeviceSize offset;
};

struct IndexBufferBinding : BufferBinding {
    VkIndexType index_type;
};

inline bool operator==(MEM_BINDING a, MEM_BINDING b) NOEXCEPT { return a.mem == b.mem && a.offset == b.offset && a.size == b.size; }

namespace std {
template <>
struct hash<MEM_BINDING> {
    size_t operator()(MEM_BINDING mb) const NOEXCEPT {
        auto intermediate = hash<uint64_t>()(reinterpret_cast<uint64_t &>(mb.mem)) ^ hash<uint64_t>()(mb.offset);
        return intermediate ^ hash<uint64_t>()(mb.size);
    }
};
}  // namespace std

// Superclass for bindable object state (currently images and buffers)
class BINDABLE : public BASE_NODE {
   public:
    bool sparse;  // Is this object being bound with sparse memory or not?
    // Non-sparse binding data
    MEM_BINDING binding;
    // Memory requirements for this BINDABLE
    VkMemoryRequirements requirements;
    // bool to track if memory requirements were checked
    bool memory_requirements_checked;
    // Sparse binding data, initially just tracking MEM_BINDING per mem object
    //  There's more data for sparse bindings so need better long-term solution
    // TODO : Need to update solution to track all sparse binding data
    std::unordered_set<MEM_BINDING> sparse_bindings;

    std::unordered_set<VkDeviceMemory> bound_memory_set_;

    BINDABLE()
        : sparse(false), binding{}, requirements{}, memory_requirements_checked(false), sparse_bindings{}, bound_memory_set_{} {}

    // Update the cached set of memory bindings.
    // Code that changes binding.mem or sparse_bindings must call UpdateBoundMemorySet()
    void UpdateBoundMemorySet() {
        bound_memory_set_.clear();
        if (!sparse) {
            bound_memory_set_.insert(binding.mem);
        } else {
            for (auto sb : sparse_bindings) {
                bound_memory_set_.insert(sb.mem);
            }
        }
    }

    // Return unordered set of memory objects that are bound
    // Instead of creating a set from scratch each query, return the cached one
    const std::unordered_set<VkDeviceMemory> &GetBoundMemory() const { return bound_memory_set_; }
};
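
// Minimal illustrative sketch (field values are assumptions): code that
// rebinds non-sparse memory must refresh the cached set before it is queried:
//
//     buffer_state->binding = {mem, memoryOffset, buffer_state->requirements.size};
//     buffer_state->UpdateBoundMemorySet();
//     // GetBoundMemory() now reflects {mem} without rebuilding a set per query.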

class BUFFER_STATE : public BINDABLE {
   public:
    VkBuffer buffer;
    VkBufferCreateInfo createInfo;
    BUFFER_STATE(VkBuffer buff, const VkBufferCreateInfo *pCreateInfo) : buffer(buff), createInfo(*pCreateInfo) {
        if ((createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) && (createInfo.queueFamilyIndexCount > 0)) {
            // Deep-copy the queue family index array so the state outlives the app's pointer
            uint32_t *pQueueFamilyIndices = new uint32_t[createInfo.queueFamilyIndexCount];
            for (uint32_t i = 0; i < createInfo.queueFamilyIndexCount; i++) {
                pQueueFamilyIndices[i] = pCreateInfo->pQueueFamilyIndices[i];
            }
            createInfo.pQueueFamilyIndices = pQueueFamilyIndices;
        }

        if (createInfo.flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT) {
            sparse = true;
        }
    }

    BUFFER_STATE(BUFFER_STATE const &rh_obj) = delete;

    ~BUFFER_STATE() {
        if ((createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) && (createInfo.queueFamilyIndexCount > 0)) {
            delete[] createInfo.pQueueFamilyIndices;
            createInfo.pQueueFamilyIndices = nullptr;
        }
    }
};

class BUFFER_VIEW_STATE : public BASE_NODE {
   public:
    VkBufferView buffer_view;
    VkBufferViewCreateInfo create_info;
    BUFFER_VIEW_STATE(VkBufferView bv, const VkBufferViewCreateInfo *ci) : buffer_view(bv), create_info(*ci) {}
    BUFFER_VIEW_STATE(const BUFFER_VIEW_STATE &rh_obj) = delete;
};

struct SAMPLER_STATE : public BASE_NODE {
    VkSampler sampler;
    VkSamplerCreateInfo createInfo;
    VkSamplerYcbcrConversion samplerConversion = VK_NULL_HANDLE;

    SAMPLER_STATE(const VkSampler *ps, const VkSamplerCreateInfo *pci) : sampler(*ps), createInfo(*pci) {
        auto *conversionInfo = lvl_find_in_chain<VkSamplerYcbcrConversionInfo>(pci->pNext);
        if (conversionInfo) samplerConversion = conversionInfo->conversion;
    }
};

class IMAGE_STATE : public BINDABLE {
   public:
    VkImage image;
    VkImageCreateInfo createInfo;
    bool valid;                   // For a swapchain image, backing-memory validity is tracked here since it has no DEVICE_MEM_INFO
    bool acquired;                // If this is a swapchain image, has it been acquired by the app.
    bool shared_presentable;      // True for a front-buffered swapchain image
    bool layout_locked;           // A front-buffered image that has been presented can never have its layout transitioned
    bool get_sparse_reqs_called;  // Track if GetImageSparseMemoryRequirements() has been called for this image
    bool sparse_metadata_required;  // Track if sparse metadata aspect is required for this image
    bool sparse_metadata_bound;     // Track if sparse metadata aspect is bound to this image
    bool imported_ahb;              // True if image was imported from an Android Hardware Buffer
    bool has_ahb_format;            // True if image was created with an external Android format
    uint64_t ahb_format;            // External Android format, if provided
    std::vector<VkSparseImageMemoryRequirements> sparse_requirements;
    IMAGE_STATE(VkImage img, const VkImageCreateInfo *pCreateInfo)
        : image(img),
          createInfo(*pCreateInfo),
          valid(false),
          acquired(false),
          shared_presentable(false),
          layout_locked(false),
          get_sparse_reqs_called(false),
          sparse_metadata_required(false),
          sparse_metadata_bound(false),
          imported_ahb(false),
          has_ahb_format(false),
          ahb_format(0),
          sparse_requirements{} {
        if ((createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) && (createInfo.queueFamilyIndexCount > 0)) {
            // Deep-copy the queue family index array so the state outlives the app's pointer
            uint32_t *pQueueFamilyIndices = new uint32_t[createInfo.queueFamilyIndexCount];
            for (uint32_t i = 0; i < createInfo.queueFamilyIndexCount; i++) {
                pQueueFamilyIndices[i] = pCreateInfo->pQueueFamilyIndices[i];
            }
            createInfo.pQueueFamilyIndices = pQueueFamilyIndices;
        }

        if (createInfo.flags & VK_IMAGE_CREATE_SPARSE_BINDING_BIT) {
            sparse = true;
        }
    }

    IMAGE_STATE(IMAGE_STATE const &rh_obj) = delete;

    ~IMAGE_STATE() {
        if ((createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) && (createInfo.queueFamilyIndexCount > 0)) {
            delete[] createInfo.pQueueFamilyIndices;
            createInfo.pQueueFamilyIndices = nullptr;
        }
    }
};

class IMAGE_VIEW_STATE : public BASE_NODE {
   public:
    VkImageView image_view;
    VkImageViewCreateInfo create_info;
    VkSamplerYcbcrConversion samplerConversion;  // Handle of the ycbcr sampler conversion the image was created with, if any
    IMAGE_VIEW_STATE(VkImageView iv, const VkImageViewCreateInfo *ci)
        : image_view(iv), create_info(*ci), samplerConversion(VK_NULL_HANDLE) {
        auto *conversionInfo = lvl_find_in_chain<VkSamplerYcbcrConversionInfo>(create_info.pNext);
        if (conversionInfo) samplerConversion = conversionInfo->conversion;
    }
    IMAGE_VIEW_STATE(const IMAGE_VIEW_STATE &rh_obj) = delete;
};

struct MemRange {
    VkDeviceSize offset;
    VkDeviceSize size;
};

struct MEMORY_RANGE {
    uint64_t handle;
    bool image;   // True for image, false for buffer
    bool linear;  // True for buffers and linear images
    VkDeviceMemory memory;
    VkDeviceSize start;
    VkDeviceSize size;
    VkDeviceSize end;  // Store this pre-computed for simplicity
    // Set of ptrs to every range aliased with this one
    std::unordered_set<MEMORY_RANGE *> aliases;
};

// Data struct for tracking memory object
struct DEVICE_MEM_INFO : public BASE_NODE {
    void *object;  // Dispatchable object used to create this memory (device or swapchain)
    VkDeviceMemory mem;
    VkMemoryAllocateInfo alloc_info;
    bool is_dedicated;
    VkBuffer dedicated_buffer;
    VkImage dedicated_image;
    bool is_export;
    VkExternalMemoryHandleTypeFlags export_handle_type_flags;
    std::unordered_set<VK_OBJECT> obj_bindings;               // objects bound to this memory
    std::unordered_map<uint64_t, MEMORY_RANGE> bound_ranges;  // Map of object to its binding range
    // Convenience sets of image/buffer handles to speed up iterating over images or buffers independently
    std::unordered_set<uint64_t> bound_images;
    std::unordered_set<uint64_t> bound_buffers;

    MemRange mem_range;
    void *shadow_copy_base;    // Base of layer's allocation for guard band, data, and alignment space
    void *shadow_copy;         // Pointer to start of guard-band data before mapped region
    uint64_t shadow_pad_size;  // Size of the guard-band data before and after actual data. It MUST be a
                               // multiple of limits.minMemoryMapAlignment
    void *p_driver_data;       // Pointer to application's actual memory

    DEVICE_MEM_INFO(void *disp_object, const VkDeviceMemory in_mem, const VkMemoryAllocateInfo *p_alloc_info)
        : object(disp_object),
          mem(in_mem),
          alloc_info(*p_alloc_info),
          is_dedicated(false),
          dedicated_buffer(VK_NULL_HANDLE),
          dedicated_image(VK_NULL_HANDLE),
          is_export(false),
          export_handle_type_flags(0),
          mem_range{},
          shadow_copy_base(0),
          shadow_copy(0),
          shadow_pad_size(0),
          p_driver_data(0) {}
};

class SWAPCHAIN_NODE {
   public:
    safe_VkSwapchainCreateInfoKHR createInfo;
    VkSwapchainKHR swapchain;
    std::vector<VkImage> images;
    bool retired = false;
    bool shared_presentable = false;
    CALL_STATE vkGetSwapchainImagesKHRState = UNCALLED;
    uint32_t get_swapchain_image_count = 0;
    SWAPCHAIN_NODE(const VkSwapchainCreateInfoKHR *pCreateInfo, VkSwapchainKHR swapchain)
        : createInfo(pCreateInfo), swapchain(swapchain) {}
};

class IMAGE_CMD_BUF_LAYOUT_NODE {
   public:
    IMAGE_CMD_BUF_LAYOUT_NODE() = default;
    IMAGE_CMD_BUF_LAYOUT_NODE(VkImageLayout initialLayoutInput, VkImageLayout layoutInput)
        : initialLayout(initialLayoutInput), layout(layoutInput) {}

    VkImageLayout initialLayout;
    VkImageLayout layout;
};

// Store the render pass DAG: one node per subpass, with edges to the subpasses it depends on
struct DAGNode {
    uint32_t pass;
    std::vector<uint32_t> prev;
    std::vector<uint32_t> next;
};

struct RENDER_PASS_STATE : public BASE_NODE {
    VkRenderPass renderPass;
    safe_VkRenderPassCreateInfo2KHR createInfo;
    std::vector<std::vector<uint32_t>> self_dependencies;
    std::vector<DAGNode> subpassToNode;
    std::unordered_map<uint32_t, bool> attachment_first_read;

    RENDER_PASS_STATE(VkRenderPassCreateInfo2KHR const *pCreateInfo) : createInfo(pCreateInfo) {}
    RENDER_PASS_STATE(VkRenderPassCreateInfo const *pCreateInfo) { ConvertVkRenderPassCreateInfoToV2KHR(pCreateInfo, &createInfo); }
};

// vkCmd tracking -- complete as of header 1.0.68
// please keep in "none, then sorted" order
// Note: grepping vulkan.h for VKAPI_CALL.*vkCmd will return all functions except vkEndCommandBuffer

enum CMD_TYPE {
    CMD_NONE,
    CMD_BEGINQUERY,
    CMD_BEGINRENDERPASS,
    CMD_BEGINRENDERPASS2KHR,
    CMD_BINDDESCRIPTORSETS,
    CMD_BINDINDEXBUFFER,
    CMD_BINDPIPELINE,
    CMD_BINDSHADINGRATEIMAGE,
    CMD_BINDVERTEXBUFFERS,
    CMD_BLITIMAGE,
    CMD_CLEARATTACHMENTS,
    CMD_CLEARCOLORIMAGE,
    CMD_CLEARDEPTHSTENCILIMAGE,
    CMD_COPYBUFFER,
    CMD_COPYBUFFERTOIMAGE,
    CMD_COPYIMAGE,
    CMD_COPYIMAGETOBUFFER,
    CMD_COPYQUERYPOOLRESULTS,
    CMD_DEBUGMARKERBEGINEXT,
    CMD_DEBUGMARKERENDEXT,
    CMD_DEBUGMARKERINSERTEXT,
    CMD_DISPATCH,
    CMD_DISPATCHBASEKHX,
    CMD_DISPATCHINDIRECT,
    CMD_DRAW,
    CMD_DRAWINDEXED,
    CMD_DRAWINDEXEDINDIRECT,
    CMD_DRAWINDEXEDINDIRECTCOUNTAMD,
    CMD_DRAWINDEXEDINDIRECTCOUNTKHR,
    CMD_DRAWINDIRECT,
    CMD_DRAWINDIRECTCOUNTAMD,
    CMD_DRAWINDIRECTCOUNTKHR,
    CMD_DRAWMESHTASKSNV,
    CMD_DRAWMESHTASKSINDIRECTNV,
    CMD_DRAWMESHTASKSINDIRECTCOUNTNV,
    CMD_ENDCOMMANDBUFFER,  // Should be the last command in any RECORDED cmd buffer
    CMD_ENDQUERY,
    CMD_ENDRENDERPASS,
    CMD_ENDRENDERPASS2KHR,
    CMD_EXECUTECOMMANDS,
    CMD_FILLBUFFER,
    CMD_NEXTSUBPASS,
    CMD_NEXTSUBPASS2KHR,
    CMD_PIPELINEBARRIER,
    CMD_PROCESSCOMMANDSNVX,
    CMD_PUSHCONSTANTS,
    CMD_PUSHDESCRIPTORSETKHR,
    CMD_PUSHDESCRIPTORSETWITHTEMPLATEKHR,
    CMD_RESERVESPACEFORCOMMANDSNVX,
    CMD_RESETEVENT,
    CMD_RESETQUERYPOOL,
    CMD_RESOLVEIMAGE,
    CMD_SETBLENDCONSTANTS,
    CMD_SETDEPTHBIAS,
    CMD_SETDEPTHBOUNDS,
    CMD_SETDEVICEMASKKHX,
    CMD_SETDISCARDRECTANGLEEXT,
    CMD_SETEVENT,
    CMD_SETEXCLUSIVESCISSOR,
    CMD_SETLINEWIDTH,
    CMD_SETSAMPLELOCATIONSEXT,
    CMD_SETSCISSOR,
    CMD_SETSTENCILCOMPAREMASK,
    CMD_SETSTENCILREFERENCE,
    CMD_SETSTENCILWRITEMASK,
    CMD_SETVIEWPORT,
    CMD_SETVIEWPORTSHADINGRATEPALETTE,
    CMD_SETVIEWPORTWSCALINGNV,
    CMD_UPDATEBUFFER,
    CMD_WAITEVENTS,
    CMD_WRITETIMESTAMP,
};

enum CB_STATE {
    CB_NEW,                 // Newly created CB w/o any cmds
    CB_RECORDING,           // BeginCB has been called on this CB
    CB_RECORDED,            // EndCB has been called on this CB
    CB_INVALID_COMPLETE,    // had a complete recording, but was since invalidated
    CB_INVALID_INCOMPLETE,  // fouled before recording was completed
};

// CB Status -- used to track status of various bindings on cmd buffer objects
typedef VkFlags CBStatusFlags;
enum CBStatusFlagBits {
    // clang-format off
    CBSTATUS_NONE                   = 0x00000000,   // No status is set
    CBSTATUS_LINE_WIDTH_SET         = 0x00000001,   // Line width has been set
    CBSTATUS_DEPTH_BIAS_SET         = 0x00000002,   // Depth bias has been set
    CBSTATUS_BLEND_CONSTANTS_SET    = 0x00000004,   // Blend constants state has been set
    CBSTATUS_DEPTH_BOUNDS_SET       = 0x00000008,   // Depth bounds state object has been set
    CBSTATUS_STENCIL_READ_MASK_SET  = 0x00000010,   // Stencil read mask has been set
    CBSTATUS_STENCIL_WRITE_MASK_SET = 0x00000020,   // Stencil write mask has been set
    CBSTATUS_STENCIL_REFERENCE_SET  = 0x00000040,   // Stencil reference has been set
    CBSTATUS_VIEWPORT_SET           = 0x00000080,
    CBSTATUS_SCISSOR_SET            = 0x00000100,
    CBSTATUS_INDEX_BUFFER_BOUND     = 0x00000200,   // Index buffer has been set
    CBSTATUS_EXCLUSIVE_SCISSOR_SET  = 0x00000400,
    CBSTATUS_SHADING_RATE_PALETTE_SET = 0x00000800,
    CBSTATUS_ALL_STATE_SET          = 0x00000DFF,   // All state set (intentionally exclude index buffer)
    // clang-format on
};
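
// Illustrative sketch: at draw time, dynamic state that the bound pipeline
// does not provide statically must have been set on the command buffer; a
// check over these flags might look like (cb->status and cb->static_status are
// the GLOBAL_CB_NODE fields defined below):
//
//     CBStatusFlags required = CBSTATUS_VIEWPORT_SET | CBSTATUS_SCISSOR_SET;
//     CBStatusFlags missing = required & ~(cb->status | cb->static_status);
//     if (missing) { /* report un-set dynamic state before the draw */ }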

struct QueryObject {
    VkQueryPool pool;
    uint32_t index;
};

inline bool operator==(const QueryObject &query1, const QueryObject &query2) {
    return (query1.pool == query2.pool && query1.index == query2.index);
}

namespace std {
template <>
struct hash<QueryObject> {
    size_t operator()(QueryObject query) const NOEXCEPT {
        return hash<uint64_t>()((uint64_t)(query.pool)) ^ hash<uint32_t>()(query.index);
    }
};
}  // namespace std

struct DrawData {
    std::vector<BufferBinding> vertex_buffer_bindings;
};

struct ImageSubresourcePair {
    VkImage image;
    bool hasSubresource;
    VkImageSubresource subresource;
};

inline bool operator==(const ImageSubresourcePair &img1, const ImageSubresourcePair &img2) {
    if (img1.image != img2.image || img1.hasSubresource != img2.hasSubresource) return false;
    return !img1.hasSubresource ||
           (img1.subresource.aspectMask == img2.subresource.aspectMask && img1.subresource.mipLevel == img2.subresource.mipLevel &&
            img1.subresource.arrayLayer == img2.subresource.arrayLayer);
}

namespace std {
template <>
struct hash<ImageSubresourcePair> {
    size_t operator()(ImageSubresourcePair img) const NOEXCEPT {
        size_t hashVal = hash<uint64_t>()(reinterpret_cast<uint64_t &>(img.image));
        hashVal ^= hash<bool>()(img.hasSubresource);
        if (img.hasSubresource) {
            hashVal ^= hash<uint32_t>()(reinterpret_cast<uint32_t &>(img.subresource.aspectMask));
            hashVal ^= hash<uint32_t>()(img.subresource.mipLevel);
            hashVal ^= hash<uint32_t>()(img.subresource.arrayLayer);
        }
        return hashVal;
    }
};
}  // namespace std

// Canonical dictionary for PushConstantRanges
using PushConstantRangesDict = hash_util::Dictionary<PushConstantRanges>;
using PushConstantRangesId = PushConstantRangesDict::Id;

// Canonical dictionary for the pipeline layout's layout of descriptorsetlayouts
using DescriptorSetLayoutDef = cvdescriptorset::DescriptorSetLayoutDef;
using DescriptorSetLayoutId = std::shared_ptr<const DescriptorSetLayoutDef>;
using PipelineLayoutSetLayoutsDef = std::vector<DescriptorSetLayoutId>;
using PipelineLayoutSetLayoutsDict =
    hash_util::Dictionary<PipelineLayoutSetLayoutsDef, hash_util::IsOrderedContainer<PipelineLayoutSetLayoutsDef>>;
using PipelineLayoutSetLayoutsId = PipelineLayoutSetLayoutsDict::Id;

// Defines/stores a compatibility definition for set N
// The "layout layout" must store at least set+1 entries, but only the first set+1 are considered for hash and equality testing
// Note: the "canonical" data are referenced by Id, not including handle or device specific state
// Note: hash and equality only consider layout_id entries [0, set] for determining uniqueness
struct PipelineLayoutCompatDef {
    uint32_t set;
    PushConstantRangesId push_constant_ranges;
    PipelineLayoutSetLayoutsId set_layouts_id;
    PipelineLayoutCompatDef(const uint32_t set_index, const PushConstantRangesId pcr_id, const PipelineLayoutSetLayoutsId sl_id)
        : set(set_index), push_constant_ranges(pcr_id), set_layouts_id(sl_id) {}
    size_t hash() const;
    bool operator==(const PipelineLayoutCompatDef &other) const;
};

// Canonical dictionary for PipelineLayoutCompat records
using PipelineLayoutCompatDict = hash_util::Dictionary<PipelineLayoutCompatDef, hash_util::HasHashMember<PipelineLayoutCompatDef>>;
using PipelineLayoutCompatId = PipelineLayoutCompatDict::Id;

// Store layouts and pushconstants for PipelineLayout
struct PIPELINE_LAYOUT_NODE {
    VkPipelineLayout layout;
    std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> set_layouts;
    PushConstantRangesId push_constant_ranges;
    std::vector<PipelineLayoutCompatId> compat_for_set;

    PIPELINE_LAYOUT_NODE() : layout(VK_NULL_HANDLE), set_layouts{}, push_constant_ranges{}, compat_for_set{} {}

    void reset() {
        layout = VK_NULL_HANDLE;
        set_layouts.clear();
        push_constant_ranges.reset();
        compat_for_set.clear();
    }
};

static inline bool CompatForSet(uint32_t set, const std::vector<PipelineLayoutCompatId> &a,
                                const std::vector<PipelineLayoutCompatId> &b) {
    bool result = (set < a.size()) && (set < b.size()) && (a[set] == b[set]);
    return result;
}

static inline bool CompatForSet(uint32_t set, const PIPELINE_LAYOUT_NODE *a, const PIPELINE_LAYOUT_NODE *b) {
    // Intentionally have a result variable to simplify debugging
    bool result = a && b && CompatForSet(set, a->compat_for_set, b->compat_for_set);
    return result;
}
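
// Illustrative sketch (variable names are assumptions): two pipeline layouts
// are compatible for set N when their canonical compat records match at index
// N, because the canonical Ids already fold in the descriptor set layouts
// [0, N] and the push constant ranges:
//
//     if (!CompatForSet(set, bound_pipeline_layout_node, descriptor_pipeline_layout_node)) {
//         // the descriptor set bound at 'set' is not usable with this pipeline
//     }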

class PIPELINE_STATE : public BASE_NODE {
   public:
    VkPipeline pipeline;
    safe_VkGraphicsPipelineCreateInfo graphicsPipelineCI;
    // Hold shared ptr to RP in case RP itself is destroyed
    std::shared_ptr<RENDER_PASS_STATE> rp_state;
    safe_VkComputePipelineCreateInfo computePipelineCI;
    safe_VkRayTracingPipelineCreateInfoNV raytracingPipelineCI;
    // Flag of which shader stages are active for this pipeline
    uint32_t active_shaders;
    uint32_t duplicate_shaders;
    // Capture which slots (set#->bindings) are actually used by the shaders of this pipeline
    std::unordered_map<uint32_t, std::map<uint32_t, descriptor_req>> active_slots;
    // Vtx input info (if any)
    std::vector<VkVertexInputBindingDescription> vertex_binding_descriptions_;
    std::vector<VkVertexInputAttributeDescription> vertex_attribute_descriptions_;
    std::unordered_map<uint32_t, uint32_t> vertex_binding_to_index_map_;
    std::vector<VkPipelineColorBlendAttachmentState> attachments;
    bool blendConstantsEnabled;  // Blend constants enabled for any attachments
    PIPELINE_LAYOUT_NODE pipeline_layout;
    VkPrimitiveTopology topology_at_rasterizer;

    // Default constructor
    PIPELINE_STATE()
        : pipeline{},
          graphicsPipelineCI{},
          rp_state(nullptr),
          computePipelineCI{},
          raytracingPipelineCI{},
          active_shaders(0),
          duplicate_shaders(0),
          active_slots(),
          vertex_binding_descriptions_(),
          vertex_attribute_descriptions_(),
          vertex_binding_to_index_map_(),
          attachments(),
          blendConstantsEnabled(false),
          pipeline_layout(),
          topology_at_rasterizer{} {}

    void initGraphicsPipeline(const VkGraphicsPipelineCreateInfo *pCreateInfo, std::shared_ptr<RENDER_PASS_STATE> &&rpstate) {
        bool uses_color_attachment = false;
        bool uses_depthstencil_attachment = false;
        if (pCreateInfo->subpass < rpstate->createInfo.subpassCount) {
            const auto &subpass = rpstate->createInfo.pSubpasses[pCreateInfo->subpass];

            for (uint32_t i = 0; i < subpass.colorAttachmentCount; ++i) {
                if (subpass.pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) {
                    uses_color_attachment = true;
                    break;
                }
            }

            if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
                uses_depthstencil_attachment = true;
            }
        }
        graphicsPipelineCI.initialize(pCreateInfo, uses_color_attachment, uses_depthstencil_attachment);
        // Make sure compute pipeline is null
        VkComputePipelineCreateInfo emptyComputeCI = {};
        computePipelineCI.initialize(&emptyComputeCI);
        for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
            const VkPipelineShaderStageCreateInfo *pPSSCI = &pCreateInfo->pStages[i];
            this->duplicate_shaders |= this->active_shaders & pPSSCI->stage;
            this->active_shaders |= pPSSCI->stage;
        }
        if (graphicsPipelineCI.pVertexInputState) {
            const auto pVICI = graphicsPipelineCI.pVertexInputState;
            if (pVICI->vertexBindingDescriptionCount) {
                this->vertex_binding_descriptions_ = std::vector<VkVertexInputBindingDescription>(
                    pVICI->pVertexBindingDescriptions, pVICI->pVertexBindingDescriptions + pVICI->vertexBindingDescriptionCount);

                this->vertex_binding_to_index_map_.reserve(pVICI->vertexBindingDescriptionCount);
                for (uint32_t i = 0; i < pVICI->vertexBindingDescriptionCount; ++i) {
                    this->vertex_binding_to_index_map_[pVICI->pVertexBindingDescriptions[i].binding] = i;
                }
            }
            if (pVICI->vertexAttributeDescriptionCount) {
                this->vertex_attribute_descriptions_ = std::vector<VkVertexInputAttributeDescription>(
                    pVICI->pVertexAttributeDescriptions,
                    pVICI->pVertexAttributeDescriptions + pVICI->vertexAttributeDescriptionCount);
            }
        }
        if (graphicsPipelineCI.pColorBlendState) {
            const auto pCBCI = graphicsPipelineCI.pColorBlendState;
            if (pCBCI->attachmentCount) {
                this->attachments = std::vector<VkPipelineColorBlendAttachmentState>(pCBCI->pAttachments,
                                                                                     pCBCI->pAttachments + pCBCI->attachmentCount);
            }
        }
        if (graphicsPipelineCI.pInputAssemblyState) {
            topology_at_rasterizer = graphicsPipelineCI.pInputAssemblyState->topology;
        }
        rp_state = rpstate;
    }

    void initComputePipeline(const VkComputePipelineCreateInfo *pCreateInfo) {
        computePipelineCI.initialize(pCreateInfo);
        // Make sure gfx pipeline is null
        VkGraphicsPipelineCreateInfo emptyGraphicsCI = {};
        graphicsPipelineCI.initialize(&emptyGraphicsCI, false, false);
        switch (computePipelineCI.stage.stage) {
            case VK_SHADER_STAGE_COMPUTE_BIT:
                this->active_shaders |= VK_SHADER_STAGE_COMPUTE_BIT;
                break;
            default:
                // TODO : Flag error
                break;
        }
    }
    void initRayTracingPipelineNV(const VkRayTracingPipelineCreateInfoNV *pCreateInfo) {
        raytracingPipelineCI.initialize(pCreateInfo);
        // Make sure gfx and compute pipelines are null
        VkGraphicsPipelineCreateInfo emptyGraphicsCI = {};
        VkComputePipelineCreateInfo emptyComputeCI = {};
        computePipelineCI.initialize(&emptyComputeCI);
        graphicsPipelineCI.initialize(&emptyGraphicsCI, false, false);
        switch (raytracingPipelineCI.pStages->stage) {
            case VK_SHADER_STAGE_RAYGEN_BIT_NV:
                this->active_shaders |= VK_SHADER_STAGE_RAYGEN_BIT_NV;
                break;
            case VK_SHADER_STAGE_ANY_HIT_BIT_NV:
                this->active_shaders |= VK_SHADER_STAGE_ANY_HIT_BIT_NV;
                break;
            case VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV:
                this->active_shaders |= VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV;
                break;
            case VK_SHADER_STAGE_MISS_BIT_NV:
                this->active_shaders |= VK_SHADER_STAGE_MISS_BIT_NV;
                break;
            case VK_SHADER_STAGE_INTERSECTION_BIT_NV:
                this->active_shaders |= VK_SHADER_STAGE_INTERSECTION_BIT_NV;
                break;
            case VK_SHADER_STAGE_CALLABLE_BIT_NV:
                this->active_shaders |= VK_SHADER_STAGE_CALLABLE_BIT_NV;
                break;
            default:
                // TODO : Flag error
                break;
        }
    }
};

// Track last states that are bound per pipeline bind point (Gfx & Compute)
struct LAST_BOUND_STATE {
    LAST_BOUND_STATE() { reset(); }  // must define default constructor for portability reasons
    PIPELINE_STATE *pipeline_state;
    VkPipelineLayout pipeline_layout;
    // Track each set that has been bound
    // Ordered bound set tracking where index is set# that given set is bound to
    std::vector<cvdescriptorset::DescriptorSet *> boundDescriptorSets;
    std::unique_ptr<cvdescriptorset::DescriptorSet> push_descriptor_set;
    // one dynamic offset per dynamic descriptor bound to this CB
    std::vector<std::vector<uint32_t>> dynamicOffsets;
    std::vector<PipelineLayoutCompatId> compat_id_for_set;

    void reset() {
        pipeline_state = nullptr;
        pipeline_layout = VK_NULL_HANDLE;
        boundDescriptorSets.clear();
        push_descriptor_set = nullptr;
        dynamicOffsets.clear();
        compat_id_for_set.clear();
    }
};

// Types to store queue family ownership (QFO) Transfers

// Common to image and buffer memory barriers
template <typename Handle, typename Barrier>
struct QFOTransferBarrierBase {
    using HandleType = Handle;
    using BarrierType = Barrier;
    struct Tag {};
    HandleType handle = VK_NULL_HANDLE;
    uint32_t srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    uint32_t dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;

    QFOTransferBarrierBase() = default;
    QFOTransferBarrierBase(const BarrierType &barrier, const HandleType &resource_handle)
        : handle(resource_handle),
          srcQueueFamilyIndex(barrier.srcQueueFamilyIndex),
          dstQueueFamilyIndex(barrier.dstQueueFamilyIndex) {}

    hash_util::HashCombiner base_hash_combiner() const {
        hash_util::HashCombiner hc;
        hc << srcQueueFamilyIndex << dstQueueFamilyIndex << handle;
        return hc;
    }

    bool operator==(const QFOTransferBarrierBase &rhs) const {
        return (srcQueueFamilyIndex == rhs.srcQueueFamilyIndex) && (dstQueueFamilyIndex == rhs.dstQueueFamilyIndex) &&
               (handle == rhs.handle);
    }
};

template <typename Barrier>
struct QFOTransferBarrier {};

// Image barrier specific implementation
template <>
struct QFOTransferBarrier<VkImageMemoryBarrier> : public QFOTransferBarrierBase<VkImage, VkImageMemoryBarrier> {
    using BaseType = QFOTransferBarrierBase<VkImage, VkImageMemoryBarrier>;
    VkImageLayout oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    VkImageLayout newLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    VkImageSubresourceRange subresourceRange;

    QFOTransferBarrier() = default;
    QFOTransferBarrier(const BarrierType &barrier)
        : BaseType(barrier, barrier.image),
          oldLayout(barrier.oldLayout),
          newLayout(barrier.newLayout),
          subresourceRange(barrier.subresourceRange) {}
    size_t hash() const {
        // Ignoring the layout information for the purpose of the hash, as we're interested in QFO release/acquisition w.r.t.
        // the subresource affected, and layout transitions are currently validated on another path
        auto hc = base_hash_combiner() << subresourceRange;
        return hc.Value();
    }
    bool operator==(const QFOTransferBarrier<BarrierType> &rhs) const {
        // Ignoring layout w.r.t. equality. See comment in hash above.
        return (static_cast<BaseType>(*this) == static_cast<BaseType>(rhs)) && (subresourceRange == rhs.subresourceRange);
    }
    // TODO: codegen a comprehensive compile-time type -> string (and/or other traits) template family
    static const char *BarrierName() { return "VkImageMemoryBarrier"; }
    static const char *HandleName() { return "VkImage"; }
    // UNASSIGNED-VkImageMemoryBarrier-image-00001 QFO transfer image barrier must not duplicate QFO recorded in command buffer
    static const char *ErrMsgDuplicateQFOInCB() { return "UNASSIGNED-VkImageMemoryBarrier-image-00001"; }
    // UNASSIGNED-VkImageMemoryBarrier-image-00002 QFO transfer image barrier must not duplicate QFO submitted in batch
    static const char *ErrMsgDuplicateQFOInSubmit() { return "UNASSIGNED-VkImageMemoryBarrier-image-00002"; }
    // UNASSIGNED-VkImageMemoryBarrier-image-00003 QFO transfer image barrier must not duplicate QFO submitted previously
    static const char *ErrMsgDuplicateQFOSubmitted() { return "UNASSIGNED-VkImageMemoryBarrier-image-00003"; }
    // UNASSIGNED-VkImageMemoryBarrier-image-00004 QFO acquire image barrier must have matching QFO release submitted previously
    static const char *ErrMsgMissingQFOReleaseInSubmit() { return "UNASSIGNED-VkImageMemoryBarrier-image-00004"; }
};

// Buffer barrier specific implementation
template <>
struct QFOTransferBarrier<VkBufferMemoryBarrier> : public QFOTransferBarrierBase<VkBuffer, VkBufferMemoryBarrier> {
    using BaseType = QFOTransferBarrierBase<VkBuffer, VkBufferMemoryBarrier>;
    VkDeviceSize offset = 0;
    VkDeviceSize size = 0;
    QFOTransferBarrier(const VkBufferMemoryBarrier &barrier)
        : BaseType(barrier, barrier.buffer), offset(barrier.offset), size(barrier.size) {}
    size_t hash() const {
        auto hc = base_hash_combiner() << offset << size;
        return hc.Value();
    }
    bool operator==(const QFOTransferBarrier<BarrierType> &rhs) const {
        return (static_cast<BaseType>(*this) == static_cast<BaseType>(rhs)) && (offset == rhs.offset) && (size == rhs.size);
    }
    static const char *BarrierName() { return "VkBufferMemoryBarrier"; }
    static const char *HandleName() { return "VkBuffer"; }
    // UNASSIGNED-VkBufferMemoryBarrier-buffer-00001 QFO transfer buffer barrier must not duplicate QFO recorded in command buffer
    static const char *ErrMsgDuplicateQFOInCB() { return "UNASSIGNED-VkBufferMemoryBarrier-buffer-00001"; }
    // UNASSIGNED-VkBufferMemoryBarrier-buffer-00002 QFO transfer buffer barrier must not duplicate QFO submitted in batch
    static const char *ErrMsgDuplicateQFOInSubmit() { return "UNASSIGNED-VkBufferMemoryBarrier-buffer-00002"; }
    // UNASSIGNED-VkBufferMemoryBarrier-buffer-00003 QFO transfer buffer barrier must not duplicate QFO submitted previously
    static const char *ErrMsgDuplicateQFOSubmitted() { return "UNASSIGNED-VkBufferMemoryBarrier-buffer-00003"; }
    // UNASSIGNED-VkBufferMemoryBarrier-buffer-00004 QFO acquire buffer barrier must have matching QFO release submitted previously
    static const char *ErrMsgMissingQFOReleaseInSubmit() { return "UNASSIGNED-VkBufferMemoryBarrier-buffer-00004"; }
};

template <typename Barrier>
using QFOTransferBarrierHash = hash_util::HasHashMember<QFOTransferBarrier<Barrier>>;

// Command buffers store the set of barriers recorded
template <typename Barrier>
using QFOTransferBarrierSet = std::unordered_set<QFOTransferBarrier<Barrier>, QFOTransferBarrierHash<Barrier>>;
template <typename Barrier>
struct QFOTransferBarrierSets {
    QFOTransferBarrierSet<Barrier> release;
    QFOTransferBarrierSet<Barrier> acquire;
    void Reset() {
        acquire.clear();
        release.clear();
    }
};
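
// Illustrative sketch (pool, image_memory_barrier, and cb are assumed locals):
// when a QFO transfer barrier is recorded, it can be classified against the
// pool's queue family and stored on the command buffer for submit-time
// duplicate/match checks:
//
//     QFOTransferBarrier<VkImageMemoryBarrier> qfo_barrier(image_memory_barrier);
//     auto &barrier_sets = cb->qfo_transfer_image_barriers;
//     if (TempIsReleaseOp(pool, &image_memory_barrier)) {
//         barrier_sets.release.emplace(qfo_barrier);
//     } else if (IsAcquireOp(pool, &image_memory_barrier)) {
//         barrier_sets.acquire.emplace(qfo_barrier);
//     }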

// The layer_data stores the map of pending release barriers
template <typename Barrier>
using GlobalQFOTransferBarrierMap =
    std::unordered_map<typename QFOTransferBarrier<Barrier>::HandleType, QFOTransferBarrierSet<Barrier>>;

// Submit queue uses the Scoreboard to track all release/acquire operations in a batch.
template <typename Barrier>
using QFOTransferCBScoreboard =
    std::unordered_map<QFOTransferBarrier<Barrier>, const GLOBAL_CB_NODE *, QFOTransferBarrierHash<Barrier>>;
template <typename Barrier>
struct QFOTransferCBScoreboards {
    QFOTransferCBScoreboard<Barrier> acquire;
    QFOTransferCBScoreboard<Barrier> release;
};

struct GpuDeviceMemoryBlock {
    VkBuffer buffer;
    VkDeviceMemory memory;
    uint32_t offset;
};

struct GpuBufferInfo {
    GpuDeviceMemoryBlock mem_block;
    VkDescriptorSet desc_set;
    VkDescriptorPool desc_pool;
    GpuBufferInfo(GpuDeviceMemoryBlock mem_block, VkDescriptorSet desc_set, VkDescriptorPool desc_pool)
        : mem_block(mem_block), desc_set(desc_set), desc_pool(desc_pool) {}
};

// Cmd Buffer Wrapper Struct - TODO : This desperately needs its own class
struct GLOBAL_CB_NODE : public BASE_NODE {
    VkCommandBuffer commandBuffer;
    VkCommandBufferAllocateInfo createInfo = {};
    VkCommandBufferBeginInfo beginInfo;
    VkCommandBufferInheritanceInfo inheritanceInfo;
    VkDevice device;  // device this CB belongs to
    bool hasDrawCmd;
    CB_STATE state;        // Track cmd buffer update state
    uint64_t submitCount;  // Number of times CB has been submitted
    typedef uint64_t ImageLayoutUpdateCount;
    ImageLayoutUpdateCount image_layout_change_count;  // The sequence number for changes to image layout (for cached validation)
    CBStatusFlags status;                              // Track status of various bindings on cmd buffer
    CBStatusFlags static_status;                       // All state bits provided by current graphics pipeline
                                                       // rather than dynamic state
    // Currently storing "lastBound" objects on per-CB basis
    //  long-term may want to create caches of "lastBound" states and could have
    //  each individual CMD_NODE referencing its own "lastBound" state
    // Store last bound state for Gfx & Compute pipeline bind points
    std::map<uint32_t, LAST_BOUND_STATE> lastBound;

    uint32_t viewportMask;
    uint32_t scissorMask;
    VkRenderPassBeginInfo activeRenderPassBeginInfo;
    RENDER_PASS_STATE *activeRenderPass;
    VkSubpassContents activeSubpassContents;
    uint32_t activeSubpass;
    VkFramebuffer activeFramebuffer;
    std::unordered_set<VkFramebuffer> framebuffers;
    // Unified data structs to track objects bound to this command buffer as well as object
    //  dependencies that have been broken : either destroyed objects, or updated descriptor sets
    std::unordered_set<VK_OBJECT> object_bindings;
    std::vector<VK_OBJECT> broken_bindings;

    QFOTransferBarrierSets<VkBufferMemoryBarrier> qfo_transfer_buffer_barriers;
    QFOTransferBarrierSets<VkImageMemoryBarrier> qfo_transfer_image_barriers;

    std::unordered_set<VkEvent> waitedEvents;
    std::vector<VkEvent> writeEventsBeforeWait;
    std::vector<VkEvent> events;
    std::unordered_map<QueryObject, std::unordered_set<VkEvent>> waitedEventsBeforeQueryReset;
    std::unordered_map<QueryObject, bool> queryToStateMap;  // false is unavailable, true is available
    std::unordered_set<QueryObject> activeQueries;
    std::unordered_set<QueryObject> startedQueries;
    std::unordered_map<ImageSubresourcePair, IMAGE_CMD_BUF_LAYOUT_NODE> imageLayoutMap;
    std::unordered_map<VkEvent, VkPipelineStageFlags> eventToStageMap;
    std::vector<DrawData> draw_data;
    DrawData current_draw_data;
    bool vertex_buffer_used;  // Track for perf warning to make sure any bound vtx buffer used
    VkCommandBuffer primaryCommandBuffer;
    // Track images and buffers that are updated by this CB at the point of a draw
    std::unordered_set<VkImageView> updateImages;
    std::unordered_set<VkBuffer> updateBuffers;
    // If primary, the secondary command buffers we will call.
    // If secondary, the primary command buffers we will be called by.
    std::unordered_set<GLOBAL_CB_NODE *> linkedCommandBuffers;
    // Validation functions run at primary CB queue submit time
    std::vector<std::function<bool()>> queue_submit_functions;
    // Validation functions run when secondary CB is executed in primary
    std::vector<std::function<bool(GLOBAL_CB_NODE *, VkFramebuffer)>> cmd_execute_commands_functions;
    std::unordered_set<VkDeviceMemory> memObjs;
    std::vector<std::function<bool(VkQueue)>> eventUpdates;
    std::vector<std::function<bool(VkQueue)>> queryUpdates;
    std::unordered_set<cvdescriptorset::DescriptorSet *> validated_descriptor_sets;
    // Contents valid only after an index buffer is bound (CBSTATUS_INDEX_BUFFER_BOUND set)
    IndexBufferBinding index_buffer_binding;
    // GPU Validation data
    std::vector<GpuBufferInfo> gpu_buffer_list;
};

static inline QFOTransferBarrierSets<VkImageMemoryBarrier> &GetQFOBarrierSets(
    GLOBAL_CB_NODE *cb, const QFOTransferBarrier<VkImageMemoryBarrier>::Tag &type_tag) {
    return cb->qfo_transfer_image_barriers;
}
static inline QFOTransferBarrierSets<VkBufferMemoryBarrier> &GetQFOBarrierSets(
    GLOBAL_CB_NODE *cb, const QFOTransferBarrier<VkBufferMemoryBarrier>::Tag &type_tag) {
    return cb->qfo_transfer_buffer_barriers;
}
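
// Illustrative sketch (RecordQFOBarrier is a hypothetical helper): the unused
// Tag parameter selects the matching barrier set by overload resolution at
// compile time, so generic barrier-recording code needs no branch on the
// barrier type:
//
//     template <typename Barrier>
//     void RecordQFOBarrier(GLOBAL_CB_NODE *cb, const QFOTransferBarrier<Barrier> &qfo, bool is_release) {
//         auto &sets = GetQFOBarrierSets(cb, typename QFOTransferBarrier<Barrier>::Tag());
//         (is_release ? sets.release : sets.acquire).emplace(qfo);
//     }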

struct SEMAPHORE_WAIT {
    VkSemaphore semaphore;
    VkQueue queue;
    uint64_t seq;
};

struct CB_SUBMISSION {
    CB_SUBMISSION(std::vector<VkCommandBuffer> const &cbs, std::vector<SEMAPHORE_WAIT> const &waitSemaphores,
                  std::vector<VkSemaphore> const &signalSemaphores, std::vector<VkSemaphore> const &externalSemaphores,
                  VkFence fence)
        : cbs(cbs),
          waitSemaphores(waitSemaphores),
          signalSemaphores(signalSemaphores),
          externalSemaphores(externalSemaphores),
          fence(fence) {}

    std::vector<VkCommandBuffer> cbs;
    std::vector<SEMAPHORE_WAIT> waitSemaphores;
    std::vector<VkSemaphore> signalSemaphores;
    std::vector<VkSemaphore> externalSemaphores;
    VkFence fence;
};

struct IMAGE_LAYOUT_NODE {
    VkImageLayout layout;
    VkFormat format;
};

struct MT_FB_ATTACHMENT_INFO {
    IMAGE_VIEW_STATE *view_state;
    VkImage image;
};

class FRAMEBUFFER_STATE : public BASE_NODE {
   public:
    VkFramebuffer framebuffer;
    safe_VkFramebufferCreateInfo createInfo;
    std::shared_ptr<RENDER_PASS_STATE> rp_state;
#ifdef FRAMEBUFFER_ATTACHMENT_STATE_CACHE
    // TODO Re-enable attachment state cache once staleness protection is implemented
    //      For staleness protection, destroyed images and image views must invalidate the cached data and tag the framebuffer
    //      object as no longer valid
    std::vector<MT_FB_ATTACHMENT_INFO> attachments;
#endif
    FRAMEBUFFER_STATE(VkFramebuffer fb, const VkFramebufferCreateInfo *pCreateInfo, std::shared_ptr<RENDER_PASS_STATE> &&rpstate)
        : framebuffer(fb), createInfo(pCreateInfo), rp_state(rpstate) {}
};

struct shader_module;
struct DeviceExtensions;

struct DeviceFeatures {
    VkPhysicalDeviceFeatures core;
    VkPhysicalDeviceDescriptorIndexingFeaturesEXT descriptor_indexing;
    VkPhysicalDevice8BitStorageFeaturesKHR eight_bit_storage;
    VkPhysicalDeviceExclusiveScissorFeaturesNV exclusive_scissor;
    VkPhysicalDeviceShadingRateImageFeaturesNV shading_rate_image;
    VkPhysicalDeviceMeshShaderFeaturesNV mesh_shader;
    VkPhysicalDeviceInlineUniformBlockFeaturesEXT inline_uniform_block;
    VkPhysicalDeviceTransformFeedbackFeaturesEXT transform_feedback_features;
    VkPhysicalDeviceFloat16Int8FeaturesKHR float16_int8;
    VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT vtx_attrib_divisor_features;
    VkPhysicalDeviceScalarBlockLayoutFeaturesEXT scalar_block_layout_features;
    VkPhysicalDeviceBufferAddressFeaturesEXT buffer_address;
};

enum RenderPassCreateVersion { RENDER_PASS_VERSION_1 = 0, RENDER_PASS_VERSION_2 = 1 };

class GpuDeviceMemoryManager;
class GpuDescriptorSetManager;
struct ShaderTracker {
    VkPipeline pipeline;
    VkShaderModule shader_module;
    std::vector<unsigned int> pgm;
};
struct GpuValidationState {
    bool aborted;
    bool reserve_binding_slot;
    VkDescriptorSetLayout debug_desc_layout;
    VkDescriptorSetLayout dummy_desc_layout;
    uint32_t adjusted_max_desc_sets;
    uint32_t desc_set_bind_index;
    uint32_t unique_shader_module_id;
    std::unordered_map<uint32_t, ShaderTracker> shader_map;
    std::unique_ptr<GpuDeviceMemoryManager> memory_manager;
    std::unique_ptr<GpuDescriptorSetManager> desc_set_manager;
    VkCommandPool barrier_command_pool;
    VkCommandBuffer barrier_command_buffer;
};

enum BarrierOperationsType {
    kAllAcquire,  // All Barrier operations are "ownership acquire" operations
    kAllRelease,  // All Barrier operations are "ownership release" operations
    kGeneral,     // Either no ownership operations or a mix of ownership operation types and/or non-ownership operations
};

std::shared_ptr<cvdescriptorset::DescriptorSetLayout const> const GetDescriptorSetLayout(layer_data const *, VkDescriptorSetLayout);

#endif  // CORE_VALIDATION_TYPES_H_