/* Copyright (c) 2015-2019 The Khronos Group Inc.
 * Copyright (c) 2015-2019 Valve Corporation
 * Copyright (c) 2015-2019 LunarG, Inc.
 * Copyright (C) 2015-2019 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Courtney Goeltzenleuchter <courtneygo@google.com>
 * Author: Tobin Ehlis <tobine@google.com>
 * Author: Chris Forbes <chrisf@ijw.co.nz>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Dave Houlton <daveh@lunarg.com>
 * Author: John Zulauf <jzulauf@lunarg.com>
 */
#ifndef CORE_VALIDATION_TYPES_H_
#define CORE_VALIDATION_TYPES_H_

#include "cast_utils.h"
#include "hash_vk_types.h"
#include "sparse_containers.h"
#include "vk_safe_struct.h"
#include "vulkan/vulkan.h"
#include "vk_layer_logging.h"
#include "vk_object_types.h"
#include "vk_extension_helper.h"
#include "vk_typemap_helper.h"
#include "convert_to_renderpass2.h"
#include "layer_chassis_dispatch.h"

#include <array>
#include <atomic>
#include <functional>
#include <list>
#include <map>
#include <memory>
#include <set>
#include <string.h>
#include <unordered_map>
#include <unordered_set>
#include <vector>

#ifdef VK_USE_PLATFORM_ANDROID_KHR
#include "android_ndk_types.h"
#endif  // VK_USE_PLATFORM_ANDROID_KHR
// Fwd declarations -- including descriptor_set.h creates an ugly include loop
namespace cvdescriptorset {
class DescriptorSetLayoutDef;
class DescriptorSetLayout;
class DescriptorSet;
}  // namespace cvdescriptorset

struct CMD_BUFFER_STATE;
class CoreChecks;
class ValidationStateTracker;

enum CALL_STATE {
    UNCALLED,       // Function has not been called
    QUERY_COUNT,    // Function called once to query a count
    QUERY_DETAILS,  // Function called w/ a count to query details
};

class BASE_NODE {
   public:
    // Track when object is being used by an in-flight command buffer
    std::atomic_int in_use;
    // Track command buffers that this object is bound to
    //  binding initialized when cmd referencing object is bound to command buffer
    //  binding removed when command buffer is reset or destroyed
    // When an object is destroyed, any bound cbs are set to INVALID
    std::unordered_set<CMD_BUFFER_STATE *> cb_bindings;

    BASE_NODE() { in_use.store(0); };
};

// Track command pools and their command buffers
struct COMMAND_POOL_STATE : public BASE_NODE {
    VkCommandPoolCreateFlags createFlags;
    uint32_t queueFamilyIndex;
    // Cmd buffers allocated from this pool
    std::unordered_set<VkCommandBuffer> commandBuffers;
};
// Utilities for barriers and the command pool
template <typename Barrier>
static bool IsTransferOp(const Barrier *barrier) {
    return barrier->srcQueueFamilyIndex != barrier->dstQueueFamilyIndex;
}

template <typename Barrier, bool assume_transfer = false>
static bool TempIsReleaseOp(const COMMAND_POOL_STATE *pool, const Barrier *barrier) {
    return (assume_transfer || IsTransferOp(barrier)) && (pool->queueFamilyIndex == barrier->srcQueueFamilyIndex);
}

template <typename Barrier, bool assume_transfer = false>
static bool IsAcquireOp(const COMMAND_POOL_STATE *pool, const Barrier *barrier) {
    return (assume_transfer || IsTransferOp(barrier)) && (pool->queueFamilyIndex == barrier->dstQueueFamilyIndex);
}

inline bool IsSpecial(const uint32_t queue_family_index) {
    return (queue_family_index == VK_QUEUE_FAMILY_EXTERNAL_KHR) || (queue_family_index == VK_QUEUE_FAMILY_FOREIGN_EXT);
}
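
// Illustrative usage sketch (hypothetical values, not part of the original header):
// how these helpers classify a queue family ownership transfer barrier.
//   VkBufferMemoryBarrier barrier{};
//   barrier.srcQueueFamilyIndex = 0;   // e.g. graphics family releases ownership
//   barrier.dstQueueFamilyIndex = 1;   // e.g. transfer family acquires ownership
//   COMMAND_POOL_STATE pool;
//   pool.queueFamilyIndex = 0;
//   IsTransferOp(&barrier);            // true: src != dst
//   TempIsReleaseOp(&pool, &barrier);  // true: recorded on the releasing (src) family
//   IsAcquireOp(&pool, &barrier);      // false: this pool is not the acquiring (dst) family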

inline bool operator==(const VulkanTypedHandle &a, const VulkanTypedHandle &b) NOEXCEPT {
    return a.handle == b.handle && a.type == b.type;
}

namespace std {
template <>
struct hash<VulkanTypedHandle> {
    size_t operator()(VulkanTypedHandle obj) const NOEXCEPT { return hash<uint64_t>()(obj.handle) ^ hash<uint32_t>()(obj.type); }
};
}  // namespace std

// Flags describing requirements imposed by the pipeline on a descriptor. These
// can't be checked at pipeline creation time as they depend on the Image or
// ImageView bound.
enum descriptor_req {
    DESCRIPTOR_REQ_VIEW_TYPE_1D = 1 << VK_IMAGE_VIEW_TYPE_1D,
    DESCRIPTOR_REQ_VIEW_TYPE_1D_ARRAY = 1 << VK_IMAGE_VIEW_TYPE_1D_ARRAY,
    DESCRIPTOR_REQ_VIEW_TYPE_2D = 1 << VK_IMAGE_VIEW_TYPE_2D,
    DESCRIPTOR_REQ_VIEW_TYPE_2D_ARRAY = 1 << VK_IMAGE_VIEW_TYPE_2D_ARRAY,
    DESCRIPTOR_REQ_VIEW_TYPE_3D = 1 << VK_IMAGE_VIEW_TYPE_3D,
    DESCRIPTOR_REQ_VIEW_TYPE_CUBE = 1 << VK_IMAGE_VIEW_TYPE_CUBE,
    DESCRIPTOR_REQ_VIEW_TYPE_CUBE_ARRAY = 1 << VK_IMAGE_VIEW_TYPE_CUBE_ARRAY,

    DESCRIPTOR_REQ_ALL_VIEW_TYPE_BITS = (1 << (VK_IMAGE_VIEW_TYPE_END_RANGE + 1)) - 1,

    DESCRIPTOR_REQ_SINGLE_SAMPLE = 2 << VK_IMAGE_VIEW_TYPE_END_RANGE,
    DESCRIPTOR_REQ_MULTI_SAMPLE = DESCRIPTOR_REQ_SINGLE_SAMPLE << 1,

    DESCRIPTOR_REQ_COMPONENT_TYPE_FLOAT = DESCRIPTOR_REQ_MULTI_SAMPLE << 1,
    DESCRIPTOR_REQ_COMPONENT_TYPE_SINT = DESCRIPTOR_REQ_COMPONENT_TYPE_FLOAT << 1,
    DESCRIPTOR_REQ_COMPONENT_TYPE_UINT = DESCRIPTOR_REQ_COMPONENT_TYPE_SINT << 1,
};

extern unsigned DescriptorRequirementsBitsFromFormat(VkFormat fmt);

typedef std::map<uint32_t, descriptor_req> BindingReqMap;
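
// Illustrative usage sketch (hypothetical values): a shader that samples a 2D,
// single-sample, float image could impose
//   descriptor_req req = static_cast<descriptor_req>(DESCRIPTOR_REQ_VIEW_TYPE_2D |
//                                                    DESCRIPTOR_REQ_SINGLE_SAMPLE |
//                                                    DESCRIPTOR_REQ_COMPONENT_TYPE_FLOAT);
// and validation would later compare these bits against the bound VkImageView's
// view type, sample count, and the component bits DescriptorRequirementsBitsFromFormat()
// derives from its format.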

struct DESCRIPTOR_POOL_STATE : BASE_NODE {
    VkDescriptorPool pool;
    uint32_t maxSets;        // Max descriptor sets allowed in this pool
    uint32_t availableSets;  // Available descriptor sets in this pool

    safe_VkDescriptorPoolCreateInfo createInfo;
    std::unordered_set<cvdescriptorset::DescriptorSet *> sets;  // Collection of all sets in this pool
    std::map<uint32_t, uint32_t> maxDescriptorTypeCount;        // Max # of descriptors of each type in this pool
    std::map<uint32_t, uint32_t> availableDescriptorTypeCount;  // Available # of descriptors of each type in this pool

    DESCRIPTOR_POOL_STATE(const VkDescriptorPool pool, const VkDescriptorPoolCreateInfo *pCreateInfo)
        : pool(pool),
          maxSets(pCreateInfo->maxSets),
          availableSets(pCreateInfo->maxSets),
          createInfo(pCreateInfo),
          maxDescriptorTypeCount(),
          availableDescriptorTypeCount() {
        // Collect maximums per descriptor type.
        for (uint32_t i = 0; i < createInfo.poolSizeCount; ++i) {
            uint32_t typeIndex = static_cast<uint32_t>(createInfo.pPoolSizes[i].type);
            // Same descriptor types can appear several times
            maxDescriptorTypeCount[typeIndex] += createInfo.pPoolSizes[i].descriptorCount;
            availableDescriptorTypeCount[typeIndex] = maxDescriptorTypeCount[typeIndex];
        }
    }
};

// Generic memory binding struct to track objects bound to objects
struct MEM_BINDING {
    VkDeviceMemory mem;
    VkDeviceSize offset;
    VkDeviceSize size;
};

struct BufferBinding {
    VkBuffer buffer;
    VkDeviceSize size;
    VkDeviceSize offset;
};

struct IndexBufferBinding : BufferBinding {
    VkIndexType index_type;
};

inline bool operator==(MEM_BINDING a, MEM_BINDING b) NOEXCEPT { return a.mem == b.mem && a.offset == b.offset && a.size == b.size; }

namespace std {
template <>
struct hash<MEM_BINDING> {
    size_t operator()(MEM_BINDING mb) const NOEXCEPT {
        auto intermediate = hash<uint64_t>()(reinterpret_cast<uint64_t &>(mb.mem)) ^ hash<uint64_t>()(mb.offset);
        return intermediate ^ hash<uint64_t>()(mb.size);
    }
};
}  // namespace std

// Superclass for bindable object state (currently images and buffers)
class BINDABLE : public BASE_NODE {
   public:
    bool sparse;  // Is this object being bound with sparse memory or not?
    // Non-sparse binding data
    MEM_BINDING binding;
    // Memory requirements for this BINDABLE
    VkMemoryRequirements requirements;
    // bool to track if memory requirements were checked
    bool memory_requirements_checked;
    // Sparse binding data, initially just tracking MEM_BINDING per mem object
    //  There's more data for sparse bindings so need better long-term solution
    // TODO : Need to update solution to track all sparse binding data
    std::unordered_set<MEM_BINDING> sparse_bindings;

    std::unordered_set<VkDeviceMemory> bound_memory_set_;

    BINDABLE()
        : sparse(false), binding{}, requirements{}, memory_requirements_checked(false), sparse_bindings{}, bound_memory_set_{} {};

    // Update the cached set of memory bindings.
    // Code that changes binding.mem or sparse_bindings must call UpdateBoundMemorySet()
    void UpdateBoundMemorySet() {
        bound_memory_set_.clear();
        if (!sparse) {
            bound_memory_set_.insert(binding.mem);
        } else {
            for (auto sb : sparse_bindings) {
                bound_memory_set_.insert(sb.mem);
            }
        }
    }

    // Return unordered set of memory objects that are bound
    // Instead of creating a set from scratch each query, return the cached one
    const std::unordered_set<VkDeviceMemory> &GetBoundMemory() const { return bound_memory_set_; }
};
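
// Illustrative usage sketch (hypothetical 'mem' handle): keeping the cached set
// in sync when a binding changes, then querying it.
//   BINDABLE obj;
//   obj.binding.mem = mem;       // mutate the non-sparse binding
//   obj.UpdateBoundMemorySet();  // required after changing binding.mem / sparse_bindings
//   for (VkDeviceMemory m : obj.GetBoundMemory()) { /* validate each bound allocation */ }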

class BUFFER_STATE : public BINDABLE {
   public:
    VkBuffer buffer;
    VkBufferCreateInfo createInfo;
    BUFFER_STATE(VkBuffer buff, const VkBufferCreateInfo *pCreateInfo) : buffer(buff), createInfo(*pCreateInfo) {
        if ((createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) && (createInfo.queueFamilyIndexCount > 0)) {
            uint32_t *pQueueFamilyIndices = new uint32_t[createInfo.queueFamilyIndexCount];
            for (uint32_t i = 0; i < createInfo.queueFamilyIndexCount; i++) {
                pQueueFamilyIndices[i] = pCreateInfo->pQueueFamilyIndices[i];
            }
            createInfo.pQueueFamilyIndices = pQueueFamilyIndices;
        }

        if (createInfo.flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT) {
            sparse = true;
        }
    };

    BUFFER_STATE(BUFFER_STATE const &rh_obj) = delete;

    ~BUFFER_STATE() {
        if ((createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) && (createInfo.queueFamilyIndexCount > 0)) {
            delete[] createInfo.pQueueFamilyIndices;
            createInfo.pQueueFamilyIndices = nullptr;
        }
    };
};

class BUFFER_VIEW_STATE : public BASE_NODE {
   public:
    VkBufferView buffer_view;
    VkBufferViewCreateInfo create_info;
    BUFFER_VIEW_STATE(VkBufferView bv, const VkBufferViewCreateInfo *ci) : buffer_view(bv), create_info(*ci){};
    BUFFER_VIEW_STATE(const BUFFER_VIEW_STATE &rh_obj) = delete;
};

struct SAMPLER_STATE : public BASE_NODE {
    VkSampler sampler;
    VkSamplerCreateInfo createInfo;
    VkSamplerYcbcrConversion samplerConversion = VK_NULL_HANDLE;

    SAMPLER_STATE(const VkSampler *ps, const VkSamplerCreateInfo *pci) : sampler(*ps), createInfo(*pci) {
        auto *conversionInfo = lvl_find_in_chain<VkSamplerYcbcrConversionInfo>(pci->pNext);
        if (conversionInfo) samplerConversion = conversionInfo->conversion;
    }
};

class IMAGE_STATE : public BINDABLE {
   public:
    VkImage image;
    VkImageCreateInfo createInfo;
    bool valid;               // If this is a swapchain image, track validity here since its backing memory has no DEVICE_MEMORY_STATE
    bool acquired;            // If this is a swapchain image, has it been acquired by the app.
    bool shared_presentable;  // True for a front-buffered swapchain image
    bool layout_locked;       // A front-buffered image that has been presented can never have layout transitioned
    bool get_sparse_reqs_called;         // Track if GetImageSparseMemoryRequirements() has been called for this image
    bool sparse_metadata_required;       // Track if sparse metadata aspect is required for this image
    bool sparse_metadata_bound;          // Track if sparse metadata aspect is bound to this image
    bool imported_ahb;                   // True if image was imported from an Android Hardware Buffer
    bool has_ahb_format;                 // True if image was created with an external Android format
    uint64_t ahb_format;                 // External Android format, if provided
    VkImageSubresourceRange full_range;  // The normalized ISR for all levels, layers (slices), and aspects
    VkSwapchainKHR create_from_swapchain;
    VkSwapchainKHR bind_swapchain;
    uint32_t bind_swapchain_imageIndex;

#ifdef VK_USE_PLATFORM_ANDROID_KHR
    uint64_t external_format_android;
#endif  // VK_USE_PLATFORM_ANDROID_KHR

    std::vector<VkSparseImageMemoryRequirements> sparse_requirements;
    IMAGE_STATE(VkImage img, const VkImageCreateInfo *pCreateInfo);
    IMAGE_STATE(IMAGE_STATE const &rh_obj) = delete;

    ~IMAGE_STATE() {
        if ((createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) && (createInfo.queueFamilyIndexCount > 0)) {
            delete[] createInfo.pQueueFamilyIndices;
            createInfo.pQueueFamilyIndices = nullptr;
        }
    };
};

class IMAGE_VIEW_STATE : public BASE_NODE {
   public:
    VkImageView image_view;
    VkImageViewCreateInfo create_info;
    VkImageSubresourceRange normalized_subresource_range;
    VkSampleCountFlagBits samples;
    unsigned descriptor_format_bits;
    VkSamplerYcbcrConversion samplerConversion;  // Handle of the ycbcr sampler conversion the image was created with, if any
    IMAGE_VIEW_STATE(const IMAGE_STATE *image_state, VkImageView iv, const VkImageViewCreateInfo *ci);
    IMAGE_VIEW_STATE(const IMAGE_VIEW_STATE &rh_obj) = delete;
};

class ACCELERATION_STRUCTURE_STATE : public BINDABLE {
   public:
    VkAccelerationStructureNV acceleration_structure;
    safe_VkAccelerationStructureCreateInfoNV create_info;
    bool memory_requirements_checked = false;
    VkMemoryRequirements2KHR memory_requirements;
    bool build_scratch_memory_requirements_checked = false;
    VkMemoryRequirements2KHR build_scratch_memory_requirements;
    bool update_scratch_memory_requirements_checked = false;
    VkMemoryRequirements2KHR update_scratch_memory_requirements;
    bool built = false;
    safe_VkAccelerationStructureInfoNV build_info;
    ACCELERATION_STRUCTURE_STATE(VkAccelerationStructureNV as, const VkAccelerationStructureCreateInfoNV *ci)
        : acceleration_structure(as),
          create_info(ci),
          memory_requirements{},
          build_scratch_memory_requirements_checked{},
          update_scratch_memory_requirements_checked{} {}
    ACCELERATION_STRUCTURE_STATE(const ACCELERATION_STRUCTURE_STATE &rh_obj) = delete;
};

struct MemRange {
    VkDeviceSize offset;
    VkDeviceSize size;
};

// Data struct for tracking memory object
struct DEVICE_MEMORY_STATE : public BASE_NODE {
    void *object;  // Dispatchable object used to create this memory (device or swapchain)
    VkDeviceMemory mem;
    VkMemoryAllocateInfo alloc_info;
    bool is_dedicated;
    VkBuffer dedicated_buffer;
    VkImage dedicated_image;
    bool is_export;
    VkExternalMemoryHandleTypeFlags export_handle_type_flags;
    std::unordered_set<VulkanTypedHandle> obj_bindings;  // objects bound to this memory
    // Convenience sets of handles to speed up iterating over objects independently
    std::unordered_set<uint64_t> bound_images;
    std::unordered_set<uint64_t> bound_buffers;
    std::unordered_set<uint64_t> bound_acceleration_structures;

    MemRange mem_range;
    void *shadow_copy_base;    // Base of layer's allocation for guard band, data, and alignment space
    void *shadow_copy;         // Pointer to start of guard-band data before mapped region
    uint64_t shadow_pad_size;  // Size of the guard-band data before and after actual data. It MUST be a
                               // multiple of limits.minMemoryMapAlignment
    void *p_driver_data;       // Pointer to application's actual memory

    DEVICE_MEMORY_STATE(void *disp_object, const VkDeviceMemory in_mem, const VkMemoryAllocateInfo *p_alloc_info)
        : object(disp_object),
          mem(in_mem),
          alloc_info(*p_alloc_info),
          is_dedicated(false),
          dedicated_buffer(VK_NULL_HANDLE),
          dedicated_image(VK_NULL_HANDLE),
          is_export(false),
          export_handle_type_flags(0),
          mem_range{},
          shadow_copy_base(0),
          shadow_copy(0),
          shadow_pad_size(0),
          p_driver_data(0){};
};

class SWAPCHAIN_NODE {
   public:
    safe_VkSwapchainCreateInfoKHR createInfo;
    VkSwapchainKHR swapchain;
    std::vector<VkImage> images;
    bool retired = false;
    bool shared_presentable = false;
    CALL_STATE vkGetSwapchainImagesKHRState = UNCALLED;
    uint32_t get_swapchain_image_count = 0;
    SWAPCHAIN_NODE(const VkSwapchainCreateInfoKHR *pCreateInfo, VkSwapchainKHR swapchain)
        : createInfo(pCreateInfo), swapchain(swapchain) {}
};

struct ColorAspectTraits {
    static const uint32_t kAspectCount = 1;
    static int Index(VkImageAspectFlags mask) { return 0; };
    static VkImageAspectFlags AspectMask() { return VK_IMAGE_ASPECT_COLOR_BIT; }
    static const std::array<VkImageAspectFlagBits, kAspectCount> &AspectBits() {
        static std::array<VkImageAspectFlagBits, kAspectCount> kAspectBits{{VK_IMAGE_ASPECT_COLOR_BIT}};
        return kAspectBits;
    }
};

struct DepthAspectTraits {
    static const uint32_t kAspectCount = 1;
    static int Index(VkImageAspectFlags mask) { return 0; };
    static VkImageAspectFlags AspectMask() { return VK_IMAGE_ASPECT_DEPTH_BIT; }
    static const std::array<VkImageAspectFlagBits, kAspectCount> &AspectBits() {
        static std::array<VkImageAspectFlagBits, kAspectCount> kAspectBits{{VK_IMAGE_ASPECT_DEPTH_BIT}};
        return kAspectBits;
    }
};

struct StencilAspectTraits {
    static const uint32_t kAspectCount = 1;
    static int Index(VkImageAspectFlags mask) { return 0; };
    static VkImageAspectFlags AspectMask() { return VK_IMAGE_ASPECT_STENCIL_BIT; }
    static const std::array<VkImageAspectFlagBits, kAspectCount> &AspectBits() {
        static std::array<VkImageAspectFlagBits, kAspectCount> kAspectBits{{VK_IMAGE_ASPECT_STENCIL_BIT}};
        return kAspectBits;
    }
};

struct DepthStencilAspectTraits {
    // VK_IMAGE_ASPECT_DEPTH_BIT = 0x00000002,  >> 1 -> 1 -1 -> 0
    // VK_IMAGE_ASPECT_STENCIL_BIT = 0x00000004, >> 1 -> 2 -1 = 1
    static const uint32_t kAspectCount = 2;
    static uint32_t Index(VkImageAspectFlags mask) {
        uint32_t index = (mask >> 1) - 1;
        assert((index == 0) || (index == 1));
        return index;
    };
    static VkImageAspectFlags AspectMask() { return VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; }
    static const std::array<VkImageAspectFlagBits, kAspectCount> &AspectBits() {
        static std::array<VkImageAspectFlagBits, kAspectCount> kAspectBits{
            {VK_IMAGE_ASPECT_DEPTH_BIT, VK_IMAGE_ASPECT_STENCIL_BIT}};
        return kAspectBits;
    }
};

struct Multiplane2AspectTraits {
    // VK_IMAGE_ASPECT_PLANE_0_BIT = 0x00000010, >> 4 - 1 -> 0
    // VK_IMAGE_ASPECT_PLANE_1_BIT = 0x00000020, >> 4 - 1 -> 1
    static const uint32_t kAspectCount = 2;
    static uint32_t Index(VkImageAspectFlags mask) {
        uint32_t index = (mask >> 4) - 1;
        assert((index == 0) || (index == 1));
        return index;
    };
    static VkImageAspectFlags AspectMask() { return VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT; }
    static const std::array<VkImageAspectFlagBits, kAspectCount> &AspectBits() {
        static std::array<VkImageAspectFlagBits, kAspectCount> kAspectBits{
            {VK_IMAGE_ASPECT_PLANE_0_BIT, VK_IMAGE_ASPECT_PLANE_1_BIT}};
        return kAspectBits;
    }
};

struct Multiplane3AspectTraits {
    // VK_IMAGE_ASPECT_PLANE_0_BIT = 0x00000010, >> 4 - 1 -> 0
    // VK_IMAGE_ASPECT_PLANE_1_BIT = 0x00000020, >> 4 - 1 -> 1
    // VK_IMAGE_ASPECT_PLANE_2_BIT = 0x00000040, >> 4 - 1 -> 3 (clamped to 2 below)
    static const uint32_t kAspectCount = 3;
    static uint32_t Index(VkImageAspectFlags mask) {
        uint32_t index = (mask >> 4) - 1;
        index = index > 2 ? 2 : index;
        assert((index == 0) || (index == 1) || (index == 2));
        return index;
    };
    static VkImageAspectFlags AspectMask() {
        return VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT | VK_IMAGE_ASPECT_PLANE_2_BIT;
    }
    static const std::array<VkImageAspectFlagBits, kAspectCount> &AspectBits() {
        static std::array<VkImageAspectFlagBits, kAspectCount> kAspectBits{
            {VK_IMAGE_ASPECT_PLANE_0_BIT, VK_IMAGE_ASPECT_PLANE_1_BIT, VK_IMAGE_ASPECT_PLANE_2_BIT}};
        return kAspectBits;
    }
};

std::string FormatDebugLabel(const char *prefix, const LoggingLabel &label);

const static VkImageLayout kInvalidLayout = VK_IMAGE_LAYOUT_MAX_ENUM;
// Interface class.
class ImageSubresourceLayoutMap {
   public:
    typedef std::function<bool(const VkImageSubresource &, VkImageLayout, VkImageLayout)> Callback;
    struct InitialLayoutState {
        VkImageView image_view;          // For relaxed matching rule evaluation, else VK_NULL_HANDLE
        VkImageAspectFlags aspect_mask;  // For relaxed matching rules... else 0
        LoggingLabel label;
        InitialLayoutState(const CMD_BUFFER_STATE &cb_state_, const IMAGE_VIEW_STATE *view_state);
        InitialLayoutState() : image_view(VK_NULL_HANDLE), aspect_mask(0), label() {}
    };

    struct SubresourceLayout {
        VkImageSubresource subresource;
        VkImageLayout layout;
    };

    struct SubresourceRangeLayout {
        VkImageSubresourceRange range;
        VkImageLayout layout;
    };

    class ConstIteratorInterface {
       public:
        // Make the value accessor non-virtual
        const SubresourceLayout &operator*() const { return value_; }

        virtual ConstIteratorInterface &operator++() = 0;
        virtual bool AtEnd() const = 0;
        virtual ~ConstIteratorInterface(){};

       protected:
        SubresourceLayout value_;
    };

    class ConstIterator {
       public:
        ConstIterator &operator++() {
            ++(*it_);
            return *this;
        }
        const SubresourceLayout &operator*() const { return *(*it_); }
        ConstIterator(ConstIteratorInterface *it) : it_(it){};
        bool AtEnd() const { return it_->AtEnd(); }

       protected:
        std::unique_ptr<ConstIteratorInterface> it_;
    };

    virtual ConstIterator BeginInitialUse() const = 0;
    virtual ConstIterator BeginSetLayout() const = 0;

    virtual bool SetSubresourceRangeLayout(const CMD_BUFFER_STATE &cb_state, const VkImageSubresourceRange &range,
                                           VkImageLayout layout, VkImageLayout expected_layout = kInvalidLayout) = 0;
    virtual bool SetSubresourceRangeInitialLayout(const CMD_BUFFER_STATE &cb_state, const VkImageSubresourceRange &range,
                                                  VkImageLayout layout, const IMAGE_VIEW_STATE *view_state = nullptr) = 0;
    virtual bool ForRange(const VkImageSubresourceRange &range, const Callback &callback, bool skip_invalid = true,
                          bool always_get_initial = false) const = 0;
    virtual VkImageLayout GetSubresourceLayout(const VkImageSubresource subresource) const = 0;
    virtual VkImageLayout GetSubresourceInitialLayout(const VkImageSubresource subresource) const = 0;
    virtual const InitialLayoutState *GetSubresourceInitialLayoutState(const VkImageSubresource subresource) const = 0;
    virtual bool UpdateFrom(const ImageSubresourceLayoutMap &from) = 0;
    virtual uintptr_t CompatibilityKey() const = 0;
    ImageSubresourceLayoutMap() {}
    virtual ~ImageSubresourceLayoutMap() {}
};

template <typename AspectTraits_, size_t kSparseThreshold = 64U>
class ImageSubresourceLayoutMapImpl : public ImageSubresourceLayoutMap {
   public:
    typedef ImageSubresourceLayoutMap Base;
    typedef AspectTraits_ AspectTraits;
    typedef Base::SubresourceLayout SubresourceLayout;
    typedef sparse_container::SparseVector<size_t, VkImageLayout, true, kInvalidLayout, kSparseThreshold> LayoutMap;
    typedef sparse_container::SparseVector<size_t, VkImageLayout, false, kInvalidLayout, kSparseThreshold> InitialLayoutMap;

    struct Layouts {
        LayoutMap current;
        InitialLayoutMap initial;
        Layouts(size_t size) : current(0, size), initial(0, size) {}
    };

    template <typename Container>
    class ConstIteratorImpl : public Base::ConstIteratorInterface {
       public:
        ConstIteratorImpl &operator++() override {
            ++it_;
            UpdateValue();
            return *this;
        }
        // Just good enough for cend checks
        ConstIteratorImpl(const ImageSubresourceLayoutMapImpl &map, const Container &container)
            : map_(&map), container_(&container), the_end_(false) {
            it_ = container_->cbegin();
            UpdateValue();
        }
        ~ConstIteratorImpl() override {}
        virtual bool AtEnd() const override { return the_end_; }

       protected:
        void UpdateValue() {
            if (it_ != container_->cend()) {
                value_.subresource = map_->Decode((*it_).first);
                value_.layout = (*it_).second;
            } else {
                the_end_ = true;
                value_.layout = kInvalidLayout;
            }
        }

        typedef typename Container::const_iterator ContainerIterator;
        const ImageSubresourceLayoutMapImpl *map_;
        const Container *container_;
        bool the_end_;
        ContainerIterator it_;
    };

    Base::ConstIterator BeginInitialUse() const override {
        return Base::ConstIterator(new ConstIteratorImpl<InitialLayoutMap>(*this, layouts_.initial));
    }

    Base::ConstIterator BeginSetLayout() const override {
        return Base::ConstIterator(new ConstIteratorImpl<LayoutMap>(*this, layouts_.current));
    }

    bool SetSubresourceRangeLayout(const CMD_BUFFER_STATE &cb_state, const VkImageSubresourceRange &range, VkImageLayout layout,
                                   VkImageLayout expected_layout = kInvalidLayout) override {
        bool updated = false;
        if (expected_layout == kInvalidLayout) {
            // Set the initial layout to the set layout as we had no other layout to reference
            expected_layout = layout;
        }
        if (!InRange(range)) return false;  // Don't even try to track bogus subresources

        InitialLayoutState *initial_state = nullptr;
        const uint32_t end_mip = range.baseMipLevel + range.levelCount;
        const auto &aspects = AspectTraits::AspectBits();
        for (uint32_t aspect_index = 0; aspect_index < AspectTraits::kAspectCount; aspect_index++) {
            if (0 == (range.aspectMask & aspects[aspect_index])) continue;
            size_t array_offset = Encode(aspect_index, range.baseMipLevel);
            for (uint32_t mip_level = range.baseMipLevel; mip_level < end_mip; ++mip_level, array_offset += mip_size_) {
                size_t start = array_offset + range.baseArrayLayer;
                size_t end = start + range.layerCount;
                bool updated_level = layouts_.current.SetRange(start, end, layout);
                if (updated_level) {
                    // We only need to try setting the initial layout, if we changed any of the layout values above
                    updated = true;
                    if (layouts_.initial.SetRange(start, end, expected_layout)) {
                        // We only need to try setting the initial layout *state* if the initial layout was updated
                        initial_state = UpdateInitialLayoutState(start, end, initial_state, cb_state, nullptr);
                    }
                }
            }
        }
        if (updated) version_++;
        return updated;
    }

    bool SetSubresourceRangeInitialLayout(const CMD_BUFFER_STATE &cb_state, const VkImageSubresourceRange &range,
                                          VkImageLayout layout, const IMAGE_VIEW_STATE *view_state = nullptr) override {
        bool updated = false;
        if (!InRange(range)) return false;  // Don't even try to track bogus subresources

        InitialLayoutState *initial_state = nullptr;
        const uint32_t end_mip = range.baseMipLevel + range.levelCount;
        const auto &aspects = AspectTraits::AspectBits();
        for (uint32_t aspect_index = 0; aspect_index < AspectTraits::kAspectCount; aspect_index++) {
            if (0 == (range.aspectMask & aspects[aspect_index])) continue;
            size_t array_offset = Encode(aspect_index, range.baseMipLevel);
            for (uint32_t mip_level = range.baseMipLevel; mip_level < end_mip; ++mip_level, array_offset += mip_size_) {
                size_t start = array_offset + range.baseArrayLayer;
                size_t end = start + range.layerCount;
                bool updated_level = layouts_.initial.SetRange(start, end, layout);
                if (updated_level) {
                    updated = true;
                    // We only need to try setting the initial layout *state* if the initial layout was updated
                    initial_state = UpdateInitialLayoutState(start, end, initial_state, cb_state, view_state);
                }
            }
        }
        if (updated) version_++;
        return updated;
    }

    // Loop over the given range calling the callback, primarily for
    // validation checks.  By default the initial layout is only looked
    // up if the set (current) value isn't found.
    bool ForRange(const VkImageSubresourceRange &range, const Callback &callback, bool skip_invalid = true,
                  bool always_get_initial = false) const override {
        if (!InRange(range)) return false;  // Don't even try to process bogus subresources

        VkImageSubresource subres;
        auto &level = subres.mipLevel;
        auto &layer = subres.arrayLayer;
        auto &aspect = subres.aspectMask;
        const auto &aspects = AspectTraits::AspectBits();
        bool keep_on = true;
        const uint32_t end_mip = range.baseMipLevel + range.levelCount;
        const uint32_t end_layer = range.baseArrayLayer + range.layerCount;
        for (uint32_t aspect_index = 0; aspect_index < AspectTraits::kAspectCount; aspect_index++) {
            if (0 == (range.aspectMask & aspects[aspect_index])) continue;
            aspect = aspects[aspect_index];  // noting that this and the following loop indices are references
            size_t array_offset = Encode(aspect_index, range.baseMipLevel);
            for (level = range.baseMipLevel; level < end_mip; ++level, array_offset += mip_size_) {
                for (layer = range.baseArrayLayer; layer < end_layer; layer++) {
                    // TODO -- would an iterator with a range check be faster?
                    size_t index = array_offset + layer;
                    VkImageLayout layout = layouts_.current.Get(index);
                    VkImageLayout initial_layout = kInvalidLayout;
                    if (always_get_initial || (layout == kInvalidLayout)) {
                        initial_layout = layouts_.initial.Get(index);
                    }

                    if (!skip_invalid || (layout != kInvalidLayout) || (initial_layout != kInvalidLayout)) {
                        keep_on = callback(subres, layout, initial_layout);
                        if (!keep_on) return keep_on;  // False value from the callback aborts the range traversal
                    }
                }
            }
        }
        return keep_on;
    }
    VkImageLayout GetSubresourceInitialLayout(const VkImageSubresource subresource) const override {
        if (!InRange(subresource)) return kInvalidLayout;
        uint32_t aspect_index = AspectTraits::Index(subresource.aspectMask);
        size_t index = Encode(aspect_index, subresource.mipLevel, subresource.arrayLayer);
        return layouts_.initial.Get(index);
    }

    const InitialLayoutState *GetSubresourceInitialLayoutState(const VkImageSubresource subresource) const override {
        if (!InRange(subresource)) return nullptr;
        uint32_t aspect_index = AspectTraits::Index(subresource.aspectMask);
        size_t index = Encode(aspect_index, subresource.mipLevel, subresource.arrayLayer);
        return initial_layout_state_map_.Get(index);
    }

    VkImageLayout GetSubresourceLayout(const VkImageSubresource subresource) const override {
        if (!InRange(subresource)) return kInvalidLayout;
        uint32_t aspect_index = AspectTraits::Index(subresource.aspectMask);
        size_t index = Encode(aspect_index, subresource.mipLevel, subresource.arrayLayer);
        return layouts_.current.Get(index);
    }

    // TODO: make sure this paranoia check is sufficient and not too much.
    uintptr_t CompatibilityKey() const override {
        return (reinterpret_cast<const uintptr_t>(&image_state_) ^ AspectTraits::AspectMask() ^ kSparseThreshold);
    }

    bool UpdateFrom(const ImageSubresourceLayoutMap &other) override {
        // Must be from matching images for the reinterpret cast to be valid
        assert(CompatibilityKey() == other.CompatibilityKey());
        if (CompatibilityKey() != other.CompatibilityKey()) return false;

        const auto &from = reinterpret_cast<const ImageSubresourceLayoutMapImpl &>(other);
        bool updated = false;
        updated |= layouts_.initial.Merge(from.layouts_.initial);
        updated |= layouts_.current.Merge(from.layouts_.current);
        initial_layout_state_map_.Merge(from.initial_layout_state_map_);

        return updated;
    }

    ImageSubresourceLayoutMapImpl() : Base() {}
    ImageSubresourceLayoutMapImpl(const IMAGE_STATE &image_state)
        : Base(),
          image_state_(image_state),
          mip_size_(image_state.full_range.layerCount),
          aspect_size_(mip_size_ * image_state.full_range.levelCount),
          version_(0),
          layouts_(aspect_size_ * AspectTraits::kAspectCount),
          initial_layout_states_(),
          initial_layout_state_map_(0, aspect_size_ * AspectTraits::kAspectCount) {
        // Setup the row <-> aspect/mip_level base Encode/Decode LUT...
        aspect_offsets_[0] = 0;
        for (size_t i = 1; i < aspect_offsets_.size(); ++i) {  // Size is a compile time constant
            aspect_offsets_[i] = aspect_offsets_[i - 1] + aspect_size_;
        }
    }
    ~ImageSubresourceLayoutMapImpl() override {}

   protected:
    // This looks a bit ponderous but kAspectCount is a compile time constant
    VkImageSubresource Decode(size_t index) const {
        VkImageSubresource subres;
        // find aspect index
        uint32_t aspect_index = 0;
        if (AspectTraits::kAspectCount == 2) {
            if (index >= aspect_offsets_[1]) {
                aspect_index = 1;
                index = index - aspect_offsets_[aspect_index];
            }
        } else if (AspectTraits::kAspectCount == 3) {
            if (index >= aspect_offsets_[2]) {
                aspect_index = 2;
            } else if (index >= aspect_offsets_[1]) {
                aspect_index = 1;
            }
            index = index - aspect_offsets_[aspect_index];
        } else {
            assert(AspectTraits::kAspectCount == 1);  // Only aspect counts of 1, 2, and 3 supported
        }

        subres.aspectMask = AspectTraits::AspectBits()[aspect_index];
        subres.mipLevel =
            static_cast<uint32_t>(index / mip_size_);  // One hopes the compiler will optimize this pair of divisions...
        subres.arrayLayer = static_cast<uint32_t>(index % mip_size_);

        return subres;
    }

    uint32_t LevelLimit(uint32_t level) const { return (std::min)(image_state_.full_range.levelCount, level); }
    uint32_t LayerLimit(uint32_t layer) const { return (std::min)(image_state_.full_range.layerCount, layer); }

    bool InRange(const VkImageSubresource &subres) const {
        bool in_range = (subres.mipLevel < image_state_.full_range.levelCount) &&
                        (subres.arrayLayer < image_state_.full_range.layerCount) &&
                        (subres.aspectMask & AspectTraits::AspectMask());
        return in_range;
    }

    bool InRange(const VkImageSubresourceRange &range) const {
        bool in_range = (range.baseMipLevel < image_state_.full_range.levelCount) &&
                        ((range.baseMipLevel + range.levelCount) <= image_state_.full_range.levelCount) &&
                        (range.baseArrayLayer < image_state_.full_range.layerCount) &&
                        ((range.baseArrayLayer + range.layerCount) <= image_state_.full_range.layerCount) &&
                        (range.aspectMask & AspectTraits::AspectMask());
        return in_range;
    }

    inline size_t Encode(uint32_t aspect_index) const {
        return (AspectTraits::kAspectCount == 1) ? 0 : aspect_offsets_[aspect_index];
    }
    inline size_t Encode(uint32_t aspect_index, uint32_t mip_level) const { return Encode(aspect_index) + mip_level * mip_size_; }
    inline size_t Encode(uint32_t aspect_index, uint32_t mip_level, uint32_t array_layer) const {
        return Encode(aspect_index, mip_level) + array_layer;
    }
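
    // Worked example (hypothetical values): with DepthStencilAspectTraits and an
    // image of 4 mips x 6 layers, mip_size_ = 6, aspect_size_ = 24, and the
    // stencil aspect starts at aspect_offsets_[1] = 24. The subresource
    // {VK_IMAGE_ASPECT_STENCIL_BIT, mip 2, layer 3} encodes to
    //   Encode(1, 2, 3) = 24 + 2 * 6 + 3 = 39
    // and Decode(39) recovers the same aspect/mip/layer triple.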

    InitialLayoutState *UpdateInitialLayoutState(size_t start, size_t end, InitialLayoutState *initial_state,
                                                 const CMD_BUFFER_STATE &cb_state, const IMAGE_VIEW_STATE *view_state) {
        if (!initial_state) {
            // Allocate on demand...  initial_layout_states_ holds ownership as a unique_ptr, while
            // each subresource has a non-owning copy of the plain pointer.
            initial_state = new InitialLayoutState(cb_state, view_state);
            initial_layout_states_.emplace_back(initial_state);
        }
        assert(initial_state);
        initial_layout_state_map_.SetRange(start, end, initial_state);
        return initial_state;
    }

    typedef std::vector<std::unique_ptr<InitialLayoutState>> InitialLayoutStates;
    // This map *also* needs "write once" semantics
    typedef sparse_container::SparseVector<size_t, InitialLayoutState *, false, nullptr, kSparseThreshold> InitialLayoutStateMap;

    const IMAGE_STATE &image_state_;
    const size_t mip_size_;
    const size_t aspect_size_;
    uint64_t version_ = 0;
    Layouts layouts_;
    InitialLayoutStates initial_layout_states_;
    InitialLayoutStateMap initial_layout_state_map_;
    std::array<size_t, AspectTraits::kAspectCount> aspect_offsets_;
};

static VkImageLayout NormalizeImageLayout(VkImageLayout layout, VkImageLayout non_normal, VkImageLayout normal) {
    return (layout == non_normal) ? normal : layout;
}

static VkImageLayout NormalizeDepthImageLayout(VkImageLayout layout) {
    return NormalizeImageLayout(layout, VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL,
                                VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL);
}

static VkImageLayout NormalizeStencilImageLayout(VkImageLayout layout) {
    return NormalizeImageLayout(layout, VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL,
                                VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL);
}

static bool ImageLayoutMatches(const VkImageAspectFlags aspect_mask, VkImageLayout a, VkImageLayout b) {
    bool matches = (a == b);
    if (!matches) {
        // Relaxed rules when referencing *only* the depth or stencil aspects
        if (aspect_mask == VK_IMAGE_ASPECT_DEPTH_BIT) {
            matches = NormalizeDepthImageLayout(a) == NormalizeDepthImageLayout(b);
        } else if (aspect_mask == VK_IMAGE_ASPECT_STENCIL_BIT) {
            matches = NormalizeStencilImageLayout(a) == NormalizeStencilImageLayout(b);
        }
    }
    return matches;
}
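
// Illustrative example: when only the depth aspect is referenced, the relaxed
// rule treats these two layouts as equivalent, since both normalize to
// DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL:
//   ImageLayoutMatches(VK_IMAGE_ASPECT_DEPTH_BIT,
//                      VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL,
//                      VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL);  // true
//   ImageLayoutMatches(VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT,
//                      VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL,
//                      VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL);  // false: exact match required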

// Utility type for ForRange callbacks
struct LayoutUseCheckAndMessage {
    const static VkImageAspectFlags kDepthOrStencil = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
    const ImageSubresourceLayoutMap *layout_map;
    const VkImageAspectFlags aspect_mask;
    const char *message;
    VkImageLayout layout;

    LayoutUseCheckAndMessage() = delete;
    LayoutUseCheckAndMessage(const ImageSubresourceLayoutMap *layout_map_, const VkImageAspectFlags aspect_mask_ = 0)
        : layout_map(layout_map_), aspect_mask{aspect_mask_}, message(nullptr), layout(kInvalidLayout) {}
    bool Check(const VkImageSubresource &subres, VkImageLayout check, VkImageLayout current_layout, VkImageLayout initial_layout) {
        message = nullptr;
        layout = kInvalidLayout;  // Success status
        if (current_layout != kInvalidLayout && !ImageLayoutMatches(aspect_mask, check, current_layout)) {
            message = "previous known";
            layout = current_layout;
        } else if ((initial_layout != kInvalidLayout) && !ImageLayoutMatches(aspect_mask, check, initial_layout)) {
            // To check the relaxed rule matching we need to see how the initial layout was used
            const auto initial_layout_state = layout_map->GetSubresourceInitialLayoutState(subres);
            assert(initial_layout_state);  // If we have an initial layout, we better have a state for it
            if (!((initial_layout_state->aspect_mask & kDepthOrStencil) &&
                  ImageLayoutMatches(initial_layout_state->aspect_mask, check, initial_layout))) {
                message = "previously used";
                layout = initial_layout;
            }
        }
        return layout == kInvalidLayout;
    }
};
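
// Illustrative usage sketch (hypothetical locals 'map', 'range', and 'expected'):
// driving the check from an ImageSubresourceLayoutMap::ForRange traversal.
//   LayoutUseCheckAndMessage layout_check(map);
//   map->ForRange(range, [&](const VkImageSubresource &subres, VkImageLayout layout,
//                            VkImageLayout initial_layout) {
//       if (!layout_check.Check(subres, expected, layout, initial_layout)) {
//           // Mismatch: layout_check.message and layout_check.layout describe the
//           // conflicting ("previous known" / "previously used") layout.
//       }
//       return true;  // keep traversing the range
//   });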

// Store the DAG.
struct DAGNode {
    uint32_t pass;
    std::vector<uint32_t> prev;
    std::vector<uint32_t> next;
};

struct RENDER_PASS_STATE : public BASE_NODE {
    VkRenderPass renderPass;
    safe_VkRenderPassCreateInfo2KHR createInfo;
    std::vector<std::vector<uint32_t>> self_dependencies;
    std::vector<DAGNode> subpassToNode;
    std::unordered_map<uint32_t, bool> attachment_first_read;

    RENDER_PASS_STATE(VkRenderPassCreateInfo2KHR const *pCreateInfo) : createInfo(pCreateInfo) {}
    RENDER_PASS_STATE(VkRenderPassCreateInfo const *pCreateInfo) { ConvertVkRenderPassCreateInfoToV2KHR(pCreateInfo, &createInfo); }
};

// Autogenerated as part of the vk_validation_error_message.h codegen
enum CMD_TYPE { VUID_CMD_ENUM_LIST(CMD_) };

enum CB_STATE {
    CB_NEW,                 // Newly created CB w/o any cmds
    CB_RECORDING,           // BeginCB has been called on this CB
    CB_RECORDED,            // EndCB has been called on this CB
    CB_INVALID_COMPLETE,    // had a complete recording, but was since invalidated
    CB_INVALID_INCOMPLETE,  // fouled before recording was completed
};

// CB Status -- used to track status of various bindings on cmd buffer objects
typedef VkFlags CBStatusFlags;
enum CBStatusFlagBits {
    // clang-format off
    CBSTATUS_NONE                   = 0x00000000,   // No status is set
    CBSTATUS_LINE_WIDTH_SET         = 0x00000001,   // Line width has been set
    CBSTATUS_DEPTH_BIAS_SET         = 0x00000002,   // Depth bias has been set
    CBSTATUS_BLEND_CONSTANTS_SET    = 0x00000004,   // Blend constants state has been set
    CBSTATUS_DEPTH_BOUNDS_SET       = 0x00000008,   // Depth bounds state object has been set
    CBSTATUS_STENCIL_READ_MASK_SET  = 0x00000010,   // Stencil read mask has been set
    CBSTATUS_STENCIL_WRITE_MASK_SET = 0x00000020,   // Stencil write mask has been set
    CBSTATUS_STENCIL_REFERENCE_SET  = 0x00000040,   // Stencil reference has been set
    CBSTATUS_VIEWPORT_SET           = 0x00000080,
    CBSTATUS_SCISSOR_SET            = 0x00000100,
    CBSTATUS_INDEX_BUFFER_BOUND     = 0x00000200,   // Index buffer has been set
    CBSTATUS_EXCLUSIVE_SCISSOR_SET  = 0x00000400,
    CBSTATUS_SHADING_RATE_PALETTE_SET = 0x00000800,
    CBSTATUS_LINE_STIPPLE_SET       = 0x00001000,
    CBSTATUS_ALL_STATE_SET          = 0x00001DFF,   // All state set (intentionally exclude index buffer)
    // clang-format on
};

struct QueryObject {
    VkQueryPool pool;
    uint32_t query;
    // These next two fields are *not* used in hash or comparison, they are effectively a data payload
    uint32_t index;  // must be zero if !indexed
    bool indexed;
    QueryObject(VkQueryPool pool_, uint32_t query_) : pool(pool_), query(query_), index(0), indexed(false) {}
    QueryObject(VkQueryPool pool_, uint32_t query_, uint32_t index_) : pool(pool_), query(query_), index(index_), indexed(true) {}
    bool operator<(const QueryObject &rhs) const { return (pool == rhs.pool) ? query < rhs.query : pool < rhs.pool; }
};

enum QueryState {
    QUERYSTATE_UNKNOWN,    // Initial state.
    QUERYSTATE_RESET,      // After resetting.
    QUERYSTATE_RUNNING,    // Query running.
    QUERYSTATE_ENDED,      // Query ended but results may not be available.
    QUERYSTATE_AVAILABLE,  // Results available.
};

enum QueryResultType {
    QUERYRESULT_UNKNOWN,
    QUERYRESULT_NO_DATA,
    QUERYRESULT_MAYBE_NO_DATA,
    QUERYRESULT_SOME_DATA,
    QUERYRESULT_WAIT_ON_RESET,
    QUERYRESULT_WAIT_ON_RUNNING,
};

inline const char *string_QueryResultType(QueryResultType result_type) {
    switch (result_type) {
        case QUERYRESULT_UNKNOWN:
            return "query may be in an unknown state";
        case QUERYRESULT_NO_DATA:
        case QUERYRESULT_MAYBE_NO_DATA:
            return "query may return no data";
        case QUERYRESULT_SOME_DATA:
            return "query will return some data or availability bit";
        case QUERYRESULT_WAIT_ON_RESET:
            return "waiting on a query that has been reset and not issued yet";
        case QUERYRESULT_WAIT_ON_RUNNING:
            return "waiting on a query that has not ended yet";
    }
    assert(false);
    return "UNKNOWN QUERY STATE";  // Unreachable.
}

inline bool operator==(const QueryObject &query1, const QueryObject &query2) {
    return ((query1.pool == query2.pool) && (query1.query == query2.query));
}

namespace std {
template <>
struct hash<QueryObject> {
    size_t operator()(QueryObject query) const throw() {
        return hash<uint64_t>()((uint64_t)(query.pool)) ^ hash<uint32_t>()(query.query);
    }
};
}  // namespace std

struct CBVertexBufferBindingInfo {
    std::vector<BufferBinding> vertex_buffer_bindings;
};

struct ImageSubresourcePair {
    VkImage image;
    bool hasSubresource;
    VkImageSubresource subresource;
};

inline bool operator==(const ImageSubresourcePair &img1, const ImageSubresourcePair &img2) {
    if (img1.image != img2.image || img1.hasSubresource != img2.hasSubresource) return false;
    return !img1.hasSubresource ||
           (img1.subresource.aspectMask == img2.subresource.aspectMask && img1.subresource.mipLevel == img2.subresource.mipLevel &&
            img1.subresource.arrayLayer == img2.subresource.arrayLayer);
}

namespace std {
template <>
struct hash<ImageSubresourcePair> {
    size_t operator()(ImageSubresourcePair img) const throw() {
        size_t hashVal = hash<uint64_t>()(reinterpret_cast<uint64_t &>(img.image));
        hashVal ^= hash<bool>()(img.hasSubresource);
        if (img.hasSubresource) {
            hashVal ^= hash<uint32_t>()(reinterpret_cast<uint32_t &>(img.subresource.aspectMask));
            hashVal ^= hash<uint32_t>()(img.subresource.mipLevel);
            hashVal ^= hash<uint32_t>()(img.subresource.arrayLayer);
        }
        return hashVal;
    }
};
}  // namespace std

// Canonical dictionary for PushConstantRanges
using PushConstantRangesDict = hash_util::Dictionary<PushConstantRanges>;
using PushConstantRangesId = PushConstantRangesDict::Id;

// Canonical dictionary for the pipeline layout's layout of descriptorsetlayouts
using DescriptorSetLayoutDef = cvdescriptorset::DescriptorSetLayoutDef;
using DescriptorSetLayoutId = std::shared_ptr<const DescriptorSetLayoutDef>;
using PipelineLayoutSetLayoutsDef = std::vector<DescriptorSetLayoutId>;
using PipelineLayoutSetLayoutsDict =
    hash_util::Dictionary<PipelineLayoutSetLayoutsDef, hash_util::IsOrderedContainer<PipelineLayoutSetLayoutsDef>>;
using PipelineLayoutSetLayoutsId = PipelineLayoutSetLayoutsDict::Id;

// Defines/stores a compatibility definition for set N
// The "layout layout" must store at least set+1 entries, but only the first set+1 are considered for hash and equality testing
// Note: the "canonical" data are referenced by Id, not including handle or device specific state
// Note: hash and equality only consider layout_id entries [0, set] for determining uniqueness
struct PipelineLayoutCompatDef {
    uint32_t set;
    PushConstantRangesId push_constant_ranges;
    PipelineLayoutSetLayoutsId set_layouts_id;
    PipelineLayoutCompatDef(const uint32_t set_index, const PushConstantRangesId pcr_id, const PipelineLayoutSetLayoutsId sl_id)
        : set(set_index), push_constant_ranges(pcr_id), set_layouts_id(sl_id) {}
    size_t hash() const;
    bool operator==(const PipelineLayoutCompatDef &other) const;
};
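
// Illustrative note (assumed semantics): two pipeline layouts are compatible for
// set N when their push constant ranges match and their descriptor set layouts
// agree for sets 0..N; because PipelineLayoutCompatDef hashes only entries
// [0, set], layouts that diverge beyond set N can still share the same
// PipelineLayoutCompatId for that set.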

// Canonical dictionary for PipelineLayoutCompat records
using PipelineLayoutCompatDict = hash_util::Dictionary<PipelineLayoutCompatDef, hash_util::HasHashMember<PipelineLayoutCompatDef>>;
using PipelineLayoutCompatId = PipelineLayoutCompatDict::Id;

// Store layouts and pushconstants for PipelineLayout
struct PIPELINE_LAYOUT_STATE {
    VkPipelineLayout layout;
    std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> set_layouts;
    PushConstantRangesId push_constant_ranges;
    std::vector<PipelineLayoutCompatId> compat_for_set;

    PIPELINE_LAYOUT_STATE() : layout(VK_NULL_HANDLE), set_layouts{}, push_constant_ranges{}, compat_for_set{} {}

    void reset() {
        layout = VK_NULL_HANDLE;
        set_layouts.clear();
        push_constant_ranges.reset();
        compat_for_set.clear();
    }
};
// Shader typedefs needed to store StageState below
struct interface_var {
    uint32_t id;
    uint32_t type_id;
    uint32_t offset;
    bool is_patch;
    bool is_block_member;
    bool is_relaxed_precision;
    // TODO: collect the name, too? Isn't required to be present.
};
typedef std::pair<unsigned, unsigned> descriptor_slot_t;

1130 class PIPELINE_STATE : public BASE_NODE {
1131    public:
1132     struct StageState {
1133         std::unordered_set<uint32_t> accessible_ids;
1134         std::vector<std::pair<descriptor_slot_t, interface_var>> descriptor_uses;
1135         bool has_writable_descriptor;
1136     };
1137 
1138     VkPipeline pipeline;
1139     safe_VkGraphicsPipelineCreateInfo graphicsPipelineCI;
1140     safe_VkComputePipelineCreateInfo computePipelineCI;
1141     safe_VkRayTracingPipelineCreateInfoNV raytracingPipelineCI;
1142     // Hold shared ptr to RP in case RP itself is destroyed
1143     std::shared_ptr<RENDER_PASS_STATE> rp_state;
1144     // Flag of which shader stages are active for this pipeline
1145     uint32_t active_shaders;
1146     uint32_t duplicate_shaders;
1147     // Capture which slots (set#->bindings) are actually used by the shaders of this pipeline
1148     std::unordered_map<uint32_t, BindingReqMap> active_slots;
1149     // Additional metadata needed by pipeline_state initialization and validation
1150     std::vector<StageState> stage_state;
1151     // Vtx input info (if any)
1152     std::vector<VkVertexInputBindingDescription> vertex_binding_descriptions_;
1153     std::vector<VkVertexInputAttributeDescription> vertex_attribute_descriptions_;
1154     std::unordered_map<uint32_t, uint32_t> vertex_binding_to_index_map_;
1155     std::vector<VkPipelineColorBlendAttachmentState> attachments;
1156     bool blendConstantsEnabled;  // Blend constants enabled for any attachments
1157     PIPELINE_LAYOUT_STATE pipeline_layout;
1158     VkPrimitiveTopology topology_at_rasterizer;
1159 
1160     // Default constructor
1161     PIPELINE_STATE()
1162         : pipeline{},
1163           graphicsPipelineCI{},
1164           computePipelineCI{},
1165           raytracingPipelineCI{},
1166           rp_state(nullptr),
1167           active_shaders(0),
1168           duplicate_shaders(0),
1169           active_slots(),
1170           vertex_binding_descriptions_(),
1171           vertex_attribute_descriptions_(),
1172           vertex_binding_to_index_map_(),
1173           attachments(),
1174           blendConstantsEnabled(false),
1175           pipeline_layout(),
1176           topology_at_rasterizer{} {}
1177 
1178     void reset() {
1179         VkGraphicsPipelineCreateInfo emptyGraphicsCI = {};
1180         graphicsPipelineCI.initialize(&emptyGraphicsCI, false, false);
1181         VkComputePipelineCreateInfo emptyComputeCI = {};
1182         computePipelineCI.initialize(&emptyComputeCI);
1183         VkRayTracingPipelineCreateInfoNV emptyRayTracingCI = {};
1184         raytracingPipelineCI.initialize(&emptyRayTracingCI);
1185         stage_state.clear();
1186     }
1187 
1188     void initGraphicsPipeline(ValidationStateTracker *state_data, const VkGraphicsPipelineCreateInfo *pCreateInfo,
1189                               std::shared_ptr<RENDER_PASS_STATE> &&rpstate);
1190     void initComputePipeline(ValidationStateTracker *state_data, const VkComputePipelineCreateInfo *pCreateInfo);
1191     void initRayTracingPipelineNV(ValidationStateTracker *state_data, const VkRayTracingPipelineCreateInfoNV *pCreateInfo);
1192 
1193     inline VkPipelineBindPoint getPipelineType() const {
1194         if (graphicsPipelineCI.sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO)
1195             return VK_PIPELINE_BIND_POINT_GRAPHICS;
1196         else if (computePipelineCI.sType == VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO)
1197             return VK_PIPELINE_BIND_POINT_COMPUTE;
1198         else if (raytracingPipelineCI.sType == VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_NV)
1199             return VK_PIPELINE_BIND_POINT_RAY_TRACING_NV;
1200         else
1201             return VK_PIPELINE_BIND_POINT_MAX_ENUM;
1202     }
1203 
1204     inline VkPipelineCreateFlags getPipelineCreateFlags() const {
1205         if (graphicsPipelineCI.sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO)
1206             return graphicsPipelineCI.flags;
1207         else if (computePipelineCI.sType == VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO)
1208             return computePipelineCI.flags;
1209         else if (raytracingPipelineCI.sType == VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_NV)
1210             return raytracingPipelineCI.flags;
1211         else
1212             return 0;
1213     }
1214 };
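// Minimal usage sketch (hypothetical helper, not part of the layer API): the
// sType of whichever create-info member was initialized acts as a discriminator,
// so callers can branch on getPipelineType() instead of tracking the pipeline
// flavor separately.
static inline bool IsGraphicsPipeline(const PIPELINE_STATE *pipe_state) {
    return pipe_state && (pipe_state->getPipelineType() == VK_PIPELINE_BIND_POINT_GRAPHICS);
}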

// Track last states that are bound per pipeline bind point (Gfx & Compute)
struct LAST_BOUND_STATE {
    LAST_BOUND_STATE() { reset(); }  // must define default constructor for portability reasons
    PIPELINE_STATE *pipeline_state;
    VkPipelineLayout pipeline_layout;
    std::unique_ptr<cvdescriptorset::DescriptorSet> push_descriptor_set;

    // Ordered bound set tracking where index is set# that given set is bound to
    struct PER_SET {
        PER_SET()
            : bound_descriptor_set(nullptr),
              compat_id_for_set(0),
              validated_set(nullptr),
              validated_set_change_count(~0ULL),
              validated_set_image_layout_change_count(~0ULL),
              validated_set_binding_req_map() {}

        cvdescriptorset::DescriptorSet *bound_descriptor_set;
        // one dynamic offset per dynamic descriptor bound to this CB
        std::vector<uint32_t> dynamicOffsets;
        PipelineLayoutCompatId compat_id_for_set;

        // Cache most recently validated descriptor state for ValidateCmdBufDrawState/UpdateDrawState
        const cvdescriptorset::DescriptorSet *validated_set;
        uint64_t validated_set_change_count;
        uint64_t validated_set_image_layout_change_count;
        BindingReqMap validated_set_binding_req_map;
    };

    std::vector<PER_SET> per_set;

    void reset() {
        pipeline_state = nullptr;
        pipeline_layout = VK_NULL_HANDLE;
        push_descriptor_set = nullptr;
        per_set.clear();
    }

    void UnbindAndResetPushDescriptorSet(cvdescriptorset::DescriptorSet *ds) {
        if (push_descriptor_set) {
            for (std::size_t i = 0; i < per_set.size(); i++) {
                if (per_set[i].bound_descriptor_set == push_descriptor_set.get()) {
                    per_set[i].bound_descriptor_set = nullptr;
                }
            }
        }
        push_descriptor_set.reset(ds);
    }
};

static inline bool CompatForSet(uint32_t set, const LAST_BOUND_STATE &a, const std::vector<PipelineLayoutCompatId> &b) {
    bool result = (set < a.per_set.size()) && (set < b.size()) && (a.per_set[set].compat_id_for_set == b[set]);
    return result;
}

static inline bool CompatForSet(uint32_t set, const PIPELINE_LAYOUT_STATE *a, const PIPELINE_LAYOUT_STATE *b) {
    // Intentionally have a result variable to simplify debugging
    bool result = a && b && (set < a->compat_for_set.size()) && (set < b->compat_for_set.size()) &&
                  (a->compat_for_set[set] == b->compat_for_set[set]);
    return result;
}
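// Illustrative sketch (hypothetical helper): because CompatForSet returns false
// once either layout runs out of sets, scanning for the first incompatible set#
// (the point from which previously bound descriptor sets are disturbed) is a
// simple loop over the canonical ids.
static inline uint32_t FirstIncompatibleSet(const PIPELINE_LAYOUT_STATE *a, const PIPELINE_LAYOUT_STATE *b) {
    uint32_t set = 0;
    while (CompatForSet(set, a, b)) ++set;
    return set;  // 0 if the layouts share no compatible sets at all
}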

// Types to store queue family ownership (QFO) transfers

// Common to image and buffer memory barriers
template <typename Handle, typename Barrier>
struct QFOTransferBarrierBase {
    using HandleType = Handle;
    using BarrierType = Barrier;
    struct Tag {};
    HandleType handle = VK_NULL_HANDLE;
    uint32_t srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    uint32_t dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;

    QFOTransferBarrierBase() = default;
    QFOTransferBarrierBase(const BarrierType &barrier, const HandleType &resource_handle)
        : handle(resource_handle),
          srcQueueFamilyIndex(barrier.srcQueueFamilyIndex),
          dstQueueFamilyIndex(barrier.dstQueueFamilyIndex) {}

    hash_util::HashCombiner base_hash_combiner() const {
        hash_util::HashCombiner hc;
        hc << srcQueueFamilyIndex << dstQueueFamilyIndex << handle;
        return hc;
    }

    bool operator==(const QFOTransferBarrierBase &rhs) const {
        return (srcQueueFamilyIndex == rhs.srcQueueFamilyIndex) && (dstQueueFamilyIndex == rhs.dstQueueFamilyIndex) &&
               (handle == rhs.handle);
    }
};

template <typename Barrier>
struct QFOTransferBarrier {};

// Image barrier specific implementation
template <>
struct QFOTransferBarrier<VkImageMemoryBarrier> : public QFOTransferBarrierBase<VkImage, VkImageMemoryBarrier> {
    using BaseType = QFOTransferBarrierBase<VkImage, VkImageMemoryBarrier>;
    VkImageLayout oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    VkImageLayout newLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    VkImageSubresourceRange subresourceRange;

    QFOTransferBarrier() = default;
    QFOTransferBarrier(const BarrierType &barrier)
        : BaseType(barrier, barrier.image),
          oldLayout(barrier.oldLayout),
          newLayout(barrier.newLayout),
          subresourceRange(barrier.subresourceRange) {}
    size_t hash() const {
        // Ignore the layout information for the purpose of the hash: we're interested in QFO release/acquisition w.r.t.
        // the subresource affected, and layout transitions are currently validated on another path
        auto hc = base_hash_combiner() << subresourceRange;
        return hc.Value();
    }
    bool operator==(const QFOTransferBarrier<BarrierType> &rhs) const {
        // Ignoring layout w.r.t. equality. See comment in hash above.
        return (static_cast<BaseType>(*this) == static_cast<BaseType>(rhs)) && (subresourceRange == rhs.subresourceRange);
    }
    // TODO: codegen a comprehensive compile-time type -> string (and/or other traits) template family
    static const char *BarrierName() { return "VkImageMemoryBarrier"; }
    static const char *HandleName() { return "VkImage"; }
    // UNASSIGNED-VkImageMemoryBarrier-image-00001 QFO transfer image barrier must not duplicate QFO recorded in command buffer
    static const char *ErrMsgDuplicateQFOInCB() { return "UNASSIGNED-VkImageMemoryBarrier-image-00001"; }
    // UNASSIGNED-VkImageMemoryBarrier-image-00002 QFO transfer image barrier must not duplicate QFO submitted in batch
    static const char *ErrMsgDuplicateQFOInSubmit() { return "UNASSIGNED-VkImageMemoryBarrier-image-00002"; }
    // UNASSIGNED-VkImageMemoryBarrier-image-00003 QFO transfer image barrier must not duplicate QFO submitted previously
    static const char *ErrMsgDuplicateQFOSubmitted() { return "UNASSIGNED-VkImageMemoryBarrier-image-00003"; }
    // UNASSIGNED-VkImageMemoryBarrier-image-00004 QFO acquire image barrier must have matching QFO release submitted previously
    static const char *ErrMsgMissingQFOReleaseInSubmit() { return "UNASSIGNED-VkImageMemoryBarrier-image-00004"; }
};

// Buffer barrier specific implementation
template <>
struct QFOTransferBarrier<VkBufferMemoryBarrier> : public QFOTransferBarrierBase<VkBuffer, VkBufferMemoryBarrier> {
    using BaseType = QFOTransferBarrierBase<VkBuffer, VkBufferMemoryBarrier>;
    VkDeviceSize offset = 0;
    VkDeviceSize size = 0;
    QFOTransferBarrier() = default;  // keep default-constructible, matching the image specialization
    QFOTransferBarrier(const VkBufferMemoryBarrier &barrier)
        : BaseType(barrier, barrier.buffer), offset(barrier.offset), size(barrier.size) {}
    size_t hash() const {
        auto hc = base_hash_combiner() << offset << size;
        return hc.Value();
    }
    bool operator==(const QFOTransferBarrier<BarrierType> &rhs) const {
        return (static_cast<BaseType>(*this) == static_cast<BaseType>(rhs)) && (offset == rhs.offset) && (size == rhs.size);
    }
    static const char *BarrierName() { return "VkBufferMemoryBarrier"; }
    static const char *HandleName() { return "VkBuffer"; }
    // UNASSIGNED-VkBufferMemoryBarrier-buffer-00001 QFO transfer buffer barrier must not duplicate QFO recorded in command buffer
    static const char *ErrMsgDuplicateQFOInCB() { return "UNASSIGNED-VkBufferMemoryBarrier-buffer-00001"; }
    // UNASSIGNED-VkBufferMemoryBarrier-buffer-00002 QFO transfer buffer barrier must not duplicate QFO submitted in batch
    static const char *ErrMsgDuplicateQFOInSubmit() { return "UNASSIGNED-VkBufferMemoryBarrier-buffer-00002"; }
    // UNASSIGNED-VkBufferMemoryBarrier-buffer-00003 QFO transfer buffer barrier must not duplicate QFO submitted previously
    static const char *ErrMsgDuplicateQFOSubmitted() { return "UNASSIGNED-VkBufferMemoryBarrier-buffer-00003"; }
    // UNASSIGNED-VkBufferMemoryBarrier-buffer-00004 QFO acquire buffer barrier must have matching QFO release submitted previously
    static const char *ErrMsgMissingQFOReleaseInSubmit() { return "UNASSIGNED-VkBufferMemoryBarrier-buffer-00004"; }
};

template <typename Barrier>
using QFOTransferBarrierHash = hash_util::HasHashMember<QFOTransferBarrier<Barrier>>;

// Command buffers store the set of barriers recorded
template <typename Barrier>
using QFOTransferBarrierSet = std::unordered_set<QFOTransferBarrier<Barrier>, QFOTransferBarrierHash<Barrier>>;
template <typename Barrier>
struct QFOTransferBarrierSets {
    QFOTransferBarrierSet<Barrier> release;
    QFOTransferBarrierSet<Barrier> acquire;
    void Reset() {
        acquire.clear();
        release.clear();
    }
};
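// Illustrative sketch (hypothetical helper): recording a QFO release barrier on
// the source queue family inserts into the release set; the matching acquire on
// the destination family would land in the acquire set. Duplicate inserts are
// exactly what the QFO validation reports.
static inline void RecordQFOImageRelease(QFOTransferBarrierSets<VkImageMemoryBarrier> &sets,
                                         const VkImageMemoryBarrier &barrier) {
    sets.release.emplace(QFOTransferBarrier<VkImageMemoryBarrier>(barrier));
}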

// The layer_data stores the map of pending release barriers
template <typename Barrier>
using GlobalQFOTransferBarrierMap =
    std::unordered_map<typename QFOTransferBarrier<Barrier>::HandleType, QFOTransferBarrierSet<Barrier>>;

// Submit queue uses the Scoreboard to track all release/acquire operations in a batch.
template <typename Barrier>
using QFOTransferCBScoreboard =
    std::unordered_map<QFOTransferBarrier<Barrier>, const CMD_BUFFER_STATE *, QFOTransferBarrierHash<Barrier>>;
template <typename Barrier>
struct QFOTransferCBScoreboards {
    QFOTransferCBScoreboard<Barrier> acquire;
    QFOTransferCBScoreboard<Barrier> release;
};
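// Illustrative sketch (hypothetical helper): a scoreboard claim fails when the
// same release/acquire barrier has already been seen within the batch, and the
// stored CMD_BUFFER_STATE pointer identifies the earlier command buffer for an
// error message.
template <typename Barrier>
static bool ClaimScoreboardEntry(QFOTransferCBScoreboard<Barrier> &scoreboard,
                                 const QFOTransferBarrier<Barrier> &barrier, const CMD_BUFFER_STATE *cb_state) {
    return scoreboard.emplace(barrier, cb_state).second;  // false => duplicate within this batch
}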

// Cmd Buffer Wrapper Struct - TODO : This desperately needs its own class
struct CMD_BUFFER_STATE : public BASE_NODE {
    VkCommandBuffer commandBuffer;
    VkCommandBufferAllocateInfo createInfo = {};
    VkCommandBufferBeginInfo beginInfo;
    VkCommandBufferInheritanceInfo inheritanceInfo;
    VkDevice device;  // device this CB belongs to
    bool hasDrawCmd;
    bool hasTraceRaysCmd;
    bool hasDispatchCmd;
    CB_STATE state;        // Track cmd buffer update state
    uint64_t submitCount;  // Number of times CB has been submitted
    typedef uint64_t ImageLayoutUpdateCount;
    ImageLayoutUpdateCount image_layout_change_count;  // The sequence number for changes to image layout (for cached validation)
    CBStatusFlags status;                              // Track status of various bindings on cmd buffer
    CBStatusFlags static_status;                       // All state bits provided by current graphics pipeline
                                                       // rather than dynamic state
    // Currently storing "lastBound" objects on per-CB basis
    //  long-term may want to create caches of "lastBound" states and could have
    //  each individual CMD_NODE referencing its own "lastBound" state
    // Store last bound state for Gfx & Compute pipeline bind points
    std::map<uint32_t, LAST_BOUND_STATE> lastBound;

    uint32_t viewportMask;
    uint32_t scissorMask;
    uint32_t initial_device_mask;

    VkRenderPassBeginInfo activeRenderPassBeginInfo;
    RENDER_PASS_STATE *activeRenderPass;
    VkSubpassContents activeSubpassContents;
    uint32_t active_render_pass_device_mask;
    uint32_t activeSubpass;
    VkFramebuffer activeFramebuffer;
    std::unordered_set<VkFramebuffer> framebuffers;
    // Unified data structs to track objects bound to this command buffer as well as object
    //  dependencies that have been broken: either destroyed objects, or updated descriptor sets
    std::unordered_set<VulkanTypedHandle> object_bindings;
    std::vector<VulkanTypedHandle> broken_bindings;

    QFOTransferBarrierSets<VkBufferMemoryBarrier> qfo_transfer_buffer_barriers;
    QFOTransferBarrierSets<VkImageMemoryBarrier> qfo_transfer_image_barriers;

    std::unordered_set<VkEvent> waitedEvents;
    std::vector<VkEvent> writeEventsBeforeWait;
    std::vector<VkEvent> events;
    std::map<QueryObject, QueryState> queryToStateMap;
    std::unordered_set<QueryObject> activeQueries;
    std::unordered_set<QueryObject> startedQueries;
    typedef std::unordered_map<VkImage, std::unique_ptr<ImageSubresourceLayoutMap>> ImageLayoutMap;
    ImageLayoutMap image_layout_map;
    std::unordered_map<VkEvent, VkPipelineStageFlags> eventToStageMap;
    std::vector<CBVertexBufferBindingInfo> cb_vertex_buffer_binding_info;
    CBVertexBufferBindingInfo current_vertex_buffer_binding_info;
    bool vertex_buffer_used;  // Track for perf warning to make sure any bound vertex buffer is actually used
    VkCommandBuffer primaryCommandBuffer;
    // If primary, the secondary command buffers we will call.
    // If secondary, the primary command buffers we will be called by.
    std::unordered_set<CMD_BUFFER_STATE *> linkedCommandBuffers;
    // Validation functions run at primary CB queue submit time
    std::vector<std::function<bool()>> queue_submit_functions;
    // Validation functions run when secondary CB is executed in primary
    std::vector<std::function<bool(const CMD_BUFFER_STATE *, VkFramebuffer)>> cmd_execute_commands_functions;
    std::unordered_set<VkDeviceMemory> memObjs;
    std::vector<std::function<bool(VkQueue)>> eventUpdates;
    std::vector<std::function<bool(VkQueue)>> queryUpdates;
    std::unordered_set<cvdescriptorset::DescriptorSet *> validated_descriptor_sets;
    // Contents valid only after an index buffer is bound (CBSTATUS_INDEX_BUFFER_BOUND set)
    IndexBufferBinding index_buffer_binding;

    // Cache of the current insert label
    LoggingLabel debug_label;
};

static inline const QFOTransferBarrierSets<VkImageMemoryBarrier> &GetQFOBarrierSets(
    const CMD_BUFFER_STATE *cb, const QFOTransferBarrier<VkImageMemoryBarrier>::Tag &type_tag) {
    return cb->qfo_transfer_image_barriers;
}
static inline const QFOTransferBarrierSets<VkBufferMemoryBarrier> &GetQFOBarrierSets(
    const CMD_BUFFER_STATE *cb, const QFOTransferBarrier<VkBufferMemoryBarrier>::Tag &type_tag) {
    return cb->qfo_transfer_buffer_barriers;
}
static inline QFOTransferBarrierSets<VkImageMemoryBarrier> &GetQFOBarrierSets(
    CMD_BUFFER_STATE *cb, const QFOTransferBarrier<VkImageMemoryBarrier>::Tag &type_tag) {
    return cb->qfo_transfer_image_barriers;
}
static inline QFOTransferBarrierSets<VkBufferMemoryBarrier> &GetQFOBarrierSets(
    CMD_BUFFER_STATE *cb, const QFOTransferBarrier<VkBufferMemoryBarrier>::Tag &type_tag) {
    return cb->qfo_transfer_buffer_barriers;
}
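// Illustrative sketch (hypothetical helper): the empty Tag type exists purely to
// steer overload resolution, letting barrier-generic template code pick the
// image- or buffer-flavored sets without an if/else on the barrier type.
template <typename Barrier>
static size_t PendingQFOReleaseCount(const CMD_BUFFER_STATE *cb_state) {
    const auto &sets = GetQFOBarrierSets(cb_state, typename QFOTransferBarrier<Barrier>::Tag());
    return sets.release.size();
}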

struct SEMAPHORE_WAIT {
    VkSemaphore semaphore;
    VkQueue queue;
    uint64_t seq;
};

struct CB_SUBMISSION {
    CB_SUBMISSION(std::vector<VkCommandBuffer> const &cbs, std::vector<SEMAPHORE_WAIT> const &waitSemaphores,
                  std::vector<VkSemaphore> const &signalSemaphores, std::vector<VkSemaphore> const &externalSemaphores,
                  VkFence fence)
        : cbs(cbs),
          waitSemaphores(waitSemaphores),
          signalSemaphores(signalSemaphores),
          externalSemaphores(externalSemaphores),
          fence(fence) {}

    std::vector<VkCommandBuffer> cbs;
    std::vector<SEMAPHORE_WAIT> waitSemaphores;
    std::vector<VkSemaphore> signalSemaphores;
    std::vector<VkSemaphore> externalSemaphores;
    VkFence fence;
};
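// Usage sketch (hypothetical locals, mirroring how queue submit tracking builds
// these): one CB_SUBMISSION is recorded per VkSubmitInfo in a batch, and only
// the last submission of the batch carries the batch's fence.
//
//   submissions.emplace_back(cbs, semaphore_waits, semaphore_signals, external_semaphores,
//                            is_last_submission ? fence : VK_NULL_HANDLE);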

struct IMAGE_LAYOUT_STATE {
    VkImageLayout layout;
    VkFormat format;
};

struct MT_FB_ATTACHMENT_INFO {
    IMAGE_VIEW_STATE *view_state;
    VkImage image;
};

class FRAMEBUFFER_STATE : public BASE_NODE {
   public:
    VkFramebuffer framebuffer;
    safe_VkFramebufferCreateInfo createInfo;
    std::shared_ptr<RENDER_PASS_STATE> rp_state;
    FRAMEBUFFER_STATE(VkFramebuffer fb, const VkFramebufferCreateInfo *pCreateInfo, std::shared_ptr<RENDER_PASS_STATE> &&rpstate)
        : framebuffer(fb), createInfo(pCreateInfo), rp_state(rpstate){};
};

struct SHADER_MODULE_STATE;
struct DeviceExtensions;

struct DeviceFeatures {
    VkPhysicalDeviceFeatures core;
    VkPhysicalDeviceDescriptorIndexingFeaturesEXT descriptor_indexing;
    VkPhysicalDevice8BitStorageFeaturesKHR eight_bit_storage;
    VkPhysicalDeviceExclusiveScissorFeaturesNV exclusive_scissor;
    VkPhysicalDeviceShadingRateImageFeaturesNV shading_rate_image;
    VkPhysicalDeviceMeshShaderFeaturesNV mesh_shader;
    VkPhysicalDeviceInlineUniformBlockFeaturesEXT inline_uniform_block;
    VkPhysicalDeviceTransformFeedbackFeaturesEXT transform_feedback_features;
    VkPhysicalDeviceFloat16Int8FeaturesKHR float16_int8;
    VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT vtx_attrib_divisor_features;
    VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR uniform_buffer_standard_layout;
    VkPhysicalDeviceScalarBlockLayoutFeaturesEXT scalar_block_layout_features;
    VkPhysicalDeviceBufferAddressFeaturesEXT buffer_address;
    VkPhysicalDeviceCooperativeMatrixFeaturesNV cooperative_matrix_features;
    VkPhysicalDeviceFloatControlsPropertiesKHR float_controls;
    VkPhysicalDeviceHostQueryResetFeaturesEXT host_query_reset_features;
    VkPhysicalDeviceComputeShaderDerivativesFeaturesNV compute_shader_derivatives_features;
    VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV fragment_shader_barycentric_features;
    VkPhysicalDeviceShaderImageFootprintFeaturesNV shader_image_footprint_features;
    VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT fragment_shader_interlock_features;
    VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT demote_to_helper_invocation_features;
    VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT texel_buffer_alignment_features;
    VkPhysicalDeviceImagelessFramebufferFeaturesKHR imageless_framebuffer_features;
    VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR pipeline_exe_props_features;
};

enum RenderPassCreateVersion { RENDER_PASS_VERSION_1 = 0, RENDER_PASS_VERSION_2 = 1 };

struct ShaderTracker {
    VkPipeline pipeline;
    VkShaderModule shader_module;
    std::vector<unsigned int> pgm;
};

enum BarrierOperationsType {
    kAllAcquire,  // All barrier operations are "ownership acquire" operations
    kAllRelease,  // All barrier operations are "ownership release" operations
    kGeneral,     // Either no ownership operations or a mix of ownership operation types and/or non-ownership operations
};

std::shared_ptr<cvdescriptorset::DescriptorSetLayout const> const GetDescriptorSetLayout(const ValidationStateTracker *,
                                                                                         VkDescriptorSetLayout);

ImageSubresourceLayoutMap *GetImageSubresourceLayoutMap(CMD_BUFFER_STATE *cb_state, const IMAGE_STATE &image_state);
const ImageSubresourceLayoutMap *GetImageSubresourceLayoutMap(const CMD_BUFFER_STATE *cb_state, VkImage image);

#endif  // CORE_VALIDATION_TYPES_H_