/* Copyright (c) 2015-2017 The Khronos Group Inc.
 * Copyright (c) 2015-2017 Valve Corporation
 * Copyright (c) 2015-2017 LunarG, Inc.
 * Copyright (C) 2015-2017 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Cody Northrop <cnorthrop@google.com>
 * Author: Michael Lentine <mlentine@google.com>
 * Author: Tobin Ehlis <tobine@google.com>
 * Author: Chia-I Wu <olv@google.com>
 * Author: Chris Forbes <chrisf@ijw.co.nz>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Ian Elliott <ianelliott@google.com>
 * Author: Dave Houlton <daveh@lunarg.com>
 * Author: Dustin Graves <dustin@lunarg.com>
 * Author: Jeremy Hayes <jeremy@lunarg.com>
 * Author: Jon Ashburn <jon@lunarg.com>
 * Author: Karl Schultz <karl@lunarg.com>
 * Author: Mark Young <marky@lunarg.com>
 * Author: Mike Schuchardt <mikes@lunarg.com>
 * Author: Mike Weiblen <mikew@lunarg.com>
 * Author: Tony Barbour <tony@LunarG.com>
 */

// Allow use of STL min and max functions in Windows
#define NOMINMAX

#include <algorithm>
#include <array>
#include <assert.h>
#include <iostream>
#include <list>
#include <map>
#include <memory>
#include <mutex>
#include <set>
#include <sstream>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <valarray>
#include <inttypes.h>

#include "vk_loader_platform.h"
#include "vk_dispatch_table_helper.h"
#include "vk_enum_string_helper.h"
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wwrite-strings"
#endif
#if defined(__GNUC__)
#pragma GCC diagnostic warning "-Wwrite-strings"
#endif
#include "core_validation.h"
#include "buffer_validation.h"
#include "shader_validation.h"
#include "vk_layer_table.h"
#include "vk_layer_data.h"
#include "vk_layer_extension_utils.h"
#include "vk_layer_utils.h"
#include "vk_typemap_helper.h"

#if defined __ANDROID__
#include <android/log.h>
#define LOGCONSOLE(...) ((void)__android_log_print(ANDROID_LOG_INFO, "DS", __VA_ARGS__))
#else
#define LOGCONSOLE(...)      \
    {                        \
        printf(__VA_ARGS__); \
        printf("\n");        \
    }
#endif
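
// Usage sketch (illustrative; the message below is made up): LOGCONSOLE takes
// printf-style arguments. On Android it routes through __android_log_print
// with the "DS" tag; elsewhere it printf()s the message plus a newline:
//     LOGCONSOLE("Cannot activate layer %s.", "VK_LAYER_GOOGLE_unique_objects");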

// This intentionally includes a cpp file
#include "vk_safe_struct.cpp"

using mutex_t = std::mutex;
using lock_guard_t = std::lock_guard<mutex_t>;
using unique_lock_t = std::unique_lock<mutex_t>;

namespace core_validation {

using std::max;
using std::string;
using std::stringstream;
using std::unique_ptr;
using std::unordered_map;
using std::unordered_set;
using std::vector;

// WSI Image Objects bypass usual Image Object creation methods.  A special Memory
// Object value will be used to identify them internally.
static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);
// 2nd special memory handle used to flag object as unbound from memory
static const VkDeviceMemory MEMORY_UNBOUND = VkDeviceMemory(~((uint64_t)(0)) - 1);
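
// Dispatch sketch (illustrative): code below treats these reserved handle
// values as tags on a BINDABLE's binding.mem, conceptually:
//     if (image_state->binding.mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
//         // image is owned by a swapchain, not by a user allocation
//     } else if (image_state->binding.mem == MEMORY_UNBOUND) {
//         // previously bound memory has since been freed
//     }
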
struct instance_layer_data {
    VkInstance instance = VK_NULL_HANDLE;
    debug_report_data *report_data = nullptr;
    std::vector<VkDebugReportCallbackEXT> logging_callback;
    VkLayerInstanceDispatchTable dispatch_table;

    CALL_STATE vkEnumeratePhysicalDevicesState = UNCALLED;
    uint32_t physical_devices_count = 0;
    CALL_STATE vkEnumeratePhysicalDeviceGroupsState = UNCALLED;
    uint32_t physical_device_groups_count = 0;
    CHECK_DISABLED disabled = {};

    unordered_map<VkPhysicalDevice, PHYSICAL_DEVICE_STATE> physical_device_map;
    unordered_map<VkSurfaceKHR, SURFACE_STATE> surface_map;

    InstanceExtensions extensions;
};

struct layer_data {
    debug_report_data *report_data = nullptr;
    VkLayerDispatchTable dispatch_table;

    DeviceExtensions extensions = {};
    unordered_set<VkQueue> queues;  // All queues under given device
    // Layer specific data
    unordered_map<VkSampler, unique_ptr<SAMPLER_STATE>> samplerMap;
    unordered_map<VkImageView, unique_ptr<IMAGE_VIEW_STATE>> imageViewMap;
    unordered_map<VkImage, unique_ptr<IMAGE_STATE>> imageMap;
    unordered_map<VkBufferView, unique_ptr<BUFFER_VIEW_STATE>> bufferViewMap;
    unordered_map<VkBuffer, unique_ptr<BUFFER_STATE>> bufferMap;
    unordered_map<VkPipeline, unique_ptr<PIPELINE_STATE>> pipelineMap;
    unordered_map<VkCommandPool, COMMAND_POOL_NODE> commandPoolMap;
    unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_STATE *> descriptorPoolMap;
    unordered_map<VkDescriptorSet, cvdescriptorset::DescriptorSet *> setMap;
    unordered_map<VkDescriptorSetLayout, std::shared_ptr<cvdescriptorset::DescriptorSetLayout>> descriptorSetLayoutMap;
    unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
    unordered_map<VkDeviceMemory, unique_ptr<DEVICE_MEM_INFO>> memObjMap;
    unordered_map<VkFence, FENCE_NODE> fenceMap;
    unordered_map<VkQueue, QUEUE_STATE> queueMap;
    unordered_map<VkEvent, EVENT_STATE> eventMap;
    unordered_map<QueryObject, bool> queryToStateMap;
    unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
    unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
    unordered_map<VkCommandBuffer, GLOBAL_CB_NODE *> commandBufferMap;
    unordered_map<VkFramebuffer, unique_ptr<FRAMEBUFFER_STATE>> frameBufferMap;
    unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
    unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
    unordered_map<VkRenderPass, std::shared_ptr<RENDER_PASS_STATE>> renderPassMap;
    unordered_map<VkShaderModule, unique_ptr<shader_module>> shaderModuleMap;
    unordered_map<VkDescriptorUpdateTemplateKHR, unique_ptr<TEMPLATE_STATE>> desc_template_map;
    unordered_map<VkSwapchainKHR, std::unique_ptr<SWAPCHAIN_NODE>> swapchainMap;

    VkDevice device = VK_NULL_HANDLE;
    VkPhysicalDevice physical_device = VK_NULL_HANDLE;

    instance_layer_data *instance_data = nullptr;  // from device to enclosing instance

    VkPhysicalDeviceFeatures enabled_features = {};
    // Device specific data
    PHYS_DEV_PROPERTIES_NODE phys_dev_properties = {};
    VkPhysicalDeviceMemoryProperties phys_dev_mem_props = {};
    VkPhysicalDeviceProperties phys_dev_props = {};
    // Device extension properties -- storing properties gathered from VkPhysicalDeviceProperties2KHR::pNext chain
    struct DeviceExtensionProperties {
        uint32_t max_push_descriptors;  // from VkPhysicalDevicePushDescriptorPropertiesKHR::maxPushDescriptors
    };
    DeviceExtensionProperties phys_dev_ext_props = {};
    bool external_sync_warning = false;
};

// TODO : Do we need to guard access to layer_data_map w/ lock?
static unordered_map<void *, layer_data *> layer_data_map;
static unordered_map<void *, instance_layer_data *> instance_layer_data_map;

static uint32_t loader_layer_if_version = CURRENT_LOADER_LAYER_INTERFACE_VERSION;

static const VkLayerProperties global_layer = {
    "VK_LAYER_LUNARG_core_validation",
    VK_LAYER_API_VERSION,
    1,
    "LunarG Validation Layer",
};

static const VkExtensionProperties device_extensions[] = {
    {VK_EXT_VALIDATION_CACHE_EXTENSION_NAME, VK_EXT_VALIDATION_CACHE_SPEC_VERSION},
};

template <class TCreateInfo>
void ValidateLayerOrdering(const TCreateInfo &createInfo) {
    bool foundLayer = false;
    for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
        if (!strcmp(createInfo.ppEnabledLayerNames[i], global_layer.layerName)) {
            foundLayer = true;
        }
        // This has to be logged to console as we don't have a callback at this point.
        if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[i], "VK_LAYER_GOOGLE_unique_objects")) {
            LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.", global_layer.layerName);
        }
    }
}

// TODO : This can be much smarter, using separate locks for separate global data
static mutex_t global_lock;

// Return IMAGE_VIEW_STATE ptr for specified imageView or else NULL
IMAGE_VIEW_STATE *GetImageViewState(const layer_data *dev_data, VkImageView image_view) {
    auto iv_it = dev_data->imageViewMap.find(image_view);
    if (iv_it == dev_data->imageViewMap.end()) {
        return nullptr;
    }
    return iv_it->second.get();
}
// Return sampler node ptr for specified sampler or else NULL
SAMPLER_STATE *GetSamplerState(const layer_data *dev_data, VkSampler sampler) {
    auto sampler_it = dev_data->samplerMap.find(sampler);
    if (sampler_it == dev_data->samplerMap.end()) {
        return nullptr;
    }
    return sampler_it->second.get();
}
// Return image state ptr for specified image or else NULL
IMAGE_STATE *GetImageState(const layer_data *dev_data, VkImage image) {
    auto img_it = dev_data->imageMap.find(image);
    if (img_it == dev_data->imageMap.end()) {
        return nullptr;
    }
    return img_it->second.get();
}
// Return buffer state ptr for specified buffer or else NULL
BUFFER_STATE *GetBufferState(const layer_data *dev_data, VkBuffer buffer) {
    auto buff_it = dev_data->bufferMap.find(buffer);
    if (buff_it == dev_data->bufferMap.end()) {
        return nullptr;
    }
    return buff_it->second.get();
}
// Return swapchain node for specified swapchain or else NULL
SWAPCHAIN_NODE *GetSwapchainNode(const layer_data *dev_data, VkSwapchainKHR swapchain) {
    auto swp_it = dev_data->swapchainMap.find(swapchain);
    if (swp_it == dev_data->swapchainMap.end()) {
        return nullptr;
    }
    return swp_it->second.get();
}
// Return buffer view state ptr for specified buffer view or else NULL
BUFFER_VIEW_STATE *GetBufferViewState(const layer_data *dev_data, VkBufferView buffer_view) {
    auto bv_it = dev_data->bufferViewMap.find(buffer_view);
    if (bv_it == dev_data->bufferViewMap.end()) {
        return nullptr;
    }
    return bv_it->second.get();
}

FENCE_NODE *GetFenceNode(layer_data *dev_data, VkFence fence) {
    auto it = dev_data->fenceMap.find(fence);
    if (it == dev_data->fenceMap.end()) {
        return nullptr;
    }
    return &it->second;
}

EVENT_STATE *GetEventNode(layer_data *dev_data, VkEvent event) {
    auto it = dev_data->eventMap.find(event);
    if (it == dev_data->eventMap.end()) {
        return nullptr;
    }
    return &it->second;
}

QUERY_POOL_NODE *GetQueryPoolNode(layer_data *dev_data, VkQueryPool query_pool) {
    auto it = dev_data->queryPoolMap.find(query_pool);
    if (it == dev_data->queryPoolMap.end()) {
        return nullptr;
    }
    return &it->second;
}

QUEUE_STATE *GetQueueState(layer_data *dev_data, VkQueue queue) {
    auto it = dev_data->queueMap.find(queue);
    if (it == dev_data->queueMap.end()) {
        return nullptr;
    }
    return &it->second;
}

SEMAPHORE_NODE *GetSemaphoreNode(layer_data *dev_data, VkSemaphore semaphore) {
    auto it = dev_data->semaphoreMap.find(semaphore);
    if (it == dev_data->semaphoreMap.end()) {
        return nullptr;
    }
    return &it->second;
}

COMMAND_POOL_NODE *GetCommandPoolNode(layer_data *dev_data, VkCommandPool pool) {
    auto it = dev_data->commandPoolMap.find(pool);
    if (it == dev_data->commandPoolMap.end()) {
        return nullptr;
    }
    return &it->second;
}

PHYSICAL_DEVICE_STATE *GetPhysicalDeviceState(instance_layer_data *instance_data, VkPhysicalDevice phys) {
    auto it = instance_data->physical_device_map.find(phys);
    if (it == instance_data->physical_device_map.end()) {
        return nullptr;
    }
    return &it->second;
}

SURFACE_STATE *GetSurfaceState(instance_layer_data *instance_data, VkSurfaceKHR surface) {
    auto it = instance_data->surface_map.find(surface);
    if (it == instance_data->surface_map.end()) {
        return nullptr;
    }
    return &it->second;
}

DeviceExtensions const *GetEnabledExtensions(layer_data const *dev_data) { return &dev_data->extensions; }

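// Caller-side sketch (handles are placeholders): each accessor above is a
// plain map lookup that returns nullptr for untracked handles, so callers
// must null-check before dereferencing:
//     IMAGE_STATE *image_state = GetImageState(dev_data, image);
//     if (image_state) { /* safe to use image_state->createInfo, etc. */ }
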
// Return ptr to memory binding for given handle of specified type
static BINDABLE *GetObjectMemBinding(layer_data *dev_data, uint64_t handle, VulkanObjectType type) {
    switch (type) {
        case kVulkanObjectTypeImage:
            return GetImageState(dev_data, VkImage(handle));
        case kVulkanObjectTypeBuffer:
            return GetBufferState(dev_data, VkBuffer(handle));
        default:
            break;
    }
    return nullptr;
}
// prototype
GLOBAL_CB_NODE *GetCBNode(layer_data const *, const VkCommandBuffer);

// Return ptr to info in map container containing mem, or NULL if not found
//  Calls to this function should be wrapped in mutex
DEVICE_MEM_INFO *GetMemObjInfo(const layer_data *dev_data, const VkDeviceMemory mem) {
    auto mem_it = dev_data->memObjMap.find(mem);
    if (mem_it == dev_data->memObjMap.end()) {
        return NULL;
    }
    return mem_it->second.get();
}

static void add_mem_obj_info(layer_data *dev_data, void *object, const VkDeviceMemory mem,
                             const VkMemoryAllocateInfo *pAllocateInfo) {
    assert(object != NULL);

    dev_data->memObjMap[mem] = unique_ptr<DEVICE_MEM_INFO>(new DEVICE_MEM_INFO(object, mem, pAllocateInfo));

    if (pAllocateInfo->pNext) {
        auto struct_header = reinterpret_cast<const GENERIC_HEADER *>(pAllocateInfo->pNext);
        while (struct_header) {
            if (VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR == struct_header->sType ||
                VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_KHR == struct_header->sType) {
                dev_data->memObjMap[mem]->global_valid = true;
                break;
            }
            struct_header = reinterpret_cast<const GENERIC_HEADER *>(struct_header->pNext);
        }
    }
}

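// The while-loop above is the generic Vulkan pNext-chain walk. A standalone
// sketch of the same idiom (the target sType is just an example):
//     auto hdr = reinterpret_cast<const GENERIC_HEADER *>(pAllocateInfo->pNext);
//     while (hdr && hdr->sType != VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR) {
//         hdr = reinterpret_cast<const GENERIC_HEADER *>(hdr->pNext);
//     }
//     bool imported = (hdr != nullptr);
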
// For given bound_object_handle, bound to given mem allocation, verify that the range for the bound object is valid
static bool ValidateMemoryIsValid(layer_data *dev_data, VkDeviceMemory mem, uint64_t bound_object_handle, VulkanObjectType type,
                                  const char *functionName) {
    DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
    if (mem_info) {
        if (!mem_info->bound_ranges[bound_object_handle].valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           HandleToUint64(mem), __LINE__, MEMTRACK_INVALID_MEM_REGION, "MEM",
                           "%s: Cannot read invalid region of memory allocation 0x%" PRIx64 " for bound %s object 0x%" PRIx64
                           ", please fill the memory before using.",
                           functionName, HandleToUint64(mem), object_string[type], bound_object_handle);
        }
    }
    return false;
}
// For given image_state
//  If mem is special swapchain key, then verify that image_state valid member is true
//  Else verify that the image's bound memory range is valid
bool ValidateImageMemoryIsValid(layer_data *dev_data, IMAGE_STATE *image_state, const char *functionName) {
    if (image_state->binding.mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        if (!image_state->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           HandleToUint64(image_state->binding.mem), __LINE__, MEMTRACK_INVALID_MEM_REGION, "MEM",
                           "%s: Cannot read invalid swapchain image 0x%" PRIx64 ", please fill the memory before using.",
                           functionName, HandleToUint64(image_state->image));
        }
    } else {
        return ValidateMemoryIsValid(dev_data, image_state->binding.mem, HandleToUint64(image_state->image), kVulkanObjectTypeImage,
                                     functionName);
    }
    return false;
}
// For given buffer_state, verify that the range it's bound to is valid
bool ValidateBufferMemoryIsValid(layer_data *dev_data, BUFFER_STATE *buffer_state, const char *functionName) {
    return ValidateMemoryIsValid(dev_data, buffer_state->binding.mem, HandleToUint64(buffer_state->buffer), kVulkanObjectTypeBuffer,
                                 functionName);
}
// For the given memory allocation, set the range bound by the given handle object to the valid param value
static void SetMemoryValid(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, bool valid) {
    DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
    if (mem_info) {
        mem_info->bound_ranges[handle].valid = valid;
    }
}
// For given image node
//  If mem is special swapchain key, then set entire image_state to valid param value
//  Else set the image's bound memory range to valid param value
void SetImageMemoryValid(layer_data *dev_data, IMAGE_STATE *image_state, bool valid) {
    if (image_state->binding.mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        image_state->valid = valid;
    } else {
        SetMemoryValid(dev_data, image_state->binding.mem, HandleToUint64(image_state->image), valid);
    }
}
// For given buffer node set the buffer's bound memory range to valid param value
void SetBufferMemoryValid(layer_data *dev_data, BUFFER_STATE *buffer_state, bool valid) {
    SetMemoryValid(dev_data, buffer_state->binding.mem, HandleToUint64(buffer_state->buffer), valid);
}

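// Lifecycle sketch (illustrative pairing): writes flip a range to valid and
// reads check it, so a transfer command conceptually does:
//     skip |= ValidateBufferMemoryIsValid(dev_data, src_buffer_state, "vkCmdCopyBuffer()");  // before read
//     SetBufferMemoryValid(dev_data, dst_buffer_state, true);                                // after write
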
// Create binding link between given sampler and command buffer node
void AddCommandBufferBindingSampler(GLOBAL_CB_NODE *cb_node, SAMPLER_STATE *sampler_state) {
    sampler_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert({HandleToUint64(sampler_state->sampler), kVulkanObjectTypeSampler});
}

// Create binding link between given image node and command buffer node
void AddCommandBufferBindingImage(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_STATE *image_state) {
    // Skip validation if this image was created through WSI
    if (image_state->binding.mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        // First update CB binding in MemObj mini CB list
        for (auto mem_binding : image_state->GetBoundMemory()) {
            DEVICE_MEM_INFO *pMemInfo = GetMemObjInfo(dev_data, mem_binding);
            if (pMemInfo) {
                pMemInfo->cb_bindings.insert(cb_node);
                // Now update CBInfo's Mem reference list
                cb_node->memObjs.insert(mem_binding);
            }
        }
        // Now update cb binding for image
        cb_node->object_bindings.insert({HandleToUint64(image_state->image), kVulkanObjectTypeImage});
        image_state->cb_bindings.insert(cb_node);
    }
}

// Create binding link between given image view node and its image with command buffer node
void AddCommandBufferBindingImageView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_VIEW_STATE *view_state) {
    // First add bindings for imageView
    view_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert({HandleToUint64(view_state->image_view), kVulkanObjectTypeImageView});
    auto image_state = GetImageState(dev_data, view_state->create_info.image);
    // Add bindings for image within imageView
    if (image_state) {
        AddCommandBufferBindingImage(dev_data, cb_node, image_state);
    }
}

// Create binding link between given buffer node and command buffer node
void AddCommandBufferBindingBuffer(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_STATE *buffer_state) {
    // First update CB binding in MemObj mini CB list
    for (auto mem_binding : buffer_state->GetBoundMemory()) {
        DEVICE_MEM_INFO *pMemInfo = GetMemObjInfo(dev_data, mem_binding);
        if (pMemInfo) {
            pMemInfo->cb_bindings.insert(cb_node);
            // Now update CBInfo's Mem reference list
            cb_node->memObjs.insert(mem_binding);
        }
    }
    // Now update cb binding for buffer
    cb_node->object_bindings.insert({HandleToUint64(buffer_state->buffer), kVulkanObjectTypeBuffer});
    buffer_state->cb_bindings.insert(cb_node);
}

// Create binding link between given buffer view node and its buffer with command buffer node
void AddCommandBufferBindingBufferView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_VIEW_STATE *view_state) {
    // First add bindings for bufferView
    view_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert({HandleToUint64(view_state->buffer_view), kVulkanObjectTypeBufferView});
    auto buffer_state = GetBufferState(dev_data, view_state->create_info.buffer);
    // Add bindings for buffer within bufferView
    if (buffer_state) {
        AddCommandBufferBindingBuffer(dev_data, cb_node, buffer_state);
    }
}

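// Fan-out sketch (illustrative): the view helpers above cascade to their
// backing resources, so a single call
//     AddCommandBufferBindingImageView(dev_data, cb_node, view_state);
// inserts the view, its image, and that image's bound memory into cb_node's
// binding sets.
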
// For every mem obj bound to particular CB, free bindings related to that CB
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
    if (cb_node) {
        if (cb_node->memObjs.size() > 0) {
            for (auto mem : cb_node->memObjs) {
                DEVICE_MEM_INFO *pInfo = GetMemObjInfo(dev_data, mem);
                if (pInfo) {
                    pInfo->cb_bindings.erase(cb_node);
                }
            }
            cb_node->memObjs.clear();
        }
    }
}

// Clear a single object binding from given memory object, or report error if binding is missing
static bool ClearMemoryObjectBinding(layer_data *dev_data, uint64_t handle, VulkanObjectType type, VkDeviceMemory mem) {
    DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
    // This obj is bound to a memory object. Remove the reference to this object in that memory object's list
    if (mem_info) {
        mem_info->obj_bindings.erase({handle, type});
    }
    return false;
}

// ClearMemoryObjectBindings clears the binding of objects to memory
//  For the given object it pulls the memory bindings and makes sure that the bindings
//  no longer refer to the object being cleared. This occurs when objects are destroyed.
bool ClearMemoryObjectBindings(layer_data *dev_data, uint64_t handle, VulkanObjectType type) {
    bool skip = false;
    BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
    if (mem_binding) {
        if (!mem_binding->sparse) {
            skip = ClearMemoryObjectBinding(dev_data, handle, type, mem_binding->binding.mem);
        } else {  // Sparse, clear all bindings
            for (auto &sparse_mem_binding : mem_binding->sparse_bindings) {
                skip |= ClearMemoryObjectBinding(dev_data, handle, type, sparse_mem_binding.mem);
            }
        }
    }
    return skip;
}

// For given mem object, verify that it is not null or UNBOUND, if it is, report error. Return skip value.
bool VerifyBoundMemoryIsValid(const layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, const char *api_name,
                              const char *type_name, UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool result = false;
    if (VK_NULL_HANDLE == mem) {
        result = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle,
                         __LINE__, error_code, "MEM",
                         "%s: Vk%s object 0x%" PRIx64
                         " used with no memory bound. Memory should be bound by calling vkBind%sMemory(). %s",
                         api_name, type_name, handle, type_name, validation_error_map[error_code]);
    } else if (MEMORY_UNBOUND == mem) {
        result = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle,
                         __LINE__, error_code, "MEM",
                         "%s: Vk%s object 0x%" PRIx64
                         " used with no memory bound and previously bound memory was freed. Memory must not be freed prior to this "
                         "operation. %s",
                         api_name, type_name, handle, validation_error_map[error_code]);
    }
    return result;
}

// Check to see if memory was ever bound to this image
bool ValidateMemoryIsBoundToImage(const layer_data *dev_data, const IMAGE_STATE *image_state, const char *api_name,
                                  UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool result = false;
    if (0 == (static_cast<uint32_t>(image_state->createInfo.flags) & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)) {
        result = VerifyBoundMemoryIsValid(dev_data, image_state->binding.mem, HandleToUint64(image_state->image), api_name, "Image",
                                          error_code);
    }
    return result;
}

// Check to see if memory was bound to this buffer
bool ValidateMemoryIsBoundToBuffer(const layer_data *dev_data, const BUFFER_STATE *buffer_state, const char *api_name,
                                   UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool result = false;
    if (0 == (static_cast<uint32_t>(buffer_state->createInfo.flags) & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)) {
        result = VerifyBoundMemoryIsValid(dev_data, buffer_state->binding.mem, HandleToUint64(buffer_state->buffer), api_name,
                                          "Buffer", error_code);
    }
    return result;
}

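// Draw-time sketch (error_code here is a placeholder): resources consumed by
// a command are funneled through these checks, e.g.:
//     skip |= ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCmdBindVertexBuffers()", error_code);
// Sparse resources are exempt because they are not required to be fully bound.
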
// SetMemBinding is used to establish immutable, non-sparse binding between a single image/buffer object and memory object.
// Corresponding valid usage checks are in ValidateSetMemBinding().
static void SetMemBinding(layer_data *dev_data, VkDeviceMemory mem, BINDABLE *mem_binding, VkDeviceSize memory_offset,
                          uint64_t handle, VulkanObjectType type, const char *apiName) {
    assert(mem_binding);
    mem_binding->binding.mem = mem;
    mem_binding->UpdateBoundMemorySet();  // force recreation of cached set
    mem_binding->binding.offset = memory_offset;
    mem_binding->binding.size = mem_binding->requirements.size;

    if (mem != VK_NULL_HANDLE) {
        DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
        if (mem_info) {
            mem_info->obj_bindings.insert({handle, type});
            // For image objects, make sure default memory state is correctly set
            // TODO : What's the best/correct way to handle this?
            if (kVulkanObjectTypeImage == type) {
                auto const image_state = reinterpret_cast<const IMAGE_STATE *>(mem_binding);
                if (image_state) {
                    VkImageCreateInfo ici = image_state->createInfo;
                    if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
                        // TODO::  More memory state transition stuff.
                    }
                }
            }
        }
    }
}

// Valid usage checks for a call to SetMemBinding().
// For NULL mem case, output warning
// Make sure given object is in global object map
//  IF a previous binding existed, output validation error
//  Otherwise, add reference from objectInfo to memoryInfo
//  Add reference off of objInfo
// TODO: We may need to refactor or pass in multiple valid usage statements to handle multiple valid usage conditions.
static bool ValidateSetMemBinding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, VulkanObjectType type,
                                  const char *apiName) {
    bool skip = false;
    // It's an error to bind an object to NULL memory
    if (mem != VK_NULL_HANDLE) {
        BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
        assert(mem_binding);
        if (mem_binding->sparse) {
            UNIQUE_VALIDATION_ERROR_CODE error_code = VALIDATION_ERROR_1740082a;
            const char *handle_type = "IMAGE";
            if (type == kVulkanObjectTypeBuffer) {
                error_code = VALIDATION_ERROR_1700080c;
                handle_type = "BUFFER";
            } else {
                assert(type == kVulkanObjectTypeImage);
            }
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            HandleToUint64(mem), __LINE__, error_code, "MEM",
                            "In %s, attempting to bind memory (0x%" PRIx64 ") to object (0x%" PRIx64
                            ") which was created with sparse memory flags (VK_%s_CREATE_SPARSE_*_BIT). %s",
                            apiName, HandleToUint64(mem), handle, handle_type, validation_error_map[error_code]);
        }
        DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
        if (mem_info) {
            DEVICE_MEM_INFO *prev_binding = GetMemObjInfo(dev_data, mem_binding->binding.mem);
            if (prev_binding) {
                UNIQUE_VALIDATION_ERROR_CODE error_code = VALIDATION_ERROR_17400828;
                if (type == kVulkanObjectTypeBuffer) {
                    error_code = VALIDATION_ERROR_1700080a;
                } else {
                    assert(type == kVulkanObjectTypeImage);
                }
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                HandleToUint64(mem), __LINE__, error_code, "MEM",
                                "In %s, attempting to bind memory (0x%" PRIx64 ") to object (0x%" PRIx64
                                ") which has already been bound to mem object 0x%" PRIx64 ". %s",
                                apiName, HandleToUint64(mem), handle, HandleToUint64(prev_binding->mem),
                                validation_error_map[error_code]);
            } else if (mem_binding->binding.mem == MEMORY_UNBOUND) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                HandleToUint64(mem), __LINE__, MEMTRACK_REBIND_OBJECT, "MEM",
                                "In %s, attempting to bind memory (0x%" PRIx64 ") to object (0x%" PRIx64
                                ") which was previously bound to memory that has since been freed. Memory bindings are immutable in "
                                "Vulkan so this attempt to bind to new memory is not allowed.",
                                apiName, HandleToUint64(mem), handle);
            }
        }
    }
    return skip;
}

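// Call-pairing sketch (illustrative; names like buffer_state/result are
// placeholders): bind entry points run the check first and commit the tracked
// state only if validation passes and the driver call succeeds, conceptually:
//     skip |= ValidateSetMemBinding(dev_data, mem, HandleToUint64(buffer), kVulkanObjectTypeBuffer, "vkBindBufferMemory()");
//     if (!skip && VK_SUCCESS == result) {
//         SetMemBinding(dev_data, mem, buffer_state, memoryOffset, HandleToUint64(buffer), kVulkanObjectTypeBuffer,
//                       "vkBindBufferMemory()");
//     }
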
// For NULL mem case, clear any previous binding. Else:
// Make sure given object is in its object map
//  IF a previous binding existed, update binding
//  Add reference from objectInfo to memoryInfo
//  Add reference off of object's binding info
// Return skip value
static bool SetSparseMemBinding(layer_data *dev_data, MEM_BINDING binding, uint64_t handle, VulkanObjectType type) {
    bool skip = false;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (binding.mem == VK_NULL_HANDLE) {
        // TODO : This should cause the range of the resource to be unbound according to spec
    } else {
        BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
        assert(mem_binding);
        assert(mem_binding->sparse);
        DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, binding.mem);
        if (mem_info) {
            mem_info->obj_bindings.insert({handle, type});
            // Need to set mem binding for this object
            mem_binding->sparse_bindings.insert(binding);
            mem_binding->UpdateBoundMemorySet();
        }
    }
    return skip;
}

// Check object status for selected flag state
static bool validate_status(layer_data *dev_data, GLOBAL_CB_NODE *pNode, CBStatusFlags status_mask, VkFlags msg_flags,
                            const char *fail_msg, UNIQUE_VALIDATION_ERROR_CODE const msg_code) {
    if (!(pNode->status & status_mask)) {
        char const *const message = validation_error_map[msg_code];
        return log_msg(dev_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                       HandleToUint64(pNode->commandBuffer), __LINE__, msg_code, "DS",
                       "command buffer object 0x%" PRIx64 ": %s. %s.", HandleToUint64(pNode->commandBuffer), fail_msg, message);
    }
    return false;
}

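// Status-bit sketch (illustrative): CBStatusFlags bits are set as the
// corresponding vkCmdSet* commands are recorded, so a draw-time requirement
// reduces to a single mask test, e.g.:
//     skip |= validate_status(dev_data, cb_node, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
//                             "Dynamic line width state not set for this command buffer", msg_code);
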
// Retrieve pipeline node ptr for given pipeline object
static PIPELINE_STATE *getPipelineState(layer_data const *dev_data, VkPipeline pipeline) {
    auto it = dev_data->pipelineMap.find(pipeline);
    if (it == dev_data->pipelineMap.end()) {
        return nullptr;
    }
    return it->second.get();
}

RENDER_PASS_STATE *GetRenderPassState(layer_data const *dev_data, VkRenderPass renderpass) {
    auto it = dev_data->renderPassMap.find(renderpass);
    if (it == dev_data->renderPassMap.end()) {
        return nullptr;
    }
    return it->second.get();
}

std::shared_ptr<RENDER_PASS_STATE> GetRenderPassStateSharedPtr(layer_data const *dev_data, VkRenderPass renderpass) {
    auto it = dev_data->renderPassMap.find(renderpass);
    if (it == dev_data->renderPassMap.end()) {
        return nullptr;
    }
    return it->second;
}

FRAMEBUFFER_STATE *GetFramebufferState(const layer_data *dev_data, VkFramebuffer framebuffer) {
    auto it = dev_data->frameBufferMap.find(framebuffer);
    if (it == dev_data->frameBufferMap.end()) {
        return nullptr;
    }
    return it->second.get();
}

std::shared_ptr<cvdescriptorset::DescriptorSetLayout const> const GetDescriptorSetLayout(layer_data const *dev_data,
                                                                                         VkDescriptorSetLayout dsLayout) {
    auto it = dev_data->descriptorSetLayoutMap.find(dsLayout);
    if (it == dev_data->descriptorSetLayoutMap.end()) {
        return nullptr;
    }
    return it->second;
}

static PIPELINE_LAYOUT_NODE const *getPipelineLayout(layer_data const *dev_data, VkPipelineLayout pipeLayout) {
    auto it = dev_data->pipelineLayoutMap.find(pipeLayout);
    if (it == dev_data->pipelineLayoutMap.end()) {
        return nullptr;
    }
    return &it->second;
}

shader_module const *GetShaderModuleState(layer_data const *dev_data, VkShaderModule module) {
    auto it = dev_data->shaderModuleMap.find(module);
    if (it == dev_data->shaderModuleMap.end()) {
        return nullptr;
    }
    return it->second.get();
}

// Return true if for a given PSO, the given state enum is dynamic, else return false
static bool isDynamic(const PIPELINE_STATE *pPipeline, const VkDynamicState state) {
    if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
        for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
            if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) return true;
        }
    }
    return false;
}

// Validate state stored as flags at time of draw call
static bool validate_draw_state_flags(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const PIPELINE_STATE *pPipe, bool indexed,
                                      UNIQUE_VALIDATION_ERROR_CODE const msg_code) {
    bool result = false;
    if (pPipe->graphicsPipelineCI.pInputAssemblyState &&
        ((pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST) ||
         (pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP))) {
        result |= validate_status(dev_data, pCB, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Dynamic line width state not set for this command buffer", msg_code);
    }
    if (pPipe->graphicsPipelineCI.pRasterizationState &&
        (pPipe->graphicsPipelineCI.pRasterizationState->depthBiasEnable == VK_TRUE)) {
        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BIAS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Dynamic depth bias state not set for this command buffer", msg_code);
    }
    if (pPipe->blendConstantsEnabled) {
        result |= validate_status(dev_data, pCB, CBSTATUS_BLEND_CONSTANTS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Dynamic blend constants state not set for this command buffer", msg_code);
    }
    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
        (pPipe->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) {
        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BOUNDS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Dynamic depth bounds state not set for this command buffer", msg_code);
    }
    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
        (pPipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE)) {
        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Dynamic stencil read mask state not set for this command buffer", msg_code);
        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Dynamic stencil write mask state not set for this command buffer", msg_code);
        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Dynamic stencil reference state not set for this command buffer", msg_code);
    }
    if (indexed) {
        result |= validate_status(dev_data, pCB, CBSTATUS_INDEX_BUFFER_BOUND, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Index buffer object not bound to this command buffer when Indexed Draw attempted", msg_code);
    }

    return result;
}

static bool logInvalidAttachmentMessage(layer_data const *dev_data, const char *type1_string, const RENDER_PASS_STATE *rp1_state,
                                        const char *type2_string, const RENDER_PASS_STATE *rp2_state, uint32_t primary_attach,
                                        uint32_t secondary_attach, const char *msg, const char *caller,
                                        UNIQUE_VALIDATION_ERROR_CODE error_code) {
    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                   HandleToUint64(rp1_state->renderPass), __LINE__, error_code, "DS",
                   "%s: RenderPasses incompatible between %s w/ renderPass 0x%" PRIx64 " and %s w/ renderPass 0x%" PRIx64
                   " Attachment %u is not compatible with %u: %s. %s",
                   caller, type1_string, HandleToUint64(rp1_state->renderPass), type2_string, HandleToUint64(rp2_state->renderPass),
                   primary_attach, secondary_attach, msg, validation_error_map[error_code]);
}

static bool validateAttachmentCompatibility(layer_data const *dev_data, const char *type1_string,
                                            const RENDER_PASS_STATE *rp1_state, const char *type2_string,
                                            const RENDER_PASS_STATE *rp2_state, uint32_t primary_attach, uint32_t secondary_attach,
                                            const char *caller, UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool skip = false;
    const auto &primaryPassCI = rp1_state->createInfo;
    const auto &secondaryPassCI = rp2_state->createInfo;
    if (primaryPassCI.attachmentCount <= primary_attach) {
        primary_attach = VK_ATTACHMENT_UNUSED;
    }
    if (secondaryPassCI.attachmentCount <= secondary_attach) {
        secondary_attach = VK_ATTACHMENT_UNUSED;
    }
    if (primary_attach == VK_ATTACHMENT_UNUSED && secondary_attach == VK_ATTACHMENT_UNUSED) {
        return skip;
    }
    if (primary_attach == VK_ATTACHMENT_UNUSED) {
        skip |= logInvalidAttachmentMessage(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_attach,
                                            secondary_attach, "The first is unused while the second is not.", caller, error_code);
        return skip;
    }
    if (secondary_attach == VK_ATTACHMENT_UNUSED) {
        skip |= logInvalidAttachmentMessage(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_attach,
                                            secondary_attach, "The second is unused while the first is not.", caller, error_code);
        return skip;
    }
    if (primaryPassCI.pAttachments[primary_attach].format != secondaryPassCI.pAttachments[secondary_attach].format) {
        skip |= logInvalidAttachmentMessage(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_attach,
                                            secondary_attach, "They have different formats.", caller, error_code);
    }
    if (primaryPassCI.pAttachments[primary_attach].samples != secondaryPassCI.pAttachments[secondary_attach].samples) {
        skip |= logInvalidAttachmentMessage(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_attach,
                                            secondary_attach, "They have different samples.", caller, error_code);
    }
    if (primaryPassCI.pAttachments[primary_attach].flags != secondaryPassCI.pAttachments[secondary_attach].flags) {
        skip |= logInvalidAttachmentMessage(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_attach,
                                            secondary_attach, "They have different flags.", caller, error_code);
    }

    return skip;
}

static bool validateSubpassCompatibility(layer_data const *dev_data, const char *type1_string, const RENDER_PASS_STATE *rp1_state,
                                         const char *type2_string, const RENDER_PASS_STATE *rp2_state, const int subpass,
                                         const char *caller, UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool skip = false;
    const auto &primary_desc = rp1_state->createInfo.pSubpasses[subpass];
    const auto &secondary_desc = rp2_state->createInfo.pSubpasses[subpass];
    uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
    for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
        uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.inputAttachmentCount) {
            primary_input_attach = primary_desc.pInputAttachments[i].attachment;
        }
        if (i < secondary_desc.inputAttachmentCount) {
            secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
        }
        skip |= validateAttachmentCompatibility(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_input_attach,
                                                secondary_input_attach, caller, error_code);
    }
    uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
    for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
        uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.colorAttachmentCount) {
            primary_color_attach = primary_desc.pColorAttachments[i].attachment;
        }
        if (i < secondary_desc.colorAttachmentCount) {
            secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
        }
        skip |= validateAttachmentCompatibility(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_color_attach,
                                                secondary_color_attach, caller, error_code);
        uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
            primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
        }
        if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
            secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
        }
        skip |= validateAttachmentCompatibility(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_resolve_attach,
                                                secondary_resolve_attach, caller, error_code);
    }
    uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
    if (primary_desc.pDepthStencilAttachment) {
        primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
    }
    if (secondary_desc.pDepthStencilAttachment) {
        secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
    }
    skip |= validateAttachmentCompatibility(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_depthstencil_attach,
                                            secondary_depthstencil_attach, caller, error_code);
    return skip;
}

// Verify that given renderPass CreateInfo for primary and secondary command buffers are compatible.
//  This function deals directly with the CreateInfo, there are overloaded versions below that can take the renderPass handle and
//  will then feed into this function
static bool validateRenderPassCompatibility(layer_data const *dev_data, const char *type1_string,
                                            const RENDER_PASS_STATE *rp1_state, const char *type2_string,
                                            const RENDER_PASS_STATE *rp2_state, const char *caller,
                                            UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool skip = false;

    if (rp1_state->createInfo.subpassCount != rp2_state->createInfo.subpassCount) {
        skip |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                    HandleToUint64(rp1_state->renderPass), __LINE__, error_code, "DS",
                    "%s: RenderPasses incompatible between %s w/ renderPass 0x%" PRIx64
                    " with a subpassCount of %u and %s w/ renderPass 0x%" PRIx64 " with a subpassCount of %u. %s",
                    caller, type1_string, HandleToUint64(rp1_state->renderPass), rp1_state->createInfo.subpassCount, type2_string,
                    HandleToUint64(rp2_state->renderPass), rp2_state->createInfo.subpassCount, validation_error_map[error_code]);
    } else {
        for (uint32_t i = 0; i < rp1_state->createInfo.subpassCount; ++i) {
            skip |= validateSubpassCompatibility(dev_data, type1_string, rp1_state, type2_string, rp2_state, i, caller, error_code);
        }
    }
    return skip;
}

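// Compatibility rules in brief: two render passes are compatible when their
// subpass counts match and, per subpass, corresponding attachments agree in
// format, sample count, and flags (VK_ATTACHMENT_UNUSED only matches itself).
// A typical call site (description strings are placeholders):
//     skip |= validateRenderPassCompatibility(dev_data, "primary command buffer", rp1_state,
//                                             "secondary command buffer", rp2_state, "vkCmdExecuteCommands()", error_code);
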
945 // Return Set node ptr for specified set or else NULL
GetSetNode(const layer_data * dev_data,VkDescriptorSet set)946 cvdescriptorset::DescriptorSet *GetSetNode(const layer_data *dev_data, VkDescriptorSet set) {
947     auto set_it = dev_data->setMap.find(set);
948     if (set_it == dev_data->setMap.end()) {
949         return NULL;
950     }
951     return set_it->second;
952 }
953 
954 // For given pipeline, return number of MSAA samples, or one if MSAA disabled
getNumSamples(PIPELINE_STATE const * pipe)955 static VkSampleCountFlagBits getNumSamples(PIPELINE_STATE const *pipe) {
956     if (pipe->graphicsPipelineCI.pMultisampleState != NULL &&
957         VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pipe->graphicsPipelineCI.pMultisampleState->sType) {
958         return pipe->graphicsPipelineCI.pMultisampleState->rasterizationSamples;
959     }
960     return VK_SAMPLE_COUNT_1_BIT;
961 }
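// Example: a pipeline whose pMultisampleState was created with rasterizationSamples ==
// VK_SAMPLE_COUNT_4_BIT returns VK_SAMPLE_COUNT_4_BIT here, while a pipeline created with no
// multisample state returns VK_SAMPLE_COUNT_1_BIT.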
962 
963 static void list_bits(std::ostream &s, uint32_t bits) {
964     for (int i = 0; i < 32 && bits; i++) {
965         if (bits & (1 << i)) {
966             s << i;
967             bits &= ~(1 << i);
968             if (bits) {
969                 s << ",";
970             }
971         }
972     }
973 }
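// Worked example: list_bits(s, 0x0b) visits set bits 0, 1, and 3 and appends "0,1,3" to s; no
// trailing comma is emitted because each bit is cleared before the separator check.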
974 
975 // Validate draw-time state related to the PSO
976 static bool ValidatePipelineDrawtimeState(layer_data const *dev_data, LAST_BOUND_STATE const &state, const GLOBAL_CB_NODE *pCB,
977                                           CMD_TYPE cmd_type, PIPELINE_STATE const *pPipeline, const char *caller) {
978     bool skip = false;
979 
980     // Verify vertex binding
981     if (pPipeline->vertexBindingDescriptions.size() > 0) {
982         for (size_t i = 0; i < pPipeline->vertexBindingDescriptions.size(); i++) {
983             auto vertex_binding = pPipeline->vertexBindingDescriptions[i].binding;
984             if ((pCB->currentDrawData.buffers.size() < (vertex_binding + 1)) ||
985                 (pCB->currentDrawData.buffers[vertex_binding] == VK_NULL_HANDLE)) {
986                 skip |=
987                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
988                             HandleToUint64(pCB->commandBuffer), __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
989                             "The Pipeline State Object (0x%" PRIx64
990                             ") expects that this Command Buffer's vertex binding Index %u should be set via "
991                             "vkCmdBindVertexBuffers. This is because VkVertexInputBindingDescription struct at "
992                             "index " PRINTF_SIZE_T_SPECIFIER " of pVertexBindingDescriptions has a binding value of %u.",
993                             HandleToUint64(state.pipeline_state->pipeline), vertex_binding, i, vertex_binding);
994             }
995         }
996     } else {
997         if (!pCB->currentDrawData.buffers.empty() && !pCB->vertex_buffer_used) {
998             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
999                             VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCB->commandBuffer), __LINE__,
1000                             DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
1001                             "Vertex buffers are bound to command buffer (0x%" PRIx64
1002                             ") but no vertex buffers are attached to this Pipeline State Object (0x%" PRIx64 ").",
1003                             HandleToUint64(pCB->commandBuffer), HandleToUint64(state.pipeline_state->pipeline));
1004         }
1005     }
1006     // If Viewport or scissors are dynamic, verify that dynamic count matches PSO count.
1007     // Skip check if rasterization is disabled or there is no viewport.
1008     if ((!pPipeline->graphicsPipelineCI.pRasterizationState ||
1009          (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) &&
1010         pPipeline->graphicsPipelineCI.pViewportState) {
1011         bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
1012         bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
1013 
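        // Mask arithmetic example: with viewportCount == 3 the required mask computed below is
        // (1 << 3) - 1 == 0b111; if only vkCmdSetViewport() for index 1 was recorded
        // (viewportMask == 0b010), then missingViewportMask == 0b101 and viewports 0 and 2 are
        // listed in the error message. The scissor check is identical.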
1014         if (dynViewport) {
1015             auto requiredViewportsMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->viewportCount) - 1;
1016             auto missingViewportMask = ~pCB->viewportMask & requiredViewportsMask;
1017             if (missingViewportMask) {
1018                 std::stringstream ss;
1019                 ss << "Dynamic viewport(s) ";
1020                 list_bits(ss, missingViewportMask);
1021                 ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetViewport().";
1022                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
1023                                 __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", "%s", ss.str().c_str());
1024             }
1025         }
1026 
1027         if (dynScissor) {
1028             auto requiredScissorMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->scissorCount) - 1;
1029             auto missingScissorMask = ~pCB->scissorMask & requiredScissorMask;
1030             if (missingScissorMask) {
1031                 std::stringstream ss;
1032                 ss << "Dynamic scissor(s) ";
1033                 list_bits(ss, missingScissorMask);
1034                 ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetScissor().";
1035                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
1036                                 __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", "%s", ss.str().c_str());
1037             }
1038         }
1039     }
1040 
1041     // Verify that any MSAA request in PSO matches sample# in bound FB
1042     // Skip the check if rasterization is disabled.
1043     if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
1044         (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
1045         VkSampleCountFlagBits pso_num_samples = getNumSamples(pPipeline);
1046         if (pCB->activeRenderPass) {
1047             auto const render_pass_info = pCB->activeRenderPass->createInfo.ptr();
1048             const VkSubpassDescription *subpass_desc = &render_pass_info->pSubpasses[pCB->activeSubpass];
1049             uint32_t i;
1050             unsigned subpass_num_samples = 0;
1051 
1052             for (i = 0; i < subpass_desc->colorAttachmentCount; i++) {
1053                 auto attachment = subpass_desc->pColorAttachments[i].attachment;
1054                 if (attachment != VK_ATTACHMENT_UNUSED)
1055                     subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
1056             }
1057 
1058             if (subpass_desc->pDepthStencilAttachment &&
1059                 subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
1060                 auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
1061                 subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
1062             }
1063 
1064             if (!dev_data->extensions.vk_amd_mixed_attachment_samples &&
1065                 ((subpass_num_samples & static_cast<unsigned>(pso_num_samples)) != subpass_num_samples)) {
1066                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1067                                 HandleToUint64(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
1068                                 "Num samples mismatch! At draw-time, Pipeline (0x%" PRIx64
1069                                 ") uses %u samples but the current RenderPass (0x%" PRIx64 ") uses %u samples!",
1070                                 HandleToUint64(pPipeline->pipeline), pso_num_samples,
1071                                 HandleToUint64(pCB->activeRenderPass->renderPass), subpass_num_samples);
1072             }
1073         } else {
1074             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1075                             HandleToUint64(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
1076                             "No active render pass found at draw-time in Pipeline (0x%" PRIx64 ")!",
1077                             HandleToUint64(pPipeline->pipeline));
1078         }
1079     }
1080     // Verify that PSO creation renderPass is compatible with active renderPass
1081     if (pCB->activeRenderPass) {
1082         // TODO: Move all of the error codes common across different Draws into a LUT accessed by cmd_type
1083         // TODO: AMD extension codes are included here, but actual function entrypoints are not yet intercepted
1084         // Error codes for renderpass and subpass mismatches
1085         auto rp_error = VALIDATION_ERROR_1a200366, sp_error = VALIDATION_ERROR_1a200368;
1086         switch (cmd_type) {
1087             case CMD_DRAWINDEXED:
1088                 rp_error = VALIDATION_ERROR_1a40038c;
1089                 sp_error = VALIDATION_ERROR_1a40038e;
1090                 break;
1091             case CMD_DRAWINDIRECT:
1092                 rp_error = VALIDATION_ERROR_1aa003be;
1093                 sp_error = VALIDATION_ERROR_1aa003c0;
1094                 break;
1095             case CMD_DRAWINDIRECTCOUNTAMD:
1096                 rp_error = VALIDATION_ERROR_1ac003f6;
1097                 sp_error = VALIDATION_ERROR_1ac003f8;
1098                 break;
1099             case CMD_DRAWINDEXEDINDIRECT:
1100                 rp_error = VALIDATION_ERROR_1a600426;
1101                 sp_error = VALIDATION_ERROR_1a600428;
1102                 break;
1103             case CMD_DRAWINDEXEDINDIRECTCOUNTAMD:
1104                 rp_error = VALIDATION_ERROR_1a800460;
1105                 sp_error = VALIDATION_ERROR_1a800462;
1106                 break;
1107             default:
1108                 assert(CMD_DRAW == cmd_type);
1109                 break;
1110         }
1111         std::string err_string;
1112         if (pCB->activeRenderPass->renderPass != pPipeline->rp_state->renderPass) {
1113             // renderPass that PSO was created with must be compatible with active renderPass that PSO is being used with
1114             skip |= validateRenderPassCompatibility(dev_data, "active render pass", pCB->activeRenderPass, "pipeline state object",
1115                                                     pPipeline->rp_state.get(), caller, rp_error);
1116         }
1117         if (pPipeline->graphicsPipelineCI.subpass != pCB->activeSubpass) {
1118             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1119                             HandleToUint64(pPipeline->pipeline), __LINE__, sp_error, "DS",
1120                             "Pipeline was built for subpass %u but used in subpass %u. %s", pPipeline->graphicsPipelineCI.subpass,
1121                             pCB->activeSubpass, validation_error_map[sp_error]);
1122         }
1123     }
1124 
1125     return skip;
1126 }
1127 
1128 // For given cvdescriptorset::DescriptorSet, verify that its Set is compatible w/ the setLayout corresponding to
1129 // pipelineLayout[layoutIndex]
1130 static bool verify_set_layout_compatibility(const cvdescriptorset::DescriptorSet *descriptor_set,
1131                                             PIPELINE_LAYOUT_NODE const *pipeline_layout, const uint32_t layoutIndex,
1132                                             string &errorMsg) {
1133     auto num_sets = pipeline_layout->set_layouts.size();
1134     if (layoutIndex >= num_sets) {
1135         stringstream errorStr;
1136         errorStr << "VkPipelineLayout (" << pipeline_layout->layout << ") only contains " << num_sets
1137                  << " setLayouts corresponding to sets 0-" << num_sets - 1 << ", but you're attempting to bind set to index "
1138                  << layoutIndex;
1139         errorMsg = errorStr.str();
1140         return false;
1141     }
1142     if (descriptor_set->IsPushDescriptor()) return true;
1143     auto layout_node = pipeline_layout->set_layouts[layoutIndex];
1144     return descriptor_set->IsCompatible(layout_node.get(), &errorMsg);
1145 }
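// Example: for a pipeline layout created with 4 set layouts, valid layoutIndex values are 0-3; a
// bind at index 4 fails with the "only contains ... setLayouts" message above, while an in-range
// index defers to DescriptorSet::IsCompatible() for the layout-equality check.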
1146 
1147 // Validate overall state at the time of a draw call
1148 static bool ValidateDrawState(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, CMD_TYPE cmd_type, const bool indexed,
1149                               const VkPipelineBindPoint bind_point, const char *function,
1150                               UNIQUE_VALIDATION_ERROR_CODE const msg_code) {
1151     bool result = false;
1152     auto const &state = cb_node->lastBound[bind_point];
1153     PIPELINE_STATE *pPipe = state.pipeline_state;
1154     if (nullptr == pPipe) {
1155         result |= log_msg(
1156             dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1157             HandleToUint64(cb_node->commandBuffer), __LINE__, DRAWSTATE_INVALID_PIPELINE, "DS",
1158             "At Draw/Dispatch time no valid VkPipeline is bound! This is illegal. Please bind one with vkCmdBindPipeline().");
1159         // Early return: any further checks below would dereference the null pipeline pointer
1160         return true;
1161     }
1162     // First check flag states
1163     if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point)
1164         result = validate_draw_state_flags(dev_data, cb_node, pPipe, indexed, msg_code);
1165 
1166     // Now complete other state checks
1167     if (VK_NULL_HANDLE != state.pipeline_layout.layout) {
1168         string errorString;
1169         auto pipeline_layout = pPipe->pipeline_layout;
1170 
1171         for (const auto &set_binding_pair : pPipe->active_slots) {
1172             uint32_t setIndex = set_binding_pair.first;
1173             // If valid set is not bound throw an error
1174             if ((state.boundDescriptorSets.size() <= setIndex) || (!state.boundDescriptorSets[setIndex])) {
1175                 result |= log_msg(
1176                     dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1177                     HandleToUint64(cb_node->commandBuffer), __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND, "DS",
1178                     "VkPipeline 0x%" PRIx64 " uses set #%u but that set is not bound.", HandleToUint64(pPipe->pipeline), setIndex);
1179             } else if (!verify_set_layout_compatibility(state.boundDescriptorSets[setIndex], &pipeline_layout, setIndex,
1180                                                         errorString)) {
1181                 // Set is bound but not compatible w/ overlapping pipeline_layout from PSO
1182                 VkDescriptorSet setHandle = state.boundDescriptorSets[setIndex]->GetSet();
1183                 result |=
1184                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
1185                             HandleToUint64(setHandle), __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
1186                             "VkDescriptorSet (0x%" PRIx64
1187                             ") bound as set #%u is not compatible with overlapping VkPipelineLayout 0x%" PRIx64 " due to: %s",
1188                             HandleToUint64(setHandle), setIndex, HandleToUint64(pipeline_layout.layout), errorString.c_str());
1189             } else {  // Valid set is bound and layout compatible, validate that it's updated
1190                 // Pull the set node
1191                 cvdescriptorset::DescriptorSet *descriptor_set = state.boundDescriptorSets[setIndex];
1192                 // Validate the draw-time state for this descriptor set
1193                 std::string err_str;
1194                 if (!descriptor_set->IsPushDescriptor()) {
1195                     // For the "bindless" style resource usage with many descriptors, we need to optimize command <-> descriptor
1196                     // binding validation. Take the requested binding set and prefilter it to eliminate redundant validation checks.
1197                     // Here, the currently bound pipeline determines whether an image validation check is redundant, because
1198                     // for images the "req" portion of the binding_req is indirectly (but tightly) coupled to the pipeline.
1199                     const cvdescriptorset::PrefilterBindRequestMap reduced_map(*descriptor_set, set_binding_pair.second, cb_node,
1200                                                                                pPipe);
1201                     const auto &binding_req_map = reduced_map.Map();
1202 
1203                     if (!descriptor_set->ValidateDrawState(binding_req_map, state.dynamicOffsets[setIndex], cb_node, function,
1204                                                            &err_str)) {
1205                         auto set = descriptor_set->GetSet();
1206                         result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
1207                                           VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, HandleToUint64(set), __LINE__,
1208                                           DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
1209                                           "Descriptor set 0x%" PRIx64 " encountered the following validation error at %s time: %s",
1210                                           HandleToUint64(set), function, err_str.c_str());
1211                     }
1212                 }
1213             }
1214         }
1215     }
1216 
1217     // Check general pipeline state that needs to be validated at drawtime
1218     if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point)
1219         result |= ValidatePipelineDrawtimeState(dev_data, state, cb_node, cmd_type, pPipe, function);
1220 
1221     return result;
1222 }
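// Illustrative draw-time call (hypothetical; the real vkCmdDraw* intercepts appear later in this
// file and pass their own command-specific msg_code):
//     skip |= ValidateDrawState(dev_data, cb_node, CMD_DRAW, false /*indexed*/,
//                               VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDraw()", <msg_code>);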
1223 
1224 static void UpdateDrawState(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, const VkPipelineBindPoint bind_point) {
1225     auto const &state = cb_state->lastBound[bind_point];
1226     PIPELINE_STATE *pPipe = state.pipeline_state;
1227     if (VK_NULL_HANDLE != state.pipeline_layout.layout) {
1228         for (const auto &set_binding_pair : pPipe->active_slots) {
1229             uint32_t setIndex = set_binding_pair.first;
1230             // Pull the set node
1231             cvdescriptorset::DescriptorSet *descriptor_set = state.boundDescriptorSets[setIndex];
1232             if (!descriptor_set->IsPushDescriptor()) {
1233                 // For the "bindless" style resource usage with many descriptors, need to optimize command <-> descriptor binding
1234                 const cvdescriptorset::PrefilterBindRequestMap reduced_map(*descriptor_set, set_binding_pair.second, cb_state);
1235                 const auto &binding_req_map = reduced_map.Map();
1236 
1237                 // Bind this set and its active descriptor resources to the command buffer
1238                 descriptor_set->BindCommandBuffer(cb_state, binding_req_map);
1239                 // For given active slots record updated images & buffers
1240                 descriptor_set->GetStorageUpdates(binding_req_map, &cb_state->updateBuffers, &cb_state->updateImages);
1241             }
1242         }
1243     }
1244     if (pPipe->vertexBindingDescriptions.size() > 0) {
1245         cb_state->vertex_buffer_used = true;
1246     }
1247 }
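// Note: UpdateDrawState mirrors the validation pass above but commits state instead of checking
// it: each non-push descriptor set is bound to the command buffer and its storage image/buffer
// updates are recorded so later submit-time validation can see them.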
1248 
1249 static bool ValidatePipelineLocked(layer_data *dev_data, std::vector<std::unique_ptr<PIPELINE_STATE>> const &pPipelines,
1250                                    int pipelineIndex) {
1251     bool skip = false;
1252 
1253     PIPELINE_STATE *pPipeline = pPipelines[pipelineIndex].get();
1254 
1255     // If create derivative bit is set, check that we've specified a base
1256     // pipeline correctly, and that the base pipeline was created to allow
1257     // derivatives.
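    // The XOR below enforces "exactly one of handle or index": basePipelineHandle ==
    // VK_NULL_HANDLE together with basePipelineIndex == -1 (neither specified) is an error, as is
    // a non-null handle combined with an index other than -1 (both specified).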
1258     if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
1259         PIPELINE_STATE *pBasePipeline = nullptr;
1260         if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
1261               (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
1262             // This check is a superset of VALIDATION_ERROR_096005a8 and VALIDATION_ERROR_096005aa
1263             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1264                             HandleToUint64(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
1265                             "Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified");
1266         } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
1267             if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
1268                 skip |=
1269                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1270                             HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_208005a0, "DS",
1271                             "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline. %s",
1272                             validation_error_map[VALIDATION_ERROR_208005a0]);
1273             } else {
1274                 pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex].get();
1275             }
1276         } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
1277             pBasePipeline = getPipelineState(dev_data, pPipeline->graphicsPipelineCI.basePipelineHandle);
1278         }
1279 
1280         if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
1281             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1282                             HandleToUint64(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
1283                             "Invalid Pipeline CreateInfo: base pipeline does not allow derivatives.");
1284         }
1285     }
1286 
1287     return skip;
1288 }
1289 
1290 // UNLOCKED pipeline validation. DO NOT look up objects in the layer_data->* maps in this function.
1291 static bool ValidatePipelineUnlocked(layer_data *dev_data, std::vector<std::unique_ptr<PIPELINE_STATE>> const &pPipelines,
1292                                      int pipelineIndex) {
1293     bool skip = false;
1294 
1295     PIPELINE_STATE *pPipeline = pPipelines[pipelineIndex].get();
1296 
1297     // Ensure the subpass index is valid. If not, then validate_and_capture_pipeline_shader_state
1298     // produces nonsense errors that confuse users. Other layers should already
1299     // emit errors for renderpass being invalid.
1300     auto subpass_desc = &pPipeline->rp_state->createInfo.pSubpasses[pPipeline->graphicsPipelineCI.subpass];
1301     if (pPipeline->graphicsPipelineCI.subpass >= pPipeline->rp_state->createInfo.subpassCount) {
1302         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1303                         HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005ee, "DS",
1304                         "Invalid Pipeline CreateInfo State: Subpass index %u is out of range for this renderpass (0..%u). %s",
1305                         pPipeline->graphicsPipelineCI.subpass, pPipeline->rp_state->createInfo.subpassCount - 1,
1306                         validation_error_map[VALIDATION_ERROR_096005ee]);
1307         subpass_desc = nullptr;
1308     }
1309 
1310     if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
1311         const safe_VkPipelineColorBlendStateCreateInfo *color_blend_state = pPipeline->graphicsPipelineCI.pColorBlendState;
1312         if (subpass_desc && (color_blend_state->attachmentCount != subpass_desc->colorAttachmentCount)) {
1313             skip |= log_msg(
1314                 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1315                 HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005d4, "DS",
1316                 "vkCreateGraphicsPipelines(): Render pass (0x%" PRIx64
1317                 ") subpass %u has colorAttachmentCount of %u which doesn't match the pColorBlendState->attachmentCount of %u. %s",
1318                 HandleToUint64(pPipeline->rp_state->renderPass), pPipeline->graphicsPipelineCI.subpass,
1319                 subpass_desc->colorAttachmentCount, color_blend_state->attachmentCount,
1320                 validation_error_map[VALIDATION_ERROR_096005d4]);
1321         }
1322         if (!dev_data->enabled_features.independentBlend) {
1323             if (pPipeline->attachments.size() > 1) {
1324                 VkPipelineColorBlendAttachmentState *pAttachments = &pPipeline->attachments[0];
1325                 for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
1326                     // Quoting the spec: "If [the independent blend] feature is not enabled, the VkPipelineColorBlendAttachmentState
1327                     // settings for all color attachments must be identical." VkPipelineColorBlendAttachmentState contains
1328                     // only attachment state, so memcmp is best suited for the comparison
1329                     if (memcmp(static_cast<const void *>(pAttachments), static_cast<const void *>(&pAttachments[i]),
1330                                sizeof(pAttachments[0]))) {
1331                         skip |=
1332                             log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1333                                     HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_0f4004ba, "DS",
1334                                     "Invalid Pipeline CreateInfo: If independent blend feature not enabled, all elements of "
1335                                     "pAttachments must be identical. %s",
1336                                     validation_error_map[VALIDATION_ERROR_0f4004ba]);
1337                         break;
1338                     }
1339                 }
1340             }
1341         }
1342         if (!dev_data->enabled_features.logicOp && (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
1343             skip |=
1344                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1345                         HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_0f4004bc, "DS",
1346                         "Invalid Pipeline CreateInfo: If logic operations feature not enabled, logicOpEnable must be VK_FALSE. %s",
1347                         validation_error_map[VALIDATION_ERROR_0f4004bc]);
1348         }
1349     }
1350 
1351     if (validate_and_capture_pipeline_shader_state(dev_data, pPipeline)) {
1352         skip = true;
1353     }
1354     // Each shader's stage must be unique
1355     if (pPipeline->duplicate_shaders) {
1356         for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) {
1357             if (pPipeline->duplicate_shaders & stage) {
1358                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1359                                 HandleToUint64(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
1360                                 "Invalid Pipeline CreateInfo State: Multiple shaders provided for stage %s",
1361                                 string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage)));
1362             }
1363         }
1364     }
1365     // VS is required
1366     if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
1367         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1368                         HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005ae, "DS",
1369                         "Invalid Pipeline CreateInfo State: Vertex Shader required. %s",
1370                         validation_error_map[VALIDATION_ERROR_096005ae]);
1371     }
1372     // Either both or neither TC/TE shaders should be defined
1373     bool has_control = (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) != 0;
1374     bool has_eval = (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) != 0;
1375     if (has_control && !has_eval) {
1376         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1377                         HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005b2, "DS",
1378                         "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair. %s",
1379                         validation_error_map[VALIDATION_ERROR_096005b2]);
1380     }
1381     if (!has_control && has_eval) {
1382         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1383                         HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005b4, "DS",
1384                         "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair. %s",
1385                         validation_error_map[VALIDATION_ERROR_096005b4]);
1386     }
1387     // Compute shaders should be specified independently of Gfx shaders
1388     if (pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) {
1389         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1390                         HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005b0, "DS",
1391                         "Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline. %s",
1392                         validation_error_map[VALIDATION_ERROR_096005b0]);
1393     }
1394     // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
1395     // Mismatching primitive topology and tessellation fails graphics pipeline creation.
1396     if (has_control && has_eval &&
1397         (!pPipeline->graphicsPipelineCI.pInputAssemblyState ||
1398          pPipeline->graphicsPipelineCI.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
1399         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1400                         HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005c0, "DS",
1401                         "Invalid Pipeline CreateInfo State: VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA topology for "
1402                         "tessellation pipelines. %s",
1403                         validation_error_map[VALIDATION_ERROR_096005c0]);
1404     }
1405     if (pPipeline->graphicsPipelineCI.pInputAssemblyState &&
1406         pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
1407         if (!has_control || !has_eval) {
1408             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1409                             HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005c2, "DS",
1410                             "Invalid Pipeline CreateInfo State: VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid "
1411                             "for tessellation pipelines. %s",
1412                             validation_error_map[VALIDATION_ERROR_096005c2]);
1413         }
1414     }
1415 
1416     // If a rasterization state is provided...
1417     if (pPipeline->graphicsPipelineCI.pRasterizationState) {
1418         if ((pPipeline->graphicsPipelineCI.pRasterizationState->depthClampEnable == VK_TRUE) &&
1419             (!dev_data->enabled_features.depthClamp)) {
1420             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1421                             HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_1020061c, "DS",
1422                             "vkCreateGraphicsPipelines(): the depthClamp device feature is disabled: the depthClampEnable member "
1423                             "of the VkPipelineRasterizationStateCreateInfo structure must be set to VK_FALSE. %s",
1424                             validation_error_map[VALIDATION_ERROR_1020061c]);
1425         }
1426 
1427         if (!isDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_BIAS) &&
1428             (pPipeline->graphicsPipelineCI.pRasterizationState->depthBiasClamp != 0.0) &&
1429             (!dev_data->enabled_features.depthBiasClamp)) {
1430             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1431                             HandleToUint64(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_FEATURE, "DS",
1432                             "vkCreateGraphicsPipelines(): the depthBiasClamp device feature is disabled: the depthBiasClamp member "
1433                             "of the VkPipelineRasterizationStateCreateInfo structure must be set to 0.0 unless the "
1434                             "VK_DYNAMIC_STATE_DEPTH_BIAS dynamic state is enabled");
1435         }
1436 
1437         // If rasterization is enabled...
1438         if (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE) {
1439             if (pPipeline->graphicsPipelineCI.pMultisampleState && (!dev_data->enabled_features.alphaToOne) &&
1440                 (pPipeline->graphicsPipelineCI.pMultisampleState->alphaToOneEnable == VK_TRUE)) {
1441                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1442                                 HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_10000622, "DS",
1443                                 "vkCreateGraphicsPipelines(): the alphaToOne device feature is disabled: the alphaToOneEnable "
1444                                 "member of the VkPipelineMultisampleStateCreateInfo structure must be set to VK_FALSE. %s",
1445                                 validation_error_map[VALIDATION_ERROR_10000622]);
1446             }
1447 
1448             // If subpass uses a depth/stencil attachment, pDepthStencilState must be a pointer to a valid structure
1449             if (subpass_desc && subpass_desc->pDepthStencilAttachment &&
1450                 subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
1451                 if (!pPipeline->graphicsPipelineCI.pDepthStencilState) {
1452                     skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1453                                     HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005e0, "DS",
1454                                     "Invalid Pipeline CreateInfo State: pDepthStencilState is NULL when rasterization is enabled "
1455                                     "and subpass uses a depth/stencil attachment. %s",
1456                                     validation_error_map[VALIDATION_ERROR_096005e0]);
1457 
1458                 } else if ((pPipeline->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE) &&
1459                            (!dev_data->enabled_features.depthBounds)) {
1460                     skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1461                                     HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_0f6004ac, "DS",
1462                                     "vkCreateGraphicsPipelines(): the depthBounds device feature is disabled: the "
1463                                     "depthBoundsTestEnable member of the VkPipelineDepthStencilStateCreateInfo structure must be "
1464                                     "set to VK_FALSE. %s",
1465                                     validation_error_map[VALIDATION_ERROR_0f6004ac]);
1466                 }
1467             }
1468 
1469             // If subpass uses color attachments, pColorBlendState must be valid pointer
1470             if (subpass_desc) {
1471                 uint32_t color_attachment_count = 0;
1472                 for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; ++i) {
1473                     if (subpass_desc->pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) {
1474                         ++color_attachment_count;
1475                     }
1476                 }
1477                 if (color_attachment_count > 0 && pPipeline->graphicsPipelineCI.pColorBlendState == nullptr) {
1478                     skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1479                                     HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_096005e2, "DS",
1480                                     "Invalid Pipeline CreateInfo State: pColorBlendState is NULL when rasterization is enabled and "
1481                                     "subpass uses color attachments. %s",
1482                                     validation_error_map[VALIDATION_ERROR_096005e2]);
1483                 }
1484             }
1485         }
1486     }
1487 
1488     auto vi = pPipeline->graphicsPipelineCI.pVertexInputState;
1489     if (vi != NULL) {
1490         for (uint32_t j = 0; j < vi->vertexAttributeDescriptionCount; j++) {
1491             VkFormat format = vi->pVertexAttributeDescriptions[j].format;
1492             // Internal call to get format info.  Still goes through layers, could potentially go directly to ICD.
1493             VkFormatProperties properties;
1494             dev_data->instance_data->dispatch_table.GetPhysicalDeviceFormatProperties(dev_data->physical_device, format,
1495                                                                                       &properties);
1496             if ((properties.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) == 0) {
1497                 skip |=
1498                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
1499                             __LINE__, VALIDATION_ERROR_14a004de, "IMAGE",
1500                             "vkCreateGraphicsPipelines: pCreateInfo[%d].pVertexInputState->vertexAttributeDescriptions[%d].format "
1501                             "(%s) is not a supported vertex buffer format. %s",
1502                             pipelineIndex, j, string_VkFormat(format), validation_error_map[VALIDATION_ERROR_14a004de]);
1503             }
1504         }
1505     }
1506 
1507     if (dev_data->extensions.vk_amd_mixed_attachment_samples && subpass_desc && pPipeline->graphicsPipelineCI.pMultisampleState) {
1508         VkSampleCountFlagBits max_sample_count = static_cast<VkSampleCountFlagBits>(0);
1509         for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; ++i) {
1510             if (subpass_desc->pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) {
1511                 max_sample_count =
1512                     std::max(max_sample_count,
1513                              pPipeline->rp_state->createInfo.pAttachments[subpass_desc->pColorAttachments[i].attachment].samples);
1514             }
1515         }
1516         if (subpass_desc->pDepthStencilAttachment && subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
1517             max_sample_count =
1518                 std::max(max_sample_count,
1519                          pPipeline->rp_state->createInfo.pAttachments[subpass_desc->pDepthStencilAttachment->attachment].samples);
1520         }
1521         if (pPipeline->graphicsPipelineCI.pMultisampleState->rasterizationSamples != max_sample_count) {
1522             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1523                             HandleToUint64(pPipeline->pipeline), __LINE__, VALIDATION_ERROR_09600bc2, "DS",
1524                             "vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->rasterizationSamples (%s) != max "
1525                             "attachment samples (%s) used in subpass %u. %s",
1526                             pipelineIndex,
1527                             string_VkSampleCountFlagBits(pPipeline->graphicsPipelineCI.pMultisampleState->rasterizationSamples),
1528                             string_VkSampleCountFlagBits(max_sample_count), pPipeline->graphicsPipelineCI.subpass,
1529                             validation_error_map[VALIDATION_ERROR_09600bc2]);
1530         }
1531     }
1532 
1533     return skip;
1534 }
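// Example for the VK_AMD_mixed_attachment_samples check above: a subpass with a 4-sample color
// attachment and an 8-sample depth/stencil attachment yields max_sample_count ==
// VK_SAMPLE_COUNT_8_BIT, so the pipeline's rasterizationSamples must also be VK_SAMPLE_COUNT_8_BIT.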
1535 
1536 // The block of code below is specifically for managing/tracking descriptor sets (DSs)
1537 
1538 // Return Pool node ptr for specified pool or else NULL
1539 DESCRIPTOR_POOL_STATE *GetDescriptorPoolState(const layer_data *dev_data, const VkDescriptorPool pool) {
1540     auto pool_it = dev_data->descriptorPoolMap.find(pool);
1541     if (pool_it == dev_data->descriptorPoolMap.end()) {
1542         return NULL;
1543     }
1544     return pool_it->second;
1545 }
1546 
1547 // Validate that the given set is valid and that it's not being used by an in-flight CmdBuffer
1548 // func_str is the name of the calling function
1549 // Return false if no errors occur
1550 // Return true if a validation error occurs and the callback returns true (to skip the upcoming API call down the chain)
1551 static bool validateIdleDescriptorSet(const layer_data *dev_data, VkDescriptorSet set, std::string func_str) {
1552     if (dev_data->instance_data->disabled.idle_descriptor_set) return false;
1553     bool skip = false;
1554     auto set_node = dev_data->setMap.find(set);
1555     if (set_node == dev_data->setMap.end()) {
1556         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
1557                         HandleToUint64(set), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
1558                         "Cannot call %s() on descriptor set 0x%" PRIx64 " that has not been allocated.", func_str.c_str(),
1559                         HandleToUint64(set));
1560     } else {
1561         // TODO : This covers various error cases so should pass error enum into this function and use passed in enum here
1562         if (set_node->second->in_use.load()) {
1563             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
1564                             HandleToUint64(set), __LINE__, VALIDATION_ERROR_2860026a, "DS",
1565                             "Cannot call %s() on descriptor set 0x%" PRIx64 " that is in use by a command buffer. %s",
1566                             func_str.c_str(), HandleToUint64(set), validation_error_map[VALIDATION_ERROR_2860026a]);
1567         }
1568     }
1569     return skip;
1570 }
1571 
1572 // Remove set from setMap and delete the set
1573 static void freeDescriptorSet(layer_data *dev_data, cvdescriptorset::DescriptorSet *descriptor_set) {
1574     dev_data->setMap.erase(descriptor_set->GetSet());
1575     delete descriptor_set;
1576 }
1577 // Free all DS Pools including their Sets & related sub-structs
1578 // NOTE : Calls to this function should be wrapped in mutex
1579 static void deletePools(layer_data *dev_data) {
1580     for (auto ii = dev_data->descriptorPoolMap.begin(); ii != dev_data->descriptorPoolMap.end();) {
1581         // Remove this pool's sets from setMap and delete them
1582         for (auto ds : ii->second->sets) {
1583             freeDescriptorSet(dev_data, ds);
1584         }
1585         ii->second->sets.clear();
1586         delete ii->second;
1587         ii = dev_data->descriptorPoolMap.erase(ii);
1588     }
1589 }
1590 
1591 static void clearDescriptorPool(layer_data *dev_data, const VkDevice device, const VkDescriptorPool pool,
1592                                 VkDescriptorPoolResetFlags flags) {
1593     DESCRIPTOR_POOL_STATE *pPool = GetDescriptorPoolState(dev_data, pool);
1594     // TODO: validate flags
1595     // For every set off of this pool, clear it, remove from setMap, and free cvdescriptorset::DescriptorSet
1596     for (auto ds : pPool->sets) {
1597         freeDescriptorSet(dev_data, ds);
1598     }
1599     pPool->sets.clear();
1600     // Reset available count for each type and available sets for this pool
1601     for (uint32_t i = 0; i < pPool->availableDescriptorTypeCount.size(); ++i) {
1602         pPool->availableDescriptorTypeCount[i] = pPool->maxDescriptorTypeCount[i];
1603     }
1604     pPool->availableSets = pPool->maxSets;
1605 }
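// Example: resetting a pool created with maxSets == 8 after three allocations frees those three
// sets, restores availableSets to 8, and restores each availableDescriptorTypeCount[i] to its
// maxDescriptorTypeCount[i] ceiling.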
1606 
1607 // For given CB object, fetch associated CB Node from map
1608 GLOBAL_CB_NODE *GetCBNode(layer_data const *dev_data, const VkCommandBuffer cb) {
1609     auto it = dev_data->commandBufferMap.find(cb);
1610     if (it == dev_data->commandBufferMap.end()) {
1611         return NULL;
1612     }
1613     return it->second;
1614 }
1615 
1616 // If a renderpass is active, verify that the given command type is appropriate for current subpass state
1617 bool ValidateCmdSubpassState(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) {
1618     if (!pCB->activeRenderPass) return false;
1619     bool skip = false;
1620     if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS &&
1621         (cmd_type != CMD_EXECUTECOMMANDS && cmd_type != CMD_NEXTSUBPASS && cmd_type != CMD_ENDRENDERPASS)) {
1622         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1623                         HandleToUint64(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
1624                         "Commands cannot be called in a subpass using secondary command buffers.");
1625     } else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
1626         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1627                         HandleToUint64(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
1628                         "vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
1629     }
1630     return skip;
1631 }
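// Example: inside a subpass begun with VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS, recording
// vkCmdDraw (CMD_DRAW) is flagged, while CMD_EXECUTECOMMANDS, CMD_NEXTSUBPASS, and
// CMD_ENDRENDERPASS remain legal; conversely, CMD_EXECUTECOMMANDS is flagged in an INLINE subpass.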
1632 
1633 bool ValidateCmdQueueFlags(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *caller_name,
1634                            VkQueueFlags required_flags, UNIQUE_VALIDATION_ERROR_CODE error_code) {
1635     auto pool = GetCommandPoolNode(dev_data, cb_node->createInfo.commandPool);
1636     if (pool) {
1637         VkQueueFlags queue_flags = dev_data->phys_dev_properties.queue_family_properties[pool->queueFamilyIndex].queueFlags;
1638         if (!(required_flags & queue_flags)) {
1639             string required_flags_string;
1640             for (auto flag : {VK_QUEUE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT, VK_QUEUE_COMPUTE_BIT}) {
1641                 if (flag & required_flags) {
1642                     if (required_flags_string.size()) {
1643                         required_flags_string += " or ";
1644                     }
1645                     required_flags_string += string_VkQueueFlagBits(flag);
1646                 }
1647             }
1648             return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1649                            HandleToUint64(cb_node->commandBuffer), __LINE__, error_code, "DS",
1650                            "Cannot call %s on a command buffer allocated from a pool without %s capabilities. %s.", caller_name,
1651                            required_flags_string.c_str(), validation_error_map[error_code]);
1652         }
1653     }
1654     return false;
1655 }
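// Example: validating vkCmdDispatch() against a command buffer allocated from a transfer-only
// queue family pool, with required_flags == VK_QUEUE_COMPUTE_BIT, reports "Cannot call
// vkCmdDispatch() on a command buffer allocated from a pool without VK_QUEUE_COMPUTE_BIT
// capabilities."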
1656 
1657 static char const *GetCauseStr(VK_OBJECT obj) {
1658     if (obj.type == kVulkanObjectTypeDescriptorSet) return "destroyed or updated";
1659     if (obj.type == kVulkanObjectTypeCommandBuffer) return "destroyed or rerecorded";
1660     return "destroyed";
1661 }
1662 
1663 static bool ReportInvalidCommandBuffer(layer_data *dev_data, const GLOBAL_CB_NODE *cb_state, const char *call_source) {
1664     bool skip = false;
1665     for (auto obj : cb_state->broken_bindings) {
1666         const char *type_str = object_string[obj.type];
1667         const char *cause_str = GetCauseStr(obj);
1668         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1669                         HandleToUint64(cb_state->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
1670                         "You are adding %s to command buffer 0x%" PRIx64 " that is invalid because bound %s 0x%" PRIx64 " was %s.",
1671                         call_source, HandleToUint64(cb_state->commandBuffer), type_str, obj.handle, cause_str);
1672     }
1673     return skip;
1674 }
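// Illustrative report (wording assembled from the format string above): a bound VkDescriptorSet
// that was later updated produces "... invalid because bound VkDescriptorSet 0x<handle> was
// destroyed or updated.", with the cause string chosen by GetCauseStr().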
1675 
1676 // 'commandBuffer must be in the recording state' valid usage error code for each command
1677 // Note: grepping for ^^^^^^^^^ in vk_validation_database is easily massaged into the following list
1678 // Note: C++11 doesn't automatically devolve enum types to the underlying type for hash traits purposes (fixed in C++14)
1679 using CmdTypeHashType = std::underlying_type<CMD_TYPE>::type;
1680 static const std::unordered_map<CmdTypeHashType, UNIQUE_VALIDATION_ERROR_CODE> must_be_recording_map = {
1681     {CMD_NONE, VALIDATION_ERROR_UNDEFINED},  // UNMATCHED
1682     {CMD_BEGINQUERY, VALIDATION_ERROR_17802413},
1683     {CMD_BEGINRENDERPASS, VALIDATION_ERROR_17a02413},
1684     {CMD_BINDDESCRIPTORSETS, VALIDATION_ERROR_17c02413},
1685     {CMD_BINDINDEXBUFFER, VALIDATION_ERROR_17e02413},
1686     {CMD_BINDPIPELINE, VALIDATION_ERROR_18002413},
1687     {CMD_BINDVERTEXBUFFERS, VALIDATION_ERROR_18202413},
1688     {CMD_BLITIMAGE, VALIDATION_ERROR_18402413},
1689     {CMD_CLEARATTACHMENTS, VALIDATION_ERROR_18602413},
1690     {CMD_CLEARCOLORIMAGE, VALIDATION_ERROR_18802413},
1691     {CMD_CLEARDEPTHSTENCILIMAGE, VALIDATION_ERROR_18a02413},
1692     {CMD_COPYBUFFER, VALIDATION_ERROR_18c02413},
1693     {CMD_COPYBUFFERTOIMAGE, VALIDATION_ERROR_18e02413},
1694     {CMD_COPYIMAGE, VALIDATION_ERROR_19002413},
1695     {CMD_COPYIMAGETOBUFFER, VALIDATION_ERROR_19202413},
1696     {CMD_COPYQUERYPOOLRESULTS, VALIDATION_ERROR_19402413},
1697     {CMD_DEBUGMARKERBEGINEXT, VALIDATION_ERROR_19602413},
1698     {CMD_DEBUGMARKERENDEXT, VALIDATION_ERROR_19802413},
1699     {CMD_DEBUGMARKERINSERTEXT, VALIDATION_ERROR_19a02413},
1700     {CMD_DISPATCH, VALIDATION_ERROR_19c02413},
1701     // Exclude KHX (if not already present) { CMD_DISPATCHBASEKHX, VALIDATION_ERROR_19e02413 },
1702     {CMD_DISPATCHINDIRECT, VALIDATION_ERROR_1a002413},
1703     {CMD_DRAW, VALIDATION_ERROR_1a202413},
1704     {CMD_DRAWINDEXED, VALIDATION_ERROR_1a402413},
1705     {CMD_DRAWINDEXEDINDIRECT, VALIDATION_ERROR_1a602413},
1706     // Exclude vendor ext (if not already present) { CMD_DRAWINDEXEDINDIRECTCOUNTAMD, VALIDATION_ERROR_1a802413 },
1707     {CMD_DRAWINDIRECT, VALIDATION_ERROR_1aa02413},
1708     // Exclude vendor ext (if not already present) { CMD_DRAWINDIRECTCOUNTAMD, VALIDATION_ERROR_1ac02413 },
1709     {CMD_ENDCOMMANDBUFFER, VALIDATION_ERROR_27400076},
1710     {CMD_ENDQUERY, VALIDATION_ERROR_1ae02413},
1711     {CMD_ENDRENDERPASS, VALIDATION_ERROR_1b002413},
1712     {CMD_EXECUTECOMMANDS, VALIDATION_ERROR_1b202413},
1713     {CMD_FILLBUFFER, VALIDATION_ERROR_1b402413},
1714     {CMD_NEXTSUBPASS, VALIDATION_ERROR_1b602413},
1715     {CMD_PIPELINEBARRIER, VALIDATION_ERROR_1b802413},
1716     // Exclude vendor ext (if not already present) { CMD_PROCESSCOMMANDSNVX, VALIDATION_ERROR_1ba02413 },
1717     {CMD_PUSHCONSTANTS, VALIDATION_ERROR_1bc02413},
1718     {CMD_PUSHDESCRIPTORSETKHR, VALIDATION_ERROR_1be02413},
1719     {CMD_PUSHDESCRIPTORSETWITHTEMPLATEKHR, VALIDATION_ERROR_1c002413},
1720     // Exclude vendor ext (if not already present) { CMD_RESERVESPACEFORCOMMANDSNVX, VALIDATION_ERROR_1c202413 },
1721     {CMD_RESETEVENT, VALIDATION_ERROR_1c402413},
1722     {CMD_RESETQUERYPOOL, VALIDATION_ERROR_1c602413},
1723     {CMD_RESOLVEIMAGE, VALIDATION_ERROR_1c802413},
1724     {CMD_SETBLENDCONSTANTS, VALIDATION_ERROR_1ca02413},
1725     {CMD_SETDEPTHBIAS, VALIDATION_ERROR_1cc02413},
1726     {CMD_SETDEPTHBOUNDS, VALIDATION_ERROR_1ce02413},
1727     // Exclude KHX (if not already present) { CMD_SETDEVICEMASKKHX, VALIDATION_ERROR_1d002413 },
1728     {CMD_SETDISCARDRECTANGLEEXT, VALIDATION_ERROR_1d202413},
1729     {CMD_SETEVENT, VALIDATION_ERROR_1d402413},
1730     {CMD_SETLINEWIDTH, VALIDATION_ERROR_1d602413},
1731     {CMD_SETSAMPLELOCATIONSEXT, VALIDATION_ERROR_3e202413},
1732     {CMD_SETSCISSOR, VALIDATION_ERROR_1d802413},
1733     {CMD_SETSTENCILCOMPAREMASK, VALIDATION_ERROR_1da02413},
1734     {CMD_SETSTENCILREFERENCE, VALIDATION_ERROR_1dc02413},
1735     {CMD_SETSTENCILWRITEMASK, VALIDATION_ERROR_1de02413},
1736     {CMD_SETVIEWPORT, VALIDATION_ERROR_1e002413},
1737     // Exclude vendor ext (if not already present) { CMD_SETVIEWPORTWSCALINGNV, VALIDATION_ERROR_1e202413 },
1738     {CMD_UPDATEBUFFER, VALIDATION_ERROR_1e402413},
1739     {CMD_WAITEVENTS, VALIDATION_ERROR_1e602413},
1740     {CMD_WRITETIMESTAMP, VALIDATION_ERROR_1e802413},
1741 };
1742 
1743 // Validate the given command being added to the specified cmd buffer, flagging errors if CB is not in the recording state or if
1744 // there's an issue with the Cmd ordering
1745 bool ValidateCmd(layer_data *dev_data, const GLOBAL_CB_NODE *cb_state, const CMD_TYPE cmd, const char *caller_name) {
1746     switch (cb_state->state) {
1747         case CB_RECORDING:
1748             return ValidateCmdSubpassState(dev_data, cb_state, cmd);
1749 
1750         case CB_INVALID_COMPLETE:
1751         case CB_INVALID_INCOMPLETE:
1752             return ReportInvalidCommandBuffer(dev_data, cb_state, caller_name);
1753 
1754         default:
1755             auto error_it = must_be_recording_map.find(cmd);
1756             // This assert lets us know that a vkCmd.* entrypoint has been added without enabling it in the map
1757             assert(error_it != must_be_recording_map.cend());
1758             if (error_it == must_be_recording_map.cend()) {
1759                 error_it = must_be_recording_map.find(CMD_NONE);  // But we'll handle the asserting case, in case of a test gap
1760             }
1761             const auto error = error_it->second;
1762             return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1763                            HandleToUint64(cb_state->commandBuffer), __LINE__, error, "DS",
1764                            "You must call vkBeginCommandBuffer() before this call to %s. %s", caller_name,
1765                            validation_error_map[error]);
1766     }
1767 }
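// Illustrative usage (a sketch, not code from this layer): each vkCmd* entry point is
// expected to pair ValidateCmd() with its own CMD_TYPE enum before recording state, e.g.:
//
//     bool skip = ValidateCmd(dev_data, cb_state, CMD_SETSCISSOR, "vkCmdSetScissor()");
//     if (!skip) { /* record state, then call down the dispatch table */ }
//
// CMD_SETSCISSOR is one of the commands keyed in must_be_recording_map above.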

// For given object struct return a ptr of BASE_NODE type for its wrapping struct
BASE_NODE *GetStateStructPtrFromObject(layer_data *dev_data, VK_OBJECT object_struct) {
    BASE_NODE *base_ptr = nullptr;
    switch (object_struct.type) {
        case kVulkanObjectTypeDescriptorSet: {
            base_ptr = GetSetNode(dev_data, reinterpret_cast<VkDescriptorSet &>(object_struct.handle));
            break;
        }
        case kVulkanObjectTypeSampler: {
            base_ptr = GetSamplerState(dev_data, reinterpret_cast<VkSampler &>(object_struct.handle));
            break;
        }
        case kVulkanObjectTypeQueryPool: {
            base_ptr = GetQueryPoolNode(dev_data, reinterpret_cast<VkQueryPool &>(object_struct.handle));
            break;
        }
        case kVulkanObjectTypePipeline: {
            base_ptr = getPipelineState(dev_data, reinterpret_cast<VkPipeline &>(object_struct.handle));
            break;
        }
        case kVulkanObjectTypeBuffer: {
            base_ptr = GetBufferState(dev_data, reinterpret_cast<VkBuffer &>(object_struct.handle));
            break;
        }
        case kVulkanObjectTypeBufferView: {
            base_ptr = GetBufferViewState(dev_data, reinterpret_cast<VkBufferView &>(object_struct.handle));
            break;
        }
        case kVulkanObjectTypeImage: {
            base_ptr = GetImageState(dev_data, reinterpret_cast<VkImage &>(object_struct.handle));
            break;
        }
        case kVulkanObjectTypeImageView: {
            base_ptr = GetImageViewState(dev_data, reinterpret_cast<VkImageView &>(object_struct.handle));
            break;
        }
        case kVulkanObjectTypeEvent: {
            base_ptr = GetEventNode(dev_data, reinterpret_cast<VkEvent &>(object_struct.handle));
            break;
        }
        case kVulkanObjectTypeDescriptorPool: {
            base_ptr = GetDescriptorPoolState(dev_data, reinterpret_cast<VkDescriptorPool &>(object_struct.handle));
            break;
        }
        case kVulkanObjectTypeCommandPool: {
            base_ptr = GetCommandPoolNode(dev_data, reinterpret_cast<VkCommandPool &>(object_struct.handle));
            break;
        }
        case kVulkanObjectTypeFramebuffer: {
            base_ptr = GetFramebufferState(dev_data, reinterpret_cast<VkFramebuffer &>(object_struct.handle));
            break;
        }
        case kVulkanObjectTypeRenderPass: {
            base_ptr = GetRenderPassState(dev_data, reinterpret_cast<VkRenderPass &>(object_struct.handle));
            break;
        }
        case kVulkanObjectTypeDeviceMemory: {
            base_ptr = GetMemObjInfo(dev_data, reinterpret_cast<VkDeviceMemory &>(object_struct.handle));
            break;
        }
        default:
            // TODO : Any other objects to be handled here?
            assert(0);
            break;
    }
    return base_ptr;
}
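// Illustrative lookup (a sketch; `buffer` is a hypothetical VkBuffer handle): callers
// package a handle plus its type into a VK_OBJECT, matching the initializer-list form
// used elsewhere in this file:
//
//     VK_OBJECT obj = {HandleToUint64(buffer), kVulkanObjectTypeBuffer};
//     BASE_NODE *node = GetStateStructPtrFromObject(dev_data, obj);
//     if (node) node->in_use.fetch_add(1);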

// Tie the VK_OBJECT to the cmd buffer which includes:
//  Add object_binding to cmd buffer
//  Add cb_binding to object
static void addCommandBufferBinding(std::unordered_set<GLOBAL_CB_NODE *> *cb_bindings, VK_OBJECT obj, GLOBAL_CB_NODE *cb_node) {
    cb_bindings->insert(cb_node);
    cb_node->object_bindings.insert(obj);
}
// For a given object, if cb_node is in that object's cb_bindings, remove cb_node
static void removeCommandBufferBinding(layer_data *dev_data, VK_OBJECT const *object, GLOBAL_CB_NODE *cb_node) {
    BASE_NODE *base_obj = GetStateStructPtrFromObject(dev_data, *object);
    if (base_obj) base_obj->cb_bindings.erase(cb_node);
}
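// Note: these two helpers are intentionally symmetric. addCommandBufferBinding() records
// the two-way link while commands are recorded; removeCommandBufferBinding() erases the
// reverse link when a command buffer is reset (see ResetCommandBufferState below).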
// Reset the command buffer state
//  Maintain the createInfo and set state to CB_NEW, but clear all other state
static void ResetCommandBufferState(layer_data *dev_data, const VkCommandBuffer cb) {
    GLOBAL_CB_NODE *pCB = dev_data->commandBufferMap[cb];
    if (pCB) {
        pCB->in_use.store(0);
        // Reset CB state (note that createInfo is not cleared)
        pCB->commandBuffer = cb;
        memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
        memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
        pCB->hasDrawCmd = false;
        pCB->state = CB_NEW;
        pCB->submitCount = 0;
        pCB->image_layout_change_count = 1;  // Start at 1. 0 is insert value for validation cache versions, s.t. new == dirty
        pCB->status = 0;
        pCB->static_status = 0;
        pCB->viewportMask = 0;
        pCB->scissorMask = 0;

        for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
            pCB->lastBound[i].reset();
        }

        memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo));
        pCB->activeRenderPass = nullptr;
        pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE;
        pCB->activeSubpass = 0;
        pCB->broken_bindings.clear();
        pCB->waitedEvents.clear();
        pCB->events.clear();
        pCB->writeEventsBeforeWait.clear();
        pCB->waitedEventsBeforeQueryReset.clear();
        pCB->queryToStateMap.clear();
        pCB->activeQueries.clear();
        pCB->startedQueries.clear();
        pCB->imageLayoutMap.clear();
        pCB->eventToStageMap.clear();
        pCB->drawData.clear();
        pCB->currentDrawData.buffers.clear();
        pCB->vertex_buffer_used = false;
        pCB->primaryCommandBuffer = VK_NULL_HANDLE;
        // If secondary, invalidate any primary command buffer that may call us.
        if (pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
            invalidateCommandBuffers(dev_data, pCB->linkedCommandBuffers, {HandleToUint64(cb), kVulkanObjectTypeCommandBuffer});
        }

        // Remove reverse command buffer links.
        for (auto pSubCB : pCB->linkedCommandBuffers) {
            pSubCB->linkedCommandBuffers.erase(pCB);
        }
        pCB->linkedCommandBuffers.clear();
        pCB->updateImages.clear();
        pCB->updateBuffers.clear();
        clear_cmd_buf_and_mem_references(dev_data, pCB);
        pCB->queue_submit_functions.clear();
        pCB->cmd_execute_commands_functions.clear();
        pCB->eventUpdates.clear();
        pCB->queryUpdates.clear();

        // Remove object bindings
        for (auto obj : pCB->object_bindings) {
            removeCommandBufferBinding(dev_data, &obj, pCB);
        }
        pCB->object_bindings.clear();
        // Remove this cmdBuffer's reference from each FrameBuffer's CB ref list
        for (auto framebuffer : pCB->framebuffers) {
            auto fb_state = GetFramebufferState(dev_data, framebuffer);
            if (fb_state) fb_state->cb_bindings.erase(pCB);
        }
        pCB->framebuffers.clear();
        pCB->activeFramebuffer = VK_NULL_HANDLE;
    }
}
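// Illustrative call sites (a sketch): both vkResetCommandBuffer() and a repeated
// vkBeginCommandBuffer() on an already-recorded buffer are expected to funnel through
//
//     ResetCommandBufferState(dev_data, commandBuffer);  // back to CB_NEW, createInfo preserved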

CBStatusFlags MakeStaticStateMask(VkPipelineDynamicStateCreateInfo const *ds) {
    // initially assume everything is static state
    CBStatusFlags flags = CBSTATUS_ALL_STATE_SET;

    if (ds) {
        for (uint32_t i = 0; i < ds->dynamicStateCount; i++) {
            switch (ds->pDynamicStates[i]) {
                case VK_DYNAMIC_STATE_LINE_WIDTH:
                    flags &= ~CBSTATUS_LINE_WIDTH_SET;
                    break;
                case VK_DYNAMIC_STATE_DEPTH_BIAS:
                    flags &= ~CBSTATUS_DEPTH_BIAS_SET;
                    break;
                case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
                    flags &= ~CBSTATUS_BLEND_CONSTANTS_SET;
                    break;
                case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
                    flags &= ~CBSTATUS_DEPTH_BOUNDS_SET;
                    break;
                case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
                    flags &= ~CBSTATUS_STENCIL_READ_MASK_SET;
                    break;
                case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
                    flags &= ~CBSTATUS_STENCIL_WRITE_MASK_SET;
                    break;
                case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
                    flags &= ~CBSTATUS_STENCIL_REFERENCE_SET;
                    break;
                case VK_DYNAMIC_STATE_SCISSOR:
                    flags &= ~CBSTATUS_SCISSOR_SET;
                    break;
                case VK_DYNAMIC_STATE_VIEWPORT:
                    flags &= ~CBSTATUS_VIEWPORT_SET;
                    break;
                default:
                    break;
            }
        }
    }

    return flags;
}
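// Worked example (illustrative): a pipeline whose dynamic-state array is
// {VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR} yields
//
//     flags == CBSTATUS_ALL_STATE_SET & ~(CBSTATUS_VIEWPORT_SET | CBSTATUS_SCISSOR_SET)
//
// meaning viewport and scissor must come from vkCmdSetViewport()/vkCmdSetScissor() at
// record time, while all remaining state is baked into the pipeline.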

// Flags validation error if the associated call is made inside a render pass. The apiName routine should ONLY be called outside a
// render pass.
bool insideRenderPass(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const char *apiName,
                      UNIQUE_VALIDATION_ERROR_CODE msgCode) {
    bool inside = false;
    if (pCB->activeRenderPass) {
        inside = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                         HandleToUint64(pCB->commandBuffer), __LINE__, msgCode, "DS",
                         "%s: It is invalid to issue this call inside an active render pass (0x%" PRIx64 "). %s", apiName,
                         HandleToUint64(pCB->activeRenderPass->renderPass), validation_error_map[msgCode]);
    }
    return inside;
}

// Flags validation error if the associated call is made outside a render pass. The apiName
// routine should ONLY be called inside a render pass.
bool outsideRenderPass(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, const char *apiName, UNIQUE_VALIDATION_ERROR_CODE msgCode) {
    bool outside = false;
    if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
        ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
         !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
        outside = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                          HandleToUint64(pCB->commandBuffer), __LINE__, msgCode, "DS",
                          "%s: This call must be issued inside an active render pass. %s", apiName, validation_error_map[msgCode]);
    }
    return outside;
}
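// Illustrative usage of the pair above (a sketch; `some_vuid` stands in for whatever
// command-specific UNIQUE_VALIDATION_ERROR_CODE the calling entry point owns):
//
//     skip |= insideRenderPass(dev_data, cb_state, "vkCmdCopyBuffer()", some_vuid);   // must be outside a render pass
//     skip |= outsideRenderPass(dev_data, cb_state, "vkCmdDraw()", some_vuid);        // must be inside a render pass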

static void init_core_validation(instance_layer_data *instance_data, const VkAllocationCallbacks *pAllocator) {
    layer_debug_actions(instance_data->report_data, instance_data->logging_callback, pAllocator, "lunarg_core_validation");
}

// For the given ValidationCheck enum, set all relevant instance disabled flags to true
void SetDisabledFlags(instance_layer_data *instance_data, const VkValidationFlagsEXT *val_flags_struct) {
    for (uint32_t i = 0; i < val_flags_struct->disabledValidationCheckCount; ++i) {
        switch (val_flags_struct->pDisabledValidationChecks[i]) {
            case VK_VALIDATION_CHECK_SHADERS_EXT:
                instance_data->disabled.shader_validation = true;
                break;
            case VK_VALIDATION_CHECK_ALL_EXT:
                // Set all disabled flags to true
                instance_data->disabled.SetAll(true);
                break;
            default:
                break;
        }
    }
}
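// Application-side sketch (assumes the instance supports VK_EXT_validation_flags): the
// struct handled above arrives on VkInstanceCreateInfo::pNext, e.g. to disable only
// shader validation:
//
//     VkValidationCheckEXT disables[] = {VK_VALIDATION_CHECK_SHADERS_EXT};
//     VkValidationFlagsEXT val_flags = {VK_STRUCTURE_TYPE_VALIDATION_FLAGS_EXT, nullptr, 1, disables};
//     instance_create_info.pNext = &val_flags;  // instance_create_info is a hypothetical VkInstanceCreateInfo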

VKAPI_ATTR VkResult VKAPI_CALL CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
                                              VkInstance *pInstance) {
    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
    if (fpCreateInstance == NULL) return VK_ERROR_INITIALIZATION_FAILED;

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
    if (result != VK_SUCCESS) return result;

    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(*pInstance), instance_layer_data_map);
    instance_data->instance = *pInstance;
    layer_init_instance_dispatch_table(*pInstance, &instance_data->dispatch_table, fpGetInstanceProcAddr);
    instance_data->report_data = debug_report_create_instance(
        &instance_data->dispatch_table, *pInstance, pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames);
    instance_data->extensions.InitFromInstanceCreateInfo(pCreateInfo);
    init_core_validation(instance_data, pAllocator);

    ValidateLayerOrdering(*pCreateInfo);
    // Parse any pNext chains
    const auto *validation_flags_ext = lvl_find_in_chain<VkValidationFlagsEXT>(pCreateInfo->pNext);
    if (validation_flags_ext) {
        SetDisabledFlags(instance_data, validation_flags_ext);
    }

    return result;
}

// Hook DestroyInstance to remove tableInstanceMap entry
VKAPI_ATTR void VKAPI_CALL DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
    // TODOSC : Shouldn't need any customization here
    dispatch_key key = get_dispatch_key(instance);
    // TBD: Need any locking this early, in case this function is called at the
    // same time by more than one thread?
    instance_layer_data *instance_data = GetLayerDataPtr(key, instance_layer_data_map);
    instance_data->dispatch_table.DestroyInstance(instance, pAllocator);

    lock_guard_t lock(global_lock);
    // Clean up logging callback, if any
    while (instance_data->logging_callback.size() > 0) {
        VkDebugReportCallbackEXT callback = instance_data->logging_callback.back();
        layer_destroy_msg_callback(instance_data->report_data, callback, pAllocator);
        instance_data->logging_callback.pop_back();
    }

    layer_debug_report_destroy_instance(instance_data->report_data);
    FreeLayerDataPtr(key, instance_layer_data_map);
}

static bool ValidatePhysicalDeviceQueueFamily(instance_layer_data *instance_data, const PHYSICAL_DEVICE_STATE *pd_state,
                                              uint32_t requested_queue_family, int32_t err_code, const char *cmd_name,
                                              const char *queue_family_var_name, const char *vu_note = nullptr) {
    bool skip = false;

    if (!vu_note) vu_note = validation_error_map[err_code];

    const char *conditional_ext_cmd =
        instance_data->extensions.vk_khr_get_physical_device_properties_2 ? "or vkGetPhysicalDeviceQueueFamilyProperties2KHR" : "";

    std::string count_note = (UNCALLED == pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState)
                                 ? "the pQueueFamilyPropertyCount was never obtained"
                                 : "i.e. is not less than " + std::to_string(pd_state->queue_family_count);

    if (requested_queue_family >= pd_state->queue_family_count) {
        skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                        HandleToUint64(pd_state->phys_device), __LINE__, err_code, "DL",
                        "%s: %s (= %" PRIu32
                        ") is not less than any previously obtained pQueueFamilyPropertyCount from "
                        "vkGetPhysicalDeviceQueueFamilyProperties%s (%s). %s",
                        cmd_name, queue_family_var_name, requested_queue_family, conditional_ext_cmd, count_note.c_str(), vu_note);
    }
    return skip;
}

// Verify VkDeviceQueueCreateInfos
static bool ValidateDeviceQueueCreateInfos(instance_layer_data *instance_data, const PHYSICAL_DEVICE_STATE *pd_state,
                                           uint32_t info_count, const VkDeviceQueueCreateInfo *infos) {
    bool skip = false;

    for (uint32_t i = 0; i < info_count; ++i) {
        const auto requested_queue_family = infos[i].queueFamilyIndex;

        // Verify that requested queue family is known to be valid at this point in time
        std::string queue_family_var_name = "pCreateInfo->pQueueCreateInfos[" + std::to_string(i) + "].queueFamilyIndex";
        skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, requested_queue_family, VALIDATION_ERROR_06c002fa,
                                                  "vkCreateDevice", queue_family_var_name.c_str());

        // Verify that requested queue count of queue family is known to be valid at this point in time
        if (requested_queue_family < pd_state->queue_family_count) {
            const auto requested_queue_count = infos[i].queueCount;
            const auto queue_family_props_count = pd_state->queue_family_properties.size();
            const bool queue_family_has_props = requested_queue_family < queue_family_props_count;
            const char *conditional_ext_cmd = instance_data->extensions.vk_khr_get_physical_device_properties_2
                                                  ? "or vkGetPhysicalDeviceQueueFamilyProperties2KHR"
                                                  : "";
            std::string count_note =
                !queue_family_has_props
                    ? "the pQueueFamilyProperties[" + std::to_string(requested_queue_family) + "] was never obtained"
                    : "i.e. is not less than or equal to " +
                          std::to_string(pd_state->queue_family_properties[requested_queue_family].queueCount);

            if (!queue_family_has_props ||
                requested_queue_count > pd_state->queue_family_properties[requested_queue_family].queueCount) {
                skip |= log_msg(
                    instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                    HandleToUint64(pd_state->phys_device), __LINE__, VALIDATION_ERROR_06c002fc, "DL",
                    "vkCreateDevice: pCreateInfo->pQueueCreateInfos[%" PRIu32 "].queueCount (=%" PRIu32
                    ") is not less than or equal to available queue count for this pCreateInfo->pQueueCreateInfos[%" PRIu32
                    "].queueFamilyIndex (=%" PRIu32
                    ") obtained previously from vkGetPhysicalDeviceQueueFamilyProperties%s (%s). %s",
                    i, requested_queue_count, i, requested_queue_family, conditional_ext_cmd, count_note.c_str(),
                    validation_error_map[VALIDATION_ERROR_06c002fc]);
            }
        }
    }

    return skip;
}
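// Worked example (illustrative): on a device reporting two queue families where family 0
// advertises queueCount == 1, a VkDeviceQueueCreateInfo with queueFamilyIndex == 2 trips
// the first check, and one with queueFamilyIndex == 0 but queueCount == 4 trips the second.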

// Verify that features have been queried and that they are available
static bool ValidateRequestedFeatures(instance_layer_data *instance_data, const PHYSICAL_DEVICE_STATE *pd_state,
                                      const VkPhysicalDeviceFeatures *requested_features) {
    bool skip = false;

    const VkBool32 *actual = reinterpret_cast<const VkBool32 *>(&pd_state->features);
    const VkBool32 *requested = reinterpret_cast<const VkBool32 *>(requested_features);
    // TODO : This is a nice, compact way to loop through struct, but a bad way to report issues
    //  Need to provide the struct member name with the issue. To do that seems like we'll
    //  have to loop through each struct member which should be done w/ codegen to keep in synch.
    uint32_t errors = 0;
    uint32_t total_bools = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
    for (uint32_t i = 0; i < total_bools; i++) {
        if (requested[i] > actual[i]) {
            skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED, "DL",
                            "While calling vkCreateDevice(), requesting feature '%s' in VkPhysicalDeviceFeatures struct, which is "
                            "not available on this device.",
                            GetPhysDevFeatureString(i));
            errors++;
        }
    }
    if (errors && (UNCALLED == pd_state->vkGetPhysicalDeviceFeaturesState)) {
        // If user didn't request features, notify them that they should
        // TODO: Verify this against the spec. I believe this is an invalid use of the API and should return an error
        skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                        0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED, "DL",
                        "You requested features that are unavailable on this device. You should first query feature availability "
                        "by calling vkGetPhysicalDeviceFeatures().");
    }
    return skip;
}
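// Note on the loop above: it relies on every member of VkPhysicalDeviceFeatures being a
// VkBool32, so sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32) is exactly the member
// count and the struct can be scanned as a flat array. Illustrative failure: requesting
// geometryShader == VK_TRUE when vkGetPhysicalDeviceFeatures() reported VK_FALSE for it
// produces one DEVLIMITS_INVALID_FEATURE_REQUESTED error for that member.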

VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
                                            const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
    bool skip = false;
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(gpu), instance_layer_data_map);

    unique_lock_t lock(global_lock);
    auto pd_state = GetPhysicalDeviceState(instance_data, gpu);

    // TODO: object_tracker should perhaps do this instead
    //       and it does not seem to currently work anyway -- the loader just crashes before this point
    if (!GetPhysicalDeviceState(instance_data, gpu)) {
        skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                        0, __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
                        "Invalid call to vkCreateDevice() w/o first calling vkEnumeratePhysicalDevices().");
    }

    // Check that any requested features are available
    // The enabled features can come from either pEnabledFeatures, or from the pNext chain
    const VkPhysicalDeviceFeatures *enabled_features_found = pCreateInfo->pEnabledFeatures;
    if (nullptr == enabled_features_found) {
        const auto *features2 = lvl_find_in_chain<VkPhysicalDeviceFeatures2KHR>(pCreateInfo->pNext);
        if (features2) {
            enabled_features_found = &(features2->features);
        }
    }

    if (enabled_features_found) {
        skip |= ValidateRequestedFeatures(instance_data, pd_state, enabled_features_found);
    }

    skip |=
        ValidateDeviceQueueCreateInfos(instance_data, pd_state, pCreateInfo->queueCreateInfoCount, pCreateInfo->pQueueCreateInfos);

    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;

    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(instance_data->instance, "vkCreateDevice");
    if (fpCreateDevice == NULL) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    lock.unlock();

    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
    if (result != VK_SUCCESS) {
        return result;
    }

    lock.lock();
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);

    device_data->instance_data = instance_data;
    // Setup device dispatch table
    layer_init_device_dispatch_table(*pDevice, &device_data->dispatch_table, fpGetDeviceProcAddr);
    device_data->device = *pDevice;
    // Save PhysicalDevice handle
    device_data->physical_device = gpu;

    device_data->report_data = layer_debug_report_create_device(instance_data->report_data, *pDevice);
    device_data->extensions.InitFromDeviceCreateInfo(&instance_data->extensions, pCreateInfo);

    // Get physical device limits for this device
    instance_data->dispatch_table.GetPhysicalDeviceProperties(gpu, &(device_data->phys_dev_properties.properties));
    uint32_t count;
    instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
    device_data->phys_dev_properties.queue_family_properties.resize(count);
    instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(
        gpu, &count, &device_data->phys_dev_properties.queue_family_properties[0]);
    // TODO: device limits should make sure these are compatible
    if (enabled_features_found) {
        device_data->enabled_features = *enabled_features_found;
    } else {
        memset(&device_data->enabled_features, 0, sizeof(VkPhysicalDeviceFeatures));
    }
    // Store physical device properties and physical device mem limits into device layer_data structs
    instance_data->dispatch_table.GetPhysicalDeviceMemoryProperties(gpu, &device_data->phys_dev_mem_props);
    instance_data->dispatch_table.GetPhysicalDeviceProperties(gpu, &device_data->phys_dev_props);

    if (device_data->extensions.vk_khr_push_descriptor) {
        // Get the needed push_descriptor limits
        auto push_descriptor_prop = lvl_init_struct<VkPhysicalDevicePushDescriptorPropertiesKHR>();
        auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&push_descriptor_prop);
        instance_data->dispatch_table.GetPhysicalDeviceProperties2KHR(gpu, &prop2);
        device_data->phys_dev_ext_props.max_push_descriptors = push_descriptor_prop.maxPushDescriptors;
    }

    lock.unlock();

    ValidateLayerOrdering(*pCreateInfo);

    return result;
}

// prototype
VKAPI_ATTR void VKAPI_CALL DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
    // TODOSC : Shouldn't need any customization here
    dispatch_key key = get_dispatch_key(device);
    layer_data *dev_data = GetLayerDataPtr(key, layer_data_map);
    // Free all the memory
    unique_lock_t lock(global_lock);
    dev_data->pipelineMap.clear();
    dev_data->renderPassMap.clear();
    for (auto ii = dev_data->commandBufferMap.begin(); ii != dev_data->commandBufferMap.end(); ++ii) {
        delete (*ii).second;
    }
    dev_data->commandBufferMap.clear();
    // This will also delete all sets in the pool & remove them from setMap
    deletePools(dev_data);
    // All sets should be removed
    assert(dev_data->setMap.empty());
    dev_data->descriptorSetLayoutMap.clear();
    dev_data->imageViewMap.clear();
    dev_data->imageMap.clear();
    dev_data->imageSubresourceMap.clear();
    dev_data->imageLayoutMap.clear();
    dev_data->bufferViewMap.clear();
    dev_data->bufferMap.clear();
    // Queues persist until device is destroyed
    dev_data->queueMap.clear();
    // Report any memory leaks
    layer_debug_report_destroy_device(device);
    lock.unlock();

#if DISPATCH_MAP_DEBUG
    fprintf(stderr, "Device: 0x%p, key: 0x%p\n", device, key);
#endif

    dev_data->dispatch_table.DestroyDevice(device, pAllocator);
    FreeLayerDataPtr(key, layer_data_map);
}

static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};

// For given stage mask, if Geometry shader stage is on w/o GS being enabled, report geo_error_id
//   and if Tessellation Control or Evaluation shader stages are on w/o TS being enabled, report tess_error_id
static bool ValidateStageMaskGsTsEnables(layer_data *dev_data, VkPipelineStageFlags stageMask, const char *caller,
                                         UNIQUE_VALIDATION_ERROR_CODE geo_error_id, UNIQUE_VALIDATION_ERROR_CODE tess_error_id) {
    bool skip = false;
    if (!dev_data->enabled_features.geometryShader && (stageMask & VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT)) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                        geo_error_id, "DL",
                        "%s call includes a stageMask with VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT bit set when device does not have "
                        "geometryShader feature enabled. %s",
                        caller, validation_error_map[geo_error_id]);
    }
    if (!dev_data->enabled_features.tessellationShader &&
        (stageMask & (VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT))) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                        tess_error_id, "DL",
                        "%s call includes a stageMask with VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT and/or "
                        "VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT bit(s) set when device does not have "
                        "tessellationShader feature enabled. %s",
                        caller, validation_error_map[tess_error_id]);
    }
    return skip;
}
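// Illustrative failing call (a sketch): with the geometryShader feature left disabled at
// device creation, an entry point such as
//
//     vkCmdSetEvent(commandBuffer, event, VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT);
//
// would be flagged here with whatever geo_error_id that entry point passes in.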

// Loop through bound objects and increment their in_use counts.
static void IncrementBoundObjects(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
    for (auto obj : cb_node->object_bindings) {
        auto base_obj = GetStateStructPtrFromObject(dev_data, obj);
        if (base_obj) {
            base_obj->in_use.fetch_add(1);
        }
    }
}
// Track which resources are in-flight by atomically incrementing their "in_use" count
static void incrementResources(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
    cb_node->submitCount++;
    cb_node->in_use.fetch_add(1);

    // First Increment for all "generic" objects bound to cmd buffer, followed by special-case objects below
    IncrementBoundObjects(dev_data, cb_node);
    // TODO : We should be able to remove the NULL look-up checks from the code below as long as
    //  all the corresponding cases are verified to cause CB_INVALID state and the CB_INVALID state
    //  should then be flagged prior to calling this function
    for (auto drawDataElement : cb_node->drawData) {
        for (auto buffer : drawDataElement.buffers) {
            auto buffer_state = GetBufferState(dev_data, buffer);
            if (buffer_state) {
                buffer_state->in_use.fetch_add(1);
            }
        }
    }
    for (auto event : cb_node->writeEventsBeforeWait) {
        auto event_state = GetEventNode(dev_data, event);
        if (event_state) event_state->write_in_use++;
    }
}

// Note: This function assumes that the global lock is held by the calling thread.
// For the given queue, verify the queue state up to the given seq number.
// Currently the only check is to make sure that if there are events to be waited on prior to
//  a QueryReset, make sure that all such events have been signalled.
static bool VerifyQueueStateToSeq(layer_data *dev_data, QUEUE_STATE *initial_queue, uint64_t initial_seq) {
    bool skip = false;

    // sequence number we want to validate up to, per queue
    std::unordered_map<QUEUE_STATE *, uint64_t> target_seqs{{initial_queue, initial_seq}};
    // sequence number we've completed validation for, per queue
    std::unordered_map<QUEUE_STATE *, uint64_t> done_seqs;
    std::vector<QUEUE_STATE *> worklist{initial_queue};

    while (worklist.size()) {
        auto queue = worklist.back();
        worklist.pop_back();

        auto target_seq = target_seqs[queue];
        auto seq = std::max(done_seqs[queue], queue->seq);
        auto sub_it = queue->submissions.begin() + int(seq - queue->seq);  // seq >= queue->seq

        for (; seq < target_seq; ++sub_it, ++seq) {
            for (auto &wait : sub_it->waitSemaphores) {
                auto other_queue = GetQueueState(dev_data, wait.queue);

                if (other_queue == queue) continue;  // semaphores /always/ point backwards, so no point here.

                auto other_target_seq = std::max(target_seqs[other_queue], wait.seq);
                auto other_done_seq = std::max(done_seqs[other_queue], other_queue->seq);

                // if this wait is for another queue, and covers new sequence
                // numbers beyond what we've already validated, mark the new
                // target seq and (possibly-re)add the queue to the worklist.
                if (other_done_seq < other_target_seq) {
                    target_seqs[other_queue] = other_target_seq;
                    worklist.push_back(other_queue);
                }
            }

            for (auto cb : sub_it->cbs) {
                auto cb_node = GetCBNode(dev_data, cb);
                if (cb_node) {
                    for (auto queryEventsPair : cb_node->waitedEventsBeforeQueryReset) {
                        for (auto event : queryEventsPair.second) {
                            if (dev_data->eventMap[event].needsSignaled) {
                                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                                VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, 0, DRAWSTATE_INVALID_QUERY, "DS",
                                                "Cannot get query results on queryPool 0x%" PRIx64
                                                " with index %d which was guarded by unsignaled event 0x%" PRIx64 ".",
                                                HandleToUint64(queryEventsPair.first.pool), queryEventsPair.first.index,
                                                HandleToUint64(event));
                            }
                        }
                    }
                }
            }
        }

        // finally mark the point we've now validated this queue to.
        done_seqs[queue] = seq;
    }

    return skip;
}
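// Worked example (illustrative): if queue A's submission at seq 5 waits on a semaphore
// signaled by queue B at B's seq 12, validating A up to 5 records {B -> 12} in
// target_seqs and pushes B onto the worklist, so B's submissions through 12 get checked
// too; done_seqs keeps ranges shared by multiple waits from being re-walked.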

// When the given fence is retired, verify outstanding queue operations through the point of the fence
static bool VerifyQueueStateToFence(layer_data *dev_data, VkFence fence) {
    auto fence_state = GetFenceNode(dev_data, fence);
    if (fence_state->scope == kSyncScopeInternal && VK_NULL_HANDLE != fence_state->signaler.first) {
        return VerifyQueueStateToSeq(dev_data, GetQueueState(dev_data, fence_state->signaler.first), fence_state->signaler.second);
    }
    return false;
}

// Decrement in-use count for objects bound to command buffer
static void DecrementBoundResources(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
    BASE_NODE *base_obj = nullptr;
    for (auto obj : cb_node->object_bindings) {
        base_obj = GetStateStructPtrFromObject(dev_data, obj);
        if (base_obj) {
            base_obj->in_use.fetch_sub(1);
        }
    }
}

static void RetireWorkOnQueue(layer_data *dev_data, QUEUE_STATE *pQueue, uint64_t seq) {
    std::unordered_map<VkQueue, uint64_t> otherQueueSeqs;

    // Roll this queue forward, one submission at a time.
    while (pQueue->seq < seq) {
        auto &submission = pQueue->submissions.front();

        for (auto &wait : submission.waitSemaphores) {
            auto pSemaphore = GetSemaphoreNode(dev_data, wait.semaphore);
            if (pSemaphore) {
                pSemaphore->in_use.fetch_sub(1);
            }
            auto &lastSeq = otherQueueSeqs[wait.queue];
            lastSeq = std::max(lastSeq, wait.seq);
        }

        for (auto &semaphore : submission.signalSemaphores) {
            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
            if (pSemaphore) {
                pSemaphore->in_use.fetch_sub(1);
            }
        }

        for (auto &semaphore : submission.externalSemaphores) {
            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
            if (pSemaphore) {
                pSemaphore->in_use.fetch_sub(1);
            }
        }

        for (auto cb : submission.cbs) {
            auto cb_node = GetCBNode(dev_data, cb);
            if (!cb_node) {
                continue;
            }
            // First perform decrement on general case bound objects
            DecrementBoundResources(dev_data, cb_node);
            for (auto drawDataElement : cb_node->drawData) {
                for (auto buffer : drawDataElement.buffers) {
                    auto buffer_state = GetBufferState(dev_data, buffer);
                    if (buffer_state) {
                        buffer_state->in_use.fetch_sub(1);
                    }
                }
            }
            for (auto event : cb_node->writeEventsBeforeWait) {
                auto eventNode = dev_data->eventMap.find(event);
                if (eventNode != dev_data->eventMap.end()) {
                    eventNode->second.write_in_use--;
                }
            }
            for (auto queryStatePair : cb_node->queryToStateMap) {
                dev_data->queryToStateMap[queryStatePair.first] = queryStatePair.second;
            }
            for (auto eventStagePair : cb_node->eventToStageMap) {
                dev_data->eventMap[eventStagePair.first].stageMask = eventStagePair.second;
            }

            cb_node->in_use.fetch_sub(1);
        }

        auto pFence = GetFenceNode(dev_data, submission.fence);
        if (pFence && pFence->scope == kSyncScopeInternal) {
            pFence->state = FENCE_RETIRED;
        }

        pQueue->submissions.pop_front();
        pQueue->seq++;
    }

    // Roll other queues forward to the highest seq we saw a wait for
    for (auto qs : otherQueueSeqs) {
        RetireWorkOnQueue(dev_data, GetQueueState(dev_data, qs.first), qs.second);
    }
}
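// Note: the trailing loop re-enters RetireWorkOnQueue() once per queue that was waited
// on, so retiring one submission can transitively retire work across several queues.
// This terminates because each pass only ever advances a queue's seq forward.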

// Submit a fence to a queue, delimiting previous fences and previous untracked
// work by it.
static void SubmitFence(QUEUE_STATE *pQueue, FENCE_NODE *pFence, uint64_t submitCount) {
    pFence->state = FENCE_INFLIGHT;
    pFence->signaler.first = pQueue->queue;
    pFence->signaler.second = pQueue->seq + pQueue->submissions.size() + submitCount;
}
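// Illustrative arithmetic: with pQueue->seq == 10, three submissions still queued, and
// submitCount == 2, the fence's signaler is recorded as seq 10 + 3 + 2 == 15, the point
// RetireWorkOnQueue()/VerifyQueueStateToFence() later roll the queue forward to.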

static bool validateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB, int current_submit_count) {
    bool skip = false;
    if ((pCB->in_use.load() || current_submit_count > 1) &&
        !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                        __LINE__, VALIDATION_ERROR_31a0008e, "DS",
                        "Command Buffer 0x%" PRIx64 " is already in use and is not marked for simultaneous use. %s",
                        HandleToUint64(pCB->commandBuffer), validation_error_map[VALIDATION_ERROR_31a0008e]);
    }
    return skip;
}

static bool validateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, const char *call_source,
                                       int current_submit_count, UNIQUE_VALIDATION_ERROR_CODE vu_id) {
    bool skip = false;
    if (dev_data->instance_data->disabled.command_buffer_state) return skip;
    // Validate ONE_TIME_SUBMIT_BIT CB is not being submitted more than once
    if ((cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) &&
        (cb_state->submitCount + current_submit_count > 1)) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                        __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
                        "Command buffer 0x%" PRIx64
                        " was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT set, but has been submitted 0x%" PRIxLEAST64
                        " times.",
                        HandleToUint64(cb_state->commandBuffer), cb_state->submitCount + current_submit_count);
    }

    // Validate that cmd buffers have been updated
    switch (cb_state->state) {
        case CB_INVALID_INCOMPLETE:
        case CB_INVALID_COMPLETE:
            skip |= ReportInvalidCommandBuffer(dev_data, cb_state, call_source);
            break;

        case CB_NEW:
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)(cb_state->commandBuffer), __LINE__, vu_id, "DS",
                            "Command buffer 0x%" PRIx64 " used in the call to %s is unrecorded and contains no commands. %s",
                            HandleToUint64(cb_state->commandBuffer), call_source, validation_error_map[vu_id]);
            break;

        case CB_RECORDING:
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_state->commandBuffer), __LINE__, DRAWSTATE_NO_END_COMMAND_BUFFER, "DS",
                            "You must call vkEndCommandBuffer() on command buffer 0x%" PRIx64 " before this call to %s!",
                            HandleToUint64(cb_state->commandBuffer), call_source);
            break;

        default: /* recorded */
            break;
    }
    return skip;
}

static bool validateResources(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
    bool skip = false;

    // TODO : We should be able to remove the NULL look-up checks from the code below as long as
    //  all the corresponding cases are verified to cause CB_INVALID state and the CB_INVALID state
    //  should then be flagged prior to calling this function
    for (auto drawDataElement : cb_node->drawData) {
        for (auto buffer : drawDataElement.buffers) {
            auto buffer_state = GetBufferState(dev_data, buffer);
            if (buffer != VK_NULL_HANDLE && !buffer_state) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                HandleToUint64(buffer), __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
                                "Cannot submit cmd buffer using deleted buffer 0x%" PRIx64 ".", HandleToUint64(buffer));
            }
        }
    }
    return skip;
}

// Check that the queue family index of 'queue' matches one of the entries in pQueueFamilyIndices
bool ValidImageBufferQueue(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, const VK_OBJECT *object, VkQueue queue, uint32_t count,
                           const uint32_t *indices) {
    bool found = false;
    bool skip = false;
    auto queue_state = GetQueueState(dev_data, queue);
    if (queue_state) {
        for (uint32_t i = 0; i < count; i++) {
            if (indices[i] == queue_state->queueFamilyIndex) {
                found = true;
                break;
            }
        }

        if (!found) {
            skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[object->type],
                           object->handle, __LINE__, DRAWSTATE_INVALID_QUEUE_FAMILY, "DS",
                           "vkQueueSubmit: Command buffer 0x%" PRIx64 " contains %s 0x%" PRIx64
                           " which was not created allowing concurrent access to this queue family %d.",
                           HandleToUint64(cb_node->commandBuffer), object_string[object->type], object->handle,
                           queue_state->queueFamilyIndex);
        }
    }
    return skip;
}

// Validate that queueFamilyIndices of primary command buffers match this queue
// Secondary command buffers were previously validated in vkCmdExecuteCommands().
static bool validateQueueFamilyIndices(layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkQueue queue) {
    bool skip = false;
    auto pPool = GetCommandPoolNode(dev_data, pCB->createInfo.commandPool);
    auto queue_state = GetQueueState(dev_data, queue);

    if (pPool && queue_state) {
        if (pPool->queueFamilyIndex != queue_state->queueFamilyIndex) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(pCB->commandBuffer), __LINE__, VALIDATION_ERROR_31a00094, "DS",
                            "vkQueueSubmit: Primary command buffer 0x%" PRIx64
                            " created in queue family %d is being submitted on queue 0x%" PRIx64 " from queue family %d. %s",
                            HandleToUint64(pCB->commandBuffer), pPool->queueFamilyIndex, HandleToUint64(queue),
                            queue_state->queueFamilyIndex, validation_error_map[VALIDATION_ERROR_31a00094]);
        }

        // Ensure that any bound images or buffers created with SHARING_MODE_CONCURRENT have access to the current queue family
        for (auto object : pCB->object_bindings) {
            if (object.type == kVulkanObjectTypeImage) {
                auto image_state = GetImageState(dev_data, reinterpret_cast<VkImage &>(object.handle));
                if (image_state && image_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
                    skip |= ValidImageBufferQueue(dev_data, pCB, &object, queue, image_state->createInfo.queueFamilyIndexCount,
                                                  image_state->createInfo.pQueueFamilyIndices);
                }
            } else if (object.type == kVulkanObjectTypeBuffer) {
                auto buffer_state = GetBufferState(dev_data, reinterpret_cast<VkBuffer &>(object.handle));
                if (buffer_state && buffer_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
                    skip |= ValidImageBufferQueue(dev_data, pCB, &object, queue, buffer_state->createInfo.queueFamilyIndexCount,
                                                  buffer_state->createInfo.pQueueFamilyIndices);
                }
            }
        }
    }

    return skip;
}

static bool validatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, int current_submit_count) {
    // Track in-use for resources off of primary and any secondary CBs
    bool skip = false;

    // If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing
    // on device
    skip |= validateCommandBufferSimultaneousUse(dev_data, pCB, current_submit_count);

    skip |= validateResources(dev_data, pCB);

    for (auto pSubCB : pCB->linkedCommandBuffers) {
        skip |= validateResources(dev_data, pSubCB);
        // TODO: replace with invalidateCommandBuffers() at recording.
        if ((pSubCB->primaryCommandBuffer != pCB->commandBuffer) &&
            !(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                    __LINE__, VALIDATION_ERROR_31a00092, "DS",
                    "Command buffer 0x%" PRIx64 " was submitted with secondary buffer 0x%" PRIx64
                    " but that buffer has subsequently been bound to primary cmd buffer 0x%" PRIx64
                    " and it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set. %s",
                    HandleToUint64(pCB->commandBuffer), HandleToUint64(pSubCB->commandBuffer),
                    HandleToUint64(pSubCB->primaryCommandBuffer), validation_error_map[VALIDATION_ERROR_31a00092]);
        }
    }

    skip |= validateCommandBufferState(dev_data, pCB, "vkQueueSubmit()", current_submit_count, VALIDATION_ERROR_31a00090);

    return skip;
}

static bool ValidateFenceForSubmit(layer_data *dev_data, FENCE_NODE *pFence) {
    bool skip = false;

    if (pFence && pFence->scope == kSyncScopeInternal) {
        if (pFence->state == FENCE_INFLIGHT) {
            // TODO: opportunities for VALIDATION_ERROR_31a00080, VALIDATION_ERROR_316008b4, VALIDATION_ERROR_16400a0e
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                            HandleToUint64(pFence->fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
                            "Fence 0x%" PRIx64 " is already in use by another submission.", HandleToUint64(pFence->fence));
        }

        else if (pFence->state == FENCE_RETIRED) {
            // TODO: opportunities for VALIDATION_ERROR_31a0007e, VALIDATION_ERROR_316008b2, VALIDATION_ERROR_16400a0e
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                            HandleToUint64(pFence->fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
                            "Fence 0x%" PRIx64 " submitted in SIGNALED state.  Fences must be reset before being submitted",
                            HandleToUint64(pFence->fence));
        }
    }

    return skip;
}
2727 
PostCallRecordQueueSubmit(layer_data * dev_data,VkQueue queue,uint32_t submitCount,const VkSubmitInfo * pSubmits,VkFence fence)2728 static void PostCallRecordQueueSubmit(layer_data *dev_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits,
2729                                       VkFence fence) {
2730     uint64_t early_retire_seq = 0;
2731     auto pQueue = GetQueueState(dev_data, queue);
2732     auto pFence = GetFenceNode(dev_data, fence);
2733 
2734     if (pFence) {
2735         if (pFence->scope == kSyncScopeInternal) {
2736             // Mark fence in use
2737             SubmitFence(pQueue, pFence, std::max(1u, submitCount));
2738             if (!submitCount) {
2739                 // If no submissions, but just dropping a fence on the end of the queue,
2740                 // record an empty submission with just the fence, so we can determine
2741                 // its completion.
2742                 pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), std::vector<SEMAPHORE_WAIT>(),
2743                                                  std::vector<VkSemaphore>(), std::vector<VkSemaphore>(), fence);
2744             }
2745         } else {
2746             // Retire work up to this fence early; we will not see the wait that corresponds to this signal
2747             early_retire_seq = pQueue->seq + pQueue->submissions.size();
2748             if (!dev_data->external_sync_warning) {
2749                 dev_data->external_sync_warning = true;
2750                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
2751                         HandleToUint64(fence), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
2752                         "vkQueueSubmit(): Signaling external fence 0x%" PRIx64 " on queue 0x%" PRIx64
2753                         " will disable validation of preceding command buffer lifecycle states and the in-use status of associated "
2754                         "objects.",
2755                         HandleToUint64(fence), HandleToUint64(queue));
2756             }
2757         }
2758     }
2759 
2760     // Now process each individual submit
2761     for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
2762         std::vector<VkCommandBuffer> cbs;
2763         const VkSubmitInfo *submit = &pSubmits[submit_idx];
2764         vector<SEMAPHORE_WAIT> semaphore_waits;
2765         vector<VkSemaphore> semaphore_signals;
2766         vector<VkSemaphore> semaphore_externals;
2767         for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
2768             VkSemaphore semaphore = submit->pWaitSemaphores[i];
2769             auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
2770             if (pSemaphore) {
2771                 if (pSemaphore->scope == kSyncScopeInternal) {
2772                     if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
2773                         semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
2774                         pSemaphore->in_use.fetch_add(1);
2775                     }
2776                     pSemaphore->signaler.first = VK_NULL_HANDLE;
2777                     pSemaphore->signaled = false;
2778                 } else {
2779                     semaphore_externals.push_back(semaphore);
2780                     pSemaphore->in_use.fetch_add(1);
2781                     if (pSemaphore->scope == kSyncScopeExternalTemporary) {
2782                         pSemaphore->scope = kSyncScopeInternal;
2783                     }
2784                 }
2785             }
2786         }
2787         for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
2788             VkSemaphore semaphore = submit->pSignalSemaphores[i];
2789             auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
2790             if (pSemaphore) {
2791                 if (pSemaphore->scope == kSyncScopeInternal) {
2792                     pSemaphore->signaler.first = queue;
2793                     pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
2794                     pSemaphore->signaled = true;
2795                     pSemaphore->in_use.fetch_add(1);
2796                     semaphore_signals.push_back(semaphore);
2797                 } else {
2798                     // Retire work up to this submit early; we will not see the wait that corresponds to this signal
2799                     early_retire_seq = std::max(early_retire_seq, pQueue->seq + pQueue->submissions.size() + 1);
2800                     if (!dev_data->external_sync_warning) {
2801                         dev_data->external_sync_warning = true;
2802                         log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
2803                                 HandleToUint64(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
2804                                 "vkQueueSubmit(): Signaling external semaphore 0x%" PRIx64 " on queue 0x%" PRIx64
2805                                 " will disable validation of preceding command buffer lifecycle states and the in-use status of "
2806                                 "associated objects.",
2807                                 HandleToUint64(semaphore), HandleToUint64(queue));
2808                     }
2809                 }
2810             }
2811         }
2812         for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
2813             auto cb_node = GetCBNode(dev_data, submit->pCommandBuffers[i]);
2814             if (cb_node) {
2815                 cbs.push_back(submit->pCommandBuffers[i]);
2816                 for (auto secondaryCmdBuffer : cb_node->linkedCommandBuffers) {
2817                     cbs.push_back(secondaryCmdBuffer->commandBuffer);
2818                 }
2819                 UpdateCmdBufImageLayouts(dev_data, cb_node);
2820                 incrementResources(dev_data, cb_node);
2821                 for (auto secondaryCmdBuffer : cb_node->linkedCommandBuffers) {
2822                     incrementResources(dev_data, secondaryCmdBuffer);
2823                 }
2824             }
2825         }
2826         pQueue->submissions.emplace_back(cbs, semaphore_waits, semaphore_signals, semaphore_externals,
2827                                          submit_idx == submitCount - 1 ? fence : VK_NULL_HANDLE);
2828     }
2829 
2830     if (early_retire_seq) {
2831         RetireWorkOnQueue(dev_data, pQueue, early_retire_seq);
2832     }
2833 }
2834 
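// Illustrative sketch (not part of the layer): the empty-submission branch recorded
// above corresponds to a fence-only submit, which is legal Vulkan usage.
#if 0
static void ExampleFenceOnlySubmit(VkQueue queue, VkFence fence) {
    // With submitCount == 0 the fence signals once all prior work on the queue completes
    vkQueueSubmit(queue, 0, nullptr, fence);
}
#endif
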
2835 static bool PreCallValidateQueueSubmit(layer_data *dev_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits,
2836                                        VkFence fence) {
2837     auto pFence = GetFenceNode(dev_data, fence);
2838     bool skip = ValidateFenceForSubmit(dev_data, pFence);
2839     if (skip) {
2840         return true;
2841     }
2842 
2843     unordered_set<VkSemaphore> signaled_semaphores;
2844     unordered_set<VkSemaphore> unsignaled_semaphores;
2845     unordered_set<VkSemaphore> internal_semaphores;
2846     vector<VkCommandBuffer> current_cmds;
2847     unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> localImageLayoutMap;
2848     // Now verify each individual submit
2849     for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
2850         const VkSubmitInfo *submit = &pSubmits[submit_idx];
2851         for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
2852             skip |= ValidateStageMaskGsTsEnables(dev_data, submit->pWaitDstStageMask[i], "vkQueueSubmit()",
2853                                                  VALIDATION_ERROR_13c00098, VALIDATION_ERROR_13c0009a);
2854             VkSemaphore semaphore = submit->pWaitSemaphores[i];
2855             auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
2856             if (pSemaphore && (pSemaphore->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) {
2857                 if (unsignaled_semaphores.count(semaphore) ||
2858                     (!(signaled_semaphores.count(semaphore)) && !(pSemaphore->signaled))) {
2859                     skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
2860                                     HandleToUint64(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
2861                                     "Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
2862                                     HandleToUint64(queue), HandleToUint64(semaphore));
2863                 } else {
2864                     signaled_semaphores.erase(semaphore);
2865                     unsignaled_semaphores.insert(semaphore);
2866                 }
2867             }
2868             if (pSemaphore && pSemaphore->scope == kSyncScopeExternalTemporary) {
2869                 internal_semaphores.insert(semaphore);
2870             }
2871         }
2872         for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
2873             VkSemaphore semaphore = submit->pSignalSemaphores[i];
2874             auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
2875             if (pSemaphore && (pSemaphore->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) {
2876                 if (signaled_semaphores.count(semaphore) || (!(unsignaled_semaphores.count(semaphore)) && pSemaphore->signaled)) {
2877                     skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
2878                                     HandleToUint64(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
2879                                     "Queue 0x%" PRIx64 " is signaling semaphore 0x%" PRIx64
2880                                     " that has already been signaled but not waited on by queue 0x%" PRIx64 ".",
2881                                     HandleToUint64(queue), HandleToUint64(semaphore), HandleToUint64(pSemaphore->signaler.first));
2882                 } else {
2883                     unsignaled_semaphores.erase(semaphore);
2884                     signaled_semaphores.insert(semaphore);
2885                 }
2886             }
2887         }
2888         for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
2889             auto cb_node = GetCBNode(dev_data, submit->pCommandBuffers[i]);
2890             if (cb_node) {
2891                 skip |= ValidateCmdBufImageLayouts(dev_data, cb_node, dev_data->imageLayoutMap, localImageLayoutMap);
2892                 current_cmds.push_back(submit->pCommandBuffers[i]);
2893                 skip |= validatePrimaryCommandBufferState(
2894                     dev_data, cb_node, (int)std::count(current_cmds.begin(), current_cmds.end(), submit->pCommandBuffers[i]));
2895                 skip |= validateQueueFamilyIndices(dev_data, cb_node, queue);
2896 
2897                 // Potential early exit here as bad object state may crash in delayed function calls
2898                 if (skip) {
2899                     return true;
2900                 }
2901 
2902                 // Call submit-time functions to validate/update state
2903                 for (auto &function : cb_node->queue_submit_functions) {
2904                     skip |= function();
2905                 }
2906                 for (auto &function : cb_node->eventUpdates) {
2907                     skip |= function(queue);
2908                 }
2909                 for (auto &function : cb_node->queryUpdates) {
2910                     skip |= function(queue);
2911                 }
2912             }
2913         }
2914     }
2915     return skip;
2916 }
2917 
2918 VKAPI_ATTR VkResult VKAPI_CALL QueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
2919     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
2920     unique_lock_t lock(global_lock);
2921 
2922     bool skip = PreCallValidateQueueSubmit(dev_data, queue, submitCount, pSubmits, fence);
2923     lock.unlock();
2924 
2925     if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
2926 
2927     VkResult result = dev_data->dispatch_table.QueueSubmit(queue, submitCount, pSubmits, fence);
2928 
2929     lock.lock();
2930     PostCallRecordQueueSubmit(dev_data, queue, submitCount, pSubmits, fence);
2931     lock.unlock();
2932     return result;
2933 }
2934 
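// Illustrative sketch (not part of the layer): the semaphore bookkeeping validated and
// recorded above corresponds to a submission shaped like this. Handle names are hypothetical.
#if 0
static void ExampleSubmitWithSemaphores(VkQueue queue, VkCommandBuffer cb, VkSemaphore wait_sem, VkSemaphore signal_sem,
                                        VkFence fence) {
    VkPipelineStageFlags wait_stage = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    VkSubmitInfo submit = {};
    submit.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit.waitSemaphoreCount = 1;
    submit.pWaitSemaphores = &wait_sem;      // Must have a pending signal, or the wait can never complete
    submit.pWaitDstStageMask = &wait_stage;  // One stage mask per wait semaphore
    submit.commandBufferCount = 1;
    submit.pCommandBuffers = &cb;
    submit.signalSemaphoreCount = 1;
    submit.pSignalSemaphores = &signal_sem;  // Must not already be signaled-but-unwaited
    vkQueueSubmit(queue, 1, &submit, fence);
}
#endif
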
2935 static bool PreCallValidateAllocateMemory(layer_data *dev_data) {
2936     bool skip = false;
2937     if (dev_data->memObjMap.size() >= dev_data->phys_dev_properties.properties.limits.maxMemoryAllocationCount) {
2938         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2939                         HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_16c004f8, "MEM",
2940                         "Number of currently valid memory objects is not less than the maximum allowed (%u). %s",
2941                         dev_data->phys_dev_properties.properties.limits.maxMemoryAllocationCount,
2942                         validation_error_map[VALIDATION_ERROR_16c004f8]);
2943     }
2944     return skip;
2945 }
2946 
2947 static void PostCallRecordAllocateMemory(layer_data *dev_data, const VkMemoryAllocateInfo *pAllocateInfo, VkDeviceMemory *pMemory) {
2948     add_mem_obj_info(dev_data, dev_data->device, *pMemory, pAllocateInfo);
2949     return;
2950 }
2951 
2952 VKAPI_ATTR VkResult VKAPI_CALL AllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
2953                                               const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
2954     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
2955     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
2956     unique_lock_t lock(global_lock);
2957     bool skip = PreCallValidateAllocateMemory(dev_data);
2958     if (!skip) {
2959         lock.unlock();
2960         result = dev_data->dispatch_table.AllocateMemory(device, pAllocateInfo, pAllocator, pMemory);
2961         lock.lock();
2962         if (VK_SUCCESS == result) {
2963             PostCallRecordAllocateMemory(dev_data, pAllocateInfo, pMemory);
2964         }
2965     }
2966     return result;
2967 }
2968 
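// Illustrative sketch (not part of the layer): the maxMemoryAllocationCount limit that
// PreCallValidateAllocateMemory enforces is queryable up front, which is why engines
// typically sub-allocate from a few large blocks. Names here are hypothetical.
#if 0
static bool ExampleCanAllocate(VkPhysicalDevice gpu, uint32_t live_allocation_count) {
    VkPhysicalDeviceProperties props;
    vkGetPhysicalDeviceProperties(gpu, &props);
    return live_allocation_count < props.limits.maxMemoryAllocationCount;
}
#endif
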
2969 // For the given obj node, if it is in use, flag a validation error and return the callback result, else return false
2970 bool ValidateObjectNotInUse(const layer_data *dev_data, BASE_NODE *obj_node, VK_OBJECT obj_struct, const char *caller_name,
2971                             UNIQUE_VALIDATION_ERROR_CODE error_code) {
2972     if (dev_data->instance_data->disabled.object_in_use) return false;
2973     bool skip = false;
2974     if (obj_node->in_use.load()) {
2975         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[obj_struct.type],
2976                         obj_struct.handle, __LINE__, error_code, "DS",
2977                         "Cannot call %s on %s 0x%" PRIx64 " that is currently in use by a command buffer. %s", caller_name,
2978                         object_string[obj_struct.type], obj_struct.handle, validation_error_map[error_code]);
2979     }
2980     return skip;
2981 }
2982 
2983 static bool PreCallValidateFreeMemory(layer_data *dev_data, VkDeviceMemory mem, DEVICE_MEM_INFO **mem_info, VK_OBJECT *obj_struct) {
2984     *mem_info = GetMemObjInfo(dev_data, mem);
2985     *obj_struct = {HandleToUint64(mem), kVulkanObjectTypeDeviceMemory};
2986     if (dev_data->instance_data->disabled.free_memory) return false;
2987     bool skip = false;
2988     if (*mem_info) {
2989         skip |= ValidateObjectNotInUse(dev_data, *mem_info, *obj_struct, "vkFreeMemory", VALIDATION_ERROR_2880054a);
2990     }
2991     return skip;
2992 }
2993 
2994 static void PostCallRecordFreeMemory(layer_data *dev_data, VkDeviceMemory mem, DEVICE_MEM_INFO *mem_info, VK_OBJECT obj_struct) {
2995     // Clear mem binding for any bound objects
2996     for (auto obj : mem_info->obj_bindings) {
2997         log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, get_debug_report_enum[obj.type], obj.handle, __LINE__,
2998                 MEMTRACK_FREED_MEM_REF, "MEM", "VK Object 0x%" PRIx64 " still has a reference to mem obj 0x%" PRIx64,
2999                 HandleToUint64(obj.handle), HandleToUint64(mem_info->mem));
3000         BINDABLE *bindable_state = nullptr;
3001         switch (obj.type) {
3002             case kVulkanObjectTypeImage:
3003                 bindable_state = GetImageState(dev_data, reinterpret_cast<VkImage &>(obj.handle));
3004                 break;
3005             case kVulkanObjectTypeBuffer:
3006                 bindable_state = GetBufferState(dev_data, reinterpret_cast<VkBuffer &>(obj.handle));
3007                 break;
3008             default:
3009                 // Should only have buffer or image objects bound to memory
3010                 assert(0);
3011         }
3012 
3013         assert(bindable_state);
3014         bindable_state->binding.mem = MEMORY_UNBOUND;
3015         bindable_state->UpdateBoundMemorySet();
3016     }
3017     // Any bound cmd buffers are now invalid
3018     invalidateCommandBuffers(dev_data, mem_info->cb_bindings, obj_struct);
3019     dev_data->memObjMap.erase(mem);
3020 }
3021 
3022 VKAPI_ATTR void VKAPI_CALL FreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
3023     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3024     DEVICE_MEM_INFO *mem_info = nullptr;
3025     VK_OBJECT obj_struct;
3026     unique_lock_t lock(global_lock);
3027     bool skip = PreCallValidateFreeMemory(dev_data, mem, &mem_info, &obj_struct);
3028     if (!skip) {
3029         lock.unlock();
3030         dev_data->dispatch_table.FreeMemory(device, mem, pAllocator);
3031         lock.lock();
3032         if (mem != VK_NULL_HANDLE) {
3033             PostCallRecordFreeMemory(dev_data, mem, mem_info, obj_struct);
3034         }
3035     }
3036 }
3037 
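// Illustrative sketch (not part of the layer): destroying bound objects before freeing
// their backing memory avoids the MEMTRACK_FREED_MEM_REF message logged above.
#if 0
static void ExampleDestroyThenFree(VkDevice device, VkBuffer buffer, VkDeviceMemory mem) {
    vkDestroyBuffer(device, buffer, nullptr);  // Drops the buffer's binding to mem
    vkFreeMemory(device, mem, nullptr);
}
#endif
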
3038 // Validate a given map-memory range. The memory must not already be mapped,
3039 //  and the size of the map range must be:
3040 //  1. Nonzero
3041 //  2. Within the size of the memory allocation
3042 static bool ValidateMapMemRange(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
3043     bool skip = false;
3044 
3045     if (size == 0) {
3046         skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
3047                        HandleToUint64(mem), __LINE__, MEMTRACK_INVALID_MAP, "MEM",
3048                        "vkMapMemory: Attempting to map memory range of size zero");
3049     }
3050 
3051     auto mem_element = dev_data->memObjMap.find(mem);
3052     if (mem_element != dev_data->memObjMap.end()) {
3053         auto mem_info = mem_element->second.get();
3054         // It is an application error to call vkMapMemory on an object that is already mapped
3055         if (mem_info->mem_range.size != 0) {
3056             skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
3057                            HandleToUint64(mem), __LINE__, MEMTRACK_INVALID_MAP, "MEM",
3058                            "vkMapMemory: Attempting to map memory on an already-mapped object 0x%" PRIx64, HandleToUint64(mem));
3059         }
3060 
3061         // Validate that offset + size is within object's allocationSize
3062         if (size == VK_WHOLE_SIZE) {
3063             if (offset >= mem_info->alloc_info.allocationSize) {
3064                 skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
3065                                HandleToUint64(mem), __LINE__, MEMTRACK_INVALID_MAP, "MEM",
3066                                "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64
3067                                " with size of VK_WHOLE_SIZE oversteps total array size 0x%" PRIx64,
3068                                offset, mem_info->alloc_info.allocationSize, mem_info->alloc_info.allocationSize);
3069             }
3070         } else {
3071             if ((offset + size) > mem_info->alloc_info.allocationSize) {
3072                 skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
3073                                HandleToUint64(mem), __LINE__, VALIDATION_ERROR_31200552, "MEM",
3074                                "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64 " oversteps total array size 0x%" PRIx64 ". %s",
3075                                offset, size + offset, mem_info->alloc_info.allocationSize,
3076                                validation_error_map[VALIDATION_ERROR_31200552]);
3077             }
3078         }
3079     }
3080     return skip;
3081 }
3082 
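// Illustrative sketch (not part of the layer): a map request that satisfies
// ValidateMapMemRange -- nonzero size, not already mapped, and offset within the allocation.
#if 0
static void *ExampleMapWholeAllocation(VkDevice device, VkDeviceMemory mem) {
    void *data = nullptr;
    // VK_WHOLE_SIZE maps from offset to the end of the allocation
    vkMapMemory(device, mem, 0 /*offset*/, VK_WHOLE_SIZE, 0 /*flags*/, &data);
    return data;
}
#endif
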
3083 static void storeMemRanges(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
3084     auto mem_info = GetMemObjInfo(dev_data, mem);
3085     if (mem_info) {
3086         mem_info->mem_range.offset = offset;
3087         mem_info->mem_range.size = size;
3088     }
3089 }
3090 
3091 static bool deleteMemRanges(layer_data *dev_data, VkDeviceMemory mem) {
3092     bool skip = false;
3093     auto mem_info = GetMemObjInfo(dev_data, mem);
3094     if (mem_info) {
3095         if (!mem_info->mem_range.size) {
3096             // Valid Usage: memory must currently be mapped
3097             skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
3098                            HandleToUint64(mem), __LINE__, VALIDATION_ERROR_33600562, "MEM",
3099                            "Unmapping Memory without memory being mapped: mem obj 0x%" PRIx64 ". %s", HandleToUint64(mem),
3100                            validation_error_map[VALIDATION_ERROR_33600562]);
3101         }
3102         mem_info->mem_range.size = 0;
3103         if (mem_info->shadow_copy) {
3104             free(mem_info->shadow_copy_base);
3105             mem_info->shadow_copy_base = 0;
3106             mem_info->shadow_copy = 0;
3107         }
3108     }
3109     return skip;
3110 }
3111 
3112 // Guard value for pad data
3113 static char NoncoherentMemoryFillValue = 0xb;
3114 
3115 static void initializeAndTrackMemory(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size,
3116                                      void **ppData) {
3117     auto mem_info = GetMemObjInfo(dev_data, mem);
3118     if (mem_info) {
3119         mem_info->p_driver_data = *ppData;
3120         uint32_t index = mem_info->alloc_info.memoryTypeIndex;
3121         if (dev_data->phys_dev_mem_props.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
3122             mem_info->shadow_copy = 0;
3123         } else {
3124             if (size == VK_WHOLE_SIZE) {
3125                 size = mem_info->alloc_info.allocationSize - offset;
3126             }
3127             mem_info->shadow_pad_size = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;
3128             assert(SafeModulo(mem_info->shadow_pad_size, dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment) ==
3129                    0);
3130             // Ensure start of mapped region reflects hardware alignment constraints
3131             uint64_t map_alignment = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;
3132 
3133             // From spec: (ppData - offset) must be aligned to at least limits::minMemoryMapAlignment.
3134             uint64_t start_offset = offset % map_alignment;
3135             // Data passed to driver will be wrapped by a guardband of data to detect over- or under-writes.
3136             mem_info->shadow_copy_base =
3137                 malloc(static_cast<size_t>(2 * mem_info->shadow_pad_size + size + map_alignment + start_offset));
3138 
3139             mem_info->shadow_copy =
3140                 reinterpret_cast<char *>((reinterpret_cast<uintptr_t>(mem_info->shadow_copy_base) + map_alignment) &
3141                                          ~(map_alignment - 1)) +
3142                 start_offset;
3143             assert(SafeModulo(reinterpret_cast<uintptr_t>(mem_info->shadow_copy) + mem_info->shadow_pad_size - start_offset,
3144                               map_alignment) == 0);
3145 
3146             memset(mem_info->shadow_copy, NoncoherentMemoryFillValue, static_cast<size_t>(2 * mem_info->shadow_pad_size + size));
3147             *ppData = static_cast<char *>(mem_info->shadow_copy) + mem_info->shadow_pad_size;
3148         }
3149     }
3150 }
3151 
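// Illustrative sketch (not part of the layer): the shadow-copy placement above reduces
// to this round-up idiom; adding a full map_alignment (rather than map_alignment - 1)
// guarantees the result advances past the raw base, which the extra malloc padding allows.
#if 0
static char *ExampleAlignPastBase(char *base, uintptr_t alignment) {  // alignment must be a power of two
    return reinterpret_cast<char *>((reinterpret_cast<uintptr_t>(base) + alignment) & ~(alignment - 1));
}
#endif
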
3152 // Verify that the state of a fence being waited on is appropriate. That is,
3153 //  a fence being waited on should have been submitted on a queue or during
3154 //  acquire-next-image; otherwise it has no pending signal and can never complete
3155 static inline bool verifyWaitFenceState(layer_data *dev_data, VkFence fence, const char *apiCall) {
3156     bool skip = false;
3157 
3158     auto pFence = GetFenceNode(dev_data, fence);
3159     if (pFence && pFence->scope == kSyncScopeInternal) {
3160         if (pFence->state == FENCE_UNSIGNALED) {
3161             skip |=
3162                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
3163                         HandleToUint64(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
3164                         "%s called for fence 0x%" PRIx64 " which has not been submitted on a Queue or during acquire next image.",
3165                         apiCall, HandleToUint64(fence));
3166         }
3167     }
3168     return skip;
3169 }
3170 
3171 static void RetireFence(layer_data *dev_data, VkFence fence) {
3172     auto pFence = GetFenceNode(dev_data, fence);
3173     if (pFence->scope == kSyncScopeInternal) {
3174         if (pFence->signaler.first != VK_NULL_HANDLE) {
3175             // Fence signaller is a queue -- use this as proof that prior operations on that queue have completed.
3176             RetireWorkOnQueue(dev_data, GetQueueState(dev_data, pFence->signaler.first), pFence->signaler.second);
3177         } else {
3178             // Fence signaller is the WSI. We're not tracking what the WSI op actually /was/ in CV yet, but we need to mark
3179             // the fence as retired.
3180             pFence->state = FENCE_RETIRED;
3181         }
3182     }
3183 }
3184 
3185 static bool PreCallValidateWaitForFences(layer_data *dev_data, uint32_t fence_count, const VkFence *fences) {
3186     if (dev_data->instance_data->disabled.wait_for_fences) return false;
3187     bool skip = false;
3188     for (uint32_t i = 0; i < fence_count; i++) {
3189         skip |= verifyWaitFenceState(dev_data, fences[i], "vkWaitForFences");
3190         skip |= VerifyQueueStateToFence(dev_data, fences[i]);
3191     }
3192     return skip;
3193 }
3194 
3195 static void PostCallRecordWaitForFences(layer_data *dev_data, uint32_t fence_count, const VkFence *fences, VkBool32 wait_all) {
3196     // When we know that all fences are complete we can clean/remove their CBs
3197     if ((VK_TRUE == wait_all) || (1 == fence_count)) {
3198         for (uint32_t i = 0; i < fence_count; i++) {
3199             RetireFence(dev_data, fences[i]);
3200         }
3201     }
3202     // NOTE : The alternate case not handled here is when some (but not all) fences have completed. In
3203     //  that case, for the app to determine which fences completed, it will have to call
3204     //  vkGetFenceStatus(), at which point we'll clean/remove their CBs if complete.
3205 }
3206 
3207 VKAPI_ATTR VkResult VKAPI_CALL WaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll,
3208                                              uint64_t timeout) {
3209     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3210     // Verify fence status of submitted fences
3211     unique_lock_t lock(global_lock);
3212     bool skip = PreCallValidateWaitForFences(dev_data, fenceCount, pFences);
3213     lock.unlock();
3214     if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
3215 
3216     VkResult result = dev_data->dispatch_table.WaitForFences(device, fenceCount, pFences, waitAll, timeout);
3217 
3218     if (result == VK_SUCCESS) {
3219         lock.lock();
3220         PostCallRecordWaitForFences(dev_data, fenceCount, pFences, waitAll);
3221         lock.unlock();
3222     }
3223     return result;
3224 }
3225 
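// Illustrative sketch (not part of the layer): with waitAll == VK_TRUE, a VK_SUCCESS
// return means every fence completed, which is exactly the case where the layer retires
// their command buffers in PostCallRecordWaitForFences.
#if 0
static void ExampleWaitAllThenRecycle(VkDevice device, uint32_t count, const VkFence *fences) {
    if (vkWaitForFences(device, count, fences, VK_TRUE, UINT64_MAX) == VK_SUCCESS) {
        vkResetFences(device, count, fences);  // All complete, safe to recycle
    }
}
#endif
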
3226 static bool PreCallValidateGetFenceStatus(layer_data *dev_data, VkFence fence) {
3227     if (dev_data->instance_data->disabled.get_fence_state) return false;
3228     return verifyWaitFenceState(dev_data, fence, "vkGetFenceStatus");
3229 }
3230 
3231 static void PostCallRecordGetFenceStatus(layer_data *dev_data, VkFence fence) { RetireFence(dev_data, fence); }
3232 
3233 VKAPI_ATTR VkResult VKAPI_CALL GetFenceStatus(VkDevice device, VkFence fence) {
3234     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3235     unique_lock_t lock(global_lock);
3236     bool skip = PreCallValidateGetFenceStatus(dev_data, fence);
3237     lock.unlock();
3238     if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
3239 
3240     VkResult result = dev_data->dispatch_table.GetFenceStatus(device, fence);
3241     if (result == VK_SUCCESS) {
3242         lock.lock();
3243         PostCallRecordGetFenceStatus(dev_data, fence);
3244         lock.unlock();
3245     }
3246     return result;
3247 }
3248 
3249 static void PostCallRecordGetDeviceQueue(layer_data *dev_data, uint32_t q_family_index, VkQueue queue) {
3250     // Add queue to tracking set only if it is new
3251     auto result = dev_data->queues.emplace(queue);
3252     if (result.second == true) {
3253         QUEUE_STATE *queue_state = &dev_data->queueMap[queue];
3254         queue_state->queue = queue;
3255         queue_state->queueFamilyIndex = q_family_index;
3256         queue_state->seq = 0;
3257     }
3258 }
3259 
3260 VKAPI_ATTR void VKAPI_CALL GetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue) {
3261     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3262     dev_data->dispatch_table.GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
3263     lock_guard_t lock(global_lock);
3264 
3265     PostCallRecordGetDeviceQueue(dev_data, queueFamilyIndex, *pQueue);
3266 }
3267 
3268 static bool PreCallValidateQueueWaitIdle(layer_data *dev_data, VkQueue queue, QUEUE_STATE **queue_state) {
3269     *queue_state = GetQueueState(dev_data, queue);
3270     if (dev_data->instance_data->disabled.queue_wait_idle) return false;
3271     return VerifyQueueStateToSeq(dev_data, *queue_state, (*queue_state)->seq + (*queue_state)->submissions.size());
3272 }
3273 
3274 static void PostCallRecordQueueWaitIdle(layer_data *dev_data, QUEUE_STATE *queue_state) {
3275     RetireWorkOnQueue(dev_data, queue_state, queue_state->seq + queue_state->submissions.size());
3276 }
3277 
3278 VKAPI_ATTR VkResult VKAPI_CALL QueueWaitIdle(VkQueue queue) {
3279     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
3280     QUEUE_STATE *queue_state = nullptr;
3281     unique_lock_t lock(global_lock);
3282     bool skip = PreCallValidateQueueWaitIdle(dev_data, queue, &queue_state);
3283     lock.unlock();
3284     if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
3285     VkResult result = dev_data->dispatch_table.QueueWaitIdle(queue);
3286     if (VK_SUCCESS == result) {
3287         lock.lock();
3288         PostCallRecordQueueWaitIdle(dev_data, queue_state);
3289         lock.unlock();
3290     }
3291     return result;
3292 }
3293 
3294 static bool PreCallValidateDeviceWaitIdle(layer_data *dev_data) {
3295     if (dev_data->instance_data->disabled.device_wait_idle) return false;
3296     bool skip = false;
3297     for (auto &queue : dev_data->queueMap) {
3298         skip |= VerifyQueueStateToSeq(dev_data, &queue.second, queue.second.seq + queue.second.submissions.size());
3299     }
3300     return skip;
3301 }
3302 
3303 static void PostCallRecordDeviceWaitIdle(layer_data *dev_data) {
3304     for (auto &queue : dev_data->queueMap) {
3305         RetireWorkOnQueue(dev_data, &queue.second, queue.second.seq + queue.second.submissions.size());
3306     }
3307 }
3308 
3309 VKAPI_ATTR VkResult VKAPI_CALL DeviceWaitIdle(VkDevice device) {
3310     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3311     unique_lock_t lock(global_lock);
3312     bool skip = PreCallValidateDeviceWaitIdle(dev_data);
3313     lock.unlock();
3314     if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
3315     VkResult result = dev_data->dispatch_table.DeviceWaitIdle(device);
3316     if (VK_SUCCESS == result) {
3317         lock.lock();
3318         PostCallRecordDeviceWaitIdle(dev_data);
3319         lock.unlock();
3320     }
3321     return result;
3322 }
3323 
3324 static bool PreCallValidateDestroyFence(layer_data *dev_data, VkFence fence, FENCE_NODE **fence_node, VK_OBJECT *obj_struct) {
3325     *fence_node = GetFenceNode(dev_data, fence);
3326     *obj_struct = {HandleToUint64(fence), kVulkanObjectTypeFence};
3327     if (dev_data->instance_data->disabled.destroy_fence) return false;
3328     bool skip = false;
3329     if (*fence_node) {
3330         if ((*fence_node)->scope == kSyncScopeInternal && (*fence_node)->state == FENCE_INFLIGHT) {
3331             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
3332                             HandleToUint64(fence), __LINE__, VALIDATION_ERROR_24e008c0, "DS", "Fence 0x%" PRIx64 " is in use. %s",
3333                             HandleToUint64(fence), validation_error_map[VALIDATION_ERROR_24e008c0]);
3334         }
3335     }
3336     return skip;
3337 }
3338 
3339 static void PostCallRecordDestroyFence(layer_data *dev_data, VkFence fence) { dev_data->fenceMap.erase(fence); }
3340 
3341 VKAPI_ATTR void VKAPI_CALL DestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
3342     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3343     // Common data objects used pre & post call
3344     FENCE_NODE *fence_node = nullptr;
3345     VK_OBJECT obj_struct;
3346     unique_lock_t lock(global_lock);
3347     bool skip = PreCallValidateDestroyFence(dev_data, fence, &fence_node, &obj_struct);
3348 
3349     if (!skip) {
3350         lock.unlock();
3351         dev_data->dispatch_table.DestroyFence(device, fence, pAllocator);
3352         lock.lock();
3353         PostCallRecordDestroyFence(dev_data, fence);
3354     }
3355 }
3356 
3357 static bool PreCallValidateDestroySemaphore(layer_data *dev_data, VkSemaphore semaphore, SEMAPHORE_NODE **sema_node,
3358                                             VK_OBJECT *obj_struct) {
3359     *sema_node = GetSemaphoreNode(dev_data, semaphore);
3360     *obj_struct = {HandleToUint64(semaphore), kVulkanObjectTypeSemaphore};
3361     if (dev_data->instance_data->disabled.destroy_semaphore) return false;
3362     bool skip = false;
3363     if (*sema_node) {
3364         skip |= ValidateObjectNotInUse(dev_data, *sema_node, *obj_struct, "vkDestroySemaphore", VALIDATION_ERROR_268008e2);
3365     }
3366     return skip;
3367 }
3368 
3369 static void PostCallRecordDestroySemaphore(layer_data *dev_data, VkSemaphore sema) { dev_data->semaphoreMap.erase(sema); }
3370 
3371 VKAPI_ATTR void VKAPI_CALL DestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
3372     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3373     SEMAPHORE_NODE *sema_node;
3374     VK_OBJECT obj_struct;
3375     unique_lock_t lock(global_lock);
3376     bool skip = PreCallValidateDestroySemaphore(dev_data, semaphore, &sema_node, &obj_struct);
3377     if (!skip) {
3378         lock.unlock();
3379         dev_data->dispatch_table.DestroySemaphore(device, semaphore, pAllocator);
3380         lock.lock();
3381         PostCallRecordDestroySemaphore(dev_data, semaphore);
3382     }
3383 }
3384 
3385 static bool PreCallValidateDestroyEvent(layer_data *dev_data, VkEvent event, EVENT_STATE **event_state, VK_OBJECT *obj_struct) {
3386     *event_state = GetEventNode(dev_data, event);
3387     *obj_struct = {HandleToUint64(event), kVulkanObjectTypeEvent};
3388     if (dev_data->instance_data->disabled.destroy_event) return false;
3389     bool skip = false;
3390     if (*event_state) {
3391         skip |= ValidateObjectNotInUse(dev_data, *event_state, *obj_struct, "vkDestroyEvent", VALIDATION_ERROR_24c008f2);
3392     }
3393     return skip;
3394 }
3395 
3396 static void PostCallRecordDestroyEvent(layer_data *dev_data, VkEvent event, EVENT_STATE *event_state, VK_OBJECT obj_struct) {
3397     invalidateCommandBuffers(dev_data, event_state->cb_bindings, obj_struct);
3398     dev_data->eventMap.erase(event);
3399 }
3400 
3401 VKAPI_ATTR void VKAPI_CALL DestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
3402     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3403     EVENT_STATE *event_state = nullptr;
3404     VK_OBJECT obj_struct;
3405     unique_lock_t lock(global_lock);
3406     bool skip = PreCallValidateDestroyEvent(dev_data, event, &event_state, &obj_struct);
3407     if (!skip) {
3408         lock.unlock();
3409         dev_data->dispatch_table.DestroyEvent(device, event, pAllocator);
3410         lock.lock();
3411         if (event != VK_NULL_HANDLE) {
3412             PostCallRecordDestroyEvent(dev_data, event, event_state, obj_struct);
3413         }
3414     }
3415 }
3416 
3417 static bool PreCallValidateDestroyQueryPool(layer_data *dev_data, VkQueryPool query_pool, QUERY_POOL_NODE **qp_state,
3418                                             VK_OBJECT *obj_struct) {
3419     *qp_state = GetQueryPoolNode(dev_data, query_pool);
3420     *obj_struct = {HandleToUint64(query_pool), kVulkanObjectTypeQueryPool};
3421     if (dev_data->instance_data->disabled.destroy_query_pool) return false;
3422     bool skip = false;
3423     if (*qp_state) {
3424         skip |= ValidateObjectNotInUse(dev_data, *qp_state, *obj_struct, "vkDestroyQueryPool", VALIDATION_ERROR_26200632);
3425     }
3426     return skip;
3427 }
3428 
3429 static void PostCallRecordDestroyQueryPool(layer_data *dev_data, VkQueryPool query_pool, QUERY_POOL_NODE *qp_state,
3430                                            VK_OBJECT obj_struct) {
3431     invalidateCommandBuffers(dev_data, qp_state->cb_bindings, obj_struct);
3432     dev_data->queryPoolMap.erase(query_pool);
3433 }
3434 
3435 VKAPI_ATTR void VKAPI_CALL DestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
3436     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3437     QUERY_POOL_NODE *qp_state = nullptr;
3438     VK_OBJECT obj_struct;
3439     unique_lock_t lock(global_lock);
3440     bool skip = PreCallValidateDestroyQueryPool(dev_data, queryPool, &qp_state, &obj_struct);
3441     if (!skip) {
3442         lock.unlock();
3443         dev_data->dispatch_table.DestroyQueryPool(device, queryPool, pAllocator);
3444         lock.lock();
3445         if (queryPool != VK_NULL_HANDLE) {
3446             PostCallRecordDestroyQueryPool(dev_data, queryPool, qp_state, obj_struct);
3447         }
3448     }
3449 }
3450 static bool PreCallValidateGetQueryPoolResults(layer_data *dev_data, VkQueryPool query_pool, uint32_t first_query,
3451                                                uint32_t query_count, VkQueryResultFlags flags,
3452                                                unordered_map<QueryObject, vector<VkCommandBuffer>> *queries_in_flight) {
3453     bool skip = false;
3454     auto query_pool_state = dev_data->queryPoolMap.find(query_pool);
3455     if (query_pool_state != dev_data->queryPoolMap.end()) {
3456         if ((query_pool_state->second.createInfo.queryType == VK_QUERY_TYPE_TIMESTAMP) && (flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
3457             skip |= log_msg(
3458                 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__,
3459                 VALIDATION_ERROR_2fa00664, "DS",
3460                 "QueryPool 0x%" PRIx64
3461                 " was created with a queryType of VK_QUERY_TYPE_TIMESTAMP but flags contains VK_QUERY_RESULT_PARTIAL_BIT. %s",
3462                 HandleToUint64(query_pool), validation_error_map[VALIDATION_ERROR_2fa00664]);
3463         }
3464     }
3465 
3466     // TODO: clean this up, it's insanely wasteful.
3467     for (auto cmd_buffer : dev_data->commandBufferMap) {
3468         if (cmd_buffer.second->in_use.load()) {
3469             for (auto query_state_pair : cmd_buffer.second->queryToStateMap) {
3470                 (*queries_in_flight)[query_state_pair.first].push_back(cmd_buffer.first);
3471             }
3472         }
3473     }
3474 
3475     if (dev_data->instance_data->disabled.get_query_pool_results) return false;
3476     for (uint32_t i = 0; i < query_count; ++i) {
3477         QueryObject query = {query_pool, first_query + i};
3478         auto qif_pair = queries_in_flight->find(query);
3479         auto query_state_pair = dev_data->queryToStateMap.find(query);
3480         if (query_state_pair != dev_data->queryToStateMap.end()) {
3481             // Available and in flight
3482             if (qif_pair != queries_in_flight->end()) {
3483                 if (query_state_pair->second) {
3484                     for (auto cmd_buffer : qif_pair->second) {
3485                         auto cb = GetCBNode(dev_data, cmd_buffer);
3486                         auto query_event_pair = cb->waitedEventsBeforeQueryReset.find(query);
3487                         if (query_event_pair == cb->waitedEventsBeforeQueryReset.end()) {
3488                             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
3489                                             VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
3490                                             "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is in flight.",
3491                                             HandleToUint64(query_pool), first_query + i);
3492                         }
3493                     }
3494                 }
3495             } else if (!query_state_pair->second) {  // Unavailable and Not in flight
3496                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
3497                                 __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
3498                                 "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
3499                                 HandleToUint64(query_pool), first_query + i);
3500             }
3501         } else {  // Uninitialized
3502             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
3503                             __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
3504                             "Cannot get query results on queryPool 0x%" PRIx64
3505                             " with index %d as data has not been collected for this index.",
3506                             HandleToUint64(query_pool), first_query + i);
3507         }
3508     }
3509     return skip;
3510 }
3511 
3512 static void PostCallRecordGetQueryPoolResults(layer_data *dev_data, VkQueryPool query_pool, uint32_t first_query,
3513                                               uint32_t query_count,
3514                                               unordered_map<QueryObject, vector<VkCommandBuffer>> *queries_in_flight) {
3515     for (uint32_t i = 0; i < query_count; ++i) {
3516         QueryObject query = {query_pool, first_query + i};
3517         auto qif_pair = queries_in_flight->find(query);
3518         auto query_state_pair = dev_data->queryToStateMap.find(query);
3519         if (query_state_pair != dev_data->queryToStateMap.end()) {
3520             // Available and in flight
3521             if (qif_pair != queries_in_flight->end() && query_state_pair != dev_data->queryToStateMap.end() &&
3522                 query_state_pair->second) {
3523                 for (auto cmd_buffer : qif_pair->second) {
3524                     auto cb = GetCBNode(dev_data, cmd_buffer);
3525                     auto query_event_pair = cb->waitedEventsBeforeQueryReset.find(query);
3526                     if (query_event_pair != cb->waitedEventsBeforeQueryReset.end()) {
3527                         for (auto event : query_event_pair->second) {
3528                             dev_data->eventMap[event].needsSignaled = true;
3529                         }
3530                     }
3531                 }
3532             }
3533         }
3534     }
3535 }
3536 
3537 VKAPI_ATTR VkResult VKAPI_CALL GetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
3538                                                    size_t dataSize, void *pData, VkDeviceSize stride, VkQueryResultFlags flags) {
3539     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3540     unordered_map<QueryObject, vector<VkCommandBuffer>> queries_in_flight;
3541     unique_lock_t lock(global_lock);
3542     bool skip = PreCallValidateGetQueryPoolResults(dev_data, queryPool, firstQuery, queryCount, flags, &queries_in_flight);
3543     lock.unlock();
3544     if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
3545     VkResult result =
3546         dev_data->dispatch_table.GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride, flags);
3547     lock.lock();
3548     PostCallRecordGetQueryPoolResults(dev_data, queryPool, firstQuery, queryCount, &queries_in_flight);
3549     lock.unlock();
3550     return result;
3551 }
3552 
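// Illustrative sketch (not part of the layer): reading timestamp queries in a way that
// passes PreCallValidateGetQueryPoolResults -- WAIT_BIT rather than PARTIAL_BIT, which
// is rejected for VK_QUERY_TYPE_TIMESTAMP pools. Names here are hypothetical.
#if 0
static void ExampleReadTimestamps(VkDevice device, VkQueryPool pool, uint32_t count, uint64_t *results) {
    vkGetQueryPoolResults(device, pool, 0 /*firstQuery*/, count, count * sizeof(uint64_t), results,
                          sizeof(uint64_t) /*stride*/, VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);
}
#endif
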
3553 // Return true if given ranges intersect, else false
3554 // Prereq : For both ranges, range->end - range->start > 0. This case should have already resulted
3555 //  in an error so not checking that here
3556 // pad_ranges bool indicates a linear and non-linear comparison which requires padding
3557 // In the case where padding is required, if an alias is encountered then a validation error is reported and skip
3558 //  may be set by the callback function so caller should merge in skip value if padding case is possible.
3559 // This check can be skipped by passing skip_checks=true, for call sites outside the validation path.
3560 static bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, MEMORY_RANGE const *range2, bool *skip,
3561                             bool skip_checks) {
3562     *skip = false;
3563     auto r1_start = range1->start;
3564     auto r1_end = range1->end;
3565     auto r2_start = range2->start;
3566     auto r2_end = range2->end;
3567     VkDeviceSize pad_align = 1;
3568     if (range1->linear != range2->linear) {
3569         pad_align = dev_data->phys_dev_properties.properties.limits.bufferImageGranularity;
3570     }
3571     if ((r1_end & ~(pad_align - 1)) < (r2_start & ~(pad_align - 1))) return false;
3572     if ((r1_start & ~(pad_align - 1)) > (r2_end & ~(pad_align - 1))) return false;
3573 
3574     if (!skip_checks && (range1->linear != range2->linear)) {
3575         // In linear vs. non-linear case, warn of aliasing
3576         const char *r1_linear_str = range1->linear ? "Linear" : "Non-linear";
3577         const char *r1_type_str = range1->image ? "image" : "buffer";
3578         const char *r2_linear_str = range2->linear ? "linear" : "non-linear";
3579         const char *r2_type_str = range2->image ? "image" : "buffer";
3580         auto obj_type = range1->image ? VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT : VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT;
3581         *skip |= log_msg(
3582             dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, obj_type, range1->handle, 0, MEMTRACK_INVALID_ALIASING, "MEM",
3583             "%s %s 0x%" PRIx64 " is aliased with %s %s 0x%" PRIx64
3584             " which may indicate a bug. For further info refer to the Buffer-Image Granularity section of the Vulkan "
3585             "specification. "
3586             "(https://www.khronos.org/registry/vulkan/specs/1.0-extensions/xhtml/vkspec.html#resources-bufferimagegranularity)",
3587             r1_linear_str, r1_type_str, range1->handle, r2_linear_str, r2_type_str, range2->handle);
3588     }
3589     // Ranges intersect
3590     return true;
3591 }
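// Illustrative sketch (not part of the layer): the masking in rangesIntersect reduces to
// this page-style test, where pad_align is bufferImageGranularity when exactly one of the
// two ranges is linear (pad_align must be a power of two).
#if 0
static bool ExamplePaddedOverlap(VkDeviceSize r1_start, VkDeviceSize r1_end, VkDeviceSize r2_start, VkDeviceSize r2_end,
                                 VkDeviceSize pad_align) {
    // Round both ranges out to pad_align-sized "pages"; ranges on disjoint pages cannot alias
    return !((r1_end & ~(pad_align - 1)) < (r2_start & ~(pad_align - 1)) ||
             (r1_start & ~(pad_align - 1)) > (r2_end & ~(pad_align - 1)));
}
#endif
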
3592 // Simplified rangesIntersect that calls above function to check range1 for intersection with offset & end addresses
3593 bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, VkDeviceSize offset, VkDeviceSize end) {
3594     // Create a local MEMORY_RANGE struct to wrap offset/size
3595     MEMORY_RANGE range_wrap;
3596     // Synch linear with range1 to avoid padding and potential validation error case
3597     range_wrap.linear = range1->linear;
3598     range_wrap.start = offset;
3599     range_wrap.end = end;
3600     bool tmp_bool;
3601     return rangesIntersect(dev_data, range1, &range_wrap, &tmp_bool, true);
3602 }
3603 // For given mem_info, set all ranges valid that intersect [offset-end] range
3604 // TODO : For ranges where there is no alias, we may want to create new buffer ranges that are valid
3605 static void SetMemRangesValid(layer_data const *dev_data, DEVICE_MEM_INFO *mem_info, VkDeviceSize offset, VkDeviceSize end) {
3606     bool tmp_bool = false;
3607     MEMORY_RANGE map_range = {};
3608     map_range.linear = true;
3609     map_range.start = offset;
3610     map_range.end = end;
3611     for (auto &handle_range_pair : mem_info->bound_ranges) {
3612         if (rangesIntersect(dev_data, &handle_range_pair.second, &map_range, &tmp_bool, false)) {
3613             // TODO : WARN here if tmp_bool true?
3614             handle_range_pair.second.valid = true;
3615         }
3616     }
3617 }
3618 
3619 static bool ValidateInsertMemoryRange(layer_data const *dev_data, uint64_t handle, DEVICE_MEM_INFO *mem_info,
3620                                       VkDeviceSize memoryOffset, VkMemoryRequirements memRequirements, bool is_image,
3621                                       bool is_linear, const char *api_name) {
3622     bool skip = false;
3623 
3624     MEMORY_RANGE range;
3625     range.image = is_image;
3626     range.handle = handle;
3627     range.linear = is_linear;
3628     range.valid = mem_info->global_valid;
3629     range.memory = mem_info->mem;
3630     range.start = memoryOffset;
3631     range.size = memRequirements.size;
3632     range.end = memoryOffset + memRequirements.size - 1;
3633     range.aliases.clear();
3634 
3635     // Check for aliasing problems.
3636     for (auto &obj_range_pair : mem_info->bound_ranges) {
3637         auto check_range = &obj_range_pair.second;
3638         bool intersection_error = false;
3639         if (rangesIntersect(dev_data, &range, check_range, &intersection_error, false)) {
3640             skip |= intersection_error;
3641             range.aliases.insert(check_range);
3642         }
3643     }
3644 
3645     if (memoryOffset >= mem_info->alloc_info.allocationSize) {
3646         UNIQUE_VALIDATION_ERROR_CODE error_code = is_image ? VALIDATION_ERROR_1740082c : VALIDATION_ERROR_1700080e;
3647         skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
3648                        HandleToUint64(mem_info->mem), __LINE__, error_code, "MEM",
3649                        "In %s, attempting to bind memory (0x%" PRIx64 ") to object (0x%" PRIx64 "), memoryOffset=0x%" PRIxLEAST64
3650                        " must be less than the memory allocation size 0x%" PRIxLEAST64 ". %s",
3651                        api_name, HandleToUint64(mem_info->mem), HandleToUint64(handle), memoryOffset,
3652                        mem_info->alloc_info.allocationSize, validation_error_map[error_code]);
3653     }
3654 
3655     return skip;
3656 }
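
// Worked example (hypothetical values, not part of the layer): binding past the end
// of an allocation is flagged before any range is inserted.
//
//   VkMemoryRequirements reqs = {};
//   reqs.size = 0x2000;
//   // With mem_info->alloc_info.allocationSize == 0x1000, memoryOffset 0x1000 is out
//   // of bounds (valid offsets are 0x0..0xFFF); the tracked range would have been
//   // [0x1000, 0x2FFF], entirely outside the allocation, so skip comes back true:
//   bool skip = ValidateInsertMemoryRange(dev_data, HandleToUint64(buffer), mem_info,
//                                         0x1000, reqs, false, true, "vkBindBufferMemory()");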

// Object with given handle is being bound to memory w/ given mem_info struct.
//  Track the newly bound memory range with given memoryOffset
//  Also scan any previous ranges and track aliased ranges with the new range.
//  (Overlap errors between linear and non-linear ranges are raised by ValidateInsertMemoryRange.)
// is_image indicates an image object, otherwise handle is for a buffer
// is_linear indicates a buffer or linear image
static void InsertMemoryRange(layer_data const *dev_data, uint64_t handle, DEVICE_MEM_INFO *mem_info, VkDeviceSize memoryOffset,
                              VkMemoryRequirements memRequirements, bool is_image, bool is_linear) {
    MEMORY_RANGE range;

    range.image = is_image;
    range.handle = handle;
    range.linear = is_linear;
    range.valid = mem_info->global_valid;
    range.memory = mem_info->mem;
    range.start = memoryOffset;
    range.size = memRequirements.size;
    range.end = memoryOffset + memRequirements.size - 1;
    range.aliases.clear();
    // Update memory aliasing
    // Save aliased ranges so we can copy them into the final map entry below. We can't do that inside the loop because we
    // don't yet have the final pointer, and if we inserted into the map before the loop to get that pointer, the loop could
    // run when not needed and would check the new range against itself.
    std::unordered_set<MEMORY_RANGE *> tmp_alias_ranges;
    for (auto &obj_range_pair : mem_info->bound_ranges) {
        auto check_range = &obj_range_pair.second;
        bool intersection_error = false;
        if (rangesIntersect(dev_data, &range, check_range, &intersection_error, true)) {
            range.aliases.insert(check_range);
            tmp_alias_ranges.insert(check_range);
        }
    }
    mem_info->bound_ranges[handle] = std::move(range);
    for (auto tmp_range : tmp_alias_ranges) {
        tmp_range->aliases.insert(&mem_info->bound_ranges[handle]);
    }
    if (is_image)
        mem_info->bound_images.insert(handle);
    else
        mem_info->bound_buffers.insert(handle);
}
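
// Illustrative sketch (hypothetical handles): two buffers bound to overlapping bytes
// of the same VkDeviceMemory end up in each other's alias sets, so validity changes
// on one range can be propagated to everything it overlaps.
//
//   InsertMemoryRange(dev_data, HandleToUint64(buf_a), mem_info, 0,     reqs_a, false, true);
//   InsertMemoryRange(dev_data, HandleToUint64(buf_b), mem_info, 0x100, reqs_b, false, true);
//   // If [0, reqs_a.size-1] overlaps [0x100, 0x100 + reqs_b.size - 1], then afterwards
//   // mem_info->bound_ranges[buf_a].aliases contains &bound_ranges[buf_b], and vice versa.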

static bool ValidateInsertImageMemoryRange(layer_data const *dev_data, VkImage image, DEVICE_MEM_INFO *mem_info,
                                           VkDeviceSize mem_offset, VkMemoryRequirements mem_reqs, bool is_linear,
                                           const char *api_name) {
    return ValidateInsertMemoryRange(dev_data, HandleToUint64(image), mem_info, mem_offset, mem_reqs, true, is_linear, api_name);
}
static void InsertImageMemoryRange(layer_data const *dev_data, VkImage image, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
                                   VkMemoryRequirements mem_reqs, bool is_linear) {
    InsertMemoryRange(dev_data, HandleToUint64(image), mem_info, mem_offset, mem_reqs, true, is_linear);
}

static bool ValidateInsertBufferMemoryRange(layer_data const *dev_data, VkBuffer buffer, DEVICE_MEM_INFO *mem_info,
                                            VkDeviceSize mem_offset, VkMemoryRequirements mem_reqs, const char *api_name) {
    return ValidateInsertMemoryRange(dev_data, HandleToUint64(buffer), mem_info, mem_offset, mem_reqs, false, true, api_name);
}
static void InsertBufferMemoryRange(layer_data const *dev_data, VkBuffer buffer, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
                                    VkMemoryRequirements mem_reqs) {
    InsertMemoryRange(dev_data, HandleToUint64(buffer), mem_info, mem_offset, mem_reqs, false, true);
}

// Remove MEMORY_RANGE struct for given handle from bound_ranges of mem_info
//  is_image indicates if handle is for image or buffer
//  This function will also remove the handle-to-index mapping from the appropriate
//  map and clean up any aliases for the range being removed.
static void RemoveMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info, bool is_image) {
    auto erase_range = &mem_info->bound_ranges[handle];
    for (auto alias_range : erase_range->aliases) {
        alias_range->aliases.erase(erase_range);
    }
    erase_range->aliases.clear();
    mem_info->bound_ranges.erase(handle);
    if (is_image) {
        mem_info->bound_images.erase(handle);
    } else {
        mem_info->bound_buffers.erase(handle);
    }
}

void RemoveBufferMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, false); }

void RemoveImageMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, true); }

VKAPI_ATTR void VKAPI_CALL DestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    BUFFER_STATE *buffer_state = nullptr;
    VK_OBJECT obj_struct;
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateDestroyBuffer(dev_data, buffer, &buffer_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyBuffer(device, buffer, pAllocator);
        lock.lock();
        if (buffer != VK_NULL_HANDLE) {
            PostCallRecordDestroyBuffer(dev_data, buffer, buffer_state, obj_struct);
        }
    }
}

VKAPI_ATTR void VKAPI_CALL DestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    // Common data objects used pre & post call
    BUFFER_VIEW_STATE *buffer_view_state = nullptr;
    VK_OBJECT obj_struct;
    unique_lock_t lock(global_lock);
    // Validate state before calling down chain, update common data if we'll be calling down chain
    bool skip = PreCallValidateDestroyBufferView(dev_data, bufferView, &buffer_view_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyBufferView(device, bufferView, pAllocator);
        lock.lock();
        if (bufferView != VK_NULL_HANDLE) {
            PostCallRecordDestroyBufferView(dev_data, bufferView, buffer_view_state, obj_struct);
        }
    }
}

VKAPI_ATTR void VKAPI_CALL DestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    IMAGE_STATE *image_state = nullptr;
    VK_OBJECT obj_struct;
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateDestroyImage(dev_data, image, &image_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyImage(device, image, pAllocator);
        lock.lock();
        if (image != VK_NULL_HANDLE) {
            PostCallRecordDestroyImage(dev_data, image, image_state, obj_struct);
        }
    }
}

static bool ValidateMemoryTypes(const layer_data *dev_data, const DEVICE_MEM_INFO *mem_info, const uint32_t memory_type_bits,
                                const char *funcName, UNIQUE_VALIDATION_ERROR_CODE msgCode) {
    bool skip = false;
    if (((1 << mem_info->alloc_info.memoryTypeIndex) & memory_type_bits) == 0) {
        skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                       HandleToUint64(mem_info->mem), __LINE__, msgCode, "MT",
                       "%s(): MemoryRequirements->memoryTypeBits (0x%X) for this object type are not compatible with the memory "
                       "type index (0x%X) of this memory object 0x%" PRIx64 ". %s",
                       funcName, memory_type_bits, mem_info->alloc_info.memoryTypeIndex, HandleToUint64(mem_info->mem),
                       validation_error_map[msgCode]);
    }
    return skip;
}
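
// Worked example (hypothetical values): if vkGetBufferMemoryRequirements() reports
// memoryTypeBits = 0x6 (memory types 1 and 2 allowed) and the bound allocation was
// made with memoryTypeIndex = 0, then (1 << 0) & 0x6 == 0 and the check above fires.
// An allocation with memoryTypeIndex 1 or 2 would pass, since (1 << 1) & 0x6 and
// (1 << 2) & 0x6 are both nonzero.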

static bool PreCallValidateBindBufferMemory(layer_data *dev_data, VkBuffer buffer, BUFFER_STATE *buffer_state, VkDeviceMemory mem,
                                            VkDeviceSize memoryOffset, const char *api_name) {
    bool skip = false;
    if (buffer_state) {
        unique_lock_t lock(global_lock);
        // Track objects tied to memory
        uint64_t buffer_handle = HandleToUint64(buffer);
        skip = ValidateSetMemBinding(dev_data, mem, buffer_handle, kVulkanObjectTypeBuffer, api_name);
        if (!buffer_state->memory_requirements_checked) {
            // There's not an explicit requirement in the spec to call vkGetBufferMemoryRequirements() prior to calling
            // BindBufferMemory, but it's implied in that the memory being bound must conform with the VkMemoryRequirements from
            // vkGetBufferMemoryRequirements()
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                            buffer_handle, __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
                            "%s: Binding memory to buffer 0x%" PRIx64
                            " but vkGetBufferMemoryRequirements() has not been called on that buffer.",
                            api_name, buffer_handle);
            // Make the call for them so we can verify the state
            lock.unlock();
            dev_data->dispatch_table.GetBufferMemoryRequirements(dev_data->device, buffer, &buffer_state->requirements);
            lock.lock();
        }

        // Validate bound memory range information
        auto mem_info = GetMemObjInfo(dev_data, mem);
        if (mem_info) {
            skip |= ValidateInsertBufferMemoryRange(dev_data, buffer, mem_info, memoryOffset, buffer_state->requirements, api_name);
            skip |= ValidateMemoryTypes(dev_data, mem_info, buffer_state->requirements.memoryTypeBits, api_name,
                                        VALIDATION_ERROR_17000816);
        }

        // Validate memory requirements alignment
        if (SafeModulo(memoryOffset, buffer_state->requirements.alignment) != 0) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                            buffer_handle, __LINE__, VALIDATION_ERROR_17000818, "DS",
                            "%s: memoryOffset is 0x%" PRIxLEAST64
                            " but must be an integer multiple of the VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
                            ", returned from a call to vkGetBufferMemoryRequirements with buffer. %s",
                            api_name, memoryOffset, buffer_state->requirements.alignment,
                            validation_error_map[VALIDATION_ERROR_17000818]);
        }

        // Validate memory requirements size
        if (mem_info) {
            if (buffer_state->requirements.size > (mem_info->alloc_info.allocationSize - memoryOffset)) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                buffer_handle, __LINE__, VALIDATION_ERROR_1700081a, "DS",
                                "%s: memory size minus memoryOffset is 0x%" PRIxLEAST64
                                " but must be at least as large as VkMemoryRequirements::size value 0x%" PRIxLEAST64
                                ", returned from a call to vkGetBufferMemoryRequirements with buffer. %s",
                                api_name, mem_info->alloc_info.allocationSize - memoryOffset, buffer_state->requirements.size,
                                validation_error_map[VALIDATION_ERROR_1700081a]);
            }
        }

        // Validate device limits alignments
        static const VkBufferUsageFlagBits usage_list[3] = {
            static_cast<VkBufferUsageFlagBits>(VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT),
            VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT};
        static const char *memory_type[3] = {"texel", "uniform", "storage"};
        static const char *offset_name[3] = {"minTexelBufferOffsetAlignment", "minUniformBufferOffsetAlignment",
                                             "minStorageBufferOffsetAlignment"};

        // TODO:  vk_validation_stats.py cannot abide braces immediately preceding or following a validation error enum
        // clang-format off
        static const UNIQUE_VALIDATION_ERROR_CODE msgCode[3] = { VALIDATION_ERROR_17000810, VALIDATION_ERROR_17000812,
            VALIDATION_ERROR_17000814 };
        // clang-format on

        // Keep this array in sync with usage_list, memory_type, offset_name, and msgCode above
        const VkDeviceSize offset_requirement[3] = {
            dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment,
            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment,
            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment};
        VkBufferUsageFlags usage = buffer_state->createInfo.usage;

        for (int i = 0; i < 3; i++) {
            if (usage & usage_list[i]) {
                if (SafeModulo(memoryOffset, offset_requirement[i]) != 0) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                    buffer_handle, __LINE__, msgCode[i], "DS",
                                    "%s: %s memoryOffset is 0x%" PRIxLEAST64
                                    " but must be a multiple of device limit %s 0x%" PRIxLEAST64 ". %s",
                                    api_name, memory_type[i], memoryOffset, offset_name[i], offset_requirement[i],
                                    validation_error_map[msgCode[i]]);
                }
            }
        }
    }
    return skip;
}
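
// Worked example (hypothetical values): with VkMemoryRequirements::alignment = 0x100,
// a memoryOffset of 0x200 passes (SafeModulo(0x200, 0x100) == 0) while 0x180 fails.
// Likewise, if the buffer was created with VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT and the
// device reports minUniformBufferOffsetAlignment = 0x40, a memoryOffset of 0x60 trips
// the device-limit loop above even if it satisfies a smaller VkMemoryRequirements
// alignment.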

static void PostCallRecordBindBufferMemory(layer_data *dev_data, VkBuffer buffer, BUFFER_STATE *buffer_state, VkDeviceMemory mem,
                                           VkDeviceSize memoryOffset, const char *api_name) {
    if (buffer_state) {
        unique_lock_t lock(global_lock);
        // Track bound memory range information
        auto mem_info = GetMemObjInfo(dev_data, mem);
        if (mem_info) {
            InsertBufferMemoryRange(dev_data, buffer, mem_info, memoryOffset, buffer_state->requirements);
        }

        // Track objects tied to memory
        uint64_t buffer_handle = HandleToUint64(buffer);
        SetMemBinding(dev_data, mem, buffer_state, memoryOffset, buffer_handle, kVulkanObjectTypeBuffer, api_name);
    }
}

VKAPI_ATTR VkResult VKAPI_CALL BindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    BUFFER_STATE *buffer_state;
    {
        unique_lock_t lock(global_lock);
        buffer_state = GetBufferState(dev_data, buffer);
    }
    bool skip = PreCallValidateBindBufferMemory(dev_data, buffer, buffer_state, mem, memoryOffset, "vkBindBufferMemory()");
    if (!skip) {
        result = dev_data->dispatch_table.BindBufferMemory(device, buffer, mem, memoryOffset);
        if (result == VK_SUCCESS) {
            PostCallRecordBindBufferMemory(dev_data, buffer, buffer_state, mem, memoryOffset, "vkBindBufferMemory()");
        }
    }
    return result;
}

static bool PreCallValidateBindBufferMemory2KHR(layer_data *dev_data, std::vector<BUFFER_STATE *> *buffer_state,
                                                uint32_t bindInfoCount, const VkBindBufferMemoryInfoKHR *pBindInfos) {
    {
        unique_lock_t lock(global_lock);
        for (uint32_t i = 0; i < bindInfoCount; i++) {
            (*buffer_state)[i] = GetBufferState(dev_data, pBindInfos[i].buffer);
        }
    }
    bool skip = false;
    char api_name[64];
    for (uint32_t i = 0; i < bindInfoCount; i++) {
        snprintf(api_name, sizeof(api_name), "vkBindBufferMemory2KHR() pBindInfos[%u]", i);
        skip |= PreCallValidateBindBufferMemory(dev_data, pBindInfos[i].buffer, (*buffer_state)[i], pBindInfos[i].memory,
                                                pBindInfos[i].memoryOffset, api_name);
    }
    return skip;
}

static void PostCallRecordBindBufferMemory2KHR(layer_data *dev_data, const std::vector<BUFFER_STATE *> &buffer_state,
                                               uint32_t bindInfoCount, const VkBindBufferMemoryInfoKHR *pBindInfos) {
    for (uint32_t i = 0; i < bindInfoCount; i++) {
        PostCallRecordBindBufferMemory(dev_data, pBindInfos[i].buffer, buffer_state[i], pBindInfos[i].memory,
                                       pBindInfos[i].memoryOffset, "vkBindBufferMemory2KHR()");
    }
}

VKAPI_ATTR VkResult VKAPI_CALL BindBufferMemory2KHR(VkDevice device, uint32_t bindInfoCount,
                                                    const VkBindBufferMemoryInfoKHR *pBindInfos) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    std::vector<BUFFER_STATE *> buffer_state(bindInfoCount);
    if (!PreCallValidateBindBufferMemory2KHR(dev_data, &buffer_state, bindInfoCount, pBindInfos)) {
        result = dev_data->dispatch_table.BindBufferMemory2KHR(device, bindInfoCount, pBindInfos);
        if (result == VK_SUCCESS) {
            PostCallRecordBindBufferMemory2KHR(dev_data, buffer_state, bindInfoCount, pBindInfos);
        }
    }
    return result;
}
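
// Illustrative app-side sketch (hypothetical handles): the 2KHR entry point above
// validates each element of pBindInfos independently, labeling any message with its
// pBindInfos[i] index.
//
//   VkBindBufferMemoryInfoKHR bind_infos[2] = {};
//   bind_infos[0].sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR;
//   bind_infos[0].buffer = buf_a;
//   bind_infos[0].memory = mem;
//   bind_infos[0].memoryOffset = 0;
//   bind_infos[1].sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR;
//   bind_infos[1].buffer = buf_b;
//   bind_infos[1].memory = mem;
//   bind_infos[1].memoryOffset = 0x10000;
//   vkBindBufferMemory2KHR(device, 2, bind_infos);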

static void PostCallRecordGetBufferMemoryRequirements(layer_data *dev_data, VkBuffer buffer,
                                                      VkMemoryRequirements *pMemoryRequirements) {
    BUFFER_STATE *buffer_state;
    {
        unique_lock_t lock(global_lock);
        buffer_state = GetBufferState(dev_data, buffer);
    }
    if (buffer_state) {
        buffer_state->requirements = *pMemoryRequirements;
        buffer_state->memory_requirements_checked = true;
    }
}

VKAPI_ATTR void VKAPI_CALL GetBufferMemoryRequirements(VkDevice device, VkBuffer buffer,
                                                       VkMemoryRequirements *pMemoryRequirements) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    dev_data->dispatch_table.GetBufferMemoryRequirements(device, buffer, pMemoryRequirements);
    PostCallRecordGetBufferMemoryRequirements(dev_data, buffer, pMemoryRequirements);
}

VKAPI_ATTR void VKAPI_CALL GetBufferMemoryRequirements2KHR(VkDevice device, const VkBufferMemoryRequirementsInfo2KHR *pInfo,
                                                           VkMemoryRequirements2KHR *pMemoryRequirements) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    dev_data->dispatch_table.GetBufferMemoryRequirements2KHR(device, pInfo, pMemoryRequirements);
    PostCallRecordGetBufferMemoryRequirements(dev_data, pInfo->buffer, &pMemoryRequirements->memoryRequirements);
}

static void PostCallRecordGetImageMemoryRequirements(layer_data *dev_data, VkImage image,
                                                     VkMemoryRequirements *pMemoryRequirements) {
    IMAGE_STATE *image_state;
    {
        unique_lock_t lock(global_lock);
        image_state = GetImageState(dev_data, image);
    }
    if (image_state) {
        image_state->requirements = *pMemoryRequirements;
        image_state->memory_requirements_checked = true;
    }
}

VKAPI_ATTR void VKAPI_CALL GetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    dev_data->dispatch_table.GetImageMemoryRequirements(device, image, pMemoryRequirements);
    PostCallRecordGetImageMemoryRequirements(dev_data, image, pMemoryRequirements);
}

VKAPI_ATTR void VKAPI_CALL GetImageMemoryRequirements2KHR(VkDevice device, const VkImageMemoryRequirementsInfo2KHR *pInfo,
                                                          VkMemoryRequirements2KHR *pMemoryRequirements) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    dev_data->dispatch_table.GetImageMemoryRequirements2KHR(device, pInfo, pMemoryRequirements);
    PostCallRecordGetImageMemoryRequirements(dev_data, pInfo->image, &pMemoryRequirements->memoryRequirements);
}

static void PostCallRecordGetImageSparseMemoryRequirements(IMAGE_STATE *image_state, uint32_t req_count,
                                                           VkSparseImageMemoryRequirements *reqs) {
    image_state->get_sparse_reqs_called = true;
    image_state->sparse_requirements.resize(req_count);
    if (reqs) {
        std::copy(reqs, reqs + req_count, image_state->sparse_requirements.begin());
    }
    for (const auto &req : image_state->sparse_requirements) {
        if (req.formatProperties.aspectMask & VK_IMAGE_ASPECT_METADATA_BIT) {
            image_state->sparse_metadata_required = true;
        }
    }
}

VKAPI_ATTR void VKAPI_CALL GetImageSparseMemoryRequirements(VkDevice device, VkImage image, uint32_t *pSparseMemoryRequirementCount,
                                                            VkSparseImageMemoryRequirements *pSparseMemoryRequirements) {
    // TODO : Implement tracking here, just passthrough initially
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    dev_data->dispatch_table.GetImageSparseMemoryRequirements(device, image, pSparseMemoryRequirementCount,
                                                              pSparseMemoryRequirements);
    unique_lock_t lock(global_lock);
    auto image_state = GetImageState(dev_data, image);
    // Guard against an unknown image handle; GetImageState can return null
    if (image_state) {
        PostCallRecordGetImageSparseMemoryRequirements(image_state, *pSparseMemoryRequirementCount, pSparseMemoryRequirements);
    }
}

static void PostCallRecordGetImageSparseMemoryRequirements2KHR(IMAGE_STATE *image_state, uint32_t req_count,
                                                               VkSparseImageMemoryRequirements2KHR *reqs) {
    std::vector<VkSparseImageMemoryRequirements> sparse_reqs(req_count);
    // Migrate to the old struct type for common handling with GetImageSparseMemoryRequirements()
    for (uint32_t i = 0; i < req_count; ++i) {
        assert(!reqs[i].pNext);  // TODO: If an extension is ever added here we need to handle it
        sparse_reqs[i] = reqs[i].memoryRequirements;
    }
    PostCallRecordGetImageSparseMemoryRequirements(image_state, req_count, sparse_reqs.data());
}

VKAPI_ATTR void VKAPI_CALL GetImageSparseMemoryRequirements2KHR(VkDevice device,
                                                                const VkImageSparseMemoryRequirementsInfo2KHR *pInfo,
                                                                uint32_t *pSparseMemoryRequirementCount,
                                                                VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements) {
    // TODO : Implement tracking here, just passthrough initially
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    dev_data->dispatch_table.GetImageSparseMemoryRequirements2KHR(device, pInfo, pSparseMemoryRequirementCount,
                                                                  pSparseMemoryRequirements);
    unique_lock_t lock(global_lock);
    auto image_state = GetImageState(dev_data, pInfo->image);
    // Guard against an unknown image handle; GetImageState can return null
    if (image_state) {
        PostCallRecordGetImageSparseMemoryRequirements2KHR(image_state, *pSparseMemoryRequirementCount, pSparseMemoryRequirements);
    }
}

VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceSparseImageFormatProperties(VkPhysicalDevice physicalDevice, VkFormat format,
                                                                        VkImageType type, VkSampleCountFlagBits samples,
                                                                        VkImageUsageFlags usage, VkImageTiling tiling,
                                                                        uint32_t *pPropertyCount,
                                                                        VkSparseImageFormatProperties *pProperties) {
    // TODO : Implement this intercept, track sparse image format properties and make sure they are obeyed.
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    instance_data->dispatch_table.GetPhysicalDeviceSparseImageFormatProperties(physicalDevice, format, type, samples, usage, tiling,
                                                                               pPropertyCount, pProperties);
}

VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceSparseImageFormatProperties2KHR(
    VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2KHR *pFormatInfo, uint32_t *pPropertyCount,
    VkSparseImageFormatProperties2KHR *pProperties) {
    // TODO : Implement this intercept, track sparse image format properties and make sure they are obeyed.
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    instance_data->dispatch_table.GetPhysicalDeviceSparseImageFormatProperties2KHR(physicalDevice, pFormatInfo, pPropertyCount,
                                                                                   pProperties);
}

VKAPI_ATTR void VKAPI_CALL DestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    // Common data objects used pre & post call
    IMAGE_VIEW_STATE *image_view_state = nullptr;
    VK_OBJECT obj_struct;
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateDestroyImageView(dev_data, imageView, &image_view_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyImageView(device, imageView, pAllocator);
        lock.lock();
        if (imageView != VK_NULL_HANDLE) {
            PostCallRecordDestroyImageView(dev_data, imageView, image_view_state, obj_struct);
        }
    }
}

VKAPI_ATTR void VKAPI_CALL DestroyShaderModule(VkDevice device, VkShaderModule shaderModule,
                                               const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    unique_lock_t lock(global_lock);
    dev_data->shaderModuleMap.erase(shaderModule);
    lock.unlock();

    dev_data->dispatch_table.DestroyShaderModule(device, shaderModule, pAllocator);
}

static bool PreCallValidateDestroyPipeline(layer_data *dev_data, VkPipeline pipeline, PIPELINE_STATE **pipeline_state,
                                           VK_OBJECT *obj_struct) {
    *pipeline_state = getPipelineState(dev_data, pipeline);
    *obj_struct = {HandleToUint64(pipeline), kVulkanObjectTypePipeline};
    if (dev_data->instance_data->disabled.destroy_pipeline) return false;
    bool skip = false;
    if (*pipeline_state) {
        skip |= ValidateObjectNotInUse(dev_data, *pipeline_state, *obj_struct, "vkDestroyPipeline", VALIDATION_ERROR_25c005fa);
    }
    return skip;
}

static void PostCallRecordDestroyPipeline(layer_data *dev_data, VkPipeline pipeline, PIPELINE_STATE *pipeline_state,
                                          VK_OBJECT obj_struct) {
    // Any bound cmd buffers are now invalid
    invalidateCommandBuffers(dev_data, pipeline_state->cb_bindings, obj_struct);
    dev_data->pipelineMap.erase(pipeline);
}

VKAPI_ATTR void VKAPI_CALL DestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    PIPELINE_STATE *pipeline_state = nullptr;
    VK_OBJECT obj_struct;
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateDestroyPipeline(dev_data, pipeline, &pipeline_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyPipeline(device, pipeline, pAllocator);
        lock.lock();
        if (pipeline != VK_NULL_HANDLE) {
            PostCallRecordDestroyPipeline(dev_data, pipeline, pipeline_state, obj_struct);
        }
    }
}

VKAPI_ATTR void VKAPI_CALL DestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout,
                                                 const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    unique_lock_t lock(global_lock);
    dev_data->pipelineLayoutMap.erase(pipelineLayout);
    lock.unlock();

    dev_data->dispatch_table.DestroyPipelineLayout(device, pipelineLayout, pAllocator);
}

static bool PreCallValidateDestroySampler(layer_data *dev_data, VkSampler sampler, SAMPLER_STATE **sampler_state,
                                          VK_OBJECT *obj_struct) {
    *sampler_state = GetSamplerState(dev_data, sampler);
    *obj_struct = {HandleToUint64(sampler), kVulkanObjectTypeSampler};
    if (dev_data->instance_data->disabled.destroy_sampler) return false;
    bool skip = false;
    if (*sampler_state) {
        skip |= ValidateObjectNotInUse(dev_data, *sampler_state, *obj_struct, "vkDestroySampler", VALIDATION_ERROR_26600874);
    }
    return skip;
}

static void PostCallRecordDestroySampler(layer_data *dev_data, VkSampler sampler, SAMPLER_STATE *sampler_state,
                                         VK_OBJECT obj_struct) {
    // Any bound cmd buffers are now invalid
    if (sampler_state) invalidateCommandBuffers(dev_data, sampler_state->cb_bindings, obj_struct);
    dev_data->samplerMap.erase(sampler);
}

VKAPI_ATTR void VKAPI_CALL DestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    SAMPLER_STATE *sampler_state = nullptr;
    VK_OBJECT obj_struct;
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateDestroySampler(dev_data, sampler, &sampler_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroySampler(device, sampler, pAllocator);
        lock.lock();
        if (sampler != VK_NULL_HANDLE) {
            PostCallRecordDestroySampler(dev_data, sampler, sampler_state, obj_struct);
        }
    }
}

static void PostCallRecordDestroyDescriptorSetLayout(layer_data *dev_data, VkDescriptorSetLayout ds_layout) {
    auto layout_it = dev_data->descriptorSetLayoutMap.find(ds_layout);
    if (layout_it != dev_data->descriptorSetLayoutMap.end()) {
        layout_it->second.get()->MarkDestroyed();
        dev_data->descriptorSetLayoutMap.erase(layout_it);
    }
}

VKAPI_ATTR void VKAPI_CALL DestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout,
                                                      const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    dev_data->dispatch_table.DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
    unique_lock_t lock(global_lock);
    PostCallRecordDestroyDescriptorSetLayout(dev_data, descriptorSetLayout);
}

static bool PreCallValidateDestroyDescriptorPool(layer_data *dev_data, VkDescriptorPool pool,
                                                 DESCRIPTOR_POOL_STATE **desc_pool_state, VK_OBJECT *obj_struct) {
    *desc_pool_state = GetDescriptorPoolState(dev_data, pool);
    *obj_struct = {HandleToUint64(pool), kVulkanObjectTypeDescriptorPool};
    if (dev_data->instance_data->disabled.destroy_descriptor_pool) return false;
    bool skip = false;
    if (*desc_pool_state) {
        skip |=
            ValidateObjectNotInUse(dev_data, *desc_pool_state, *obj_struct, "vkDestroyDescriptorPool", VALIDATION_ERROR_2440025e);
    }
    return skip;
}

static void PostCallRecordDestroyDescriptorPool(layer_data *dev_data, VkDescriptorPool descriptorPool,
                                                DESCRIPTOR_POOL_STATE *desc_pool_state, VK_OBJECT obj_struct) {
    // Any bound cmd buffers are now invalid
    invalidateCommandBuffers(dev_data, desc_pool_state->cb_bindings, obj_struct);
    // Free sets that were in this pool
    for (auto ds : desc_pool_state->sets) {
        freeDescriptorSet(dev_data, ds);
    }
    dev_data->descriptorPoolMap.erase(descriptorPool);
    delete desc_pool_state;
}

VKAPI_ATTR void VKAPI_CALL DestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
                                                 const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    DESCRIPTOR_POOL_STATE *desc_pool_state = nullptr;
    VK_OBJECT obj_struct;
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateDestroyDescriptorPool(dev_data, descriptorPool, &desc_pool_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyDescriptorPool(device, descriptorPool, pAllocator);
        lock.lock();
        if (descriptorPool != VK_NULL_HANDLE) {
            PostCallRecordDestroyDescriptorPool(dev_data, descriptorPool, desc_pool_state, obj_struct);
        }
    }
}
// Verify that the command buffer in the given cb_node is not in use (in-flight on a queue),
// and return the skip result.
// This function is only valid at a point when cmdBuffer is being reset or freed.
static bool checkCommandBufferInFlight(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *action,
                                       UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool skip = false;
    if (cb_node->in_use.load()) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(cb_node->commandBuffer), __LINE__, error_code, "DS",
                        "Attempt to %s command buffer (0x%" PRIx64 ") which is in use. %s", action,
                        HandleToUint64(cb_node->commandBuffer), validation_error_map[error_code]);
    }
    return skip;
}

// Iterate over all cmdBuffers in given commandPool and verify that each is not in use
static bool checkCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool, const char *action,
                                        UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool skip = false;
    for (auto cmd_buffer : pPool->commandBuffers) {
        skip |= checkCommandBufferInFlight(dev_data, GetCBNode(dev_data, cmd_buffer), action, error_code);
    }
    return skip;
}

// Free all command buffers in given list, removing all references/links to them using ResetCommandBufferState
static void FreeCommandBufferStates(layer_data *dev_data, COMMAND_POOL_NODE *pool_state, const uint32_t command_buffer_count,
                                    const VkCommandBuffer *command_buffers) {
    for (uint32_t i = 0; i < command_buffer_count; i++) {
        auto cb_state = GetCBNode(dev_data, command_buffers[i]);
        // Remove references to command buffer's state and delete
        if (cb_state) {
            // reset prior to delete, removing various references to it.
            // TODO: fix this, it's insane.
            ResetCommandBufferState(dev_data, cb_state->commandBuffer);
            // Remove the cb_state's references from layer_data and COMMAND_POOL_NODE
            dev_data->commandBufferMap.erase(cb_state->commandBuffer);
            pool_state->commandBuffers.erase(command_buffers[i]);
            delete cb_state;
        }
    }
}

VKAPI_ATTR void VKAPI_CALL FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
                                              const VkCommandBuffer *pCommandBuffers) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    unique_lock_t lock(global_lock);

    for (uint32_t i = 0; i < commandBufferCount; i++) {
        auto cb_node = GetCBNode(dev_data, pCommandBuffers[i]);
        // Verify the command buffer is not in-flight before its state is deleted below
        if (cb_node) {
            skip |= checkCommandBufferInFlight(dev_data, cb_node, "free", VALIDATION_ERROR_2840005e);
        }
    }

    if (skip) return;

    auto pPool = GetCommandPoolNode(dev_data, commandPool);
    FreeCommandBufferStates(dev_data, pPool, commandBufferCount, pCommandBuffers);
    lock.unlock();

    dev_data->dispatch_table.FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
}

VKAPI_ATTR VkResult VKAPI_CALL CreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
                                                 const VkAllocationCallbacks *pAllocator, VkCommandPool *pCommandPool) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    VkResult result = dev_data->dispatch_table.CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);

    if (VK_SUCCESS == result) {
        lock_guard_t lock(global_lock);
        dev_data->commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags;
        dev_data->commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex;
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
                                               const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    if (pCreateInfo && pCreateInfo->queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
        if (!dev_data->enabled_features.pipelineStatisticsQuery) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
                            __LINE__, VALIDATION_ERROR_11c0062e, "DS",
                            "Query pool with type VK_QUERY_TYPE_PIPELINE_STATISTICS created on a device with "
                            "VkDeviceCreateInfo.pEnabledFeatures.pipelineStatisticsQuery == VK_FALSE. %s",
                            validation_error_map[VALIDATION_ERROR_11c0062e]);
        }
    }

    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    if (!skip) {
        result = dev_data->dispatch_table.CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool);
    }
    if (result == VK_SUCCESS) {
        lock_guard_t lock(global_lock);
        QUERY_POOL_NODE *qp_node = &dev_data->queryPoolMap[*pQueryPool];
        qp_node->createInfo = *pCreateInfo;
    }
    return result;
}

static bool PreCallValidateDestroyCommandPool(layer_data *dev_data, VkCommandPool pool) {
    COMMAND_POOL_NODE *cp_state = GetCommandPoolNode(dev_data, pool);
    if (dev_data->instance_data->disabled.destroy_command_pool) return false;
    bool skip = false;
    if (cp_state) {
        // Verify that command buffers in pool are complete (not in-flight)
        skip |= checkCommandBuffersInFlight(dev_data, cp_state, "destroy command pool with", VALIDATION_ERROR_24000052);
    }
    return skip;
}

static void PostCallRecordDestroyCommandPool(layer_data *dev_data, VkCommandPool pool) {
    COMMAND_POOL_NODE *cp_state = GetCommandPoolNode(dev_data, pool);
    // Remove cmdpool from cmdpoolmap, after freeing layer data for the command buffers
    // "When a pool is destroyed, all command buffers allocated from the pool are freed."
    if (cp_state) {
        // Create a vector, as FreeCommandBufferStates deletes from cp_state->commandBuffers during iteration.
        std::vector<VkCommandBuffer> cb_vec{cp_state->commandBuffers.begin(), cp_state->commandBuffers.end()};
        FreeCommandBufferStates(dev_data, cp_state, static_cast<uint32_t>(cb_vec.size()), cb_vec.data());
        dev_data->commandPoolMap.erase(pool);
    }
}

// Destroy commandPool along with all of the commandBuffers allocated from that pool
VKAPI_ATTR void VKAPI_CALL DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateDestroyCommandPool(dev_data, commandPool);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyCommandPool(device, commandPool, pAllocator);
        lock.lock();
        if (commandPool != VK_NULL_HANDLE) {
            PostCallRecordDestroyCommandPool(dev_data, commandPool);
        }
    }
}

VKAPI_ATTR VkResult VKAPI_CALL ResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;

    unique_lock_t lock(global_lock);
    auto pPool = GetCommandPoolNode(dev_data, commandPool);
    skip |= checkCommandBuffersInFlight(dev_data, pPool, "reset command pool with", VALIDATION_ERROR_32800050);
    lock.unlock();

    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->dispatch_table.ResetCommandPool(device, commandPool, flags);

    // Reset all of the CBs allocated from this pool
    if (VK_SUCCESS == result) {
        lock.lock();
        for (auto cmdBuffer : pPool->commandBuffers) {
            ResetCommandBufferState(dev_data, cmdBuffer);
        }
        lock.unlock();
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL ResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    unique_lock_t lock(global_lock);
    for (uint32_t i = 0; i < fenceCount; ++i) {
        auto pFence = GetFenceNode(dev_data, pFences[i]);
        if (pFence && pFence->scope == kSyncScopeInternal && pFence->state == FENCE_INFLIGHT) {
            skip |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                        HandleToUint64(pFences[i]), __LINE__, VALIDATION_ERROR_32e008c6, "DS", "Fence 0x%" PRIx64 " is in use. %s",
                        HandleToUint64(pFences[i]), validation_error_map[VALIDATION_ERROR_32e008c6]);
        }
    }
    lock.unlock();

    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->dispatch_table.ResetFences(device, fenceCount, pFences);

    if (result == VK_SUCCESS) {
        lock.lock();
        for (uint32_t i = 0; i < fenceCount; ++i) {
            auto pFence = GetFenceNode(dev_data, pFences[i]);
            if (pFence) {
                if (pFence->scope == kSyncScopeInternal) {
                    pFence->state = FENCE_UNSIGNALED;
                } else if (pFence->scope == kSyncScopeExternalTemporary) {
                    pFence->scope = kSyncScopeInternal;
                }
            }
        }
        lock.unlock();
    }

    return result;
}

// For given cb_nodes, invalidate them and track object causing invalidation
void invalidateCommandBuffers(const layer_data *dev_data, std::unordered_set<GLOBAL_CB_NODE *> const &cb_nodes, VK_OBJECT obj) {
    for (auto cb_node : cb_nodes) {
        if (cb_node->state == CB_RECORDING) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    HandleToUint64(cb_node->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                    "Invalidating a command buffer that's currently being recorded: 0x%" PRIx64 ".",
                    HandleToUint64(cb_node->commandBuffer));
            cb_node->state = CB_INVALID_INCOMPLETE;
        } else if (cb_node->state == CB_RECORDED) {
            cb_node->state = CB_INVALID_COMPLETE;
        }
        cb_node->broken_bindings.push_back(obj);

        // If secondary, then propagate the invalidation to the primaries that will call us.
        if (cb_node->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
            invalidateCommandBuffers(dev_data, cb_node->linkedCommandBuffers, obj);
        }
    }
}
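
// Illustrative sketch (hypothetical handles): destroying a pipeline that a recorded
// command buffer references routes through this invalidation, so a later submit of
// that command buffer can be flagged.
//
//   VK_OBJECT obj = {HandleToUint64(pipeline), kVulkanObjectTypePipeline};
//   invalidateCommandBuffers(dev_data, pipeline_state->cb_bindings, obj);
//   // Every recorded (CB_RECORDED) buffer in cb_bindings is now CB_INVALID_COMPLETE
//   // and carries obj in broken_bindings; secondaries propagate the invalidation to
//   // their linked primaries.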

static bool PreCallValidateDestroyFramebuffer(layer_data *dev_data, VkFramebuffer framebuffer,
                                              FRAMEBUFFER_STATE **framebuffer_state, VK_OBJECT *obj_struct) {
    *framebuffer_state = GetFramebufferState(dev_data, framebuffer);
    *obj_struct = {HandleToUint64(framebuffer), kVulkanObjectTypeFramebuffer};
    if (dev_data->instance_data->disabled.destroy_framebuffer) return false;
    bool skip = false;
    if (*framebuffer_state) {
        skip |=
            ValidateObjectNotInUse(dev_data, *framebuffer_state, *obj_struct, "vkDestroyFramebuffer", VALIDATION_ERROR_250006f8);
    }
    return skip;
}
4497 
PostCallRecordDestroyFramebuffer(layer_data * dev_data,VkFramebuffer framebuffer,FRAMEBUFFER_STATE * framebuffer_state,VK_OBJECT obj_struct)4498 static void PostCallRecordDestroyFramebuffer(layer_data *dev_data, VkFramebuffer framebuffer, FRAMEBUFFER_STATE *framebuffer_state,
4499                                              VK_OBJECT obj_struct) {
4500     invalidateCommandBuffers(dev_data, framebuffer_state->cb_bindings, obj_struct);
4501     dev_data->frameBufferMap.erase(framebuffer);
4502 }
4503 
DestroyFramebuffer(VkDevice device,VkFramebuffer framebuffer,const VkAllocationCallbacks * pAllocator)4504 VKAPI_ATTR void VKAPI_CALL DestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
4505     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4506     FRAMEBUFFER_STATE *framebuffer_state = nullptr;
4507     VK_OBJECT obj_struct;
4508     unique_lock_t lock(global_lock);
4509     bool skip = PreCallValidateDestroyFramebuffer(dev_data, framebuffer, &framebuffer_state, &obj_struct);
4510     if (!skip) {
4511         lock.unlock();
4512         dev_data->dispatch_table.DestroyFramebuffer(device, framebuffer, pAllocator);
4513         lock.lock();
4514         if (framebuffer != VK_NULL_HANDLE) {
4515             PostCallRecordDestroyFramebuffer(dev_data, framebuffer, framebuffer_state, obj_struct);
4516         }
4517     }
4518 }
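
// Note on the structure DestroyFramebuffer shares with the other entry points in this file:
// each intercepted call follows a PreCallValidate -> dispatch -> PostCallRecord sequence.
// Validation and state recording run under global_lock, and the lock is released around the
// dispatch so the driver call itself never executes while the layer's mutex is held.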

static bool PreCallValidateDestroyRenderPass(layer_data *dev_data, VkRenderPass render_pass, RENDER_PASS_STATE **rp_state,
                                             VK_OBJECT *obj_struct) {
    *rp_state = GetRenderPassState(dev_data, render_pass);
    *obj_struct = {HandleToUint64(render_pass), kVulkanObjectTypeRenderPass};
    if (dev_data->instance_data->disabled.destroy_renderpass) return false;
    bool skip = false;
    if (*rp_state) {
        skip |= ValidateObjectNotInUse(dev_data, *rp_state, *obj_struct, "vkDestroyRenderPass", VALIDATION_ERROR_264006d2);
    }
    return skip;
}

static void PostCallRecordDestroyRenderPass(layer_data *dev_data, VkRenderPass render_pass, RENDER_PASS_STATE *rp_state,
                                            VK_OBJECT obj_struct) {
    invalidateCommandBuffers(dev_data, rp_state->cb_bindings, obj_struct);
    dev_data->renderPassMap.erase(render_pass);
}

VKAPI_ATTR void VKAPI_CALL DestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    RENDER_PASS_STATE *rp_state = nullptr;
    VK_OBJECT obj_struct;
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateDestroyRenderPass(dev_data, renderPass, &rp_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyRenderPass(device, renderPass, pAllocator);
        lock.lock();
        if (renderPass != VK_NULL_HANDLE) {
            PostCallRecordDestroyRenderPass(dev_data, renderPass, rp_state, obj_struct);
        }
    }
}

VKAPI_ATTR VkResult VKAPI_CALL CreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
                                            const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateCreateBuffer(dev_data, pCreateInfo);
    lock.unlock();

    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.CreateBuffer(device, pCreateInfo, pAllocator, pBuffer);

    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordCreateBuffer(dev_data, pCreateInfo, pBuffer);
        lock.unlock();
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
                                                const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateCreateBufferView(dev_data, pCreateInfo);
    lock.unlock();
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.CreateBufferView(device, pCreateInfo, pAllocator, pView);
    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordCreateBufferView(dev_data, pCreateInfo, pView);
        lock.unlock();
    }
    return result;
}

// Access helper functions for external modules
VkFormatProperties GetFormatProperties(core_validation::layer_data *device_data, VkFormat format) {
    VkFormatProperties format_properties;
    instance_layer_data *instance_data =
        GetLayerDataPtr(get_dispatch_key(device_data->instance_data->instance), instance_layer_data_map);
    instance_data->dispatch_table.GetPhysicalDeviceFormatProperties(device_data->physical_device, format, &format_properties);
    return format_properties;
}

VkResult GetImageFormatProperties(core_validation::layer_data *device_data, const VkImageCreateInfo *image_ci,
                                  VkImageFormatProperties *pImageFormatProperties) {
    instance_layer_data *instance_data =
        GetLayerDataPtr(get_dispatch_key(device_data->instance_data->instance), instance_layer_data_map);
    return instance_data->dispatch_table.GetPhysicalDeviceImageFormatProperties(
        device_data->physical_device, image_ci->format, image_ci->imageType, image_ci->tiling, image_ci->usage, image_ci->flags,
        pImageFormatProperties);
}

const debug_report_data *GetReportData(const core_validation::layer_data *device_data) { return device_data->report_data; }

const VkPhysicalDeviceProperties *GetPhysicalDeviceProperties(core_validation::layer_data *device_data) {
    return &device_data->phys_dev_props;
}

const CHECK_DISABLED *GetDisables(core_validation::layer_data *device_data) { return &device_data->instance_data->disabled; }

std::unordered_map<VkImage, std::unique_ptr<IMAGE_STATE>> *GetImageMap(core_validation::layer_data *device_data) {
    return &device_data->imageMap;
}

std::unordered_map<VkImage, std::vector<ImageSubresourcePair>> *GetImageSubresourceMap(core_validation::layer_data *device_data) {
    return &device_data->imageSubresourceMap;
}

std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> *GetImageLayoutMap(layer_data *device_data) {
    return &device_data->imageLayoutMap;
}

std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> const *GetImageLayoutMap(layer_data const *device_data) {
    return &device_data->imageLayoutMap;
}

std::unordered_map<VkBuffer, std::unique_ptr<BUFFER_STATE>> *GetBufferMap(layer_data *device_data) {
    return &device_data->bufferMap;
}

std::unordered_map<VkBufferView, std::unique_ptr<BUFFER_VIEW_STATE>> *GetBufferViewMap(layer_data *device_data) {
    return &device_data->bufferViewMap;
}

std::unordered_map<VkImageView, std::unique_ptr<IMAGE_VIEW_STATE>> *GetImageViewMap(layer_data *device_data) {
    return &device_data->imageViewMap;
}

const PHYS_DEV_PROPERTIES_NODE *GetPhysDevProperties(const layer_data *device_data) { return &device_data->phys_dev_properties; }

const VkPhysicalDeviceFeatures *GetEnabledFeatures(const layer_data *device_data) { return &device_data->enabled_features; }

const DeviceExtensions *GetDeviceExtensions(const layer_data *device_data) { return &device_data->extensions; }

VKAPI_ATTR VkResult VKAPI_CALL CreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
                                           const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = PreCallValidateCreateImage(dev_data, pCreateInfo, pAllocator, pImage);
    if (!skip) {
        result = dev_data->dispatch_table.CreateImage(device, pCreateInfo, pAllocator, pImage);
    }
    if (VK_SUCCESS == result) {
        lock_guard_t lock(global_lock);
        PostCallRecordCreateImage(dev_data, pCreateInfo, pImage);
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
                                               const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateCreateImageView(dev_data, pCreateInfo);
    lock.unlock();
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.CreateImageView(device, pCreateInfo, pAllocator, pView);
    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordCreateImageView(dev_data, pCreateInfo, *pView);
        lock.unlock();
    }

    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo,
                                           const VkAllocationCallbacks *pAllocator, VkFence *pFence) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.CreateFence(device, pCreateInfo, pAllocator, pFence);
    if (VK_SUCCESS == result) {
        lock_guard_t lock(global_lock);
        auto &fence_node = dev_data->fenceMap[*pFence];
        fence_node.fence = *pFence;
        fence_node.createInfo = *pCreateInfo;
        fence_node.state = (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) ? FENCE_RETIRED : FENCE_UNSIGNALED;
    }
    return result;
}

// TODO handle pipeline caches
VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
                                                   const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache);
    return result;
}

VKAPI_ATTR void VKAPI_CALL DestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache,
                                                const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    dev_data->dispatch_table.DestroyPipelineCache(device, pipelineCache, pAllocator);
}

VKAPI_ATTR VkResult VKAPI_CALL GetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize,
                                                    void *pData) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.GetPipelineCacheData(device, pipelineCache, pDataSize, pData);
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL MergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount,
                                                   const VkPipelineCache *pSrcCaches) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches);
    return result;
}

// Validation cache:
// CV is the bottommost implementor of this extension. Don't pass calls down.
VKAPI_ATTR VkResult VKAPI_CALL CreateValidationCacheEXT(VkDevice device, const VkValidationCacheCreateInfoEXT *pCreateInfo,
                                                        const VkAllocationCallbacks *pAllocator,
                                                        VkValidationCacheEXT *pValidationCache) {
    *pValidationCache = ValidationCache::Create(pCreateInfo);
    return *pValidationCache ? VK_SUCCESS : VK_ERROR_INITIALIZATION_FAILED;
}

VKAPI_ATTR void VKAPI_CALL DestroyValidationCacheEXT(VkDevice device, VkValidationCacheEXT validationCache,
                                                     const VkAllocationCallbacks *pAllocator) {
    delete (ValidationCache *)validationCache;
}

VKAPI_ATTR VkResult VKAPI_CALL GetValidationCacheDataEXT(VkDevice device, VkValidationCacheEXT validationCache, size_t *pDataSize,
                                                         void *pData) {
    size_t inSize = *pDataSize;
    ((ValidationCache *)validationCache)->Write(pDataSize, pData);
    return (pData && *pDataSize != inSize) ? VK_INCOMPLETE : VK_SUCCESS;
}
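
// Illustrative application-side sketch (hypothetical variable names) of the standard
// two-call idiom served by GetValidationCacheDataEXT above:
//     size_t data_size = 0;
//     vkGetValidationCacheDataEXT(device, cache, &data_size, nullptr);      // query size only
//     std::vector<uint8_t> blob(data_size);
//     vkGetValidationCacheDataEXT(device, cache, &data_size, blob.data());  // fetch the data
// VK_INCOMPLETE is returned when the caller-provided size was too small to hold everything.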

VKAPI_ATTR VkResult VKAPI_CALL MergeValidationCachesEXT(VkDevice device, VkValidationCacheEXT dstCache, uint32_t srcCacheCount,
                                                        const VkValidationCacheEXT *pSrcCaches) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    auto dst = (ValidationCache *)dstCache;
    auto src = (ValidationCache const *const *)pSrcCaches;
    VkResult result = VK_SUCCESS;
    for (uint32_t i = 0; i < srcCacheCount; i++) {
        if (src[i] == dst) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT,
                            0, __LINE__, VALIDATION_ERROR_3e600c00, "DS",
                            "vkMergeValidationCachesEXT: dstCache (0x%" PRIx64 ") must not appear in pSrcCaches array. %s",
                            HandleToUint64(dstCache), validation_error_map[VALIDATION_ERROR_3e600c00]);
            result = VK_ERROR_VALIDATION_FAILED_EXT;
        }
        if (!skip) {
            dst->Merge(src[i]);
        }
    }

    return result;
}
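
// Illustrative sketch (hypothetical handles) of the self-merge check above:
//     VkValidationCacheEXT caches[2] = {cache_a, cache_b};
//     vkMergeValidationCachesEXT(device, cache_b, 2, caches);  // cache_b appears in pSrcCaches,
//                                                              // so VALIDATION_ERROR_3e600c00 is reported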

// utility function to set collective state for pipeline
void set_pipeline_state(PIPELINE_STATE *pPipe) {
    // If any enabled attachment uses a constant blend factor, record that blend constants affect this pipeline
    if (pPipe->graphicsPipelineCI.pColorBlendState) {
        for (size_t i = 0; i < pPipe->attachments.size(); ++i) {
            if (VK_TRUE == pPipe->attachments[i].blendEnable) {
                if (((pPipe->attachments[i].dstAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].dstAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
                    ((pPipe->attachments[i].dstColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].dstColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
                    ((pPipe->attachments[i].srcAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].srcAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
                    ((pPipe->attachments[i].srcColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].srcColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA))) {
                    pPipe->blendConstantsEnabled = true;
                }
            }
        }
    }
}
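
// Illustrative sketch (hypothetical values): an attachment state such as
//     VkPipelineColorBlendAttachmentState att = {};
//     att.blendEnable = VK_TRUE;
//     att.srcColorBlendFactor = VK_BLEND_FACTOR_CONSTANT_COLOR;  // falls in the CONSTANT_* enum range
// would set blendConstantsEnabled, because the blend result then depends on the blend
// constants supplied via vkCmdSetBlendConstants (or the pipeline's static blendConstants).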

bool validate_dual_src_blend_feature(layer_data *device_data, PIPELINE_STATE *pipe_state) {
    bool skip = false;
    if (pipe_state->graphicsPipelineCI.pColorBlendState) {
        for (size_t i = 0; i < pipe_state->attachments.size(); ++i) {
            if (!device_data->enabled_features.dualSrcBlend) {
                if ((pipe_state->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
                    (pipe_state->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
                    (pipe_state->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
                    (pipe_state->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA) ||
                    (pipe_state->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
                    (pipe_state->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
                    (pipe_state->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
                    (pipe_state->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) {
                    skip |=
                        log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                                HandleToUint64(pipe_state->pipeline), __LINE__, DRAWSTATE_INVALID_FEATURE, "DS",
                                "CmdBindPipeline: vkPipeline (0x%" PRIx64 ") attachment[" PRINTF_SIZE_T_SPECIFIER
                                "] has a dual-source blend factor but this device feature is not enabled.",
                                HandleToUint64(pipe_state->pipeline), i);
                }
            }
        }
    }
    return skip;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                                                       const VkGraphicsPipelineCreateInfo *pCreateInfos,
                                                       const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
    // The order of operations here is a little convoluted but gets the job done
    //  1. Pipeline create state is first shadowed into PIPELINE_STATE struct
    //  2. Create state is then validated (which uses flags setup during shadowing)
    //  3. If everything looks good, we'll then create the pipeline and add NODE to pipelineMap
    bool skip = false;
    vector<std::unique_ptr<PIPELINE_STATE>> pipe_state;
    pipe_state.reserve(count);
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    uint32_t i = 0;
    unique_lock_t lock(global_lock);

    for (i = 0; i < count; i++) {
        pipe_state.push_back(std::unique_ptr<PIPELINE_STATE>(new PIPELINE_STATE));
        pipe_state[i]->initGraphicsPipeline(&pCreateInfos[i], GetRenderPassStateSharedPtr(dev_data, pCreateInfos[i].renderPass));
        pipe_state[i]->pipeline_layout = *getPipelineLayout(dev_data, pCreateInfos[i].layout);
    }

    for (i = 0; i < count; i++) {
        skip |= ValidatePipelineLocked(dev_data, pipe_state, i);
    }

    lock.unlock();

    for (i = 0; i < count; i++) {
        skip |= ValidatePipelineUnlocked(dev_data, pipe_state, i);
    }

    if (skip) {
        for (i = 0; i < count; i++) {
            pPipelines[i] = VK_NULL_HANDLE;
        }
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    auto result =
        dev_data->dispatch_table.CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
    lock.lock();
    for (i = 0; i < count; i++) {
        if (pPipelines[i] != VK_NULL_HANDLE) {
            pipe_state[i]->pipeline = pPipelines[i];
            dev_data->pipelineMap[pPipelines[i]] = std::move(pipe_state[i]);
        }
    }

    return result;
}
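
// Note: validation above is split into two phases. ValidatePipelineLocked runs while
// global_lock is held and may consult shared state tables; ValidatePipelineUnlocked runs
// after the lock is dropped, presumably for checks that only read the already-shadowed
// PIPELINE_STATE, keeping the lock span as short as possible.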

VKAPI_ATTR VkResult VKAPI_CALL CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                                                      const VkComputePipelineCreateInfo *pCreateInfos,
                                                      const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
    bool skip = false;

    vector<std::unique_ptr<PIPELINE_STATE>> pPipeState;
    pPipeState.reserve(count);
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    uint32_t i = 0;
    unique_lock_t lock(global_lock);
    for (i = 0; i < count; i++) {
        // Create and initialize internal tracking data structure
        pPipeState.push_back(unique_ptr<PIPELINE_STATE>(new PIPELINE_STATE));
        pPipeState[i]->initComputePipeline(&pCreateInfos[i]);
        pPipeState[i]->pipeline_layout = *getPipelineLayout(dev_data, pCreateInfos[i].layout);

        // TODO: Add Compute Pipeline Verification
        skip |= validate_compute_pipeline(dev_data, pPipeState[i].get());
    }

    if (skip) {
        for (i = 0; i < count; i++) {
            pPipelines[i] = VK_NULL_HANDLE;
        }
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    lock.unlock();
    auto result =
        dev_data->dispatch_table.CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
    lock.lock();
    for (i = 0; i < count; i++) {
        if (pPipelines[i] != VK_NULL_HANDLE) {
            pPipeState[i]->pipeline = pPipelines[i];
            dev_data->pipelineMap[pPipelines[i]] = std::move(pPipeState[i]);
        }
    }

    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
                                             const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.CreateSampler(device, pCreateInfo, pAllocator, pSampler);
    if (VK_SUCCESS == result) {
        lock_guard_t lock(global_lock);
        dev_data->samplerMap[*pSampler] = unique_ptr<SAMPLER_STATE>(new SAMPLER_STATE(pSampler, pCreateInfo));
    }
    return result;
}

static bool PreCallValidateCreateDescriptorSetLayout(layer_data *dev_data, const VkDescriptorSetLayoutCreateInfo *create_info) {
    if (dev_data->instance_data->disabled.create_descriptor_set_layout) return false;
    return cvdescriptorset::DescriptorSetLayout::ValidateCreateInfo(dev_data->report_data, create_info,
                                                                    dev_data->extensions.vk_khr_push_descriptor,
                                                                    dev_data->phys_dev_ext_props.max_push_descriptors);
}

static void PostCallRecordCreateDescriptorSetLayout(layer_data *dev_data, const VkDescriptorSetLayoutCreateInfo *create_info,
                                                    VkDescriptorSetLayout set_layout) {
    dev_data->descriptorSetLayoutMap[set_layout] = std::make_shared<cvdescriptorset::DescriptorSetLayout>(create_info, set_layout);
}

VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
                                                         const VkAllocationCallbacks *pAllocator,
                                                         VkDescriptorSetLayout *pSetLayout) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateCreateDescriptorSetLayout(dev_data, pCreateInfo);
    if (!skip) {
        lock.unlock();
        result = dev_data->dispatch_table.CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
        if (VK_SUCCESS == result) {
            lock.lock();
            PostCallRecordCreateDescriptorSetLayout(dev_data, pCreateInfo, *pSetLayout);
        }
    }
    return result;
}

// Used by CreatePipelineLayout and CmdPushConstants.
// Note that the index argument is optional and only used by CreatePipelineLayout.
static bool validatePushConstantRange(const layer_data *dev_data, const uint32_t offset, const uint32_t size,
                                      const char *caller_name, uint32_t index = 0) {
    if (dev_data->instance_data->disabled.push_constant_range) return false;
    uint32_t const maxPushConstantsSize = dev_data->phys_dev_properties.properties.limits.maxPushConstantsSize;
    bool skip = false;
    // Check that offset + size don't exceed the max.
    // Prevent arithmetic overflow here by avoiding addition and testing in this order.
    if ((offset >= maxPushConstantsSize) || (size > maxPushConstantsSize - offset)) {
        // This is a pain just to adapt the log message to the caller, but better to sort it out only when there is a problem.
        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
            if (offset >= maxPushConstantsSize) {
                skip |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                    VALIDATION_ERROR_11a0024c, "DS",
                    "%s call has push constants index %u with offset %u that exceeds this device's maxPushConstantsSize of %u. %s",
                    caller_name, index, offset, maxPushConstantsSize, validation_error_map[VALIDATION_ERROR_11a0024c]);
            }
            if (size > maxPushConstantsSize - offset) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, VALIDATION_ERROR_11a00254, "DS",
                                "%s call has push constants index %u with offset %u and size %u that exceeds this device's "
                                "maxPushConstantsSize of %u. %s",
                                caller_name, index, offset, size, maxPushConstantsSize,
                                validation_error_map[VALIDATION_ERROR_11a00254]);
            }
        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
            if (offset >= maxPushConstantsSize) {
                skip |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                    VALIDATION_ERROR_1bc002e4, "DS",
                    "%s call has push constants index %u with offset %u that exceeds this device's maxPushConstantsSize of %u. %s",
                    caller_name, index, offset, maxPushConstantsSize, validation_error_map[VALIDATION_ERROR_1bc002e4]);
            }
            if (size > maxPushConstantsSize - offset) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, VALIDATION_ERROR_1bc002e6, "DS",
                                "%s call has push constants index %u with offset %u and size %u that exceeds this device's "
                                "maxPushConstantsSize of %u. %s",
                                caller_name, index, offset, size, maxPushConstantsSize,
                                validation_error_map[VALIDATION_ERROR_1bc002e6]);
            }
        } else {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            __LINE__, DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
        }
    }
    // size needs to be non-zero and a multiple of 4.
    if ((size == 0) || ((size & 0x3) != 0)) {
        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
            if (size == 0) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, VALIDATION_ERROR_11a00250, "DS",
                                "%s call has push constants index %u with size %u. Size must be greater than zero. %s", caller_name,
                                index, size, validation_error_map[VALIDATION_ERROR_11a00250]);
            }
            if (size & 0x3) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, VALIDATION_ERROR_11a00252, "DS",
                                "%s call has push constants index %u with size %u. Size must be a multiple of 4. %s", caller_name,
                                index, size, validation_error_map[VALIDATION_ERROR_11a00252]);
            }
        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
            if (size == 0) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, VALIDATION_ERROR_1bc2c21b, "DS",
                                "%s call has push constants index %u with size %u. Size must be greater than zero. %s", caller_name,
                                index, size, validation_error_map[VALIDATION_ERROR_1bc2c21b]);
            }
            if (size & 0x3) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, VALIDATION_ERROR_1bc002e2, "DS",
                                "%s call has push constants index %u with size %u. Size must be a multiple of 4. %s", caller_name,
                                index, size, validation_error_map[VALIDATION_ERROR_1bc002e2]);
            }
        } else {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            __LINE__, DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
        }
    }
    // offset needs to be a multiple of 4.
    if ((offset & 0x3) != 0) {
        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            __LINE__, VALIDATION_ERROR_11a0024e, "DS",
                            "%s call has push constants index %u with offset %u. Offset must be a multiple of 4. %s", caller_name,
                            index, offset, validation_error_map[VALIDATION_ERROR_11a0024e]);
        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            __LINE__, VALIDATION_ERROR_1bc002e0, "DS",
                            "%s call has push constants with offset %u. Offset must be a multiple of 4. %s", caller_name, offset,
                            validation_error_map[VALIDATION_ERROR_1bc002e0]);
        } else {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            __LINE__, DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
        }
    }
    return skip;
}
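
// Illustrative examples (hypothetical values) of the checks above, assuming a device with
// maxPushConstantsSize = 128 (the spec-guaranteed minimum):
//     validatePushConstantRange(dev_data, 0,  64, "vkCreatePipelineLayout()", 0);  // OK
//     validatePushConstantRange(dev_data, 64, 68, "vkCreatePipelineLayout()", 0);  // offset + size exceeds 128
//     validatePushConstantRange(dev_data, 0,   6, "vkCmdPushConstants()");         // size not a multiple of 4
//     validatePushConstantRange(dev_data, 2,   8, "vkCmdPushConstants()");         // offset not a multiple of 4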

enum DSL_DESCRIPTOR_GROUPS {
    DSL_TYPE_SAMPLERS = 0,
    DSL_TYPE_UNIFORM_BUFFERS,
    DSL_TYPE_STORAGE_BUFFERS,
    DSL_TYPE_SAMPLED_IMAGES,
    DSL_TYPE_STORAGE_IMAGES,
    DSL_TYPE_INPUT_ATTACHMENTS,
    DSL_NUM_DESCRIPTOR_GROUPS
};

// Used by PreCallValiateCreatePipelineLayout.
// Returns an array of size DSL_NUM_DESCRIPTOR_GROUPS of the maximum number of descriptors used in any single pipeline stage
std::valarray<uint32_t> GetDescriptorCountMaxPerStage(
    const layer_data *dev_data, const std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> set_layouts) {
    // Identify active pipeline stages
    std::vector<VkShaderStageFlags> stage_flags = {VK_SHADER_STAGE_VERTEX_BIT, VK_SHADER_STAGE_FRAGMENT_BIT,
                                                   VK_SHADER_STAGE_COMPUTE_BIT};
    if (dev_data->enabled_features.geometryShader) {
        stage_flags.push_back(VK_SHADER_STAGE_GEOMETRY_BIT);
    }
    if (dev_data->enabled_features.tessellationShader) {
        stage_flags.push_back(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
        stage_flags.push_back(VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT);
    }

    // Allow iteration over enum values
    std::vector<DSL_DESCRIPTOR_GROUPS> dsl_groups = {DSL_TYPE_SAMPLERS,       DSL_TYPE_UNIFORM_BUFFERS, DSL_TYPE_STORAGE_BUFFERS,
                                                     DSL_TYPE_SAMPLED_IMAGES, DSL_TYPE_STORAGE_IMAGES,  DSL_TYPE_INPUT_ATTACHMENTS};

    // Sum by layouts per stage, then pick max of stages per type
    std::valarray<uint32_t> max_sum(0U, DSL_NUM_DESCRIPTOR_GROUPS);  // max descriptor sum among all pipeline stages
    for (auto stage : stage_flags) {
        std::valarray<uint32_t> stage_sum(0U, DSL_NUM_DESCRIPTOR_GROUPS);  // per-stage sums
        for (auto dsl : set_layouts) {
            for (uint32_t binding_idx = 0; binding_idx < dsl->GetBindingCount(); binding_idx++) {
                const VkDescriptorSetLayoutBinding *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx);
                if (0 != (stage & binding->stageFlags)) {
                    switch (binding->descriptorType) {
                        case VK_DESCRIPTOR_TYPE_SAMPLER:
                            stage_sum[DSL_TYPE_SAMPLERS] += binding->descriptorCount;
                            break;
                        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
                        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
                            stage_sum[DSL_TYPE_UNIFORM_BUFFERS] += binding->descriptorCount;
                            break;
                        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
                        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
                            stage_sum[DSL_TYPE_STORAGE_BUFFERS] += binding->descriptorCount;
                            break;
                        case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
                        case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
                            stage_sum[DSL_TYPE_SAMPLED_IMAGES] += binding->descriptorCount;
                            break;
                        case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
                        case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
                            stage_sum[DSL_TYPE_STORAGE_IMAGES] += binding->descriptorCount;
                            break;
                        case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
                            stage_sum[DSL_TYPE_SAMPLED_IMAGES] += binding->descriptorCount;
                            stage_sum[DSL_TYPE_SAMPLERS] += binding->descriptorCount;
                            break;
                        case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
                            stage_sum[DSL_TYPE_INPUT_ATTACHMENTS] += binding->descriptorCount;
                            break;
                        default:
                            break;
                    }
                }
            }
        }
        for (auto type : dsl_groups) {
            max_sum[type] = std::max(stage_sum[type], max_sum[type]);
        }
    }
    return max_sum;
}
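
// Worked example (hypothetical layout) of the per-stage maximum above: a set layout with
//     binding 0: VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, descriptorCount = 4, stages = VERTEX | FRAGMENT
//     binding 1: VK_DESCRIPTOR_TYPE_SAMPLER,                descriptorCount = 2, stages = FRAGMENT
// yields per-stage sampler sums of vertex = 4 and fragment = 4 + 2 = 6, so
// max_sum[DSL_TYPE_SAMPLERS] == 6. Note that combined image samplers count toward both
// the DSL_TYPE_SAMPLERS and DSL_TYPE_SAMPLED_IMAGES groups.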

// Used by PreCallValiateCreatePipelineLayout.
// Returns an array of size VK_DESCRIPTOR_TYPE_RANGE_SIZE of the summed descriptors by type across all pipeline stages
std::valarray<uint32_t> GetDescriptorSumAcrossStages(
    const layer_data *dev_data, const std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> set_layouts) {
    // Identify active pipeline stages
    std::vector<VkShaderStageFlags> stage_flags = {VK_SHADER_STAGE_VERTEX_BIT, VK_SHADER_STAGE_FRAGMENT_BIT,
                                                   VK_SHADER_STAGE_COMPUTE_BIT};
    if (dev_data->enabled_features.geometryShader) {
        stage_flags.push_back(VK_SHADER_STAGE_GEOMETRY_BIT);
    }
    if (dev_data->enabled_features.tessellationShader) {
        stage_flags.push_back(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
        stage_flags.push_back(VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT);
    }

    // Sum by descriptor type across all enabled stages
    std::valarray<uint32_t> sum_by_type(0U, VK_DESCRIPTOR_TYPE_RANGE_SIZE);
    for (auto stage : stage_flags) {
        for (auto dsl : set_layouts) {
            for (uint32_t binding_idx = 0; binding_idx < dsl->GetBindingCount(); binding_idx++) {
                const VkDescriptorSetLayoutBinding *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx);
                if (0 != (stage & binding->stageFlags)) {
                    sum_by_type[binding->descriptorType] += binding->descriptorCount;
                }
            }
        }
    }
    return sum_by_type;
}
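
// Worked example (same hypothetical layout as above): because the sum is accumulated once
// per stage, a binding visible to two stages counts twice. With the two bindings active in
// VERTEX and FRAGMENT:
//     sum_by_type[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER] = 4 + 4 = 8
//     sum_by_type[VK_DESCRIPTOR_TYPE_SAMPLER]                = 2
// These totals are what the maxDescriptorSet* limit checks below compare against.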
5154 
PreCallValiateCreatePipelineLayout(const layer_data * dev_data,const VkPipelineLayoutCreateInfo * pCreateInfo)5155 static bool PreCallValiateCreatePipelineLayout(const layer_data *dev_data, const VkPipelineLayoutCreateInfo *pCreateInfo) {
5156     bool skip = false;
5157 
5158     // Validate layout count against device physical limit
5159     if (pCreateInfo->setLayoutCount > dev_data->phys_dev_props.limits.maxBoundDescriptorSets) {
5160         skip |=
5161             log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
5162                     VALIDATION_ERROR_0fe0023c, "DS",
5163                     "vkCreatePipelineLayout(): setLayoutCount (%d) exceeds physical device maxBoundDescriptorSets limit (%d). %s",
5164                     pCreateInfo->setLayoutCount, dev_data->phys_dev_props.limits.maxBoundDescriptorSets,
5165                     validation_error_map[VALIDATION_ERROR_0fe0023c]);
5166     }
5167 
5168     // Validate Push Constant ranges
5169     uint32_t i, j;
5170     for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
5171         skip |= validatePushConstantRange(dev_data, pCreateInfo->pPushConstantRanges[i].offset,
5172                                           pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()", i);
5173         if (0 == pCreateInfo->pPushConstantRanges[i].stageFlags) {
5174             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5175                             __LINE__, VALIDATION_ERROR_11a2dc03, "DS", "vkCreatePipelineLayout() call has no stageFlags set. %s",
5176                             validation_error_map[VALIDATION_ERROR_11a2dc03]);
5177         }
5178     }
5179 
5180     // As of 1.0.28, there is a VU that states that a stage flag cannot appear more than once in the list of push constant ranges.
5181     for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
5182         for (j = i + 1; j < pCreateInfo->pushConstantRangeCount; ++j) {
5183             if (0 != (pCreateInfo->pPushConstantRanges[i].stageFlags & pCreateInfo->pPushConstantRanges[j].stageFlags)) {
5184                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5185                                 __LINE__, VALIDATION_ERROR_0fe00248, "DS",
5186                                 "vkCreatePipelineLayout() Duplicate stage flags found in ranges %d and %d. %s", i, j,
5187                                 validation_error_map[VALIDATION_ERROR_0fe00248]);
5188             }
5189         }
5190     }
5191 
5192     // Early-out
5193     if (skip) return skip;
5194 
5195     std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> set_layouts(pCreateInfo->setLayoutCount, nullptr);
5196     unsigned int push_descriptor_set_count = 0;
5197     {
5198         unique_lock_t lock(global_lock);  // Lock while accessing global state
5199         for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
5200             set_layouts[i] = GetDescriptorSetLayout(dev_data, pCreateInfo->pSetLayouts[i]);
5201             if (set_layouts[i]->IsPushDescriptor()) ++push_descriptor_set_count;
5202         }
5203     }  // Unlock
5204 
5205     if (push_descriptor_set_count > 1) {
5206         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
5207                         VALIDATION_ERROR_0fe0024a, "DS", "vkCreatePipelineLayout() Multiple push descriptor sets found. %s",
5208                         validation_error_map[VALIDATION_ERROR_0fe0024a]);
5209     }
5210 
5211     // Max descriptors by type, within a single pipeline stage
5212     std::valarray<uint32_t> max_descriptors_per_stage = GetDescriptorCountMaxPerStage(dev_data, set_layouts);
5213     // Samplers
5214     if (max_descriptors_per_stage[DSL_TYPE_SAMPLERS] > dev_data->phys_dev_props.limits.maxPerStageDescriptorSamplers) {
5215         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
5216                         VALIDATION_ERROR_0fe0023e, "DS",
5217                         "vkCreatePipelineLayout(): max per-stage sampler bindings count (%d) exceeds device "
5218                         "maxPerStageDescriptorSamplers limit (%d). %s",
5219                         max_descriptors_per_stage[DSL_TYPE_SAMPLERS], dev_data->phys_dev_props.limits.maxPerStageDescriptorSamplers,
5220                         validation_error_map[VALIDATION_ERROR_0fe0023e]);
5221     }
5222 
5223     // Uniform buffers
5224     if (max_descriptors_per_stage[DSL_TYPE_UNIFORM_BUFFERS] > dev_data->phys_dev_props.limits.maxPerStageDescriptorUniformBuffers) {
5225         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
5226                         VALIDATION_ERROR_0fe00240, "DS",
5227                         "vkCreatePipelineLayout(): max per-stage uniform buffer bindings count (%d) exceeds device "
5228                         "maxPerStageDescriptorUniformBuffers limit (%d). %s",
5229                         max_descriptors_per_stage[DSL_TYPE_UNIFORM_BUFFERS],
5230                         dev_data->phys_dev_props.limits.maxPerStageDescriptorUniformBuffers,
5231                         validation_error_map[VALIDATION_ERROR_0fe00240]);
5232     }
5233 
5234     // Storage buffers
5235     if (max_descriptors_per_stage[DSL_TYPE_STORAGE_BUFFERS] > dev_data->phys_dev_props.limits.maxPerStageDescriptorStorageBuffers) {
5236         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
5237                         VALIDATION_ERROR_0fe00242, "DS",
5238                         "vkCreatePipelineLayout(): max per-stage storage buffer bindings count (%d) exceeds device "
5239                         "maxPerStageDescriptorStorageBuffers limit (%d). %s",
5240                         max_descriptors_per_stage[DSL_TYPE_STORAGE_BUFFERS],
5241                         dev_data->phys_dev_props.limits.maxPerStageDescriptorStorageBuffers,
5242                         validation_error_map[VALIDATION_ERROR_0fe00242]);
5243     }
5244 
5245     // Sampled images
5246     if (max_descriptors_per_stage[DSL_TYPE_SAMPLED_IMAGES] > dev_data->phys_dev_props.limits.maxPerStageDescriptorSampledImages) {
5247         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
5248                         VALIDATION_ERROR_0fe00244, "DS",
5249                         "vkCreatePipelineLayout(): max per-stage sampled image bindings count (%d) exceeds device "
5250                         "maxPerStageDescriptorSampledImages limit (%d). %s",
5251                         max_descriptors_per_stage[DSL_TYPE_SAMPLED_IMAGES],
5252                         dev_data->phys_dev_props.limits.maxPerStageDescriptorSampledImages,
5253                         validation_error_map[VALIDATION_ERROR_0fe00244]);
5254     }
5255 
5256     // Storage images
5257     if (max_descriptors_per_stage[DSL_TYPE_STORAGE_IMAGES] > dev_data->phys_dev_props.limits.maxPerStageDescriptorStorageImages) {
5258         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
5259                         VALIDATION_ERROR_0fe00246, "DS",
5260                         "vkCreatePipelineLayout(): max per-stage storage image bindings count (%d) exceeds device "
5261                         "maxPerStageDescriptorStorageImages limit (%d). %s",
5262                         max_descriptors_per_stage[DSL_TYPE_STORAGE_IMAGES],
5263                         dev_data->phys_dev_props.limits.maxPerStageDescriptorStorageImages,
5264                         validation_error_map[VALIDATION_ERROR_0fe00246]);
5265     }
5266 
5267     // Input attachments
5268     if (max_descriptors_per_stage[DSL_TYPE_INPUT_ATTACHMENTS] >
5269         dev_data->phys_dev_props.limits.maxPerStageDescriptorInputAttachments) {
5270         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
5271                         VALIDATION_ERROR_0fe00d18, "DS",
5272                         "vkCreatePipelineLayout(): max per-stage input attachment bindings count (%d) exceeds device "
5273                         "maxPerStageDescriptorInputAttachments limit (%d). %s",
5274                         max_descriptors_per_stage[DSL_TYPE_INPUT_ATTACHMENTS],
5275                         dev_data->phys_dev_props.limits.maxPerStageDescriptorInputAttachments,
5276                         validation_error_map[VALIDATION_ERROR_0fe00d18]);
5277     }
5278 
5279     // Total descriptors by type, summed across all pipeline stages
5280     //
5281     std::valarray<uint32_t> sum_all_stages = GetDescriptorSumAcrossStages(dev_data, set_layouts);
5282     // Samplers
5283     if ((sum_all_stages[VK_DESCRIPTOR_TYPE_SAMPLER] + sum_all_stages[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER]) >
5284         dev_data->phys_dev_props.limits.maxDescriptorSetSamplers) {
5285         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
5286                         VALIDATION_ERROR_0fe00d1a, "DS",
5287                         "vkCreatePipelineLayout(): sum of sampler bindings among all stages (%d) exceeds device "
5288                         "maxDescriptorSetSamplers limit (%d). %s",
5289                         sum_all_stages[VK_DESCRIPTOR_TYPE_SAMPLER] + sum_all_stages[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER],
                        dev_data->phys_dev_props.limits.maxDescriptorSetSamplers, validation_error_map[VALIDATION_ERROR_0fe00d1a]);
    }

    // Uniform buffers
    if (sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER] > dev_data->phys_dev_props.limits.maxDescriptorSetUniformBuffers) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                        VALIDATION_ERROR_0fe00d1c, "DS",
                        "vkCreatePipelineLayout(): sum of uniform buffer bindings among all stages (%d) exceeds device "
                        "maxDescriptorSetUniformBuffers limit (%d). %s",
                        sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER],
                        dev_data->phys_dev_props.limits.maxDescriptorSetUniformBuffers,
                        validation_error_map[VALIDATION_ERROR_0fe00d1c]);
    }

    // Dynamic uniform buffers
    if (sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC] >
        dev_data->phys_dev_props.limits.maxDescriptorSetUniformBuffersDynamic) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                        VALIDATION_ERROR_0fe00d1e, "DS",
                        "vkCreatePipelineLayout(): sum of dynamic uniform buffer bindings among all stages (%d) exceeds device "
                        "maxDescriptorSetUniformBuffersDynamic limit (%d). %s",
                        sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC],
                        dev_data->phys_dev_props.limits.maxDescriptorSetUniformBuffersDynamic,
                        validation_error_map[VALIDATION_ERROR_0fe00d1e]);
    }

    // Storage buffers
    if (sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER] > dev_data->phys_dev_props.limits.maxDescriptorSetStorageBuffers) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                        VALIDATION_ERROR_0fe00d20, "DS",
                        "vkCreatePipelineLayout(): sum of storage buffer bindings among all stages (%d) exceeds device "
                        "maxDescriptorSetStorageBuffers limit (%d). %s",
                        sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER],
                        dev_data->phys_dev_props.limits.maxDescriptorSetStorageBuffers,
                        validation_error_map[VALIDATION_ERROR_0fe00d20]);
    }

    // Dynamic storage buffers
    if (sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC] >
        dev_data->phys_dev_props.limits.maxDescriptorSetStorageBuffersDynamic) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                        VALIDATION_ERROR_0fe00d22, "DS",
                        "vkCreatePipelineLayout(): sum of dynamic storage buffer bindings among all stages (%d) exceeds device "
                        "maxDescriptorSetStorageBuffersDynamic limit (%d). %s",
                        sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC],
                        dev_data->phys_dev_props.limits.maxDescriptorSetStorageBuffersDynamic,
                        validation_error_map[VALIDATION_ERROR_0fe00d22]);
    }

    // Sampled images
    if ((sum_all_stages[VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE] + sum_all_stages[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER] +
         sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER]) > dev_data->phys_dev_props.limits.maxDescriptorSetSampledImages) {
        skip |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                    VALIDATION_ERROR_0fe00d24, "DS",
                    "vkCreatePipelineLayout(): sum of sampled image bindings among all stages (%d) exceeds device "
                    "maxDescriptorSetSampledImages limit (%d). %s",
                    sum_all_stages[VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE] + sum_all_stages[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER] +
                        sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER],
                    dev_data->phys_dev_props.limits.maxDescriptorSetSampledImages, validation_error_map[VALIDATION_ERROR_0fe00d24]);
    }

    // Storage images
    if ((sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_IMAGE] + sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER]) >
        dev_data->phys_dev_props.limits.maxDescriptorSetStorageImages) {
        skip |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                    VALIDATION_ERROR_0fe00d26, "DS",
                    "vkCreatePipelineLayout(): sum of storage image bindings among all stages (%d) exceeds device "
                    "maxDescriptorSetStorageImages limit (%d). %s",
                    sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_IMAGE] + sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER],
                    dev_data->phys_dev_props.limits.maxDescriptorSetStorageImages, validation_error_map[VALIDATION_ERROR_0fe00d26]);
    }

    // Input attachments
    if (sum_all_stages[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT] > dev_data->phys_dev_props.limits.maxDescriptorSetInputAttachments) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                        VALIDATION_ERROR_0fe00d28, "DS",
                        "vkCreatePipelineLayout(): sum of input attachment bindings among all stages (%d) exceeds device "
                        "maxDescriptorSetInputAttachments limit (%d). %s",
                        sum_all_stages[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT],
                        dev_data->phys_dev_props.limits.maxDescriptorSetInputAttachments,
                        validation_error_map[VALIDATION_ERROR_0fe00d28]);
    }

    return skip;
}

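/* Illustrative sketch (application side, not part of the layer): a set layout that would
 * trip the per-stage sum checks above. The limit value of 96 for maxDescriptorSetSamplers
 * is hypothetical; real limits come from vkGetPhysicalDeviceProperties().
 *
 *   VkDescriptorSetLayoutBinding binding = {};
 *   binding.binding = 0;
 *   binding.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
 *   binding.descriptorCount = 128;                      // summed across all set layouts in the pipeline layout
 *   binding.stageFlags = VK_SHADER_STAGE_ALL_GRAPHICS;  // counts toward every stage's total
 *   // ... create the set layout, then a pipeline layout referencing it ...
 *   // 128 > 96 => the maxDescriptorSetSamplers check reports VALIDATION_ERROR_0fe00d1a
 */
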
static void PostCallRecordCreatePipelineLayout(layer_data *dev_data, const VkPipelineLayoutCreateInfo *pCreateInfo,
                                               const VkPipelineLayout *pPipelineLayout) {
    unique_lock_t lock(global_lock);  // Lock while accessing state

    PIPELINE_LAYOUT_NODE &plNode = dev_data->pipelineLayoutMap[*pPipelineLayout];
    plNode.layout = *pPipelineLayout;
    plNode.set_layouts.resize(pCreateInfo->setLayoutCount);
    for (uint32_t i = 0; i < pCreateInfo->setLayoutCount; ++i) {
        plNode.set_layouts[i] = GetDescriptorSetLayout(dev_data, pCreateInfo->pSetLayouts[i]);
    }
    plNode.push_constant_ranges.resize(pCreateInfo->pushConstantRangeCount);
    for (uint32_t i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
        plNode.push_constant_ranges[i] = pCreateInfo->pPushConstantRanges[i];
    }
    // Implicit unlock
}

VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
                                                    const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    bool skip = PreCallValiateCreatePipelineLayout(dev_data, pCreateInfo);
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->dispatch_table.CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);

    if (VK_SUCCESS == result) {
        PostCallRecordCreatePipelineLayout(dev_data, pCreateInfo, pPipelineLayout);
    }
    return result;
}

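/* The entry point above follows the layer's standard wrapping pattern, repeated throughout
 * this file:
 *   1. PreCallValidate*  -- inspect state and log errors; may skip the down-chain call entirely
 *   2. dispatch down the chain to the next layer/ICD
 *   3. PostCallRecord*   -- on VK_SUCCESS, mirror the new object in the layer's state maps
 * A minimal sketch of the same shape for a hypothetical vkCreateFoo entry point (all names
 * here are placeholders, not real API):
 *
 *   VKAPI_ATTR VkResult VKAPI_CALL CreateFoo(VkDevice device, const VkFooCreateInfo *pCreateInfo,
 *                                            const VkAllocationCallbacks *pAllocator, VkFoo *pFoo) {
 *       layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
 *       if (PreCallValidateCreateFoo(dev_data, pCreateInfo)) return VK_ERROR_VALIDATION_FAILED_EXT;
 *       VkResult result = dev_data->dispatch_table.CreateFoo(device, pCreateInfo, pAllocator, pFoo);
 *       if (VK_SUCCESS == result) PostCallRecordCreateFoo(dev_data, pCreateInfo, pFoo);
 *       return result;
 *   }
 */
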
VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo,
                                                    const VkAllocationCallbacks *pAllocator, VkDescriptorPool *pDescriptorPool) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool);
    if (VK_SUCCESS == result) {
        DESCRIPTOR_POOL_STATE *pNewNode = new DESCRIPTOR_POOL_STATE(*pDescriptorPool, pCreateInfo);
        if (NULL == pNewNode) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                        HandleToUint64(*pDescriptorPool), __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
                        "Out of memory while attempting to allocate DESCRIPTOR_POOL_STATE in vkCreateDescriptorPool()"))
                return VK_ERROR_VALIDATION_FAILED_EXT;
        } else {
            lock_guard_t lock(global_lock);
            dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode;
        }
    } else {
        // Need to do anything if pool create fails?
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL ResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
                                                   VkDescriptorPoolResetFlags flags) {
    // TODO : Add checks for VALIDATION_ERROR_32a00272
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.ResetDescriptorPool(device, descriptorPool, flags);
    if (VK_SUCCESS == result) {
        lock_guard_t lock(global_lock);
        clearDescriptorPool(dev_data, device, descriptorPool, flags);
    }
    return result;
}
// Ensure the pool contains enough descriptors and descriptor sets to satisfy
// an allocation request. Fills common_data with the total number of descriptors of each type required,
// as well as the DescriptorSetLayout ptrs used for later update.
static bool PreCallValidateAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
                                                  cvdescriptorset::AllocateDescriptorSetsData *common_data) {
    // Always update common data
    cvdescriptorset::UpdateAllocateDescriptorSetsData(dev_data, pAllocateInfo, common_data);
    if (dev_data->instance_data->disabled.allocate_descriptor_sets) return false;
    // All state checks for AllocateDescriptorSets are done in a single function
    return cvdescriptorset::ValidateAllocateDescriptorSets(dev_data, pAllocateInfo, common_data);
}
// Allocation state was valid and the down-chain call was made, so update state to reflect the newly allocated descriptor sets
static void PostCallRecordAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
                                                 VkDescriptorSet *pDescriptorSets,
                                                 const cvdescriptorset::AllocateDescriptorSetsData *common_data) {
    // All the updates are contained in a single cvdescriptorset function
    cvdescriptorset::PerformAllocateDescriptorSets(pAllocateInfo, pDescriptorSets, common_data, &dev_data->descriptorPoolMap,
                                                   &dev_data->setMap, dev_data);
}

// TODO: PostCallRecord routine is dependent on data generated in PreCallValidate -- needs to be moved out
VKAPI_ATTR VkResult VKAPI_CALL AllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
                                                      VkDescriptorSet *pDescriptorSets) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    unique_lock_t lock(global_lock);
    cvdescriptorset::AllocateDescriptorSetsData common_data(pAllocateInfo->descriptorSetCount);
    bool skip = PreCallValidateAllocateDescriptorSets(dev_data, pAllocateInfo, &common_data);
    lock.unlock();

    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->dispatch_table.AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);

    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordAllocateDescriptorSets(dev_data, pAllocateInfo, pDescriptorSets, &common_data);
        lock.unlock();
    }
    return result;
}
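
/* Illustrative (application side, hypothetical handles): an allocation that the
 * validation above inspects. The pool must still contain enough free sets and enough
 * descriptors of each type referenced by the requested layouts:
 *
 *   VkDescriptorSetAllocateInfo alloc_info = {};
 *   alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
 *   alloc_info.descriptorPool = pool;
 *   alloc_info.descriptorSetCount = 2;
 *   alloc_info.pSetLayouts = layouts;  // array of 2 VkDescriptorSetLayout handles
 *   VkDescriptorSet sets[2];
 *   vkAllocateDescriptorSets(device, &alloc_info, sets);
 */
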
// Verify state before freeing DescriptorSets
static bool PreCallValidateFreeDescriptorSets(const layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
                                              const VkDescriptorSet *descriptor_sets) {
    if (dev_data->instance_data->disabled.free_descriptor_sets) return false;
    bool skip = false;
    // First make sure sets being destroyed are not currently in-use
    for (uint32_t i = 0; i < count; ++i) {
        if (descriptor_sets[i] != VK_NULL_HANDLE) {
            skip |= validateIdleDescriptorSet(dev_data, descriptor_sets[i], "vkFreeDescriptorSets");
        }
    }

    DESCRIPTOR_POOL_STATE *pool_state = GetDescriptorPoolState(dev_data, pool);
    if (pool_state && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pool_state->createInfo.flags)) {
        // Can't free from a pool created without the FREE_DESCRIPTOR_SET bit
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                        HandleToUint64(pool), __LINE__, VALIDATION_ERROR_28600270, "DS",
                        "It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
                        "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT. %s",
                        validation_error_map[VALIDATION_ERROR_28600270]);
    }
    return skip;
}
// Sets have been removed from the pool so update underlying state
static void PostCallRecordFreeDescriptorSets(layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
                                             const VkDescriptorSet *descriptor_sets) {
    DESCRIPTOR_POOL_STATE *pool_state = GetDescriptorPoolState(dev_data, pool);
    // Update available descriptor sets in pool
    pool_state->availableSets += count;

    // For each freed descriptor set, return its descriptors to the pool as available and remove it from the pool and setMap
    for (uint32_t i = 0; i < count; ++i) {
        if (descriptor_sets[i] != VK_NULL_HANDLE) {
            auto descriptor_set = dev_data->setMap[descriptor_sets[i]];
            uint32_t type_index = 0, descriptor_count = 0;
            for (uint32_t j = 0; j < descriptor_set->GetBindingCount(); ++j) {
                type_index = static_cast<uint32_t>(descriptor_set->GetTypeFromIndex(j));
                descriptor_count = descriptor_set->GetDescriptorCountFromIndex(j);
                pool_state->availableDescriptorTypeCount[type_index] += descriptor_count;
            }
            freeDescriptorSet(dev_data, descriptor_set);
            pool_state->sets.erase(descriptor_set);
        }
    }
}

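/* Illustrative (application side): vkFreeDescriptorSets() is only legal on pools created
 * with the FREE_DESCRIPTOR_SET flag, which the validation above enforces:
 *
 *   VkDescriptorPoolCreateInfo pool_ci = {};
 *   pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
 *   pool_ci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;  // permits freeing individual sets
 *   // ... fill in maxSets/pPoolSizes, create the pool, allocate a set ...
 *   vkFreeDescriptorSets(device, pool, 1, &set);  // OK with the flag;
 *                                                 // without it, VALIDATION_ERROR_28600270 is reported
 */
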
VKAPI_ATTR VkResult VKAPI_CALL FreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count,
                                                  const VkDescriptorSet *pDescriptorSets) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    // Make sure that no sets being destroyed are in-flight
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
    lock.unlock();

    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
        lock.unlock();
    }
    return result;
}
// TODO : This is a proof-of-concept for the core validation architecture.
//  Eventually we'll want to break these functions out into separate files, but
//  everything is kept together here to prove out the design.
// PreCallValidate* handles validating all of the state prior to calling down the chain to UpdateDescriptorSets()
static bool PreCallValidateUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
                                                const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
                                                const VkCopyDescriptorSet *pDescriptorCopies) {
    if (dev_data->instance_data->disabled.update_descriptor_sets) return false;
    // NOTE : UpdateDescriptorSets is somewhat unique in that it operates on a number of DescriptorSets,
    //  so we can't do a single map look-up up-front; the look-ups are done individually in the functions below.

    // Make call(s) that validate state, but don't perform any state updates in this function.
    // DescriptorSets are unique in that we don't yet have class instances here, so use a helper function in the
    //  namespace which parses the params and makes calls into the specific class instances.
    return cvdescriptorset::ValidateUpdateDescriptorSets(dev_data->report_data, dev_data, descriptorWriteCount, pDescriptorWrites,
                                                         descriptorCopyCount, pDescriptorCopies);
}
// PreCallRecord* handles recording state updates prior to the call down the chain to UpdateDescriptorSets()
static void PreCallRecordUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
                                              const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
                                              const VkCopyDescriptorSet *pDescriptorCopies) {
    cvdescriptorset::PerformUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
                                                 pDescriptorCopies);
}

VKAPI_ATTR void VKAPI_CALL UpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount,
                                                const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
                                                const VkCopyDescriptorSet *pDescriptorCopies) {
    // Only map look-up at top level is for device-level layer_data
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
                                                    pDescriptorCopies);
    if (!skip) {
        // Since UpdateDescriptorSets() returns void, there is nothing to check after the down-chain call,
        // so the state update can be recorded before calling down the chain
        PreCallRecordUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
                                          pDescriptorCopies);
        lock.unlock();
        dev_data->dispatch_table.UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
                                                      pDescriptorCopies);
    }
}

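/* Illustrative (application side, hypothetical buffer/set handles): a single write that
 * flows through the validate-then-record path above:
 *
 *   VkDescriptorBufferInfo buffer_info = {buffer, 0, VK_WHOLE_SIZE};
 *   VkWriteDescriptorSet write = {};
 *   write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
 *   write.dstSet = set;
 *   write.dstBinding = 0;
 *   write.descriptorCount = 1;
 *   write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
 *   write.pBufferInfo = &buffer_info;
 *   vkUpdateDescriptorSets(device, 1, &write, 0, nullptr);
 */
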
VKAPI_ATTR VkResult VKAPI_CALL AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo,
                                                      VkCommandBuffer *pCommandBuffer) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
    if (VK_SUCCESS == result) {
        unique_lock_t lock(global_lock);
        auto pPool = GetCommandPoolNode(dev_data, pCreateInfo->commandPool);

        if (pPool) {
            for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) {
                // Add command buffer to its commandPool map
                pPool->commandBuffers.insert(pCommandBuffer[i]);
                GLOBAL_CB_NODE *pCB = new GLOBAL_CB_NODE;
                // Add command buffer to map
                dev_data->commandBufferMap[pCommandBuffer[i]] = pCB;
                ResetCommandBufferState(dev_data, pCommandBuffer[i]);
                pCB->createInfo = *pCreateInfo;
                pCB->device = device;
            }
        }
        lock.unlock();
    }
    return result;
}

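/* Illustrative (application side, hypothetical pool handle): each handle returned here
 * gets a GLOBAL_CB_NODE in commandBufferMap and is tracked by its pool, as recorded above:
 *
 *   VkCommandBufferAllocateInfo alloc_info = {};
 *   alloc_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
 *   alloc_info.commandPool = pool;
 *   alloc_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
 *   alloc_info.commandBufferCount = 1;
 *   VkCommandBuffer cb;
 *   vkAllocateCommandBuffers(device, &alloc_info, &cb);
 */
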
// Add bindings between the given cmd buffer & framebuffer and the framebuffer's children
static void AddFramebufferBinding(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, FRAMEBUFFER_STATE *fb_state) {
    addCommandBufferBinding(&fb_state->cb_bindings, {HandleToUint64(fb_state->framebuffer), kVulkanObjectTypeFramebuffer},
                            cb_state);
    for (auto attachment : fb_state->attachments) {
        auto view_state = attachment.view_state;
        if (view_state) {
            AddCommandBufferBindingImageView(dev_data, cb_state, view_state);
        }
    }
}

VKAPI_ATTR VkResult VKAPI_CALL BeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    // Validate command buffer level
    GLOBAL_CB_NODE *cb_node = GetCBNode(dev_data, commandBuffer);
    if (cb_node) {
        // This implicitly resets the Cmd Buffer, so make sure any fence is done and then clear memory references
        if (cb_node->in_use.load()) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_16e00062, "MEM",
                            "Calling vkBeginCommandBuffer() on active command buffer 0x%" PRIx64
                            " before it has completed. You must check command buffer fence before this call. %s",
                            HandleToUint64(commandBuffer), validation_error_map[VALIDATION_ERROR_16e00062]);
        }
        clear_cmd_buf_and_mem_references(dev_data, cb_node);
        if (cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
            // Secondary Command Buffer
            const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
            if (!pInfo) {
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_16e00066, "DS",
                            "vkBeginCommandBuffer(): Secondary Command Buffer (0x%" PRIx64 ") must have inheritance info. %s",
                            HandleToUint64(commandBuffer), validation_error_map[VALIDATION_ERROR_16e00066]);
            } else {
                if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
                    assert(pInfo->renderPass);
                    string errorString = "";
                    auto framebuffer = GetFramebufferState(dev_data, pInfo->framebuffer);
                    if (framebuffer) {
                        if (framebuffer->createInfo.renderPass != pInfo->renderPass) {
                            // renderPass that framebuffer was created with must be compatible with local renderPass
                            skip |=
                                validateRenderPassCompatibility(dev_data, "framebuffer", framebuffer->rp_state.get(),
                                                                "command buffer", GetRenderPassState(dev_data, pInfo->renderPass),
                                                                "vkBeginCommandBuffer()", VALIDATION_ERROR_0280006e);
                        }
                        // Connect this framebuffer and its children to this cmdBuffer
                        AddFramebufferBinding(dev_data, cb_node, framebuffer);
                    }
                }
                if ((pInfo->occlusionQueryEnable == VK_FALSE || dev_data->enabled_features.occlusionQueryPrecise == VK_FALSE) &&
                    (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), __LINE__,
                                    VALIDATION_ERROR_16e00068, "DS",
                                    "vkBeginCommandBuffer(): Secondary Command Buffer (0x%" PRIx64
                                    ") must not have VK_QUERY_CONTROL_PRECISE_BIT if occlusionQuery is disabled or the device "
                                    "does not support precise occlusion queries. %s",
                                    HandleToUint64(commandBuffer), validation_error_map[VALIDATION_ERROR_16e00068]);
                }
            }
            if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) {
                auto renderPass = GetRenderPassState(dev_data, pInfo->renderPass);
                if (renderPass) {
                    if (pInfo->subpass >= renderPass->createInfo.subpassCount) {
                        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), __LINE__,
                                        VALIDATION_ERROR_0280006c, "DS",
                                        "vkBeginCommandBuffer(): Secondary Command Buffer (0x%" PRIx64
                                        ") must have a subpass index (%d) that is less than the number of subpasses (%d). %s",
                                        HandleToUint64(commandBuffer), pInfo->subpass, renderPass->createInfo.subpassCount,
                                        validation_error_map[VALIDATION_ERROR_0280006c]);
                    }
                }
            }
        }
        if (CB_RECORDING == cb_node->state) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_16e00062, "DS",
                            "vkBeginCommandBuffer(): Cannot call Begin on command buffer (0x%" PRIx64
                            ") in the RECORDING state. Must first call vkEndCommandBuffer(). %s",
                            HandleToUint64(commandBuffer), validation_error_map[VALIDATION_ERROR_16e00062]);
        } else if (CB_RECORDED == cb_node->state || CB_INVALID_COMPLETE == cb_node->state) {
            VkCommandPool cmdPool = cb_node->createInfo.commandPool;
            auto pPool = GetCommandPoolNode(dev_data, cmdPool);
            if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
                skip |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_16e00064, "DS",
                    "Call to vkBeginCommandBuffer() on command buffer (0x%" PRIx64
                    ") attempts to implicitly reset cmdBuffer created from command pool (0x%" PRIx64
                    ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set. %s",
                    HandleToUint64(commandBuffer), HandleToUint64(cmdPool), validation_error_map[VALIDATION_ERROR_16e00064]);
            }
            ResetCommandBufferState(dev_data, commandBuffer);
        }
        // Set updated state here in case implicit reset occurs above
        cb_node->state = CB_RECORDING;
        cb_node->beginInfo = *pBeginInfo;
        if (cb_node->beginInfo.pInheritanceInfo) {
            cb_node->inheritanceInfo = *(cb_node->beginInfo.pInheritanceInfo);
            cb_node->beginInfo.pInheritanceInfo = &cb_node->inheritanceInfo;
            // If this is a secondary command buffer that inherits render pass state, update the inherited items
            if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) &&
                (cb_node->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
                cb_node->activeRenderPass = GetRenderPassState(dev_data, cb_node->beginInfo.pInheritanceInfo->renderPass);
                cb_node->activeSubpass = cb_node->beginInfo.pInheritanceInfo->subpass;
                cb_node->activeFramebuffer = cb_node->beginInfo.pInheritanceInfo->framebuffer;
                cb_node->framebuffers.insert(cb_node->beginInfo.pInheritanceInfo->framebuffer);
            }
        }
    }
    lock.unlock();
    if (skip) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    VkResult result = dev_data->dispatch_table.BeginCommandBuffer(commandBuffer, pBeginInfo);

    return result;
}
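
/* Illustrative (application side, hypothetical handles): beginning a secondary command
 * buffer that continues a render pass, exercising the inheritance checks above:
 *
 *   VkCommandBufferInheritanceInfo inherit = {};
 *   inherit.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
 *   inherit.renderPass = render_pass;   // must be compatible with the framebuffer's renderPass (VALIDATION_ERROR_0280006e)
 *   inherit.subpass = 0;                // must be < subpassCount (VALIDATION_ERROR_0280006c)
 *   inherit.framebuffer = framebuffer;  // optional, but enables the compatibility check
 *   VkCommandBufferBeginInfo begin = {};
 *   begin.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
 *   begin.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
 *   begin.pInheritanceInfo = &inherit;  // omitting this for a secondary CB triggers VALIDATION_ERROR_16e00066
 *   vkBeginCommandBuffer(secondary_cb, &begin);
 */
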
static void PostCallRecordEndCommandBuffer(layer_data *dev_data, GLOBAL_CB_NODE *cb_state) {
    // Cached validation is specific to a single recording of a specific command buffer.
    for (auto descriptor_set : cb_state->validated_descriptor_sets) {
        descriptor_set->ClearCachedValidation(cb_state);
    }
    cb_state->validated_descriptor_sets.clear();
}

VKAPI_ATTR VkResult VKAPI_CALL EndCommandBuffer(VkCommandBuffer commandBuffer) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        if ((VK_COMMAND_BUFFER_LEVEL_PRIMARY == pCB->createInfo.level) ||
            !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
            // This needs spec clarification to update valid usage, see comments in PR:
            // https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/pull/516#discussion_r63013756
            skip |= insideRenderPass(dev_data, pCB, "vkEndCommandBuffer()", VALIDATION_ERROR_27400078);
        }
        skip |= ValidateCmd(dev_data, pCB, CMD_ENDCOMMANDBUFFER, "vkEndCommandBuffer()");
        for (auto query : pCB->activeQueries) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_2740007a, "DS",
                            "Ending command buffer with in-progress query: queryPool 0x%" PRIx64 ", index %d. %s",
                            HandleToUint64(query.pool), query.index, validation_error_map[VALIDATION_ERROR_2740007a]);
        }
    }
    if (!skip) {
        lock.unlock();
        auto result = dev_data->dispatch_table.EndCommandBuffer(commandBuffer);
        lock.lock();
        PostCallRecordEndCommandBuffer(dev_data, pCB);
        if (VK_SUCCESS == result) {
            pCB->state = CB_RECORDED;
        }
        return result;
    } else {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
}

VKAPI_ATTR VkResult VKAPI_CALL ResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    VkCommandPool cmdPool = pCB->createInfo.commandPool;
    auto pPool = GetCommandPoolNode(dev_data, cmdPool);
    if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_3260005c, "DS",
                        "Attempt to reset command buffer (0x%" PRIx64 ") created from command pool (0x%" PRIx64
                        ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set. %s",
                        HandleToUint64(commandBuffer), HandleToUint64(cmdPool), validation_error_map[VALIDATION_ERROR_3260005c]);
    }
    skip |= checkCommandBufferInFlight(dev_data, pCB, "reset", VALIDATION_ERROR_3260005a);
    lock.unlock();
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.ResetCommandBuffer(commandBuffer, flags);
    if (VK_SUCCESS == result) {
        lock.lock();
        ResetCommandBufferState(dev_data, commandBuffer);
        lock.unlock();
    }
    return result;
}

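/* Illustrative (application side): explicit reset -- like the implicit reset performed
 * by vkBeginCommandBuffer() above -- requires the pool's reset bit:
 *
 *   VkCommandPoolCreateInfo pool_ci = {};
 *   pool_ci.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
 *   pool_ci.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;  // permits per-buffer reset
 *   // ... create the pool, allocate and record a command buffer ...
 *   vkResetCommandBuffer(cb, 0);  // without the flag, VALIDATION_ERROR_3260005c is reported
 */
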
VKAPI_ATTR void VKAPI_CALL CmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
                                           VkPipeline pipeline) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
    if (cb_state) {
        skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdBindPipeline()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                      VALIDATION_ERROR_18002415);
        skip |= ValidateCmd(dev_data, cb_state, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
        // TODO: VALIDATION_ERROR_18000612 VALIDATION_ERROR_18000616 -- using ValidatePipelineBindPoint

        auto pipe_state = getPipelineState(dev_data, pipeline);
        if (VK_PIPELINE_BIND_POINT_GRAPHICS == pipelineBindPoint) {
            cb_state->status &= ~cb_state->static_status;
            cb_state->static_status = MakeStaticStateMask(pipe_state->graphicsPipelineCI.ptr()->pDynamicState);
            cb_state->status |= cb_state->static_status;
        }
        cb_state->lastBound[pipelineBindPoint].pipeline_state = pipe_state;
        set_pipeline_state(pipe_state);
        skip |= validate_dual_src_blend_feature(dev_data, pipe_state);
        addCommandBufferBinding(&pipe_state->cb_bindings, {HandleToUint64(pipeline), kVulkanObjectTypePipeline}, cb_state);
    }
    lock.unlock();
    if (!skip) dev_data->dispatch_table.CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
}

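/* Illustrative (application side): whether the vkCmdSet* entry points below are legal
 * depends on the dynamic state list baked into the bound pipeline, which is captured in
 * static_status above when the pipeline is bound:
 *
 *   VkDynamicState dynamic[] = {VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR};
 *   VkPipelineDynamicStateCreateInfo dyn_ci = {};
 *   dyn_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
 *   dyn_ci.dynamicStateCount = 2;
 *   dyn_ci.pDynamicStates = dynamic;
 *   // With this pipeline bound, vkCmdSetViewport()/vkCmdSetScissor() are expected;
 *   // calling e.g. vkCmdSetLineWidth() instead would hit the static_status checks below.
 */
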
VKAPI_ATTR void VKAPI_CALL CmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount,
                                          const VkViewport *pViewports) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetViewport()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1e002415);
        skip |= ValidateCmd(dev_data, pCB, CMD_SETVIEWPORT, "vkCmdSetViewport()");
        if (pCB->static_status & CBSTATUS_VIEWPORT_SET) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_1e00098a, "DS",
                            "vkCmdSetViewport(): pipeline was created without VK_DYNAMIC_STATE_VIEWPORT flag. %s.",
                            validation_error_map[VALIDATION_ERROR_1e00098a]);
        }
        if (!skip) {
            pCB->viewportMask |= ((1u << viewportCount) - 1u) << firstViewport;
            pCB->status |= CBSTATUS_VIEWPORT_SET;
        }
    }
    lock.unlock();
    if (!skip) dev_data->dispatch_table.CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
}

VKAPI_ATTR void VKAPI_CALL CmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount,
                                         const VkRect2D *pScissors) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetScissor()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1d802415);
        skip |= ValidateCmd(dev_data, pCB, CMD_SETSCISSOR, "vkCmdSetScissor()");
        if (pCB->static_status & CBSTATUS_SCISSOR_SET) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_1d80049c, "DS",
                            "vkCmdSetScissor(): pipeline was created without VK_DYNAMIC_STATE_SCISSOR flag. %s.",
                            validation_error_map[VALIDATION_ERROR_1d80049c]);
        }
        if (!skip) {
            pCB->scissorMask |= ((1u << scissorCount) - 1u) << firstScissor;
            pCB->status |= CBSTATUS_SCISSOR_SET;
        }
    }
    lock.unlock();
    if (!skip) dev_data->dispatch_table.CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors);
}

VKAPI_ATTR void VKAPI_CALL CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetLineWidth()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1d602415);
        skip |= ValidateCmd(dev_data, pCB, CMD_SETLINEWIDTH, "vkCmdSetLineWidth()");

        if (pCB->static_status & CBSTATUS_LINE_WIDTH_SET) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_1d600626, "DS",
                            "vkCmdSetLineWidth(): pipeline was created without VK_DYNAMIC_STATE_LINE_WIDTH flag. %s",
                            validation_error_map[VALIDATION_ERROR_1d600626]);
        }
        if (!skip) {
            pCB->status |= CBSTATUS_LINE_WIDTH_SET;
        }
    }
    lock.unlock();
    if (!skip) dev_data->dispatch_table.CmdSetLineWidth(commandBuffer, lineWidth);
}

VKAPI_ATTR void VKAPI_CALL CmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp,
                                           float depthBiasSlopeFactor) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetDepthBias()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1cc02415);
        skip |= ValidateCmd(dev_data, pCB, CMD_SETDEPTHBIAS, "vkCmdSetDepthBias()");
        if (pCB->static_status & CBSTATUS_DEPTH_BIAS_SET) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_1cc0062a, "DS",
                            "vkCmdSetDepthBias(): pipeline was created without VK_DYNAMIC_STATE_DEPTH_BIAS flag. %s.",
                            validation_error_map[VALIDATION_ERROR_1cc0062a]);
        }
        if ((depthBiasClamp != 0.0) && (!dev_data->enabled_features.depthBiasClamp)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_1cc0062c, "DS",
                            "vkCmdSetDepthBias(): the depthBiasClamp device feature is disabled: the depthBiasClamp parameter must "
                            "be set to 0.0. %s",
                            validation_error_map[VALIDATION_ERROR_1cc0062c]);
        }
        if (!skip) {
            pCB->status |= CBSTATUS_DEPTH_BIAS_SET;
        }
    }
    lock.unlock();
    if (!skip)
        dev_data->dispatch_table.CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor);
}

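/* Illustrative: when the depthBiasClamp device feature is disabled, only a clamp of 0.0
 * passes the feature check above:
 *
 *   vkCmdSetDepthBias(cb, 1.25f, 0.0f, 1.75f);  // OK without the feature
 *   vkCmdSetDepthBias(cb, 1.25f, 0.5f, 1.75f);  // reports VALIDATION_ERROR_1cc0062c
 */
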
VKAPI_ATTR void VKAPI_CALL CmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetBlendConstants()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1ca02415);
        skip |= ValidateCmd(dev_data, pCB, CMD_SETBLENDCONSTANTS, "vkCmdSetBlendConstants()");
        if (pCB->static_status & CBSTATUS_BLEND_CONSTANTS_SET) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_1ca004c8, "DS",
                            "vkCmdSetBlendConstants(): pipeline was created without VK_DYNAMIC_STATE_BLEND_CONSTANTS flag. %s.",
                            validation_error_map[VALIDATION_ERROR_1ca004c8]);
        }
        if (!skip) {
            pCB->status |= CBSTATUS_BLEND_CONSTANTS_SET;
        }
    }
    lock.unlock();
    if (!skip) dev_data->dispatch_table.CmdSetBlendConstants(commandBuffer, blendConstants);
}

VKAPI_ATTR void VKAPI_CALL CmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetDepthBounds()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1ce02415);
        skip |= ValidateCmd(dev_data, pCB, CMD_SETDEPTHBOUNDS, "vkCmdSetDepthBounds()");
        if (pCB->static_status & CBSTATUS_DEPTH_BOUNDS_SET) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_1ce004ae, "DS",
                            "vkCmdSetDepthBounds(): pipeline was created without VK_DYNAMIC_STATE_DEPTH_BOUNDS flag. %s.",
                            validation_error_map[VALIDATION_ERROR_1ce004ae]);
        }
        if (!skip) {
            pCB->status |= CBSTATUS_DEPTH_BOUNDS_SET;
        }
    }
    lock.unlock();
    if (!skip) dev_data->dispatch_table.CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds);
}

VKAPI_ATTR void VKAPI_CALL CmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
                                                    uint32_t compareMask) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip |=
            ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetStencilCompareMask()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1da02415);
        skip |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILCOMPAREMASK, "vkCmdSetStencilCompareMask()");
        if (pCB->static_status & CBSTATUS_STENCIL_READ_MASK_SET) {
            skip |= log_msg(
                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_1da004b4, "DS",
                "vkCmdSetStencilCompareMask(): pipeline was created without VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK flag. %s.",
                validation_error_map[VALIDATION_ERROR_1da004b4]);
        }
        if (!skip) {
            pCB->status |= CBSTATUS_STENCIL_READ_MASK_SET;
        }
    }
    lock.unlock();
    if (!skip) dev_data->dispatch_table.CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask);
}

VKAPI_ATTR void VKAPI_CALL CmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip |=
            ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetStencilWriteMask()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1de02415);
        skip |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASK, "vkCmdSetStencilWriteMask()");
        if (pCB->static_status & CBSTATUS_STENCIL_WRITE_MASK_SET) {
            skip |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_1de004b6, "DS",
                        "vkCmdSetStencilWriteMask(): pipeline was created without VK_DYNAMIC_STATE_STENCIL_WRITE_MASK flag. %s.",
                        validation_error_map[VALIDATION_ERROR_1de004b6]);
        }
        if (!skip) {
            pCB->status |= CBSTATUS_STENCIL_WRITE_MASK_SET;
        }
    }
    lock.unlock();
    if (!skip) dev_data->dispatch_table.CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask);
}

VKAPI_ATTR void VKAPI_CALL CmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip |=
            ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetStencilReference()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1dc02415);
        skip |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILREFERENCE, "vkCmdSetStencilReference()");
        if (pCB->static_status & CBSTATUS_STENCIL_REFERENCE_SET) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_1dc004b8, "DS",
                            "vkCmdSetStencilReference(): pipeline was created without VK_DYNAMIC_STATE_STENCIL_REFERENCE flag. %s.",
                            validation_error_map[VALIDATION_ERROR_1dc004b8]);
        }
        if (!skip) {
            pCB->status |= CBSTATUS_STENCIL_REFERENCE_SET;
        }
    }
    lock.unlock();
    if (!skip) dev_data->dispatch_table.CmdSetStencilReference(commandBuffer, faceMask, reference);
}

static void PreCallRecordCmdBindDescriptorSets(layer_data *device_data, GLOBAL_CB_NODE *cb_state,
                                               VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t firstSet,
                                               uint32_t setCount, const VkDescriptorSet *pDescriptorSets,
                                               uint32_t dynamicOffsetCount, const uint32_t *pDynamicOffsets) {
    uint32_t total_dynamic_descriptors = 0;
    string error_string = "";
    uint32_t last_set_index = firstSet + setCount - 1;
    auto last_bound = &cb_state->lastBound[pipelineBindPoint];

    if (last_set_index >= last_bound->boundDescriptorSets.size()) {
        last_bound->boundDescriptorSets.resize(last_set_index + 1);
        last_bound->dynamicOffsets.resize(last_set_index + 1);
    }
    auto old_final_bound_set = last_bound->boundDescriptorSets[last_set_index];
    auto pipeline_layout = getPipelineLayout(device_data, layout);
    for (uint32_t set_idx = 0; set_idx < setCount; set_idx++) {
        cvdescriptorset::DescriptorSet *descriptor_set = GetSetNode(device_data, pDescriptorSets[set_idx]);
        if (descriptor_set) {
            last_bound->pipeline_layout = *pipeline_layout;

            if ((last_bound->boundDescriptorSets[set_idx + firstSet] != nullptr) &&
                last_bound->boundDescriptorSets[set_idx + firstSet]->IsPushDescriptor()) {
                last_bound->push_descriptor_set = nullptr;
                last_bound->boundDescriptorSets[set_idx + firstSet] = nullptr;
            }

            last_bound->boundDescriptorSets[set_idx + firstSet] = descriptor_set;

            auto set_dynamic_descriptor_count = descriptor_set->GetDynamicDescriptorCount();
            last_bound->dynamicOffsets[firstSet + set_idx].clear();
            if (set_dynamic_descriptor_count) {
                last_bound->dynamicOffsets[firstSet + set_idx] =
                    std::vector<uint32_t>(pDynamicOffsets + total_dynamic_descriptors,
                                          pDynamicOffsets + total_dynamic_descriptors + set_dynamic_descriptor_count);
                total_dynamic_descriptors += set_dynamic_descriptor_count;
            }
            cb_state->validated_descriptor_sets.insert(descriptor_set);
        }
        // For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
        if (firstSet > 0) {
            for (uint32_t i = 0; i < firstSet; ++i) {
                if (last_bound->boundDescriptorSets[i] &&
                    !verify_set_layout_compatibility(last_bound->boundDescriptorSets[i], pipeline_layout, i, error_string)) {
                    last_bound->boundDescriptorSets[i] = nullptr;
                }
            }
        }
        // Check if the newly last-bound set invalidates any remaining bound sets
        if ((last_bound->boundDescriptorSets.size() - 1) > (last_set_index)) {
            if (old_final_bound_set &&
                !verify_set_layout_compatibility(old_final_bound_set, pipeline_layout, last_set_index, error_string)) {
                last_bound->boundDescriptorSets.resize(last_set_index + 1);
            }
        }
    }
}

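/* Illustrative (application side, hypothetical limit value): the alignment check in the
 * validation below requires every dynamic uniform buffer offset to be a multiple of
 * minUniformBufferOffsetAlignment -- assume 256 here:
 *
 *   uint32_t offsets[2] = {0, 256};  // one offset per dynamic descriptor, both 256-aligned
 *   vkCmdBindDescriptorSets(cb, VK_PIPELINE_BIND_POINT_GRAPHICS, layout, 0, 1, &set, 2, offsets);
 *   // An offset such as 100 would fail SafeModulo() and report VALIDATION_ERROR_17c002d4.
 */
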
static bool PreCallValidateCmdBindDescriptorSets(layer_data *device_data, GLOBAL_CB_NODE *cb_state,
                                                 VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t firstSet,
                                                 uint32_t setCount, const VkDescriptorSet *pDescriptorSets,
                                                 uint32_t dynamicOffsetCount, const uint32_t *pDynamicOffsets) {
    bool skip = false;
    skip |= ValidateCmdQueueFlags(device_data, cb_state, "vkCmdBindDescriptorSets()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                  VALIDATION_ERROR_17c02415);
    skip |= ValidateCmd(device_data, cb_state, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
    // Track total count of dynamic descriptor types to make sure we have an offset for each one
    uint32_t total_dynamic_descriptors = 0;
    string error_string = "";
    uint32_t last_set_index = firstSet + setCount - 1;

    if (last_set_index >= cb_state->lastBound[pipelineBindPoint].boundDescriptorSets.size()) {
        cb_state->lastBound[pipelineBindPoint].boundDescriptorSets.resize(last_set_index + 1);
        cb_state->lastBound[pipelineBindPoint].dynamicOffsets.resize(last_set_index + 1);
    }
    auto pipeline_layout = getPipelineLayout(device_data, layout);
    for (uint32_t set_idx = 0; set_idx < setCount; set_idx++) {
        cvdescriptorset::DescriptorSet *descriptor_set = GetSetNode(device_data, pDescriptorSets[set_idx]);
        if (descriptor_set) {
            if (!descriptor_set->IsUpdated() && (descriptor_set->GetTotalDescriptorCount() != 0)) {
                skip |= log_msg(
                    device_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                    HandleToUint64(pDescriptorSets[set_idx]), __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
                    "Descriptor Set 0x%" PRIx64 " bound but it was never updated. You may want to either update it or not bind it.",
                    HandleToUint64(pDescriptorSets[set_idx]));
            }
            // Verify that the set being bound is compatible with the overlapping setLayout of pipelineLayout
            if (!verify_set_layout_compatibility(descriptor_set, pipeline_layout, set_idx + firstSet, error_string)) {
                skip |=
                    log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                            HandleToUint64(pDescriptorSets[set_idx]), __LINE__, VALIDATION_ERROR_17c002cc, "DS",
                            "descriptorSet #%u being bound is not compatible with overlapping descriptorSetLayout at index %u of "
                            "pipelineLayout 0x%" PRIx64 " due to: %s. %s",
                            set_idx, set_idx + firstSet, HandleToUint64(layout), error_string.c_str(),
                            validation_error_map[VALIDATION_ERROR_17c002cc]);
            }

            auto set_dynamic_descriptor_count = descriptor_set->GetDynamicDescriptorCount();

            if (set_dynamic_descriptor_count) {
                // First make sure we won't overstep the bounds of the pDynamicOffsets array
                if ((total_dynamic_descriptors + set_dynamic_descriptor_count) > dynamicOffsetCount) {
                    skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, HandleToUint64(pDescriptorSets[set_idx]),
                                    __LINE__, DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
                                    "descriptorSet #%u (0x%" PRIx64
                                    ") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets array. "
                                    "There must be one dynamic offset for each dynamic descriptor being bound.",
                                    set_idx, HandleToUint64(pDescriptorSets[set_idx]), set_dynamic_descriptor_count,
                                    (dynamicOffsetCount - total_dynamic_descriptors));
                } else {  // Validate dynamic offsets and dynamic offset minimums
                    uint32_t cur_dyn_offset = total_dynamic_descriptors;
                    for (uint32_t d = 0; d < descriptor_set->GetTotalDescriptorCount(); d++) {
                        if (descriptor_set->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
                            if (SafeModulo(pDynamicOffsets[cur_dyn_offset],
                                           device_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) !=
                                0) {
                                skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                                VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
                                                VALIDATION_ERROR_17c002d4, "DS",
                                                "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u but must be a multiple of "
                                                "device limit minUniformBufferOffsetAlignment 0x%" PRIxLEAST64 ". %s",
                                                cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
                                                device_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment,
                                                validation_error_map[VALIDATION_ERROR_17c002d4]);
                            }
                            cur_dyn_offset++;
                        } else if (descriptor_set->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
                            if (SafeModulo(pDynamicOffsets[cur_dyn_offset],
                                           device_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) !=
                                0) {
                                skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                                VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
                                                VALIDATION_ERROR_17c002d4, "DS",
                                                "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u but must be a multiple of "
                                                "device limit minStorageBufferOffsetAlignment 0x%" PRIxLEAST64 ". %s",
                                                cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
                                                device_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment,
                                                validation_error_map[VALIDATION_ERROR_17c002d4]);
                            }
                            cur_dyn_offset++;
                        }
                    }
                    // Keep running total of dynamic descriptor count to verify at the end
                    total_dynamic_descriptors += set_dynamic_descriptor_count;
                }
            }
        } else {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                            HandleToUint64(pDescriptorSets[set_idx]), __LINE__, DRAWSTATE_INVALID_SET, "DS",
                            "Attempt to bind descriptor set 0x%" PRIx64 " that doesn't exist!",
                            HandleToUint64(pDescriptorSets[set_idx]));
        }
    }
    // dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
    if (total_dynamic_descriptors != dynamicOffsetCount) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(cb_state->commandBuffer), __LINE__, VALIDATION_ERROR_17c002ce, "DS",
                        "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount is %u. It should "
                        "exactly match the number of dynamic descriptors. %s",
                        setCount, total_dynamic_descriptors, dynamicOffsetCount, validation_error_map[VALIDATION_ERROR_17c002ce]);
    }
    return skip;
}
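
// Application-side sketch of a call that exercises the dynamic-offset checks above. The handles
// and the offset value are hypothetical; 256 assumes it satisfies the device's
// minUniformBufferOffsetAlignment limit:
//
//     VkDescriptorSet sets[2] = {set0, set1};  // set1 holds one UNIFORM_BUFFER_DYNAMIC descriptor
//     uint32_t dynamic_offsets[1] = {256};     // exactly one offset per dynamic descriptor bound
//     vkCmdBindDescriptorSets(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout_handle,
//                             0, 2, sets, 1, dynamic_offsets);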

VKAPI_ATTR void VKAPI_CALL CmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
                                                 VkPipelineLayout layout, uint32_t firstSet, uint32_t setCount,
                                                 const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
                                                 const uint32_t *pDynamicOffsets) {
    bool skip = false;
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
    assert(cb_state);
    skip = PreCallValidateCmdBindDescriptorSets(device_data, cb_state, pipelineBindPoint, layout, firstSet, setCount,
                                                pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
    if (!skip) {
        PreCallRecordCmdBindDescriptorSets(device_data, cb_state, pipelineBindPoint, layout, firstSet, setCount, pDescriptorSets,
                                           dynamicOffsetCount, pDynamicOffsets);
        lock.unlock();
        device_data->dispatch_table.CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount,
                                                          pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
    } else {
        lock.unlock();
    }
}

// Validates that the supplied bind point is supported for the command buffer (viz. the command pool)
// Takes an array of error codes because some of the VUIDs (e.g. vkCmdBindPipeline) are written per bind point
// TODO add vkCmdBindPipeline bind_point validation using this call.
bool ValidatePipelineBindPoint(layer_data *device_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
                               const char *func_name,
                               const std::array<UNIQUE_VALIDATION_ERROR_CODE, VK_PIPELINE_BIND_POINT_RANGE_SIZE> &bind_errors) {
    bool skip = false;
    auto pool = GetCommandPoolNode(device_data, cb_state->createInfo.commandPool);
    if (pool) {  // The loss of a pool in a recording cmd is reported in DestroyCommandPool
        static const VkQueueFlags flag_mask[VK_PIPELINE_BIND_POINT_RANGE_SIZE] = {VK_QUEUE_GRAPHICS_BIT, VK_QUEUE_COMPUTE_BIT};
        const auto bind_point_index = bind_point - VK_PIPELINE_BIND_POINT_BEGIN_RANGE;  // typeof enum is not defined, use auto
        const auto &qfp = GetPhysDevProperties(device_data)->queue_family_properties[pool->queueFamilyIndex];
        if (0 == (qfp.queueFlags & flag_mask[bind_point_index])) {
            const UNIQUE_VALIDATION_ERROR_CODE error = bind_errors[bind_point_index];
            auto cb_u64 = HandleToUint64(cb_state->commandBuffer);
            auto cp_u64 = HandleToUint64(cb_state->createInfo.commandPool);
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            cb_u64, __LINE__, error, "DS",
                            "%s: CommandBuffer 0x%" PRIxLEAST64 " was allocated from VkCommandPool 0x%" PRIxLEAST64
                            " that does not support bindpoint %s. %s",
                            func_name, cb_u64, cp_u64, string_VkPipelineBindPoint(bind_point), validation_error_map[error]);
        }
    }
    return skip;
}
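
// Usage sketch: callers supply one error code per bind point, indexed graphics-first, e.g. the
// push-descriptor path below passes
//
//     {{VALIDATION_ERROR_1be002d6, VALIDATION_ERROR_1be002d6}}
//
// where index 0 covers VK_PIPELINE_BIND_POINT_GRAPHICS and index 1 VK_PIPELINE_BIND_POINT_COMPUTE.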

static bool PreCallValidateCmdPushDescriptorSetKHR(layer_data *device_data, GLOBAL_CB_NODE *cb_state,
                                                   const VkPipelineBindPoint bind_point, const VkPipelineLayout layout,
                                                   const uint32_t set, const uint32_t descriptor_write_count,
                                                   const VkWriteDescriptorSet *descriptor_writes, const char *func_name) {
    bool skip = false;
    skip |= ValidateCmd(device_data, cb_state, CMD_PUSHDESCRIPTORSETKHR, func_name);
    skip |= ValidateCmdQueueFlags(device_data, cb_state, func_name, (VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT),
                                  VALIDATION_ERROR_1be02415);
    skip |= ValidatePipelineBindPoint(device_data, cb_state, bind_point, func_name,
                                      {{VALIDATION_ERROR_1be002d6, VALIDATION_ERROR_1be002d6}});
    auto layout_data = getPipelineLayout(device_data, layout);

    // Validate the set index points to a push descriptor set and is in range
    if (layout_data) {
        const auto &set_layouts = layout_data->set_layouts;
        const auto layout_u64 = HandleToUint64(layout);
        if (set < set_layouts.size()) {
            const auto *dsl = set_layouts[set].get();
            if (dsl && (0 == (dsl->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR))) {
                // Accumulate with |= so earlier failures are not discarded
                skip |=
                    log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, layout_u64, __LINE__, VALIDATION_ERROR_1be002da, "DS",
                            "%s: Set index %" PRIu32
                            " does not match push descriptor set layout index for VkPipelineLayout 0x%" PRIxLEAST64 ". %s",
                            func_name, set, layout_u64, validation_error_map[VALIDATION_ERROR_1be002da]);
            }
        } else {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT,
                            layout_u64, __LINE__, VALIDATION_ERROR_1be002d8, "DS",
                            "%s: Set index %" PRIu32 " is outside of range for VkPipelineLayout 0x%" PRIxLEAST64 " (set < %" PRIu32
                            "). %s",
                            func_name, set, layout_u64, static_cast<uint32_t>(set_layouts.size()),
                            validation_error_map[VALIDATION_ERROR_1be002d8]);
        }
    }

    return skip;
}
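
// For the flag check above to pass, the application must have created the set layout at 'set'
// with the push-descriptor flag. A minimal sketch (the binding contents are hypothetical):
//
//     VkDescriptorSetLayoutCreateInfo info = {};
//     info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
//     info.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR;
//     info.bindingCount = 1;
//     info.pBindings = &binding;  // e.g. a VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER binding
//     vkCreateDescriptorSetLayout(device, &info, nullptr, &set_layout);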

static void PreCallRecordCmdPushDescriptorSetKHR(layer_data *device_data, GLOBAL_CB_NODE *cb_state,
                                                 VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t set,
                                                 uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites) {
    if (set >= cb_state->lastBound[pipelineBindPoint].boundDescriptorSets.size()) {
        cb_state->lastBound[pipelineBindPoint].boundDescriptorSets.resize(set + 1);
        cb_state->lastBound[pipelineBindPoint].dynamicOffsets.resize(set + 1);
    }
    const auto &layout_state = getPipelineLayout(device_data, layout);
    std::unique_ptr<cvdescriptorset::DescriptorSet> new_desc{
        new cvdescriptorset::DescriptorSet(0, 0, layout_state->set_layouts[set], device_data)};
    cb_state->lastBound[pipelineBindPoint].boundDescriptorSets[set] = new_desc.get();
    cb_state->lastBound[pipelineBindPoint].push_descriptor_set = std::move(new_desc);
}

VKAPI_ATTR void VKAPI_CALL CmdPushDescriptorSetKHR(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
                                                   VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount,
                                                   const VkWriteDescriptorSet *pDescriptorWrites) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    auto cb_state = GetCBNode(device_data, commandBuffer);
    bool skip = PreCallValidateCmdPushDescriptorSetKHR(device_data, cb_state, pipelineBindPoint, layout, set, descriptorWriteCount,
                                                       pDescriptorWrites, "vkCmdPushDescriptorSetKHR()");
    if (!skip) {
        PreCallRecordCmdPushDescriptorSetKHR(device_data, cb_state, pipelineBindPoint, layout, set, descriptorWriteCount,
                                             pDescriptorWrites);
        lock.unlock();
        device_data->dispatch_table.CmdPushDescriptorSetKHR(commandBuffer, pipelineBindPoint, layout, set, descriptorWriteCount,
                                                            pDescriptorWrites);
    }
}

static VkDeviceSize GetIndexAlignment(VkIndexType indexType) {
    switch (indexType) {
        case VK_INDEX_TYPE_UINT16:
            return 2;
        case VK_INDEX_TYPE_UINT32:
            return 4;
        default:
            // Not a real index type. Express no alignment requirement here; we expect the upper
            // layer to have already flagged the enum as invalid.
            return 1;
    }
}
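
// Worked example: with VK_INDEX_TYPE_UINT32 the required alignment is 4, so an offset of 6 fails
// the check below (6 % 4 == 2) while an offset of 8 passes (8 % 4 == 0). VK_INDEX_TYPE_UINT16
// only requires 2-byte alignment, so both of those offsets would pass.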

VKAPI_ATTR void VKAPI_CALL CmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                              VkIndexType indexType) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);

    auto buffer_state = GetBufferState(dev_data, buffer);
    auto cb_node = GetCBNode(dev_data, commandBuffer);
    assert(cb_node);
    assert(buffer_state);

    skip |= ValidateBufferUsageFlags(dev_data, buffer_state, VK_BUFFER_USAGE_INDEX_BUFFER_BIT, true, VALIDATION_ERROR_17e00362,
                                     "vkCmdBindIndexBuffer()", "VK_BUFFER_USAGE_INDEX_BUFFER_BIT");
    skip |= ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdBindIndexBuffer()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_17e02415);
    skip |= ValidateCmd(dev_data, cb_node, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
    skip |= ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCmdBindIndexBuffer()", VALIDATION_ERROR_17e00364);
    auto offset_align = GetIndexAlignment(indexType);
    if (offset % offset_align) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_17e00360, "DS",
                        "vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") does not fall on alignment (%s) boundary. %s", offset,
                        string_VkIndexType(indexType), validation_error_map[VALIDATION_ERROR_17e00360]);
    }

    if (skip) return;

    std::function<bool()> function = [=]() {
        return ValidateBufferMemoryIsValid(dev_data, buffer_state, "vkCmdBindIndexBuffer()");
    };
    cb_node->queue_submit_functions.push_back(function);
    cb_node->status |= CBSTATUS_INDEX_BUFFER_BOUND;

    lock.unlock();
    dev_data->dispatch_table.CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
}

void updateResourceTracking(GLOBAL_CB_NODE *pCB, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers) {
    uint32_t end = firstBinding + bindingCount;
    if (pCB->currentDrawData.buffers.size() < end) {
        pCB->currentDrawData.buffers.resize(end);
    }
    for (uint32_t i = 0; i < bindingCount; ++i) {
        pCB->currentDrawData.buffers[i + firstBinding] = pBuffers[i];
    }
}
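
// Example: updateResourceTracking(pCB, 2, 3, bufs) grows currentDrawData.buffers to at least
// 5 elements and writes bufs[0..2] into slots 2..4; slots 0 and 1 are left untouched.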

static inline void updateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->drawData.push_back(pCB->currentDrawData); }

VKAPI_ATTR void VKAPI_CALL CmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount,
                                                const VkBuffer *pBuffers, const VkDeviceSize *pOffsets) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);

    auto cb_node = GetCBNode(dev_data, commandBuffer);
    assert(cb_node);

    skip |= ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdBindVertexBuffers()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_18202415);
    skip |= ValidateCmd(dev_data, cb_node, CMD_BINDVERTEXBUFFERS, "vkCmdBindVertexBuffers()");
    for (uint32_t i = 0; i < bindingCount; ++i) {
        auto buffer_state = GetBufferState(dev_data, pBuffers[i]);
        assert(buffer_state);
        skip |= ValidateBufferUsageFlags(dev_data, buffer_state, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, true, VALIDATION_ERROR_182004e6,
                                         "vkCmdBindVertexBuffers()", "VK_BUFFER_USAGE_VERTEX_BUFFER_BIT");
        skip |= ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCmdBindVertexBuffers()", VALIDATION_ERROR_182004e8);
        if (pOffsets[i] >= buffer_state->createInfo.size) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                            HandleToUint64(buffer_state->buffer), __LINE__, VALIDATION_ERROR_182004e4, "DS",
                            "vkCmdBindVertexBuffers() offset (0x%" PRIxLEAST64 ") is beyond the end of the buffer. %s", pOffsets[i],
                            validation_error_map[VALIDATION_ERROR_182004e4]);
        }
    }

    if (skip) return;

    for (uint32_t i = 0; i < bindingCount; ++i) {
        auto buffer_state = GetBufferState(dev_data, pBuffers[i]);
        assert(buffer_state);
        std::function<bool()> function = [=]() {
            return ValidateBufferMemoryIsValid(dev_data, buffer_state, "vkCmdBindVertexBuffers()");
        };
        cb_node->queue_submit_functions.push_back(function);
    }

    updateResourceTracking(cb_node, firstBinding, bindingCount, pBuffers);

    lock.unlock();
    dev_data->dispatch_table.CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
}
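
// Application-side sketch of a call this entry point intercepts (the handles are hypothetical):
//
//     VkBuffer buffers[2] = {position_buf, normal_buf};
//     VkDeviceSize offsets[2] = {0, 256};  // each must be less than the corresponding buffer's size
//     vkCmdBindVertexBuffers(cmd, 0, 2, buffers, offsets);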

// Expects global_lock to be held by caller
static void MarkStoreImagesAndBuffersAsWritten(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    for (auto imageView : pCB->updateImages) {
        auto view_state = GetImageViewState(dev_data, imageView);
        if (!view_state) continue;

        auto image_state = GetImageState(dev_data, view_state->create_info.image);
        assert(image_state);
        std::function<bool()> function = [=]() {
            SetImageMemoryValid(dev_data, image_state, true);
            return false;
        };
        pCB->queue_submit_functions.push_back(function);
    }
    for (auto buffer : pCB->updateBuffers) {
        auto buffer_state = GetBufferState(dev_data, buffer);
        assert(buffer_state);
        std::function<bool()> function = [=]() {
            SetBufferMemoryValid(dev_data, buffer_state, true);
            return false;
        };
        pCB->queue_submit_functions.push_back(function);
    }
}

// Generic function to handle validation for all CmdDraw* type functions
static bool ValidateCmdDrawType(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed, VkPipelineBindPoint bind_point,
                                CMD_TYPE cmd_type, GLOBAL_CB_NODE **cb_state, const char *caller, VkQueueFlags queue_flags,
                                UNIQUE_VALIDATION_ERROR_CODE queue_flag_code, UNIQUE_VALIDATION_ERROR_CODE msg_code,
                                UNIQUE_VALIDATION_ERROR_CODE const dynamic_state_msg_code) {
    bool skip = false;
    *cb_state = GetCBNode(dev_data, cmd_buffer);
    if (*cb_state) {
        skip |= ValidateCmdQueueFlags(dev_data, *cb_state, caller, queue_flags, queue_flag_code);
        skip |= ValidateCmd(dev_data, *cb_state, cmd_type, caller);
        skip |= ValidateDrawState(dev_data, *cb_state, cmd_type, indexed, bind_point, caller, dynamic_state_msg_code);
        skip |= (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point) ? outsideRenderPass(dev_data, *cb_state, caller, msg_code)
                                                                : insideRenderPass(dev_data, *cb_state, caller, msg_code);
    }
    return skip;
}

// Generic function to handle state update for all CmdDraw* and CmdDispatch* type functions
static void UpdateStateCmdDrawDispatchType(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
    UpdateDrawState(dev_data, cb_state, bind_point);
    MarkStoreImagesAndBuffersAsWritten(dev_data, cb_state);
}

// Generic function to handle state update for all CmdDraw* type functions
static void UpdateStateCmdDrawType(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
    UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point);
    updateResourceTrackingOnDraw(cb_state);
    cb_state->hasDrawCmd = true;
}

static bool PreCallValidateCmdDraw(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed, VkPipelineBindPoint bind_point,
                                   GLOBAL_CB_NODE **cb_state, const char *caller) {
    return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAW, cb_state, caller, VK_QUEUE_GRAPHICS_BIT,
                               VALIDATION_ERROR_1a202415, VALIDATION_ERROR_1a200017, VALIDATION_ERROR_1a200376);
}

static void PostCallRecordCmdDraw(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
    UpdateStateCmdDrawType(dev_data, cb_state, bind_point);
}

VKAPI_ATTR void VKAPI_CALL CmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
                                   uint32_t firstVertex, uint32_t firstInstance) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = nullptr;
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateCmdDraw(dev_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state, "vkCmdDraw()");
    lock.unlock();
    if (!skip) {
        dev_data->dispatch_table.CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
        lock.lock();
        PostCallRecordCmdDraw(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
        lock.unlock();
    }
}
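
// E.g. a full-screen triangle recorded by the application as vkCmdDraw(cmd, 3, 1, 0, 0) flows
// through PreCallValidateCmdDraw() above and, if nothing was flagged, down to the driver.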

static bool PreCallValidateCmdDrawIndexed(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed,
                                          VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, const char *caller) {
    return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDEXED, cb_state, caller, VK_QUEUE_GRAPHICS_BIT,
                               VALIDATION_ERROR_1a402415, VALIDATION_ERROR_1a400017, VALIDATION_ERROR_1a40039c);
}

static void PostCallRecordCmdDrawIndexed(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
    UpdateStateCmdDrawType(dev_data, cb_state, bind_point);
}

VKAPI_ATTR void VKAPI_CALL CmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount,
                                          uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = nullptr;
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateCmdDrawIndexed(dev_data, commandBuffer, true, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state,
                                              "vkCmdDrawIndexed()");
    lock.unlock();
    if (!skip) {
        dev_data->dispatch_table.CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance);
        lock.lock();
        PostCallRecordCmdDrawIndexed(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
        lock.unlock();
    }
}

static bool PreCallValidateCmdDrawIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
                                           VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, BUFFER_STATE **buffer_state,
                                           const char *caller) {
    bool skip =
        ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDIRECT, cb_state, caller, VK_QUEUE_GRAPHICS_BIT,
                            VALIDATION_ERROR_1aa02415, VALIDATION_ERROR_1aa00017, VALIDATION_ERROR_1aa003cc);
    *buffer_state = GetBufferState(dev_data, buffer);
    skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, VALIDATION_ERROR_1aa003b4);
    // TODO: If the drawIndirectFirstInstance feature is not enabled, all the firstInstance members of the
    // VkDrawIndirectCommand structures accessed by this command must be 0, which will require access to the contents of 'buffer'.
    return skip;
}
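
// The buffer consumed by vkCmdDrawIndirect holds tightly packed records of:
//
//     typedef struct VkDrawIndirectCommand {
//         uint32_t vertexCount;
//         uint32_t instanceCount;
//         uint32_t firstVertex;
//         uint32_t firstInstance;  // must be 0 unless drawIndirectFirstInstance is enabled
//     } VkDrawIndirectCommand;
//
// which is why the TODO above would require reading the buffer contents.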

static void PostCallRecordCmdDrawIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
                                          BUFFER_STATE *buffer_state) {
    UpdateStateCmdDrawType(dev_data, cb_state, bind_point);
    AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
}

VKAPI_ATTR void VKAPI_CALL CmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count,
                                           uint32_t stride) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = nullptr;
    BUFFER_STATE *buffer_state = nullptr;
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateCmdDrawIndirect(dev_data, commandBuffer, buffer, false, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state,
                                               &buffer_state, "vkCmdDrawIndirect()");
    lock.unlock();
    if (!skip) {
        dev_data->dispatch_table.CmdDrawIndirect(commandBuffer, buffer, offset, count, stride);
        lock.lock();
        PostCallRecordCmdDrawIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS, buffer_state);
        lock.unlock();
    }
}

static bool PreCallValidateCmdDrawIndexedIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
                                                  VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state,
                                                  BUFFER_STATE **buffer_state, const char *caller) {
    bool skip =
        ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDEXEDINDIRECT, cb_state, caller,
                            VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1a602415, VALIDATION_ERROR_1a600017, VALIDATION_ERROR_1a600434);
    *buffer_state = GetBufferState(dev_data, buffer);
    skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, VALIDATION_ERROR_1a60041c);
    // TODO: If the drawIndirectFirstInstance feature is not enabled, all the firstInstance members of the
    // VkDrawIndexedIndirectCommand structures accessed by this command must be 0, which will require access to the contents of
    // 'buffer'.
    return skip;
}

static void PostCallRecordCmdDrawIndexedIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
                                                 BUFFER_STATE *buffer_state) {
    UpdateStateCmdDrawType(dev_data, cb_state, bind_point);
    AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
}

VKAPI_ATTR void VKAPI_CALL CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                                  uint32_t count, uint32_t stride) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = nullptr;
    BUFFER_STATE *buffer_state = nullptr;
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateCmdDrawIndexedIndirect(dev_data, commandBuffer, buffer, true, VK_PIPELINE_BIND_POINT_GRAPHICS,
                                                      &cb_state, &buffer_state, "vkCmdDrawIndexedIndirect()");
    lock.unlock();
    if (!skip) {
        dev_data->dispatch_table.CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride);
        lock.lock();
        PostCallRecordCmdDrawIndexedIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS, buffer_state);
        lock.unlock();
    }
}

static bool PreCallValidateCmdDispatch(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed,
                                       VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, const char *caller) {
    return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DISPATCH, cb_state, caller, VK_QUEUE_COMPUTE_BIT,
                               VALIDATION_ERROR_19c02415, VALIDATION_ERROR_19c00017, VALIDATION_ERROR_UNDEFINED);
}

static void PostCallRecordCmdDispatch(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
    UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point);
}

VKAPI_ATTR void VKAPI_CALL CmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = nullptr;
    unique_lock_t lock(global_lock);
    bool skip =
        PreCallValidateCmdDispatch(dev_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_COMPUTE, &cb_state, "vkCmdDispatch()");
    lock.unlock();
    if (!skip) {
        dev_data->dispatch_table.CmdDispatch(commandBuffer, x, y, z);
        lock.lock();
        PostCallRecordCmdDispatch(dev_data, cb_state, VK_PIPELINE_BIND_POINT_COMPUTE);
        lock.unlock();
    }
}

static bool PreCallValidateCmdDispatchIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
                                               VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state,
                                               BUFFER_STATE **buffer_state, const char *caller) {
    bool skip =
        ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DISPATCHINDIRECT, cb_state, caller, VK_QUEUE_COMPUTE_BIT,
                            VALIDATION_ERROR_1a002415, VALIDATION_ERROR_1a000017, VALIDATION_ERROR_UNDEFINED);
    *buffer_state = GetBufferState(dev_data, buffer);
    skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, VALIDATION_ERROR_1a000322);
    return skip;
}
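
// For reference, the dispatch analog of the indirect draw record is just three group counts:
//
//     typedef struct VkDispatchIndirectCommand {
//         uint32_t x;
//         uint32_t y;
//         uint32_t z;
//     } VkDispatchIndirectCommand;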

static void PostCallRecordCmdDispatchIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
                                              BUFFER_STATE *buffer_state) {
    UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point);
    AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
}

VKAPI_ATTR void VKAPI_CALL CmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = nullptr;
    BUFFER_STATE *buffer_state = nullptr;
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateCmdDispatchIndirect(dev_data, commandBuffer, buffer, false, VK_PIPELINE_BIND_POINT_COMPUTE,
                                                   &cb_state, &buffer_state, "vkCmdDispatchIndirect()");
    lock.unlock();
    if (!skip) {
        dev_data->dispatch_table.CmdDispatchIndirect(commandBuffer, buffer, offset);
        lock.lock();
        PostCallRecordCmdDispatchIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_COMPUTE, buffer_state);
        lock.unlock();
    }
}

VKAPI_ATTR void VKAPI_CALL CmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
                                         uint32_t regionCount, const VkBufferCopy *pRegions) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);

    auto cb_node = GetCBNode(device_data, commandBuffer);
    auto src_buffer_state = GetBufferState(device_data, srcBuffer);
    auto dst_buffer_state = GetBufferState(device_data, dstBuffer);

    if (cb_node && src_buffer_state && dst_buffer_state) {
        bool skip = PreCallValidateCmdCopyBuffer(device_data, cb_node, src_buffer_state, dst_buffer_state);
        if (!skip) {
            PreCallRecordCmdCopyBuffer(device_data, cb_node, src_buffer_state, dst_buffer_state);
            lock.unlock();
            device_data->dispatch_table.CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
        }
    } else {
        lock.unlock();
        assert(0);
    }
}

VKAPI_ATTR void VKAPI_CALL CmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                        VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
                                        const VkImageCopy *pRegions) {
    bool skip = false;
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);

    auto cb_node = GetCBNode(device_data, commandBuffer);
    auto src_image_state = GetImageState(device_data, srcImage);
    auto dst_image_state = GetImageState(device_data, dstImage);
    if (cb_node && src_image_state && dst_image_state) {
        skip = PreCallValidateCmdCopyImage(device_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions,
                                           srcImageLayout, dstImageLayout);
        if (!skip) {
            PreCallRecordCmdCopyImage(device_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions, srcImageLayout,
                                      dstImageLayout);
            lock.unlock();
            device_data->dispatch_table.CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
                                                     pRegions);
        }
    } else {
        lock.unlock();
        assert(0);
    }
}

// Validate that an image's sampleCount matches the requirement for a specific API call
bool ValidateImageSampleCount(layer_data *dev_data, IMAGE_STATE *image_state, VkSampleCountFlagBits sample_count,
                              const char *location, UNIQUE_VALIDATION_ERROR_CODE msgCode) {
    bool skip = false;
    if (image_state->createInfo.samples != sample_count) {
        skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                       HandleToUint64(image_state->image), 0, msgCode, "DS",
                       "%s for image 0x%" PRIx64 " was created with a sample count of %s but must be %s. %s", location,
                       HandleToUint64(image_state->image), string_VkSampleCountFlagBits(image_state->createInfo.samples),
                       string_VkSampleCountFlagBits(sample_count), validation_error_map[msgCode]);
    }
    return skip;
}

VKAPI_ATTR void VKAPI_CALL CmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                        VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
                                        const VkImageBlit *pRegions, VkFilter filter) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);

    auto cb_node = GetCBNode(dev_data, commandBuffer);
    auto src_image_state = GetImageState(dev_data, srcImage);
    auto dst_image_state = GetImageState(dev_data, dstImage);

    bool skip = PreCallValidateCmdBlitImage(dev_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions,
                                            srcImageLayout, dstImageLayout, filter);

    if (!skip) {
        PreCallRecordCmdBlitImage(dev_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions, srcImageLayout,
                                  dstImageLayout);
        lock.unlock();
        dev_data->dispatch_table.CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
                                              pRegions, filter);
    }
}

VKAPI_ATTR void VKAPI_CALL CmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
                                                VkImageLayout dstImageLayout, uint32_t regionCount,
                                                const VkBufferImageCopy *pRegions) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    bool skip = false;
    auto cb_node = GetCBNode(device_data, commandBuffer);
    auto src_buffer_state = GetBufferState(device_data, srcBuffer);
    auto dst_image_state = GetImageState(device_data, dstImage);
    if (cb_node && src_buffer_state && dst_image_state) {
        skip = PreCallValidateCmdCopyBufferToImage(device_data, dstImageLayout, cb_node, src_buffer_state, dst_image_state,
                                                   regionCount, pRegions, "vkCmdCopyBufferToImage()");
    } else {
        lock.unlock();
        assert(0);
        // TODO: report VU01244 here, or put in object tracker?
        return;  // Bail out: falling through would record with null state and re-unlock an unowned lock
    }
    if (!skip) {
        PreCallRecordCmdCopyBufferToImage(device_data, cb_node, src_buffer_state, dst_image_state, regionCount, pRegions,
                                          dstImageLayout);
        lock.unlock();
        device_data->dispatch_table.CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions);
    }
}

VKAPI_ATTR void VKAPI_CALL CmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                                VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy *pRegions) {
    bool skip = false;
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);

    auto cb_node = GetCBNode(device_data, commandBuffer);
    auto src_image_state = GetImageState(device_data, srcImage);
    auto dst_buffer_state = GetBufferState(device_data, dstBuffer);
    if (cb_node && src_image_state && dst_buffer_state) {
        skip = PreCallValidateCmdCopyImageToBuffer(device_data, srcImageLayout, cb_node, src_image_state, dst_buffer_state,
                                                   regionCount, pRegions, "vkCmdCopyImageToBuffer()");
    } else {
        lock.unlock();
        assert(0);
        // TODO: report VU01262 here, or put in object tracker?
        return;  // Bail out: falling through would record with null state and re-unlock an unowned lock
    }
    if (!skip) {
        PreCallRecordCmdCopyImageToBuffer(device_data, cb_node, src_image_state, dst_buffer_state, regionCount, pRegions,
                                          srcImageLayout);
        lock.unlock();
        device_data->dispatch_table.CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions);
    }
}

static bool PreCallCmdUpdateBuffer(layer_data *device_data, const GLOBAL_CB_NODE *cb_state, const BUFFER_STATE *dst_buffer_state) {
    bool skip = false;
    skip |= ValidateMemoryIsBoundToBuffer(device_data, dst_buffer_state, "vkCmdUpdateBuffer()", VALIDATION_ERROR_1e400046);
    // Validate that DST buffer has correct usage flags set
    skip |= ValidateBufferUsageFlags(device_data, dst_buffer_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                     VALIDATION_ERROR_1e400044, "vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
    skip |= ValidateCmdQueueFlags(device_data, cb_state, "vkCmdUpdateBuffer()",
                                  VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, VALIDATION_ERROR_1e402415);
    skip |= ValidateCmd(device_data, cb_state, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
    skip |= insideRenderPass(device_data, cb_state, "vkCmdUpdateBuffer()", VALIDATION_ERROR_1e400017);
    return skip;
}
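
// For reference, the core spec also bounds this command, though those checks are not made in
// this function: dataSize must be at most 65536 bytes, and both dstOffset and dataSize must be
// multiples of 4. E.g. vkCmdUpdateBuffer(cmd, buf, 0, 64, data) is valid; dataSize = 65540 is not.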

static void PostCallRecordCmdUpdateBuffer(layer_data *device_data, GLOBAL_CB_NODE *cb_state, BUFFER_STATE *dst_buffer_state) {
    // Update bindings between buffer and cmd buffer
    AddCommandBufferBindingBuffer(device_data, cb_state, dst_buffer_state);
    std::function<bool()> function = [=]() {
        SetBufferMemoryValid(device_data, dst_buffer_state, true);
        return false;
    };
    cb_state->queue_submit_functions.push_back(function);
}

VKAPI_ATTR void VKAPI_CALL CmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
                                           VkDeviceSize dataSize, const uint32_t *pData) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);

    auto cb_state = GetCBNode(dev_data, commandBuffer);
    assert(cb_state);
    auto dst_buff_state = GetBufferState(dev_data, dstBuffer);
    assert(dst_buff_state);
    skip |= PreCallCmdUpdateBuffer(dev_data, cb_state, dst_buff_state);
    lock.unlock();
    if (!skip) {
        dev_data->dispatch_table.CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
        lock.lock();
        PostCallRecordCmdUpdateBuffer(dev_data, cb_state, dst_buff_state);
        lock.unlock();
    }
}
6848 
CmdFillBuffer(VkCommandBuffer commandBuffer,VkBuffer dstBuffer,VkDeviceSize dstOffset,VkDeviceSize size,uint32_t data)6849 VKAPI_ATTR void VKAPI_CALL CmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
6850                                          VkDeviceSize size, uint32_t data) {
6851     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6852     unique_lock_t lock(global_lock);
6853     auto cb_node = GetCBNode(device_data, commandBuffer);
6854     auto buffer_state = GetBufferState(device_data, dstBuffer);
6855 
6856     if (cb_node && buffer_state) {
6857         bool skip = PreCallValidateCmdFillBuffer(device_data, cb_node, buffer_state);
6858         if (!skip) {
6859             PreCallRecordCmdFillBuffer(device_data, cb_node, buffer_state);
6860             lock.unlock();
6861             device_data->dispatch_table.CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
6862         }
6863     } else {
6864         lock.unlock();
6865         assert(0);
6866     }
6867 }
6868 
CmdClearAttachments(VkCommandBuffer commandBuffer,uint32_t attachmentCount,const VkClearAttachment * pAttachments,uint32_t rectCount,const VkClearRect * pRects)6869 VKAPI_ATTR void VKAPI_CALL CmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
6870                                                const VkClearAttachment *pAttachments, uint32_t rectCount,
6871                                                const VkClearRect *pRects) {
6872     bool skip = false;
6873     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6874     {
6875         lock_guard_t lock(global_lock);
6876         skip = PreCallValidateCmdClearAttachments(dev_data, commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
6877     }
6878     if (!skip) dev_data->dispatch_table.CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
6879 }

VKAPI_ATTR void VKAPI_CALL CmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
                                              const VkClearColorValue *pColor, uint32_t rangeCount,
                                              const VkImageSubresourceRange *pRanges) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);

    bool skip = PreCallValidateCmdClearColorImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges);
    if (!skip) {
        PreCallRecordCmdClearImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges);
        lock.unlock();
        dev_data->dispatch_table.CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
    }
}

VKAPI_ATTR void VKAPI_CALL CmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
                                                     const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
                                                     const VkImageSubresourceRange *pRanges) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);

    bool skip = PreCallValidateCmdClearDepthStencilImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges);
    if (!skip) {
        PreCallRecordCmdClearImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges);
        lock.unlock();
        dev_data->dispatch_table.CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges);
    }
}

VKAPI_ATTR void VKAPI_CALL CmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                           VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
                                           const VkImageResolve *pRegions) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);

    auto cb_node = GetCBNode(dev_data, commandBuffer);
    auto src_image_state = GetImageState(dev_data, srcImage);
    auto dst_image_state = GetImageState(dev_data, dstImage);

    bool skip = PreCallValidateCmdResolveImage(dev_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions);

    if (!skip) {
        PreCallRecordCmdResolveImage(dev_data, cb_node, src_image_state, dst_image_state);
        lock.unlock();
        dev_data->dispatch_table.CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
                                                 pRegions);
    }
}

VKAPI_ATTR void VKAPI_CALL GetImageSubresourceLayout(VkDevice device, VkImage image, const VkImageSubresource *pSubresource,
                                                     VkSubresourceLayout *pLayout) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    unique_lock_t lock(global_lock);

    bool skip = PreCallValidateGetImageSubresourceLayout(device_data, image, pSubresource);
    if (!skip) {
        lock.unlock();
        device_data->dispatch_table.GetImageSubresourceLayout(device, image, pSubresource, pLayout);
    }
}

bool setEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        pCB->eventToStageMap[event] = stageMask;
    }
    auto queue_data = dev_data->queueMap.find(queue);
    if (queue_data != dev_data->queueMap.end()) {
        queue_data->second.eventToStageMap[event] = stageMask;
    }
    return false;
}

VKAPI_ATTR void VKAPI_CALL CmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetEvent()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                      VALIDATION_ERROR_1d402415);
        skip |= ValidateCmd(dev_data, pCB, CMD_SETEVENT, "vkCmdSetEvent()");
        skip |= insideRenderPass(dev_data, pCB, "vkCmdSetEvent()", VALIDATION_ERROR_1d400017);
        skip |= ValidateStageMaskGsTsEnables(dev_data, stageMask, "vkCmdSetEvent()", VALIDATION_ERROR_1d4008fc,
                                             VALIDATION_ERROR_1d4008fe);
        auto event_state = GetEventNode(dev_data, event);
        if (event_state) {
            addCommandBufferBinding(&event_state->cb_bindings, {HandleToUint64(event), kVulkanObjectTypeEvent}, pCB);
            event_state->cb_bindings.insert(pCB);
        }
        pCB->events.push_back(event);
        if (!pCB->waitedEvents.count(event)) {
            pCB->writeEventsBeforeWait.push_back(event);
        }
        pCB->eventUpdates.emplace_back([=](VkQueue q) { return setEventStageMask(q, commandBuffer, event, stageMask); });
    }
    lock.unlock();
    if (!skip) dev_data->dispatch_table.CmdSetEvent(commandBuffer, event, stageMask);
}
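
// Note on the eventUpdates lambda above: the stage mask recorded for the event cannot be finalized until queue submit
// time, so validation state is deferred by capturing the parameters in a closure. A sketch of the effect, assuming the
// command buffer cb is later submitted to some queue q:
//
//   vkCmdSetEvent(cb, event, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT);
//   // ... later, during vkQueueSubmit(q, ...), the recorded lambda runs as:
//   //     setEventStageMask(q, cb, event, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT);
//   // which updates both the command buffer's and the queue's eventToStageMap.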

VKAPI_ATTR void VKAPI_CALL CmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdResetEvent()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                      VALIDATION_ERROR_1c402415);
        skip |= ValidateCmd(dev_data, pCB, CMD_RESETEVENT, "vkCmdResetEvent()");
        skip |= insideRenderPass(dev_data, pCB, "vkCmdResetEvent()", VALIDATION_ERROR_1c400017);
        skip |= ValidateStageMaskGsTsEnables(dev_data, stageMask, "vkCmdResetEvent()", VALIDATION_ERROR_1c400904,
                                             VALIDATION_ERROR_1c400906);
        auto event_state = GetEventNode(dev_data, event);
        if (event_state) {
            addCommandBufferBinding(&event_state->cb_bindings, {HandleToUint64(event), kVulkanObjectTypeEvent}, pCB);
            event_state->cb_bindings.insert(pCB);
        }
        pCB->events.push_back(event);
        if (!pCB->waitedEvents.count(event)) {
            pCB->writeEventsBeforeWait.push_back(event);
        }
        // TODO : Add check for VALIDATION_ERROR_32c008f8
        pCB->eventUpdates.emplace_back(
            [=](VkQueue q) { return setEventStageMask(q, commandBuffer, event, VkPipelineStageFlags(0)); });
    }
    lock.unlock();
    if (!skip) dev_data->dispatch_table.CmdResetEvent(commandBuffer, event, stageMask);
}
// Return the input pipeline stage flags, expanded into the individual graphics stage bits if the input is exactly
// VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT
static VkPipelineStageFlags ExpandPipelineStageFlags(VkPipelineStageFlags inflags) {
    return (inflags != VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT)
               ? inflags
               : (VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT | VK_PIPELINE_STAGE_VERTEX_INPUT_BIT |
                  VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
                  VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
                  VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
                  VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT |
                  VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT);
}
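
// For illustration, the expansion behaves as follows (any other input passes through unchanged):
//
//   ExpandPipelineStageFlags(VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT)
//       == TOP_OF_PIPE | DRAW_INDIRECT | VERTEX_INPUT | ... | COLOR_ATTACHMENT_OUTPUT | BOTTOM_OF_PIPE
//   ExpandPipelineStageFlags(VK_PIPELINE_STAGE_TRANSFER_BIT) == VK_PIPELINE_STAGE_TRANSFER_BIT
//
// Note the test is an exact equality compare, so a mask that merely contains ALL_GRAPHICS alongside other bits
// is not expanded.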

// Verify image barrier image state and that the image is consistent with FB image
static bool ValidateImageBarrierImage(layer_data *device_data, const char *funcName, GLOBAL_CB_NODE const *cb_state,
                                      VkFramebuffer framebuffer, uint32_t active_subpass, const safe_VkSubpassDescription &sub_desc,
                                      uint64_t rp_handle, uint32_t img_index, const VkImageMemoryBarrier &img_barrier) {
    bool skip = false;
    const auto &fb_state = GetFramebufferState(device_data, framebuffer);
    assert(fb_state);
    const auto img_bar_image = img_barrier.image;
    bool image_match = false;
    bool sub_image_found = false;  // Do we find a corresponding subpass description
    VkImageLayout sub_image_layout = VK_IMAGE_LAYOUT_UNDEFINED;
    uint32_t attach_index = 0;
    uint32_t index_count = 0;
    // Verify that a framebuffer image matches barrier image
    for (const auto &fb_attach : fb_state->attachments) {
        if (img_bar_image == fb_attach.image) {
            image_match = true;
            attach_index = index_count;
            break;
        }
        index_count++;
    }
    if (image_match) {  // Make sure subpass is referring to matching attachment
        if (sub_desc.pDepthStencilAttachment && sub_desc.pDepthStencilAttachment->attachment == attach_index) {
            sub_image_layout = sub_desc.pDepthStencilAttachment->layout;
            sub_image_found = true;
        } else {
            for (uint32_t j = 0; j < sub_desc.colorAttachmentCount; ++j) {
                if (sub_desc.pColorAttachments && sub_desc.pColorAttachments[j].attachment == attach_index) {
                    sub_image_layout = sub_desc.pColorAttachments[j].layout;
                    sub_image_found = true;
                    break;
                } else if (sub_desc.pResolveAttachments && sub_desc.pResolveAttachments[j].attachment == attach_index) {
                    sub_image_layout = sub_desc.pResolveAttachments[j].layout;
                    sub_image_found = true;
                    break;
                }
            }
        }
        if (!sub_image_found) {
            skip |= log_msg(
                device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle,
                __LINE__, VALIDATION_ERROR_1b800936, "CORE",
                "%s: Barrier pImageMemoryBarriers[%d].image (0x%" PRIx64
                ") is not referenced by the VkSubpassDescription for active subpass (%d) of current renderPass (0x%" PRIx64 "). %s",
                funcName, img_index, HandleToUint64(img_bar_image), active_subpass, rp_handle,
                validation_error_map[VALIDATION_ERROR_1b800936]);
        }
    } else {  // !image_match
        auto const fb_handle = HandleToUint64(fb_state->framebuffer);
        skip |=
            log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT, fb_handle,
                    __LINE__, VALIDATION_ERROR_1b800936, "CORE",
                    "%s: Barrier pImageMemoryBarriers[%d].image (0x%" PRIx64
                    ") does not match an image from the current framebuffer (0x%" PRIx64 "). %s",
                    funcName, img_index, HandleToUint64(img_bar_image), fb_handle, validation_error_map[VALIDATION_ERROR_1b800936]);
    }
    if (img_barrier.oldLayout != img_barrier.newLayout) {
        skip |=
            log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    HandleToUint64(cb_state->commandBuffer), __LINE__, VALIDATION_ERROR_1b80093a, "CORE",
                    "%s: As the Image Barrier for image 0x%" PRIx64
                    " is being executed within a render pass instance, oldLayout must equal newLayout yet they are %s and %s. %s",
                    funcName, HandleToUint64(img_barrier.image), string_VkImageLayout(img_barrier.oldLayout),
                    string_VkImageLayout(img_barrier.newLayout), validation_error_map[VALIDATION_ERROR_1b80093a]);
    } else {
        if (sub_image_found && sub_image_layout != img_barrier.oldLayout) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                            rp_handle, __LINE__, VALIDATION_ERROR_1b800938, "CORE",
                            "%s: Barrier pImageMemoryBarriers[%d].image (0x%" PRIx64
                            ") is referenced by the VkSubpassDescription for active subpass (%d) of current renderPass (0x%" PRIx64
                            ") as having layout %s, but image barrier has layout %s. %s",
                            funcName, img_index, HandleToUint64(img_bar_image), active_subpass, rp_handle,
                            string_VkImageLayout(sub_image_layout), string_VkImageLayout(img_barrier.oldLayout),
                            validation_error_map[VALIDATION_ERROR_1b800938]);
        }
    }
    return skip;
}

// Validate image barriers within a renderPass
static bool ValidateRenderPassImageBarriers(layer_data *device_data, const char *funcName, GLOBAL_CB_NODE *cb_state,
                                            uint32_t active_subpass, const safe_VkSubpassDescription &sub_desc, uint64_t rp_handle,
                                            VkAccessFlags sub_src_access_mask, VkAccessFlags sub_dst_access_mask,
                                            uint32_t image_mem_barrier_count, const VkImageMemoryBarrier *image_barriers) {
    bool skip = false;
    for (uint32_t i = 0; i < image_mem_barrier_count; ++i) {
        const auto &img_barrier = image_barriers[i];
        const auto &img_src_access_mask = img_barrier.srcAccessMask;
        if (img_src_access_mask != (sub_src_access_mask & img_src_access_mask)) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                            rp_handle, __LINE__, VALIDATION_ERROR_1b80092e, "CORE",
                            "%s: Barrier pImageMemoryBarriers[%d].srcAccessMask(0x%X) is not a subset of VkSubpassDependency "
                            "srcAccessMask(0x%X) of subpass %d of renderPass 0x%" PRIx64 ". %s",
                            funcName, i, img_src_access_mask, sub_src_access_mask, active_subpass, rp_handle,
                            validation_error_map[VALIDATION_ERROR_1b80092e]);
        }
        const auto &img_dst_access_mask = img_barrier.dstAccessMask;
        if (img_dst_access_mask != (sub_dst_access_mask & img_dst_access_mask)) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                            rp_handle, __LINE__, VALIDATION_ERROR_1b800930, "CORE",
                            "%s: Barrier pImageMemoryBarriers[%d].dstAccessMask(0x%X) is not a subset of VkSubpassDependency "
                            "dstAccessMask(0x%X) of subpass %d of renderPass 0x%" PRIx64 ". %s",
                            funcName, i, img_dst_access_mask, sub_dst_access_mask, active_subpass, rp_handle,
                            validation_error_map[VALIDATION_ERROR_1b800930]);
        }
        if (VK_QUEUE_FAMILY_IGNORED != img_barrier.srcQueueFamilyIndex ||
            VK_QUEUE_FAMILY_IGNORED != img_barrier.dstQueueFamilyIndex) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                            rp_handle, __LINE__, VALIDATION_ERROR_1b80093c, "CORE",
                            "%s: Barrier pImageMemoryBarriers[%d].srcQueueFamilyIndex is %d and "
                            "pImageMemoryBarriers[%d].dstQueueFamilyIndex is %d but both must be VK_QUEUE_FAMILY_IGNORED. %s",
                            funcName, i, img_barrier.srcQueueFamilyIndex, i, img_barrier.dstQueueFamilyIndex,
                            validation_error_map[VALIDATION_ERROR_1b80093c]);
        }
        // Secondary CBs can have a null framebuffer, so queue up validation in that case until the FB is known
        if (VK_NULL_HANDLE == cb_state->activeFramebuffer) {
            assert(VK_COMMAND_BUFFER_LEVEL_SECONDARY == cb_state->createInfo.level);
            // Secondary CB without a framebuffer specified: defer validation until the framebuffer is known
            cb_state->cmd_execute_commands_functions.emplace_back([=](VkFramebuffer fb) {
                return ValidateImageBarrierImage(device_data, funcName, cb_state, fb, active_subpass, sub_desc, rp_handle, i,
                                                 img_barrier);
            });
        } else {
            skip |= ValidateImageBarrierImage(device_data, funcName, cb_state, cb_state->activeFramebuffer, active_subpass,
                                              sub_desc, rp_handle, i, img_barrier);
        }
    }
    return skip;
}

// Validate VUs for Pipeline Barriers that are within a renderPass
// Pre: cb_state->activeRenderPass must be a pointer to valid renderPass state
static bool ValidateRenderPassPipelineBarriers(layer_data *device_data, const char *funcName, GLOBAL_CB_NODE *cb_state,
                                               VkPipelineStageFlags src_stage_mask, VkPipelineStageFlags dst_stage_mask,
                                               VkDependencyFlags dependency_flags, uint32_t mem_barrier_count,
                                               const VkMemoryBarrier *mem_barriers, uint32_t buffer_mem_barrier_count,
                                               const VkBufferMemoryBarrier *buffer_mem_barriers, uint32_t image_mem_barrier_count,
                                               const VkImageMemoryBarrier *image_barriers) {
    bool skip = false;
    auto rp_state = cb_state->activeRenderPass;
    const auto active_subpass = cb_state->activeSubpass;
    auto rp_handle = HandleToUint64(rp_state->renderPass);
    if (!rp_state->hasSelfDependency[active_subpass]) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                        rp_handle, __LINE__, VALIDATION_ERROR_1b800928, "CORE",
                        "%s: Barriers cannot be set during subpass %d of renderPass 0x%" PRIx64
                        " with no self-dependency specified. %s",
                        funcName, active_subpass, rp_handle, validation_error_map[VALIDATION_ERROR_1b800928]);
    } else {
        assert(rp_state->subpass_to_dependency_index[cb_state->activeSubpass] != -1);
        // Grab ref to current subpassDescription up-front for use below
        const auto &sub_desc = rp_state->createInfo.pSubpasses[active_subpass];
        const auto &sub_dep = rp_state->createInfo.pDependencies[rp_state->subpass_to_dependency_index[active_subpass]];
        const auto &sub_src_stage_mask = ExpandPipelineStageFlags(sub_dep.srcStageMask);
        const auto &sub_dst_stage_mask = ExpandPipelineStageFlags(sub_dep.dstStageMask);
        if ((sub_src_stage_mask != VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) &&
            (src_stage_mask != (sub_src_stage_mask & src_stage_mask))) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                            rp_handle, __LINE__, VALIDATION_ERROR_1b80092a, "CORE",
                            "%s: Barrier srcStageMask(0x%X) is not a subset of VkSubpassDependency srcStageMask(0x%X) of subpass "
                            "%d of renderPass 0x%" PRIx64 ". %s",
                            funcName, src_stage_mask, sub_src_stage_mask, active_subpass, rp_handle,
                            validation_error_map[VALIDATION_ERROR_1b80092a]);
        }
        if ((sub_dst_stage_mask != VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) &&
            (dst_stage_mask != (sub_dst_stage_mask & dst_stage_mask))) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                            rp_handle, __LINE__, VALIDATION_ERROR_1b80092c, "CORE",
                            "%s: Barrier dstStageMask(0x%X) is not a subset of VkSubpassDependency dstStageMask(0x%X) of subpass "
                            "%d of renderPass 0x%" PRIx64 ". %s",
                            funcName, dst_stage_mask, sub_dst_stage_mask, active_subpass, rp_handle,
                            validation_error_map[VALIDATION_ERROR_1b80092c]);
        }
        if (0 != buffer_mem_barrier_count) {
            skip |=
                log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                        rp_handle, __LINE__, VALIDATION_ERROR_1b800934, "CORE",
                        "%s: bufferMemoryBarrierCount is non-zero (%d) for subpass %d of renderPass 0x%" PRIx64 ". %s", funcName,
                        buffer_mem_barrier_count, active_subpass, rp_handle, validation_error_map[VALIDATION_ERROR_1b800934]);
        }
        const auto &sub_src_access_mask = sub_dep.srcAccessMask;
        const auto &sub_dst_access_mask = sub_dep.dstAccessMask;
        for (uint32_t i = 0; i < mem_barrier_count; ++i) {
            const auto &mb_src_access_mask = mem_barriers[i].srcAccessMask;
            if (mb_src_access_mask != (sub_src_access_mask & mb_src_access_mask)) {
                skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle, __LINE__, VALIDATION_ERROR_1b80092e, "CORE",
                                "%s: Barrier pMemoryBarriers[%d].srcAccessMask(0x%X) is not a subset of VkSubpassDependency "
                                "srcAccessMask(0x%X) of subpass %d of renderPass 0x%" PRIx64 ". %s",
                                funcName, i, mb_src_access_mask, sub_src_access_mask, active_subpass, rp_handle,
                                validation_error_map[VALIDATION_ERROR_1b80092e]);
            }
            const auto &mb_dst_access_mask = mem_barriers[i].dstAccessMask;
            if (mb_dst_access_mask != (sub_dst_access_mask & mb_dst_access_mask)) {
                skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle, __LINE__, VALIDATION_ERROR_1b800930, "CORE",
                                "%s: Barrier pMemoryBarriers[%d].dstAccessMask(0x%X) is not a subset of VkSubpassDependency "
                                "dstAccessMask(0x%X) of subpass %d of renderPass 0x%" PRIx64 ". %s",
                                funcName, i, mb_dst_access_mask, sub_dst_access_mask, active_subpass, rp_handle,
                                validation_error_map[VALIDATION_ERROR_1b800930]);
            }
        }
        skip |= ValidateRenderPassImageBarriers(device_data, funcName, cb_state, active_subpass, sub_desc, rp_handle,
                                                sub_src_access_mask, sub_dst_access_mask, image_mem_barrier_count, image_barriers);
        if (sub_dep.dependencyFlags != dependency_flags) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                            rp_handle, __LINE__, VALIDATION_ERROR_1b800932, "CORE",
                            "%s: dependencyFlags param (0x%X) does not equal VkSubpassDependency dependencyFlags value (0x%X) for "
                            "subpass %d of renderPass 0x%" PRIx64 ". %s",
                            funcName, dependency_flags, sub_dep.dependencyFlags, cb_state->activeSubpass, rp_handle,
                            validation_error_map[VALIDATION_ERROR_1b800932]);
        }
    }
    return skip;
}

// Array mapping each accessMask bit to its supporting stageMask
//  accessMask active bit position (0-31) maps to array index
const static VkPipelineStageFlags AccessMaskToPipeStage[20] = {
    // VK_ACCESS_INDIRECT_COMMAND_READ_BIT = 0
    VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT,
    // VK_ACCESS_INDEX_READ_BIT = 1
    VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
    // VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT = 2
    VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
    // VK_ACCESS_UNIFORM_READ_BIT = 3
    VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
        VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
        VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
    // VK_ACCESS_INPUT_ATTACHMENT_READ_BIT = 4
    VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
    // VK_ACCESS_SHADER_READ_BIT = 5
    VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
        VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
        VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
    // VK_ACCESS_SHADER_WRITE_BIT = 6
    VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
        VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
        VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
    // VK_ACCESS_COLOR_ATTACHMENT_READ_BIT = 7
    VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
    // VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT = 8
    VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
    // VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT = 9
    VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
    // VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT = 10
    VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
    // VK_ACCESS_TRANSFER_READ_BIT = 11
    VK_PIPELINE_STAGE_TRANSFER_BIT,
    // VK_ACCESS_TRANSFER_WRITE_BIT = 12
    VK_PIPELINE_STAGE_TRANSFER_BIT,
    // VK_ACCESS_HOST_READ_BIT = 13
    VK_PIPELINE_STAGE_HOST_BIT,
    // VK_ACCESS_HOST_WRITE_BIT = 14
    VK_PIPELINE_STAGE_HOST_BIT,
    // VK_ACCESS_MEMORY_READ_BIT = 15
    VK_ACCESS_FLAG_BITS_MAX_ENUM,  // Always match
    // VK_ACCESS_MEMORY_WRITE_BIT = 16
    VK_ACCESS_FLAG_BITS_MAX_ENUM,  // Always match
    // VK_ACCESS_COMMAND_PROCESS_READ_BIT_NVX = 17
    VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX,
    // VK_ACCESS_COMMAND_PROCESS_WRITE_BIT_NVX = 18
    VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX,
};
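
// The table is indexed by the bit position of a single access flag, e.g. VK_ACCESS_TRANSFER_READ_BIT is bit 11, so
// AccessMaskToPipeStage[11] yields VK_PIPELINE_STAGE_TRANSFER_BIT. The entries for VK_ACCESS_MEMORY_READ/WRITE_BIT use
// VK_ACCESS_FLAG_BITS_MAX_ENUM (all bits set) so that they match against any stage mask.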

// Verify that all bits of access_mask are supported by the src_stage_mask
static bool ValidateAccessMaskPipelineStage(VkAccessFlags access_mask, VkPipelineStageFlags stage_mask) {
    // Early out if the all-commands bit is set, or access_mask is 0
    if ((stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) || (0 == access_mask)) return true;

    stage_mask = ExpandPipelineStageFlags(stage_mask);
    int index = 0;
    // For each of the set bits in access_mask, make sure that supporting stage mask bit(s) are set
    while (access_mask) {
        index = (u_ffs(access_mask) - 1);
        assert(index >= 0);
        // The explicit "== 0" comparison prevents an implicit-conversion warning from MSVC
        if ((AccessMaskToPipeStage[index] & stage_mask) == 0) return false;  // early out
        access_mask &= ~(1 << index);                                        // Mask off bit that's been checked
    }
    return true;
}
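
// Example outcomes, for illustration:
//
//   ValidateAccessMaskPipelineStage(VK_ACCESS_TRANSFER_READ_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT)  -> true
//   ValidateAccessMaskPipelineStage(VK_ACCESS_SHADER_READ_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT)    -> false
//   ValidateAccessMaskPipelineStage(any mask, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT)                 -> true (early out)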

namespace barrier_queue_families {
enum VuIndex {
    kSrcOrDstMustBeIgnore,
    kSpecialOrIgnoreOnly,
    kSrcIgnoreRequiresDstIgnore,
    kDstValidOrSpecialIfNotIgnore,
    kSrcValidOrSpecialIfNotIgnore,
    kSrcAndDestMustBeIgnore,
    kBothIgnoreOrBothValid,
    kSubmitQueueMustMatchSrcOrDst
};
static const char *vu_summary[] = {"Source or destination queue family must be ignored.",
                                   "Source or destination queue family must be special or ignored.",
                                   "Destination queue family must be ignored if source queue family is.",
                                   "Destination queue family must be valid, ignored, or special.",
                                   "Source queue family must be valid, ignored, or special.",
                                   "Source and destination queue family must both be ignored.",
                                   "Source and destination queue family must both be ignored or both valid.",
                                   "Source or destination queue family must match submit queue family, if not ignored."};

static const UNIQUE_VALIDATION_ERROR_CODE image_error_codes[] = {
    VALIDATION_ERROR_0a000aca,  //  VUID-VkImageMemoryBarrier-image-01381 -- kSrcOrDstMustBeIgnore
    VALIDATION_ERROR_0a000dcc,  //  VUID-VkImageMemoryBarrier-image-01766 -- kSpecialOrIgnoreOnly
    VALIDATION_ERROR_0a000962,  //  VUID-VkImageMemoryBarrier-image-01201 -- kSrcIgnoreRequiresDstIgnore
    VALIDATION_ERROR_0a000dd0,  //  VUID-VkImageMemoryBarrier-image-01768 -- kDstValidOrSpecialIfNotIgnore
    VALIDATION_ERROR_0a000dce,  //  VUID-VkImageMemoryBarrier-image-01767 -- kSrcValidOrSpecialIfNotIgnore
    VALIDATION_ERROR_0a00095e,  //  VUID-VkImageMemoryBarrier-image-01199 -- kSrcAndDestMustBeIgnore
    VALIDATION_ERROR_0a000960,  //  VUID-VkImageMemoryBarrier-image-01200 -- kBothIgnoreOrBothValid
    VALIDATION_ERROR_0a00096a,  //  VUID-VkImageMemoryBarrier-image-01205 -- kSubmitQueueMustMatchSrcOrDst
};

static const UNIQUE_VALIDATION_ERROR_CODE buffer_error_codes[] = {
    VALIDATION_ERROR_0180094e,  //  VUID-VkBufferMemoryBarrier-buffer-01191 -- kSrcOrDstMustBeIgnore
    VALIDATION_ERROR_01800dc6,  //  VUID-VkBufferMemoryBarrier-buffer-01763 -- kSpecialOrIgnoreOnly
    VALIDATION_ERROR_01800952,  //  VUID-VkBufferMemoryBarrier-buffer-01193 -- kSrcIgnoreRequiresDstIgnore
    VALIDATION_ERROR_01800dca,  //  VUID-VkBufferMemoryBarrier-buffer-01765 -- kDstValidOrSpecialIfNotIgnore
    VALIDATION_ERROR_01800dc8,  //  VUID-VkBufferMemoryBarrier-buffer-01764 -- kSrcValidOrSpecialIfNotIgnore
    VALIDATION_ERROR_0180094c,  //  VUID-VkBufferMemoryBarrier-buffer-01190 -- kSrcAndDestMustBeIgnore
    VALIDATION_ERROR_01800950,  //  VUID-VkBufferMemoryBarrier-buffer-01192 -- kBothIgnoreOrBothValid
    VALIDATION_ERROR_01800958,  //  VUID-VkBufferMemoryBarrier-buffer-01196 -- kSubmitQueueMustMatchSrcOrDst
};

class ValidatorState {
   public:
    ValidatorState(const layer_data *device_data, const char *func_name, const GLOBAL_CB_NODE *cb_state,
                   const uint64_t barrier_handle64, const VkSharingMode sharing_mode, const VulkanObjectType object_type,
                   const UNIQUE_VALIDATION_ERROR_CODE *val_codes)
        : report_data_(device_data->report_data),
          func_name_(func_name),
          cb_handle64_(HandleToUint64(cb_state->commandBuffer)),
          barrier_handle64_(barrier_handle64),
          sharing_mode_(sharing_mode),
          object_type_(object_type),
          val_codes_(val_codes),
          limit_(static_cast<uint32_t>(device_data->phys_dev_properties.queue_family_properties.size())),
          mem_ext_(device_data->extensions.vk_khr_external_memory) {}

    // Create a validator state from an image state... reducing the image specific to the generic version.
    ValidatorState(const layer_data *device_data, const char *func_name, const GLOBAL_CB_NODE *cb_state,
                   const VkImageMemoryBarrier *barrier, const IMAGE_STATE *state)
        : ValidatorState(device_data, func_name, cb_state, HandleToUint64(barrier->image), state->createInfo.sharingMode,
                         kVulkanObjectTypeImage, image_error_codes) {}

    // Create a validator state from a buffer state... reducing the buffer specific to the generic version.
    ValidatorState(const layer_data *device_data, const char *func_name, const GLOBAL_CB_NODE *cb_state,
                   const VkBufferMemoryBarrier *barrier, const BUFFER_STATE *state)
        : ValidatorState(device_data, func_name, cb_state, HandleToUint64(barrier->buffer), state->createInfo.sharingMode,
                         kVulkanObjectTypeBuffer, buffer_error_codes) {}

    // Log the messages using boilerplate from object state, and VU-specific information from the vu_index argument.
    // One- and two-family versions; in the single-family version, param_name holds the name of the passed parameter.
    bool LogMsg(VuIndex vu_index, size_t location, uint32_t family, const char *param_name) const {
        const UNIQUE_VALIDATION_ERROR_CODE val_code = val_codes_[vu_index];
        const char *annotation = GetFamilyAnnotation(family);
        return log_msg(report_data_, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, cb_handle64_,
                       location, val_code, "DS",
                       "%s: Barrier using %s 0x%" PRIx64 " created with sharingMode %s, has %s %u%s. %s %s", func_name_,
                       GetTypeString(), barrier_handle64_, GetModeString(), param_name, family, annotation, vu_summary[vu_index],
                       validation_error_map[val_code]);
    }

    bool LogMsg(VuIndex vu_index, size_t location, uint32_t src_family, uint32_t dst_family) const {
        const UNIQUE_VALIDATION_ERROR_CODE val_code = val_codes_[vu_index];
        const char *src_annotation = GetFamilyAnnotation(src_family);
        const char *dst_annotation = GetFamilyAnnotation(dst_family);
        return log_msg(report_data_, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, cb_handle64_,
                       location, val_code, "DS",
                       "%s: Barrier using %s 0x%" PRIx64
                       " created with sharingMode %s, has srcQueueFamilyIndex %u%s and dstQueueFamilyIndex %u%s. %s %s",
                       func_name_, GetTypeString(), barrier_handle64_, GetModeString(), src_family, src_annotation, dst_family,
                       dst_annotation, vu_summary[vu_index], validation_error_map[val_code]);
    }

    // This abstract VU can only be tested at submit time, so we need a callback from the closure containing the needed
    // data. Note that the mem_barrier is copied into the closure because the lambda's lifespan exceeds the validity
    // guarantees for the application's input.
    static bool ValidateAtQueueSubmit(const VkQueue queue, const layer_data *device_data, uint32_t src_family, uint32_t dst_family,
                                      const ValidatorState &val) {
        auto queue_data_it = device_data->queueMap.find(queue);
        if (queue_data_it == device_data->queueMap.end()) return false;

        uint32_t queue_family = queue_data_it->second.queueFamilyIndex;
        if ((src_family != queue_family) && (dst_family != queue_family)) {
            const UNIQUE_VALIDATION_ERROR_CODE val_code = val.val_codes_[kSubmitQueueMustMatchSrcOrDst];
            const char *src_annotation = val.GetFamilyAnnotation(src_family);
            const char *dst_annotation = val.GetFamilyAnnotation(dst_family);
            return log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
                           HandleToUint64(queue), __LINE__, val_code, "DS",
                           "%s: Barrier submitted to queue with family index %u, using %s 0x%" PRIx64
                           " created with sharingMode %s, has srcQueueFamilyIndex %u%s and dstQueueFamilyIndex %u%s. %s %s",
                           "vkQueueSubmit", queue_family, val.GetTypeString(), val.barrier_handle64_, val.GetModeString(),
                           src_family, src_annotation, dst_family, dst_annotation, vu_summary[kSubmitQueueMustMatchSrcOrDst],
                           validation_error_map[val_code]);
        }
        return false;
    }
    // Logical helpers for semantic clarity
    inline bool KhrExternalMem() const { return mem_ext_; }
    inline bool IsValid(uint32_t queue_family) const { return (queue_family < limit_); }
    inline bool IsSpecial(uint32_t queue_family) const {
        return (queue_family == VK_QUEUE_FAMILY_EXTERNAL_KHR) || (queue_family == VK_QUEUE_FAMILY_FOREIGN_EXT);
    }
    inline bool IsValidOrSpecial(uint32_t queue_family) const {
        return IsValid(queue_family) || (mem_ext_ && IsSpecial(queue_family));
    }
    inline bool IsIgnored(uint32_t queue_family) const { return queue_family == VK_QUEUE_FAMILY_IGNORED; }

    // Helpers for LogMsg (and log_msg)
    const char *GetModeString() const { return string_VkSharingMode(sharing_mode_); }

    // Descriptive text for the various types of queue family index
    const char *GetFamilyAnnotation(uint32_t family) const {
        const char *external = " (VK_QUEUE_FAMILY_EXTERNAL_KHR)";
        const char *foreign = " (VK_QUEUE_FAMILY_FOREIGN_EXT)";
        const char *ignored = " (VK_QUEUE_FAMILY_IGNORED)";
        const char *valid = " (VALID)";
        const char *invalid = " (INVALID)";
        switch (family) {
            case VK_QUEUE_FAMILY_EXTERNAL_KHR:
                return external;
            case VK_QUEUE_FAMILY_FOREIGN_EXT:
                return foreign;
            case VK_QUEUE_FAMILY_IGNORED:
                return ignored;
            default:
                if (IsValid(family)) {
                    return valid;
                }
                return invalid;
        };
    }
    const char *GetTypeString() const { return object_string[object_type_]; }
    VkSharingMode GetSharingMode() const { return sharing_mode_; }

   protected:
    const debug_report_data *const report_data_;
    const char *const func_name_;
    const uint64_t cb_handle64_;
    const uint64_t barrier_handle64_;
    const VkSharingMode sharing_mode_;
    const VulkanObjectType object_type_;
    const UNIQUE_VALIDATION_ERROR_CODE *val_codes_;
    const uint32_t limit_;
    const bool mem_ext_;
};
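
// Typical use, as in the type-specific wrappers below: construct a ValidatorState from a barrier plus its object
// state, then run the shared queue-family rules against it. A minimal sketch:
//
//   ValidatorState val(device_data, func_name, cb_state, &img_barrier, image_state);
//   skip |= Validate(device_data, func_name, cb_state, val,
//                    img_barrier.srcQueueFamilyIndex, img_barrier.dstQueueFamilyIndex);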

bool Validate(const layer_data *device_data, const char *func_name, GLOBAL_CB_NODE *cb_state, const ValidatorState &val,
              const uint32_t src_queue_family, const uint32_t dst_queue_family) {
    bool skip = false;

    const bool mode_concurrent = val.GetSharingMode() == VK_SHARING_MODE_CONCURRENT;
    const bool src_ignored = val.IsIgnored(src_queue_family);
    const bool dst_ignored = val.IsIgnored(dst_queue_family);
    if (val.KhrExternalMem()) {
        if (mode_concurrent) {
            if (!(src_ignored || dst_ignored)) {
                skip |= val.LogMsg(kSrcOrDstMustBeIgnore, __LINE__, src_queue_family, dst_queue_family);
            }
            if ((src_ignored && !(dst_ignored || val.IsSpecial(dst_queue_family))) ||
                (dst_ignored && !(src_ignored || val.IsSpecial(src_queue_family)))) {
                skip |= val.LogMsg(kSpecialOrIgnoreOnly, __LINE__, src_queue_family, dst_queue_family);
            }
        } else {
            // VK_SHARING_MODE_EXCLUSIVE
            if (src_ignored && !dst_ignored) {
                skip |= val.LogMsg(kSrcIgnoreRequiresDstIgnore, __LINE__, src_queue_family, dst_queue_family);
            }
            if (!dst_ignored && !val.IsValidOrSpecial(dst_queue_family)) {
                skip |= val.LogMsg(kDstValidOrSpecialIfNotIgnore, __LINE__, dst_queue_family, "dstQueueFamilyIndex");
            }
            if (!src_ignored && !val.IsValidOrSpecial(src_queue_family)) {
                skip |= val.LogMsg(kSrcValidOrSpecialIfNotIgnore, __LINE__, src_queue_family, "srcQueueFamilyIndex");
            }
        }
    } else {
        // No memory extension
        if (mode_concurrent) {
            if (!src_ignored || !dst_ignored) {
                skip |= val.LogMsg(kSrcAndDestMustBeIgnore, __LINE__, src_queue_family, dst_queue_family);
            }
        } else {
            // VK_SHARING_MODE_EXCLUSIVE
            if (!((src_ignored && dst_ignored) || (val.IsValid(src_queue_family) && val.IsValid(dst_queue_family)))) {
                skip |= val.LogMsg(kBothIgnoreOrBothValid, __LINE__, src_queue_family, dst_queue_family);
            }
        }
    }
    if (!mode_concurrent && !src_ignored && !dst_ignored) {
        // Only enqueue the submit-time check if it is needed. If more submit-time checks are added, change the criteria
        // TODO create a better named list, or rename the submit time lists to something that matches the broader usage...
        // Note: if we want to create a semantic that separates state lookup, validation, and state update this should go
        // to a local queue of update_state_actions or something.
        cb_state->eventUpdates.emplace_back([device_data, src_queue_family, dst_queue_family, val](VkQueue queue) {
            return ValidatorState::ValidateAtQueueSubmit(queue, device_data, src_queue_family, dst_queue_family, val);
        });
    }
    return skip;
}
}  // namespace barrier_queue_families
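
// Summary of the queue-family rules enforced by barrier_queue_families::Validate, for reference:
//   - VK_KHR_external_memory enabled, CONCURRENT sharing: src or dst must be IGNORED; if only one is IGNORED, the
//     other must be a special family (EXTERNAL_KHR / FOREIGN_EXT).
//   - VK_KHR_external_memory enabled, EXCLUSIVE sharing: if src is IGNORED, dst must be too; otherwise each index
//     must be a valid family or a special one.
//   - Extension disabled, CONCURRENT: both must be IGNORED.
//   - Extension disabled, EXCLUSIVE: both IGNORED, or both valid family indices.
//   - A queue-family ownership transfer (EXCLUSIVE, neither index IGNORED) is additionally checked at submit time:
//     the submitting queue's family must match src or dst.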

// Type specific wrapper for image barriers
bool ValidateBarrierQueueFamilies(const layer_data *device_data, const char *func_name, GLOBAL_CB_NODE *cb_state,
                                  const VkImageMemoryBarrier *barrier, const IMAGE_STATE *state_data) {
    // State data is required
    if (!state_data) {
        return false;
    }

    // Create the validator state from the image state
    barrier_queue_families::ValidatorState val(device_data, func_name, cb_state, barrier, state_data);
    const uint32_t src_queue_family = barrier->srcQueueFamilyIndex;
    const uint32_t dst_queue_family = barrier->dstQueueFamilyIndex;
    return barrier_queue_families::Validate(device_data, func_name, cb_state, val, src_queue_family, dst_queue_family);
}

// Type specific wrapper for buffer barriers
bool ValidateBarrierQueueFamilies(const layer_data *device_data, const char *func_name, GLOBAL_CB_NODE *cb_state,
                                  const VkBufferMemoryBarrier *barrier, const BUFFER_STATE *state_data) {
    // State data is required
    if (!state_data) {
        return false;
    }

    // Create the validator state from the buffer state
    barrier_queue_families::ValidatorState val(device_data, func_name, cb_state, barrier, state_data);
    const uint32_t src_queue_family = barrier->srcQueueFamilyIndex;
    const uint32_t dst_queue_family = barrier->dstQueueFamilyIndex;
    return barrier_queue_families::Validate(device_data, func_name, cb_state, val, src_queue_family, dst_queue_family);
}

static bool ValidateBarriers(layer_data *device_data, const char *funcName, GLOBAL_CB_NODE *cb_state,
                             VkPipelineStageFlags src_stage_mask, VkPipelineStageFlags dst_stage_mask, uint32_t memBarrierCount,
                             const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
                             const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
                             const VkImageMemoryBarrier *pImageMemBarriers) {
    bool skip = false;
    for (uint32_t i = 0; i < memBarrierCount; ++i) {
        const auto &mem_barrier = pMemBarriers[i];
        if (!ValidateAccessMaskPipelineStage(mem_barrier.srcAccessMask, src_stage_mask)) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_state->commandBuffer), __LINE__, VALIDATION_ERROR_1b800940, "DS",
                            "%s: pMemBarriers[%d].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X). %s", funcName, i,
                            mem_barrier.srcAccessMask, src_stage_mask, validation_error_map[VALIDATION_ERROR_1b800940]);
        }
        if (!ValidateAccessMaskPipelineStage(mem_barrier.dstAccessMask, dst_stage_mask)) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_state->commandBuffer), __LINE__, VALIDATION_ERROR_1b800942, "DS",
                            "%s: pMemBarriers[%d].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X). %s", funcName, i,
                            mem_barrier.dstAccessMask, dst_stage_mask, validation_error_map[VALIDATION_ERROR_1b800942]);
        }
    }
    for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
        auto mem_barrier = &pImageMemBarriers[i];
        if (!ValidateAccessMaskPipelineStage(mem_barrier->srcAccessMask, src_stage_mask)) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_state->commandBuffer), __LINE__, VALIDATION_ERROR_1b800940, "DS",
                            "%s: pImageMemBarriers[%d].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X). %s", funcName,
                            i, mem_barrier->srcAccessMask, src_stage_mask, validation_error_map[VALIDATION_ERROR_1b800940]);
        }
        if (!ValidateAccessMaskPipelineStage(mem_barrier->dstAccessMask, dst_stage_mask)) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_state->commandBuffer), __LINE__, VALIDATION_ERROR_1b800942, "DS",
                            "%s: pImageMemBarriers[%d].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X). %s", funcName,
                            i, mem_barrier->dstAccessMask, dst_stage_mask, validation_error_map[VALIDATION_ERROR_1b800942]);
        }

        auto image_data = GetImageState(device_data, mem_barrier->image);
        skip |= ValidateBarrierQueueFamilies(device_data, funcName, cb_state, mem_barrier, image_data);

        if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_state->commandBuffer), __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                            "%s: Image Layout cannot be transitioned to UNDEFINED or PREINITIALIZED.", funcName);
        }

        if (image_data) {
            auto aspect_mask = mem_barrier->subresourceRange.aspectMask;
            skip |= ValidateImageAspectMask(device_data, image_data->image, image_data->createInfo.format, aspect_mask, funcName);

            std::string param_name = "pImageMemoryBarriers[" + std::to_string(i) + "].subresourceRange";
            skip |= ValidateImageBarrierSubresourceRange(device_data, image_data, mem_barrier->subresourceRange, funcName,
                                                         param_name.c_str());
        }
    }

    for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
        auto mem_barrier = &pBufferMemBarriers[i];
        if (!mem_barrier) continue;

        if (!ValidateAccessMaskPipelineStage(mem_barrier->srcAccessMask, src_stage_mask)) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_state->commandBuffer), __LINE__, VALIDATION_ERROR_1b800940, "DS",
                            "%s: pBufferMemBarriers[%d].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X). %s", funcName,
                            i, mem_barrier->srcAccessMask, src_stage_mask, validation_error_map[VALIDATION_ERROR_1b800940]);
        }
        if (!ValidateAccessMaskPipelineStage(mem_barrier->dstAccessMask, dst_stage_mask)) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_state->commandBuffer), __LINE__, VALIDATION_ERROR_1b800942, "DS",
                            "%s: pBufferMemBarriers[%d].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X). %s", funcName,
                            i, mem_barrier->dstAccessMask, dst_stage_mask, validation_error_map[VALIDATION_ERROR_1b800942]);
        }
        // Validate buffer barrier queue family indices
        auto buffer_state = GetBufferState(device_data, mem_barrier->buffer);
        skip |= ValidateBarrierQueueFamilies(device_data, funcName, cb_state, mem_barrier, buffer_state);

        if (buffer_state) {
            auto buffer_size = buffer_state->requirements.size;
            if (mem_barrier->offset >= buffer_size) {
                skip |= log_msg(
                    device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    HandleToUint64(cb_state->commandBuffer), __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                    "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " which is not less than total size 0x%" PRIx64 ".",
                    funcName, HandleToUint64(mem_barrier->buffer), HandleToUint64(mem_barrier->offset),
                    HandleToUint64(buffer_size));
            } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) {
                skip |=
                    log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_state->commandBuffer), __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                            "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " and size 0x%" PRIx64
                            " whose sum is greater than total size 0x%" PRIx64 ".",
                            funcName, HandleToUint64(mem_barrier->buffer), HandleToUint64(mem_barrier->offset),
                            HandleToUint64(mem_barrier->size), HandleToUint64(buffer_size));
            }
        }
    }
    return skip;
}
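
// Example of the buffer-range rule above, for illustration: for a buffer whose bound memory requirement size is 256,
// a VkBufferMemoryBarrier with offset = 256 fails the first check (offset must be less than the size), while
// offset = 128 with size = 192 fails the second (128 + 192 > 256). size = VK_WHOLE_SIZE always passes the second check.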

bool validateEventStageMask(VkQueue queue, GLOBAL_CB_NODE *pCB, uint32_t eventCount, size_t firstEventIndex,
                            VkPipelineStageFlags sourceStageMask) {
    bool skip = false;
    VkPipelineStageFlags stageMask = 0;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
    for (uint32_t i = 0; i < eventCount; ++i) {
        auto event = pCB->events[firstEventIndex + i];
        auto queue_data = dev_data->queueMap.find(queue);
        if (queue_data == dev_data->queueMap.end()) return false;
        auto event_data = queue_data->second.eventToStageMap.find(event);
        if (event_data != queue_data->second.eventToStageMap.end()) {
            stageMask |= event_data->second;
        } else {
            auto global_event_data = GetEventNode(dev_data, event);
            if (!global_event_data) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
                                HandleToUint64(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
                                "Event 0x%" PRIx64 " cannot be waited on if it has never been set.", HandleToUint64(event));
            } else {
                stageMask |= global_event_data->stageMask;
            }
        }
    }
7677     // TODO: Need to validate that host_bit is only set if set event is called
7678     // but set event can be called at any time.
7679     if (sourceStageMask != stageMask && sourceStageMask != (stageMask | VK_PIPELINE_STAGE_HOST_BIT)) {
7680         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7681                         HandleToUint64(pCB->commandBuffer), __LINE__, VALIDATION_ERROR_1e62d401, "DS",
7682                         "Submitting cmdbuffer with call to VkCmdWaitEvents using srcStageMask 0x%X which must be the bitwise OR of "
7683                         "the stageMask parameters used in calls to vkCmdSetEvent and VK_PIPELINE_STAGE_HOST_BIT if used with "
7684                         "vkSetEvent but instead is 0x%X. %s",
7685                         sourceStageMask, stageMask, validation_error_map[VALIDATION_ERROR_1e62d401]);
7686     }
7687     return skip;
7688 }
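
// Illustrative example (not part of the layer): the wait below passes the check above because
// its srcStageMask equals the OR of the stageMask values used in the matching set call:
//     vkCmdSetEvent(cb, event, VK_PIPELINE_STAGE_TRANSFER_BIT);
//     vkCmdWaitEvents(cb, 1, &event,
//                     VK_PIPELINE_STAGE_TRANSFER_BIT,         // sourceStageMask: must match the OR of set masks
//                     VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,  // dstStageMask
//                     0, nullptr, 0, nullptr, 0, nullptr);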

// Note that we only check bits that HAVE required queue flags -- "don't care" entries are skipped
static std::unordered_map<VkPipelineStageFlags, VkQueueFlags> supported_pipeline_stages_table = {
    {VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT},
    {VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT},
    {VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_QUEUE_COMPUTE_BIT},
    {VK_PIPELINE_STAGE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT},
    {VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, VK_QUEUE_GRAPHICS_BIT}};

static const VkPipelineStageFlags stage_flag_bit_array[] = {VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX,
                                                            VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT,
                                                            VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
                                                            VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
                                                            VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT,
                                                            VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT,
                                                            VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT,
                                                            VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                                                            VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
                                                            VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
                                                            VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                                                            VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
                                                            VK_PIPELINE_STAGE_TRANSFER_BIT,
                                                            VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT};

bool CheckStageMaskQueueCompatibility(layer_data *dev_data, VkCommandBuffer command_buffer, VkPipelineStageFlags stage_mask,
                                      VkQueueFlags queue_flags, const char *function, const char *src_or_dest,
                                      UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool skip = false;
    // Look up each bit in the stage mask and check for overlap between its table bits and queue_flags
    for (const auto &item : stage_flag_bit_array) {
        if (stage_mask & item) {
            if ((supported_pipeline_stages_table[item] & queue_flags) == 0) {
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(command_buffer), __LINE__, error_code, "DL",
                            "%s(): %s flag %s is not compatible with the queue family properties of this command buffer. %s",
                            function, src_or_dest, string_VkPipelineStageFlagBits(static_cast<VkPipelineStageFlagBits>(item)),
                            validation_error_map[error_code]);
            }
        }
    }
    return skip;
}
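
// Illustrative example (not part of the layer): on a compute-only queue family
// (queueFlags == VK_QUEUE_COMPUTE_BIT), a stage_mask containing
// VK_PIPELINE_STAGE_VERTEX_SHADER_BIT fails the table lookup above, because that stage
// requires VK_QUEUE_GRAPHICS_BIT; VK_PIPELINE_STAGE_TRANSFER_BIT would pass.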

bool ValidateStageMasksAgainstQueueCapabilities(layer_data *dev_data, GLOBAL_CB_NODE const *cb_state,
                                                VkPipelineStageFlags source_stage_mask, VkPipelineStageFlags dest_stage_mask,
                                                const char *function, UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool skip = false;
    uint32_t queue_family_index = dev_data->commandPoolMap[cb_state->createInfo.commandPool].queueFamilyIndex;
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(dev_data->physical_device), instance_layer_data_map);
    auto physical_device_state = GetPhysicalDeviceState(instance_data, dev_data->physical_device);

    // Any pipeline stage included in srcStageMask or dstStageMask must be supported by the capabilities of the queue family
    // specified by the queueFamilyIndex member of the VkCommandPoolCreateInfo structure that was used to create the VkCommandPool
    // that commandBuffer was allocated from, as specified in the table of supported pipeline stages.

    if (queue_family_index < physical_device_state->queue_family_properties.size()) {
        VkQueueFlags specified_queue_flags = physical_device_state->queue_family_properties[queue_family_index].queueFlags;

        if ((source_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) {
            skip |= CheckStageMaskQueueCompatibility(dev_data, cb_state->commandBuffer, source_stage_mask, specified_queue_flags,
                                                     function, "srcStageMask", error_code);
        }
        if ((dest_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) {
            skip |= CheckStageMaskQueueCompatibility(dev_data, cb_state->commandBuffer, dest_stage_mask, specified_queue_flags,
                                                     function, "dstStageMask", error_code);
        }
    }
    return skip;
}

VKAPI_ATTR void VKAPI_CALL CmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
                                         VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
                                         uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                                         uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                                         uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
    if (cb_state) {
        skip |= ValidateStageMasksAgainstQueueCapabilities(dev_data, cb_state, sourceStageMask, dstStageMask, "vkCmdWaitEvents",
                                                           VALIDATION_ERROR_1e600918);
        skip |= ValidateStageMaskGsTsEnables(dev_data, sourceStageMask, "vkCmdWaitEvents()", VALIDATION_ERROR_1e60090e,
                                             VALIDATION_ERROR_1e600912);
        skip |= ValidateStageMaskGsTsEnables(dev_data, dstStageMask, "vkCmdWaitEvents()", VALIDATION_ERROR_1e600910,
                                             VALIDATION_ERROR_1e600914);
        skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdWaitEvents()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                      VALIDATION_ERROR_1e602415);
        skip |= ValidateCmd(dev_data, cb_state, CMD_WAITEVENTS, "vkCmdWaitEvents()");
        skip |= ValidateBarriersToImages(dev_data, cb_state, imageMemoryBarrierCount, pImageMemoryBarriers, "vkCmdWaitEvents()");
        skip |= ValidateBarriers(dev_data, "vkCmdWaitEvents()", cb_state, sourceStageMask, dstStageMask, memoryBarrierCount,
                                 pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
                                 pImageMemoryBarriers);
        if (!skip) {
            auto first_event_index = cb_state->events.size();
            for (uint32_t i = 0; i < eventCount; ++i) {
                auto event_state = GetEventNode(dev_data, pEvents[i]);
                if (event_state) {
                    addCommandBufferBinding(&event_state->cb_bindings, {HandleToUint64(pEvents[i]), kVulkanObjectTypeEvent},
                                            cb_state);
                    event_state->cb_bindings.insert(cb_state);
                }
                cb_state->waitedEvents.insert(pEvents[i]);
                cb_state->events.push_back(pEvents[i]);
            }
            cb_state->eventUpdates.emplace_back(
                [=](VkQueue q) { return validateEventStageMask(q, cb_state, eventCount, first_event_index, sourceStageMask); });
            TransitionImageLayouts(dev_data, commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
        }
    }
    lock.unlock();
    if (!skip)
        dev_data->dispatch_table.CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
                                               memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
                                               imageMemoryBarrierCount, pImageMemoryBarriers);
}

static bool PreCallValidateCmdPipelineBarrier(layer_data *device_data, GLOBAL_CB_NODE *cb_state, VkPipelineStageFlags srcStageMask,
                                              VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
                                              uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                                              uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                                              uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
    bool skip = false;
    skip |= ValidateStageMasksAgainstQueueCapabilities(device_data, cb_state, srcStageMask, dstStageMask, "vkCmdPipelineBarrier",
                                                       VALIDATION_ERROR_1b80093e);
    skip |= ValidateCmdQueueFlags(device_data, cb_state, "vkCmdPipelineBarrier()",
                                  VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, VALIDATION_ERROR_1b802415);
    skip |= ValidateCmd(device_data, cb_state, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
    skip |= ValidateStageMaskGsTsEnables(device_data, srcStageMask, "vkCmdPipelineBarrier()", VALIDATION_ERROR_1b800920,
                                         VALIDATION_ERROR_1b800924);
    skip |= ValidateStageMaskGsTsEnables(device_data, dstStageMask, "vkCmdPipelineBarrier()", VALIDATION_ERROR_1b800922,
                                         VALIDATION_ERROR_1b800926);
    if (cb_state->activeRenderPass) {
        skip |= ValidateRenderPassPipelineBarriers(device_data, "vkCmdPipelineBarrier()", cb_state, srcStageMask, dstStageMask,
                                                   dependencyFlags, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                                                   pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
        if (skip) return true;  // Early return to avoid redundant errors from below calls
    }
    skip |=
        ValidateBarriersToImages(device_data, cb_state, imageMemoryBarrierCount, pImageMemoryBarriers, "vkCmdPipelineBarrier()");
    skip |= ValidateBarriers(device_data, "vkCmdPipelineBarrier()", cb_state, srcStageMask, dstStageMask, memoryBarrierCount,
                             pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
                             pImageMemoryBarriers);
    return skip;
}

static void PreCallRecordCmdPipelineBarrier(layer_data *device_data, GLOBAL_CB_NODE *cb_state, VkCommandBuffer commandBuffer,
                                            uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
    TransitionImageLayouts(device_data, commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
}

VKAPI_ATTR void VKAPI_CALL CmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
                                              VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
                                              uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                                              uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                                              uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
    bool skip = false;
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
    if (cb_state) {
        skip |= PreCallValidateCmdPipelineBarrier(device_data, cb_state, srcStageMask, dstStageMask, dependencyFlags,
                                                  memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                                                  pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
        if (!skip) {
            PreCallRecordCmdPipelineBarrier(device_data, cb_state, commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
        }
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip) {
        device_data->dispatch_table.CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags,
                                                       memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                                                       pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
    }
}
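
// Illustrative example (not part of the layer): a typical layout transition that flows through
// PreCallValidateCmdPipelineBarrier() and then TransitionImageLayouts() above:
//     VkImageMemoryBarrier imb = {};
//     imb.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
//     imb.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
//     imb.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
//     imb.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
//     imb.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
//     imb.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     imb.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     imb.image = image;
//     imb.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
//     vkCmdPipelineBarrier(cb, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
//                          0, 0, nullptr, 0, nullptr, 1, &imb);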

// Callback recorded onto a command buffer's queryUpdates list and invoked with the target queue
// when the command buffer is submitted. Records the query's state in both the command buffer
// and queue maps, and always returns false (it never requests a skip).
static bool setQueryState(VkQueue queue, VkCommandBuffer commandBuffer, QueryObject object, bool value) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        pCB->queryToStateMap[object] = value;
    }
    auto queue_data = dev_data->queueMap.find(queue);
    if (queue_data != dev_data->queueMap.end()) {
        queue_data->second.queryToStateMap[object] = value;
    }
    return false;
}

VKAPI_ATTR void VKAPI_CALL CmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdBeginQuery()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                      VALIDATION_ERROR_17802415);
        skip |= ValidateCmd(dev_data, pCB, CMD_BEGINQUERY, "vkCmdBeginQuery()");
    }
    lock.unlock();

    if (skip) return;

    dev_data->dispatch_table.CmdBeginQuery(commandBuffer, queryPool, slot, flags);

    lock.lock();
    if (pCB) {
        QueryObject query = {queryPool, slot};
        pCB->activeQueries.insert(query);
        pCB->startedQueries.insert(query);
        addCommandBufferBinding(&GetQueryPoolNode(dev_data, queryPool)->cb_bindings,
                                {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, pCB);
    }
}

VKAPI_ATTR void VKAPI_CALL CmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    QueryObject query = {queryPool, slot};
    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
    if (cb_state) {
        if (!cb_state->activeQueries.count(query)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_1ae00652, "DS",
                            "Ending a query before it was started: queryPool 0x%" PRIx64 ", index %d. %s",
                            HandleToUint64(queryPool), slot, validation_error_map[VALIDATION_ERROR_1ae00652]);
        }
        skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdEndQuery()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                      VALIDATION_ERROR_1ae02415);
        skip |= ValidateCmd(dev_data, cb_state, CMD_ENDQUERY, "vkCmdEndQuery()");
    }
    lock.unlock();

    if (skip) return;

    dev_data->dispatch_table.CmdEndQuery(commandBuffer, queryPool, slot);

    lock.lock();
    if (cb_state) {
        cb_state->activeQueries.erase(query);
        cb_state->queryUpdates.emplace_back([=](VkQueue q) { return setQueryState(q, commandBuffer, query, true); });
        addCommandBufferBinding(&GetQueryPoolNode(dev_data, queryPool)->cb_bindings,
                                {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, cb_state);
    }
}
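
// Illustrative example (not part of the layer): vkCmdEndQuery() must pair with an earlier
// vkCmdBeginQuery() on the same (queryPool, slot) in this command buffer:
//     vkCmdBeginQuery(cb, queryPool, 0, 0);
//     // ... draws or dispatches being measured ...
//     vkCmdEndQuery(cb, queryPool, 0);   // without the begin above, VALIDATION_ERROR_1ae00652 fires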

VKAPI_ATTR void VKAPI_CALL CmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
                                             uint32_t queryCount) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
    skip |= insideRenderPass(dev_data, cb_state, "vkCmdResetQueryPool()", VALIDATION_ERROR_1c600017);
    skip |= ValidateCmd(dev_data, cb_state, CMD_RESETQUERYPOOL, "vkCmdResetQueryPool()");
    skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdResetQueryPool()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                  VALIDATION_ERROR_1c602415);
    lock.unlock();

    if (skip) return;

    dev_data->dispatch_table.CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);

    lock.lock();
    if (cb_state) {  // Guard against an unknown command buffer before recording state
        for (uint32_t i = 0; i < queryCount; i++) {
            QueryObject query = {queryPool, firstQuery + i};
            cb_state->waitedEventsBeforeQueryReset[query] = cb_state->waitedEvents;
            cb_state->queryUpdates.emplace_back([=](VkQueue q) { return setQueryState(q, commandBuffer, query, false); });
        }
        addCommandBufferBinding(&GetQueryPoolNode(dev_data, queryPool)->cb_bindings,
                                {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, cb_state);
    }
}

static bool IsQueryInvalid(layer_data *dev_data, QUEUE_STATE *queue_data, VkQueryPool queryPool, uint32_t queryIndex) {
    QueryObject query = {queryPool, queryIndex};
    auto query_data = queue_data->queryToStateMap.find(query);
    if (query_data != queue_data->queryToStateMap.end()) {
        if (!query_data->second) return true;
    } else {
        auto it = dev_data->queryToStateMap.find(query);
        if (it == dev_data->queryToStateMap.end() || !it->second) return true;
    }

    return false;
}

static bool validateQuery(VkQueue queue, GLOBAL_CB_NODE *pCB, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
    auto queue_data = GetQueueState(dev_data, queue);
    if (!queue_data) return false;
    for (uint32_t i = 0; i < queryCount; i++) {
        if (IsQueryInvalid(dev_data, queue_data, queryPool, firstQuery + i)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                            "Requesting a copy from query to buffer with invalid query: queryPool 0x%" PRIx64 ", index %d",
                            HandleToUint64(queryPool), firstQuery + i);
        }
    }
    return skip;
}

VKAPI_ATTR void VKAPI_CALL CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
                                                   uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset,
                                                   VkDeviceSize stride, VkQueryResultFlags flags) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);

    auto cb_node = GetCBNode(dev_data, commandBuffer);
    auto dst_buff_state = GetBufferState(dev_data, dstBuffer);
    if (cb_node && dst_buff_state) {
        skip |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_state, "vkCmdCopyQueryPoolResults()", VALIDATION_ERROR_19400674);
        // Validate that DST buffer has correct usage flags set
        skip |=
            ValidateBufferUsageFlags(dev_data, dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, VALIDATION_ERROR_19400672,
                                     "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
        skip |= ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdCopyQueryPoolResults()",
                                      VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, VALIDATION_ERROR_19402415);
        skip |= ValidateCmd(dev_data, cb_node, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
        skip |= insideRenderPass(dev_data, cb_node, "vkCmdCopyQueryPoolResults()", VALIDATION_ERROR_19400017);
    }
    lock.unlock();

    if (skip) return;

    dev_data->dispatch_table.CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer, dstOffset, stride,
                                                     flags);

    lock.lock();
    if (cb_node && dst_buff_state) {
        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_state);
        cb_node->queue_submit_functions.emplace_back([=]() {
            SetBufferMemoryValid(dev_data, dst_buff_state, true);
            return false;
        });
        cb_node->queryUpdates.emplace_back([=](VkQueue q) { return validateQuery(q, cb_node, queryPool, firstQuery, queryCount); });
        addCommandBufferBinding(&GetQueryPoolNode(dev_data, queryPool)->cb_bindings,
                                {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, cb_node);
    }
}
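
// Illustrative example (not part of the layer): the expected query lifecycle that keeps
// validateQuery() above quiet at submit time -- reset, begin/end, then copy:
//     vkCmdResetQueryPool(cb, queryPool, 0, 1);
//     vkCmdBeginQuery(cb, queryPool, 0, 0);
//     // ... work being measured ...
//     vkCmdEndQuery(cb, queryPool, 0);
//     vkCmdCopyQueryPoolResults(cb, queryPool, 0, 1, dstBuffer, 0, sizeof(uint64_t),
//                               VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);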

VKAPI_ATTR void VKAPI_CALL CmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags,
                                            uint32_t offset, uint32_t size, const void *pValues) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
    if (cb_state) {
        skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdPushConstants()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                      VALIDATION_ERROR_1bc02415);
        skip |= ValidateCmd(dev_data, cb_state, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
    }
    skip |= validatePushConstantRange(dev_data, offset, size, "vkCmdPushConstants()");
    if (0 == stageFlags) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_1bc2dc03, "DS",
                        "vkCmdPushConstants() call has no stageFlags set. %s", validation_error_map[VALIDATION_ERROR_1bc2dc03]);
    }

    // Check if specified push constant range falls within a pipeline-defined range which has matching stageFlags.
    // The spec doesn't seem to disallow having multiple push constant ranges with the
    // same offset and size, but different stageFlags.  So we can't just check the
    // stageFlags in the first range with matching offset and size.
    if (!skip) {
        const auto &ranges = getPipelineLayout(dev_data, layout)->push_constant_ranges;
        bool found_matching_range = false;
        for (const auto &range : ranges) {
            if ((stageFlags == range.stageFlags) && (offset >= range.offset) && (offset + size <= range.offset + range.size)) {
                found_matching_range = true;
                break;
            }
        }
        if (!found_matching_range) {
            skip |= log_msg(
                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_1bc002de, "DS",
                "vkCmdPushConstants() stageFlags = 0x%" PRIx32
                " do not match the stageFlags in any of the ranges with offset = %d and size = %d in pipeline layout 0x%" PRIx64
                ". %s",
                (uint32_t)stageFlags, offset, size, HandleToUint64(layout), validation_error_map[VALIDATION_ERROR_1bc002de]);
        }
    }
    lock.unlock();
    if (!skip) dev_data->dispatch_table.CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
}
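
// Illustrative example (not part of the layer): the update below matches a pipeline layout
// created with VkPushConstantRange{VK_SHADER_STAGE_VERTEX_BIT, 0, 64}, so the loop above
// finds a range with identical stageFlags that fully contains [offset, offset + size):
//     float mvp[16];  // 64 bytes
//     vkCmdPushConstants(cb, layout, VK_SHADER_STAGE_VERTEX_BIT, 0, sizeof(mvp), mvp);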

VKAPI_ATTR void VKAPI_CALL CmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
                                             VkQueryPool queryPool, uint32_t slot) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
    if (cb_state) {
        skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdWriteTimestamp()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                      VALIDATION_ERROR_1e802415);
        skip |= ValidateCmd(dev_data, cb_state, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
    }
    lock.unlock();

    if (skip) return;

    dev_data->dispatch_table.CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot);

    lock.lock();
    if (cb_state) {
        QueryObject query = {queryPool, slot};
        cb_state->queryUpdates.emplace_back([=](VkQueue q) { return setQueryState(q, commandBuffer, query, true); });
    }
}

static bool MatchUsage(layer_data *dev_data, uint32_t count, const VkAttachmentReference *attachments,
                       const VkFramebufferCreateInfo *fbci, VkImageUsageFlagBits usage_flag,
                       UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool skip = false;

    for (uint32_t attach = 0; attach < count; attach++) {
        if (attachments[attach].attachment != VK_ATTACHMENT_UNUSED) {
            // Attachment counts are verified elsewhere, but prevent an invalid access
            if (attachments[attach].attachment < fbci->attachmentCount) {
                const VkImageView *image_view = &fbci->pAttachments[attachments[attach].attachment];
                auto view_state = GetImageViewState(dev_data, *image_view);
                if (view_state) {
                    // Check the image state before dereferencing it -- GetImageState() can return null
                    auto image_state = GetImageState(dev_data, view_state->create_info.image);
                    if (image_state != nullptr) {
                        const VkImageCreateInfo *ici = &image_state->createInfo;
                        if ((ici->usage & usage_flag) == 0) {
                            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__, error_code, "DS",
                                            "vkCreateFramebuffer:  Framebuffer Attachment (%d) conflicts with the image's "
                                            "IMAGE_USAGE flags (%s). %s",
                                            attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag),
                                            validation_error_map[error_code]);
                        }
                    }
                }
            }
        }
    }
    return skip;
}
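
// Illustrative example (not part of the layer): an image used as a framebuffer color attachment
// must have been created with the matching usage bit, e.g.:
//     VkImageCreateInfo ici = {};
//     ici.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
// Omitting VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT would make MatchUsage() above report
// VALIDATION_ERROR_094006da for any subpass that references the attachment as a color target.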

// Validate VkFramebufferCreateInfo which includes:
// 1. attachmentCount equals renderPass attachmentCount
// 2. corresponding framebuffer and renderpass attachments have matching formats
// 3. corresponding framebuffer and renderpass attachments have matching sample counts
// 4. fb attachments only have a single mip level
// 5. fb attachment dimensions are each at least as large as the fb
// 6. fb attachments use identity swizzle
// 7. fb attachments used by renderPass for color/input/ds have correct usage bit set
// 8. fb dimensions are within physical device limits
static bool ValidateFramebufferCreateInfo(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
    bool skip = false;

    auto rp_state = GetRenderPassState(dev_data, pCreateInfo->renderPass);
    if (rp_state) {
        const VkRenderPassCreateInfo *rpci = rp_state->createInfo.ptr();
        if (rpci->attachmentCount != pCreateInfo->attachmentCount) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                            HandleToUint64(pCreateInfo->renderPass), __LINE__, VALIDATION_ERROR_094006d8, "DS",
                            "vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount of %u does not match attachmentCount "
                            "of %u of renderPass (0x%" PRIx64 ") being used to create Framebuffer. %s",
                            pCreateInfo->attachmentCount, rpci->attachmentCount, HandleToUint64(pCreateInfo->renderPass),
                            validation_error_map[VALIDATION_ERROR_094006d8]);
        } else {
            // attachmentCounts match, so make sure corresponding attachment details line up
            const VkImageView *image_views = pCreateInfo->pAttachments;
            for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
                auto view_state = GetImageViewState(dev_data, image_views[i]);
                auto &ivci = view_state->create_info;
                if (ivci.format != rpci->pAttachments[i].format) {
                    skip |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                                HandleToUint64(pCreateInfo->renderPass), __LINE__, VALIDATION_ERROR_094006e0, "DS",
                                "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has format of %s that does not "
                                "match the format of %s used by the corresponding attachment for renderPass (0x%" PRIx64 "). %s",
                                i, string_VkFormat(ivci.format), string_VkFormat(rpci->pAttachments[i].format),
                                HandleToUint64(pCreateInfo->renderPass), validation_error_map[VALIDATION_ERROR_094006e0]);
                }
                const VkImageCreateInfo *ici = &GetImageState(dev_data, ivci.image)->createInfo;
                if (ici->samples != rpci->pAttachments[i].samples) {
                    skip |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                        HandleToUint64(pCreateInfo->renderPass), __LINE__, VALIDATION_ERROR_094006e2, "DS",
                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has %s samples that do not match the %s "
                        "samples used by the corresponding attachment for renderPass (0x%" PRIx64 "). %s",
                        i, string_VkSampleCountFlagBits(ici->samples), string_VkSampleCountFlagBits(rpci->pAttachments[i].samples),
                        HandleToUint64(pCreateInfo->renderPass), validation_error_map[VALIDATION_ERROR_094006e2]);
                }
                // Verify that view only has a single mip level
                if (ivci.subresourceRange.levelCount != 1) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
                                    0, __LINE__, VALIDATION_ERROR_094006e6, "DS",
                                    "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has mip levelCount of %u but "
                                    "only a single mip level (levelCount == 1) is allowed when creating a Framebuffer. %s",
                                    i, ivci.subresourceRange.levelCount, validation_error_map[VALIDATION_ERROR_094006e6]);
                }
                const uint32_t mip_level = ivci.subresourceRange.baseMipLevel;
                uint32_t mip_width = max(1u, ici->extent.width >> mip_level);
                uint32_t mip_height = max(1u, ici->extent.height >> mip_level);
                if ((ivci.subresourceRange.layerCount < pCreateInfo->layers) || (mip_width < pCreateInfo->width) ||
                    (mip_height < pCreateInfo->height)) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
                                    0, __LINE__, VALIDATION_ERROR_094006e4, "DS",
                                    "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has dimensions "
                                    "smaller than the corresponding framebuffer dimensions. Here are the respective dimensions for "
                                    "attachment #%u, framebuffer:\n"
                                    "width: %u, %u\n"
                                    "height: %u, %u\n"
                                    "layerCount: %u, %u\n%s",
                                    i, ivci.subresourceRange.baseMipLevel, i, mip_width, pCreateInfo->width, mip_height,
                                    pCreateInfo->height, ivci.subresourceRange.layerCount, pCreateInfo->layers,
                                    validation_error_map[VALIDATION_ERROR_094006e4]);
                }
                if (((ivci.components.r != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.r != VK_COMPONENT_SWIZZLE_R)) ||
                    ((ivci.components.g != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.g != VK_COMPONENT_SWIZZLE_G)) ||
                    ((ivci.components.b != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.b != VK_COMPONENT_SWIZZLE_B)) ||
                    ((ivci.components.a != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.a != VK_COMPONENT_SWIZZLE_A))) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
                                    0, __LINE__, VALIDATION_ERROR_094006e8, "DS",
                                    "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has a non-identity swizzle. All "
                                    "framebuffer attachments must have been created with the identity swizzle. Here are the actual "
                                    "swizzle values:\n"
                                    "r swizzle = %s\n"
                                    "g swizzle = %s\n"
                                    "b swizzle = %s\n"
                                    "a swizzle = %s\n"
                                    "%s",
                                    i, string_VkComponentSwizzle(ivci.components.r), string_VkComponentSwizzle(ivci.components.g),
                                    string_VkComponentSwizzle(ivci.components.b), string_VkComponentSwizzle(ivci.components.a),
                                    validation_error_map[VALIDATION_ERROR_094006e8]);
                }
            }
        }
        // Verify correct attachment usage flags
        for (uint32_t subpass = 0; subpass < rpci->subpassCount; subpass++) {
            // Verify input attachments:
            skip |=
                MatchUsage(dev_data, rpci->pSubpasses[subpass].inputAttachmentCount, rpci->pSubpasses[subpass].pInputAttachments,
                           pCreateInfo, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, VALIDATION_ERROR_094006de);
            // Verify color attachments:
            skip |=
                MatchUsage(dev_data, rpci->pSubpasses[subpass].colorAttachmentCount, rpci->pSubpasses[subpass].pColorAttachments,
                           pCreateInfo, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VALIDATION_ERROR_094006da);
            // Verify depth/stencil attachments:
            if (rpci->pSubpasses[subpass].pDepthStencilAttachment != nullptr) {
                skip |= MatchUsage(dev_data, 1, rpci->pSubpasses[subpass].pDepthStencilAttachment, pCreateInfo,
                                   VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VALIDATION_ERROR_094006dc);
            }
        }
    }
    // Verify FB dimensions are within physical device limits
    if (pCreateInfo->width > dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                        VALIDATION_ERROR_094006ec, "DS",
                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width exceeds physical device limits. Requested "
                        "width: %u, device max: %u\n%s",
                        pCreateInfo->width, dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth,
                        validation_error_map[VALIDATION_ERROR_094006ec]);
    }
    if (pCreateInfo->height > dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                        VALIDATION_ERROR_094006f0, "DS",
                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height exceeds physical device limits. Requested "
                        "height: %u, device max: %u\n%s",
                        pCreateInfo->height, dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight,
                        validation_error_map[VALIDATION_ERROR_094006f0]);
    }
    if (pCreateInfo->layers > dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                        VALIDATION_ERROR_094006f4, "DS",
                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers exceeds physical device limits. Requested "
                        "layers: %u, device max: %u\n%s",
                        pCreateInfo->layers, dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers,
                        validation_error_map[VALIDATION_ERROR_094006f4]);
    }
    // Verify FB dimensions are greater than zero
    if (pCreateInfo->width <= 0) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                        VALIDATION_ERROR_094006ea, "DS",
                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width must be greater than zero. %s",
                        validation_error_map[VALIDATION_ERROR_094006ea]);
    }
    if (pCreateInfo->height <= 0) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                        VALIDATION_ERROR_094006ee, "DS",
                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height must be greater than zero. %s",
                        validation_error_map[VALIDATION_ERROR_094006ee]);
    }
    if (pCreateInfo->layers <= 0) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                        VALIDATION_ERROR_094006f2, "DS",
                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers must be greater than zero. %s",
                        validation_error_map[VALIDATION_ERROR_094006f2]);
    }
    return skip;
}
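
// Illustrative example (not part of the layer): the mip-dimension check above uses the
// dimensions of the attachment's selected mip level, not the base level. For a 1024x1024
// image viewed with subresourceRange.baseMipLevel == 2:
//     mip_width  = max(1u, 1024u >> 2) == 256
//     mip_height = max(1u, 1024u >> 2) == 256
// so a framebuffer created with width or height greater than 256 triggers VALIDATION_ERROR_094006e4.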

// Validate VkFramebufferCreateInfo state prior to calling down chain to create Framebuffer object
//  Return true if an error is encountered and callback returns true to skip call down chain
//   false indicates that call down chain should proceed
static bool PreCallValidateCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
    // TODO : Verify that the renderPass the FB is created with is compatible with the FB
    bool skip = false;
    skip |= ValidateFramebufferCreateInfo(dev_data, pCreateInfo);
    return skip;
}

// CreateFramebuffer state has been validated and call down chain completed so record new framebuffer object
static void PostCallRecordCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo, VkFramebuffer fb) {
    // Shadow create info and store in map
    std::unique_ptr<FRAMEBUFFER_STATE> fb_state(
        new FRAMEBUFFER_STATE(fb, pCreateInfo, GetRenderPassStateSharedPtr(dev_data, pCreateInfo->renderPass)));

    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
        VkImageView view = pCreateInfo->pAttachments[i];
        auto view_state = GetImageViewState(dev_data, view);
        if (!view_state) {
            continue;
        }
        MT_FB_ATTACHMENT_INFO fb_info;
        fb_info.view_state = view_state;
        fb_info.image = view_state->create_info.image;
        fb_state->attachments.push_back(fb_info);
    }
    dev_data->frameBufferMap[fb] = std::move(fb_state);
}

VKAPI_ATTR VkResult VKAPI_CALL CreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
                                                 const VkAllocationCallbacks *pAllocator, VkFramebuffer *pFramebuffer) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateCreateFramebuffer(dev_data, pCreateInfo);
    lock.unlock();

    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->dispatch_table.CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);

    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordCreateFramebuffer(dev_data, pCreateInfo, *pFramebuffer);
        lock.unlock();
    }
    return result;
}

static bool FindDependency(const uint32_t index, const uint32_t dependent, const std::vector<DAGNode> &subpass_to_node,
                           std::unordered_set<uint32_t> &processed_nodes) {
    // If we have already checked this node we have not found a dependency path so return false.
    if (processed_nodes.count(index)) return false;
    processed_nodes.insert(index);
    const DAGNode &node = subpass_to_node[index];
    // Look for a dependency path. If one exists return true else recurse on the previous nodes.
    if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
        for (auto elem : node.prev) {
            if (FindDependency(elem, dependent, subpass_to_node, processed_nodes)) return true;
        }
    } else {
        return true;
    }
    return false;
}
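
// Illustrative example (not part of the layer): for a render pass whose subpass DAG is
// 0 -> 1 -> 2 (each node's prev list holds its predecessor), FindDependency(2, 0, ...)
// returns true: node 2's prev list lacks 0, so the search recurses to node 1, whose
// prev list does contain 0.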

static bool CheckDependencyExists(const layer_data *dev_data, const uint32_t subpass,
                                  const std::vector<uint32_t> &dependent_subpasses, const std::vector<DAGNode> &subpass_to_node,
                                  bool &skip) {
    bool result = true;
    // Loop through all subpasses that share the same attachment and make sure a dependency exists
    for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
        if (static_cast<uint32_t>(subpass) == dependent_subpasses[k]) continue;
        const DAGNode &node = subpass_to_node[subpass];
        // Check for a specified dependency between the two nodes. If one exists we are done.
        auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]);
        auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]);
        if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
            // If no explicit dependency exists, an implicit one still might. If not, throw an error.
            std::unordered_set<uint32_t> processed_nodes;
            if (!(FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) ||
                  FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes))) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                                "A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
                                dependent_subpasses[k]);
                result = false;
            }
        }
    }
    return result;
}

static bool CheckPreserved(const layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo, const int index,
                           const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, bool &skip) {
    const DAGNode &node = subpass_to_node[index];
    // If this node writes to the attachment return true as next nodes need to preserve the attachment.
    const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
        if (attachment == subpass.pColorAttachments[j].attachment) return true;
    }
    for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
        if (attachment == subpass.pInputAttachments[j].attachment) return true;
    }
    if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
        if (attachment == subpass.pDepthStencilAttachment->attachment) return true;
    }
    bool result = false;
    // Loop through previous nodes and see if any of them write to the attachment.
    for (auto elem : node.prev) {
        result |= CheckPreserved(dev_data, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip);
    }
    // If the attachment was written to by a previous node then this node needs to preserve it.
    if (result && depth > 0) {
        bool has_preserved = false;
        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
            if (subpass.pPreserveAttachments[j] == attachment) {
                has_preserved = true;
                break;
            }
        }
        if (!has_preserved) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                            "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
        }
    }
    return result;
}
8427 
8428 template <class T>
8429 bool isRangeOverlapping(T offset1, T size1, T offset2, T size2) {
8430     // Two half-open ranges [offset, offset + size) intersect iff each one starts before the other ends.
8431     return (offset1 < (offset2 + size2)) && (offset2 < (offset1 + size1));
8432 }
8433 
8434 bool isRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
8435     return (isRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
8436             isRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
8437 }
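// Illustrative sketch (not part of the layer): with the half-open interval test above, two views of
// the same image whose mip ranges share level 1 are reported as overlapping:
//
//     VkImageSubresourceRange a = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 2, 0, 1};  // mips [0,2), layer 0
//     VkImageSubresourceRange b = {VK_IMAGE_ASPECT_COLOR_BIT, 1, 2, 0, 1};  // mips [1,3), layer 0
//     assert(isRegionOverlapping(a, b));  // identical layer ranges, mip ranges meet at level 1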
8438 
8439 static bool ValidateDependencies(const layer_data *dev_data, FRAMEBUFFER_STATE const *framebuffer,
8440                                  RENDER_PASS_STATE const *renderPass) {
8441     bool skip = false;
8442     auto const pFramebufferInfo = framebuffer->createInfo.ptr();
8443     auto const pCreateInfo = renderPass->createInfo.ptr();
8444     auto const &subpass_to_node = renderPass->subpassToNode;
8445     std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount);
8446     std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount);
8447     std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount);
8448     // Find overlapping attachments
8449     for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
8450         for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
8451             VkImageView viewi = pFramebufferInfo->pAttachments[i];
8452             VkImageView viewj = pFramebufferInfo->pAttachments[j];
8453             if (viewi == viewj) {
8454                 overlapping_attachments[i].push_back(j);
8455                 overlapping_attachments[j].push_back(i);
8456                 continue;
8457             }
8458             auto view_state_i = GetImageViewState(dev_data, viewi);
8459             auto view_state_j = GetImageViewState(dev_data, viewj);
8460             if (!view_state_i || !view_state_j) {
8461                 continue;
8462             }
8463             auto view_ci_i = view_state_i->create_info;
8464             auto view_ci_j = view_state_j->create_info;
8465             if (view_ci_i.image == view_ci_j.image && isRegionOverlapping(view_ci_i.subresourceRange, view_ci_j.subresourceRange)) {
8466                 overlapping_attachments[i].push_back(j);
8467                 overlapping_attachments[j].push_back(i);
8468                 continue;
8469             }
8470             auto image_data_i = GetImageState(dev_data, view_ci_i.image);
8471             auto image_data_j = GetImageState(dev_data, view_ci_j.image);
8472             if (!image_data_i || !image_data_j) {
8473                 continue;
8474             }
8475             if (image_data_i->binding.mem == image_data_j->binding.mem &&
8476                 isRangeOverlapping(image_data_i->binding.offset, image_data_i->binding.size, image_data_j->binding.offset,
8477                                    image_data_j->binding.size)) {
8478                 overlapping_attachments[i].push_back(j);
8479                 overlapping_attachments[j].push_back(i);
8480             }
8481         }
8482     }
8483     for (uint32_t i = 0; i < overlapping_attachments.size(); ++i) {
8484         uint32_t attachment = i;
8485         for (auto other_attachment : overlapping_attachments[i]) {
8486             if (!(pCreateInfo->pAttachments[attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
8487                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT,
8488                                 HandleToUint64(framebuffer->framebuffer), __LINE__, VALIDATION_ERROR_12200682, "DS",
8489                                 "Attachment %d aliases attachment %d but doesn't set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT. %s",
8490                                 attachment, other_attachment, validation_error_map[VALIDATION_ERROR_12200682]);
8491             }
8492             if (!(pCreateInfo->pAttachments[other_attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
8493                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT,
8494                                 HandleToUint64(framebuffer->framebuffer), __LINE__, VALIDATION_ERROR_12200682, "DS",
8495                                 "Attachment %d aliases attachment %d but doesn't set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT. %s",
8496                                 other_attachment, attachment, validation_error_map[VALIDATION_ERROR_12200682]);
8497             }
8498         }
8499     }
8500     // For each attachment, find the subpasses that use it.
8501     unordered_set<uint32_t> attachmentIndices;
8502     for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8503         const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8504         attachmentIndices.clear();
8505         for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8506             uint32_t attachment = subpass.pInputAttachments[j].attachment;
8507             if (attachment == VK_ATTACHMENT_UNUSED) continue;
8508             input_attachment_to_subpass[attachment].push_back(i);
8509             for (auto overlapping_attachment : overlapping_attachments[attachment]) {
8510                 input_attachment_to_subpass[overlapping_attachment].push_back(i);
8511             }
8512         }
8513         for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8514             uint32_t attachment = subpass.pColorAttachments[j].attachment;
8515             if (attachment == VK_ATTACHMENT_UNUSED) continue;
8516             output_attachment_to_subpass[attachment].push_back(i);
8517             for (auto overlapping_attachment : overlapping_attachments[attachment]) {
8518                 output_attachment_to_subpass[overlapping_attachment].push_back(i);
8519             }
8520             attachmentIndices.insert(attachment);
8521         }
8522         if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
8523             uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
8524             output_attachment_to_subpass[attachment].push_back(i);
8525             for (auto overlapping_attachment : overlapping_attachments[attachment]) {
8526                 output_attachment_to_subpass[overlapping_attachment].push_back(i);
8527             }
8528 
8529             if (attachmentIndices.count(attachment)) {
8530                 skip |=
8531                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
8532                             __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
8533                             "Cannot use same attachment (%u) as both color and depth output in same subpass (%u).", attachment, i);
8534             }
8535         }
8536     }
8537     // If a dependency is needed, make sure one exists
8538     for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8539         const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8540         // If the attachment is an input, then every subpass that writes to it must have a dependency relationship with this one
8541         for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8542             uint32_t attachment = subpass.pInputAttachments[j].attachment;
8543             if (attachment == VK_ATTACHMENT_UNUSED) continue;
8544             CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip);
8545         }
8546         // If the attachment is an output, then every subpass that uses the attachment must have a dependency relationship with this one
8547         for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8548             uint32_t attachment = subpass.pColorAttachments[j].attachment;
8549             if (attachment == VK_ATTACHMENT_UNUSED) continue;
8550             CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip);
8551             CheckDependencyExists(dev_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip);
8552         }
8553         if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
8554             const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
8555             CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip);
8556             CheckDependencyExists(dev_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip);
8557         }
8558     }
8559     // Loop through implicit dependencies: if this pass reads an attachment, make sure the attachment is preserved in every
8560     // pass between the one that wrote it and this one.
8561     for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8562         const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8563         for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8564             CheckPreserved(dev_data, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip);
8565         }
8566     }
8567     return skip;
8568 }
8569 
8570 static bool CreatePassDAG(const layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo,
8571                           std::vector<DAGNode> &subpass_to_node, std::vector<bool> &has_self_dependency,
8572                           std::vector<int32_t> &subpass_to_dep_index) {
8573     bool skip = false;
8574     for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8575         DAGNode &subpass_node = subpass_to_node[i];
8576         subpass_node.pass = i;
8577         subpass_to_dep_index[i] = -1;  // Default to no dependency and overwrite below as needed
8578     }
8579     for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
8580         const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i];
8581         if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL || dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
8582             if (dependency.srcSubpass == dependency.dstSubpass) {
8583                 skip |=
8584                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
8585                             __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS", "The src and dest subpasses cannot both be external.");
8586             }
8587         } else if (dependency.srcSubpass > dependency.dstSubpass) {
8588             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
8589                             __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
8590                             "Dependency graph must be specified such that an earlier pass cannot depend on a later pass.");
8591         } else if (dependency.srcSubpass == dependency.dstSubpass) {
8592             has_self_dependency[dependency.srcSubpass] = true;
8593             subpass_to_dep_index[dependency.srcSubpass] = i;
8594         } else {
8595             subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass);
8596             subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass);
8597         }
8598     }
8599     return skip;
8600 }
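// Illustrative note (not part of the layer): a dependency with srcSubpass 0 and dstSubpass 1
// produces subpass_to_node[0].next == {1} and subpass_to_node[1].prev == {0}; a dependency with
// srcSubpass == dstSubpass == 0 instead sets has_self_dependency[0] and subpass_to_dep_index[0].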
8601 
8602 VKAPI_ATTR VkResult VKAPI_CALL CreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
8603                                                   const VkAllocationCallbacks *pAllocator, VkShaderModule *pShaderModule) {
8604     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
8605     bool spirv_valid;
8606 
8607     if (PreCallValidateCreateShaderModule(dev_data, pCreateInfo, &spirv_valid)) return VK_ERROR_VALIDATION_FAILED_EXT;
8608 
8609     VkResult res = dev_data->dispatch_table.CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);
8610 
8611     if (res == VK_SUCCESS) {
8612         lock_guard_t lock(global_lock);
8613         unique_ptr<shader_module> new_shader_module(spirv_valid ? new shader_module(pCreateInfo) : new shader_module());
8614         dev_data->shaderModuleMap[*pShaderModule] = std::move(new_shader_module);
8615     }
8616     return res;
8617 }
8618 
8619 static bool ValidateAttachmentIndex(layer_data *dev_data, uint32_t attachment, uint32_t attachment_count, const char *type) {
8620     bool skip = false;
8621     if (attachment >= attachment_count && attachment != VK_ATTACHMENT_UNUSED) {
8622         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
8623                         VALIDATION_ERROR_12200684, "DS",
8624                         "CreateRenderPass: %s attachment %d must be less than the total number of attachments %d. %s", type,
8625                         attachment, attachment_count, validation_error_map[VALIDATION_ERROR_12200684]);
8626     }
8627     return skip;
8628 }
8629 
8630 static bool IsPowerOfTwo(unsigned x) { return x && !(x & (x - 1)); }
8631 
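// Note: VkSampleCountFlagBits values are single bits (1, 2, 4, ...), so OR'ing the sample counts of
// a subpass's attachments yields a mask with exactly one bit set when all counts match; a mask that
// is not a power of two therefore indicates mixed sample counts (checked below unless
// VK_AMD_mixed_attachment_samples is enabled).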
8632 static bool ValidateRenderpassAttachmentUsage(layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo) {
8633     bool skip = false;
8634     for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8635         const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8636         if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS) {
8637             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
8638                             __LINE__, VALIDATION_ERROR_14000698, "DS",
8639                             "CreateRenderPass: Pipeline bind point for subpass %d must be VK_PIPELINE_BIND_POINT_GRAPHICS. %s", i,
8640                             validation_error_map[VALIDATION_ERROR_14000698]);
8641         }
8642 
8643         for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
8644             uint32_t attachment = subpass.pPreserveAttachments[j];
8645             if (attachment == VK_ATTACHMENT_UNUSED) {
8646                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
8647                                 __LINE__, VALIDATION_ERROR_140006aa, "DS",
8648                                 "CreateRenderPass:  Preserve attachment (%d) must not be VK_ATTACHMENT_UNUSED. %s", j,
8649                                 validation_error_map[VALIDATION_ERROR_140006aa]);
8650             } else {
8651                 skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Preserve");
8652 
8653                 bool found = (subpass.pDepthStencilAttachment != NULL && subpass.pDepthStencilAttachment->attachment == attachment);
8654                 for (uint32_t r = 0; !found && r < subpass.inputAttachmentCount; ++r) {
8655                     found = (subpass.pInputAttachments[r].attachment == attachment);
8656                 }
8657                 for (uint32_t r = 0; !found && r < subpass.colorAttachmentCount; ++r) {
8658                     found = (subpass.pColorAttachments[r].attachment == attachment) ||
8659                             (subpass.pResolveAttachments != NULL && subpass.pResolveAttachments[r].attachment == attachment);
8660                 }
8661                 if (found) {
8662                     skip |= log_msg(
8663                         dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
8664                         VALIDATION_ERROR_140006ac, "DS",
8665                         "CreateRenderPass: subpass %u pPreserveAttachments[%u] (%u) must not be used elsewhere in the subpass. %s",
8666                         i, j, attachment, validation_error_map[VALIDATION_ERROR_140006ac]);
8667                 }
8668             }
8669         }
8670 
8671         auto subpass_performs_resolve =
8672             subpass.pResolveAttachments &&
8673             std::any_of(subpass.pResolveAttachments, subpass.pResolveAttachments + subpass.colorAttachmentCount,
8674                         [](VkAttachmentReference ref) { return ref.attachment != VK_ATTACHMENT_UNUSED; });
8675 
8676         unsigned sample_count = 0;
8677 
8678         for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8679             uint32_t attachment;
8680             if (subpass.pResolveAttachments) {
8681                 attachment = subpass.pResolveAttachments[j].attachment;
8682                 skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Resolve");
8683 
8684                 if (!skip && attachment != VK_ATTACHMENT_UNUSED &&
8685                     pCreateInfo->pAttachments[attachment].samples != VK_SAMPLE_COUNT_1_BIT) {
8686                     skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
8687                                     0, __LINE__, VALIDATION_ERROR_140006a2, "DS",
8688                                     "CreateRenderPass:  Subpass %u requests multisample resolve into attachment %u, which must "
8689                                     "have VK_SAMPLE_COUNT_1_BIT but has %s. %s",
8690                                     i, attachment, string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment].samples),
8691                                     validation_error_map[VALIDATION_ERROR_140006a2]);
8692                 }
8693 
8694                 if (!skip && subpass.pResolveAttachments[j].attachment != VK_ATTACHMENT_UNUSED &&
8695                     subpass.pColorAttachments[j].attachment == VK_ATTACHMENT_UNUSED) {
8696                     skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
8697                                     0, __LINE__, VALIDATION_ERROR_1400069e, "DS",
8698                                     "CreateRenderPass:  Subpass %u requests multisample resolve into attachment %u, but the "
8699                                     "corresponding color attachment is VK_ATTACHMENT_UNUSED. %s",
8700                                     i, attachment, validation_error_map[VALIDATION_ERROR_1400069e]);
8701                 }
8702             }
8703             attachment = subpass.pColorAttachments[j].attachment;
8704             skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Color");
8705 
8706             if (!skip && attachment != VK_ATTACHMENT_UNUSED) {
8707                 sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples;
8708 
8709                 if (subpass_performs_resolve && pCreateInfo->pAttachments[attachment].samples == VK_SAMPLE_COUNT_1_BIT) {
8710                     skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
8711                                     0, __LINE__, VALIDATION_ERROR_140006a0, "DS",
8712                                     "CreateRenderPass:  Subpass %u requests multisample resolve from attachment %u which has "
8713                                     "VK_SAMPLE_COUNT_1_BIT. %s",
8714                                     i, attachment, validation_error_map[VALIDATION_ERROR_140006a0]);
8715                 }
8716 
8717                 if (subpass_performs_resolve && subpass.pResolveAttachments[j].attachment != VK_ATTACHMENT_UNUSED) {
8718                     const auto &color_desc = pCreateInfo->pAttachments[attachment];
8719                     const auto &resolve_desc = pCreateInfo->pAttachments[subpass.pResolveAttachments[j].attachment];
8720                     if (color_desc.format != resolve_desc.format) {
8721                         skip |=
8722                             log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
8723                                     0, __LINE__, VALIDATION_ERROR_140006a4, "DS",
8724                                     "CreateRenderPass:  Subpass %u pColorAttachments[%u] resolves to an attachment with a "
8725                                     "different format. color format: %u, resolve format: %u. %s",
8726                                     i, j, color_desc.format, resolve_desc.format, validation_error_map[VALIDATION_ERROR_140006a4]);
8727                     }
8728                 }
8729 
8730                 if (dev_data->extensions.vk_amd_mixed_attachment_samples && subpass.pDepthStencilAttachment &&
8731                     subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
8732                     const auto depth_stencil_sample_count =
8733                         pCreateInfo->pAttachments[subpass.pDepthStencilAttachment->attachment].samples;
8734                     if (pCreateInfo->pAttachments[attachment].samples > depth_stencil_sample_count) {
8735                         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
8736                                         VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__, VALIDATION_ERROR_14000bc4, "DS",
8737                                         "CreateRenderPass:  Subpass %u pColorAttachments[%u] has %s which is larger than "
8738                                         "depth/stencil attachment %s. %s",
8739                                         i, j, string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment].samples),
8740                                         string_VkSampleCountFlagBits(depth_stencil_sample_count),
8741                                         validation_error_map[VALIDATION_ERROR_14000bc4]);
8742                     }
8743                 }
8744             }
8745         }
8746 
8747         if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
8748             uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
8749             skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Depth stencil");
8750 
8751             if (!skip && attachment != VK_ATTACHMENT_UNUSED) {
8752                 sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples;
8753             }
8754         }
8755 
8756         for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8757             uint32_t attachment = subpass.pInputAttachments[j].attachment;
8758             skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Input");
8759         }
8760 
8761         if (!dev_data->extensions.vk_amd_mixed_attachment_samples && sample_count && !IsPowerOfTwo(sample_count)) {
8762             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
8763                             __LINE__, VALIDATION_ERROR_0082b401, "DS",
8764                             "CreateRenderPass:  Subpass %u attempts to render to attachments with inconsistent sample counts. %s",
8765                             i, validation_error_map[VALIDATION_ERROR_0082b401]);
8766         }
8767     }
8768     return skip;
8769 }
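// Illustrative sketch (not part of the layer): a resolve setup that passes the checks above pairs a
// multisampled color attachment with a single-sample resolve target of the same format
// ('attachments' is a hypothetical VkAttachmentDescription array):
//
//     attachments[0].samples = VK_SAMPLE_COUNT_4_BIT;  // color attachment being resolved
//     attachments[1].samples = VK_SAMPLE_COUNT_1_BIT;  // resolve target, same VkFormat as [0]
//     VkAttachmentReference colorRef = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};
//     VkAttachmentReference resolveRef = {1, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};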
8770 
8771 static void MarkAttachmentFirstUse(RENDER_PASS_STATE *render_pass, uint32_t index, bool is_read) {
8772     if (index == VK_ATTACHMENT_UNUSED) return;
8773 
8774     if (!render_pass->attachment_first_read.count(index)) render_pass->attachment_first_read[index] = is_read;
8775 }
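// Illustrative note (not part of the layer): only the first call for a given index sticks. Because
// CreateRenderPass() below marks color, resolve, and depth/stencil uses (is_read = false) before
// input uses (is_read = true) within each subpass, an attachment used both ways in one subpass is
// recorded as first written rather than first read.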
8776 
8777 VKAPI_ATTR VkResult VKAPI_CALL CreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
8778                                                 const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) {
8779     bool skip = false;
8780     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
8781 
8782     unique_lock_t lock(global_lock);
8783     // TODO: As part of wrapping up the mem_tracker/core_validation merge the following routine should be consolidated with
8784     //       ValidateLayouts.
8785     skip |= ValidateRenderpassAttachmentUsage(dev_data, pCreateInfo);
8786     for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
8787         skip |= ValidateStageMaskGsTsEnables(dev_data, pCreateInfo->pDependencies[i].srcStageMask, "vkCreateRenderPass()",
8788                                              VALIDATION_ERROR_13e006b8, VALIDATION_ERROR_13e006bc);
8789         skip |= ValidateStageMaskGsTsEnables(dev_data, pCreateInfo->pDependencies[i].dstStageMask, "vkCreateRenderPass()",
8790                                              VALIDATION_ERROR_13e006ba, VALIDATION_ERROR_13e006be);
8791     }
8792     if (!skip) {
8793         skip |= ValidateLayouts(dev_data, device, pCreateInfo);
8794     }
8795     lock.unlock();
8796 
8797     if (skip) {
8798         return VK_ERROR_VALIDATION_FAILED_EXT;
8799     }
8800 
8801     VkResult result = dev_data->dispatch_table.CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);
8802 
8803     if (VK_SUCCESS == result) {
8804         lock.lock();
8805 
8806         std::vector<bool> has_self_dependency(pCreateInfo->subpassCount);
8807         std::vector<DAGNode> subpass_to_node(pCreateInfo->subpassCount);
8808         std::vector<int32_t> subpass_to_dep_index(pCreateInfo->subpassCount);
8809         skip |= CreatePassDAG(dev_data, pCreateInfo, subpass_to_node, has_self_dependency, subpass_to_dep_index);
8810 
8811         auto render_pass = std::make_shared<RENDER_PASS_STATE>(pCreateInfo);
8812         render_pass->renderPass = *pRenderPass;
8813         render_pass->hasSelfDependency = has_self_dependency;
8814         render_pass->subpassToNode = subpass_to_node;
8815         render_pass->subpass_to_dependency_index = subpass_to_dep_index;
8816 
8817         for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8818             const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8819             for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8820                 MarkAttachmentFirstUse(render_pass.get(), subpass.pColorAttachments[j].attachment, false);
8821 
8822                 // resolve attachments are considered to be written
8823                 if (subpass.pResolveAttachments) {
8824                     MarkAttachmentFirstUse(render_pass.get(), subpass.pResolveAttachments[j].attachment, false);
8825                 }
8826             }
8827             if (subpass.pDepthStencilAttachment) {
8828                 MarkAttachmentFirstUse(render_pass.get(), subpass.pDepthStencilAttachment->attachment, false);
8829             }
8830             for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8831                 MarkAttachmentFirstUse(render_pass.get(), subpass.pInputAttachments[j].attachment, true);
8832             }
8833         }
8834 
8835         dev_data->renderPassMap[*pRenderPass] = std::move(render_pass);
8836     }
8837     return result;
8838 }
8839 
8840 static bool validatePrimaryCommandBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, char const *cmd_name,
8841                                          UNIQUE_VALIDATION_ERROR_CODE error_code) {
8842     bool skip = false;
8843     if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
8844         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8845                         HandleToUint64(pCB->commandBuffer), __LINE__, error_code, "DS",
8846                         "Cannot execute command %s on a secondary command buffer. %s", cmd_name, validation_error_map[error_code]);
8847     }
8848     return skip;
8849 }
8850 
8851 static bool VerifyRenderAreaBounds(const layer_data *dev_data, const VkRenderPassBeginInfo *pRenderPassBegin) {
8852     bool skip = false;
8853     const safe_VkFramebufferCreateInfo *pFramebufferInfo =
8854         &GetFramebufferState(dev_data, pRenderPassBegin->framebuffer)->createInfo;
8855     if (pRenderPassBegin->renderArea.offset.x < 0 ||
8856         (pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > pFramebufferInfo->width ||
8857         pRenderPassBegin->renderArea.offset.y < 0 ||
8858         (pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > pFramebufferInfo->height) {
8859         skip |= static_cast<bool>(log_msg(
8860             dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
8861             DRAWSTATE_INVALID_RENDER_AREA, "CORE",
8862             "Cannot execute a render pass with renderArea not within the bounds of the framebuffer. RenderArea: x %d, y %d, width "
8863             "%d, height %d. Framebuffer: width %d, height %d.",
8864             pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width,
8865             pRenderPassBegin->renderArea.extent.height, pFramebufferInfo->width, pFramebufferInfo->height));
8866     }
8867     return skip;
8868 }
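// Illustrative note (not part of the layer): for a 1024x768 framebuffer, a renderArea of
// {offset = {256, 256}, extent = {800, 600}} fails this check because 256 + 800 > 1024.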
8869 
8870 // For stencil-only formats, the stencil[Load|Store]Op is the one that must be checked; for depth-only and color attachments it
8871 // is the [load|store]Op. Combined depth/stencil formats check both ops.
8872 // TODO: The memory valid flag in DEVICE_MEM_INFO should probably be split to track the validity of stencil memory separately.
8873 template <typename T>
8874 static bool FormatSpecificLoadAndStoreOpSettings(VkFormat format, T color_depth_op, T stencil_op, T op) {
8875     if (color_depth_op != op && stencil_op != op) {
8876         return false;
8877     }
8878     bool check_color_depth_load_op = !FormatIsStencilOnly(format);
8879     bool check_stencil_load_op = FormatIsDepthAndStencil(format) || !check_color_depth_load_op;
8880 
8881     return ((check_color_depth_load_op && (color_depth_op == op)) || (check_stencil_load_op && (stencil_op == op)));
8882 }
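// Illustrative note (not part of the layer): for a combined depth/stencil format such as
// VK_FORMAT_D24_UNORM_S8_UINT with loadOp = VK_ATTACHMENT_LOAD_OP_LOAD and
// stencilLoadOp = VK_ATTACHMENT_LOAD_OP_CLEAR,
//     FormatSpecificLoadAndStoreOpSettings(format, loadOp, stencilLoadOp, VK_ATTACHMENT_LOAD_OP_CLEAR)
// returns true because the stencil aspect is cleared, even though the depth aspect is loaded.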
8883 
8884 VKAPI_ATTR void VKAPI_CALL CmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
8885                                               VkSubpassContents contents) {
8886     bool skip = false;
8887     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8888     unique_lock_t lock(global_lock);
8889     GLOBAL_CB_NODE *cb_node = GetCBNode(dev_data, commandBuffer);
8890     auto render_pass_state = pRenderPassBegin ? GetRenderPassState(dev_data, pRenderPassBegin->renderPass) : nullptr;
8891     auto framebuffer = pRenderPassBegin ? GetFramebufferState(dev_data, pRenderPassBegin->framebuffer) : nullptr;
8892     if (cb_node) {
8893         if (render_pass_state) {
8894             uint32_t clear_op_size = 0;  // One past the highest-indexed attachment using VK_ATTACHMENT_LOAD_OP_CLEAR; pClearValues needs at least this many entries
8895             cb_node->activeFramebuffer = pRenderPassBegin->framebuffer;
8896             for (uint32_t i = 0; i < render_pass_state->createInfo.attachmentCount; ++i) {
8897                 MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
8898                 auto pAttachment = &render_pass_state->createInfo.pAttachments[i];
8899                 if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp, pAttachment->stencilLoadOp,
8900                                                          VK_ATTACHMENT_LOAD_OP_CLEAR)) {
8901                     clear_op_size = static_cast<uint32_t>(i) + 1;
8902                     std::function<bool()> function = [=]() {
8903                         SetImageMemoryValid(dev_data, GetImageState(dev_data, fb_info.image), true);
8904                         return false;
8905                     };
8906                     cb_node->queue_submit_functions.push_back(function);
8907                 } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp,
8908                                                                 pAttachment->stencilLoadOp, VK_ATTACHMENT_LOAD_OP_DONT_CARE)) {
8909                     std::function<bool()> function = [=]() {
8910                         SetImageMemoryValid(dev_data, GetImageState(dev_data, fb_info.image), false);
8911                         return false;
8912                     };
8913                     cb_node->queue_submit_functions.push_back(function);
8914                 } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp,
8915                                                                 pAttachment->stencilLoadOp, VK_ATTACHMENT_LOAD_OP_LOAD)) {
8916                     std::function<bool()> function = [=]() {
8917                         return ValidateImageMemoryIsValid(dev_data, GetImageState(dev_data, fb_info.image),
8918                                                           "vkCmdBeginRenderPass()");
8919                     };
8920                     cb_node->queue_submit_functions.push_back(function);
8921                 }
8922                 if (render_pass_state->attachment_first_read[i]) {
8923                     std::function<bool()> function = [=]() {
8924                         return ValidateImageMemoryIsValid(dev_data, GetImageState(dev_data, fb_info.image),
8925                                                           "vkCmdBeginRenderPass()");
8926                     };
8927                     cb_node->queue_submit_functions.push_back(function);
8928                 }
8929             }
8930             if (clear_op_size > pRenderPassBegin->clearValueCount) {
8931                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
8932                                 HandleToUint64(render_pass_state->renderPass), __LINE__, VALIDATION_ERROR_1200070c, "DS",
8933                                 "In vkCmdBeginRenderPass() the VkRenderPassBeginInfo struct has a clearValueCount of %u, but "
8934                                 "there must be at least %u entries in the pClearValues array to account for the highest-indexed "
8935                                 "attachment in renderPass 0x%" PRIx64
8936                                 " that uses VK_ATTACHMENT_LOAD_OP_CLEAR, which requires a clearValueCount of at least %u. Note "
8937                                 "that the pClearValues array is indexed by attachment number, so even if some pClearValues "
8938                                 "entries between 0 and %u correspond to attachments that aren't cleared, they will be ignored. %s",
8939                                 pRenderPassBegin->clearValueCount, clear_op_size, HandleToUint64(render_pass_state->renderPass),
8940                                 clear_op_size, clear_op_size - 1, validation_error_map[VALIDATION_ERROR_1200070c]);
8941             }
8942             skip |= VerifyRenderAreaBounds(dev_data, pRenderPassBegin);
8943             skip |= VerifyFramebufferAndRenderPassLayouts(dev_data, cb_node, pRenderPassBegin,
8944                                                           GetFramebufferState(dev_data, pRenderPassBegin->framebuffer));
8945             if (framebuffer->rp_state->renderPass != render_pass_state->renderPass) {
8946                 skip |= validateRenderPassCompatibility(dev_data, "render pass", render_pass_state, "framebuffer",
8947                                                         framebuffer->rp_state.get(), "vkCmdBeginRenderPass()",
8948                                                         VALIDATION_ERROR_12000710);
8949             }
8950             skip |= insideRenderPass(dev_data, cb_node, "vkCmdBeginRenderPass()", VALIDATION_ERROR_17a00017);
8951             skip |= ValidateDependencies(dev_data, framebuffer, render_pass_state);
8952             skip |= validatePrimaryCommandBuffer(dev_data, cb_node, "vkCmdBeginRenderPass()", VALIDATION_ERROR_17a00019);
8953             skip |= ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdBeginRenderPass()", VK_QUEUE_GRAPHICS_BIT,
8954                                           VALIDATION_ERROR_17a02415);
8955             skip |= ValidateCmd(dev_data, cb_node, CMD_BEGINRENDERPASS, "vkCmdBeginRenderPass()");
8956             cb_node->activeRenderPass = render_pass_state;
8957             // This is a shallow copy as that is all that is needed for now
8958             cb_node->activeRenderPassBeginInfo = *pRenderPassBegin;
8959             cb_node->activeSubpass = 0;
8960             cb_node->activeSubpassContents = contents;
8961             cb_node->framebuffers.insert(pRenderPassBegin->framebuffer);
8962             // Connect this framebuffer and its children to this cmdBuffer
8963             AddFramebufferBinding(dev_data, cb_node, framebuffer);
8964             // Connect this RP to cmdBuffer
8965             addCommandBufferBinding(&render_pass_state->cb_bindings,
8966                                     {HandleToUint64(render_pass_state->renderPass), kVulkanObjectTypeRenderPass}, cb_node);
8967             // transition attachments to the correct layouts for beginning of renderPass and first subpass
8968             TransitionBeginRenderPassLayouts(dev_data, cb_node, render_pass_state, framebuffer);
8969         }
8970     }
8971     lock.unlock();
8972     if (!skip) {
8973         dev_data->dispatch_table.CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
8974     }
8975 }
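// Illustrative sketch (not part of the layer): if attachment 3 is the highest-indexed attachment
// using VK_ATTACHMENT_LOAD_OP_CLEAR, clear_op_size is 4 and the begin info must carry at least four
// clear values, even though entries 0-2 are ignored ('rp_begin' is a hypothetical
// VkRenderPassBeginInfo):
//
//     VkClearValue clears[4] = {};  // entries 0-2 are placeholders for non-cleared attachments
//     clears[3].color = {{0.0f, 0.0f, 0.0f, 1.0f}};
//     rp_begin.clearValueCount = 4;
//     rp_begin.pClearValues = clears;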
8976 
8977 VKAPI_ATTR void VKAPI_CALL CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
8978     bool skip = false;
8979     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8980     unique_lock_t lock(global_lock);
8981     GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
8982     if (pCB) {
8983         skip |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdNextSubpass()", VALIDATION_ERROR_1b600019);
8984         skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdNextSubpass()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1b602415);
8985         skip |= ValidateCmd(dev_data, pCB, CMD_NEXTSUBPASS, "vkCmdNextSubpass()");
8986         skip |= outsideRenderPass(dev_data, pCB, "vkCmdNextSubpass()", VALIDATION_ERROR_1b600017);
8987 
8988         // pCB->activeRenderPass may be null here (the outsideRenderPass() check above only records the error), so guard the access
8989         if (pCB->activeRenderPass && (pCB->activeSubpass == pCB->activeRenderPass->createInfo.subpassCount - 1)) {
8990             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8991                             HandleToUint64(commandBuffer), __LINE__, VALIDATION_ERROR_1b60071a, "DS",
8992                             "vkCmdNextSubpass(): Attempted to advance beyond final subpass. %s",
8993                             validation_error_map[VALIDATION_ERROR_1b60071a]);
8994         }
8995     }
8996     lock.unlock();
8997 
8998     if (skip) return;
8999 
9000     dev_data->dispatch_table.CmdNextSubpass(commandBuffer, contents);
9001 
9002     if (pCB) {
9003         lock.lock();
9004         pCB->activeSubpass++;
9005         pCB->activeSubpassContents = contents;
9006         TransitionSubpassLayouts(dev_data, pCB, pCB->activeRenderPass, pCB->activeSubpass,
9007                                  GetFramebufferState(dev_data, pCB->activeRenderPassBeginInfo.framebuffer));
9008     }
9009 }
9010 
9011 VKAPI_ATTR void VKAPI_CALL CmdEndRenderPass(VkCommandBuffer commandBuffer) {
9012     bool skip = false;
9013     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
9014     unique_lock_t lock(global_lock);
9015     auto pCB = GetCBNode(dev_data, commandBuffer);
9016     FRAMEBUFFER_STATE *framebuffer = NULL;
9017     if (pCB) {
9018         RENDER_PASS_STATE *rp_state = pCB->activeRenderPass;
9019         framebuffer = GetFramebufferState(dev_data, pCB->activeFramebuffer);
9020         if (rp_state) {
9021             if (pCB->activeSubpass != rp_state->createInfo.subpassCount - 1) {
9022                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9023                                 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer), __LINE__,
9024                                 VALIDATION_ERROR_1b00071c, "DS", "vkCmdEndRenderPass(): Called before reaching final subpass. %s",
9025                                 validation_error_map[VALIDATION_ERROR_1b00071c]);
9026             }
9027 
9028             for (size_t i = 0; i < rp_state->createInfo.attachmentCount; ++i) {
9029                 MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
9030                 auto pAttachment = &rp_state->createInfo.pAttachments[i];
9031                 if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->storeOp, pAttachment->stencilStoreOp,
9032                                                          VK_ATTACHMENT_STORE_OP_STORE)) {
9033                     std::function<bool()> function = [=]() {
9034                         SetImageMemoryValid(dev_data, GetImageState(dev_data, fb_info.image), true);
9035                         return false;
9036                     };
9037                     pCB->queue_submit_functions.push_back(function);
9038                 } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->storeOp,
9039                                                                 pAttachment->stencilStoreOp, VK_ATTACHMENT_STORE_OP_DONT_CARE)) {
9040                     std::function<bool()> function = [=]() {
9041                         SetImageMemoryValid(dev_data, GetImageState(dev_data, fb_info.image), false);
9042                         return false;
9043                     };
9044                     pCB->queue_submit_functions.push_back(function);
9045                 }
9046             }
9047         }
9048         skip |= outsideRenderPass(dev_data, pCB, "vkCmdEndRenderPass()", VALIDATION_ERROR_1b000017);
9049         skip |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdEndRenderPass()", VALIDATION_ERROR_1b000019);
9050         skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdEndRenderPass()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1b002415);
9051         skip |= ValidateCmd(dev_data, pCB, CMD_ENDRENDERPASS, "vkCmdEndRenderPass()");
9052     }
9053     lock.unlock();
9054 
9055     if (skip) return;
9056 
9057     dev_data->dispatch_table.CmdEndRenderPass(commandBuffer);
9058 
9059     if (pCB) {
9060         lock.lock();
9061         TransitionFinalSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo, framebuffer);
9062         pCB->activeRenderPass = nullptr;
9063         pCB->activeSubpass = 0;
9064         pCB->activeFramebuffer = VK_NULL_HANDLE;
9065     }
9066 }
9067 
9068 static bool validateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
9069                                 VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB, const char *caller) {
9070     bool skip = false;
9071     if (!pSubCB->beginInfo.pInheritanceInfo) {
9072         return skip;
9073     }
9074     VkFramebuffer primary_fb = pCB->activeFramebuffer;
9075     VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
9076     if (secondary_fb != VK_NULL_HANDLE) {
9077         if (primary_fb != secondary_fb) {
9078             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9079                             HandleToUint64(primaryBuffer), __LINE__, VALIDATION_ERROR_1b2000c6, "DS",
9080                             "vkCmdExecuteCommands() called w/ invalid secondary command buffer 0x%" PRIx64
9081                             " which has a framebuffer 0x%" PRIx64
9082                             " that is not the same as the primary command buffer's current active framebuffer 0x%" PRIx64 ". %s",
9083                             HandleToUint64(secondaryBuffer), HandleToUint64(secondary_fb), HandleToUint64(primary_fb),
9084                             validation_error_map[VALIDATION_ERROR_1b2000c6]);
9085         }
9086         auto fb = GetFramebufferState(dev_data, secondary_fb);
9087         if (!fb) {
9088             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9089                             HandleToUint64(primaryBuffer), __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9090                             "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%" PRIx64
9091                             " which has invalid framebuffer 0x%" PRIx64 ".",
9092                             HandleToUint64(secondaryBuffer), HandleToUint64(secondary_fb));
9093             return skip;
9094         }
9095     }
9096     return skip;
9097 }
9098 
9099 static bool validateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) {
9100     bool skip = false;
9101     unordered_set<int> activeTypes;
9102     for (auto queryObject : pCB->activeQueries) {
9103         auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
9104         if (queryPoolData != dev_data->queryPoolMap.end()) {
9105             if (queryPoolData->second.createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
9106                 pSubCB->beginInfo.pInheritanceInfo) {
9107                 VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
9108                 if ((cmdBufStatistics & queryPoolData->second.createInfo.pipelineStatistics) != cmdBufStatistics) {
9109                     skip |= log_msg(
9110                         dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9111                         HandleToUint64(pCB->commandBuffer), __LINE__, VALIDATION_ERROR_1b2000d0, "DS",
9112                         "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%" PRIx64
9113                         " which has invalid active query pool 0x%" PRIx64
9114                         ". Pipeline statistics are being queried, so the command buffer's inherited pipelineStatistics must be a subset of the bits the query pool was created with. %s",
9115                         HandleToUint64(pCB->commandBuffer), HandleToUint64(queryPoolData->first),
9116                         validation_error_map[VALIDATION_ERROR_1b2000d0]);
9117                 }
9118             }
9119             activeTypes.insert(queryPoolData->second.createInfo.queryType);
9120         }
9121     }
9122     for (auto queryObject : pSubCB->startedQueries) {
9123         auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
9124         if (queryPoolData != dev_data->queryPoolMap.end() && activeTypes.count(queryPoolData->second.createInfo.queryType)) {
9125             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9126                             HandleToUint64(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9127                             "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%" PRIx64
9128                             " which has invalid active query pool 0x%" PRIx64
9129                             " of type %d but a query of that type has been started on secondary Cmd Buffer 0x%" PRIx64 ".",
9130                             HandleToUint64(pCB->commandBuffer), HandleToUint64(queryPoolData->first),
9131                             queryPoolData->second.createInfo.queryType, HandleToUint64(pSubCB->commandBuffer));
9132         }
9133     }
9134 
9135     auto primary_pool = GetCommandPoolNode(dev_data, pCB->createInfo.commandPool);
9136     auto secondary_pool = GetCommandPoolNode(dev_data, pSubCB->createInfo.commandPool);
9137     if (primary_pool && secondary_pool && (primary_pool->queueFamilyIndex != secondary_pool->queueFamilyIndex)) {
9138         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9139                         HandleToUint64(pSubCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_QUEUE_FAMILY, "DS",
9140                         "vkCmdExecuteCommands(): Primary command buffer 0x%" PRIx64
9141                         " created in queue family %d has secondary command buffer 0x%" PRIx64 " created in queue family %d.",
9142                         HandleToUint64(pCB->commandBuffer), primary_pool->queueFamilyIndex, HandleToUint64(pSubCB->commandBuffer),
9143                         secondary_pool->queueFamilyIndex);
9144     }
9145 
9146     return skip;
9147 }
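// Illustrative sketch (not part of the layer): to satisfy the pipeline-statistics check above, a
// secondary command buffer's inheritance info must not request statistics bits the pool lacks
// ('inherit' is a hypothetical VkCommandBufferInheritanceInfo):
//
//     inherit.pipelineStatistics = VK_QUERY_PIPELINE_STATISTIC_VERTEX_SHADER_INVOCATIONS_BIT |
//                                  VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT;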
9148 
9149 VKAPI_ATTR void VKAPI_CALL CmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount,
9150                                               const VkCommandBuffer *pCommandBuffers) {
9151     bool skip = false;
9152     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
9153     unique_lock_t lock(global_lock);
9154     GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
9155     if (pCB) {
9156         GLOBAL_CB_NODE *pSubCB = NULL;
9157         for (uint32_t i = 0; i < commandBuffersCount; i++) {
9158             pSubCB = GetCBNode(dev_data, pCommandBuffers[i]);
9159             assert(pSubCB);
9160             if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == pSubCB->createInfo.level) {
9161                 skip |= log_msg(
9162                     dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9163                     HandleToUint64(pCommandBuffers[i]), __LINE__, VALIDATION_ERROR_1b2000b0, "DS",
9164                     "vkCmdExecuteCommands() called w/ Primary Cmd Buffer 0x%" PRIx64
9165                     " in element %u of pCommandBuffers array. All cmd buffers in pCommandBuffers array must be secondary. %s",
9166                     HandleToUint64(pCommandBuffers[i]), i, validation_error_map[VALIDATION_ERROR_1b2000b0]);
9167             } else if (pCB->activeRenderPass) {  // Secondary CB w/i RenderPass must have *CONTINUE_BIT set
9168                 if (pSubCB->beginInfo.pInheritanceInfo != nullptr) {
9169                     auto secondary_rp_state = GetRenderPassState(dev_data, pSubCB->beginInfo.pInheritanceInfo->renderPass);
9170                     if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
9171                         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9172                                         VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCommandBuffers[i]),
9173                                         __LINE__, VALIDATION_ERROR_1b2000c0, "DS",
9174                                         "vkCmdExecuteCommands(): Secondary Command Buffer (0x%" PRIx64
9175                                         ") executed within render pass (0x%" PRIx64
9176                                         ") must have had vkBeginCommandBuffer() called w/ "
9177                                         "VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set. %s",
9178                                         HandleToUint64(pCommandBuffers[i]), HandleToUint64(pCB->activeRenderPass->renderPass),
9179                                         validation_error_map[VALIDATION_ERROR_1b2000c0]);
9180                     } else {
9181                         // Make sure the render pass is compatible with the parent command buffer's render pass when the continue bit is set
9182                         if (pCB->activeRenderPass->renderPass != secondary_rp_state->renderPass) {
9183                             skip |= validateRenderPassCompatibility(dev_data, "primary command buffer", pCB->activeRenderPass,
9184                                                                     "secondary command buffer", secondary_rp_state,
9185                                                                     "vkCmdExecuteCommands()", VALIDATION_ERROR_1b2000c4);
9186                         }
9187                         //  If framebuffer for secondary CB is not NULL, then it must match active FB from primaryCB
9188                         skip |=
9189                             validateFramebuffer(dev_data, commandBuffer, pCB, pCommandBuffers[i], pSubCB, "vkCmdExecuteCommands()");
9190                         if (VK_NULL_HANDLE == pSubCB->activeFramebuffer) {
9191                             // Inherit the primary's activeFramebuffer while running the validate functions
9192                             for (auto &function : pSubCB->cmd_execute_commands_functions) {
9193                                 skip |= function(pCB->activeFramebuffer);
9194                             }
9195                         }
9196                     }
9197                 }
9198             }
9199             // TODO(mlentine): Move more logic into this method
9200             skip |= validateSecondaryCommandBufferState(dev_data, pCB, pSubCB);
9201             skip |= validateCommandBufferState(dev_data, pSubCB, "vkCmdExecuteCommands()", 0, VALIDATION_ERROR_1b2000b2);
9202             if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
9203                 if (pSubCB->in_use.load() || pCB->linkedCommandBuffers.count(pSubCB)) {
9204                     skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9205                                     VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCB->commandBuffer), __LINE__,
9206                                     VALIDATION_ERROR_1b2000b4, "DS",
9207                                     "Attempt to simultaneously execute command buffer 0x%" PRIx64
9208                                     " without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set! %s",
9209                                     HandleToUint64(pCB->commandBuffer), validation_error_map[VALIDATION_ERROR_1b2000b4]);
9210                 }
9211                 if (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
9212                     // Warn that a non-simultaneous secondary command buffer causes the primary to be treated as non-simultaneous
9213                     skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
9214                                     VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCommandBuffers[i]), __LINE__,
9215                                     DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
9216                                     "vkCmdExecuteCommands(): Secondary Command Buffer (0x%" PRIx64
9217                                     ") does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary "
9218                                     "command buffer (0x%" PRIx64
9219                                     ") to be treated as if it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set, even "
9220                                     "though it does.",
9221                                     HandleToUint64(pCommandBuffers[i]), HandleToUint64(pCB->commandBuffer));
9222                     pCB->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
9223                 }
9224             }
9225             if (!pCB->activeQueries.empty() && !dev_data->enabled_features.inheritedQueries) {
9226                 skip |=
9227                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9228                             HandleToUint64(pCommandBuffers[i]), __LINE__, VALIDATION_ERROR_1b2000ca, "DS",
9229                             "vkCmdExecuteCommands(): Secondary Command Buffer (0x%" PRIx64
9230                             ") cannot be submitted with a query in flight and inherited queries not supported on this device. %s",
9231                             HandleToUint64(pCommandBuffers[i]), validation_error_map[VALIDATION_ERROR_1b2000ca]);
9232             }
9233             // TODO: separate validate from update! This is very tangled.
9234             // Propagate layout transitions to the primary cmd buffer
9235             for (auto ilm_entry : pSubCB->imageLayoutMap) {
9236                 if (pCB->imageLayoutMap.find(ilm_entry.first) != pCB->imageLayoutMap.end()) {
9237                     pCB->imageLayoutMap[ilm_entry.first].layout = ilm_entry.second.layout;
9238                 } else {
9239                     assert(ilm_entry.first.hasSubresource);
9240                     IMAGE_CMD_BUF_LAYOUT_NODE node;
9241                     if (!FindCmdBufLayout(dev_data, pCB, ilm_entry.first.image, ilm_entry.first.subresource, node)) {
9242                         node.initialLayout = ilm_entry.second.initialLayout;
9243                     }
9244                     node.layout = ilm_entry.second.layout;
9245                     SetLayout(dev_data, pCB, ilm_entry.first, node);
9246                 }
9247             }
9248             pSubCB->primaryCommandBuffer = pCB->commandBuffer;
9249             pCB->linkedCommandBuffers.insert(pSubCB);
9250             pSubCB->linkedCommandBuffers.insert(pCB);
9251             for (auto &function : pSubCB->queryUpdates) {
9252                 pCB->queryUpdates.push_back(function);
9253             }
9254             for (auto &function : pSubCB->queue_submit_functions) {
9255                 pCB->queue_submit_functions.push_back(function);
9256             }
9257         }
9258         skip |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdExecuteCommands()", VALIDATION_ERROR_1b200019);
9259         skip |=
9260             ValidateCmdQueueFlags(dev_data, pCB, "vkCmdExecuteCommands()",
9261                                   VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, VALIDATION_ERROR_1b202415);
9262         skip |= ValidateCmd(dev_data, pCB, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
9263     }
9264     lock.unlock();
9265     if (!skip) dev_data->dispatch_table.CmdExecuteCommands(commandBuffer, commandBuffersCount, pCommandBuffers);
9266 }
9267 
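// Illustrative app-side sketch (not part of this layer): recording a secondary command buffer that
// satisfies the vkCmdExecuteCommands() checks above. The handles `render_pass`, `framebuffer`, and
// `secondary` are assumptions, standing in for objects created elsewhere.
//
//     VkCommandBufferInheritanceInfo inheritance = {};
//     inheritance.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
//     inheritance.renderPass = render_pass;    // must be compatible with the primary's active render pass
//     inheritance.subpass = 0;
//     inheritance.framebuffer = framebuffer;   // optional; if non-null, must match the primary's active framebuffer
//
//     VkCommandBufferBeginInfo begin_info = {};
//     begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
//     begin_info.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT |
//                        VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;  // permits reuse across primaries
//     begin_info.pInheritanceInfo = &inheritance;
//     vkBeginCommandBuffer(secondary, &begin_info);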
9268 VKAPI_ATTR VkResult VKAPI_CALL MapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags,
9269                                          void **ppData) {
9270     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9271 
9272     bool skip = false;
9273     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9274     unique_lock_t lock(global_lock);
9275     DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
9276     if (mem_info) {
9277         // TODO : This could be more fine-grained to track just the region that is valid
9278         mem_info->global_valid = true;
9279         auto end_offset = (VK_WHOLE_SIZE == size) ? mem_info->alloc_info.allocationSize - 1 : offset + size - 1;
9280         skip |= ValidateMapImageLayouts(dev_data, device, mem_info, offset, end_offset);
9281         // TODO : Do we need to create new "bound_range" for the mapped range?
9282         SetMemRangesValid(dev_data, mem_info, offset, end_offset);
9283         if ((dev_data->phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags &
9284              VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
9285             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
9286                            HandleToUint64(mem), __LINE__, VALIDATION_ERROR_31200554, "MEM",
9287                            "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj 0x%" PRIx64 ". %s",
9288                            HandleToUint64(mem), validation_error_map[VALIDATION_ERROR_31200554]);
9289         }
9290     }
9291     skip |= ValidateMapMemRange(dev_data, mem, offset, size);
9292     lock.unlock();
9293 
9294     if (!skip) {
9295         result = dev_data->dispatch_table.MapMemory(device, mem, offset, size, flags, ppData);
9296         if (VK_SUCCESS == result) {
9297             lock.lock();
9298             // TODO : What's the point of this range? See comment on creating new "bound_range" above, which may replace this
9299             storeMemRanges(dev_data, mem, offset, size);
9300             initializeAndTrackMemory(dev_data, mem, offset, size, ppData);
9301             lock.unlock();
9302         }
9303     }
9304     return result;
9305 }
9306 
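// Illustrative app-side sketch (not part of this layer): choosing a HOST_VISIBLE memory type before
// mapping, which is what the VALIDATION_ERROR_31200554 check above enforces. `gpu`, `device`,
// `memory`, and `mem_reqs` are assumptions for valid handles and the resource's requirements.
//
//     VkPhysicalDeviceMemoryProperties props;
//     vkGetPhysicalDeviceMemoryProperties(gpu, &props);
//     uint32_t type_index = UINT32_MAX;
//     for (uint32_t i = 0; i < props.memoryTypeCount; ++i) {
//         if ((mem_reqs.memoryTypeBits & (1u << i)) &&
//             (props.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)) {
//             type_index = i;
//             break;
//         }
//     }
//     // ... vkAllocateMemory() with type_index, then:
//     void *ptr = nullptr;
//     vkMapMemory(device, memory, 0, VK_WHOLE_SIZE, 0, &ptr);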
9307 VKAPI_ATTR void VKAPI_CALL UnmapMemory(VkDevice device, VkDeviceMemory mem) {
9308     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9309     bool skip = false;
9310 
9311     unique_lock_t lock(global_lock);
9312     skip |= deleteMemRanges(dev_data, mem);
9313     lock.unlock();
9314     if (!skip) {
9315         dev_data->dispatch_table.UnmapMemory(device, mem);
9316     }
9317 }
9318 
9319 static bool validateMemoryIsMapped(layer_data *dev_data, const char *funcName, uint32_t memRangeCount,
9320                                    const VkMappedMemoryRange *pMemRanges) {
9321     bool skip = false;
9322     for (uint32_t i = 0; i < memRangeCount; ++i) {
9323         auto mem_info = GetMemObjInfo(dev_data, pMemRanges[i].memory);
9324         if (mem_info) {
9325             if (pMemRanges[i].size == VK_WHOLE_SIZE) {
9326                 if (mem_info->mem_range.offset > pMemRanges[i].offset) {
9327                     skip |=
9328                         log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
9329                                 HandleToUint64(pMemRanges[i].memory), __LINE__, VALIDATION_ERROR_0c20055c, "MEM",
9330                                 "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER
9331                                 ") is less than Memory Object's offset (" PRINTF_SIZE_T_SPECIFIER "). %s",
9332                                 funcName, static_cast<size_t>(pMemRanges[i].offset),
9333                                 static_cast<size_t>(mem_info->mem_range.offset), validation_error_map[VALIDATION_ERROR_0c20055c]);
9334                 }
9335             } else {
9336                 const uint64_t data_end = (mem_info->mem_range.size == VK_WHOLE_SIZE)
9337                                               ? mem_info->alloc_info.allocationSize
9338                                               : (mem_info->mem_range.offset + mem_info->mem_range.size);
9339                 if ((mem_info->mem_range.offset > pMemRanges[i].offset) ||
9340                     (data_end < (pMemRanges[i].offset + pMemRanges[i].size))) {
9341                     skip |=
9342                         log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
9343                                 HandleToUint64(pMemRanges[i].memory), __LINE__, VALIDATION_ERROR_0c20055a, "MEM",
9344                                 "%s: Flush/Invalidate size or offset (" PRINTF_SIZE_T_SPECIFIER ", " PRINTF_SIZE_T_SPECIFIER
9345                                 ") exceed the Memory Object's upper-bound (" PRINTF_SIZE_T_SPECIFIER "). %s",
9346                                 funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size),
9347                                 static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(data_end),
9348                                 validation_error_map[VALIDATION_ERROR_0c20055a]);
9349                 }
9350             }
9351         }
9352     }
9353     return skip;
9354 }
9355 
9356 static bool ValidateAndCopyNoncoherentMemoryToDriver(layer_data *dev_data, uint32_t mem_range_count,
9357                                                      const VkMappedMemoryRange *mem_ranges) {
9358     bool skip = false;
9359     for (uint32_t i = 0; i < mem_range_count; ++i) {
9360         auto mem_info = GetMemObjInfo(dev_data, mem_ranges[i].memory);
9361         if (mem_info) {
9362             if (mem_info->shadow_copy) {
9363                 VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
9364                                         ? mem_info->mem_range.size
9365                                         : (mem_info->alloc_info.allocationSize - mem_info->mem_range.offset);
9366                 char *data = static_cast<char *>(mem_info->shadow_copy);
9367                 for (uint64_t j = 0; j < mem_info->shadow_pad_size; ++j) {
9368                     if (data[j] != NoncoherentMemoryFillValue) {
9369                         skip |= log_msg(
9370                             dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
9371                             HandleToUint64(mem_ranges[i].memory), __LINE__, MEMTRACK_INVALID_MAP, "MEM",
9372                             "Memory underflow was detected on mem obj 0x%" PRIx64, HandleToUint64(mem_ranges[i].memory));
9373                     }
9374                 }
9375                 for (uint64_t j = (size + mem_info->shadow_pad_size); j < (2 * mem_info->shadow_pad_size + size); ++j) {
9376                     if (data[j] != NoncoherentMemoryFillValue) {
9377                         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9378                                         VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, HandleToUint64(mem_ranges[i].memory),
9379                                         __LINE__, MEMTRACK_INVALID_MAP, "MEM", "Memory overflow was detected on mem obj 0x%" PRIx64,
9380                                         HandleToUint64(mem_ranges[i].memory));
9381                     }
9382                 }
9383                 memcpy(mem_info->p_driver_data, static_cast<void *>(data + mem_info->shadow_pad_size), (size_t)(size));
9384             }
9385         }
9386     }
9387     return skip;
9388 }
9389 
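// The shadow copy above is laid out as [pad | user data | pad], with both pads filled with
// NoncoherentMemoryFillValue at map time; a pad byte that changed by flush time means the app wrote
// outside the mapped range. A minimal sketch of the same guard-byte idea, independent of the layer's
// data structures (`kFill`, `pad`, and `size` are assumptions):
//
//     const char kFill = 0x5A;
//     std::vector<char> shadow(pad + size + pad, kFill);
//     char *user = shadow.data() + pad;  // hand `user` out as the "mapped" pointer
//     // ... later, check that neither pad was disturbed:
//     bool underflow = std::any_of(shadow.data(), user, [&](char c) { return c != kFill; });
//     bool overflow = std::any_of(user + size, user + size + pad, [&](char c) { return c != kFill; });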
9390 static void CopyNoncoherentMemoryFromDriver(layer_data *dev_data, uint32_t mem_range_count, const VkMappedMemoryRange *mem_ranges) {
9391     for (uint32_t i = 0; i < mem_range_count; ++i) {
9392         auto mem_info = GetMemObjInfo(dev_data, mem_ranges[i].memory);
9393         if (mem_info && mem_info->shadow_copy) {
9394             VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
9395                                     ? mem_info->mem_range.size
9396                                     : (mem_info->alloc_info.allocationSize - mem_info->mem_range.offset);
9397             char *data = static_cast<char *>(mem_info->shadow_copy);
9398             memcpy(data + mem_info->shadow_pad_size, mem_info->p_driver_data, (size_t)(size));
9399         }
9400     }
9401 }
9402 
9403 static bool ValidateMappedMemoryRangeDeviceLimits(layer_data *dev_data, const char *func_name, uint32_t mem_range_count,
9404                                                   const VkMappedMemoryRange *mem_ranges) {
9405     bool skip = false;
9406     for (uint32_t i = 0; i < mem_range_count; ++i) {
9407         uint64_t atom_size = dev_data->phys_dev_properties.properties.limits.nonCoherentAtomSize;
9408         if (SafeModulo(mem_ranges[i].offset, atom_size) != 0) {
9409             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
9410                             HandleToUint64(mem_ranges[i].memory), __LINE__, VALIDATION_ERROR_0c20055e, "MEM",
9411                             "%s: Offset in pMemRanges[%d] is 0x%" PRIxLEAST64
9412                             ", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 "). %s",
9413                             func_name, i, mem_ranges[i].offset, atom_size, validation_error_map[VALIDATION_ERROR_0c20055e]);
9414         }
9415         if ((mem_ranges[i].size != VK_WHOLE_SIZE) && (SafeModulo(mem_ranges[i].size, atom_size) != 0)) {
9416             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
9417                             HandleToUint64(mem_ranges[i].memory), __LINE__, VALIDATION_ERROR_0c200560, "MEM",
9418                             "%s: Size in pMemRanges[%d] is 0x%" PRIxLEAST64
9419                             ", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 "). %s",
9420                             func_name, i, mem_ranges[i].size, atom_size, validation_error_map[VALIDATION_ERROR_0c200560]);
9421         }
9422     }
9423     return skip;
9424 }
9425 
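// Illustrative app-side sketch (not part of this layer): rounding a flush range to
// nonCoherentAtomSize so both checks above pass. `limits`, `dirty_offset`, `dirty_size`, `memory`,
// and `alloc_size` are assumptions for the device limits and the region the app actually wrote.
//
//     VkDeviceSize atom = limits.nonCoherentAtomSize;
//     VkDeviceSize begin = (dirty_offset / atom) * atom;                            // round down
//     VkDeviceSize end = ((dirty_offset + dirty_size + atom - 1) / atom) * atom;    // round up
//     VkMappedMemoryRange range = {};
//     range.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
//     range.memory = memory;
//     range.offset = begin;
//     range.size = (end >= alloc_size) ? VK_WHOLE_SIZE : (end - begin);
//     vkFlushMappedMemoryRanges(device, 1, &range);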
9426 static bool PreCallValidateFlushMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
9427                                                    const VkMappedMemoryRange *mem_ranges) {
9428     bool skip = false;
9429     lock_guard_t lock(global_lock);
9430     skip |= ValidateAndCopyNoncoherentMemoryToDriver(dev_data, mem_range_count, mem_ranges);
9431     skip |= validateMemoryIsMapped(dev_data, "vkFlushMappedMemoryRanges", mem_range_count, mem_ranges);
9432     return skip;
9433 }
9434 
9435 VKAPI_ATTR VkResult VKAPI_CALL FlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
9436                                                        const VkMappedMemoryRange *pMemRanges) {
9437     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9438     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9439 
9440     if (!PreCallValidateFlushMappedMemoryRanges(dev_data, memRangeCount, pMemRanges)) {
9441         result = dev_data->dispatch_table.FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
9442     }
9443     return result;
9444 }
9445 
9446 static bool PreCallValidateInvalidateMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
9447                                                         const VkMappedMemoryRange *mem_ranges) {
9448     bool skip = false;
9449     lock_guard_t lock(global_lock);
9450     skip |= validateMemoryIsMapped(dev_data, "vkInvalidateMappedMemoryRanges", mem_range_count, mem_ranges);
9451     return skip;
9452 }
9453 
9454 static void PostCallRecordInvalidateMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
9455                                                        const VkMappedMemoryRange *mem_ranges) {
9456     lock_guard_t lock(global_lock);
9457     // Update our shadow copy with modified driver data
9458     CopyNoncoherentMemoryFromDriver(dev_data, mem_range_count, mem_ranges);
9459 }
9460 
9461 VKAPI_ATTR VkResult VKAPI_CALL InvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
9462                                                             const VkMappedMemoryRange *pMemRanges) {
9463     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9464     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9465 
9466     if (!PreCallValidateInvalidateMappedMemoryRanges(dev_data, memRangeCount, pMemRanges)) {
9467         result = dev_data->dispatch_table.InvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges);
9468         if (result == VK_SUCCESS) {
9469             PostCallRecordInvalidateMappedMemoryRanges(dev_data, memRangeCount, pMemRanges);
9470         }
9471     }
9472     return result;
9473 }
9474 
9475 static bool PreCallValidateBindImageMemory(layer_data *dev_data, VkImage image, IMAGE_STATE *image_state, VkDeviceMemory mem,
9476                                            VkDeviceSize memoryOffset, const char *api_name) {
9477     bool skip = false;
9478     if (image_state) {
9479         unique_lock_t lock(global_lock);
9480         // Track objects tied to memory
9481         uint64_t image_handle = HandleToUint64(image);
9482         skip = ValidateSetMemBinding(dev_data, mem, image_handle, kVulkanObjectTypeImage, api_name);
9483         if (!image_state->memory_requirements_checked) {
9484             // There's no explicit requirement in the spec to call vkGetImageMemoryRequirements() prior to calling
9485             // BindImageMemory, but it's implied: the memory being bound must conform to the VkMemoryRequirements returned by
9486             // vkGetImageMemoryRequirements()
9487             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
9488                             image_handle, __LINE__, DRAWSTATE_INVALID_IMAGE, "DS",
9489                             "%s: Binding memory to image 0x%" PRIx64
9490                             " but vkGetImageMemoryRequirements() has not been called on that image.",
9491                             api_name, image_handle);
9492             // Make the call for them so we can verify the state
9493             lock.unlock();
9494             dev_data->dispatch_table.GetImageMemoryRequirements(dev_data->device, image, &image_state->requirements);
9495             lock.lock();
9496         }
9497 
9498         // Validate bound memory range information
9499         auto mem_info = GetMemObjInfo(dev_data, mem);
9500         if (mem_info) {
9501             skip |= ValidateInsertImageMemoryRange(dev_data, image, mem_info, memoryOffset, image_state->requirements,
9502                                                    image_state->createInfo.tiling == VK_IMAGE_TILING_LINEAR, api_name);
9503             skip |= ValidateMemoryTypes(dev_data, mem_info, image_state->requirements.memoryTypeBits, api_name,
9504                                         VALIDATION_ERROR_1740082e);
9505         }
9506 
9507         // Validate memory requirements alignment
9508         if (SafeModulo(memoryOffset, image_state->requirements.alignment) != 0) {
9509             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
9510                             image_handle, __LINE__, VALIDATION_ERROR_17400830, "DS",
9511                             "%s: memoryOffset is 0x%" PRIxLEAST64
9512                             " but must be an integer multiple of the VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
9513                             ", returned from a call to vkGetImageMemoryRequirements with image. %s",
9514                             api_name, memoryOffset, image_state->requirements.alignment,
9515                             validation_error_map[VALIDATION_ERROR_17400830]);
9516         }
9517 
9518         // Validate memory requirements size
9519         if (mem_info) {
9520             if (image_state->requirements.size > mem_info->alloc_info.allocationSize - memoryOffset) {
9521                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
9522                                 image_handle, __LINE__, VALIDATION_ERROR_17400832, "DS",
9523                                 "%s: memory size minus memoryOffset is 0x%" PRIxLEAST64
9524                                 " but must be at least as large as VkMemoryRequirements::size value 0x%" PRIxLEAST64
9525                                 ", returned from a call to vkGetImageMemoryRequirements with image. %s",
9526                                 api_name, mem_info->alloc_info.allocationSize - memoryOffset, image_state->requirements.size,
9527                                 validation_error_map[VALIDATION_ERROR_17400832]);
9528             }
9529         }
9530     }
9531     return skip;
9532 }
9533 
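// Illustrative app-side sketch (not part of this layer) of the bind order the checks above expect:
// query requirements first, then bind at an aligned offset from a compatible memory type. `device`,
// `image`, `memory`, and `offset` are assumptions.
//
//     VkMemoryRequirements reqs;
//     vkGetImageMemoryRequirements(device, image, &reqs);  // avoids the missing-requirements warning
//     assert(offset % reqs.alignment == 0);                // see VALIDATION_ERROR_17400830 above
//     // memory must come from a type set in reqs.memoryTypeBits and leave reqs.size bytes past offset
//     vkBindImageMemory(device, image, memory, offset);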
9534 static void PostCallRecordBindImageMemory(layer_data *dev_data, VkImage image, IMAGE_STATE *image_state, VkDeviceMemory mem,
9535                                           VkDeviceSize memoryOffset, const char *api_name) {
9536     if (image_state) {
9537         unique_lock_t lock(global_lock);
9538         // Track bound memory range information
9539         auto mem_info = GetMemObjInfo(dev_data, mem);
9540         if (mem_info) {
9541             InsertImageMemoryRange(dev_data, image, mem_info, memoryOffset, image_state->requirements,
9542                                    image_state->createInfo.tiling == VK_IMAGE_TILING_LINEAR);
9543         }
9544 
9545         // Track objects tied to memory
9546         uint64_t image_handle = HandleToUint64(image);
9547         SetMemBinding(dev_data, mem, image_state, memoryOffset, image_handle, kVulkanObjectTypeImage, api_name);
9548     }
9549 }
9550 
9551 VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
9552     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9553     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9554     IMAGE_STATE *image_state;
9555     {
9556         unique_lock_t lock(global_lock);
9557         image_state = GetImageState(dev_data, image);
9558     }
9559     bool skip = PreCallValidateBindImageMemory(dev_data, image, image_state, mem, memoryOffset, "vkBindImageMemory()");
9560     if (!skip) {
9561         result = dev_data->dispatch_table.BindImageMemory(device, image, mem, memoryOffset);
9562         if (result == VK_SUCCESS) {
9563             PostCallRecordBindImageMemory(dev_data, image, image_state, mem, memoryOffset, "vkBindImageMemory()");
9564         }
9565     }
9566     return result;
9567 }
9568 
9569 static bool PreCallValidateBindImageMemory2KHR(layer_data *dev_data, std::vector<IMAGE_STATE *> *image_state,
9570                                                uint32_t bindInfoCount, const VkBindImageMemoryInfoKHR *pBindInfos) {
9571     {
9572         unique_lock_t lock(global_lock);
9573         for (uint32_t i = 0; i < bindInfoCount; i++) {
9574             (*image_state)[i] = GetImageState(dev_data, pBindInfos[i].image);
9575         }
9576     }
9577     bool skip = false;
9578     char api_name[128];
9579     for (uint32_t i = 0; i < bindInfoCount; i++) {
9580         snprintf(api_name, sizeof(api_name), "vkBindImageMemory2KHR() pBindInfos[%u]", i);
9581         skip |= PreCallValidateBindImageMemory(dev_data, pBindInfos[i].image, (*image_state)[i], pBindInfos[i].memory,
9582                                                pBindInfos[i].memoryOffset, api_name);
9583     }
9584     return skip;
9585 }
9586 
9587 static void PostCallRecordBindImageMemory2KHR(layer_data *dev_data, const std::vector<IMAGE_STATE *> &image_state,
9588                                               uint32_t bindInfoCount, const VkBindImageMemoryInfoKHR *pBindInfos) {
9589     for (uint32_t i = 0; i < bindInfoCount; i++) {
9590         PostCallRecordBindImageMemory(dev_data, pBindInfos[i].image, image_state[i], pBindInfos[i].memory,
9591                                       pBindInfos[i].memoryOffset, "vkBindImageMemory2KHR()");
9592     }
9593 }
9594 
9595 VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory2KHR(VkDevice device, uint32_t bindInfoCount,
9596                                                    const VkBindImageMemoryInfoKHR *pBindInfos) {
9597     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9598     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9599     std::vector<IMAGE_STATE *> image_state(bindInfoCount);
9600     if (!PreCallValidateBindImageMemory2KHR(dev_data, &image_state, bindInfoCount, pBindInfos)) {
9601         result = dev_data->dispatch_table.BindImageMemory2KHR(device, bindInfoCount, pBindInfos);
9602         if (result == VK_SUCCESS) {
9603             PostCallRecordBindImageMemory2KHR(dev_data, image_state, bindInfoCount, pBindInfos);
9604         }
9605     }
9606     return result;
9607 }
9608 
9609 VKAPI_ATTR VkResult VKAPI_CALL SetEvent(VkDevice device, VkEvent event) {
9610     bool skip = false;
9611     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9612     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9613     unique_lock_t lock(global_lock);
9614     auto event_state = GetEventNode(dev_data, event);
9615     if (event_state) {
9616         event_state->needsSignaled = false;
9617         event_state->stageMask = VK_PIPELINE_STAGE_HOST_BIT;
9618         if (event_state->write_in_use) {
9619             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
9620                             HandleToUint64(event), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
9621                             "Cannot call vkSetEvent() on event 0x%" PRIx64 " that is already in use by a command buffer.",
9622                             HandleToUint64(event));
9623         }
9624     }
9625     lock.unlock();
9626     // Host setting event is visible to all queues immediately so update stageMask for any queue that's seen this event
9627     // TODO : For correctness this needs a separate fix to verify that the app doesn't make incorrect assumptions about the
9628     // ordering of this command in relation to vkCmd[Set|Reset]Events (see GH297)
9629     for (auto queue_data : dev_data->queueMap) {
9630         auto event_entry = queue_data.second.eventToStageMap.find(event);
9631         if (event_entry != queue_data.second.eventToStageMap.end()) {
9632             event_entry->second |= VK_PIPELINE_STAGE_HOST_BIT;
9633         }
9634     }
9635     if (!skip) result = dev_data->dispatch_table.SetEvent(device, event);
9636     return result;
9637 }
9638 
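// Illustrative app-side sketch (not part of this layer): consuming a host-signaled event on the
// device with a HOST_BIT source stage, which matches the stageMask recorded above. `cmd` and
// `event` are assumptions for a recording command buffer and a created event.
//
//     vkSetEvent(device, event);  // host signal, visible to all queues
//     vkCmdWaitEvents(cmd, 1, &event,
//                     VK_PIPELINE_STAGE_HOST_BIT,         // srcStageMask: the signal came from the host
//                     VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,  // dstStageMask
//                     0, nullptr, 0, nullptr, 0, nullptr);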
9639 static bool PreCallValidateQueueBindSparse(layer_data *dev_data, VkQueue queue, uint32_t bindInfoCount,
9640                                            const VkBindSparseInfo *pBindInfo, VkFence fence) {
9641     auto pFence = GetFenceNode(dev_data, fence);
9642     bool skip = ValidateFenceForSubmit(dev_data, pFence);
9643     if (skip) {
9644         return true;
9645     }
9646 
9647     unordered_set<VkSemaphore> signaled_semaphores;
9648     unordered_set<VkSemaphore> unsignaled_semaphores;
9649     unordered_set<VkSemaphore> internal_semaphores;
9650     for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
9651         const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
9652 
9653         std::vector<SEMAPHORE_WAIT> semaphore_waits;
9654         std::vector<VkSemaphore> semaphore_signals;
9655         for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
9656             VkSemaphore semaphore = bindInfo.pWaitSemaphores[i];
9657             auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
9658             if (pSemaphore && (pSemaphore->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) {
9659                 if (unsignaled_semaphores.count(semaphore) ||
9660                     (!(signaled_semaphores.count(semaphore)) && !(pSemaphore->signaled))) {
9661                     skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
9662                                     HandleToUint64(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
9663                                     "Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
9664                                     HandleToUint64(queue), HandleToUint64(semaphore));
9665                 } else {
9666                     signaled_semaphores.erase(semaphore);
9667                     unsignaled_semaphores.insert(semaphore);
9668                 }
9669             }
9670             if (pSemaphore && pSemaphore->scope == kSyncScopeExternalTemporary) {
9671                 internal_semaphores.insert(semaphore);
9672             }
9673         }
9674         for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
9675             VkSemaphore semaphore = bindInfo.pSignalSemaphores[i];
9676             auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
9677             if (pSemaphore && pSemaphore->scope == kSyncScopeInternal) {
9678                 if (signaled_semaphores.count(semaphore) || (!(unsignaled_semaphores.count(semaphore)) && pSemaphore->signaled)) {
9679                     skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
9680                                     HandleToUint64(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
9681                                     "Queue 0x%" PRIx64 " is signaling semaphore 0x%" PRIx64
9682                                     " that has already been signaled but not waited on by queue 0x%" PRIx64 ".",
9683                                     HandleToUint64(queue), HandleToUint64(semaphore), HandleToUint64(pSemaphore->signaler.first));
9684                 } else {
9685                     unsignaled_semaphores.erase(semaphore);
9686                     signaled_semaphores.insert(semaphore);
9687                 }
9688             }
9689         }
9690         // Store sparse binding image_state; once bindings are processed, make sure any image requiring metadata has it bound
9691         std::unordered_set<IMAGE_STATE *> sparse_images;
9692         // If we're binding sparse image memory make sure reqs were queried and note if metadata is required and bound
9693         for (uint32_t i = 0; i < bindInfo.imageBindCount; ++i) {
9694             const auto &image_bind = bindInfo.pImageBinds[i];
9695             auto image_state = GetImageState(dev_data, image_bind.image);
9696             sparse_images.insert(image_state);
9697             if (!image_state->get_sparse_reqs_called || image_state->sparse_requirements.empty()) {
9698                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
9699                                 HandleToUint64(image_state->image), __LINE__, MEMTRACK_INVALID_STATE, "CV",
9700                                 "vkQueueBindSparse(): Binding sparse memory to image 0x%" PRIx64
9701                                 " without first calling vkGetImageSparseMemoryRequirements[2KHR]() to retrieve requirements.",
9702                                 HandleToUint64(image_state->image));
9703             }
9704         }
9705         for (uint32_t i = 0; i < bindInfo.imageOpaqueBindCount; ++i) {
9706             const auto &opaque_bind = bindInfo.pImageOpaqueBinds[i];
9707             auto image_state = GetImageState(dev_data, opaque_bind.image);
9708             sparse_images.insert(image_state);
9709             if (!image_state->get_sparse_reqs_called || image_state->sparse_requirements.empty()) {
9710                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
9711                                 HandleToUint64(image_state->image), __LINE__, MEMTRACK_INVALID_STATE, "CV",
9712                                 "vkQueueBindSparse(): Binding opaque sparse memory to image 0x%" PRIx64
9713                                 " without first calling vkGetImageSparseMemoryRequirements[2KHR]() to retrieve requirements.",
9714                                 HandleToUint64(image_state->image));
9715             }
9716             // The metadata aspect is bound through opaque binds whose flags include VK_SPARSE_MEMORY_BIND_METADATA_BIT
9717             for (uint32_t j = 0; j < opaque_bind.bindCount; ++j) {
9718                 if (opaque_bind.pBinds[j].flags & VK_SPARSE_MEMORY_BIND_METADATA_BIT) {
9719                     image_state->sparse_metadata_bound = true;
9720                 }
9721             }
9722         }
9723         for (const auto &sparse_image_state : sparse_images) {
9724             if (sparse_image_state->sparse_metadata_required && !sparse_image_state->sparse_metadata_bound) {
9725                 // Warn if metadata is required for an image with sparse binding, but metadata was never bound
9726                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
9727                                 HandleToUint64(sparse_image_state->image), __LINE__, MEMTRACK_INVALID_STATE, "CV",
9728                                 "vkQueueBindSparse(): Binding sparse memory to image 0x%" PRIx64
9729                                 " which requires a metadata aspect but no binding with VK_SPARSE_MEMORY_BIND_METADATA_BIT set was made.",
9730                                 HandleToUint64(sparse_image_state->image));
9731             }
9732         }
9733     }
9734 
9735     return skip;
9736 }
9737 static void PostCallRecordQueueBindSparse(layer_data *dev_data, VkQueue queue, uint32_t bindInfoCount,
9738                                           const VkBindSparseInfo *pBindInfo, VkFence fence) {
9739     uint64_t early_retire_seq = 0;
9740     auto pFence = GetFenceNode(dev_data, fence);
9741     auto pQueue = GetQueueState(dev_data, queue);
9742 
9743     if (pFence) {
9744         if (pFence->scope == kSyncScopeInternal) {
9745             SubmitFence(pQueue, pFence, std::max(1u, bindInfoCount));
9746             if (!bindInfoCount) {
9747                 // No work to do, just dropping a fence in the queue by itself.
9748                 pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), std::vector<SEMAPHORE_WAIT>(),
9749                                                  std::vector<VkSemaphore>(), std::vector<VkSemaphore>(), fence);
9750             }
9751         } else {
9752             // Retire work up until this fence early, we will not see the wait that corresponds to this signal
9753             early_retire_seq = pQueue->seq + pQueue->submissions.size();
9754             if (!dev_data->external_sync_warning) {
9755                 dev_data->external_sync_warning = true;
9756                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
9757                         HandleToUint64(fence), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
9758                         "vkQueueBindSparse(): Signaling external fence 0x%" PRIx64 " on queue 0x%" PRIx64
9759                         " will disable validation of preceding command buffer lifecycle states and the in-use status of associated "
9760                         "objects.",
9761                         HandleToUint64(fence), HandleToUint64(queue));
9762             }
9763         }
9764     }
9765 
9766     for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
9767         const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
9768         // Track objects tied to memory
9769         for (uint32_t j = 0; j < bindInfo.bufferBindCount; j++) {
9770             for (uint32_t k = 0; k < bindInfo.pBufferBinds[j].bindCount; k++) {
9771                 auto sparse_binding = bindInfo.pBufferBinds[j].pBinds[k];
9772                 SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size},
9773                                     HandleToUint64(bindInfo.pBufferBinds[j].buffer), kVulkanObjectTypeBuffer);
9774             }
9775         }
9776         for (uint32_t j = 0; j < bindInfo.imageOpaqueBindCount; j++) {
9777             for (uint32_t k = 0; k < bindInfo.pImageOpaqueBinds[j].bindCount; k++) {
9778                 auto sparse_binding = bindInfo.pImageOpaqueBinds[j].pBinds[k];
9779                 SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size},
9780                                     HandleToUint64(bindInfo.pImageOpaqueBinds[j].image), kVulkanObjectTypeImage);
9781             }
9782         }
9783         for (uint32_t j = 0; j < bindInfo.imageBindCount; j++) {
9784             for (uint32_t k = 0; k < bindInfo.pImageBinds[j].bindCount; k++) {
9785                 auto sparse_binding = bindInfo.pImageBinds[j].pBinds[k];
9786                 // TODO: This size is broken for non-opaque bindings, need to update to comprehend full sparse binding data
9787                 VkDeviceSize size = sparse_binding.extent.depth * sparse_binding.extent.height * sparse_binding.extent.width * 4;
9788                 SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, size},
9789                                     HandleToUint64(bindInfo.pImageBinds[j].image), kVulkanObjectTypeImage);
9790             }
9791         }
9792 
9793         std::vector<SEMAPHORE_WAIT> semaphore_waits;
9794         std::vector<VkSemaphore> semaphore_signals;
9795         std::vector<VkSemaphore> semaphore_externals;
9796         for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
9797             VkSemaphore semaphore = bindInfo.pWaitSemaphores[i];
9798             auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
9799             if (pSemaphore) {
9800                 if (pSemaphore->scope == kSyncScopeInternal) {
9801                     if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
9802                         semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
9803                         pSemaphore->in_use.fetch_add(1);
9804                     }
9805                     pSemaphore->signaler.first = VK_NULL_HANDLE;
9806                     pSemaphore->signaled = false;
9807                 } else {
9808                     semaphore_externals.push_back(semaphore);
9809                     pSemaphore->in_use.fetch_add(1);
9810                     if (pSemaphore->scope == kSyncScopeExternalTemporary) {
9811                         pSemaphore->scope = kSyncScopeInternal;
9812                     }
9813                 }
9814             }
9815         }
9816         for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
9817             VkSemaphore semaphore = bindInfo.pSignalSemaphores[i];
9818             auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
9819             if (pSemaphore) {
9820                 if (pSemaphore->scope == kSyncScopeInternal) {
9821                     pSemaphore->signaler.first = queue;
9822                     pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
9823                     pSemaphore->signaled = true;
9824                     pSemaphore->in_use.fetch_add(1);
9825                     semaphore_signals.push_back(semaphore);
9826                 } else {
9827                     // Retire work up until this submit early, we will not see the wait that corresponds to this signal
9828                     early_retire_seq = std::max(early_retire_seq, pQueue->seq + pQueue->submissions.size() + 1);
9829                     if (!dev_data->external_sync_warning) {
9830                         dev_data->external_sync_warning = true;
9831                         log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
9832                                 HandleToUint64(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
9833                                 "vkQueueBindSparse(): Signaling external semaphore 0x%" PRIx64 " on queue 0x%" PRIx64
9834                                 " will disable validation of preceding command buffer lifecycle states and the in-use status of "
9835                                 "associated objects.",
9836                                 HandleToUint64(semaphore), HandleToUint64(queue));
9837                     }
9838                 }
9839             }
9840         }
9841 
9842         pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), semaphore_waits, semaphore_signals, semaphore_externals,
9843                                          bindIdx == bindInfoCount - 1 ? fence : VK_NULL_HANDLE);
9844     }
9845 
9846     if (early_retire_seq) {
9847         RetireWorkOnQueue(dev_data, pQueue, early_retire_seq);
9848     }
9849 }
9850 
9851 VKAPI_ATTR VkResult VKAPI_CALL QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo,
9852                                                VkFence fence) {
9853     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
9854     unique_lock_t lock(global_lock);
9855     bool skip = PreCallValidateQueueBindSparse(dev_data, queue, bindInfoCount, pBindInfo, fence);
9856     lock.unlock();
9857 
9858     if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
9859 
9860     VkResult result = dev_data->dispatch_table.QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
9861 
9862     lock.lock();
9863     PostCallRecordQueueBindSparse(dev_data, queue, bindInfoCount, pBindInfo, fence);
9864     lock.unlock();
9865     return result;
9866 }
9867 
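// Illustrative app-side sketch (not part of this layer): binding the metadata aspect of a sparse
// image through an opaque bind, which is what the sparse_metadata_bound tracking above looks for.
// `queue`, `image`, `metadata_memory`, and `metadata_req` are assumptions; `metadata_req` stands
// for the VkSparseImageMemoryRequirements entry whose formatProperties.aspectMask contains
// VK_IMAGE_ASPECT_METADATA_BIT.
//
//     VkSparseMemoryBind metadata_bind = {};
//     metadata_bind.resourceOffset = metadata_req.imageMipTailOffset;
//     metadata_bind.size = metadata_req.imageMipTailSize;
//     metadata_bind.memory = metadata_memory;
//     metadata_bind.flags = VK_SPARSE_MEMORY_BIND_METADATA_BIT;
//
//     VkSparseImageOpaqueMemoryBindInfo opaque_info = {image, 1, &metadata_bind};
//     VkBindSparseInfo bind_info = {};
//     bind_info.sType = VK_STRUCTURE_TYPE_BIND_SPARSE_INFO;
//     bind_info.imageOpaqueBindCount = 1;
//     bind_info.pImageOpaqueBinds = &opaque_info;
//     vkQueueBindSparse(queue, 1, &bind_info, VK_NULL_HANDLE);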
9868 VKAPI_ATTR VkResult VKAPI_CALL CreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
9869                                                const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) {
9870     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9871     VkResult result = dev_data->dispatch_table.CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);
9872     if (result == VK_SUCCESS) {
9873         lock_guard_t lock(global_lock);
9874         SEMAPHORE_NODE *sNode = &dev_data->semaphoreMap[*pSemaphore];
9875         sNode->signaler.first = VK_NULL_HANDLE;
9876         sNode->signaler.second = 0;
9877         sNode->signaled = false;
9878         sNode->scope = kSyncScopeInternal;
9879     }
9880     return result;
9881 }
9882 
9883 static bool PreCallValidateImportSemaphore(layer_data *dev_data, VkSemaphore semaphore, const char *caller_name) {
9884     SEMAPHORE_NODE *sema_node = GetSemaphoreNode(dev_data, semaphore);
9885     VK_OBJECT obj_struct = {HandleToUint64(semaphore), kVulkanObjectTypeSemaphore};
9886     bool skip = false;
9887     if (sema_node) {
9888         skip |= ValidateObjectNotInUse(dev_data, sema_node, obj_struct, caller_name, VALIDATION_ERROR_UNDEFINED);
9889     }
9890     return skip;
9891 }
9892 
9893 static void PostCallRecordImportSemaphore(layer_data *dev_data, VkSemaphore semaphore,
9894                                           VkExternalSemaphoreHandleTypeFlagBitsKHR handle_type, VkSemaphoreImportFlagsKHR flags) {
9895     SEMAPHORE_NODE *sema_node = GetSemaphoreNode(dev_data, semaphore);
9896     if (sema_node && sema_node->scope != kSyncScopeExternalPermanent) {
9897         if ((handle_type == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR || flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR) &&
9898             sema_node->scope == kSyncScopeInternal) {
9899             sema_node->scope = kSyncScopeExternalTemporary;
9900         } else {
9901             sema_node->scope = kSyncScopeExternalPermanent;
9902         }
9903     }
9904 }
9905 
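// Illustrative app-side sketch (not part of this layer): a temporary import, which the logic above
// classifies as kSyncScopeExternalTemporary; the imported payload applies to the next wait and then
// reverts. `semaphore` and `payload_fd` are assumptions, the fd having come from another
// device/process via vkGetSemaphoreFdKHR.
//
//     VkImportSemaphoreFdInfoKHR import_info = {};
//     import_info.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR;
//     import_info.semaphore = semaphore;
//     import_info.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
//     import_info.flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR;
//     import_info.fd = payload_fd;
//     vkImportSemaphoreFdKHR(device, &import_info);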
9906 #ifdef VK_USE_PLATFORM_WIN32_KHR
9907 VKAPI_ATTR VkResult VKAPI_CALL
9908 ImportSemaphoreWin32HandleKHR(VkDevice device, const VkImportSemaphoreWin32HandleInfoKHR *pImportSemaphoreWin32HandleInfo) {
9909     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9910     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9911     bool skip =
9912         PreCallValidateImportSemaphore(dev_data, pImportSemaphoreWin32HandleInfo->semaphore, "vkImportSemaphoreWin32HandleKHR");
9913 
9914     if (!skip) {
9915         result = dev_data->dispatch_table.ImportSemaphoreWin32HandleKHR(device, pImportSemaphoreWin32HandleInfo);
9916     }
9917 
9918     if (result == VK_SUCCESS) {
9919         PostCallRecordImportSemaphore(dev_data, pImportSemaphoreWin32HandleInfo->semaphore,
9920                                       pImportSemaphoreWin32HandleInfo->handleType, pImportSemaphoreWin32HandleInfo->flags);
9921     }
9922     return result;
9923 }
9924 #endif
9925 
9926 VKAPI_ATTR VkResult VKAPI_CALL ImportSemaphoreFdKHR(VkDevice device, const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo) {
9927     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9928     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9929     bool skip = PreCallValidateImportSemaphore(dev_data, pImportSemaphoreFdInfo->semaphore, "vkImportSemaphoreFdKHR");
9930 
9931     if (!skip) {
9932         result = dev_data->dispatch_table.ImportSemaphoreFdKHR(device, pImportSemaphoreFdInfo);
9933     }
9934 
9935     if (result == VK_SUCCESS) {
9936         PostCallRecordImportSemaphore(dev_data, pImportSemaphoreFdInfo->semaphore, pImportSemaphoreFdInfo->handleType,
9937                                       pImportSemaphoreFdInfo->flags);
9938     }
9939     return result;
9940 }
9941 
9942 static void PostCallRecordGetSemaphore(layer_data *dev_data, VkSemaphore semaphore,
9943                                        VkExternalSemaphoreHandleTypeFlagBitsKHR handle_type) {
9944     SEMAPHORE_NODE *sema_node = GetSemaphoreNode(dev_data, semaphore);
9945     if (sema_node && handle_type != VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR) {
9946         // Cannot track semaphore state once it is exported, except for Sync FD handle types which have copy transference
9947         sema_node->scope = kSyncScopeExternalPermanent;
9948     }
9949 }
9950 
9951 #ifdef VK_USE_PLATFORM_WIN32_KHR
9952 VKAPI_ATTR VkResult VKAPI_CALL GetSemaphoreWin32HandleKHR(VkDevice device,
9953                                                           const VkSemaphoreGetWin32HandleInfoKHR *pGetWin32HandleInfo,
9954                                                           HANDLE *pHandle) {
9955     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9956     VkResult result = dev_data->dispatch_table.GetSemaphoreWin32HandleKHR(device, pGetWin32HandleInfo, pHandle);
9957 
9958     if (result == VK_SUCCESS) {
9959         PostCallRecordGetSemaphore(dev_data, pGetWin32HandleInfo->semaphore, pGetWin32HandleInfo->handleType);
9960     }
9961     return result;
9962 }
9963 #endif
9964 
9965 VKAPI_ATTR VkResult VKAPI_CALL GetSemaphoreFdKHR(VkDevice device, const VkSemaphoreGetFdInfoKHR *pGetFdInfo, int *pFd) {
9966     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9967     VkResult result = dev_data->dispatch_table.GetSemaphoreFdKHR(device, pGetFdInfo, pFd);
9968 
9969     if (result == VK_SUCCESS) {
9970         PostCallRecordGetSemaphore(dev_data, pGetFdInfo->semaphore, pGetFdInfo->handleType);
9971     }
9972     return result;
9973 }
9974 
9975 static bool PreCallValidateImportFence(layer_data *dev_data, VkFence fence, const char *caller_name) {
9976     FENCE_NODE *fence_node = GetFenceNode(dev_data, fence);
9977     bool skip = false;
9978     if (fence_node && fence_node->scope == kSyncScopeInternal && fence_node->state == FENCE_INFLIGHT) {
9979         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
9980                         HandleToUint64(fence), __LINE__, VALIDATION_ERROR_UNDEFINED, "DS",
9981                         "Cannot call %s on fence 0x%" PRIx64 " that is currently in use.", caller_name, HandleToUint64(fence));
9982     }
9983     return skip;
9984 }
9985 
9986 static void PostCallRecordImportFence(layer_data *dev_data, VkFence fence, VkExternalFenceHandleTypeFlagBitsKHR handle_type,
9987                                       VkFenceImportFlagsKHR flags) {
9988     FENCE_NODE *fence_node = GetFenceNode(dev_data, fence);
9989     if (fence_node && fence_node->scope != kSyncScopeExternalPermanent) {
9990         if ((handle_type == VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR || flags & VK_FENCE_IMPORT_TEMPORARY_BIT_KHR) &&
9991             fence_node->scope == kSyncScopeInternal) {
9992             fence_node->scope = kSyncScopeExternalTemporary;
9993         } else {
9994             fence_node->scope = kSyncScopeExternalPermanent;
9995         }
9996     }
9997 }
9998 
9999 #ifdef VK_USE_PLATFORM_WIN32_KHR
10000 VKAPI_ATTR VkResult VKAPI_CALL ImportFenceWin32HandleKHR(VkDevice device,
10001                                                          const VkImportFenceWin32HandleInfoKHR *pImportFenceWin32HandleInfo) {
10002     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10003     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10004     bool skip = PreCallValidateImportFence(dev_data, pImportFenceWin32HandleInfo->fence, "vkImportFenceWin32HandleKHR");
10005 
10006     if (!skip) {
10007         result = dev_data->dispatch_table.ImportFenceWin32HandleKHR(device, pImportFenceWin32HandleInfo);
10008     }
10009 
10010     if (result == VK_SUCCESS) {
10011         PostCallRecordImportFence(dev_data, pImportFenceWin32HandleInfo->fence, pImportFenceWin32HandleInfo->handleType,
10012                                   pImportFenceWin32HandleInfo->flags);
10013     }
10014     return result;
10015 }
10016 #endif
10017 
10018 VKAPI_ATTR VkResult VKAPI_CALL ImportFenceFdKHR(VkDevice device, const VkImportFenceFdInfoKHR *pImportFenceFdInfo) {
10019     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10020     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10021     bool skip = PreCallValidateImportFence(dev_data, pImportFenceFdInfo->fence, "vkImportFenceFdKHR");
10022 
10023     if (!skip) {
10024         result = dev_data->dispatch_table.ImportFenceFdKHR(device, pImportFenceFdInfo);
10025     }
10026 
10027     if (result == VK_SUCCESS) {
10028         PostCallRecordImportFence(dev_data, pImportFenceFdInfo->fence, pImportFenceFdInfo->handleType, pImportFenceFdInfo->flags);
10029     }
10030     return result;
10031 }
10032 
10033 static void PostCallRecordGetFence(layer_data *dev_data, VkFence fence, VkExternalFenceHandleTypeFlagBitsKHR handle_type) {
10034     FENCE_NODE *fence_node = GetFenceNode(dev_data, fence);
10035     if (fence_node) {
10036         if (handle_type != VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR) {
10037             // Export with reference transference becomes external
10038             fence_node->scope = kSyncScopeExternalPermanent;
10039         } else if (fence_node->scope == kSyncScopeInternal) {
10040             // Export with copy transference has a side effect of resetting the fence
10041             fence_node->state = FENCE_UNSIGNALED;
10042         }
10043     }
10044 }
10045 
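// Illustrative app-side sketch (not part of this layer): exporting a SYNC_FD has copy transference,
// so the export itself leaves the fence unsignaled again, which is exactly the state transition
// recorded above. `fence` is an assumption for a signaled, exportable fence.
//
//     VkFenceGetFdInfoKHR get_fd = {};
//     get_fd.sType = VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR;
//     get_fd.fence = fence;
//     get_fd.handleType = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR;
//     int fd = -1;
//     vkGetFenceFdKHR(device, &get_fd, &fd);  // on success, the fence is reset to unsignaled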
10046 #ifdef VK_USE_PLATFORM_WIN32_KHR
10047 VKAPI_ATTR VkResult VKAPI_CALL GetFenceWin32HandleKHR(VkDevice device, const VkFenceGetWin32HandleInfoKHR *pGetWin32HandleInfo,
10048                                                       HANDLE *pHandle) {
10049     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10050     VkResult result = dev_data->dispatch_table.GetFenceWin32HandleKHR(device, pGetWin32HandleInfo, pHandle);
10051 
10052     if (result == VK_SUCCESS) {
10053         PostCallRecordGetFence(dev_data, pGetWin32HandleInfo->fence, pGetWin32HandleInfo->handleType);
10054     }
10055     return result;
10056 }
10057 #endif
10058 
10059 VKAPI_ATTR VkResult VKAPI_CALL GetFenceFdKHR(VkDevice device, const VkFenceGetFdInfoKHR *pGetFdInfo, int *pFd) {
10060     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10061     VkResult result = dev_data->dispatch_table.GetFenceFdKHR(device, pGetFdInfo, pFd);
10062 
10063     if (result == VK_SUCCESS) {
10064         PostCallRecordGetFence(dev_data, pGetFdInfo->fence, pGetFdInfo->handleType);
10065     }
10066     return result;
10067 }
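
// App-side sketch (illustrative only, not part of the layer): exporting a fence payload
// as a POSIX sync fd, the case PostCallRecordGetFence() above models as copy transference
// (the fence is left unsignaled afterwards). `device` and `fence` are assumed app handles.
//
//     VkFenceGetFdInfoKHR get_fd_info = {VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR};
//     get_fd_info.fence = fence;
//     get_fd_info.handleType = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR;
//     int fd = -1;
//     vkGetFenceFdKHR(device, &get_fd_info, &fd);  // on success, fence behaves as reset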

VKAPI_ATTR VkResult VKAPI_CALL CreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo,
                                           const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.CreateEvent(device, pCreateInfo, pAllocator, pEvent);
    if (result == VK_SUCCESS) {
        lock_guard_t lock(global_lock);
        dev_data->eventMap[*pEvent].needsSignaled = false;
        dev_data->eventMap[*pEvent].write_in_use = 0;
        dev_data->eventMap[*pEvent].stageMask = VkPipelineStageFlags(0);
    }
    return result;
}

static bool PreCallValidateCreateSwapchainKHR(layer_data *dev_data, const char *func_name,
                                              VkSwapchainCreateInfoKHR const *pCreateInfo, SURFACE_STATE *surface_state,
                                              SWAPCHAIN_NODE *old_swapchain_state) {
    auto most_recent_swapchain = surface_state->swapchain ? surface_state->swapchain : surface_state->old_swapchain;

    // TODO: revisit this. some of these rules are being relaxed.

    // All physical devices and queue families are required to be able
    // to present to any native window on Android; require the
    // application to have established support on any other platform.
    if (!dev_data->instance_data->extensions.vk_khr_android_surface) {
        auto support_predicate = [dev_data](decltype(surface_state->gpu_queue_support)::value_type qs) -> bool {
            // TODO: should restrict search only to queue families of VkDeviceQueueCreateInfos, not whole phys. device
            return (qs.first.gpu == dev_data->physical_device) && qs.second;
        };
        const auto &support = surface_state->gpu_queue_support;
        bool is_supported = std::any_of(support.begin(), support.end(), support_predicate);

        if (!is_supported) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009ec, "DS",
                        "%s: pCreateInfo->surface is not known at this time to be supported for presentation by this device. "
                        "vkGetPhysicalDeviceSurfaceSupportKHR() must be called beforehand, and it must return VK_TRUE support for "
                        "this surface for at least one queue family of this device. %s",
                        func_name, validation_error_map[VALIDATION_ERROR_146009ec]))
                return true;
        }
    }
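    // App-side sketch (illustrative only): establishing presentation support so the
    // gpu_queue_support lookup above records a (gpu, queue family) pair with VK_TRUE.
    // `gpu`, `queue_family_index`, and `surface` are assumed app-side values.
    //
    //     VkBool32 presentable = VK_FALSE;
    //     vkGetPhysicalDeviceSurfaceSupportKHR(gpu, queue_family_index, surface, &presentable);
    //     // presentable == VK_TRUE is what the validation above looks for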

    if (most_recent_swapchain != old_swapchain_state || (surface_state->old_swapchain && surface_state->swapchain)) {
        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                    HandleToUint64(dev_data->device), __LINE__, DRAWSTATE_SWAPCHAIN_ALREADY_EXISTS, "DS",
                    "%s: surface has an existing swapchain other than oldSwapchain", func_name))
            return true;
    }
    if (old_swapchain_state && old_swapchain_state->createInfo.surface != pCreateInfo->surface) {
        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                    HandleToUint64(pCreateInfo->oldSwapchain), __LINE__, DRAWSTATE_SWAPCHAIN_WRONG_SURFACE, "DS",
                    "%s: pCreateInfo->oldSwapchain's surface is not pCreateInfo->surface", func_name))
            return true;
    }
    auto physical_device_state = GetPhysicalDeviceState(dev_data->instance_data, dev_data->physical_device);
    if (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState == UNCALLED) {
        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                    HandleToUint64(dev_data->physical_device), __LINE__, DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY, "DS",
                    "%s: surface capabilities not retrieved for this physical device", func_name))
            return true;
    } else {  // have valid capabilities
        auto &capabilities = physical_device_state->surfaceCapabilities;
        // Validate pCreateInfo->minImageCount against VkSurfaceCapabilitiesKHR::{min|max}ImageCount:
        if (pCreateInfo->minImageCount < capabilities.minImageCount) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009ee, "DS",
                        "%s called with minImageCount = %d, which is outside the bounds returned by "
                        "vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d). %s",
                        func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount,
                        validation_error_map[VALIDATION_ERROR_146009ee]))
                return true;
        }

        if ((capabilities.maxImageCount > 0) && (pCreateInfo->minImageCount > capabilities.maxImageCount)) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009f0, "DS",
                        "%s called with minImageCount = %d, which is outside the bounds returned by "
                        "vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d). %s",
                        func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount,
                        validation_error_map[VALIDATION_ERROR_146009f0]))
                return true;
        }

        // Validate pCreateInfo->imageExtent against VkSurfaceCapabilitiesKHR::{current|min|max}ImageExtent:
        if ((pCreateInfo->imageExtent.width < capabilities.minImageExtent.width) ||
            (pCreateInfo->imageExtent.width > capabilities.maxImageExtent.width) ||
            (pCreateInfo->imageExtent.height < capabilities.minImageExtent.height) ||
            (pCreateInfo->imageExtent.height > capabilities.maxImageExtent.height)) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009f4, "DS",
                        "%s called with imageExtent = (%d,%d), which is outside the bounds returned by "
                        "vkGetPhysicalDeviceSurfaceCapabilitiesKHR(): currentExtent = (%d,%d), minImageExtent = (%d,%d), "
                        "maxImageExtent = (%d,%d). %s",
                        func_name, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height,
                        capabilities.currentExtent.width, capabilities.currentExtent.height, capabilities.minImageExtent.width,
                        capabilities.minImageExtent.height, capabilities.maxImageExtent.width, capabilities.maxImageExtent.height,
                        validation_error_map[VALIDATION_ERROR_146009f4]))
                return true;
        }
        // pCreateInfo->preTransform should have exactly one bit set, and that bit must also be set in
        // VkSurfaceCapabilitiesKHR::supportedTransforms.
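        // Worked example (illustrative): the single-bit test below uses the classic
        // power-of-two identity (x & (x - 1)) == 0.
        //   preTransform = VK_SURFACE_TRANSFORM_ROTATE_90_BIT_KHR (0x2): 0x2 & 0x1 == 0 -> one bit set
        //   preTransform = 0x3 (two bits set): 0x3 & 0x2 == 0x2 != 0 -> rejected
        // The final term then requires that the single bit is among supportedTransforms.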
        if (!pCreateInfo->preTransform || (pCreateInfo->preTransform & (pCreateInfo->preTransform - 1)) ||
            !(pCreateInfo->preTransform & capabilities.supportedTransforms)) {
            // This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message.  Build
            // it up a little at a time, and then log it:
            std::string errorString = "";
            char str[1024];
            // Here's the first part of the message:
            sprintf(str, "%s called with a non-supported pCreateInfo->preTransform (i.e. %s).  Supported values are:\n", func_name,
                    string_VkSurfaceTransformFlagBitsKHR(pCreateInfo->preTransform));
            errorString += str;
            for (int i = 0; i < 32; i++) {
                // Build up the rest of the message:
                if ((1 << i) & capabilities.supportedTransforms) {
                    const char *newStr = string_VkSurfaceTransformFlagBitsKHR((VkSurfaceTransformFlagBitsKHR)(1 << i));
                    sprintf(str, "    %s\n", newStr);
                    errorString += str;
                }
            }
            // Log the message that we've built up:
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009fe, "DS", "%s. %s", errorString.c_str(),
                        validation_error_map[VALIDATION_ERROR_146009fe]))
                return true;
        }

        // pCreateInfo->compositeAlpha should have exactly one bit set, and that bit must also be set in
        // VkSurfaceCapabilitiesKHR::supportedCompositeAlpha
        if (!pCreateInfo->compositeAlpha || (pCreateInfo->compositeAlpha & (pCreateInfo->compositeAlpha - 1)) ||
            !((pCreateInfo->compositeAlpha) & capabilities.supportedCompositeAlpha)) {
            // This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message.  Build
            // it up a little at a time, and then log it:
            std::string errorString = "";
            char str[1024];
            // Here's the first part of the message:
            sprintf(str, "%s called with a non-supported pCreateInfo->compositeAlpha (i.e. %s).  Supported values are:\n",
                    func_name, string_VkCompositeAlphaFlagBitsKHR(pCreateInfo->compositeAlpha));
            errorString += str;
            for (int i = 0; i < 32; i++) {
                // Build up the rest of the message:
                if ((1 << i) & capabilities.supportedCompositeAlpha) {
                    const char *newStr = string_VkCompositeAlphaFlagBitsKHR((VkCompositeAlphaFlagBitsKHR)(1 << i));
                    sprintf(str, "    %s\n", newStr);
                    errorString += str;
                }
            }
            // Log the message that we've built up:
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_14600a00, "DS", "%s. %s", errorString.c_str(),
                        validation_error_map[VALIDATION_ERROR_14600a00]))
                return true;
        }
        // Validate pCreateInfo->imageArrayLayers against VkSurfaceCapabilitiesKHR::maxImageArrayLayers:
        if (pCreateInfo->imageArrayLayers > capabilities.maxImageArrayLayers) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009f6, "DS",
                        "%s called with a non-supported imageArrayLayers (i.e. %d).  Maximum value is %d. %s", func_name,
                        pCreateInfo->imageArrayLayers, capabilities.maxImageArrayLayers,
                        validation_error_map[VALIDATION_ERROR_146009f6]))
                return true;
        }
        // Validate pCreateInfo->imageUsage against VkSurfaceCapabilitiesKHR::supportedUsageFlags:
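        // Worked example (illustrative): the expression below is a subset test; the usage
        // is valid iff every requested bit is also present in supportedUsageFlags.
        //   imageUsage = 0x13, supportedUsageFlags = 0x1F: 0x13 & 0x1F == 0x13 -> ok
        //   imageUsage = 0x23, supportedUsageFlags = 0x1F: 0x23 & 0x1F == 0x03 != 0x23 -> error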
        if (pCreateInfo->imageUsage != (pCreateInfo->imageUsage & capabilities.supportedUsageFlags)) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009f8, "DS",
                        "%s called with a non-supported pCreateInfo->imageUsage (i.e. 0x%08x).  Supported flag bits are 0x%08x. %s",
                        func_name, pCreateInfo->imageUsage, capabilities.supportedUsageFlags,
                        validation_error_map[VALIDATION_ERROR_146009f8]))
                return true;
        }
    }

    // Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfaceFormatsKHR():
    if (physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState != QUERY_DETAILS) {
        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                    HandleToUint64(dev_data->device), __LINE__, DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY, "DS",
                    "%s called before calling vkGetPhysicalDeviceSurfaceFormatsKHR().", func_name))
            return true;
    } else {
        // Validate pCreateInfo->imageFormat against VkSurfaceFormatKHR::format:
        bool foundFormat = false;
        bool foundColorSpace = false;
        bool foundMatch = false;
        for (auto const &format : physical_device_state->surface_formats) {
            if (pCreateInfo->imageFormat == format.format) {
                // Validate pCreateInfo->imageColorSpace against VkSurfaceFormatKHR::colorSpace:
                foundFormat = true;
                if (pCreateInfo->imageColorSpace == format.colorSpace) {
                    foundMatch = true;
                    break;
                }
            } else {
                if (pCreateInfo->imageColorSpace == format.colorSpace) {
                    foundColorSpace = true;
                }
            }
        }
        if (!foundMatch) {
            if (!foundFormat) {
                if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                            HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009f2, "DS",
                            "%s called with a non-supported pCreateInfo->imageFormat (i.e. %d). %s", func_name,
                            pCreateInfo->imageFormat, validation_error_map[VALIDATION_ERROR_146009f2]))
                    return true;
            }
            if (!foundColorSpace) {
                if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                            HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_146009f2, "DS",
                            "%s called with a non-supported pCreateInfo->imageColorSpace (i.e. %d). %s", func_name,
                            pCreateInfo->imageColorSpace, validation_error_map[VALIDATION_ERROR_146009f2]))
                    return true;
            }
        }
    }

    // Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfacePresentModesKHR():
    if (physical_device_state->vkGetPhysicalDeviceSurfacePresentModesKHRState != QUERY_DETAILS) {
        // FIFO is required to always be supported
        if (pCreateInfo->presentMode != VK_PRESENT_MODE_FIFO_KHR) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), __LINE__, DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY, "DS",
                        "%s called before calling vkGetPhysicalDeviceSurfacePresentModesKHR().", func_name))
                return true;
        }
    } else {
        // Validate pCreateInfo->presentMode against vkGetPhysicalDeviceSurfacePresentModesKHR():
        bool foundMatch = std::find(physical_device_state->present_modes.begin(), physical_device_state->present_modes.end(),
                                    pCreateInfo->presentMode) != physical_device_state->present_modes.end();
        if (!foundMatch) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_14600a02, "DS",
                        "%s called with a non-supported presentMode (i.e. %s). %s", func_name,
                        string_VkPresentModeKHR(pCreateInfo->presentMode), validation_error_map[VALIDATION_ERROR_14600a02]))
                return true;
        }
    }
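    // App-side sketch (illustrative only): selecting a present mode against the queried
    // list, falling back to FIFO, which the spec guarantees is always supported.
    // `modes` is an assumed std::vector<VkPresentModeKHR> returned by
    // vkGetPhysicalDeviceSurfacePresentModesKHR().
    //
    //     VkPresentModeKHR chosen = VK_PRESENT_MODE_FIFO_KHR;  // guaranteed fallback
    //     if (std::find(modes.begin(), modes.end(), VK_PRESENT_MODE_MAILBOX_KHR) != modes.end())
    //         chosen = VK_PRESENT_MODE_MAILBOX_KHR;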
    // Validate state for shared presentable case
    if (VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == pCreateInfo->presentMode ||
        VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR == pCreateInfo->presentMode) {
        if (!dev_data->extensions.vk_khr_shared_presentable_image) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), __LINE__, DRAWSTATE_EXTENSION_NOT_ENABLED, "DS",
                        "%s called with presentMode %s which requires the VK_KHR_shared_presentable_image extension, which has not "
                        "been enabled.",
                        func_name, string_VkPresentModeKHR(pCreateInfo->presentMode)))
                return true;
        } else if (pCreateInfo->minImageCount != 1) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), __LINE__, VALIDATION_ERROR_14600ace, "DS",
                        "%s called with presentMode %s, but minImageCount value is %d. For shared presentable image, minImageCount "
                        "must be 1. %s",
                        func_name, string_VkPresentModeKHR(pCreateInfo->presentMode), pCreateInfo->minImageCount,
                        validation_error_map[VALIDATION_ERROR_14600ace]))
                return true;
        }
    }

    return false;
}

static void PostCallRecordCreateSwapchainKHR(layer_data *dev_data, VkResult result, const VkSwapchainCreateInfoKHR *pCreateInfo,
                                             VkSwapchainKHR *pSwapchain, SURFACE_STATE *surface_state,
                                             SWAPCHAIN_NODE *old_swapchain_state) {
    if (VK_SUCCESS == result) {
        lock_guard_t lock(global_lock);
        auto swapchain_state = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(pCreateInfo, *pSwapchain));
        if (VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == pCreateInfo->presentMode ||
            VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR == pCreateInfo->presentMode) {
            swapchain_state->shared_presentable = true;
        }
        surface_state->swapchain = swapchain_state.get();
        dev_data->swapchainMap[*pSwapchain] = std::move(swapchain_state);
    } else {
        surface_state->swapchain = nullptr;
    }
    // Spec requires that even if CreateSwapchainKHR fails, oldSwapchain behaves as replaced.
    if (old_swapchain_state) {
        old_swapchain_state->replaced = true;
    }
    surface_state->old_swapchain = old_swapchain_state;
    return;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
                                                  const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    auto surface_state = GetSurfaceState(dev_data->instance_data, pCreateInfo->surface);
    auto old_swapchain_state = GetSwapchainNode(dev_data, pCreateInfo->oldSwapchain);

    if (PreCallValidateCreateSwapchainKHR(dev_data, "vkCreateSwapchainKHR()", pCreateInfo, surface_state, old_swapchain_state)) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VkResult result = dev_data->dispatch_table.CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);

    PostCallRecordCreateSwapchainKHR(dev_data, result, pCreateInfo, pSwapchain, surface_state, old_swapchain_state);

    return result;
}

VKAPI_ATTR void VKAPI_CALL DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;

    unique_lock_t lock(global_lock);
    auto swapchain_data = GetSwapchainNode(dev_data, swapchain);
    if (swapchain_data) {
        if (swapchain_data->images.size() > 0) {
            for (auto swapchain_image : swapchain_data->images) {
                auto image_sub = dev_data->imageSubresourceMap.find(swapchain_image);
                if (image_sub != dev_data->imageSubresourceMap.end()) {
                    for (auto imgsubpair : image_sub->second) {
                        auto image_item = dev_data->imageLayoutMap.find(imgsubpair);
                        if (image_item != dev_data->imageLayoutMap.end()) {
                            dev_data->imageLayoutMap.erase(image_item);
                        }
                    }
                    dev_data->imageSubresourceMap.erase(image_sub);
                }
                skip = ClearMemoryObjectBindings(dev_data, HandleToUint64(swapchain_image), kVulkanObjectTypeSwapchainKHR);
                dev_data->imageMap.erase(swapchain_image);
            }
        }

        auto surface_state = GetSurfaceState(dev_data->instance_data, swapchain_data->createInfo.surface);
        if (surface_state) {
            if (surface_state->swapchain == swapchain_data) surface_state->swapchain = nullptr;
            if (surface_state->old_swapchain == swapchain_data) surface_state->old_swapchain = nullptr;
        }

        dev_data->swapchainMap.erase(swapchain);
    }
    lock.unlock();
    if (!skip) dev_data->dispatch_table.DestroySwapchainKHR(device, swapchain, pAllocator);
}

static bool PreCallValidateGetSwapchainImagesKHR(layer_data *device_data, SWAPCHAIN_NODE *swapchain_state, VkDevice device,
                                                 uint32_t *pSwapchainImageCount, VkImage *pSwapchainImages) {
    bool skip = false;
    if (swapchain_state && pSwapchainImages) {
        lock_guard_t lock(global_lock);
        // Compare the preliminary value of *pSwapchainImageCount with the value this time:
        if (swapchain_state->vkGetSwapchainImagesKHRState == UNCALLED) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                            HandleToUint64(device), __LINE__, SWAPCHAIN_PRIOR_COUNT, "DS",
                            "vkGetSwapchainImagesKHR() called with non-NULL pSwapchainImages; but no prior call with NULL "
                            "pSwapchainImages has been made to query pSwapchainImageCount.");
        } else if (*pSwapchainImageCount > swapchain_state->get_swapchain_image_count) {
            skip |=
                log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(device), __LINE__, SWAPCHAIN_INVALID_COUNT, "DS",
                        "vkGetSwapchainImagesKHR() called with non-NULL pSwapchainImages, and with *pSwapchainImageCount set to a "
                        "value (%d) that is greater than the value (%d) that was returned when pSwapchainImages was NULL.",
                        *pSwapchainImageCount, swapchain_state->get_swapchain_image_count);
        }
    }
    return skip;
}

static void PostCallRecordGetSwapchainImagesKHR(layer_data *device_data, SWAPCHAIN_NODE *swapchain_state, VkDevice device,
                                                uint32_t *pSwapchainImageCount, VkImage *pSwapchainImages) {
    if (!swapchain_state) return;  // Defensive: unknown swapchain handle, nothing to record.
    lock_guard_t lock(global_lock);

    if (*pSwapchainImageCount > swapchain_state->images.size()) swapchain_state->images.resize(*pSwapchainImageCount);

    if (pSwapchainImages) {
        if (swapchain_state->vkGetSwapchainImagesKHRState < QUERY_DETAILS) {
            swapchain_state->vkGetSwapchainImagesKHRState = QUERY_DETAILS;
        }
        for (uint32_t i = 0; i < *pSwapchainImageCount; ++i) {
            if (swapchain_state->images[i] != VK_NULL_HANDLE) continue;  // Already retrieved this.

            IMAGE_LAYOUT_NODE image_layout_node;
            image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED;
            image_layout_node.format = swapchain_state->createInfo.imageFormat;
            // Add imageMap entries for each swapchain image
            VkImageCreateInfo image_ci = {};
            image_ci.flags = 0;
            image_ci.imageType = VK_IMAGE_TYPE_2D;
            image_ci.format = swapchain_state->createInfo.imageFormat;
            image_ci.extent.width = swapchain_state->createInfo.imageExtent.width;
            image_ci.extent.height = swapchain_state->createInfo.imageExtent.height;
            image_ci.extent.depth = 1;
            image_ci.mipLevels = 1;
            image_ci.arrayLayers = swapchain_state->createInfo.imageArrayLayers;
            image_ci.samples = VK_SAMPLE_COUNT_1_BIT;
            image_ci.tiling = VK_IMAGE_TILING_OPTIMAL;
            image_ci.usage = swapchain_state->createInfo.imageUsage;
            image_ci.sharingMode = swapchain_state->createInfo.imageSharingMode;
            device_data->imageMap[pSwapchainImages[i]] = unique_ptr<IMAGE_STATE>(new IMAGE_STATE(pSwapchainImages[i], &image_ci));
            auto &image_state = device_data->imageMap[pSwapchainImages[i]];
            image_state->valid = false;
            image_state->binding.mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY;
            swapchain_state->images[i] = pSwapchainImages[i];
            ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()};
            device_data->imageSubresourceMap[pSwapchainImages[i]].push_back(subpair);
            device_data->imageLayoutMap[subpair] = image_layout_node;
        }
    }

    if (*pSwapchainImageCount) {
        if (swapchain_state->vkGetSwapchainImagesKHRState < QUERY_COUNT) {
            swapchain_state->vkGetSwapchainImagesKHRState = QUERY_COUNT;
        }
        swapchain_state->get_swapchain_image_count = *pSwapchainImageCount;
    }
}

VKAPI_ATTR VkResult VKAPI_CALL GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount,
                                                     VkImage *pSwapchainImages) {
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    auto swapchain_state = GetSwapchainNode(device_data, swapchain);
    bool skip = PreCallValidateGetSwapchainImagesKHR(device_data, swapchain_state, device, pSwapchainImageCount, pSwapchainImages);

    if (!skip) {
        result = device_data->dispatch_table.GetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages);
    }

    if ((result == VK_SUCCESS || result == VK_INCOMPLETE)) {
        PostCallRecordGetSwapchainImagesKHR(device_data, swapchain_state, device, pSwapchainImageCount, pSwapchainImages);
    }
    return result;
}
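
// App-side sketch (illustrative only): the two-call idiom that the
// QUERY_COUNT / QUERY_DETAILS state machine above tracks.
//
//     uint32_t count = 0;
//     vkGetSwapchainImagesKHR(device, swapchain, &count, nullptr);        // records QUERY_COUNT
//     std::vector<VkImage> images(count);
//     vkGetSwapchainImagesKHR(device, swapchain, &count, images.data());  // records QUERY_DETAILS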

VKAPI_ATTR VkResult VKAPI_CALL QueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
    bool skip = false;

    lock_guard_t lock(global_lock);
    auto queue_state = GetQueueState(dev_data, queue);

    for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
        auto pSemaphore = GetSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
        if (pSemaphore && !pSemaphore->signaled) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                            __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                            "Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
                            HandleToUint64(queue), HandleToUint64(pPresentInfo->pWaitSemaphores[i]));
        }
    }

    for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
        auto swapchain_data = GetSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
        if (swapchain_data) {
            if (pPresentInfo->pImageIndices[i] >= swapchain_data->images.size()) {
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                            HandleToUint64(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_INVALID_IMAGE, "DS",
                            "vkQueuePresentKHR: Swapchain image index too large (%u). There are only %u images in this swapchain.",
                            pPresentInfo->pImageIndices[i], (uint32_t)swapchain_data->images.size());
            } else {
                auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
                auto image_state = GetImageState(dev_data, image);

                if (image_state->shared_presentable) {
                    image_state->layout_locked = true;
                }

                skip |= ValidateImageMemoryIsValid(dev_data, image_state, "vkQueuePresentKHR()");

                if (!image_state->acquired) {
                    skip |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                        HandleToUint64(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_IMAGE_NOT_ACQUIRED, "DS",
                        "vkQueuePresentKHR: Swapchain image index %u has not been acquired.", pPresentInfo->pImageIndices[i]);
                }

                vector<VkImageLayout> layouts;
                if (FindLayouts(dev_data, image, layouts)) {
                    for (auto layout : layouts) {
                        if ((layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) && (!dev_data->extensions.vk_khr_shared_presentable_image ||
                                                                            (layout != VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR))) {
                            skip |=
                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
                                        HandleToUint64(queue), __LINE__, VALIDATION_ERROR_11200a20, "DS",
                                        "Images passed to present must be in layout VK_IMAGE_LAYOUT_PRESENT_SRC_KHR or "
                                        "VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR, but this image is in %s. %s",
                                        string_VkImageLayout(layout), validation_error_map[VALIDATION_ERROR_11200a20]);
                        }
                    }
                }
            }

            // All physical devices and queue families are required to be able
            // to present to any native window on Android; require the
            // application to have established support on any other platform.
            if (!dev_data->instance_data->extensions.vk_khr_android_surface) {
                auto surface_state = GetSurfaceState(dev_data->instance_data, swapchain_data->createInfo.surface);
                auto support_it = surface_state->gpu_queue_support.find({dev_data->physical_device, queue_state->queueFamilyIndex});

                if (support_it == surface_state->gpu_queue_support.end()) {
                    skip |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                                HandleToUint64(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_UNSUPPORTED_QUEUE, "DS",
                                "vkQueuePresentKHR: Presenting image without calling vkGetPhysicalDeviceSurfaceSupportKHR");
                } else if (!support_it->second) {
                    skip |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                                HandleToUint64(pPresentInfo->pSwapchains[i]), __LINE__, VALIDATION_ERROR_31800a18, "DS",
                                "vkQueuePresentKHR: Presenting image on queue that cannot present to this surface. %s",
                                validation_error_map[VALIDATION_ERROR_31800a18]);
                }
            }
        }
    }
    if (pPresentInfo && pPresentInfo->pNext) {
        // Verify ext struct
        const auto *present_regions = lvl_find_in_chain<VkPresentRegionsKHR>(pPresentInfo->pNext);
        if (present_regions) {
            for (uint32_t i = 0; i < present_regions->swapchainCount; ++i) {
                auto swapchain_data = GetSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
                assert(swapchain_data);
                VkPresentRegionKHR region = present_regions->pRegions[i];
                for (uint32_t j = 0; j < region.rectangleCount; ++j) {
                    VkRectLayerKHR rect = region.pRectangles[j];
                    // TODO: Need to update these errors to their unique error ids when available
                    if ((rect.offset.x + rect.extent.width) > swapchain_data->createInfo.imageExtent.width) {
                        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, HandleToUint64(pPresentInfo->pSwapchains[i]),
                                        __LINE__, DRAWSTATE_SWAPCHAIN_INVALID_IMAGE, "DS",
                                        "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, "
                                        "pRegion[%i].pRectangles[%i], the sum of offset.x (%i) and extent.width (%i) is greater "
                                        "than the corresponding swapchain's imageExtent.width (%i).",
                                        i, j, rect.offset.x, rect.extent.width, swapchain_data->createInfo.imageExtent.width);
                    }
                    if ((rect.offset.y + rect.extent.height) > swapchain_data->createInfo.imageExtent.height) {
                        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, HandleToUint64(pPresentInfo->pSwapchains[i]),
                                        __LINE__, DRAWSTATE_SWAPCHAIN_INVALID_IMAGE, "DS",
                                        "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, "
                                        "pRegion[%i].pRectangles[%i], the sum of offset.y (%i) and extent.height (%i) is greater "
                                        "than the corresponding swapchain's imageExtent.height (%i).",
                                        i, j, rect.offset.y, rect.extent.height, swapchain_data->createInfo.imageExtent.height);
                    }
                    if (rect.layer > swapchain_data->createInfo.imageArrayLayers) {
                        skip |= log_msg(
                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                            HandleToUint64(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_INVALID_IMAGE, "DS",
                            "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, pRegion[%i].pRectangles[%i], the layer "
                            "(%i) is greater than the corresponding swapchain's imageArrayLayers (%i).",
                            i, j, rect.layer, swapchain_data->createInfo.imageArrayLayers);
                    }
                }
            }
        }
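        // App-side sketch (illustrative only): how VkPresentRegionsKHR, validated above,
        // is chained into VkPresentInfoKHR. `rects` and `rect_count` are assumed app-side
        // values kept within the swapchain's imageExtent and imageArrayLayers.
        //
        //     VkPresentRegionKHR region = {};
        //     region.rectangleCount = rect_count;
        //     region.pRectangles = rects;
        //     VkPresentRegionsKHR regions = {VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR};
        //     regions.swapchainCount = 1;
        //     regions.pRegions = &region;
        //     present_info.pNext = &regions;  // present_info is the app's VkPresentInfoKHR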

        const auto *present_times_info = lvl_find_in_chain<VkPresentTimesInfoGOOGLE>(pPresentInfo->pNext);
        if (present_times_info) {
            if (pPresentInfo->swapchainCount != present_times_info->swapchainCount) {
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                            HandleToUint64(pPresentInfo->pSwapchains[0]), __LINE__, VALIDATION_ERROR_118009be, "DS",
                            "vkQueuePresentKHR(): VkPresentTimesInfoGOOGLE.swapchainCount is %i but pPresentInfo->swapchainCount "
                            "is %i. For VkPresentTimesInfoGOOGLE down pNext chain of VkPresentInfoKHR, "
                            "VkPresentTimesInfoGOOGLE.swapchainCount must equal VkPresentInfoKHR.swapchainCount.",
                            present_times_info->swapchainCount, pPresentInfo->swapchainCount);
            }
        }
    }

    if (skip) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VkResult result = dev_data->dispatch_table.QueuePresentKHR(queue, pPresentInfo);

    if (result != VK_ERROR_VALIDATION_FAILED_EXT) {
        // Semaphore waits occur before error generation, if the call reached
        // the ICD. (Confirm?)
        for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
            auto pSemaphore = GetSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
            if (pSemaphore) {
                pSemaphore->signaler.first = VK_NULL_HANDLE;
                pSemaphore->signaled = false;
            }
        }

        for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
            // Note: this is imperfect, in that we can get confused about what
            // did or didn't succeed-- but if the app does that, it's confused
            // itself just as much.
            auto local_result = pPresentInfo->pResults ? pPresentInfo->pResults[i] : result;

            if (local_result != VK_SUCCESS && local_result != VK_SUBOPTIMAL_KHR) continue;  // this present didn't actually happen.

            // Mark the image as having been released to the WSI
            auto swapchain_data = GetSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
            auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
            auto image_state = GetImageState(dev_data, image);
            image_state->acquired = false;
        }

        // Note: even though presentation is directed to a queue, there is no
        // direct ordering between QP and subsequent work, so QP (and its
        // semaphore waits) /never/ participate in any completion proof.
    }

    return result;
}

static bool PreCallValidateCreateSharedSwapchainsKHR(layer_data *dev_data, uint32_t swapchainCount,
                                                     const VkSwapchainCreateInfoKHR *pCreateInfos, VkSwapchainKHR *pSwapchains,
                                                     std::vector<SURFACE_STATE *> &surface_state,
                                                     std::vector<SWAPCHAIN_NODE *> &old_swapchain_state) {
    if (pCreateInfos) {
        lock_guard_t lock(global_lock);
        for (uint32_t i = 0; i < swapchainCount; i++) {
            surface_state.push_back(GetSurfaceState(dev_data->instance_data, pCreateInfos[i].surface));
            old_swapchain_state.push_back(GetSwapchainNode(dev_data, pCreateInfos[i].oldSwapchain));
            std::stringstream func_name;
            func_name << "vkCreateSharedSwapchainsKHR[" << i << "]";  // tag each message with the swapchain's index
            if (PreCallValidateCreateSwapchainKHR(dev_data, func_name.str().c_str(), &pCreateInfos[i], surface_state[i],
                                                  old_swapchain_state[i])) {
                return true;
            }
        }
    }
    return false;
}

static void PostCallRecordCreateSharedSwapchainsKHR(layer_data *dev_data, VkResult result, uint32_t swapchainCount,
                                                    const VkSwapchainCreateInfoKHR *pCreateInfos, VkSwapchainKHR *pSwapchains,
                                                    std::vector<SURFACE_STATE *> &surface_state,
                                                    std::vector<SWAPCHAIN_NODE *> &old_swapchain_state) {
    if (VK_SUCCESS == result) {
        for (uint32_t i = 0; i < swapchainCount; i++) {
            auto swapchain_state = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(&pCreateInfos[i], pSwapchains[i]));
            if (VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == pCreateInfos[i].presentMode ||
                VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR == pCreateInfos[i].presentMode) {
                swapchain_state->shared_presentable = true;
            }
            surface_state[i]->swapchain = swapchain_state.get();
            dev_data->swapchainMap[pSwapchains[i]] = std::move(swapchain_state);
        }
    } else {
        for (uint32_t i = 0; i < swapchainCount; i++) {
            surface_state[i]->swapchain = nullptr;
        }
    }
    // Spec requires that even if CreateSharedSwapchainsKHR fails, oldSwapchain behaves as replaced.
    for (uint32_t i = 0; i < swapchainCount; i++) {
        if (old_swapchain_state[i]) {
            old_swapchain_state[i]->replaced = true;
        }
        surface_state[i]->old_swapchain = old_swapchain_state[i];
    }
    return;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount,
                                                         const VkSwapchainCreateInfoKHR *pCreateInfos,
                                                         const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchains) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    std::vector<SURFACE_STATE *> surface_state;
    std::vector<SWAPCHAIN_NODE *> old_swapchain_state;

    if (PreCallValidateCreateSharedSwapchainsKHR(dev_data, swapchainCount, pCreateInfos, pSwapchains, surface_state,
                                                 old_swapchain_state)) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VkResult result =
        dev_data->dispatch_table.CreateSharedSwapchainsKHR(device, swapchainCount, pCreateInfos, pAllocator, pSwapchains);

    PostCallRecordCreateSharedSwapchainsKHR(dev_data, result, swapchainCount, pCreateInfos, pSwapchains, surface_state,
                                            old_swapchain_state);

    return result;
}

static bool PreCallValidateAcquireNextImageKHR(layer_data *dev_data, VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
                                               VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
    bool skip = false;
    if (fence == VK_NULL_HANDLE && semaphore == VK_NULL_HANDLE) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(device), __LINE__, DRAWSTATE_SWAPCHAIN_NO_SYNC_FOR_ACQUIRE, "DS",
                        "vkAcquireNextImageKHR: Semaphore and fence cannot both be VK_NULL_HANDLE. There would be no way to "
                        "determine the completion of this operation.");
    }

    auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
    if (pSemaphore && pSemaphore->scope == kSyncScopeInternal && pSemaphore->signaled) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                        HandleToUint64(semaphore), __LINE__, VALIDATION_ERROR_16400a0c, "DS",
                        "vkAcquireNextImageKHR: Semaphore must not be currently signaled or in a wait state. %s",
                        validation_error_map[VALIDATION_ERROR_16400a0c]);
    }

    auto pFence = GetFenceNode(dev_data, fence);
    if (pFence) {
        skip |= ValidateFenceForSubmit(dev_data, pFence);
    }

    auto swapchain_data = GetSwapchainNode(dev_data, swapchain);
    if (!swapchain_data) return skip;  // Defensive: unknown swapchain handle, the checks below need its state.
    if (swapchain_data->replaced) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                        HandleToUint64(swapchain), __LINE__, DRAWSTATE_SWAPCHAIN_REPLACED, "DS",
                        "vkAcquireNextImageKHR: This swapchain has been replaced. The application can still present any images it "
                        "has acquired, but cannot acquire any more.");
    }

    auto physical_device_state = GetPhysicalDeviceState(dev_data->instance_data, dev_data->physical_device);
    if (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState != UNCALLED) {
        uint64_t acquired_images = std::count_if(swapchain_data->images.begin(), swapchain_data->images.end(),
                                                 [=](VkImage image) { return GetImageState(dev_data, image)->acquired; });
        if (acquired_images > swapchain_data->images.size() - physical_device_state->surfaceCapabilities.minImageCount) {
            skip |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                        HandleToUint64(swapchain), __LINE__, DRAWSTATE_SWAPCHAIN_TOO_MANY_IMAGES, "DS",
                        "vkAcquireNextImageKHR: Application has already acquired the maximum number of images (0x%" PRIxLEAST64 ")",
                        acquired_images);
        }
    }
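    // Worked example (illustrative): with 3 swapchain images and a reported
    // minImageCount of 2, the check above flags an acquire once more than
    // 3 - 2 = 1 image is outstanding, i.e. the application is holding on to
    // images the presentation engine may still need.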

    if (swapchain_data->images.size() == 0) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                        HandleToUint64(swapchain), __LINE__, DRAWSTATE_SWAPCHAIN_IMAGES_NOT_FOUND, "DS",
                        "vkAcquireNextImageKHR: No images found to acquire from. Application probably did not call "
                        "vkGetSwapchainImagesKHR after swapchain creation.");
    }
    return skip;
}

static void PostCallRecordAcquireNextImageKHR(layer_data *dev_data, VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
                                              VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
    auto pFence = GetFenceNode(dev_data, fence);
    if (pFence && pFence->scope == kSyncScopeInternal) {
        // Treat as inflight since it is valid to wait on this fence, even in cases where it is technically a temporary
        // import
        pFence->state = FENCE_INFLIGHT;
        pFence->signaler.first = VK_NULL_HANDLE;  // ANI isn't on a queue, so this can't participate in a completion proof.
    }

    auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
    if (pSemaphore && pSemaphore->scope == kSyncScopeInternal) {
        // Treat as signaled since it is valid to wait on this semaphore, even in cases where it is technically a
        // temporary import
        pSemaphore->signaled = true;
        pSemaphore->signaler.first = VK_NULL_HANDLE;
    }

    // Mark the image as acquired.
    auto swapchain_data = GetSwapchainNode(dev_data, swapchain);
    auto image = swapchain_data->images[*pImageIndex];
    auto image_state = GetImageState(dev_data, image);
    image_state->acquired = true;
    image_state->shared_presentable = swapchain_data->shared_presentable;
}

VKAPI_ATTR VkResult VKAPI_CALL AcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
                                                   VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateAcquireNextImageKHR(dev_data, device, swapchain, timeout, semaphore, fence, pImageIndex);
    lock.unlock();

    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->dispatch_table.AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);

    lock.lock();
    if (result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR) {
        PostCallRecordAcquireNextImageKHR(dev_data, device, swapchain, timeout, semaphore, fence, pImageIndex);
    }
    lock.unlock();

    return result;
}
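
// App-side sketch (illustrative only): the acquire -> submit -> present loop whose
// bookkeeping the layer tracks (acquired on ANI success, released again on present).
// `acquire_sem` and `render_sem` are assumed app-created VkSemaphore handles.
//
//     uint32_t index = 0;
//     vkAcquireNextImageKHR(device, swapchain, UINT64_MAX, acquire_sem, VK_NULL_HANDLE, &index);
//     // ... submit work that waits on acquire_sem and signals render_sem ...
//     VkPresentInfoKHR present = {VK_STRUCTURE_TYPE_PRESENT_INFO_KHR};
//     present.waitSemaphoreCount = 1;
//     present.pWaitSemaphores = &render_sem;
//     present.swapchainCount = 1;
//     present.pSwapchains = &swapchain;
//     present.pImageIndices = &index;
//     vkQueuePresentKHR(queue, &present);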
10842 
EnumeratePhysicalDevices(VkInstance instance,uint32_t * pPhysicalDeviceCount,VkPhysicalDevice * pPhysicalDevices)10843 VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount,
10844                                                         VkPhysicalDevice *pPhysicalDevices) {
10845     bool skip = false;
10846     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
10847     assert(instance_data);
10848 
10849     // For this instance, flag when vkEnumeratePhysicalDevices goes to QUERY_COUNT and then QUERY_DETAILS
10850     if (NULL == pPhysicalDevices) {
10851         instance_data->vkEnumeratePhysicalDevicesState = QUERY_COUNT;
10852     } else {
10853         if (UNCALLED == instance_data->vkEnumeratePhysicalDevicesState) {
10854             // Flag warning here. You can call this without having queried the count, but it may not be
10855             // robust on platforms with multiple physical devices.
10856             skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
10857                             0, __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL",
10858                             "Call sequence has vkEnumeratePhysicalDevices() w/ non-NULL pPhysicalDevices. You should first call "
10859                             "vkEnumeratePhysicalDevices() w/ NULL pPhysicalDevices to query pPhysicalDeviceCount.");
10860         }  // TODO : Could also flag a warning if re-calling this function in QUERY_DETAILS state
10861         else if (instance_data->physical_devices_count != *pPhysicalDeviceCount) {
10862             // Having actual count match count from app is not a requirement, so this can be a warning
10863             skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
10864                             VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
10865                             "Call to vkEnumeratePhysicalDevices() w/ pPhysicalDeviceCount value %u, but actual count supported by "
10866                             "this instance is %u.",
10867                             *pPhysicalDeviceCount, instance_data->physical_devices_count);
10868         }
10869         instance_data->vkEnumeratePhysicalDevicesState = QUERY_DETAILS;
10870     }
10871     if (skip) {
10872         return VK_ERROR_VALIDATION_FAILED_EXT;
10873     }
10874     VkResult result = instance_data->dispatch_table.EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
10875     if (NULL == pPhysicalDevices) {
10876         instance_data->physical_devices_count = *pPhysicalDeviceCount;
10877     } else if (result == VK_SUCCESS) {  // Save physical devices
10878         for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) {
10879             auto &phys_device_state = instance_data->physical_device_map[pPhysicalDevices[i]];
10880             phys_device_state.phys_device = pPhysicalDevices[i];
10881             // Init actual features for each physical device
10882             instance_data->dispatch_table.GetPhysicalDeviceFeatures(pPhysicalDevices[i], &phys_device_state.features);
10883         }
10884     }
10885     return result;
10886 }
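
// Illustrative only: the two-call idiom the warnings above steer applications toward, sketched
// with an assumed valid `instance`.
//
//     uint32_t gpu_count = 0;
//     vkEnumeratePhysicalDevices(instance, &gpu_count, nullptr);      // QUERY_COUNT
//     std::vector<VkPhysicalDevice> gpus(gpu_count);
//     vkEnumeratePhysicalDevices(instance, &gpu_count, gpus.data());  // QUERY_DETAILS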
10887 
10888 // Common function to handle validation for GetPhysicalDeviceQueueFamilyProperties & 2KHR version
10889 static bool ValidateCommonGetPhysicalDeviceQueueFamilyProperties(instance_layer_data *instance_data,
10890                                                                  PHYSICAL_DEVICE_STATE *pd_state,
10891                                                                  uint32_t requested_queue_family_property_count, bool qfp_null,
10892                                                                  const char *caller_name) {
10893     bool skip = false;
10894     if (!qfp_null) {
10895         // Verify that for each physical device, this command is called first with NULL pQueueFamilyProperties in order to get count
10896         if (UNCALLED == pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
10897             skip |= log_msg(
10898                 instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
10899                 HandleToUint64(pd_state->phys_device), __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL",
10900                 "%s is called with non-NULL pQueueFamilyProperties before obtaining pQueueFamilyPropertyCount. It is recommended "
10901                 "to first call %s with NULL pQueueFamilyProperties in order to obtain the maximal pQueueFamilyPropertyCount.",
10902                 caller_name, caller_name);
10903             // Then verify that the pCount passed in on the second call matches what was previously returned
10904         } else if (pd_state->queue_family_count != requested_queue_family_property_count) {
10905             skip |= log_msg(
10906                 instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
10907                 HandleToUint64(pd_state->phys_device), __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
10908                 "%s is called with non-NULL pQueueFamilyProperties and pQueueFamilyPropertyCount value %" PRIu32
10909                 ", but the largest previously returned pQueueFamilyPropertyCount for this physicalDevice is %" PRIu32
10910                 ". It is recommended to instead receive all the properties by calling %s with pQueueFamilyPropertyCount that was "
10911                 "previously obtained by calling %s with NULL pQueueFamilyProperties.",
10912                 caller_name, requested_queue_family_property_count, pd_state->queue_family_count, caller_name, caller_name);
10913         }
10914         pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_DETAILS;
10915     }
10916 
10917     return skip;
10918 }
10919 
10920 static bool PreCallValidateGetPhysicalDeviceQueueFamilyProperties(instance_layer_data *instance_data,
10921                                                                   PHYSICAL_DEVICE_STATE *pd_state,
10922                                                                   uint32_t *pQueueFamilyPropertyCount,
10923                                                                   VkQueueFamilyProperties *pQueueFamilyProperties) {
10924     return ValidateCommonGetPhysicalDeviceQueueFamilyProperties(instance_data, pd_state, *pQueueFamilyPropertyCount,
10925                                                                 (nullptr == pQueueFamilyProperties),
10926                                                                 "vkGetPhysicalDeviceQueueFamilyProperties()");
10927 }
10928 
10929 static bool PreCallValidateGetPhysicalDeviceQueueFamilyProperties2KHR(instance_layer_data *instance_data,
10930                                                                       PHYSICAL_DEVICE_STATE *pd_state,
10931                                                                       uint32_t *pQueueFamilyPropertyCount,
10932                                                                       VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
10933     return ValidateCommonGetPhysicalDeviceQueueFamilyProperties(instance_data, pd_state, *pQueueFamilyPropertyCount,
10934                                                                 (nullptr == pQueueFamilyProperties),
10935                                                                 "vkGetPhysicalDeviceQueueFamilyProperties2KHR()");
10936 }
10937 
10938 // Common function to update state for GetPhysicalDeviceQueueFamilyProperties & 2KHR version
10939 static void StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(PHYSICAL_DEVICE_STATE *pd_state, uint32_t count,
10940                                                                     VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
10941     if (!pQueueFamilyProperties) {
10942         if (UNCALLED == pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState)
10943             pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_COUNT;
10944         pd_state->queue_family_count = count;
10945     } else {  // Save queue family properties
10946         pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_DETAILS;
10947         pd_state->queue_family_count = std::max(pd_state->queue_family_count, count);
10948 
10949         pd_state->queue_family_properties.resize(std::max(static_cast<uint32_t>(pd_state->queue_family_properties.size()), count));
10950         for (uint32_t i = 0; i < count; ++i) {
10951             pd_state->queue_family_properties[i] = pQueueFamilyProperties[i].queueFamilyProperties;
10952         }
10953     }
10954 }
10955 
10956 static void PostCallRecordGetPhysicalDeviceQueueFamilyProperties(PHYSICAL_DEVICE_STATE *pd_state, uint32_t count,
10957                                                                  VkQueueFamilyProperties *pQueueFamilyProperties) {
10958     VkQueueFamilyProperties2KHR *pqfp = nullptr;
10959     std::vector<VkQueueFamilyProperties2KHR> qfp;
10960     qfp.resize(count);
10961     if (pQueueFamilyProperties) {
10962         for (uint32_t i = 0; i < count; ++i) {
10963             qfp[i].sType = VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2_KHR;
10964             qfp[i].pNext = nullptr;
10965             qfp[i].queueFamilyProperties = pQueueFamilyProperties[i];
10966         }
10967         pqfp = qfp.data();
10968     }
10969     StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(pd_state, count, pqfp);
10970 }
10971 
10972 static void PostCallRecordGetPhysicalDeviceQueueFamilyProperties2KHR(PHYSICAL_DEVICE_STATE *pd_state, uint32_t count,
10973                                                                      VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
10974     StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(pd_state, count, pQueueFamilyProperties);
10975 }
10976 
10977 VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice,
10978                                                                   uint32_t *pQueueFamilyPropertyCount,
10979                                                                   VkQueueFamilyProperties *pQueueFamilyProperties) {
10980     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
10981     auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
10982     assert(physical_device_state);
10983     unique_lock_t lock(global_lock);
10984 
10985     bool skip = PreCallValidateGetPhysicalDeviceQueueFamilyProperties(instance_data, physical_device_state,
10986                                                                       pQueueFamilyPropertyCount, pQueueFamilyProperties);
10987 
10988     lock.unlock();
10989 
10990     if (skip) return;
10991 
10992     instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(physicalDevice, pQueueFamilyPropertyCount,
10993                                                                          pQueueFamilyProperties);
10994 
10995     lock.lock();
10996     PostCallRecordGetPhysicalDeviceQueueFamilyProperties(physical_device_state, *pQueueFamilyPropertyCount, pQueueFamilyProperties);
10997 }
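
// Illustrative only: the call sequence ValidateCommonGetPhysicalDeviceQueueFamilyProperties()
// recommends, sketched with an assumed valid `gpu`.
//
//     uint32_t qf_count = 0;
//     vkGetPhysicalDeviceQueueFamilyProperties(gpu, &qf_count, nullptr);
//     std::vector<VkQueueFamilyProperties> qf_props(qf_count);
//     vkGetPhysicalDeviceQueueFamilyProperties(gpu, &qf_count, qf_props.data());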
10998 
10999 VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties2KHR(VkPhysicalDevice physicalDevice,
11000                                                                       uint32_t *pQueueFamilyPropertyCount,
11001                                                                       VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
11002     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11003     auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11004     assert(physical_device_state);
11005     unique_lock_t lock(global_lock);
11006 
11007     bool skip = PreCallValidateGetPhysicalDeviceQueueFamilyProperties2KHR(instance_data, physical_device_state,
11008                                                                           pQueueFamilyPropertyCount, pQueueFamilyProperties);
11009 
11010     lock.unlock();
11011 
11012     if (skip) return;
11013 
11014     instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties2KHR(physicalDevice, pQueueFamilyPropertyCount,
11015                                                                              pQueueFamilyProperties);
11016 
11017     lock.lock();
11018     PostCallRecordGetPhysicalDeviceQueueFamilyProperties2KHR(physical_device_state, *pQueueFamilyPropertyCount,
11019                                                              pQueueFamilyProperties);
11020 }
11021 
11022 template <typename TCreateInfo, typename FPtr>
11023 static VkResult CreateSurface(VkInstance instance, TCreateInfo const *pCreateInfo, VkAllocationCallbacks const *pAllocator,
11024                               VkSurfaceKHR *pSurface, FPtr fptr) {
11025     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
11026 
11027     // Call down the call chain:
11028     VkResult result = (instance_data->dispatch_table.*fptr)(instance, pCreateInfo, pAllocator, pSurface);
11029 
11030     if (result == VK_SUCCESS) {
11031         unique_lock_t lock(global_lock);
11032         instance_data->surface_map[*pSurface] = SURFACE_STATE(*pSurface);
11033         lock.unlock();
11034     }
11035 
11036     return result;
11037 }
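
// The FPtr argument above is a pointer to a member function of VkLayerInstanceDispatchTable,
// invoked through the `.*` operator. A minimal, self-contained sketch of that idiom
// (hypothetical types, for illustration only):
//
//     struct Table { int f(int x) { return x + 1; } };
//     int (Table::*pf)(int) = &Table::f;
//     Table t;
//     int y = (t.*pf)(41);  // y == 42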
11038 
11039 VKAPI_ATTR void VKAPI_CALL DestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks *pAllocator) {
11040     bool skip = false;
11041     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
11042     unique_lock_t lock(global_lock);
11043     auto surface_state = GetSurfaceState(instance_data, surface);
11044 
11045     if ((surface_state) && (surface_state->swapchain)) {
11046         skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
11047                         HandleToUint64(instance), __LINE__, VALIDATION_ERROR_26c009e4, "DS",
11048                         "vkDestroySurfaceKHR() called before its associated VkSwapchainKHR was destroyed. %s",
11049                         validation_error_map[VALIDATION_ERROR_26c009e4]);
11050     }
11051     instance_data->surface_map.erase(surface);
11052     lock.unlock();
11053     if (!skip) {
11054         instance_data->dispatch_table.DestroySurfaceKHR(instance, surface, pAllocator);
11055     }
11056 }
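
// Illustrative only: the teardown order VALIDATION_ERROR_26c009e4 enforces, with assumed
// `device`, `swapchain`, `instance`, and `surface` handles.
//
//     vkDestroySwapchainKHR(device, swapchain, nullptr);  // destroy the swapchain first...
//     vkDestroySurfaceKHR(instance, surface, nullptr);    // ...then the surface it was created from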
11057 
11058 VKAPI_ATTR VkResult VKAPI_CALL CreateDisplayPlaneSurfaceKHR(VkInstance instance, const VkDisplaySurfaceCreateInfoKHR *pCreateInfo,
11059                                                             const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11060     return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateDisplayPlaneSurfaceKHR);
11061 }
11062 
11063 #ifdef VK_USE_PLATFORM_ANDROID_KHR
11064 VKAPI_ATTR VkResult VKAPI_CALL CreateAndroidSurfaceKHR(VkInstance instance, const VkAndroidSurfaceCreateInfoKHR *pCreateInfo,
11065                                                        const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11066     return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateAndroidSurfaceKHR);
11067 }
11068 #endif  // VK_USE_PLATFORM_ANDROID_KHR
11069 
11070 #ifdef VK_USE_PLATFORM_MIR_KHR
11071 VKAPI_ATTR VkResult VKAPI_CALL CreateMirSurfaceKHR(VkInstance instance, const VkMirSurfaceCreateInfoKHR *pCreateInfo,
11072                                                    const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11073     return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateMirSurfaceKHR);
11074 }
11075 
11076 VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceMirPresentationSupportKHR(VkPhysicalDevice physicalDevice,
11077                                                                           uint32_t queueFamilyIndex, MirConnection *connection) {
11078     bool skip = false;
11079     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11080 
11081     unique_lock_t lock(global_lock);
11082     const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11083 
11084     skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2d2009e2,
11085                                               "vkGetPhysicalDeviceMirPresentationSupportKHR", "queueFamilyIndex");
11086 
11087     lock.unlock();
11088 
11089     if (skip) return VK_FALSE;
11090 
11091     // Call down the call chain:
11092     VkBool32 result =
11093         instance_data->dispatch_table.GetPhysicalDeviceMirPresentationSupportKHR(physicalDevice, queueFamilyIndex, connection);
11094 
11095     return result;
11096 }
11097 #endif  // VK_USE_PLATFORM_MIR_KHR
11098 
11099 #ifdef VK_USE_PLATFORM_WAYLAND_KHR
11100 VKAPI_ATTR VkResult VKAPI_CALL CreateWaylandSurfaceKHR(VkInstance instance, const VkWaylandSurfaceCreateInfoKHR *pCreateInfo,
11101                                                        const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11102     return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateWaylandSurfaceKHR);
11103 }
11104 
11105 VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceWaylandPresentationSupportKHR(VkPhysicalDevice physicalDevice,
11106                                                                               uint32_t queueFamilyIndex,
11107                                                                               struct wl_display *display) {
11108     bool skip = false;
11109     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11110 
11111     unique_lock_t lock(global_lock);
11112     const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11113 
11114     skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2f000a34,
11115                                               "vkGetPhysicalDeviceWaylandPresentationSupportKHR", "queueFamilyIndex");
11116 
11117     lock.unlock();
11118 
11119     if (skip) return VK_FALSE;
11120 
11121     // Call down the call chain:
11122     VkBool32 result =
11123         instance_data->dispatch_table.GetPhysicalDeviceWaylandPresentationSupportKHR(physicalDevice, queueFamilyIndex, display);
11124 
11125     return result;
11126 }
11127 #endif  // VK_USE_PLATFORM_WAYLAND_KHR
11128 
11129 #ifdef VK_USE_PLATFORM_WIN32_KHR
11130 VKAPI_ATTR VkResult VKAPI_CALL CreateWin32SurfaceKHR(VkInstance instance, const VkWin32SurfaceCreateInfoKHR *pCreateInfo,
11131                                                      const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11132     return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateWin32SurfaceKHR);
11133 }
11134 
11135 VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceWin32PresentationSupportKHR(VkPhysicalDevice physicalDevice,
11136                                                                             uint32_t queueFamilyIndex) {
11137     bool skip = false;
11138     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11139 
11140     unique_lock_t lock(global_lock);
11141     const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11142 
11143     skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2f200a3a,
11144                                               "vkGetPhysicalDeviceWin32PresentationSupportKHR", "queueFamilyIndex");
11145 
11146     lock.unlock();
11147 
11148     if (skip) return VK_FALSE;
11149 
11150     // Call down the call chain:
11151     VkBool32 result = instance_data->dispatch_table.GetPhysicalDeviceWin32PresentationSupportKHR(physicalDevice, queueFamilyIndex);
11152 
11153     return result;
11154 }
11155 #endif  // VK_USE_PLATFORM_WIN32_KHR
11156 
11157 #ifdef VK_USE_PLATFORM_XCB_KHR
11158 VKAPI_ATTR VkResult VKAPI_CALL CreateXcbSurfaceKHR(VkInstance instance, const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
11159                                                    const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11160     return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateXcbSurfaceKHR);
11161 }
11162 
11163 VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceXcbPresentationSupportKHR(VkPhysicalDevice physicalDevice,
11164                                                                           uint32_t queueFamilyIndex, xcb_connection_t *connection,
11165                                                                           xcb_visualid_t visual_id) {
11166     bool skip = false;
11167     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11168 
11169     unique_lock_t lock(global_lock);
11170     const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11171 
11172     skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2f400a40,
11173                                               "vkGetPhysicalDeviceXcbPresentationSupportKHR", "queueFamilyIndex");
11174 
11175     lock.unlock();
11176 
11177     if (skip) return VK_FALSE;
11178 
11179     // Call down the call chain:
11180     VkBool32 result = instance_data->dispatch_table.GetPhysicalDeviceXcbPresentationSupportKHR(physicalDevice, queueFamilyIndex,
11181                                                                                                connection, visual_id);
11182 
11183     return result;
11184 }
11185 #endif  // VK_USE_PLATFORM_XCB_KHR
11186 
11187 #ifdef VK_USE_PLATFORM_XLIB_KHR
11188 VKAPI_ATTR VkResult VKAPI_CALL CreateXlibSurfaceKHR(VkInstance instance, const VkXlibSurfaceCreateInfoKHR *pCreateInfo,
11189                                                     const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11190     return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateXlibSurfaceKHR);
11191 }
11192 
11193 VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceXlibPresentationSupportKHR(VkPhysicalDevice physicalDevice,
11194                                                                            uint32_t queueFamilyIndex, Display *dpy,
11195                                                                            VisualID visualID) {
11196     bool skip = false;
11197     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11198 
11199     unique_lock_t lock(global_lock);
11200     const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11201 
11202     skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2f600a46,
11203                                               "vkGetPhysicalDeviceXlibPresentationSupportKHR", "queueFamilyIndex");
11204 
11205     lock.unlock();
11206 
11207     if (skip) return VK_FALSE;
11208 
11209     // Call down the call chain:
11210     VkBool32 result =
11211         instance_data->dispatch_table.GetPhysicalDeviceXlibPresentationSupportKHR(physicalDevice, queueFamilyIndex, dpy, visualID);
11212 
11213     return result;
11214 }
11215 #endif  // VK_USE_PLATFORM_XLIB_KHR
11216 
11217 VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
11218                                                                        VkSurfaceCapabilitiesKHR *pSurfaceCapabilities) {
11219     auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11220 
11221     unique_lock_t lock(global_lock);
11222     auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11223     lock.unlock();
11224 
11225     auto result =
11226         instance_data->dispatch_table.GetPhysicalDeviceSurfaceCapabilitiesKHR(physicalDevice, surface, pSurfaceCapabilities);
11227 
11228     if (result == VK_SUCCESS) {
11229         physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS;
11230         physical_device_state->surfaceCapabilities = *pSurfaceCapabilities;
11231     }
11232 
11233     return result;
11234 }
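
// Illustrative only: typical application-side use of the capabilities recorded above when
// sizing a swapchain (assumed `gpu` and `surface` handles).
//
//     VkSurfaceCapabilitiesKHR caps = {};
//     vkGetPhysicalDeviceSurfaceCapabilitiesKHR(gpu, surface, &caps);
//     uint32_t image_count = caps.minImageCount + 1;
//     if (caps.maxImageCount > 0 && image_count > caps.maxImageCount)
//         image_count = caps.maxImageCount;  // maxImageCount == 0 means "no upper limit"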
11235 
11236 static void PostCallRecordGetPhysicalDeviceSurfaceCapabilities2KHR(instance_layer_data *instanceData,
11237                                                                    VkPhysicalDevice physicalDevice,
11238                                                                    VkSurfaceCapabilities2KHR *pSurfaceCapabilities) {
11239     unique_lock_t lock(global_lock);
11240     auto physicalDeviceState = GetPhysicalDeviceState(instanceData, physicalDevice);
11241     physicalDeviceState->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS;
11242     physicalDeviceState->surfaceCapabilities = pSurfaceCapabilities->surfaceCapabilities;
11243 }
11244 
11245 VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceCapabilities2KHR(VkPhysicalDevice physicalDevice,
11246                                                                         const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
11247                                                                         VkSurfaceCapabilities2KHR *pSurfaceCapabilities) {
11248     auto instanceData = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11249 
11250     auto result =
11251         instanceData->dispatch_table.GetPhysicalDeviceSurfaceCapabilities2KHR(physicalDevice, pSurfaceInfo, pSurfaceCapabilities);
11252 
11253     if (result == VK_SUCCESS) {
11254         PostCallRecordGetPhysicalDeviceSurfaceCapabilities2KHR(instanceData, physicalDevice, pSurfaceCapabilities);
11255     }
11256 
11257     return result;
11258 }
11259 
11260 static void PostCallRecordGetPhysicalDeviceSurfaceCapabilities2EXT(instance_layer_data *instanceData,
11261                                                                    VkPhysicalDevice physicalDevice,
11262                                                                    VkSurfaceCapabilities2EXT *pSurfaceCapabilities) {
11263     unique_lock_t lock(global_lock);
11264     auto physicalDeviceState = GetPhysicalDeviceState(instanceData, physicalDevice);
11265     physicalDeviceState->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS;
11266     physicalDeviceState->surfaceCapabilities.minImageCount = pSurfaceCapabilities->minImageCount;
11267     physicalDeviceState->surfaceCapabilities.maxImageCount = pSurfaceCapabilities->maxImageCount;
11268     physicalDeviceState->surfaceCapabilities.currentExtent = pSurfaceCapabilities->currentExtent;
11269     physicalDeviceState->surfaceCapabilities.minImageExtent = pSurfaceCapabilities->minImageExtent;
11270     physicalDeviceState->surfaceCapabilities.maxImageExtent = pSurfaceCapabilities->maxImageExtent;
11271     physicalDeviceState->surfaceCapabilities.maxImageArrayLayers = pSurfaceCapabilities->maxImageArrayLayers;
11272     physicalDeviceState->surfaceCapabilities.supportedTransforms = pSurfaceCapabilities->supportedTransforms;
11273     physicalDeviceState->surfaceCapabilities.currentTransform = pSurfaceCapabilities->currentTransform;
11274     physicalDeviceState->surfaceCapabilities.supportedCompositeAlpha = pSurfaceCapabilities->supportedCompositeAlpha;
11275     physicalDeviceState->surfaceCapabilities.supportedUsageFlags = pSurfaceCapabilities->supportedUsageFlags;
11276 }
11277 
11278 VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceCapabilities2EXT(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
11279                                                                         VkSurfaceCapabilities2EXT *pSurfaceCapabilities) {
11280     auto instanceData = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11281 
11282     auto result =
11283         instanceData->dispatch_table.GetPhysicalDeviceSurfaceCapabilities2EXT(physicalDevice, surface, pSurfaceCapabilities);
11284 
11285     if (result == VK_SUCCESS) {
11286         PostCallRecordGetPhysicalDeviceSurfaceCapabilities2EXT(instanceData, physicalDevice, pSurfaceCapabilities);
11287     }
11288 
11289     return result;
11290 }
11291 
11292 VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex,
11293                                                                   VkSurfaceKHR surface, VkBool32 *pSupported) {
11294     bool skip = false;
11295     auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11296 
11297     unique_lock_t lock(global_lock);
11298     const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11299     auto surface_state = GetSurfaceState(instance_data, surface);
11300 
11301     skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2ee009ea,
11302                                               "vkGetPhysicalDeviceSurfaceSupportKHR", "queueFamilyIndex");
11303 
11304     lock.unlock();
11305 
11306     if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
11307 
11308     auto result =
11309         instance_data->dispatch_table.GetPhysicalDeviceSurfaceSupportKHR(physicalDevice, queueFamilyIndex, surface, pSupported);
11310 
11311     if (result == VK_SUCCESS) {
11312         surface_state->gpu_queue_support[{physicalDevice, queueFamilyIndex}] = (*pSupported == VK_TRUE);
11313     }
11314 
11315     return result;
11316 }
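
// Illustrative only: how an application typically fills in the gpu_queue_support entries
// recorded above, one query per queue family (assumed `gpu`, `surface`, and `qf_count` from a
// prior queue-family query).
//
//     for (uint32_t qf = 0; qf < qf_count; ++qf) {
//         VkBool32 supported = VK_FALSE;
//         vkGetPhysicalDeviceSurfaceSupportKHR(gpu, qf, surface, &supported);
//         if (supported == VK_TRUE) { /* queue family qf can present to surface */ }
//     }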
11317 
11318 VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
11319                                                                        uint32_t *pPresentModeCount,
11320                                                                        VkPresentModeKHR *pPresentModes) {
11321     bool skip = false;
11322     auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11323     unique_lock_t lock(global_lock);
11324     // TODO: This isn't quite right; available modes may differ by surface AND physical device.
11325     auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11326     auto &call_state = physical_device_state->vkGetPhysicalDeviceSurfacePresentModesKHRState;
11327 
11328     if (pPresentModes) {
11329         // Compare the preliminary value of *pPresentModeCount with the value this time:
11330         auto prev_mode_count = (uint32_t)physical_device_state->present_modes.size();
11331         switch (call_state) {
11332             case UNCALLED:
11333                 skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11334                                 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physicalDevice), __LINE__,
11335                                 DEVLIMITS_MUST_QUERY_COUNT, "DL",
11336                                 "vkGetPhysicalDeviceSurfacePresentModesKHR() called with non-NULL pPresentModes, but no prior "
11337                                 "positive value has been seen for pPresentModeCount.");
11338                 break;
11339             default:
11340                 // both query count and query details
11341                 if (*pPresentModeCount != prev_mode_count) {
11342                     skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11343                                     VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physicalDevice), __LINE__,
11344                                     DEVLIMITS_COUNT_MISMATCH, "DL",
11345                                     "vkGetPhysicalDeviceSurfacePresentModesKHR() called with *pPresentModeCount (%u) that differs "
11346                                     "from the value (%u) that was returned when pPresentModes was NULL.",
11347                                     *pPresentModeCount, prev_mode_count);
11348                 }
11349                 break;
11350         }
11351     }
11352     lock.unlock();
11353 
11354     if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
11355 
11356     auto result = instance_data->dispatch_table.GetPhysicalDeviceSurfacePresentModesKHR(physicalDevice, surface, pPresentModeCount,
11357                                                                                         pPresentModes);
11358 
11359     if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
11360         lock.lock();
11361 
11362         if (*pPresentModeCount) {
11363             if (call_state < QUERY_COUNT) call_state = QUERY_COUNT;
11364             if (*pPresentModeCount > physical_device_state->present_modes.size())
11365                 physical_device_state->present_modes.resize(*pPresentModeCount);
11366         }
11367         if (pPresentModes) {
11368             if (call_state < QUERY_DETAILS) call_state = QUERY_DETAILS;
11369             for (uint32_t i = 0; i < *pPresentModeCount; i++) {
11370                 physical_device_state->present_modes[i] = pPresentModes[i];
11371             }
11372         }
11373     }
11374 
11375     return result;
11376 }
11377 
11378 VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
11379                                                                   uint32_t *pSurfaceFormatCount,
11380                                                                   VkSurfaceFormatKHR *pSurfaceFormats) {
11381     bool skip = false;
11382     auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11383     unique_lock_t lock(global_lock);
11384     auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11385     auto &call_state = physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState;
11386 
11387     if (pSurfaceFormats) {
11388         auto prev_format_count = (uint32_t)physical_device_state->surface_formats.size();
11389 
11390         switch (call_state) {
11391             case UNCALLED:
11392                 // Since we haven't recorded a preliminary value of *pSurfaceFormatCount, the application likely
11393                 // didn't previously call this function with a NULL value of pSurfaceFormats:
11395                 skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11396                                 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physicalDevice), __LINE__,
11397                                 DEVLIMITS_MUST_QUERY_COUNT, "DL",
11398                                 "vkGetPhysicalDeviceSurfaceFormatsKHR() called with non-NULL pSurfaceFormats, but no prior "
11399                                 "positive value has been seen for pSurfaceFormatCount.");
11400                 break;
11401             default:
11402                 if (prev_format_count != *pSurfaceFormatCount) {
11403                     skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11404                                     VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physicalDevice), __LINE__,
11405                                     DEVLIMITS_COUNT_MISMATCH, "DL",
11406                                     "vkGetPhysicalDeviceSurfaceFormatsKHR() called with non-NULL pSurfaceFormats, and with "
11407                                     "*pSurfaceFormatCount set to a value (%u) that differs from the value (%u) that was "
11408                                     "returned when pSurfaceFormats was NULL.",
11409                                     *pSurfaceFormatCount, prev_format_count);
11410                 }
11411                 break;
11412         }
11413     }
11414     lock.unlock();
11415 
11416     if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
11417 
11418     // Call down the call chain:
11419     auto result = instance_data->dispatch_table.GetPhysicalDeviceSurfaceFormatsKHR(physicalDevice, surface, pSurfaceFormatCount,
11420                                                                                    pSurfaceFormats);
11421 
11422     if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
11423         lock.lock();
11424 
11425         if (*pSurfaceFormatCount) {
11426             if (call_state < QUERY_COUNT) call_state = QUERY_COUNT;
11427             if (*pSurfaceFormatCount > physical_device_state->surface_formats.size())
11428                 physical_device_state->surface_formats.resize(*pSurfaceFormatCount);
11429         }
11430         if (pSurfaceFormats) {
11431             if (call_state < QUERY_DETAILS) call_state = QUERY_DETAILS;
11432             for (uint32_t i = 0; i < *pSurfaceFormatCount; i++) {
11433                 physical_device_state->surface_formats[i] = pSurfaceFormats[i];
11434             }
11435         }
11436     }
11437     return result;
11438 }
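
// Illustrative only: consuming the formats recorded above. The preference for
// B8G8R8A8_UNORM with an sRGB color space is an assumption for the sketch, not a spec
// requirement; `formats` is the assumed vector filled by the two-call query.
//
//     VkSurfaceFormatKHR chosen = formats[0];
//     for (const auto &f : formats) {
//         if (f.format == VK_FORMAT_B8G8R8A8_UNORM && f.colorSpace == VK_COLOR_SPACE_SRGB_NONLINEAR_KHR) {
//             chosen = f;
//             break;
//         }
//     }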
11439 
11440 static void PostCallRecordGetPhysicalDeviceSurfaceFormats2KHR(instance_layer_data *instanceData, VkPhysicalDevice physicalDevice,
11441                                                               uint32_t *pSurfaceFormatCount, VkSurfaceFormat2KHR *pSurfaceFormats) {
11442     unique_lock_t lock(global_lock);
11443     auto physicalDeviceState = GetPhysicalDeviceState(instanceData, physicalDevice);
11444     if (*pSurfaceFormatCount) {
11445         if (physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState < QUERY_COUNT) {
11446             physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState = QUERY_COUNT;
11447         }
11448         if (*pSurfaceFormatCount > physicalDeviceState->surface_formats.size())
11449             physicalDeviceState->surface_formats.resize(*pSurfaceFormatCount);
11450     }
11451     if (pSurfaceFormats) {
11452         if (physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState < QUERY_DETAILS) {
11453             physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState = QUERY_DETAILS;
11454         }
11455         for (uint32_t i = 0; i < *pSurfaceFormatCount; i++) {
11456             physicalDeviceState->surface_formats[i] = pSurfaceFormats[i].surfaceFormat;
11457         }
11458     }
11459 }
11460 
11461 VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceFormats2KHR(VkPhysicalDevice physicalDevice,
11462                                                                    const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
11463                                                                    uint32_t *pSurfaceFormatCount,
11464                                                                    VkSurfaceFormat2KHR *pSurfaceFormats) {
11465     auto instanceData = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11466     auto result = instanceData->dispatch_table.GetPhysicalDeviceSurfaceFormats2KHR(physicalDevice, pSurfaceInfo,
11467                                                                                    pSurfaceFormatCount, pSurfaceFormats);
11468     if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
11469         PostCallRecordGetPhysicalDeviceSurfaceFormats2KHR(instanceData, physicalDevice, pSurfaceFormatCount, pSurfaceFormats);
11470     }
11471     return result;
11472 }
11473 
11474 VKAPI_ATTR VkResult VKAPI_CALL CreateDebugReportCallbackEXT(VkInstance instance,
11475                                                             const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
11476                                                             const VkAllocationCallbacks *pAllocator,
11477                                                             VkDebugReportCallbackEXT *pMsgCallback) {
11478     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
11479     VkResult res = instance_data->dispatch_table.CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
11480     if (VK_SUCCESS == res) {
11481         lock_guard_t lock(global_lock);
11482         res = layer_create_msg_callback(instance_data->report_data, false, pCreateInfo, pAllocator, pMsgCallback);
11483     }
11484     return res;
11485 }
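
// Illustrative only: registering the callback this entry point wraps. In a real application
// the function pointer must be fetched with vkGetInstanceProcAddr; `MyDebugCallback` is an
// assumed user function with the PFN_vkDebugReportCallbackEXT signature.
//
//     VkDebugReportCallbackCreateInfoEXT info = {};
//     info.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT;
//     info.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT;
//     info.pfnCallback = MyDebugCallback;
//     VkDebugReportCallbackEXT callback = VK_NULL_HANDLE;
//     vkCreateDebugReportCallbackEXT(instance, &info, nullptr, &callback);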
11486 
11487 VKAPI_ATTR void VKAPI_CALL DestroyDebugReportCallbackEXT(VkInstance instance, VkDebugReportCallbackEXT msgCallback,
11488                                                          const VkAllocationCallbacks *pAllocator) {
11489     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
11490     instance_data->dispatch_table.DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
11491     lock_guard_t lock(global_lock);
11492     layer_destroy_msg_callback(instance_data->report_data, msgCallback, pAllocator);
11493 }
11494 
11495 VKAPI_ATTR void VKAPI_CALL DebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags,
11496                                                  VkDebugReportObjectTypeEXT objType, uint64_t object, size_t location,
11497                                                  int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
11498     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
11499     instance_data->dispatch_table.DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
11500 }
11501 
11502 VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
11503     return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
11504 }
11505 
11506 VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
11507                                                               VkLayerProperties *pProperties) {
11508     return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
11509 }
11510 
11511 VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
11512                                                                     VkExtensionProperties *pProperties) {
11513     if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
11514         return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);
11515 
11516     return VK_ERROR_LAYER_NOT_PRESENT;
11517 }
11518 
11519 VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, const char *pLayerName,
11520                                                                   uint32_t *pCount, VkExtensionProperties *pProperties) {
11521     if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
11522         return util_GetExtensionProperties(1, device_extensions, pCount, pProperties);
11523 
11524     assert(physicalDevice);
11525 
11526     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11527     return instance_data->dispatch_table.EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
11528 }
11529 
11530 VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDeviceGroupsKHX(
11531     VkInstance instance, uint32_t *pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupPropertiesKHX *pPhysicalDeviceGroupProperties) {
11532     bool skip = false;
11533     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
11534 
11535     if (instance_data) {
11536         // For this instance, flag when EnumeratePhysicalDeviceGroupsKHX goes to QUERY_COUNT and then QUERY_DETAILS.
11537         if (NULL == pPhysicalDeviceGroupProperties) {
11538             instance_data->vkEnumeratePhysicalDeviceGroupsState = QUERY_COUNT;
11539         } else {
11540             if (UNCALLED == instance_data->vkEnumeratePhysicalDeviceGroupsState) {
11541                 // Flag warning here. You can call this without having queried the count, but it may not be
11542                 // robust on platforms with multiple physical devices.
11543                 skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11544                                 VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, 0, __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL",
11545                                 "Call sequence has vkEnumeratePhysicalDeviceGroupsKHX() w/ non-NULL "
11546                                 "pPhysicalDeviceGroupProperties. You should first call vkEnumeratePhysicalDeviceGroupsKHX() w/ "
11547                                 "NULL pPhysicalDeviceGroupProperties to query pPhysicalDeviceGroupCount.");
11548             }  // TODO : Could also flag a warning if re-calling this function in QUERY_DETAILS state
11549             else if (instance_data->physical_device_groups_count != *pPhysicalDeviceGroupCount) {
11550                 // Having actual count match count from app is not a requirement, so this can be a warning
11551                 skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11552                                 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
11553                                 "Call to vkEnumeratePhysicalDeviceGroupsKHX() w/ pPhysicalDeviceGroupCount value %u, but actual "
11554                                 "count supported by this instance is %u.",
11555                                 *pPhysicalDeviceGroupCount, instance_data->physical_device_groups_count);
11556             }
11557             instance_data->vkEnumeratePhysicalDeviceGroupsState = QUERY_DETAILS;
11558         }
11559         if (skip) {
11560             return VK_ERROR_VALIDATION_FAILED_EXT;
11561         }
11562         VkResult result = instance_data->dispatch_table.EnumeratePhysicalDeviceGroupsKHX(instance, pPhysicalDeviceGroupCount,
11563                                                                                          pPhysicalDeviceGroupProperties);
11564         if (NULL == pPhysicalDeviceGroupProperties) {
11565             instance_data->physical_device_groups_count = *pPhysicalDeviceGroupCount;
11566         } else if (result == VK_SUCCESS) {  // Save physical devices
11567             for (uint32_t i = 0; i < *pPhysicalDeviceGroupCount; i++) {
11568                 for (uint32_t j = 0; j < pPhysicalDeviceGroupProperties[i].physicalDeviceCount; j++) {
11569                     VkPhysicalDevice cur_phys_dev = pPhysicalDeviceGroupProperties[i].physicalDevices[j];
11570                     auto &phys_device_state = instance_data->physical_device_map[cur_phys_dev];
11571                     phys_device_state.phys_device = cur_phys_dev;
11572                     // Init actual features for each physical device
11573                     instance_data->dispatch_table.GetPhysicalDeviceFeatures(cur_phys_dev, &phys_device_state.features);
11574                 }
11575             }
11576         }
11577         return result;
11578     } else {
11579         // instance_data is NULL in this branch, so log_msg() cannot be used here (it would dereference
11580         // the null pointer); report through the console fallback instead.
11581         LOGCONSOLE("Invalid instance (0x%" PRIx64 ") passed into vkEnumeratePhysicalDeviceGroupsKHX().", HandleToUint64(instance));
11582     }
11583     return VK_ERROR_VALIDATION_FAILED_EXT;
11584 }
11585 
11586 VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorUpdateTemplateKHR(VkDevice device,
11587                                                                  const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
11588                                                                  const VkAllocationCallbacks *pAllocator,
11589                                                                  VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) {
11590     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
11591     VkResult result =
11592         dev_data->dispatch_table.CreateDescriptorUpdateTemplateKHR(device, pCreateInfo, pAllocator, pDescriptorUpdateTemplate);
11593     if (VK_SUCCESS == result) {
11594         lock_guard_t lock(global_lock);
11595         // Shadow template createInfo for later updates
11596         safe_VkDescriptorUpdateTemplateCreateInfoKHR *local_create_info =
11597             new safe_VkDescriptorUpdateTemplateCreateInfoKHR(pCreateInfo);
11598         std::unique_ptr<TEMPLATE_STATE> template_state(new TEMPLATE_STATE(*pDescriptorUpdateTemplate, local_create_info));
11599         dev_data->desc_template_map[*pDescriptorUpdateTemplate] = std::move(template_state);
11600     }
11601     return result;
11602 }
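
// Illustrative only: creating the template whose createInfo is shadowed above, for a single
// uniform-buffer binding. `set_layout` is an assumed VkDescriptorSetLayout; updates would then
// pass a buffer whose byte 0 holds a VkDescriptorBufferInfo.
//
//     VkDescriptorUpdateTemplateEntryKHR entry = {};
//     entry.dstBinding = 0;
//     entry.descriptorCount = 1;
//     entry.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
//     entry.offset = 0;
//     entry.stride = sizeof(VkDescriptorBufferInfo);
//
//     VkDescriptorUpdateTemplateCreateInfoKHR ci = {};
//     ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR;
//     ci.descriptorUpdateEntryCount = 1;
//     ci.pDescriptorUpdateEntries = &entry;
//     ci.templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR;
//     ci.descriptorSetLayout = set_layout;
//     VkDescriptorUpdateTemplateKHR tmpl = VK_NULL_HANDLE;
//     vkCreateDescriptorUpdateTemplateKHR(device, &ci, nullptr, &tmpl);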
11603 
11604 VKAPI_ATTR void VKAPI_CALL DestroyDescriptorUpdateTemplateKHR(VkDevice device,
11605                                                               VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
11606                                                               const VkAllocationCallbacks *pAllocator) {
11607     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
11608     unique_lock_t lock(global_lock);
11609     dev_data->desc_template_map.erase(descriptorUpdateTemplate);
11610     lock.unlock();
11611     dev_data->dispatch_table.DestroyDescriptorUpdateTemplateKHR(device, descriptorUpdateTemplate, pAllocator);
11612 }
11613 
11614 // PostCallRecord* handles recording state updates following the call down the chain in UpdateDescriptorSetWithTemplateKHR()
11615 static void PostCallRecordUpdateDescriptorSetWithTemplateKHR(layer_data *device_data, VkDescriptorSet descriptorSet,
11616                                                              VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
11617                                                              const void *pData) {
11618     auto const template_map_entry = device_data->desc_template_map.find(descriptorUpdateTemplate);
11619     if (template_map_entry == device_data->desc_template_map.end()) {
11620         assert(0); return;  // unknown template: avoid dereferencing end() below when assert() compiles away
11621     }
11622 
11623     cvdescriptorset::PerformUpdateDescriptorSetsWithTemplateKHR(device_data, descriptorSet, template_map_entry->second, pData);
11624 }
11625 
11626 VKAPI_ATTR void VKAPI_CALL UpdateDescriptorSetWithTemplateKHR(VkDevice device, VkDescriptorSet descriptorSet,
11627                                                               VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
11628                                                               const void *pData) {
11629     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
11630     device_data->dispatch_table.UpdateDescriptorSetWithTemplateKHR(device, descriptorSet, descriptorUpdateTemplate, pData);
11631 
11632     PostCallRecordUpdateDescriptorSetWithTemplateKHR(device_data, descriptorSet, descriptorUpdateTemplate, pData);
11633 }
11634 
CmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer,VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,VkPipelineLayout layout,uint32_t set,const void * pData)11635 VKAPI_ATTR void VKAPI_CALL CmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer,
11636                                                                VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
11637                                                                VkPipelineLayout layout, uint32_t set, const void *pData) {
11638     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
11639     unique_lock_t lock(global_lock);
11640     bool skip = false;
11641     GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
11642     // Minimal validation for command buffer state
11643     if (cb_state) {
11644         skip |= ValidateCmd(dev_data, cb_state, CMD_PUSHDESCRIPTORSETWITHTEMPLATEKHR, "vkCmdPushDescriptorSetWithTemplateKHR()");
11645     }
11646     lock.unlock();
11647 
11648     if (!skip) {
11649         dev_data->dispatch_table.CmdPushDescriptorSetWithTemplateKHR(commandBuffer, descriptorUpdateTemplate, layout, set, pData);
11650     }
11651 }
11652 
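// State bookkeeping for the standard Vulkan two-call enumeration idiom: a count-only query
// (pProperties == NULL) raises the tracked state to QUERY_COUNT, and a call that actually
// fills pProperties raises it to QUERY_DETAILS; later validation keys off this state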
static void PostCallRecordGetPhysicalDeviceDisplayPlanePropertiesKHR(instance_layer_data *instanceData,
                                                                     VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount,
                                                                     VkDisplayPlanePropertiesKHR *pProperties) {
    unique_lock_t lock(global_lock);
    auto physical_device_state = GetPhysicalDeviceState(instanceData, physicalDevice);

    if (*pPropertyCount) {
        if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState < QUERY_COUNT) {
            physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState = QUERY_COUNT;
        }
        physical_device_state->display_plane_property_count = *pPropertyCount;
    }
    if (pProperties) {
        if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState < QUERY_DETAILS) {
            physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState = QUERY_DETAILS;
        }
    }
}

VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceDisplayPlanePropertiesKHR(VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount,
                                                                          VkDisplayPlanePropertiesKHR *pProperties) {
    VkResult result = VK_SUCCESS;
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);

    result = instance_data->dispatch_table.GetPhysicalDeviceDisplayPlanePropertiesKHR(physicalDevice, pPropertyCount, pProperties);

    if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
        PostCallRecordGetPhysicalDeviceDisplayPlanePropertiesKHR(instance_data, physicalDevice, pPropertyCount, pProperties);
    }

    return result;
}

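// Warn when a display-plane query is made before the plane count has ever been retrieved,
// and flag an error when planeIndex falls outside the count the driver reported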
static bool ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(instance_layer_data *instance_data,
                                                                    VkPhysicalDevice physicalDevice, uint32_t planeIndex,
                                                                    const char *api_name) {
    bool skip = false;
    auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
    if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState == UNCALLED) {
        skip |= log_msg(
            instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
            HandleToUint64(physicalDevice), __LINE__, SWAPCHAIN_GET_SUPPORTED_DISPLAYS_WITHOUT_QUERY, "DL",
            "Potential problem with calling %s() without first querying vkGetPhysicalDeviceDisplayPlanePropertiesKHR.", api_name);
    } else {
        if (planeIndex >= physical_device_state->display_plane_property_count) {
            skip |= log_msg(
                instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                HandleToUint64(physicalDevice), __LINE__, VALIDATION_ERROR_29c009c2, "DL",
                "%s(): planeIndex must be in the range [0, %d] implied by the count reported by "
                "vkGetPhysicalDeviceDisplayPlanePropertiesKHR. Is the plane index hardcoded? %s",
                api_name, physical_device_state->display_plane_property_count - 1, validation_error_map[VALIDATION_ERROR_29c009c2]);
        }
    }
    return skip;
}

static bool PreCallValidateGetDisplayPlaneSupportedDisplaysKHR(instance_layer_data *instance_data, VkPhysicalDevice physicalDevice,
                                                               uint32_t planeIndex) {
    bool skip = false;
    lock_guard_t lock(global_lock);
    skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(instance_data, physicalDevice, planeIndex,
                                                                    "vkGetDisplayPlaneSupportedDisplaysKHR");
    return skip;
}

VKAPI_ATTR VkResult VKAPI_CALL GetDisplayPlaneSupportedDisplaysKHR(VkPhysicalDevice physicalDevice, uint32_t planeIndex,
                                                                   uint32_t *pDisplayCount, VkDisplayKHR *pDisplays) {
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    bool skip = PreCallValidateGetDisplayPlaneSupportedDisplaysKHR(instance_data, physicalDevice, planeIndex);
    if (!skip) {
        result =
            instance_data->dispatch_table.GetDisplayPlaneSupportedDisplaysKHR(physicalDevice, planeIndex, pDisplayCount, pDisplays);
    }
    return result;
}

static bool PreCallValidateGetDisplayPlaneCapabilitiesKHR(instance_layer_data *instance_data, VkPhysicalDevice physicalDevice,
                                                          uint32_t planeIndex) {
    bool skip = false;
    lock_guard_t lock(global_lock);
    skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(instance_data, physicalDevice, planeIndex,
                                                                    "vkGetDisplayPlaneCapabilitiesKHR");
    return skip;
}

VKAPI_ATTR VkResult VKAPI_CALL GetDisplayPlaneCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode,
                                                              uint32_t planeIndex, VkDisplayPlaneCapabilitiesKHR *pCapabilities) {
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    bool skip = PreCallValidateGetDisplayPlaneCapabilitiesKHR(instance_data, physicalDevice, planeIndex);

    if (!skip) {
        result = instance_data->dispatch_table.GetDisplayPlaneCapabilitiesKHR(physicalDevice, mode, planeIndex, pCapabilities);
    }

    return result;
}

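// Record (or clear) an app-provided debug name for a handle; the report machinery uses these
// names to identify objects in subsequent validation messages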
VKAPI_ATTR VkResult VKAPI_CALL DebugMarkerSetObjectNameEXT(VkDevice device, const VkDebugMarkerObjectNameInfoEXT *pNameInfo) {
    unique_lock_t lock(global_lock);
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (pNameInfo->pObjectName) {
        device_data->report_data->debugObjectNameMap->insert(
            std::make_pair(pNameInfo->object, std::string(pNameInfo->pObjectName)));
    } else {
        device_data->report_data->debugObjectNameMap->erase(pNameInfo->object);
    }
    lock.unlock();
    VkResult result = device_data->dispatch_table.DebugMarkerSetObjectNameEXT(device, pNameInfo);
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL DebugMarkerSetObjectTagEXT(VkDevice device, VkDebugMarkerObjectTagInfoEXT *pTagInfo) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = device_data->dispatch_table.DebugMarkerSetObjectTagEXT(device, pTagInfo);
    return result;
}

VKAPI_ATTR void VKAPI_CALL CmdDebugMarkerBeginEXT(VkCommandBuffer commandBuffer, VkDebugMarkerMarkerInfoEXT *pMarkerInfo) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    bool skip = false;
    GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
    // Minimal validation for command buffer state
    if (cb_state) {
        skip |= ValidateCmd(device_data, cb_state, CMD_DEBUGMARKERBEGINEXT, "vkCmdDebugMarkerBeginEXT()");
    }
    lock.unlock();
    if (!skip) {
        device_data->dispatch_table.CmdDebugMarkerBeginEXT(commandBuffer, pMarkerInfo);
    }
}

VKAPI_ATTR void VKAPI_CALL CmdDebugMarkerEndEXT(VkCommandBuffer commandBuffer) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    bool skip = false;
    GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
    // Minimal validation for command buffer state
    if (cb_state) {
        skip |= ValidateCmd(device_data, cb_state, CMD_DEBUGMARKERENDEXT, "vkCmdDebugMarkerEndEXT()");
    }
    lock.unlock();
    if (!skip) {
        device_data->dispatch_table.CmdDebugMarkerEndEXT(commandBuffer);
    }
}

VKAPI_ATTR void VKAPI_CALL CmdDebugMarkerInsertEXT(VkCommandBuffer commandBuffer, VkDebugMarkerMarkerInfoEXT *pMarkerInfo) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    device_data->dispatch_table.CmdDebugMarkerInsertEXT(commandBuffer, pMarkerInfo);
}

VKAPI_ATTR void VKAPI_CALL CmdSetDiscardRectangleEXT(VkCommandBuffer commandBuffer, uint32_t firstDiscardRectangle,
                                                     uint32_t discardRectangleCount, const VkRect2D *pDiscardRectangles) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    bool skip = false;
    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
    // Minimal validation for command buffer state
    if (cb_state) {
        skip |= ValidateCmd(dev_data, cb_state, CMD_SETDISCARDRECTANGLEEXT, "vkCmdSetDiscardRectangleEXT()");
    }
    lock.unlock();

    if (!skip) {
        dev_data->dispatch_table.CmdSetDiscardRectangleEXT(commandBuffer, firstDiscardRectangle, discardRectangleCount,
                                                           pDiscardRectangles);
    }
}

VKAPI_ATTR void VKAPI_CALL CmdSetSampleLocationsEXT(VkCommandBuffer commandBuffer,
                                                    const VkSampleLocationsInfoEXT *pSampleLocationsInfo) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    bool skip = false;
    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
    // Minimal validation for command buffer state
    if (cb_state) {
        skip |= ValidateCmd(dev_data, cb_state, CMD_SETSAMPLELOCATIONSEXT, "vkCmdSetSampleLocationsEXT()");
    }
    lock.unlock();

    if (!skip) {
        dev_data->dispatch_table.CmdSetSampleLocationsEXT(commandBuffer, pSampleLocationsInfo);
    }
}

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice device, const char *funcName);
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetPhysicalDeviceProcAddr(VkInstance instance, const char *funcName);
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName);

// Map of all APIs to be intercepted by this layer
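// GetInstanceProcAddr/GetDeviceProcAddr consult this table first; names not found here fall
// through to the next layer via the dispatch table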
static const std::unordered_map<std::string, void *> name_to_funcptr_map = {
    {"vkGetInstanceProcAddr", (void *)GetInstanceProcAddr},
    {"vk_layerGetPhysicalDeviceProcAddr", (void *)GetPhysicalDeviceProcAddr},
    {"vkGetDeviceProcAddr", (void *)GetDeviceProcAddr},
    {"vkCreateInstance", (void *)CreateInstance},
    {"vkCreateDevice", (void *)CreateDevice},
    {"vkEnumeratePhysicalDevices", (void *)EnumeratePhysicalDevices},
    {"vkGetPhysicalDeviceQueueFamilyProperties", (void *)GetPhysicalDeviceQueueFamilyProperties},
    {"vkDestroyInstance", (void *)DestroyInstance},
    {"vkEnumerateInstanceLayerProperties", (void *)EnumerateInstanceLayerProperties},
    {"vkEnumerateDeviceLayerProperties", (void *)EnumerateDeviceLayerProperties},
    {"vkEnumerateInstanceExtensionProperties", (void *)EnumerateInstanceExtensionProperties},
    {"vkEnumerateDeviceExtensionProperties", (void *)EnumerateDeviceExtensionProperties},
    {"vkCreateDescriptorUpdateTemplateKHR", (void *)CreateDescriptorUpdateTemplateKHR},
    {"vkDestroyDescriptorUpdateTemplateKHR", (void *)DestroyDescriptorUpdateTemplateKHR},
    {"vkUpdateDescriptorSetWithTemplateKHR", (void *)UpdateDescriptorSetWithTemplateKHR},
    {"vkCmdPushDescriptorSetWithTemplateKHR", (void *)CmdPushDescriptorSetWithTemplateKHR},
    {"vkCmdPushDescriptorSetKHR", (void *)CmdPushDescriptorSetKHR},
    {"vkCreateSwapchainKHR", (void *)CreateSwapchainKHR},
    {"vkDestroySwapchainKHR", (void *)DestroySwapchainKHR},
    {"vkGetSwapchainImagesKHR", (void *)GetSwapchainImagesKHR},
    {"vkAcquireNextImageKHR", (void *)AcquireNextImageKHR},
    {"vkQueuePresentKHR", (void *)QueuePresentKHR},
    {"vkQueueSubmit", (void *)QueueSubmit},
    {"vkWaitForFences", (void *)WaitForFences},
    {"vkGetFenceStatus", (void *)GetFenceStatus},
    {"vkQueueWaitIdle", (void *)QueueWaitIdle},
    {"vkDeviceWaitIdle", (void *)DeviceWaitIdle},
    {"vkGetDeviceQueue", (void *)GetDeviceQueue},
    {"vkDestroyDevice", (void *)DestroyDevice},
    {"vkDestroyFence", (void *)DestroyFence},
    {"vkResetFences", (void *)ResetFences},
    {"vkDestroySemaphore", (void *)DestroySemaphore},
    {"vkDestroyEvent", (void *)DestroyEvent},
    {"vkDestroyQueryPool", (void *)DestroyQueryPool},
    {"vkDestroyBuffer", (void *)DestroyBuffer},
    {"vkDestroyBufferView", (void *)DestroyBufferView},
    {"vkDestroyImage", (void *)DestroyImage},
    {"vkDestroyImageView", (void *)DestroyImageView},
    {"vkDestroyShaderModule", (void *)DestroyShaderModule},
    {"vkDestroyPipeline", (void *)DestroyPipeline},
    {"vkDestroyPipelineLayout", (void *)DestroyPipelineLayout},
    {"vkDestroySampler", (void *)DestroySampler},
    {"vkDestroyDescriptorSetLayout", (void *)DestroyDescriptorSetLayout},
    {"vkDestroyDescriptorPool", (void *)DestroyDescriptorPool},
    {"vkDestroyFramebuffer", (void *)DestroyFramebuffer},
    {"vkDestroyRenderPass", (void *)DestroyRenderPass},
    {"vkCreateBuffer", (void *)CreateBuffer},
    {"vkCreateBufferView", (void *)CreateBufferView},
    {"vkCreateImage", (void *)CreateImage},
    {"vkCreateImageView", (void *)CreateImageView},
    {"vkCreateFence", (void *)CreateFence},
    {"vkCreatePipelineCache", (void *)CreatePipelineCache},
    {"vkDestroyPipelineCache", (void *)DestroyPipelineCache},
    {"vkGetPipelineCacheData", (void *)GetPipelineCacheData},
    {"vkMergePipelineCaches", (void *)MergePipelineCaches},
    {"vkCreateGraphicsPipelines", (void *)CreateGraphicsPipelines},
    {"vkCreateComputePipelines", (void *)CreateComputePipelines},
    {"vkCreateSampler", (void *)CreateSampler},
    {"vkCreateDescriptorSetLayout", (void *)CreateDescriptorSetLayout},
    {"vkCreatePipelineLayout", (void *)CreatePipelineLayout},
    {"vkCreateDescriptorPool", (void *)CreateDescriptorPool},
    {"vkResetDescriptorPool", (void *)ResetDescriptorPool},
    {"vkAllocateDescriptorSets", (void *)AllocateDescriptorSets},
    {"vkFreeDescriptorSets", (void *)FreeDescriptorSets},
    {"vkUpdateDescriptorSets", (void *)UpdateDescriptorSets},
    {"vkCreateCommandPool", (void *)CreateCommandPool},
    {"vkDestroyCommandPool", (void *)DestroyCommandPool},
    {"vkResetCommandPool", (void *)ResetCommandPool},
    {"vkCreateQueryPool", (void *)CreateQueryPool},
    {"vkAllocateCommandBuffers", (void *)AllocateCommandBuffers},
    {"vkFreeCommandBuffers", (void *)FreeCommandBuffers},
    {"vkBeginCommandBuffer", (void *)BeginCommandBuffer},
    {"vkEndCommandBuffer", (void *)EndCommandBuffer},
    {"vkResetCommandBuffer", (void *)ResetCommandBuffer},
    {"vkCmdBindPipeline", (void *)CmdBindPipeline},
    {"vkCmdSetViewport", (void *)CmdSetViewport},
    {"vkCmdSetScissor", (void *)CmdSetScissor},
    {"vkCmdSetLineWidth", (void *)CmdSetLineWidth},
    {"vkCmdSetDepthBias", (void *)CmdSetDepthBias},
    {"vkCmdSetBlendConstants", (void *)CmdSetBlendConstants},
    {"vkCmdSetDepthBounds", (void *)CmdSetDepthBounds},
    {"vkCmdSetStencilCompareMask", (void *)CmdSetStencilCompareMask},
    {"vkCmdSetStencilWriteMask", (void *)CmdSetStencilWriteMask},
    {"vkCmdSetStencilReference", (void *)CmdSetStencilReference},
    {"vkCmdBindDescriptorSets", (void *)CmdBindDescriptorSets},
    {"vkCmdBindVertexBuffers", (void *)CmdBindVertexBuffers},
    {"vkCmdBindIndexBuffer", (void *)CmdBindIndexBuffer},
    {"vkCmdDraw", (void *)CmdDraw},
    {"vkCmdDrawIndexed", (void *)CmdDrawIndexed},
    {"vkCmdDrawIndirect", (void *)CmdDrawIndirect},
    {"vkCmdDrawIndexedIndirect", (void *)CmdDrawIndexedIndirect},
    {"vkCmdDispatch", (void *)CmdDispatch},
    {"vkCmdDispatchIndirect", (void *)CmdDispatchIndirect},
    {"vkCmdCopyBuffer", (void *)CmdCopyBuffer},
    {"vkCmdCopyImage", (void *)CmdCopyImage},
    {"vkCmdBlitImage", (void *)CmdBlitImage},
    {"vkCmdCopyBufferToImage", (void *)CmdCopyBufferToImage},
    {"vkCmdCopyImageToBuffer", (void *)CmdCopyImageToBuffer},
    {"vkCmdUpdateBuffer", (void *)CmdUpdateBuffer},
    {"vkCmdFillBuffer", (void *)CmdFillBuffer},
    {"vkCmdClearColorImage", (void *)CmdClearColorImage},
    {"vkCmdClearDepthStencilImage", (void *)CmdClearDepthStencilImage},
    {"vkCmdClearAttachments", (void *)CmdClearAttachments},
    {"vkCmdResolveImage", (void *)CmdResolveImage},
    {"vkGetImageSubresourceLayout", (void *)GetImageSubresourceLayout},
    {"vkCmdSetEvent", (void *)CmdSetEvent},
    {"vkCmdResetEvent", (void *)CmdResetEvent},
    {"vkCmdWaitEvents", (void *)CmdWaitEvents},
    {"vkCmdPipelineBarrier", (void *)CmdPipelineBarrier},
    {"vkCmdBeginQuery", (void *)CmdBeginQuery},
    {"vkCmdEndQuery", (void *)CmdEndQuery},
    {"vkCmdResetQueryPool", (void *)CmdResetQueryPool},
    {"vkCmdCopyQueryPoolResults", (void *)CmdCopyQueryPoolResults},
    {"vkCmdPushConstants", (void *)CmdPushConstants},
    {"vkCmdWriteTimestamp", (void *)CmdWriteTimestamp},
    {"vkCreateFramebuffer", (void *)CreateFramebuffer},
    {"vkCreateShaderModule", (void *)CreateShaderModule},
    {"vkCreateRenderPass", (void *)CreateRenderPass},
    {"vkCmdBeginRenderPass", (void *)CmdBeginRenderPass},
    {"vkCmdNextSubpass", (void *)CmdNextSubpass},
    {"vkCmdEndRenderPass", (void *)CmdEndRenderPass},
    {"vkCmdExecuteCommands", (void *)CmdExecuteCommands},
    {"vkCmdDebugMarkerBeginEXT", (void *)CmdDebugMarkerBeginEXT},
    {"vkCmdDebugMarkerEndEXT", (void *)CmdDebugMarkerEndEXT},
    {"vkCmdDebugMarkerInsertEXT", (void *)CmdDebugMarkerInsertEXT},
    {"vkDebugMarkerSetObjectNameEXT", (void *)DebugMarkerSetObjectNameEXT},
    {"vkDebugMarkerSetObjectTagEXT", (void *)DebugMarkerSetObjectTagEXT},
    {"vkSetEvent", (void *)SetEvent},
    {"vkMapMemory", (void *)MapMemory},
    {"vkUnmapMemory", (void *)UnmapMemory},
    {"vkFlushMappedMemoryRanges", (void *)FlushMappedMemoryRanges},
    {"vkInvalidateMappedMemoryRanges", (void *)InvalidateMappedMemoryRanges},
    {"vkAllocateMemory", (void *)AllocateMemory},
    {"vkFreeMemory", (void *)FreeMemory},
    {"vkBindBufferMemory", (void *)BindBufferMemory},
    {"vkBindBufferMemory2KHR", (void *)BindBufferMemory2KHR},
    {"vkGetBufferMemoryRequirements", (void *)GetBufferMemoryRequirements},
    {"vkGetBufferMemoryRequirements2KHR", (void *)GetBufferMemoryRequirements2KHR},
    {"vkGetImageMemoryRequirements", (void *)GetImageMemoryRequirements},
    {"vkGetImageMemoryRequirements2KHR", (void *)GetImageMemoryRequirements2KHR},
    {"vkGetImageSparseMemoryRequirements", (void *)GetImageSparseMemoryRequirements},
    {"vkGetImageSparseMemoryRequirements2KHR", (void *)GetImageSparseMemoryRequirements2KHR},
    {"vkGetPhysicalDeviceSparseImageFormatProperties", (void *)GetPhysicalDeviceSparseImageFormatProperties},
    {"vkGetPhysicalDeviceSparseImageFormatProperties2KHR", (void *)GetPhysicalDeviceSparseImageFormatProperties2KHR},
    {"vkGetQueryPoolResults", (void *)GetQueryPoolResults},
    {"vkBindImageMemory", (void *)BindImageMemory},
    {"vkBindImageMemory2KHR", (void *)BindImageMemory2KHR},
    {"vkQueueBindSparse", (void *)QueueBindSparse},
    {"vkCreateSemaphore", (void *)CreateSemaphore},
    {"vkCreateEvent", (void *)CreateEvent},
#ifdef VK_USE_PLATFORM_ANDROID_KHR
    {"vkCreateAndroidSurfaceKHR", (void *)CreateAndroidSurfaceKHR},
#endif
#ifdef VK_USE_PLATFORM_MIR_KHR
    {"vkCreateMirSurfaceKHR", (void *)CreateMirSurfaceKHR},
    {"vkGetPhysicalDeviceMirPresentationSupportKHR", (void *)GetPhysicalDeviceMirPresentationSupportKHR},
#endif
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
    {"vkCreateWaylandSurfaceKHR", (void *)CreateWaylandSurfaceKHR},
    {"vkGetPhysicalDeviceWaylandPresentationSupportKHR", (void *)GetPhysicalDeviceWaylandPresentationSupportKHR},
#endif
#ifdef VK_USE_PLATFORM_WIN32_KHR
    {"vkCreateWin32SurfaceKHR", (void *)CreateWin32SurfaceKHR},
    {"vkGetPhysicalDeviceWin32PresentationSupportKHR", (void *)GetPhysicalDeviceWin32PresentationSupportKHR},
    {"vkImportSemaphoreWin32HandleKHR", (void *)ImportSemaphoreWin32HandleKHR},
    {"vkGetSemaphoreWin32HandleKHR", (void *)GetSemaphoreWin32HandleKHR},
    {"vkImportFenceWin32HandleKHR", (void *)ImportFenceWin32HandleKHR},
    {"vkGetFenceWin32HandleKHR", (void *)GetFenceWin32HandleKHR},
#endif
#ifdef VK_USE_PLATFORM_XCB_KHR
    {"vkCreateXcbSurfaceKHR", (void *)CreateXcbSurfaceKHR},
    {"vkGetPhysicalDeviceXcbPresentationSupportKHR", (void *)GetPhysicalDeviceXcbPresentationSupportKHR},
#endif
#ifdef VK_USE_PLATFORM_XLIB_KHR
    {"vkCreateXlibSurfaceKHR", (void *)CreateXlibSurfaceKHR},
    {"vkGetPhysicalDeviceXlibPresentationSupportKHR", (void *)GetPhysicalDeviceXlibPresentationSupportKHR},
#endif
    {"vkCreateDisplayPlaneSurfaceKHR", (void *)CreateDisplayPlaneSurfaceKHR},
    {"vkDestroySurfaceKHR", (void *)DestroySurfaceKHR},
    {"vkGetPhysicalDeviceSurfaceCapabilitiesKHR", (void *)GetPhysicalDeviceSurfaceCapabilitiesKHR},
    {"vkGetPhysicalDeviceSurfaceCapabilities2KHR", (void *)GetPhysicalDeviceSurfaceCapabilities2KHR},
    {"vkGetPhysicalDeviceSurfaceCapabilities2EXT", (void *)GetPhysicalDeviceSurfaceCapabilities2EXT},
    {"vkGetPhysicalDeviceSurfaceSupportKHR", (void *)GetPhysicalDeviceSurfaceSupportKHR},
    {"vkGetPhysicalDeviceSurfacePresentModesKHR", (void *)GetPhysicalDeviceSurfacePresentModesKHR},
    {"vkGetPhysicalDeviceSurfaceFormatsKHR", (void *)GetPhysicalDeviceSurfaceFormatsKHR},
    {"vkGetPhysicalDeviceSurfaceFormats2KHR", (void *)GetPhysicalDeviceSurfaceFormats2KHR},
    {"vkGetPhysicalDeviceQueueFamilyProperties2KHR", (void *)GetPhysicalDeviceQueueFamilyProperties2KHR},
    {"vkEnumeratePhysicalDeviceGroupsKHX", (void *)EnumeratePhysicalDeviceGroupsKHX},
    {"vkCreateDebugReportCallbackEXT", (void *)CreateDebugReportCallbackEXT},
    {"vkDestroyDebugReportCallbackEXT", (void *)DestroyDebugReportCallbackEXT},
    {"vkDebugReportMessageEXT", (void *)DebugReportMessageEXT},
    {"vkGetPhysicalDeviceDisplayPlanePropertiesKHR", (void *)GetPhysicalDeviceDisplayPlanePropertiesKHR},
    {"vkGetDisplayPlaneSupportedDisplaysKHR", (void *)GetDisplayPlaneSupportedDisplaysKHR},
    {"vkGetDisplayPlaneCapabilitiesKHR", (void *)GetDisplayPlaneCapabilitiesKHR},
    {"vkImportSemaphoreFdKHR", (void *)ImportSemaphoreFdKHR},
    {"vkGetSemaphoreFdKHR", (void *)GetSemaphoreFdKHR},
    {"vkImportFenceFdKHR", (void *)ImportFenceFdKHR},
    {"vkGetFenceFdKHR", (void *)GetFenceFdKHR},
    {"vkCreateValidationCacheEXT", (void *)CreateValidationCacheEXT},
    {"vkDestroyValidationCacheEXT", (void *)DestroyValidationCacheEXT},
    {"vkGetValidationCacheDataEXT", (void *)GetValidationCacheDataEXT},
    {"vkMergeValidationCachesEXT", (void *)MergeValidationCachesEXT},
    {"vkCmdSetDiscardRectangleEXT", (void *)CmdSetDiscardRectangleEXT},
    {"vkCmdSetSampleLocationsEXT", (void *)CmdSetSampleLocationsEXT},
};

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice device, const char *funcName) {
    assert(device);
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    // Is API to be intercepted by this layer?
    const auto &item = name_to_funcptr_map.find(funcName);
    if (item != name_to_funcptr_map.end()) {
        return reinterpret_cast<PFN_vkVoidFunction>(item->second);
    }

    auto &table = device_data->dispatch_table;
    if (!table.GetDeviceProcAddr) return nullptr;
    return table.GetDeviceProcAddr(device, funcName);
}

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName) {
    instance_layer_data *instance_data;
    // Is API to be intercepted by this layer?
    const auto &item = name_to_funcptr_map.find(funcName);
    if (item != name_to_funcptr_map.end()) {
        return reinterpret_cast<PFN_vkVoidFunction>(item->second);
    }

    instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
    auto &table = instance_data->dispatch_table;
    if (!table.GetInstanceProcAddr) return nullptr;
    return table.GetInstanceProcAddr(instance, funcName);
}

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetPhysicalDeviceProcAddr(VkInstance instance, const char *funcName) {
    assert(instance);
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);

    auto &table = instance_data->dispatch_table;
    if (!table.GetPhysicalDeviceProcAddr) return nullptr;
    return table.GetPhysicalDeviceProcAddr(instance, funcName);
}

}  // namespace core_validation

// loader-layer interface v0, just wrappers since there is only a layer

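// These exported, unqualified vk* symbols are the entry points the loader resolves by name
// from the layer library; each simply forwards into the core_validation namespace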
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
                                                                                      VkExtensionProperties *pProperties) {
    return core_validation::EnumerateInstanceExtensionProperties(pLayerName, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(uint32_t *pCount,
                                                                                  VkLayerProperties *pProperties) {
    return core_validation::EnumerateInstanceLayerProperties(pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
                                                                                VkLayerProperties *pProperties) {
    // The loader is expected to pass VK_NULL_HANDLE here; the layer implementation handles that case internally
    assert(physicalDevice == VK_NULL_HANDLE);
    return core_validation::EnumerateDeviceLayerProperties(VK_NULL_HANDLE, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                                                                    const char *pLayerName, uint32_t *pCount,
                                                                                    VkExtensionProperties *pProperties) {
    // The loader is expected to pass VK_NULL_HANDLE here; the layer implementation handles that case internally
    assert(physicalDevice == VK_NULL_HANDLE);
    return core_validation::EnumerateDeviceExtensionProperties(VK_NULL_HANDLE, pLayerName, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
    return core_validation::GetDeviceProcAddr(dev, funcName);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
    return core_validation::GetInstanceProcAddr(instance, funcName);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_layerGetPhysicalDeviceProcAddr(VkInstance instance,
                                                                                           const char *funcName) {
    return core_validation::GetPhysicalDeviceProcAddr(instance, funcName);
}

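// Loader/layer version negotiation: the loader passes in the interface version it supports;
// both sides end up using min(loader version, CURRENT_LOADER_LAYER_INTERFACE_VERSION)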
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkNegotiateLoaderLayerInterfaceVersion(VkNegotiateLayerInterface *pVersionStruct) {
    assert(pVersionStruct != NULL);
    assert(pVersionStruct->sType == LAYER_NEGOTIATE_INTERFACE_STRUCT);

    // Fill in the function pointers if our version is at least capable of having the structure contain them.
    if (pVersionStruct->loaderLayerInterfaceVersion >= 2) {
        pVersionStruct->pfnGetInstanceProcAddr = vkGetInstanceProcAddr;
        pVersionStruct->pfnGetDeviceProcAddr = vkGetDeviceProcAddr;
        pVersionStruct->pfnGetPhysicalDeviceProcAddr = vk_layerGetPhysicalDeviceProcAddr;
    }

    if (pVersionStruct->loaderLayerInterfaceVersion < CURRENT_LOADER_LAYER_INTERFACE_VERSION) {
        core_validation::loader_layer_if_version = pVersionStruct->loaderLayerInterfaceVersion;
    } else if (pVersionStruct->loaderLayerInterfaceVersion > CURRENT_LOADER_LAYER_INTERFACE_VERSION) {
        pVersionStruct->loaderLayerInterfaceVersion = CURRENT_LOADER_LAYER_INTERFACE_VERSION;
    }

    return VK_SUCCESS;
}