1 /* Copyright (c) 2015-2016 The Khronos Group Inc.
2  * Copyright (c) 2015-2016 Valve Corporation
3  * Copyright (c) 2015-2016 LunarG, Inc.
4  * Copyright (C) 2015-2016 Google Inc.
5  *
6  * Licensed under the Apache License, Version 2.0 (the "License");
7  * you may not use this file except in compliance with the License.
8  * You may obtain a copy of the License at
9  *
10  *     http://www.apache.org/licenses/LICENSE-2.0
11  *
12  * Unless required by applicable law or agreed to in writing, software
13  * distributed under the License is distributed on an "AS IS" BASIS,
14  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15  * See the License for the specific language governing permissions and
16  * limitations under the License.
17  *
18  * Author: Cody Northrop <cnorthrop@google.com>
19  * Author: Michael Lentine <mlentine@google.com>
20  * Author: Tobin Ehlis <tobine@google.com>
21  * Author: Chia-I Wu <olv@google.com>
22  * Author: Chris Forbes <chrisf@ijw.co.nz>
23  * Author: Mark Lobodzinski <mark@lunarg.com>
24  * Author: Ian Elliott <ianelliott@google.com>
25  */
26 
27 // Allow use of STL min and max functions in Windows
28 #define NOMINMAX
29 
30 #include <SPIRV/spirv.hpp>
31 #include <algorithm>
32 #include <assert.h>
33 #include <iostream>
34 #include <list>
35 #include <map>
36 #include <mutex>
37 #include <set>
38 //#include <memory>
39 #include <stdio.h>
40 #include <stdlib.h>
41 #include <string.h>
42 #include <string>
43 #include <tuple>
44 
45 #include "vk_loader_platform.h"
46 #include "vk_dispatch_table_helper.h"
47 #include "vk_struct_string_helper_cpp.h"
48 #if defined(__GNUC__)
49 #pragma GCC diagnostic ignored "-Wwrite-strings"
50 #endif
51 #if defined(__GNUC__)
52 #pragma GCC diagnostic warning "-Wwrite-strings"
53 #endif
54 #include "vk_struct_size_helper.h"
55 #include "core_validation.h"
56 #include "vk_layer_table.h"
57 #include "vk_layer_data.h"
58 #include "vk_layer_extension_utils.h"
59 #include "vk_layer_utils.h"
60 #include "spirv-tools/libspirv.h"
61 
62 #if defined __ANDROID__
63 #include <android/log.h>
64 #define LOGCONSOLE(...) ((void)__android_log_print(ANDROID_LOG_INFO, "DS", __VA_ARGS__))
65 #else
66 #define LOGCONSOLE(...)                                                                                                            \
67     {                                                                                                                              \
68         printf(__VA_ARGS__);                                                                                                       \
69         printf("\n");                                                                                                              \
70     }
71 #endif
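// Usage note (illustrative, not taken from this file): LOGCONSOLE is printf-style, e.g.
//     LOGCONSOLE("Cannot open file %s", file_name);
// On Android it routes through logcat under the "DS" tag; elsewhere it prints to stdout and appends a newline.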
72 
73 using namespace std;
74 
75 namespace core_validation {
76 
77 using std::unordered_map;
78 using std::unordered_set;
79 
80 // WSI Image Objects bypass usual Image Object creation methods.  A special Memory
81 // Object value will be used to identify them internally.
82 static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);
83 // 2nd special memory handle used to flag object as unbound from memory
84 static const VkDeviceMemory MEMORY_UNBOUND = VkDeviceMemory(~((uint64_t)(0)) - 1);
85 
86 struct devExts {
87     bool wsi_enabled;
88     bool wsi_display_swapchain_enabled;
89     unordered_map<VkSwapchainKHR, unique_ptr<SWAPCHAIN_NODE>> swapchainMap;
90     unordered_map<VkImage, VkSwapchainKHR> imageToSwapchainMap;
91 };
92 
93 // fwd decls
94 struct shader_module;
95 
96 struct instance_layer_data {
97     VkInstance instance = VK_NULL_HANDLE;
98     debug_report_data *report_data = nullptr;
99     std::vector<VkDebugReportCallbackEXT> logging_callback;
100     VkLayerInstanceDispatchTable dispatch_table;
101 
102     CALL_STATE vkEnumeratePhysicalDevicesState = UNCALLED;
103     uint32_t physical_devices_count = 0;
104     CHECK_DISABLED disabled = {};
105 
106     unordered_map<VkPhysicalDevice, PHYSICAL_DEVICE_STATE> physical_device_map;
107     unordered_map<VkSurfaceKHR, SURFACE_STATE> surface_map;
108 
109     bool surfaceExtensionEnabled = false;
110     bool displayExtensionEnabled = false;
111 #ifdef VK_USE_PLATFORM_ANDROID_KHR
112     bool androidSurfaceExtensionEnabled = false;
113 #endif
114 #ifdef VK_USE_PLATFORM_MIR_KHR
115     bool mirSurfaceExtensionEnabled = false;
116 #endif
117 #ifdef VK_USE_PLATFORM_WAYLAND_KHR
118     bool waylandSurfaceExtensionEnabled = false;
119 #endif
120 #ifdef VK_USE_PLATFORM_WIN32_KHR
121     bool win32SurfaceExtensionEnabled = false;
122 #endif
123 #ifdef VK_USE_PLATFORM_XCB_KHR
124     bool xcbSurfaceExtensionEnabled = false;
125 #endif
126 #ifdef VK_USE_PLATFORM_XLIB_KHR
127     bool xlibSurfaceExtensionEnabled = false;
128 #endif
129 };
130 
131 struct layer_data {
132     debug_report_data *report_data = nullptr;
133     VkLayerDispatchTable dispatch_table;
134 
135     devExts device_extensions = {};
136     unordered_set<VkQueue> queues;  // All queues under given device
137     // Global set of all cmdBuffers that are inFlight on this device
138     unordered_set<VkCommandBuffer> globalInFlightCmdBuffers;
139     // Layer specific data
140     unordered_map<VkSampler, unique_ptr<SAMPLER_STATE>> samplerMap;
141     unordered_map<VkImageView, unique_ptr<IMAGE_VIEW_STATE>> imageViewMap;
142     unordered_map<VkImage, unique_ptr<IMAGE_STATE>> imageMap;
143     unordered_map<VkBufferView, unique_ptr<BUFFER_VIEW_STATE>> bufferViewMap;
144     unordered_map<VkBuffer, unique_ptr<BUFFER_NODE>> bufferMap;
145     unordered_map<VkPipeline, PIPELINE_STATE *> pipelineMap;
146     unordered_map<VkCommandPool, COMMAND_POOL_NODE> commandPoolMap;
147     unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_STATE *> descriptorPoolMap;
148     unordered_map<VkDescriptorSet, cvdescriptorset::DescriptorSet *> setMap;
149     unordered_map<VkDescriptorSetLayout, cvdescriptorset::DescriptorSetLayout *> descriptorSetLayoutMap;
150     unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
151     unordered_map<VkDeviceMemory, unique_ptr<DEVICE_MEM_INFO>> memObjMap;
152     unordered_map<VkFence, FENCE_NODE> fenceMap;
153     unordered_map<VkQueue, QUEUE_NODE> queueMap;
154     unordered_map<VkEvent, EVENT_STATE> eventMap;
155     unordered_map<QueryObject, bool> queryToStateMap;
156     unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
157     unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
158     unordered_map<VkCommandBuffer, GLOBAL_CB_NODE *> commandBufferMap;
159     unordered_map<VkFramebuffer, unique_ptr<FRAMEBUFFER_STATE>> frameBufferMap;
160     unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
161     unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
162     unordered_map<VkRenderPass, unique_ptr<RENDER_PASS_STATE>> renderPassMap;
163     unordered_map<VkShaderModule, unique_ptr<shader_module>> shaderModuleMap;
164     VkDevice device = VK_NULL_HANDLE;
165 
166     instance_layer_data *instance_data = nullptr;  // from device to enclosing instance
167 
168     VkPhysicalDeviceFeatures enabled_features = {};
169     // Device specific data
170     PHYS_DEV_PROPERTIES_NODE phys_dev_properties = {};
171     VkPhysicalDeviceMemoryProperties phys_dev_mem_props = {};
172 };
173 
174 // TODO : Do we need to guard access to layer_data_map w/ lock?
175 static unordered_map<void *, layer_data *> layer_data_map;
176 static unordered_map<void *, instance_layer_data *> instance_layer_data_map;
177 
178 static const VkLayerProperties global_layer = {
179     "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
180 };
181 
182 template <class TCreateInfo> void ValidateLayerOrdering(const TCreateInfo &createInfo) {
183     bool foundLayer = false;
184     for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
185         if (!strcmp(createInfo.ppEnabledLayerNames[i], global_layer.layerName)) {
186             foundLayer = true;
187         }
188         // This has to be logged to console as we don't have a callback at this point.
189         if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[0], "VK_LAYER_GOOGLE_unique_objects")) {
190             LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.",
191                        global_layer.layerName);
192         }
193     }
194 }
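// Illustrative example (assumption, not part of this file): to satisfy the check above, an application
// enabling both layers explicitly would list core_validation before unique_objects:
//     const char *layers[] = {"VK_LAYER_LUNARG_core_validation", "VK_LAYER_GOOGLE_unique_objects"};
//     create_info.enabledLayerCount = 2;
//     create_info.ppEnabledLayerNames = layers;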
195 
196 // Code imported from shader_checker
197 static void build_def_index(shader_module *);
198 
199 // A forward iterator over spirv instructions. Provides easy access to len, opcode, and content words
200 // without the caller needing to care too much about the physical SPIRV module layout.
201 struct spirv_inst_iter {
202     std::vector<uint32_t>::const_iterator zero;
203     std::vector<uint32_t>::const_iterator it;
204 
205     uint32_t len() {
206         auto result = *it >> 16;
207         assert(result > 0);
208         return result;
209     }
210 
211     uint32_t opcode() { return *it & 0x0ffffu; }
212 
213     uint32_t const &word(unsigned n) {
214         assert(n < len());
215         return it[n];
216     }
217 
218     uint32_t offset() { return (uint32_t)(it - zero); }
219 
220     spirv_inst_iter() {}
221 
222     spirv_inst_iter(std::vector<uint32_t>::const_iterator zero, std::vector<uint32_t>::const_iterator it) : zero(zero), it(it) {}
223 
224     bool operator==(spirv_inst_iter const &other) { return it == other.it; }
225 
226     bool operator!=(spirv_inst_iter const &other) { return it != other.it; }
227 
228     spirv_inst_iter operator++(int) { /* x++ */
229         spirv_inst_iter ii = *this;
230         it += len();
231         return ii;
232     }
233 
234     spirv_inst_iter operator++() { /* ++x; */
235         it += len();
236         return *this;
237     }
238 
239     /* The iterator and the value are the same thing. */
240     spirv_inst_iter &operator*() { return *this; }
241     spirv_inst_iter const &operator*() const { return *this; }
242 };
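// Note on the decoding above: in the SPIR-V binary encoding, the first word of every instruction packs
// the instruction's total word count in the upper 16 bits and its opcode in the lower 16 bits, which is
// exactly what len() and opcode() extract; word(0) is that header word and word(1)... are the operands.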
243 
244 struct shader_module {
245     /* the spirv image itself */
246     vector<uint32_t> words;
247     /* a mapping of <id> to the first word of its def. this is useful because walking type
248      * trees, constant expressions, etc requires jumping all over the instruction stream.
249      */
250     unordered_map<unsigned, unsigned> def_index;
251 
252     shader_module(VkShaderModuleCreateInfo const *pCreateInfo)
253         : words((uint32_t *)pCreateInfo->pCode, (uint32_t *)pCreateInfo->pCode + pCreateInfo->codeSize / sizeof(uint32_t)),
254           def_index() {
255 
256         build_def_index(this);
257     }
258 
259     /* expose begin() / end() to enable range-based for */
260     spirv_inst_iter begin() const { return spirv_inst_iter(words.begin(), words.begin() + 5); } /* first insn */
261     spirv_inst_iter end() const { return spirv_inst_iter(words.begin(), words.end()); }         /* just past last insn */
262     /* given an offset into the module, produce an iterator there. */
263     spirv_inst_iter at(unsigned offset) const { return spirv_inst_iter(words.begin(), words.begin() + offset); }
264 
265     /* gets an iterator to the definition of an id */
266     spirv_inst_iter get_def(unsigned id) const {
267         auto it = def_index.find(id);
268         if (it == def_index.end()) {
269             return end();
270         }
271         return at(it->second);
272     }
273 };
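// Illustrative usage (a sketch under the definitions above, not a call made here): begin()/end() skip
// the 5-word SPIR-V module header and enable range-based iteration, while get_def() chases result ids
// recorded by build_def_index():
//     for (auto insn : module) {
//         if (insn.opcode() == spv::OpVariable) {
//             auto type_def = module.get_def(insn.word(1)); // word(1) of OpVariable is its result type <id>
//             (void)type_def;
//         }
//     }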
274 
275 // TODO : This can be much smarter, using separate locks for separate global data
276 static std::mutex global_lock;
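// Typical locking pattern (illustrative assumption about call sites): entry points guard access to the
// maps above with
//     std::unique_lock<std::mutex> lock(global_lock);
// and release the lock before calling down the dispatch chain.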
277 
278 // Return IMAGE_VIEW_STATE ptr for specified imageView or else NULL
279 IMAGE_VIEW_STATE *getImageViewState(const layer_data *dev_data, VkImageView image_view) {
280     auto iv_it = dev_data->imageViewMap.find(image_view);
281     if (iv_it == dev_data->imageViewMap.end()) {
282         return nullptr;
283     }
284     return iv_it->second.get();
285 }
286 // Return sampler node ptr for specified sampler or else NULL
287 SAMPLER_STATE *getSamplerState(const layer_data *dev_data, VkSampler sampler) {
288     auto sampler_it = dev_data->samplerMap.find(sampler);
289     if (sampler_it == dev_data->samplerMap.end()) {
290         return nullptr;
291     }
292     return sampler_it->second.get();
293 }
294 // Return image node ptr for specified image or else NULL
295 IMAGE_STATE *getImageState(const layer_data *dev_data, VkImage image) {
296     auto img_it = dev_data->imageMap.find(image);
297     if (img_it == dev_data->imageMap.end()) {
298         return nullptr;
299     }
300     return img_it->second.get();
301 }
302 // Return buffer node ptr for specified buffer or else NULL
303 BUFFER_NODE *getBufferNode(const layer_data *dev_data, VkBuffer buffer) {
304     auto buff_it = dev_data->bufferMap.find(buffer);
305     if (buff_it == dev_data->bufferMap.end()) {
306         return nullptr;
307     }
308     return buff_it->second.get();
309 }
310 // Return swapchain node for specified swapchain or else NULL
311 SWAPCHAIN_NODE *getSwapchainNode(const layer_data *dev_data, VkSwapchainKHR swapchain) {
312     auto swp_it = dev_data->device_extensions.swapchainMap.find(swapchain);
313     if (swp_it == dev_data->device_extensions.swapchainMap.end()) {
314         return nullptr;
315     }
316     return swp_it->second.get();
317 }
318 // Return swapchain for specified image or else NULL
319 VkSwapchainKHR getSwapchainFromImage(const layer_data *dev_data, VkImage image) {
320     auto img_it = dev_data->device_extensions.imageToSwapchainMap.find(image);
321     if (img_it == dev_data->device_extensions.imageToSwapchainMap.end()) {
322         return VK_NULL_HANDLE;
323     }
324     return img_it->second;
325 }
326 // Return buffer view state ptr for specified buffer view or else NULL
327 BUFFER_VIEW_STATE *getBufferViewState(const layer_data *my_data, VkBufferView buffer_view) {
328     auto bv_it = my_data->bufferViewMap.find(buffer_view);
329     if (bv_it == my_data->bufferViewMap.end()) {
330         return nullptr;
331     }
332     return bv_it->second.get();
333 }
334 
335 FENCE_NODE *getFenceNode(layer_data *dev_data, VkFence fence) {
336     auto it = dev_data->fenceMap.find(fence);
337     if (it == dev_data->fenceMap.end()) {
338         return nullptr;
339     }
340     return &it->second;
341 }
342 
343 EVENT_STATE *getEventNode(layer_data *dev_data, VkEvent event) {
344     auto it = dev_data->eventMap.find(event);
345     if (it == dev_data->eventMap.end()) {
346         return nullptr;
347     }
348     return &it->second;
349 }
350 
351 QUERY_POOL_NODE *getQueryPoolNode(layer_data *dev_data, VkQueryPool query_pool) {
352     auto it = dev_data->queryPoolMap.find(query_pool);
353     if (it == dev_data->queryPoolMap.end()) {
354         return nullptr;
355     }
356     return &it->second;
357 }
358 
359 QUEUE_NODE *getQueueNode(layer_data *dev_data, VkQueue queue) {
360     auto it = dev_data->queueMap.find(queue);
361     if (it == dev_data->queueMap.end()) {
362         return nullptr;
363     }
364     return &it->second;
365 }
366 
367 SEMAPHORE_NODE *getSemaphoreNode(layer_data *dev_data, VkSemaphore semaphore) {
368     auto it = dev_data->semaphoreMap.find(semaphore);
369     if (it == dev_data->semaphoreMap.end()) {
370         return nullptr;
371     }
372     return &it->second;
373 }
374 
375 COMMAND_POOL_NODE *getCommandPoolNode(layer_data *dev_data, VkCommandPool pool) {
376     auto it = dev_data->commandPoolMap.find(pool);
377     if (it == dev_data->commandPoolMap.end()) {
378         return nullptr;
379     }
380     return &it->second;
381 }
382 
383 PHYSICAL_DEVICE_STATE *getPhysicalDeviceState(instance_layer_data *instance_data, VkPhysicalDevice phys) {
384     auto it = instance_data->physical_device_map.find(phys);
385     if (it == instance_data->physical_device_map.end()) {
386         return nullptr;
387     }
388     return &it->second;
389 }
390 
391 SURFACE_STATE *getSurfaceState(instance_layer_data *instance_data, VkSurfaceKHR surface) {
392     auto it = instance_data->surface_map.find(surface);
393     if (it == instance_data->surface_map.end()) {
394         return nullptr;
395     }
396     return &it->second;
397 }
398 
399 // Return ptr to memory binding for given handle of specified type
400 static BINDABLE *GetObjectMemBinding(layer_data *my_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
401     switch (type) {
402     case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
403         return getImageState(my_data, VkImage(handle));
404     case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
405         return getBufferNode(my_data, VkBuffer(handle));
406     default:
407         break;
408     }
409     return nullptr;
410 }
411 // prototype
412 static GLOBAL_CB_NODE *getCBNode(layer_data const *, const VkCommandBuffer);
413 
414 // Helper function to validate correct usage bits set for buffers or images
415 //  Verify that (actual & desired) flags != 0 or,
416 //   if strict is true, verify that (actual & desired) flags == desired
417 //  In case of error, report it via dbg callbacks
418 static bool validate_usage_flags(layer_data *my_data, VkFlags actual, VkFlags desired, VkBool32 strict,
419                                      uint64_t obj_handle, VkDebugReportObjectTypeEXT obj_type, char const *ty_str,
420                                      char const *func_name, char const *usage_str) {
421     bool correct_usage = false;
422     bool skip_call = false;
423     if (strict)
424         correct_usage = ((actual & desired) == desired);
425     else
426         correct_usage = ((actual & desired) != 0);
427     if (!correct_usage) {
428         skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, obj_handle, __LINE__,
429                             MEMTRACK_INVALID_USAGE_FLAG, "MEM", "Invalid usage flag for %s 0x%" PRIxLEAST64
430                                                                 " used by %s. In this case, %s should have %s set during creation.",
431                             ty_str, obj_handle, func_name, ty_str, usage_str);
432     }
433     return skip_call;
434 }
435 
436 // Helper function to validate usage flags for images
437 // For given image_state send actual vs. desired usage off to helper above where
438 //  an error will be flagged if usage is not correct
439 static bool ValidateImageUsageFlags(layer_data *dev_data, IMAGE_STATE const *image_state, VkFlags desired, VkBool32 strict,
440                                     char const *func_name, char const *usage_string) {
441     return validate_usage_flags(dev_data, image_state->createInfo.usage, desired, strict,
442                                 reinterpret_cast<const uint64_t &>(image_state->image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
443                                 "image", func_name, usage_string);
444 }
445 
446 // Helper function to validate usage flags for buffers
447 // For given buffer_node send actual vs. desired usage off to helper above where
448 //  an error will be flagged if usage is not correct
449 static bool ValidateBufferUsageFlags(layer_data *dev_data, BUFFER_NODE const *buffer_node, VkFlags desired, VkBool32 strict,
450                                      char const *func_name, char const *usage_string) {
451     return validate_usage_flags(dev_data, buffer_node->createInfo.usage, desired, strict,
452                                 reinterpret_cast<const uint64_t &>(buffer_node->buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
453                                 "buffer", func_name, usage_string);
454 }
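// Illustrative call site (hedged sketch; the surrounding names are assumed): a command that writes into
// a buffer would typically require the transfer-destination usage bit strictly, e.g.
//     skip |= ValidateBufferUsageFlags(dev_data, buff_node, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
//                                      "vkCmdFillBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");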
455 
456 // Return ptr to info in map container containing mem, or NULL if not found
457 //  Calls to this function should be wrapped in mutex
458 DEVICE_MEM_INFO *getMemObjInfo(const layer_data *dev_data, const VkDeviceMemory mem) {
459     auto mem_it = dev_data->memObjMap.find(mem);
460     if (mem_it == dev_data->memObjMap.end()) {
461         return NULL;
462     }
463     return mem_it->second.get();
464 }
465 
466 static void add_mem_obj_info(layer_data *my_data, void *object, const VkDeviceMemory mem,
467                              const VkMemoryAllocateInfo *pAllocateInfo) {
468     assert(object != NULL);
469 
470     my_data->memObjMap[mem] = unique_ptr<DEVICE_MEM_INFO>(new DEVICE_MEM_INFO(object, mem, pAllocateInfo));
471 }
472 
473 // Helper function to print lowercase string of object type
474 //  TODO: Unify string helper functions, this should really come out of a string helper if not there already
475 static const char *object_type_to_string(VkDebugReportObjectTypeEXT type) {
476     switch (type) {
477     case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
478         return "image";
479     case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
480         return "buffer";
481     case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT:
482         return "image view";
483     case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT:
484         return "buffer view";
485     case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT:
486         return "swapchain";
487     case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT:
488         return "descriptor set";
489     case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT:
490         return "framebuffer";
491     case VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT:
492         return "event";
493     case VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT:
494         return "query pool";
495     case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT:
496         return "descriptor pool";
497     case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT:
498         return "command pool";
499     case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT:
500         return "pipeline";
501     case VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT:
502         return "sampler";
503     case VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT:
504         return "renderpass";
505     case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT:
506         return "device memory";
507     case VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT:
508         return "semaphore";
509     default:
510         return "unknown";
511     }
512 }
513 
514 // For given bound_object_handle, bound to given mem allocation, verify that the range for the bound object is valid
515 static bool ValidateMemoryIsValid(layer_data *dev_data, VkDeviceMemory mem, uint64_t bound_object_handle,
516                                   VkDebugReportObjectTypeEXT type, const char *functionName) {
517     DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
518     if (mem_info) {
519         if (!mem_info->bound_ranges[bound_object_handle].valid) {
520             return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
521                            reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_INVALID_MEM_REGION, "MEM",
522                            "%s: Cannot read invalid region of memory allocation 0x%" PRIx64 " for bound %s object 0x%" PRIx64
523                            ", please fill the memory before using.",
524                            functionName, reinterpret_cast<uint64_t &>(mem), object_type_to_string(type), bound_object_handle);
525         }
526     }
527     return false;
528 }
529 // For given image_state
530 //  If mem is special swapchain key, then verify that image_state valid member is true
531 //  Else verify that the image's bound memory range is valid
532 static bool ValidateImageMemoryIsValid(layer_data *dev_data, IMAGE_STATE *image_state, const char *functionName) {
533     if (image_state->binding.mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
534         if (!image_state->valid) {
535             return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
536                            reinterpret_cast<uint64_t &>(image_state->binding.mem), __LINE__, MEMTRACK_INVALID_MEM_REGION, "MEM",
537                            "%s: Cannot read invalid swapchain image 0x%" PRIx64 ", please fill the memory before using.",
538                            functionName, reinterpret_cast<uint64_t &>(image_state->image));
539         }
540     } else {
541         return ValidateMemoryIsValid(dev_data, image_state->binding.mem, reinterpret_cast<uint64_t &>(image_state->image),
542                                      VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, functionName);
543     }
544     return false;
545 }
546 // For given buffer_node, verify that the range it's bound to is valid
547 static bool ValidateBufferMemoryIsValid(layer_data *dev_data, BUFFER_NODE *buffer_node, const char *functionName) {
548     return ValidateMemoryIsValid(dev_data, buffer_node->binding.mem, reinterpret_cast<uint64_t &>(buffer_node->buffer),
549                                  VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, functionName);
550 }
551 // For the given memory allocation, set the range bound by the given handle object to the valid param value
552 static void SetMemoryValid(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, bool valid) {
553     DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
554     if (mem_info) {
555         mem_info->bound_ranges[handle].valid = valid;
556     }
557 }
558 // For given image node
559 //  If mem is special swapchain key, then set entire image_state to valid param value
560 //  Else set the image's bound memory range to valid param value
561 static void SetImageMemoryValid(layer_data *dev_data, IMAGE_STATE *image_state, bool valid) {
562     if (image_state->binding.mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
563         image_state->valid = valid;
564     } else {
565         SetMemoryValid(dev_data, image_state->binding.mem, reinterpret_cast<uint64_t &>(image_state->image), valid);
566     }
567 }
568 // For given buffer node set the buffer's bound memory range to valid param value
569 static void SetBufferMemoryValid(layer_data *dev_data, BUFFER_NODE *buffer_node, bool valid) {
570     SetMemoryValid(dev_data, buffer_node->binding.mem, reinterpret_cast<uint64_t &>(buffer_node->buffer), valid);
571 }
572 // Find CB Info and add mem reference to list container
573 // Find Mem Obj Info and add CB reference to list container
574 static bool update_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb, const VkDeviceMemory mem,
575                                               const char *apiName) {
576     bool skip_call = false;
577 
578     // Skip validation if this image was created through WSI
579     if (mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
580 
581         // First update CB binding in MemObj mini CB list
582         DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, mem);
583         if (pMemInfo) {
584             // Now update CBInfo's Mem reference list
585             GLOBAL_CB_NODE *cb_node = getCBNode(dev_data, cb);
586             pMemInfo->cb_bindings.insert(cb_node);
587             // TODO: keep track of all destroyed CBs so we know if this is a stale or simply invalid object
588             if (cb_node) {
589                 cb_node->memObjs.insert(mem);
590             }
591         }
592     }
593     return skip_call;
594 }
595 
596 // Create binding link between given sampler and command buffer node
597 void AddCommandBufferBindingSampler(GLOBAL_CB_NODE *cb_node, SAMPLER_STATE *sampler_state) {
598     sampler_state->cb_bindings.insert(cb_node);
599     cb_node->object_bindings.insert(
600         {reinterpret_cast<uint64_t &>(sampler_state->sampler), VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT});
601 }
602 
603 // Create binding link between given image node and command buffer node
604 void AddCommandBufferBindingImage(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_STATE *image_state) {
605     // Skip validation if this image was created through WSI
606     if (image_state->binding.mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
607         // First update CB binding in MemObj mini CB list
608         DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, image_state->binding.mem);
609         if (pMemInfo) {
610             pMemInfo->cb_bindings.insert(cb_node);
611             // Now update CBInfo's Mem reference list
612             cb_node->memObjs.insert(image_state->binding.mem);
613         }
614         // Now update cb binding for image
615         cb_node->object_bindings.insert({reinterpret_cast<uint64_t &>(image_state->image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT});
616         image_state->cb_bindings.insert(cb_node);
617     }
618 }
619 
620 // Create binding link between given image view node and its image with command buffer node
621 void AddCommandBufferBindingImageView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_VIEW_STATE *view_state) {
622     // First add bindings for imageView
623     view_state->cb_bindings.insert(cb_node);
624     cb_node->object_bindings.insert(
625         {reinterpret_cast<uint64_t &>(view_state->image_view), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT});
626     auto image_state = getImageState(dev_data, view_state->create_info.image);
627     // Add bindings for image within imageView
628     if (image_state) {
629         AddCommandBufferBindingImage(dev_data, cb_node, image_state);
630     }
631 }
632 
633 // Create binding link between given buffer node and command buffer node
634 void AddCommandBufferBindingBuffer(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_NODE *buff_node) {
635     // First update CB binding in MemObj mini CB list
636     DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, buff_node->binding.mem);
637     if (pMemInfo) {
638         pMemInfo->cb_bindings.insert(cb_node);
639         // Now update CBInfo's Mem reference list
640         cb_node->memObjs.insert(buff_node->binding.mem);
641     }
642     // Now update cb binding for buffer
643     cb_node->object_bindings.insert({reinterpret_cast<uint64_t &>(buff_node->buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT});
644     buff_node->cb_bindings.insert(cb_node);
645 }
646 
647 // Create binding link between given buffer view node and its buffer with command buffer node
648 void AddCommandBufferBindingBufferView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_VIEW_STATE *view_state) {
649     // First add bindings for bufferView
650     view_state->cb_bindings.insert(cb_node);
651     cb_node->object_bindings.insert(
652         {reinterpret_cast<uint64_t &>(view_state->buffer_view), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT});
653     auto buffer_node = getBufferNode(dev_data, view_state->create_info.buffer);
654     // Add bindings for buffer within bufferView
655     if (buffer_node) {
656         AddCommandBufferBindingBuffer(dev_data, cb_node, buffer_node);
657     }
658 }
659 
660 // For every mem obj bound to particular CB, free bindings related to that CB
661 static void clear_cmd_buf_and_mem_references(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
662     if (cb_node) {
663         if (cb_node->memObjs.size() > 0) {
664             for (auto mem : cb_node->memObjs) {
665                 DEVICE_MEM_INFO *pInfo = getMemObjInfo(dev_data, mem);
666                 if (pInfo) {
667                     pInfo->cb_bindings.erase(cb_node);
668                 }
669             }
670             cb_node->memObjs.clear();
671         }
672         cb_node->validate_functions.clear();
673     }
674 }
675 // Overloaded call to above function when GLOBAL_CB_NODE has not already been looked-up
676 static void clear_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb) {
677     clear_cmd_buf_and_mem_references(dev_data, getCBNode(dev_data, cb));
678 }
679 
680 // Clear a single object binding from given memory object, or report error if binding is missing
681 static bool ClearMemoryObjectBinding(layer_data *dev_data, uint64_t handle, VkDebugReportObjectTypeEXT type, VkDeviceMemory mem) {
682     DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
683     // This obj is bound to a memory object. Remove the reference to this object in that memory object's list
684     if (mem_info && !mem_info->obj_bindings.erase({handle, type})) {
685         return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
686                     "MEM", "While trying to clear mem binding for %s obj 0x%" PRIxLEAST64
687                            ", unable to find that object referenced by mem obj 0x%" PRIxLEAST64,
688                     object_type_to_string(type), handle, (uint64_t)mem);
689     }
690     return false;
691 }
692 
693 // ClearMemoryObjectBindings clears the binding of objects to memory
694 //  For the given object it pulls the memory bindings and makes sure that the bindings
695 //  no longer refer to the object being cleared. This occurs when objects are destroyed.
696 static bool ClearMemoryObjectBindings(layer_data *dev_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
697     bool skip = false;
698     BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
699     if (mem_binding) {
700         if (!mem_binding->sparse) {
701             skip = ClearMemoryObjectBinding(dev_data, handle, type, mem_binding->binding.mem);
702         } else { // Sparse, clear all bindings
703             for (auto& sparse_mem_binding : mem_binding->sparse_bindings) {
704                 skip |= ClearMemoryObjectBinding(dev_data, handle, type, sparse_mem_binding.mem);
705             }
706         }
707     }
708     return skip;
709 }
710 
711 // For given mem object, verify that it is not null or UNBOUND; if it is, report an error. Return skip value.
712 bool VerifyBoundMemoryIsValid(const layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, const char *api_name,
713                               const char *type_name) {
714     bool result = false;
715     if (VK_NULL_HANDLE == mem) {
716         result = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle,
717                          __LINE__, MEMTRACK_OBJECT_NOT_BOUND, "MEM",
718                          "%s: Vk%s object 0x%" PRIxLEAST64 " used with no memory bound. Memory should be bound by calling "
719                          "vkBind%sMemory().",
720                          api_name, type_name, handle, type_name);
721     } else if (MEMORY_UNBOUND == mem) {
722         result = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle,
723                          __LINE__, MEMTRACK_OBJECT_NOT_BOUND, "MEM",
724                          "%s: Vk%s object 0x%" PRIxLEAST64 " used with no memory bound and previously bound memory was freed. "
725                          "Memory must not be freed prior to this operation.",
726                          api_name, type_name, handle);
727     }
728     return result;
729 }
730 
731 // Check to see if memory was ever bound to this image
732 bool ValidateMemoryIsBoundToImage(const layer_data *dev_data, const IMAGE_STATE *image_state, const char *api_name) {
733     bool result = false;
734     if (0 == (static_cast<uint32_t>(image_state->createInfo.flags) & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)) {
735         result = VerifyBoundMemoryIsValid(dev_data, image_state->binding.mem, reinterpret_cast<const uint64_t &>(image_state->image),
736                                           api_name, "Image");
737     }
738     return result;
739 }
740 
741 // Check to see if memory was bound to this buffer
742 bool ValidateMemoryIsBoundToBuffer(const layer_data *dev_data, const BUFFER_NODE *buffer_node, const char *api_name) {
743     bool result = false;
744     if (0 == (static_cast<uint32_t>(buffer_node->createInfo.flags) & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)) {
745         result = VerifyBoundMemoryIsValid(dev_data, buffer_node->binding.mem,
746                                           reinterpret_cast<const uint64_t &>(buffer_node->buffer), api_name, "Buffer");
747     }
748     return result;
749 }
750 
751 // SetMemBinding is used to establish immutable, non-sparse binding between a single image/buffer object and memory object
752 // For NULL mem case, output warning
753 // Make sure given object is in global object map
754 //  IF a previous binding existed, output validation error
755 //  Otherwise, add reference from objectInfo to memoryInfo
756 //  Add reference off of objInfo
757 static bool SetMemBinding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, VkDebugReportObjectTypeEXT type,
758                           const char *apiName) {
759     bool skip_call = false;
760     // It's an error to bind an object to NULL memory
761     if (mem == VK_NULL_HANDLE) {
762         skip_call = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_MEM_OBJ,
763                             "MEM", "In %s, attempting to Bind Obj(0x%" PRIxLEAST64 ") to NULL", apiName, handle);
764     } else {
765         BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
766         assert(mem_binding);
767         // TODO : Add check here to make sure object isn't sparse
768         //  VALIDATION_ERROR_00792 for buffers
769         //  VALIDATION_ERROR_00804 for images
770         assert(!mem_binding->sparse);
771         DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
772         if (mem_info) {
773             DEVICE_MEM_INFO *prev_binding = getMemObjInfo(dev_data, mem_binding->binding.mem);
774             if (prev_binding) {
775                 skip_call |=
776                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
777                             reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_REBIND_OBJECT, "MEM",
778                             "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
779                             ") which has already been bound to mem object 0x%" PRIxLEAST64,
780                             apiName, reinterpret_cast<uint64_t &>(mem), handle, reinterpret_cast<uint64_t &>(prev_binding->mem));
781             } else if (mem_binding->binding.mem == MEMORY_UNBOUND) {
782                 skip_call |=
783                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
784                             reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_REBIND_OBJECT, "MEM",
785                             "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
786                             ") which was previously bound to memory that has since been freed. Memory bindings are immutable in "
787                             "Vulkan so this attempt to bind to new memory is not allowed.",
788                             apiName, reinterpret_cast<uint64_t &>(mem), handle);
789             } else {
790                 mem_info->obj_bindings.insert({handle, type});
791                 // For image objects, make sure default memory state is correctly set
792                 // TODO : What's the best/correct way to handle this?
793                 if (VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT == type) {
794                     auto const image_state = getImageState(dev_data, VkImage(handle));
795                     if (image_state) {
796                         VkImageCreateInfo ici = image_state->createInfo;
797                         if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
798                             // TODO::  More memory state transition stuff.
799                         }
800                     }
801                 }
802                 mem_binding->binding.mem = mem;
803             }
804         }
805     }
806     return skip_call;
807 }
808 
809 // For the NULL mem case, clear any previous binding. Otherwise:
810 // Make sure given object is in its object map
811 //  IF a previous binding existed, update binding
812 //  Add reference from objectInfo to memoryInfo
813 //  Add reference off of object's binding info
814 // Return true if a validation error was logged (the call should be skipped), false otherwise
815 static bool SetSparseMemBinding(layer_data *dev_data, MEM_BINDING binding, uint64_t handle, VkDebugReportObjectTypeEXT type,
816                                 const char *apiName) {
817     bool skip_call = VK_FALSE;
818     // Handle NULL case separately, just clear previous binding & decrement reference
819     if (binding.mem == VK_NULL_HANDLE) {
820         // TODO : This should cause the range of the resource to be unbound according to spec
821     } else {
822         BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
823         assert(mem_binding);
824         assert(mem_binding->sparse);
825         DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, binding.mem);
826         if (mem_info) {
827             mem_info->obj_bindings.insert({handle, type});
828             // Need to set mem binding for this object
829             mem_binding->sparse_bindings.insert(binding);
830         }
831     }
832     return skip_call;
833 }
834 
835 // For handle of given object type, return memory binding
836 static bool get_mem_for_type(layer_data *dev_data, uint64_t handle, VkDebugReportObjectTypeEXT type, VkDeviceMemory *mem) {
837     bool skip_call = false;
838     *mem = VK_NULL_HANDLE;
839     switch (type) {
840     case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
841         *mem = getImageState(dev_data, VkImage(handle))->binding.mem;
842         break;
843     case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
844         *mem = getBufferNode(dev_data, VkBuffer(handle))->binding.mem;
845         break;
846     default:
847         assert(0);
848     }
849     if (!*mem) {
850         skip_call = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
851                             "MEM", "Trying to get mem binding for %s object 0x%" PRIxLEAST64
852                                    " but binding is NULL. Has memory been bound to this object?",
853                             object_type_to_string(type), handle);
854     }
855     return skip_call;
856 }
857 
858 // Print details of MemObjInfo list
859 static void print_mem_list(layer_data *dev_data) {
860     // Early out if info is not requested
861     if (!(dev_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
862         return;
863     }
864 
865     // Just printing each msg individually for now, may want to package these into single large print
866     log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
867             MEMTRACK_NONE, "MEM", "Details of Memory Object list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
868             dev_data->memObjMap.size());
869     log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
870             MEMTRACK_NONE, "MEM", "=============================");
871 
872     if (dev_data->memObjMap.size() <= 0)
873         return;
874 
875     for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
876         auto mem_info = (*ii).second.get();
877 
878         log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
879                 __LINE__, MEMTRACK_NONE, "MEM", "    ===MemObjInfo at 0x%p===", (void *)mem_info);
880         log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
881                 __LINE__, MEMTRACK_NONE, "MEM", "    Mem object: 0x%" PRIxLEAST64, (uint64_t)(mem_info->mem));
882         log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
883                 __LINE__, MEMTRACK_NONE, "MEM", "    Ref Count: " PRINTF_SIZE_T_SPECIFIER,
884                 mem_info->cb_bindings.size() + mem_info->obj_bindings.size());
885         if (0 != mem_info->alloc_info.allocationSize) {
886             string pAllocInfoMsg = vk_print_vkmemoryallocateinfo(&mem_info->alloc_info, "MEM(INFO):         ");
887             log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
888                     __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info:\n%s", pAllocInfoMsg.c_str());
889         } else {
890             log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
891                     __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info is NULL (alloc done by vkCreateSwapchainKHR())");
892         }
893 
894         log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
895                 __LINE__, MEMTRACK_NONE, "MEM", "    VK OBJECT Binding list of size " PRINTF_SIZE_T_SPECIFIER " elements:",
896                 mem_info->obj_bindings.size());
897         if (mem_info->obj_bindings.size() > 0) {
898             for (auto obj : mem_info->obj_bindings) {
899                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
900                         0, __LINE__, MEMTRACK_NONE, "MEM", "       VK OBJECT 0x%" PRIx64, obj.handle);
901             }
902         }
903 
904         log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
905                 __LINE__, MEMTRACK_NONE, "MEM",
906                 "    VK Command Buffer (CB) binding list of size " PRINTF_SIZE_T_SPECIFIER " elements",
907                 mem_info->cb_bindings.size());
908         if (mem_info->cb_bindings.size() > 0) {
909             for (auto cb : mem_info->cb_bindings) {
910                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
911                         0, __LINE__, MEMTRACK_NONE, "MEM", "      VK command buffer 0x%p", cb);
912             }
913         }
914     }
915 }
916 
917 static void printCBList(layer_data *my_data) {
918     GLOBAL_CB_NODE *pCBInfo = NULL;
919 
920     // Early out if info is not requested
921     if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
922         return;
923     }
924 
925     log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
926             MEMTRACK_NONE, "MEM", "Details of command buffer list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
927             my_data->commandBufferMap.size());
928     log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
929             MEMTRACK_NONE, "MEM", "==================");
930 
931     if (my_data->commandBufferMap.size() <= 0)
932         return;
933 
934     for (auto &cb_node : my_data->commandBufferMap) {
935         pCBInfo = cb_node.second;
936 
937         log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
938                 __LINE__, MEMTRACK_NONE, "MEM", "    CB Info (0x%p) has command buffer 0x%p", (void *)pCBInfo,
939                 (void *)pCBInfo->commandBuffer);
940 
941         if (pCBInfo->memObjs.size() <= 0)
942             continue;
943         for (auto obj : pCBInfo->memObjs) {
944             log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
945                     __LINE__, MEMTRACK_NONE, "MEM", "      Mem obj 0x%" PRIx64, (uint64_t)obj);
946         }
947     }
948 }
949 
950 // Return a string representation of CMD_TYPE enum
951 static string cmdTypeToString(CMD_TYPE cmd) {
952     switch (cmd) {
953     case CMD_BINDPIPELINE:
954         return "CMD_BINDPIPELINE";
955     case CMD_BINDPIPELINEDELTA:
956         return "CMD_BINDPIPELINEDELTA";
957     case CMD_SETVIEWPORTSTATE:
958         return "CMD_SETVIEWPORTSTATE";
959     case CMD_SETLINEWIDTHSTATE:
960         return "CMD_SETLINEWIDTHSTATE";
961     case CMD_SETDEPTHBIASSTATE:
962         return "CMD_SETDEPTHBIASSTATE";
963     case CMD_SETBLENDSTATE:
964         return "CMD_SETBLENDSTATE";
965     case CMD_SETDEPTHBOUNDSSTATE:
966         return "CMD_SETDEPTHBOUNDSSTATE";
967     case CMD_SETSTENCILREADMASKSTATE:
968         return "CMD_SETSTENCILREADMASKSTATE";
969     case CMD_SETSTENCILWRITEMASKSTATE:
970         return "CMD_SETSTENCILWRITEMASKSTATE";
971     case CMD_SETSTENCILREFERENCESTATE:
972         return "CMD_SETSTENCILREFERENCESTATE";
973     case CMD_BINDDESCRIPTORSETS:
974         return "CMD_BINDDESCRIPTORSETS";
975     case CMD_BINDINDEXBUFFER:
976         return "CMD_BINDINDEXBUFFER";
977     case CMD_BINDVERTEXBUFFER:
978         return "CMD_BINDVERTEXBUFFER";
979     case CMD_DRAW:
980         return "CMD_DRAW";
981     case CMD_DRAWINDEXED:
982         return "CMD_DRAWINDEXED";
983     case CMD_DRAWINDIRECT:
984         return "CMD_DRAWINDIRECT";
985     case CMD_DRAWINDEXEDINDIRECT:
986         return "CMD_DRAWINDEXEDINDIRECT";
987     case CMD_DISPATCH:
988         return "CMD_DISPATCH";
989     case CMD_DISPATCHINDIRECT:
990         return "CMD_DISPATCHINDIRECT";
991     case CMD_COPYBUFFER:
992         return "CMD_COPYBUFFER";
993     case CMD_COPYIMAGE:
994         return "CMD_COPYIMAGE";
995     case CMD_BLITIMAGE:
996         return "CMD_BLITIMAGE";
997     case CMD_COPYBUFFERTOIMAGE:
998         return "CMD_COPYBUFFERTOIMAGE";
999     case CMD_COPYIMAGETOBUFFER:
1000         return "CMD_COPYIMAGETOBUFFER";
1001     case CMD_CLONEIMAGEDATA:
1002         return "CMD_CLONEIMAGEDATA";
1003     case CMD_UPDATEBUFFER:
1004         return "CMD_UPDATEBUFFER";
1005     case CMD_FILLBUFFER:
1006         return "CMD_FILLBUFFER";
1007     case CMD_CLEARCOLORIMAGE:
1008         return "CMD_CLEARCOLORIMAGE";
1009     case CMD_CLEARATTACHMENTS:
1010         return "CMD_CLEARATTACHMENTS";
1011     case CMD_CLEARDEPTHSTENCILIMAGE:
1012         return "CMD_CLEARDEPTHSTENCILIMAGE";
1013     case CMD_RESOLVEIMAGE:
1014         return "CMD_RESOLVEIMAGE";
1015     case CMD_SETEVENT:
1016         return "CMD_SETEVENT";
1017     case CMD_RESETEVENT:
1018         return "CMD_RESETEVENT";
1019     case CMD_WAITEVENTS:
1020         return "CMD_WAITEVENTS";
1021     case CMD_PIPELINEBARRIER:
1022         return "CMD_PIPELINEBARRIER";
1023     case CMD_BEGINQUERY:
1024         return "CMD_BEGINQUERY";
1025     case CMD_ENDQUERY:
1026         return "CMD_ENDQUERY";
1027     case CMD_RESETQUERYPOOL:
1028         return "CMD_RESETQUERYPOOL";
1029     case CMD_COPYQUERYPOOLRESULTS:
1030         return "CMD_COPYQUERYPOOLRESULTS";
1031     case CMD_WRITETIMESTAMP:
1032         return "CMD_WRITETIMESTAMP";
1033     case CMD_INITATOMICCOUNTERS:
1034         return "CMD_INITATOMICCOUNTERS";
1035     case CMD_LOADATOMICCOUNTERS:
1036         return "CMD_LOADATOMICCOUNTERS";
1037     case CMD_SAVEATOMICCOUNTERS:
1038         return "CMD_SAVEATOMICCOUNTERS";
1039     case CMD_BEGINRENDERPASS:
1040         return "CMD_BEGINRENDERPASS";
1041     case CMD_ENDRENDERPASS:
1042         return "CMD_ENDRENDERPASS";
1043     default:
1044         return "UNKNOWN";
1045     }
1046 }
1047 
1048 // SPIRV utility functions
1049 static void build_def_index(shader_module *module) {
1050     for (auto insn : *module) {
1051         switch (insn.opcode()) {
1052         /* Types */
1053         case spv::OpTypeVoid:
1054         case spv::OpTypeBool:
1055         case spv::OpTypeInt:
1056         case spv::OpTypeFloat:
1057         case spv::OpTypeVector:
1058         case spv::OpTypeMatrix:
1059         case spv::OpTypeImage:
1060         case spv::OpTypeSampler:
1061         case spv::OpTypeSampledImage:
1062         case spv::OpTypeArray:
1063         case spv::OpTypeRuntimeArray:
1064         case spv::OpTypeStruct:
1065         case spv::OpTypeOpaque:
1066         case spv::OpTypePointer:
1067         case spv::OpTypeFunction:
1068         case spv::OpTypeEvent:
1069         case spv::OpTypeDeviceEvent:
1070         case spv::OpTypeReserveId:
1071         case spv::OpTypeQueue:
1072         case spv::OpTypePipe:
1073             module->def_index[insn.word(1)] = insn.offset();
1074             break;
1075 
1076         /* Fixed constants */
1077         case spv::OpConstantTrue:
1078         case spv::OpConstantFalse:
1079         case spv::OpConstant:
1080         case spv::OpConstantComposite:
1081         case spv::OpConstantSampler:
1082         case spv::OpConstantNull:
1083             module->def_index[insn.word(2)] = insn.offset();
1084             break;
1085 
1086         /* Specialization constants */
1087         case spv::OpSpecConstantTrue:
1088         case spv::OpSpecConstantFalse:
1089         case spv::OpSpecConstant:
1090         case spv::OpSpecConstantComposite:
1091         case spv::OpSpecConstantOp:
1092             module->def_index[insn.word(2)] = insn.offset();
1093             break;
1094 
1095         /* Variables */
1096         case spv::OpVariable:
1097             module->def_index[insn.word(2)] = insn.offset();
1098             break;
1099 
1100         /* Functions */
1101         case spv::OpFunction:
1102             module->def_index[insn.word(2)] = insn.offset();
1103             break;
1104 
1105         default:
1106             /* We don't care about any other defs for now. */
1107             break;
1108         }
1109     }
1110 }
1111 
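/* Find the OpEntryPoint instruction whose name and execution stage match the request, or return src->end() if none does. */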
1112 static spirv_inst_iter find_entrypoint(shader_module *src, char const *name, VkShaderStageFlagBits stageBits) {
1113     for (auto insn : *src) {
1114         if (insn.opcode() == spv::OpEntryPoint) {
1115             auto entrypointName = (char const *)&insn.word(3);
1116             auto entrypointStageBits = 1u << insn.word(1);
1117 
1118             if (!strcmp(entrypointName, name) && (entrypointStageBits & stageBits)) {
1119                 return insn;
1120             }
1121         }
1122     }
1123 
1124     return src->end();
1125 }
1126 
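/* Map a SPIR-V storage class to a human-readable name for use in diagnostics. */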
1127 static char const *storage_class_name(unsigned sc) {
1128     switch (sc) {
1129     case spv::StorageClassInput:
1130         return "input";
1131     case spv::StorageClassOutput:
1132         return "output";
1133     case spv::StorageClassUniformConstant:
1134         return "const uniform";
1135     case spv::StorageClassUniform:
1136         return "uniform";
1137     case spv::StorageClassWorkgroup:
1138         return "workgroup local";
1139     case spv::StorageClassCrossWorkgroup:
1140         return "workgroup global";
1141     case spv::StorageClassPrivate:
1142         return "private global";
1143     case spv::StorageClassFunction:
1144         return "function";
1145     case spv::StorageClassGeneric:
1146         return "generic";
1147     case spv::StorageClassAtomicCounter:
1148         return "atomic counter";
1149     case spv::StorageClassImage:
1150         return "image";
1151     case spv::StorageClassPushConstant:
1152         return "push constant";
1153     default:
1154         return "unknown";
1155     }
1156 }
1157 
1158 /* get the value of an integral constant */
1159 unsigned get_constant_value(shader_module const *src, unsigned id) {
1160     auto value = src->get_def(id);
1161     assert(value != src->end());
1162 
1163     if (value.opcode() != spv::OpConstant) {
1164         /* TODO: Either ensure that the specialization transform is already performed on a module we're
1165             considering here, OR -- specialize on the fly now.
1166             */
1167         return 1;
1168     }
1169 
1170     return value.word(3);
1171 }
1172 
1173 
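/* Append a human-readable description of a SPIR-V type to the stream, recursing through composite types. */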
1174 static void describe_type_inner(std::ostringstream &ss, shader_module const *src, unsigned type) {
1175     auto insn = src->get_def(type);
1176     assert(insn != src->end());
1177 
1178     switch (insn.opcode()) {
1179     case spv::OpTypeBool:
1180         ss << "bool";
1181         break;
1182     case spv::OpTypeInt:
1183         ss << (insn.word(3) ? 's' : 'u') << "int" << insn.word(2);
1184         break;
1185     case spv::OpTypeFloat:
1186         ss << "float" << insn.word(2);
1187         break;
1188     case spv::OpTypeVector:
1189         ss << "vec" << insn.word(3) << " of ";
1190         describe_type_inner(ss, src, insn.word(2));
1191         break;
1192     case spv::OpTypeMatrix:
1193         ss << "mat" << insn.word(3) << " of ";
1194         describe_type_inner(ss, src, insn.word(2));
1195         break;
1196     case spv::OpTypeArray:
1197         ss << "arr[" << get_constant_value(src, insn.word(3)) << "] of ";
1198         describe_type_inner(ss, src, insn.word(2));
1199         break;
1200     case spv::OpTypePointer:
1201         ss << "ptr to " << storage_class_name(insn.word(2)) << " ";
1202         describe_type_inner(ss, src, insn.word(3));
1203         break;
1204     case spv::OpTypeStruct: {
1205         ss << "struct of (";
1206         for (unsigned i = 2; i < insn.len(); i++) {
1207             describe_type_inner(ss, src, insn.word(i));
1208             if (i == insn.len() - 1) {
1209                 ss << ")";
1210             } else {
1211                 ss << ", ";
1212             }
1213         }
1214         break;
1215     }
1216     case spv::OpTypeSampler:
1217         ss << "sampler";
1218         break;
1219     case spv::OpTypeSampledImage:
1220         ss << "sampler+";
1221         describe_type_inner(ss, src, insn.word(2));
1222         break;
1223     case spv::OpTypeImage:
1224         ss << "image(dim=" << insn.word(3) << ", sampled=" << insn.word(7) << ")";
1225         break;
1226     default:
1227         ss << "oddtype";
1228         break;
1229     }
1230 }
1231 
1232 
1233 static std::string describe_type(shader_module const *src, unsigned type) {
1234     std::ostringstream ss;
1235     describe_type_inner(ss, src, type);
1236     return ss.str();
1237 }
1238 
1239 
1240 static bool is_narrow_numeric_type(spirv_inst_iter type)
1241 {
1242     if (type.opcode() != spv::OpTypeInt && type.opcode() != spv::OpTypeFloat)
1243         return false;
1244     return type.word(2) < 64;
1245 }
1246 
1247 
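/* Check whether two types, possibly from different modules, are compatible for interface matching.
 * When 'relaxed' is set, the producer side (a) may be a vector with more components than the consumer side (b) reads. */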
1248 static bool types_match(shader_module const *a, shader_module const *b, unsigned a_type, unsigned b_type, bool a_arrayed, bool b_arrayed, bool relaxed) {
1249     /* walk two type trees together, and complain about differences */
1250     auto a_insn = a->get_def(a_type);
1251     auto b_insn = b->get_def(b_type);
1252     assert(a_insn != a->end());
1253     assert(b_insn != b->end());
1254 
1255     if (a_arrayed && a_insn.opcode() == spv::OpTypeArray) {
1256         return types_match(a, b, a_insn.word(2), b_type, false, b_arrayed, relaxed);
1257     }
1258 
1259     if (b_arrayed && b_insn.opcode() == spv::OpTypeArray) {
1260         /* we probably just found the extra level of arrayness in b_type: compare the type inside it to a_type */
1261         return types_match(a, b, a_type, b_insn.word(2), a_arrayed, false, relaxed);
1262     }
1263 
1264     if (a_insn.opcode() == spv::OpTypeVector && relaxed && is_narrow_numeric_type(b_insn)) {
1265         return types_match(a, b, a_insn.word(2), b_type, a_arrayed, b_arrayed, false);
1266     }
1267 
1268     if (a_insn.opcode() != b_insn.opcode()) {
1269         return false;
1270     }
1271 
1272     if (a_insn.opcode() == spv::OpTypePointer) {
1273         /* match on pointee type. storage class is expected to differ */
1274         return types_match(a, b, a_insn.word(3), b_insn.word(3), a_arrayed, b_arrayed, relaxed);
1275     }
1276 
1277     if (a_arrayed || b_arrayed) {
1278         /* if we haven't resolved array-of-verts by here, we're not going to. */
1279         return false;
1280     }
1281 
1282     switch (a_insn.opcode()) {
1283     case spv::OpTypeBool:
1284         return true;
1285     case spv::OpTypeInt:
1286         /* match on width, signedness */
1287         return a_insn.word(2) == b_insn.word(2) && a_insn.word(3) == b_insn.word(3);
1288     case spv::OpTypeFloat:
1289         /* match on width */
1290         return a_insn.word(2) == b_insn.word(2);
1291     case spv::OpTypeVector:
1292         /* match on element type, count. */
1293         if (!types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false))
1294             return false;
1295         if (relaxed && is_narrow_numeric_type(a->get_def(a_insn.word(2)))) {
1296             return a_insn.word(3) >= b_insn.word(3);
1297         }
1298         else {
1299             return a_insn.word(3) == b_insn.word(3);
1300         }
1301     case spv::OpTypeMatrix:
1302         /* match on element type, count. */
1303         return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) && a_insn.word(3) == b_insn.word(3);
1304     case spv::OpTypeArray:
1305         /* match on element type, count. these all have the same layout. we don't get here if
1306          * b_arrayed. This differs from vector & matrix types in that the array size is the id of a constant instruction,
1307          * not a literal within OpTypeArray */
1308         return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) &&
1309                get_constant_value(a, a_insn.word(3)) == get_constant_value(b, b_insn.word(3));
1310     case spv::OpTypeStruct:
1311         /* match on all element types */
1312         {
1313             if (a_insn.len() != b_insn.len()) {
1314                 return false; /* structs cannot match if member counts differ */
1315             }
1316 
1317             for (unsigned i = 2; i < a_insn.len(); i++) {
1318                 if (!types_match(a, b, a_insn.word(i), b_insn.word(i), a_arrayed, b_arrayed, false)) {
1319                     return false;
1320                 }
1321             }
1322 
1323             return true;
1324         }
1325     default:
1326         /* remaining types are CLisms, or may not appear in the interfaces we
1327          * are interested in. Just claim no match.
1328          */
1329         return false;
1330     }
1331 }
1332 
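/* Look up 'id' in 'map', falling back to 'def' when no entry exists. */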
1333 static int value_or_default(std::unordered_map<unsigned, unsigned> const &map, unsigned id, int def) {
1334     auto it = map.find(id);
1335     if (it == map.end())
1336         return def;
1337     else
1338         return it->second;
1339 }
1340 
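/* Count the interface locations consumed by a type; 'strip_array_level' removes the outer per-vertex array level first. */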
1341 static unsigned get_locations_consumed_by_type(shader_module const *src, unsigned type, bool strip_array_level) {
1342     auto insn = src->get_def(type);
1343     assert(insn != src->end());
1344 
1345     switch (insn.opcode()) {
1346     case spv::OpTypePointer:
1347         /* see through the ptr -- this is only ever at the toplevel for graphics shaders;
1348          * we're never actually passing pointers around. */
1349         return get_locations_consumed_by_type(src, insn.word(3), strip_array_level);
1350     case spv::OpTypeArray:
1351         if (strip_array_level) {
1352             return get_locations_consumed_by_type(src, insn.word(2), false);
1353         } else {
1354             return get_constant_value(src, insn.word(3)) * get_locations_consumed_by_type(src, insn.word(2), false);
1355         }
1356     case spv::OpTypeMatrix:
1357         /* num locations is the dimension * element size */
1358         return insn.word(3) * get_locations_consumed_by_type(src, insn.word(2), false);
1359     case spv::OpTypeVector: {
1360         auto scalar_type = src->get_def(insn.word(2));
1361         auto bit_width = (scalar_type.opcode() == spv::OpTypeInt || scalar_type.opcode() == spv::OpTypeFloat) ?
1362             scalar_type.word(2) : 32;
1363 
1364         /* locations are 128-bit wide; 3- and 4-component vectors of 64 bit
1365          * types require two. */
1366         return (bit_width * insn.word(3) + 127) / 128;
1367     }
1368     default:
1369         /* everything else is just 1. */
1370         return 1;
1371 
1372         /* TODO: extend to handle 64bit scalar types, whose vectors may need
1373          * multiple locations. */
1374     }
1375 }
1376 
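/* 64-bit three- and four-component vertex formats consume two locations; all other formats consume one. */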
1377 static unsigned get_locations_consumed_by_format(VkFormat format) {
1378     switch (format) {
1379     case VK_FORMAT_R64G64B64A64_SFLOAT:
1380     case VK_FORMAT_R64G64B64A64_SINT:
1381     case VK_FORMAT_R64G64B64A64_UINT:
1382     case VK_FORMAT_R64G64B64_SFLOAT:
1383     case VK_FORMAT_R64G64B64_SINT:
1384     case VK_FORMAT_R64G64B64_UINT:
1385         return 2;
1386     default:
1387         return 1;
1388     }
1389 }
1390 
1391 typedef std::pair<unsigned, unsigned> location_t;
1392 typedef std::pair<unsigned, unsigned> descriptor_slot_t;
1393 
1394 struct interface_var {
1395     uint32_t id;
1396     uint32_t type_id;
1397     uint32_t offset;
1398     bool is_patch;
1399     bool is_block_member;
1400     /* TODO: collect the name, too? Isn't required to be present. */
1401 };
1402 
1403 struct shader_stage_attributes {
1404     char const *const name;
1405     bool arrayed_input;
1406     bool arrayed_output;
1407 };
1408 
1409 static shader_stage_attributes shader_stage_attribs[] = {
1410     {"vertex shader", false, false},
1411     {"tessellation control shader", true, true},
1412     {"tessellation evaluation shader", true, false},
1413     {"geometry shader", true, false},
1414     {"fragment shader", false, false},
1415 };
1416 
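/* Peel pointer (and, for per-vertex interfaces, one array) levels off a type until a struct is reached; returns src->end() if none is found. */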
1417 static spirv_inst_iter get_struct_type(shader_module const *src, spirv_inst_iter def, bool is_array_of_verts) {
1418     while (true) {
1419 
1420         if (def.opcode() == spv::OpTypePointer) {
1421             def = src->get_def(def.word(3));
1422         } else if (def.opcode() == spv::OpTypeArray && is_array_of_verts) {
1423             def = src->get_def(def.word(2));
1424             is_array_of_verts = false;
1425         } else if (def.opcode() == spv::OpTypeStruct) {
1426             return def;
1427         } else {
1428             return src->end();
1429         }
1430     }
1431 }
1432 
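/* If 'type_id' names an interface block, add one entry per location consumed by each of its members,
 * based on the members' Location and Component decorations. */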
1433 static void collect_interface_block_members(shader_module const *src,
1434                                             std::map<location_t, interface_var> *out,
1435                                             std::unordered_map<unsigned, unsigned> const &blocks, bool is_array_of_verts,
1436                                             uint32_t id, uint32_t type_id, bool is_patch) {
1437     /* Walk down the type_id presented, trying to determine whether it's actually an interface block. */
1438     auto type = get_struct_type(src, src->get_def(type_id), is_array_of_verts && !is_patch);
1439     if (type == src->end() || blocks.find(type.word(1)) == blocks.end()) {
1440         /* this isn't an interface block. */
1441         return;
1442     }
1443 
1444     std::unordered_map<unsigned, unsigned> member_components;
1445 
1446     /* Walk all the OpMemberDecorate for type's result id -- first pass, collect components. */
1447     for (auto insn : *src) {
1448         if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1449             unsigned member_index = insn.word(2);
1450 
1451             if (insn.word(3) == spv::DecorationComponent) {
1452                 unsigned component = insn.word(4);
1453                 member_components[member_index] = component;
1454             }
1455         }
1456     }
1457 
1458     /* Second pass -- produce the output, from Location decorations */
1459     for (auto insn : *src) {
1460         if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1461             unsigned member_index = insn.word(2);
1462             unsigned member_type_id = type.word(2 + member_index);
1463 
1464             if (insn.word(3) == spv::DecorationLocation) {
1465                 unsigned location = insn.word(4);
1466                 unsigned num_locations = get_locations_consumed_by_type(src, member_type_id, false);
1467                 auto component_it = member_components.find(member_index);
1468                 unsigned component = component_it == member_components.end() ? 0 : component_it->second;
1469 
1470                 for (unsigned int offset = 0; offset < num_locations; offset++) {
1471                     interface_var v;
1472                     v.id = id;
1473                     /* TODO: member index in interface_var too? */
1474                     v.type_id = member_type_id;
1475                     v.offset = offset;
1476                     v.is_patch = is_patch;
1477                     v.is_block_member = true;
1478                     (*out)[std::make_pair(location + offset, component)] = v;
1479                 }
1480             }
1481         }
1482     }
1483 }
1484 
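/* Gather the entrypoint's interface variables in the given storage class, keyed by (location, component). */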
1485 static std::map<location_t, interface_var> collect_interface_by_location(
1486         shader_module const *src, spirv_inst_iter entrypoint,
1487         spv::StorageClass sinterface, bool is_array_of_verts) {
1488 
1489     std::unordered_map<unsigned, unsigned> var_locations;
1490     std::unordered_map<unsigned, unsigned> var_builtins;
1491     std::unordered_map<unsigned, unsigned> var_components;
1492     std::unordered_map<unsigned, unsigned> blocks;
1493     std::unordered_map<unsigned, unsigned> var_patch;
1494 
1495     for (auto insn : *src) {
1496 
1497         /* We consider two interface models: SSO rendezvous-by-location, and
1498          * builtins. Complain about anything that fits neither model.
1499          */
1500         if (insn.opcode() == spv::OpDecorate) {
1501             if (insn.word(2) == spv::DecorationLocation) {
1502                 var_locations[insn.word(1)] = insn.word(3);
1503             }
1504 
1505             if (insn.word(2) == spv::DecorationBuiltIn) {
1506                 var_builtins[insn.word(1)] = insn.word(3);
1507             }
1508 
1509             if (insn.word(2) == spv::DecorationComponent) {
1510                 var_components[insn.word(1)] = insn.word(3);
1511             }
1512 
1513             if (insn.word(2) == spv::DecorationBlock) {
1514                 blocks[insn.word(1)] = 1;
1515             }
1516 
1517             if (insn.word(2) == spv::DecorationPatch) {
1518                 var_patch[insn.word(1)] = 1;
1519             }
1520         }
1521     }
1522 
1523     /* TODO: handle grouped decorations */
1524     /* TODO: handle index=1 dual source outputs from FS -- two vars will
1525      * have the same location, and we DON'T want to clobber. */
1526 
1527     /* find the end of the entrypoint's name string. additional zero bytes follow the actual null
1528        terminator, to fill out the rest of the word - so we only need to look at the last byte in
1529        the word to determine which word contains the terminator. */
1530     uint32_t word = 3;
1531     while (entrypoint.word(word) & 0xff000000u) {
1532         ++word;
1533     }
1534     ++word;
1535 
1536     std::map<location_t, interface_var> out;
1537 
1538     for (; word < entrypoint.len(); word++) {
1539         auto insn = src->get_def(entrypoint.word(word));
1540         assert(insn != src->end());
1541         assert(insn.opcode() == spv::OpVariable);
1542 
1543         if (insn.word(3) == static_cast<uint32_t>(sinterface)) {
1544             unsigned id = insn.word(2);
1545             unsigned type = insn.word(1);
1546 
1547             int location = value_or_default(var_locations, id, -1);
1548             int builtin = value_or_default(var_builtins, id, -1);
1549             unsigned component = value_or_default(var_components, id, 0); /* unspecified is OK, is 0 */
1550             bool is_patch = var_patch.find(id) != var_patch.end();
1551 
1552             /* All variables and interface block members in the Input or Output storage classes
1553              * must be decorated with either a builtin or an explicit location.
1554              *
1555              * TODO: integrate the interface block support here. For now, don't complain --
1556              * a valid SPIRV module will only hit this path for the interface block case, as the
1557              * individual members of the type are decorated, rather than variable declarations.
1558              */
1559 
1560             if (location != -1) {
1561                 /* A user-defined interface variable, with a location. Where a variable
1562                  * occupied multiple locations, emit one result for each. */
1563                 unsigned num_locations = get_locations_consumed_by_type(src, type, is_array_of_verts && !is_patch);
1564                 for (unsigned int offset = 0; offset < num_locations; offset++) {
1565                     interface_var v;
1566                     v.id = id;
1567                     v.type_id = type;
1568                     v.offset = offset;
1569                     v.is_patch = is_patch;
1570                     v.is_block_member = false;
1571                     out[std::make_pair(location + offset, component)] = v;
1572                 }
1573             } else if (builtin == -1) {
1574                 /* An interface block instance */
1575                 collect_interface_block_members(src, &out, blocks, is_array_of_verts, id, type, is_patch);
1576             }
1577         }
1578     }
1579 
1580     return out;
1581 }
1582 
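/* Gather input attachment uses: variables decorated with InputAttachmentIndex that are reachable from the entrypoint,
 * keyed by attachment index. */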
1583 static std::vector<std::pair<uint32_t, interface_var>> collect_interface_by_input_attachment_index(
1584         debug_report_data *report_data, shader_module const *src,
1585         std::unordered_set<uint32_t> const &accessible_ids) {
1586 
1587     std::vector<std::pair<uint32_t, interface_var>> out;
1588 
1589     for (auto insn : *src) {
1590         if (insn.opcode() == spv::OpDecorate) {
1591             if (insn.word(2) == spv::DecorationInputAttachmentIndex) {
1592                 auto attachment_index = insn.word(3);
1593                 auto id = insn.word(1);
1594 
1595                 if (accessible_ids.count(id)) {
1596                     auto def = src->get_def(id);
1597                     assert(def != src->end());
1598 
1599                     if (def.opcode() == spv::OpVariable && def.word(3) == spv::StorageClassUniformConstant) {
1600                         auto num_locations = get_locations_consumed_by_type(src, def.word(1), false);
1601                         for (unsigned int offset = 0; offset < num_locations; offset++) {
1602                             interface_var v;
1603                             v.id = id;
1604                             v.type_id = def.word(1);
1605                             v.offset = offset;
1606                             v.is_patch = false;
1607                             v.is_block_member = false;
1608                             out.emplace_back(attachment_index + offset, v);
1609                         }
1610                     }
1611                 }
1612             }
1613         }
1614     }
1615 
1616     return out;
1617 }
1618 
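/* Gather the (descriptor set, binding) slots of all Uniform and UniformConstant variables reachable from the entrypoint. */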
1619 static std::vector<std::pair<descriptor_slot_t, interface_var>> collect_interface_by_descriptor_slot(
1620         debug_report_data *report_data, shader_module const *src,
1621         std::unordered_set<uint32_t> const &accessible_ids) {
1622 
1623     std::unordered_map<unsigned, unsigned> var_sets;
1624     std::unordered_map<unsigned, unsigned> var_bindings;
1625 
1626     for (auto insn : *src) {
1627         /* All variables in the Uniform or UniformConstant storage classes are required to be decorated with both
1628          * DecorationDescriptorSet and DecorationBinding.
1629          */
1630         if (insn.opcode() == spv::OpDecorate) {
1631             if (insn.word(2) == spv::DecorationDescriptorSet) {
1632                 var_sets[insn.word(1)] = insn.word(3);
1633             }
1634 
1635             if (insn.word(2) == spv::DecorationBinding) {
1636                 var_bindings[insn.word(1)] = insn.word(3);
1637             }
1638         }
1639     }
1640 
1641     std::vector<std::pair<descriptor_slot_t, interface_var>> out;
1642 
1643     for (auto id : accessible_ids) {
1644         auto insn = src->get_def(id);
1645         assert(insn != src->end());
1646 
1647         if (insn.opcode() == spv::OpVariable &&
1648             (insn.word(3) == spv::StorageClassUniform || insn.word(3) == spv::StorageClassUniformConstant)) {
1649             unsigned set = value_or_default(var_sets, insn.word(2), 0);
1650             unsigned binding = value_or_default(var_bindings, insn.word(2), 0);
1651 
1652             interface_var v;
1653             v.id = insn.word(2);
1654             v.type_id = insn.word(1);
1655             v.offset = 0;
1656             v.is_patch = false;
1657             v.is_block_member = false;
1658             out.emplace_back(std::make_pair(set, binding), v);
1659         }
1660     }
1661 
1662     return out;
1663 }
1664 
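/* Compare the producer stage's outputs with the consumer stage's inputs, flagging unconsumed outputs,
 * unproduced inputs, and type or patch-decoration mismatches. */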
1665 static bool validate_interface_between_stages(debug_report_data *report_data, shader_module const *producer,
1666                                               spirv_inst_iter producer_entrypoint, shader_stage_attributes const *producer_stage,
1667                                               shader_module const *consumer, spirv_inst_iter consumer_entrypoint,
1668                                               shader_stage_attributes const *consumer_stage) {
1669     bool pass = true;
1670 
1671     auto outputs = collect_interface_by_location(producer, producer_entrypoint, spv::StorageClassOutput, producer_stage->arrayed_output);
1672     auto inputs = collect_interface_by_location(consumer, consumer_entrypoint, spv::StorageClassInput, consumer_stage->arrayed_input);
1673 
1674     auto a_it = outputs.begin();
1675     auto b_it = inputs.begin();
1676 
1677     /* maps sorted by key (location); walk them together to find mismatches */
1678     while ((outputs.size() > 0 && a_it != outputs.end()) || (inputs.size() && b_it != inputs.end())) {
1679         bool a_at_end = outputs.size() == 0 || a_it == outputs.end();
1680         bool b_at_end = inputs.size() == 0 || b_it == inputs.end();
1681         auto a_first = a_at_end ? std::make_pair(0u, 0u) : a_it->first;
1682         auto b_first = b_at_end ? std::make_pair(0u, 0u) : b_it->first;
1683 
1684         if (b_at_end || ((!a_at_end) && (a_first < b_first))) {
1685             if (log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1686                         __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1687                         "%s writes to output location %u.%u which is not consumed by %s", producer_stage->name, a_first.first,
1688                         a_first.second, consumer_stage->name)) {
1689                 pass = false;
1690             }
1691             a_it++;
1692         } else if (a_at_end || a_first > b_first) {
1693             if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1694                         __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC",
1695                         "%s consumes input location %u.%u which is not written by %s", consumer_stage->name, b_first.first, b_first.second,
1696                         producer_stage->name)) {
1697                 pass = false;
1698             }
1699             b_it++;
1700         } else {
1701             // subtleties of arrayed interfaces:
1702             // - if is_patch, then the member is not arrayed, even though the interface may be.
1703             // - if is_block_member, then the extra array level of an arrayed interface is not
1704             //   expressed in the member type -- it's expressed in the block type.
1705             if (!types_match(producer, consumer, a_it->second.type_id, b_it->second.type_id,
1706                              producer_stage->arrayed_output && !a_it->second.is_patch && !a_it->second.is_block_member,
1707                              consumer_stage->arrayed_input && !b_it->second.is_patch && !b_it->second.is_block_member,
1708                              true)) {
1709                 if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1710                             __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC", "Type mismatch on location %u.%u: '%s' vs '%s'",
1711                             a_first.first, a_first.second,
1712                             describe_type(producer, a_it->second.type_id).c_str(),
1713                             describe_type(consumer, b_it->second.type_id).c_str())) {
1714                     pass = false;
1715                 }
1716             }
1717             if (a_it->second.is_patch != b_it->second.is_patch) {
1718                 if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1719                             __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1720                             "Decoration mismatch on location %u.%u: is per-%s in %s stage but "
1721                             "per-%s in %s stage", a_first.first, a_first.second,
1722                             a_it->second.is_patch ? "patch" : "vertex", producer_stage->name,
1723                             b_it->second.is_patch ? "patch" : "vertex", consumer_stage->name)) {
1724                     pass = false;
1725                 }
1726             }
1727             a_it++;
1728             b_it++;
1729         }
1730     }
1731 
1732     return pass;
1733 }
1734 
1735 enum FORMAT_TYPE {
1736     FORMAT_TYPE_UNDEFINED,
1737     FORMAT_TYPE_FLOAT, /* UNORM, SNORM, FLOAT, USCALED, SSCALED, SRGB -- anything we consider float in the shader */
1738     FORMAT_TYPE_SINT,
1739     FORMAT_TYPE_UINT,
1740 };
1741 
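/* Classify a VkFormat as the fundamental numeric type a shader sees it as: float, signed int, unsigned int, or undefined. */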
1742 static unsigned get_format_type(VkFormat fmt) {
1743     switch (fmt) {
1744     case VK_FORMAT_UNDEFINED:
1745         return FORMAT_TYPE_UNDEFINED;
1746     case VK_FORMAT_R8_SINT:
1747     case VK_FORMAT_R8G8_SINT:
1748     case VK_FORMAT_R8G8B8_SINT:
1749     case VK_FORMAT_R8G8B8A8_SINT:
1750     case VK_FORMAT_R16_SINT:
1751     case VK_FORMAT_R16G16_SINT:
1752     case VK_FORMAT_R16G16B16_SINT:
1753     case VK_FORMAT_R16G16B16A16_SINT:
1754     case VK_FORMAT_R32_SINT:
1755     case VK_FORMAT_R32G32_SINT:
1756     case VK_FORMAT_R32G32B32_SINT:
1757     case VK_FORMAT_R32G32B32A32_SINT:
1758     case VK_FORMAT_R64_SINT:
1759     case VK_FORMAT_R64G64_SINT:
1760     case VK_FORMAT_R64G64B64_SINT:
1761     case VK_FORMAT_R64G64B64A64_SINT:
1762     case VK_FORMAT_B8G8R8_SINT:
1763     case VK_FORMAT_B8G8R8A8_SINT:
1764     case VK_FORMAT_A8B8G8R8_SINT_PACK32:
1765     case VK_FORMAT_A2B10G10R10_SINT_PACK32:
1766     case VK_FORMAT_A2R10G10B10_SINT_PACK32:
1767         return FORMAT_TYPE_SINT;
1768     case VK_FORMAT_R8_UINT:
1769     case VK_FORMAT_R8G8_UINT:
1770     case VK_FORMAT_R8G8B8_UINT:
1771     case VK_FORMAT_R8G8B8A8_UINT:
1772     case VK_FORMAT_R16_UINT:
1773     case VK_FORMAT_R16G16_UINT:
1774     case VK_FORMAT_R16G16B16_UINT:
1775     case VK_FORMAT_R16G16B16A16_UINT:
1776     case VK_FORMAT_R32_UINT:
1777     case VK_FORMAT_R32G32_UINT:
1778     case VK_FORMAT_R32G32B32_UINT:
1779     case VK_FORMAT_R32G32B32A32_UINT:
1780     case VK_FORMAT_R64_UINT:
1781     case VK_FORMAT_R64G64_UINT:
1782     case VK_FORMAT_R64G64B64_UINT:
1783     case VK_FORMAT_R64G64B64A64_UINT:
1784     case VK_FORMAT_B8G8R8_UINT:
1785     case VK_FORMAT_B8G8R8A8_UINT:
1786     case VK_FORMAT_A8B8G8R8_UINT_PACK32:
1787     case VK_FORMAT_A2B10G10R10_UINT_PACK32:
1788     case VK_FORMAT_A2R10G10B10_UINT_PACK32:
1789         return FORMAT_TYPE_UINT;
1790     default:
1791         return FORMAT_TYPE_FLOAT;
1792     }
1793 }
1794 
1795 /* characterizes a SPIR-V type appearing in an interface to a FF stage,
1796  * for comparison to a VkFormat's characterization above. */
1797 static unsigned get_fundamental_type(shader_module const *src, unsigned type) {
1798     auto insn = src->get_def(type);
1799     assert(insn != src->end());
1800 
1801     switch (insn.opcode()) {
1802     case spv::OpTypeInt:
1803         return insn.word(3) ? FORMAT_TYPE_SINT : FORMAT_TYPE_UINT;
1804     case spv::OpTypeFloat:
1805         return FORMAT_TYPE_FLOAT;
1806     case spv::OpTypeVector:
1807         return get_fundamental_type(src, insn.word(2));
1808     case spv::OpTypeMatrix:
1809         return get_fundamental_type(src, insn.word(2));
1810     case spv::OpTypeArray:
1811         return get_fundamental_type(src, insn.word(2));
1812     case spv::OpTypePointer:
1813         return get_fundamental_type(src, insn.word(3));
1814     case spv::OpTypeImage:
1815         return get_fundamental_type(src, insn.word(2));
1816 
1817     default:
1818         return FORMAT_TYPE_UNDEFINED;
1819     }
1820 }
1821 
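/* Convert a single-bit VkShaderStageFlagBits value to a zero-based stage index. */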
1822 static uint32_t get_shader_stage_id(VkShaderStageFlagBits stage) {
1823     uint32_t bit_pos = u_ffs(stage);
1824     return bit_pos - 1;
1825 }
1826 
1827 static bool validate_vi_consistency(debug_report_data *report_data, VkPipelineVertexInputStateCreateInfo const *vi) {
1828     /* walk the binding descriptions, which describe the step rate and stride of each vertex buffer.
1829      * each binding should be specified only once.
1830      */
1831     std::unordered_map<uint32_t, VkVertexInputBindingDescription const *> bindings;
1832     bool pass = true;
1833 
1834     for (unsigned i = 0; i < vi->vertexBindingDescriptionCount; i++) {
1835         auto desc = &vi->pVertexBindingDescriptions[i];
1836         auto &binding = bindings[desc->binding];
1837         if (binding) {
1838             if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1839                         __LINE__, SHADER_CHECKER_INCONSISTENT_VI, "SC",
1840                         "Duplicate vertex input binding descriptions for binding %d", desc->binding)) {
1841                 pass = false;
1842             }
1843         } else {
1844             binding = desc;
1845         }
1846     }
1847 
1848     return pass;
1849 }
1850 
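/* Check that every vertex input attribute feeds a vertex shader input of a compatible fundamental type,
 * warn about attributes the shader never consumes, and flag inputs with no attribute provided. */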
1851 static bool validate_vi_against_vs_inputs(debug_report_data *report_data, VkPipelineVertexInputStateCreateInfo const *vi,
1852                                           shader_module const *vs, spirv_inst_iter entrypoint) {
1853     bool pass = true;
1854 
1855     auto inputs = collect_interface_by_location(vs, entrypoint, spv::StorageClassInput, false);
1856 
1857     /* Build index by location */
1858     std::map<uint32_t, VkVertexInputAttributeDescription const *> attribs;
1859     if (vi) {
1860         for (unsigned i = 0; i < vi->vertexAttributeDescriptionCount; i++) {
1861             auto num_locations = get_locations_consumed_by_format(vi->pVertexAttributeDescriptions[i].format);
1862             for (auto j = 0u; j < num_locations; j++) {
1863                 attribs[vi->pVertexAttributeDescriptions[i].location + j] = &vi->pVertexAttributeDescriptions[i];
1864             }
1865         }
1866     }
1867 
1868     auto it_a = attribs.begin();
1869     auto it_b = inputs.begin();
1870     bool used = false;
1871 
1872     while ((attribs.size() > 0 && it_a != attribs.end()) || (inputs.size() > 0 && it_b != inputs.end())) {
1873         bool a_at_end = attribs.size() == 0 || it_a == attribs.end();
1874         bool b_at_end = inputs.size() == 0 || it_b == inputs.end();
1875         auto a_first = a_at_end ? 0 : it_a->first;
1876         auto b_first = b_at_end ? 0 : it_b->first.first;
1877         if (!a_at_end && (b_at_end || a_first < b_first)) {
1878             if (!used && log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1879                         __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1880                         "Vertex attribute at location %d not consumed by vertex shader", a_first)) {
1881                 pass = false;
1882             }
1883             used = false;
1884             it_a++;
1885         } else if (!b_at_end && (a_at_end || b_first < a_first)) {
1886             if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1887                         __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "Vertex shader consumes input at location %d but not provided",
1888                         b_first)) {
1889                 pass = false;
1890             }
1891             it_b++;
1892         } else {
1893             unsigned attrib_type = get_format_type(it_a->second->format);
1894             unsigned input_type = get_fundamental_type(vs, it_b->second.type_id);
1895 
1896             /* type checking */
1897             if (attrib_type != FORMAT_TYPE_UNDEFINED && input_type != FORMAT_TYPE_UNDEFINED && attrib_type != input_type) {
1898                 if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1899                             __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1900                             "Attribute type of `%s` at location %d does not match vertex shader input type of `%s`",
1901                             string_VkFormat(it_a->second->format), a_first,
1902                             describe_type(vs, it_b->second.type_id).c_str())) {
1903                     pass = false;
1904                 }
1905             }
1906 
1907             /* OK! */
1908             used = true;
1909             it_b++;
1910         }
1911     }
1912 
1913     return pass;
1914 }
1915 
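/* Check fragment shader outputs against the subpass's color attachments: every written location needs a matching
 * attachment of a compatible type, and every attachment should be written. */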
1916 static bool validate_fs_outputs_against_render_pass(debug_report_data *report_data, shader_module const *fs,
1917                                                     spirv_inst_iter entrypoint, VkRenderPassCreateInfo const *rpci,
1918                                                     uint32_t subpass_index) {
1919     std::map<uint32_t, VkFormat> color_attachments;
1920     auto subpass = rpci->pSubpasses[subpass_index];
1921     for (auto i = 0u; i < subpass.colorAttachmentCount; ++i) {
1922         uint32_t attachment = subpass.pColorAttachments[i].attachment;
1923         if (attachment == VK_ATTACHMENT_UNUSED)
1924             continue;
1925         if (rpci->pAttachments[attachment].format != VK_FORMAT_UNDEFINED) {
1926             color_attachments[i] = rpci->pAttachments[attachment].format;
1927         }
1928     }
1929 
1930     bool pass = true;
1931 
1932     /* TODO: dual source blend index (spv::DecIndex, zero if not provided) */
1933 
1934     auto outputs = collect_interface_by_location(fs, entrypoint, spv::StorageClassOutput, false);
1935 
1936     auto it_a = outputs.begin();
1937     auto it_b = color_attachments.begin();
1938 
1939     /* Walk attachment list and outputs together */
1940 
1941     while ((outputs.size() > 0 && it_a != outputs.end()) || (color_attachments.size() > 0 && it_b != color_attachments.end())) {
1942         bool a_at_end = outputs.size() == 0 || it_a == outputs.end();
1943         bool b_at_end = color_attachments.size() == 0 || it_b == color_attachments.end();
1944 
1945         if (!a_at_end && (b_at_end || it_a->first.first < it_b->first)) {
1946             if (log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1947                         __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1948                         "fragment shader writes to output location %d with no matching attachment", it_a->first.first)) {
1949                 pass = false;
1950             }
1951             it_a++;
1952         } else if (!b_at_end && (a_at_end || it_a->first.first > it_b->first)) {
1953             if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1954                         __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "Attachment %d not written by fragment shader",
1955                         it_b->first)) {
1956                 pass = false;
1957             }
1958             it_b++;
1959         } else {
1960             unsigned output_type = get_fundamental_type(fs, it_a->second.type_id);
1961             unsigned att_type = get_format_type(it_b->second);
1962 
1963             /* type checking */
1964             if (att_type != FORMAT_TYPE_UNDEFINED && output_type != FORMAT_TYPE_UNDEFINED && att_type != output_type) {
1965                 if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1966                             __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1967                             "Attachment %d of type `%s` does not match fragment shader output type of `%s`", it_b->first,
1968                             string_VkFormat(it_b->second),
1969                             describe_type(fs, it_a->second.type_id).c_str())) {
1970                     pass = false;
1971                 }
1972             }
1973 
1974             /* OK! */
1975             it_a++;
1976             it_b++;
1977         }
1978     }
1979 
1980     return pass;
1981 }
1982 
1983 /* For some analyses, we need to know about all ids referenced by the static call tree of a particular
1984  * entrypoint. This is important for identifying the set of shader resources actually used by an entrypoint,
1985  * for example.
1986  * Note: we only explore parts of the image which might actually contain ids we care about for the above analyses.
1987  *  - NOT the shader input/output interfaces.
1988  *
1989  * TODO: The set of interesting opcodes here was determined by eyeballing the SPIRV spec. It might be worth
1990  * converting parts of this to be generated from the machine-readable spec instead.
1991  */
1992 static std::unordered_set<uint32_t> mark_accessible_ids(shader_module const *src, spirv_inst_iter entrypoint) {
1993     std::unordered_set<uint32_t> ids;
1994     std::unordered_set<uint32_t> worklist;
1995     worklist.insert(entrypoint.word(2));
1996 
1997     while (!worklist.empty()) {
1998         auto id_iter = worklist.begin();
1999         auto id = *id_iter;
2000         worklist.erase(id_iter);
2001 
2002         auto insn = src->get_def(id);
2003         if (insn == src->end()) {
2004             /* id is something we didn't collect in build_def_index. that's OK -- we'll stumble
2005              * across all kinds of things here that we may not care about. */
2006             continue;
2007         }
2008 
2009         /* try to add to the output set */
2010         if (!ids.insert(id).second) {
2011             continue; /* if we already saw this id, we don't want to walk it again. */
2012         }
2013 
2014         switch (insn.opcode()) {
2015         case spv::OpFunction:
2016             /* scan whole body of the function, enlisting anything interesting */
2017             while (++insn, insn.opcode() != spv::OpFunctionEnd) {
2018                 switch (insn.opcode()) {
2019                 case spv::OpLoad:
2020                 case spv::OpAtomicLoad:
2021                 case spv::OpAtomicExchange:
2022                 case spv::OpAtomicCompareExchange:
2023                 case spv::OpAtomicCompareExchangeWeak:
2024                 case spv::OpAtomicIIncrement:
2025                 case spv::OpAtomicIDecrement:
2026                 case spv::OpAtomicIAdd:
2027                 case spv::OpAtomicISub:
2028                 case spv::OpAtomicSMin:
2029                 case spv::OpAtomicUMin:
2030                 case spv::OpAtomicSMax:
2031                 case spv::OpAtomicUMax:
2032                 case spv::OpAtomicAnd:
2033                 case spv::OpAtomicOr:
2034                 case spv::OpAtomicXor:
2035                     worklist.insert(insn.word(3)); /* ptr */
2036                     break;
2037                 case spv::OpStore:
2038                 case spv::OpAtomicStore:
2039                     worklist.insert(insn.word(1)); /* ptr */
2040                     break;
2041                 case spv::OpAccessChain:
2042                 case spv::OpInBoundsAccessChain:
2043                     worklist.insert(insn.word(3)); /* base ptr */
2044                     break;
2045                 case spv::OpSampledImage:
2046                 case spv::OpImageSampleImplicitLod:
2047                 case spv::OpImageSampleExplicitLod:
2048                 case spv::OpImageSampleDrefImplicitLod:
2049                 case spv::OpImageSampleDrefExplicitLod:
2050                 case spv::OpImageSampleProjImplicitLod:
2051                 case spv::OpImageSampleProjExplicitLod:
2052                 case spv::OpImageSampleProjDrefImplicitLod:
2053                 case spv::OpImageSampleProjDrefExplicitLod:
2054                 case spv::OpImageFetch:
2055                 case spv::OpImageGather:
2056                 case spv::OpImageDrefGather:
2057                 case spv::OpImageRead:
2058                 case spv::OpImage:
2059                 case spv::OpImageQueryFormat:
2060                 case spv::OpImageQueryOrder:
2061                 case spv::OpImageQuerySizeLod:
2062                 case spv::OpImageQuerySize:
2063                 case spv::OpImageQueryLod:
2064                 case spv::OpImageQueryLevels:
2065                 case spv::OpImageQuerySamples:
2066                 case spv::OpImageSparseSampleImplicitLod:
2067                 case spv::OpImageSparseSampleExplicitLod:
2068                 case spv::OpImageSparseSampleDrefImplicitLod:
2069                 case spv::OpImageSparseSampleDrefExplicitLod:
2070                 case spv::OpImageSparseSampleProjImplicitLod:
2071                 case spv::OpImageSparseSampleProjExplicitLod:
2072                 case spv::OpImageSparseSampleProjDrefImplicitLod:
2073                 case spv::OpImageSparseSampleProjDrefExplicitLod:
2074                 case spv::OpImageSparseFetch:
2075                 case spv::OpImageSparseGather:
2076                 case spv::OpImageSparseDrefGather:
2077                 case spv::OpImageTexelPointer:
2078                     worklist.insert(insn.word(3)); /* image or sampled image */
2079                     break;
2080                 case spv::OpImageWrite:
2081                     worklist.insert(insn.word(1)); /* image -- different operand order to above */
2082                     break;
2083                 case spv::OpFunctionCall:
2084                     for (uint32_t i = 3; i < insn.len(); i++) {
2085                         worklist.insert(insn.word(i)); /* fn itself, and all args */
2086                     }
2087                     break;
2088 
2089                 case spv::OpExtInst:
2090                     for (uint32_t i = 5; i < insn.len(); i++) {
2091                         worklist.insert(insn.word(i)); /* operands to ext inst */
2092                     }
2093                     break;
2094                 }
2095             }
2096             break;
2097         }
2098     }
2099 
2100     return ids;
2101 }
2102 
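/* Check that every member of a push constant block lies inside a declared push constant range that is visible to the given stage. */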
2103 static bool validate_push_constant_block_against_pipeline(debug_report_data *report_data,
2104                                                           std::vector<VkPushConstantRange> const *push_constant_ranges,
2105                                                           shader_module const *src, spirv_inst_iter type,
2106                                                           VkShaderStageFlagBits stage) {
2107     bool pass = true;
2108 
2109     /* strip off ptrs etc */
2110     type = get_struct_type(src, type, false);
2111     assert(type != src->end());
2112 
2113     /* validate directly off the offsets. this isn't quite correct for arrays
2114      * and matrices, but is a good first step. TODO: arrays, matrices, weird
2115      * sizes */
2116     for (auto insn : *src) {
2117         if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
2118 
2119             if (insn.word(3) == spv::DecorationOffset) {
2120                 unsigned offset = insn.word(4);
2121                 auto size = 4; /* bytes; TODO: calculate this based on the type */
2122 
2123                 bool found_range = false;
2124                 for (auto const &range : *push_constant_ranges) {
2125                     if (range.offset <= offset && range.offset + range.size >= offset + size) {
2126                         found_range = true;
2127 
2128                         if ((range.stageFlags & stage) == 0) {
2129                             if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2130                                         __LINE__, SHADER_CHECKER_PUSH_CONSTANT_NOT_ACCESSIBLE_FROM_STAGE, "SC",
2131                                         "Push constant range covering variable starting at "
2132                                         "offset %u not accessible from stage %s",
2133                                         offset, string_VkShaderStageFlagBits(stage))) {
2134                                 pass = false;
2135                             }
2136                         }
2137 
2138                         break;
2139                     }
2140                 }
2141 
2142                 if (!found_range) {
2143                     if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2144                                 __LINE__, SHADER_CHECKER_PUSH_CONSTANT_OUT_OF_RANGE, "SC",
2145                                 "Push constant range covering variable starting at "
2146                                 "offset %u not declared in layout",
2147                                 offset)) {
2148                         pass = false;
2149                     }
2150                 }
2151             }
2152         }
2153     }
2154 
2155     return pass;
2156 }
2157 
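/* Validate every push constant block reachable from the entrypoint against the pipeline layout's push constant ranges. */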
2158 static bool validate_push_constant_usage(debug_report_data *report_data,
2159                                          std::vector<VkPushConstantRange> const *push_constant_ranges, shader_module const *src,
2160                                          std::unordered_set<uint32_t> accessible_ids, VkShaderStageFlagBits stage) {
2161     bool pass = true;
2162 
2163     for (auto id : accessible_ids) {
2164         auto def_insn = src->get_def(id);
2165         if (def_insn.opcode() == spv::OpVariable && def_insn.word(3) == spv::StorageClassPushConstant) {
2166             pass &= validate_push_constant_block_against_pipeline(report_data, push_constant_ranges, src,
2167                                                                   src->get_def(def_insn.word(1)), stage);
2168         }
2169     }
2170 
2171     return pass;
2172 }
2173 
2174 // For given pipelineLayout verify that the set_layout_node at slot.first
2175 //  has the requested binding at slot.second and return ptr to that binding
2176 static VkDescriptorSetLayoutBinding const * get_descriptor_binding(PIPELINE_LAYOUT_NODE const *pipelineLayout, descriptor_slot_t slot) {
2177 
2178     if (!pipelineLayout)
2179         return nullptr;
2180 
2181     if (slot.first >= pipelineLayout->set_layouts.size())
2182         return nullptr;
2183 
2184     return pipelineLayout->set_layouts[slot.first]->GetDescriptorSetLayoutBindingPtrFromBinding(slot.second);
2185 }
2186 
2187 // Block of code at start here for managing/tracking Pipeline state that this layer cares about
2188 
2189 static uint64_t g_drawCount[NUM_DRAW_TYPES] = {0, 0, 0, 0};
2190 
2191 // TODO : Should be tracking lastBound per commandBuffer and when draws occur, report based on that cmd buffer lastBound
2192 //   Then need to synchronize the accesses based on cmd buffer so that if I'm reading state on one cmd buffer, updates
2193 //   to that same cmd buffer by separate thread are not changing state from underneath us
2194 // Track the last cmd buffer touched by this thread
2195 
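// Return true if any draw command has been recorded into this command buffer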
2196 static bool hasDrawCmd(GLOBAL_CB_NODE *pCB) {
2197     for (uint32_t i = 0; i < NUM_DRAW_TYPES; i++) {
2198         if (pCB->drawCount[i])
2199             return true;
2200     }
2201     return false;
2202 }
2203 
2204 // Check object status for selected flag state
2205 static bool validate_status(layer_data *my_data, GLOBAL_CB_NODE *pNode, CBStatusFlags status_mask, VkFlags msg_flags,
2206                             DRAW_STATE_ERROR error_code, const char *fail_msg) {
2207     if (!(pNode->status & status_mask)) {
2208         return log_msg(my_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2209                        reinterpret_cast<const uint64_t &>(pNode->commandBuffer), __LINE__, error_code, "DS",
2210                        "command buffer object 0x%" PRIxLEAST64 ": %s", reinterpret_cast<const uint64_t &>(pNode->commandBuffer),
2211                        fail_msg);
2212     }
2213     return false;
2214 }
2215 
2216 // Retrieve pipeline node ptr for given pipeline object
2217 static PIPELINE_STATE *getPipelineState(layer_data const *my_data, VkPipeline pipeline) {
2218     auto it = my_data->pipelineMap.find(pipeline);
2219     if (it == my_data->pipelineMap.end()) {
2220         return nullptr;
2221     }
2222     return it->second;
2223 }
2224 
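// Retrieve render pass state ptr for given render pass object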
2225 static RENDER_PASS_STATE *getRenderPassState(layer_data const *my_data, VkRenderPass renderpass) {
2226     auto it = my_data->renderPassMap.find(renderpass);
2227     if (it == my_data->renderPassMap.end()) {
2228         return nullptr;
2229     }
2230     return it->second.get();
2231 }
2232 
2233 static FRAMEBUFFER_STATE *getFramebufferState(const layer_data *my_data, VkFramebuffer framebuffer) {
2234     auto it = my_data->frameBufferMap.find(framebuffer);
2235     if (it == my_data->frameBufferMap.end()) {
2236         return nullptr;
2237     }
2238     return it->second.get();
2239 }
2240 
2241 cvdescriptorset::DescriptorSetLayout const *getDescriptorSetLayout(layer_data const *my_data, VkDescriptorSetLayout dsLayout) {
2242     auto it = my_data->descriptorSetLayoutMap.find(dsLayout);
2243     if (it == my_data->descriptorSetLayoutMap.end()) {
2244         return nullptr;
2245     }
2246     return it->second;
2247 }
2248 
2249 static PIPELINE_LAYOUT_NODE const *getPipelineLayout(layer_data const *my_data, VkPipelineLayout pipeLayout) {
2250     auto it = my_data->pipelineLayoutMap.find(pipeLayout);
2251     if (it == my_data->pipelineLayoutMap.end()) {
2252         return nullptr;
2253     }
2254     return &it->second;
2255 }
2256 
2257 // Return true if for a given PSO, the given state enum is dynamic, else return false
2258 static bool isDynamic(const PIPELINE_STATE *pPipeline, const VkDynamicState state) {
2259     if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
2260         for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
2261             if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i])
2262                 return true;
2263         }
2264     }
2265     return false;
2266 }
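
// Illustrative application-side sketch (an assumption for context, not part of this layer):
// a state is only reported as dynamic above if the pipeline was created with it listed in
// VkPipelineDynamicStateCreateInfo::pDynamicStates, e.g.
//
//     VkDynamicState dynamic_states[] = {VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR};
//     VkPipelineDynamicStateCreateInfo dyn_ci = {};
//     dyn_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
//     dyn_ci.dynamicStateCount = 2;
//     dyn_ci.pDynamicStates = dynamic_states;
//     graphics_pipeline_ci.pDynamicState = &dyn_ci;  // graphics_pipeline_ci is a placeholder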
2267 
2268 // Validate state stored as flags at time of draw call
2269 static bool validate_draw_state_flags(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const PIPELINE_STATE *pPipe, bool indexedDraw) {
2270     bool result = false;
2271     if (pPipe->graphicsPipelineCI.pInputAssemblyState &&
2272         ((pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST) ||
2273          (pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP))) {
2274         result |= validate_status(dev_data, pCB, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2275                                   DRAWSTATE_LINE_WIDTH_NOT_BOUND, "Dynamic line width state not set for this command buffer");
2276     }
2277     if (pPipe->graphicsPipelineCI.pRasterizationState &&
2278         (pPipe->graphicsPipelineCI.pRasterizationState->depthBiasEnable == VK_TRUE)) {
2279         result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BIAS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2280                                   DRAWSTATE_DEPTH_BIAS_NOT_BOUND, "Dynamic depth bias state not set for this command buffer");
2281     }
2282     if (pPipe->blendConstantsEnabled) {
2283         result |= validate_status(dev_data, pCB, CBSTATUS_BLEND_CONSTANTS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2284                                   DRAWSTATE_BLEND_NOT_BOUND, "Dynamic blend constants state not set for this command buffer");
2285     }
2286     if (pPipe->graphicsPipelineCI.pDepthStencilState &&
2287         (pPipe->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) {
2288         result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BOUNDS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2289                                   DRAWSTATE_DEPTH_BOUNDS_NOT_BOUND, "Dynamic depth bounds state not set for this command buffer");
2290     }
2291     if (pPipe->graphicsPipelineCI.pDepthStencilState &&
2292         (pPipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE)) {
2293         result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2294                                   DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil read mask state not set for this command buffer");
2295         result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2296                                   DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil write mask state not set for this command buffer");
2297         result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2298                                   DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil reference state not set for this command buffer");
2299     }
2300     if (indexedDraw) {
2301         result |= validate_status(dev_data, pCB, CBSTATUS_INDEX_BUFFER_BOUND, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2302                                   DRAWSTATE_INDEX_BUFFER_NOT_BOUND,
2303                                   "Index buffer object not bound to this command buffer when Indexed Draw attempted");
2304     }
2305     return result;
2306 }
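
// Illustrative application-side sketch (an assumption for context, not part of this layer):
// each status flag checked above maps to a command the app records before drawing
// (cmd_buf and index_buffer are placeholder handles), e.g.
//
//     vkCmdSetLineWidth(cmd_buf, 1.0f);                                      // line width
//     vkCmdSetDepthBias(cmd_buf, 0.0f, 0.0f, 0.0f);                          // depth bias
//     float blend_constants[4] = {1.0f, 1.0f, 1.0f, 1.0f};
//     vkCmdSetBlendConstants(cmd_buf, blend_constants);                      // blend constants
//     vkCmdSetDepthBounds(cmd_buf, 0.0f, 1.0f);                              // depth bounds
//     vkCmdSetStencilReference(cmd_buf, VK_STENCIL_FRONT_AND_BACK, 0);       // stencil reference
//     vkCmdBindIndexBuffer(cmd_buf, index_buffer, 0, VK_INDEX_TYPE_UINT16);  // for indexed draws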
2307 
2308 // Verify attachment reference compatibility according to spec
2309 //  If one array is larger, treat missing elements of shorter array as VK_ATTACHMENT_UNUSED & other array must match this
2310 //  If both AttachmentReference arrays have requested index, check their corresponding AttachmentDescriptions
2311 //   to make sure that format and samples counts match.
2312 //  If not, they are not compatible.
2313 static bool attachment_references_compatible(const uint32_t index, const VkAttachmentReference *pPrimary,
2314                                              const uint32_t primaryCount, const VkAttachmentDescription *pPrimaryAttachments,
2315                                              const VkAttachmentReference *pSecondary, const uint32_t secondaryCount,
2316                                              const VkAttachmentDescription *pSecondaryAttachments) {
2317     // Check potential NULL cases first to avoid nullptr issues later
2318     if (pPrimary == nullptr) {
2319         if (pSecondary == nullptr) {
2320             return true;
2321         }
2322         return false;
2323     } else if (pSecondary == nullptr) {
2324         return false;
2325     }
2326     if (index >= primaryCount) { // Check secondary as if primary is VK_ATTACHMENT_UNUSED
2327         if (VK_ATTACHMENT_UNUSED == pSecondary[index].attachment)
2328             return true;
2329     } else if (index >= secondaryCount) { // Check primary as if secondary is VK_ATTACHMENT_UNUSED
2330         if (VK_ATTACHMENT_UNUSED == pPrimary[index].attachment)
2331             return true;
2332     } else { // Format and sample count must match
2333         if ((pPrimary[index].attachment == VK_ATTACHMENT_UNUSED) && (pSecondary[index].attachment == VK_ATTACHMENT_UNUSED)) {
2334             return true;
2335         } else if ((pPrimary[index].attachment == VK_ATTACHMENT_UNUSED) || (pSecondary[index].attachment == VK_ATTACHMENT_UNUSED)) {
2336             return false;
2337         }
2338         if ((pPrimaryAttachments[pPrimary[index].attachment].format ==
2339              pSecondaryAttachments[pSecondary[index].attachment].format) &&
2340             (pPrimaryAttachments[pPrimary[index].attachment].samples ==
2341              pSecondaryAttachments[pSecondary[index].attachment].samples))
2342             return true;
2343     }
2344     // Format and sample counts didn't match
2345     return false;
2346 }
2347 // TODO : Scrub verify_renderpass_compatibility() and validateRenderPassCompatibility() and unify them and/or share code
2348 // For given primary RenderPass object and secondary RenderPassCreateInfo, verify that they're compatible
2349 static bool verify_renderpass_compatibility(const layer_data *my_data, const VkRenderPassCreateInfo *primaryRPCI,
2350                                             const VkRenderPassCreateInfo *secondaryRPCI, string &errorMsg) {
2351     if (primaryRPCI->subpassCount != secondaryRPCI->subpassCount) {
2352         stringstream errorStr;
2353         errorStr << "RenderPass for primary cmdBuffer has " << primaryRPCI->subpassCount
2354                  << " subpasses but renderPass for secondary cmdBuffer has " << secondaryRPCI->subpassCount << " subpasses.";
2355         errorMsg = errorStr.str();
2356         return false;
2357     }
2358     uint32_t spIndex = 0;
2359     for (spIndex = 0; spIndex < primaryRPCI->subpassCount; ++spIndex) {
2360         // For each subpass, verify that corresponding color, input, resolve & depth/stencil attachment references are compatible
2361         uint32_t primaryColorCount = primaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
2362         uint32_t secondaryColorCount = secondaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
2363         uint32_t colorMax = std::max(primaryColorCount, secondaryColorCount);
2364         for (uint32_t cIdx = 0; cIdx < colorMax; ++cIdx) {
2365             if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pColorAttachments, primaryColorCount,
2366                                                   primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pColorAttachments,
2367                                                   secondaryColorCount, secondaryRPCI->pAttachments)) {
2368                 stringstream errorStr;
2369                 errorStr << "color attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
2370                 errorMsg = errorStr.str();
2371                 return false;
2372             } else if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pResolveAttachments,
2373                                                          primaryColorCount, primaryRPCI->pAttachments,
2374                                                          secondaryRPCI->pSubpasses[spIndex].pResolveAttachments,
2375                                                          secondaryColorCount, secondaryRPCI->pAttachments)) {
2376                 stringstream errorStr;
2377                 errorStr << "resolve attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
2378                 errorMsg = errorStr.str();
2379                 return false;
2380             }
2381         }
2382 
2383         if (!attachment_references_compatible(0, primaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
2384                                               1, primaryRPCI->pAttachments,
2385                                               secondaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
2386                                               1, secondaryRPCI->pAttachments)) {
2387             stringstream errorStr;
2388             errorStr << "depth/stencil attachments of subpass index " << spIndex << " are not compatible.";
2389             errorMsg = errorStr.str();
2390             return false;
2391         }
2392 
2393         uint32_t primaryInputCount = primaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
2394         uint32_t secondaryInputCount = secondaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
2395         uint32_t inputMax = std::max(primaryInputCount, secondaryInputCount);
2396         for (uint32_t i = 0; i < inputMax; ++i) {
2397             if (!attachment_references_compatible(i, primaryRPCI->pSubpasses[spIndex].pInputAttachments, primaryInputCount,
2398                                                   primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pInputAttachments,
2399                                                   secondaryInputCount, secondaryRPCI->pAttachments)) {
2400                 stringstream errorStr;
2401                 errorStr << "input attachments at index " << i << " of subpass index " << spIndex << " are not compatible.";
2402                 errorMsg = errorStr.str();
2403                 return false;
2404             }
2405         }
2406     }
2407     return true;
2408 }
2409 
2410 // For given cvdescriptorset::DescriptorSet, verify that its Set is compatible w/ the setLayout corresponding to
2411 // pipelineLayout[layoutIndex]
2412 static bool verify_set_layout_compatibility(layer_data *my_data, const cvdescriptorset::DescriptorSet *pSet,
2413                                             PIPELINE_LAYOUT_NODE const *pipeline_layout, const uint32_t layoutIndex,
2414                                             string &errorMsg) {
2415     auto num_sets = pipeline_layout->set_layouts.size();
2416     if (layoutIndex >= num_sets) {
2417         stringstream errorStr;
2418         errorStr << "VkPipelineLayout (" << pipeline_layout->layout << ") only contains " << num_sets
2419                  << " setLayouts corresponding to sets 0-" << num_sets - 1 << ", but you're attempting to bind set to index "
2420                  << layoutIndex;
2421         errorMsg = errorStr.str();
2422         return false;
2423     }
2424     auto layout_node = pipeline_layout->set_layouts[layoutIndex];
2425     return pSet->IsCompatible(layout_node, &errorMsg);
2426 }
2427 
2428 // Validate that data for each specialization entry is fully contained within the buffer.
2429 static bool validate_specialization_offsets(debug_report_data *report_data, VkPipelineShaderStageCreateInfo const *info) {
2430     bool pass = true;
2431 
2432     VkSpecializationInfo const *spec = info->pSpecializationInfo;
2433 
2434     if (spec) {
2435         for (auto i = 0u; i < spec->mapEntryCount; i++) {
2436             if (spec->pMapEntries[i].offset + spec->pMapEntries[i].size > spec->dataSize) {
2437                 if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2438                             /*dev*/ 0, __LINE__, SHADER_CHECKER_BAD_SPECIALIZATION, "SC",
2439                             "Specialization entry %u (for constant id %u) references memory outside provided "
2440                             "specialization data (bytes %u.." PRINTF_SIZE_T_SPECIFIER "; " PRINTF_SIZE_T_SPECIFIER
2441                             " bytes provided)",
2442                             i, spec->pMapEntries[i].constantID, spec->pMapEntries[i].offset,
2443                             spec->pMapEntries[i].offset + spec->pMapEntries[i].size - 1, spec->dataSize)) {
2444 
2445                     pass = false;
2446                 }
2447             }
2448         }
2449     }
2450 
2451     return pass;
2452 }
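
// Illustrative application-side sketch (an assumption for context, not part of this layer):
// the check above requires offset + size <= dataSize for every map entry, e.g.
//
//     const uint32_t sample_count = 4;
//     VkSpecializationMapEntry entry = {7 /*constantID*/, 0 /*offset*/, sizeof(uint32_t) /*size*/};
//     VkSpecializationInfo spec_info = {1, &entry, sizeof(sample_count), &sample_count};
//     stage_create_info.pSpecializationInfo = &spec_info;  // stage_create_info is a placeholder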
2453 
2454 static bool descriptor_type_match(shader_module const *module, uint32_t type_id,
2455                                   VkDescriptorType descriptor_type, unsigned &descriptor_count) {
2456     auto type = module->get_def(type_id);
2457 
2458     descriptor_count = 1;
2459 
2460     /* Strip off any array or ptrs. Where we remove array levels, adjust the
2461      * descriptor count for each dimension. */
2462     while (type.opcode() == spv::OpTypeArray || type.opcode() == spv::OpTypePointer) {
2463         if (type.opcode() == spv::OpTypeArray) {
2464             descriptor_count *= get_constant_value(module, type.word(3));
2465             type = module->get_def(type.word(2));
2466         }
2467         else {
2468             type = module->get_def(type.word(3));
2469         }
2470     }
2471 
2472     switch (type.opcode()) {
2473     case spv::OpTypeStruct: {
2474         for (auto insn : *module) {
2475             if (insn.opcode() == spv::OpDecorate && insn.word(1) == type.word(1)) {
2476                 if (insn.word(2) == spv::DecorationBlock) {
2477                     return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
2478                            descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
2479                 } else if (insn.word(2) == spv::DecorationBufferBlock) {
2480                     return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
2481                            descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
2482                 }
2483             }
2484         }
2485 
2486         /* Invalid */
2487         return false;
2488     }
2489 
2490     case spv::OpTypeSampler:
2491         return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLER ||
2492             descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
2493 
2494     case spv::OpTypeSampledImage:
2495         if (descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) {
2496             /* Slight relaxation for some GLSL historical madness: samplerBuffer
2497              * doesn't really have a sampler, and a texel buffer descriptor
2498              * doesn't really provide one. Allow this slight mismatch.
2499              */
2500             auto image_type = module->get_def(type.word(2));
2501             auto dim = image_type.word(3);
2502             auto sampled = image_type.word(7);
2503             return dim == spv::DimBuffer && sampled == 1;
2504         }
2505         return descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
2506 
2507     case spv::OpTypeImage: {
2508         /* Many descriptor types can back an image type -- it depends on the dimension
2509          * and whether the image will be used with a sampler. SPIR-V for
2510          * Vulkan requires that sampled be 1 or 2 -- leaving the decision to
2511          * runtime is unacceptable.
2512          */
2513         auto dim = type.word(3);
2514         auto sampled = type.word(7);
2515 
2516         if (dim == spv::DimSubpassData) {
2517             return descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
2518         } else if (dim == spv::DimBuffer) {
2519             if (sampled == 1) {
2520                 return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
2521             } else {
2522                 return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
2523             }
2524         } else if (sampled == 1) {
2525             return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE ||
2526                 descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
2527         } else {
2528             return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
2529         }
2530     }
2531 
2532     /* We shouldn't really see any other junk types -- but if we do, they're
2533      * a mismatch.
2534      */
2535     default:
2536         return false; /* Mismatch */
2537     }
2538 }
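
// For reference, the matching above corresponds roughly to these GLSL declarations
// (informal summary, not exhaustive):
//     uniform Block { ... }  -> UNIFORM_BUFFER / UNIFORM_BUFFER_DYNAMIC (Block decoration)
//     buffer Block { ... }   -> STORAGE_BUFFER / STORAGE_BUFFER_DYNAMIC (BufferBlock decoration)
//     sampler                -> SAMPLER or COMBINED_IMAGE_SAMPLER
//     sampler2D              -> COMBINED_IMAGE_SAMPLER (samplerBuffer also accepts UNIFORM_TEXEL_BUFFER)
//     texture2D (sampled=1)  -> SAMPLED_IMAGE or COMBINED_IMAGE_SAMPLER
//     image2D (sampled=2)    -> STORAGE_IMAGE
//     subpassInput           -> INPUT_ATTACHMENT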
2539 
2540 static bool require_feature(debug_report_data *report_data, VkBool32 feature, char const *feature_name) {
2541     if (!feature) {
2542         if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2543                     __LINE__, SHADER_CHECKER_FEATURE_NOT_ENABLED, "SC",
2544                     "Shader requires VkPhysicalDeviceFeatures::%s but is not "
2545                     "enabled on the device",
2546                     feature_name)) {
2547             return false;
2548         }
2549     }
2550 
2551     return true;
2552 }
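
// Illustrative application-side sketch (an assumption for context, not part of this layer):
// a feature flagged by this check must be requested when the device is created, e.g.
//
//     VkPhysicalDeviceFeatures supported = {}, enabled = {};
//     vkGetPhysicalDeviceFeatures(physical_device, &supported);
//     enabled.geometryShader = supported.geometryShader;   // enable only what is supported
//     device_create_info.pEnabledFeatures = &enabled;      // device_create_info is a placeholder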
2553 
2554 static bool validate_shader_capabilities(debug_report_data *report_data, shader_module const *src,
2555                                          VkPhysicalDeviceFeatures const *enabledFeatures) {
2556     bool pass = true;
2557 
2558 
2559     for (auto insn : *src) {
2560         if (insn.opcode() == spv::OpCapability) {
2561             switch (insn.word(1)) {
2562             case spv::CapabilityMatrix:
2563             case spv::CapabilityShader:
2564             case spv::CapabilityInputAttachment:
2565             case spv::CapabilitySampled1D:
2566             case spv::CapabilityImage1D:
2567             case spv::CapabilitySampledBuffer:
2568             case spv::CapabilityImageBuffer:
2569             case spv::CapabilityImageQuery:
2570             case spv::CapabilityDerivativeControl:
2571                 // Always supported by a Vulkan 1.0 implementation -- no feature bits.
2572                 break;
2573 
2574             case spv::CapabilityGeometry:
2575                 pass &= require_feature(report_data, enabledFeatures->geometryShader, "geometryShader");
2576                 break;
2577 
2578             case spv::CapabilityTessellation:
2579                 pass &= require_feature(report_data, enabledFeatures->tessellationShader, "tessellationShader");
2580                 break;
2581 
2582             case spv::CapabilityFloat64:
2583                 pass &= require_feature(report_data, enabledFeatures->shaderFloat64, "shaderFloat64");
2584                 break;
2585 
2586             case spv::CapabilityInt64:
2587                 pass &= require_feature(report_data, enabledFeatures->shaderInt64, "shaderInt64");
2588                 break;
2589 
2590             case spv::CapabilityTessellationPointSize:
2591             case spv::CapabilityGeometryPointSize:
2592                 pass &= require_feature(report_data, enabledFeatures->shaderTessellationAndGeometryPointSize,
2593                                         "shaderTessellationAndGeometryPointSize");
2594                 break;
2595 
2596             case spv::CapabilityImageGatherExtended:
2597                 pass &= require_feature(report_data, enabledFeatures->shaderImageGatherExtended, "shaderImageGatherExtended");
2598                 break;
2599 
2600             case spv::CapabilityStorageImageMultisample:
2601                 pass &= require_feature(report_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
2602                 break;
2603 
2604             case spv::CapabilityUniformBufferArrayDynamicIndexing:
2605                 pass &= require_feature(report_data, enabledFeatures->shaderUniformBufferArrayDynamicIndexing,
2606                                         "shaderUniformBufferArrayDynamicIndexing");
2607                 break;
2608 
2609             case spv::CapabilitySampledImageArrayDynamicIndexing:
2610                 pass &= require_feature(report_data, enabledFeatures->shaderSampledImageArrayDynamicIndexing,
2611                                         "shaderSampledImageArrayDynamicIndexing");
2612                 break;
2613 
2614             case spv::CapabilityStorageBufferArrayDynamicIndexing:
2615                 pass &= require_feature(report_data, enabledFeatures->shaderStorageBufferArrayDynamicIndexing,
2616                                         "shaderStorageBufferArrayDynamicIndexing");
2617                 break;
2618 
2619             case spv::CapabilityStorageImageArrayDynamicIndexing:
2620                 pass &= require_feature(report_data, enabledFeatures->shaderStorageImageArrayDynamicIndexing,
2621                                         "shaderStorageImageArrayDynamicIndexing");
2622                 break;
2623 
2624             case spv::CapabilityClipDistance:
2625                 pass &= require_feature(report_data, enabledFeatures->shaderClipDistance, "shaderClipDistance");
2626                 break;
2627 
2628             case spv::CapabilityCullDistance:
2629                 pass &= require_feature(report_data, enabledFeatures->shaderCullDistance, "shaderCullDistance");
2630                 break;
2631 
2632             case spv::CapabilityImageCubeArray:
2633                 pass &= require_feature(report_data, enabledFeatures->imageCubeArray, "imageCubeArray");
2634                 break;
2635 
2636             case spv::CapabilitySampleRateShading:
2637                 pass &= require_feature(report_data, enabledFeatures->sampleRateShading, "sampleRateShading");
2638                 break;
2639 
2640             case spv::CapabilitySparseResidency:
2641                 pass &= require_feature(report_data, enabledFeatures->shaderResourceResidency, "shaderResourceResidency");
2642                 break;
2643 
2644             case spv::CapabilityMinLod:
2645                 pass &= require_feature(report_data, enabledFeatures->shaderResourceMinLod, "shaderResourceMinLod");
2646                 break;
2647 
2648             case spv::CapabilitySampledCubeArray:
2649                 pass &= require_feature(report_data, enabledFeatures->imageCubeArray, "imageCubeArray");
2650                 break;
2651 
2652             case spv::CapabilityImageMSArray:
2653                 pass &= require_feature(report_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
2654                 break;
2655 
2656             case spv::CapabilityStorageImageExtendedFormats:
2657                 pass &= require_feature(report_data, enabledFeatures->shaderStorageImageExtendedFormats,
2658                                         "shaderStorageImageExtendedFormats");
2659                 break;
2660 
2661             case spv::CapabilityInterpolationFunction:
2662                 pass &= require_feature(report_data, enabledFeatures->sampleRateShading, "sampleRateShading");
2663                 break;
2664 
2665             case spv::CapabilityStorageImageReadWithoutFormat:
2666                 pass &= require_feature(report_data, enabledFeatures->shaderStorageImageReadWithoutFormat,
2667                                         "shaderStorageImageReadWithoutFormat");
2668                 break;
2669 
2670             case spv::CapabilityStorageImageWriteWithoutFormat:
2671                 pass &= require_feature(report_data, enabledFeatures->shaderStorageImageWriteWithoutFormat,
2672                                         "shaderStorageImageWriteWithoutFormat");
2673                 break;
2674 
2675             case spv::CapabilityMultiViewport:
2676                 pass &= require_feature(report_data, enabledFeatures->multiViewport, "multiViewport");
2677                 break;
2678 
2679             default:
2680                 if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2681                             __LINE__, SHADER_CHECKER_BAD_CAPABILITY, "SC",
2682                             "Shader declares capability %u, not supported in Vulkan.",
2683                             insn.word(1)))
2684                     pass = false;
2685                 break;
2686             }
2687         }
2688     }
2689 
2690     return pass;
2691 }
2692 
2693 
2694 static uint32_t descriptor_type_to_reqs(shader_module const *module, uint32_t type_id) {
2695     auto type = module->get_def(type_id);
2696 
2697     while (true) {
2698         switch (type.opcode()) {
2699         case spv::OpTypeArray:
2700         case spv::OpTypeSampledImage:
2701             type = module->get_def(type.word(2));
2702             break;
2703         case spv::OpTypePointer:
2704             type = module->get_def(type.word(3));
2705             break;
2706         case spv::OpTypeImage: {
2707             auto dim = type.word(3);
2708             auto arrayed = type.word(5);
2709             auto msaa = type.word(6);
2710 
2711             switch (dim) {
2712             case spv::Dim1D:
2713                 return arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_1D_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_1D;
2714             case spv::Dim2D:
2715                 return (msaa ? DESCRIPTOR_REQ_MULTI_SAMPLE : DESCRIPTOR_REQ_SINGLE_SAMPLE) |
2716                     (arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_2D_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_2D);
2717             case spv::Dim3D:
2718                 return DESCRIPTOR_REQ_VIEW_TYPE_3D;
2719             case spv::DimCube:
2720                 return arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_CUBE_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_CUBE;
2721             case spv::DimSubpassData:
2722                 return msaa ? DESCRIPTOR_REQ_MULTI_SAMPLE : DESCRIPTOR_REQ_SINGLE_SAMPLE;
2723             default:  // buffer, etc.
2724                 return 0;
2725             }
2726         }
2727         default:
2728             return 0;
2729         }
2730     }
2731 }
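
// For reference, the requirement bits produced above pair a shader image declaration with the
// image view it expects at draw time (informal summary):
//     Dim1D          -> VIEW_TYPE_1D or VIEW_TYPE_1D_ARRAY
//     Dim2D          -> VIEW_TYPE_2D or VIEW_TYPE_2D_ARRAY, plus SINGLE_SAMPLE or MULTI_SAMPLE
//     Dim3D          -> VIEW_TYPE_3D
//     DimCube        -> VIEW_TYPE_CUBE or VIEW_TYPE_CUBE_ARRAY
//     DimSubpassData -> only the sample-count requirement
//     DimBuffer etc. -> no view requirement (0)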
2732 
2733 static bool
2734 validate_pipeline_shader_stage(debug_report_data *report_data, VkPipelineShaderStageCreateInfo const *pStage,
2735                                PIPELINE_STATE *pipeline, shader_module **out_module, spirv_inst_iter *out_entrypoint,
2736                                VkPhysicalDeviceFeatures const *enabledFeatures,
2737                                std::unordered_map<VkShaderModule, std::unique_ptr<shader_module>> const &shaderModuleMap) {
2738     bool pass = true;
2739     auto module_it = shaderModuleMap.find(pStage->module);
2740     auto module = *out_module = module_it->second.get();
2741 
2742     /* find the entrypoint */
2743     auto entrypoint = *out_entrypoint = find_entrypoint(module, pStage->pName, pStage->stage);
2744     if (entrypoint == module->end()) {
2745         if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2746                     __LINE__, SHADER_CHECKER_MISSING_ENTRYPOINT, "SC",
2747                     "No entrypoint found named `%s` for stage %s", pStage->pName,
2748                     string_VkShaderStageFlagBits(pStage->stage))) {
2749             return false;   // no point continuing beyond here, any analysis is just going to be garbage.
2750         }
2751     }
2752 
2753     /* validate shader capabilities against enabled device features */
2754     pass &= validate_shader_capabilities(report_data, module, enabledFeatures);
2755 
2756     /* mark accessible ids */
2757     auto accessible_ids = mark_accessible_ids(module, entrypoint);
2758 
2759     /* validate descriptor set layout against what the entrypoint actually uses */
2760     auto descriptor_uses = collect_interface_by_descriptor_slot(report_data, module, accessible_ids);
2761 
2762     auto pipelineLayout = pipeline->pipeline_layout;
2763 
2764     pass &= validate_specialization_offsets(report_data, pStage);
2765     pass &= validate_push_constant_usage(report_data, &pipelineLayout.push_constant_ranges, module, accessible_ids, pStage->stage);
2766 
2767     /* validate descriptor use */
2768     for (auto use : descriptor_uses) {
2769         // While validating shaders capture which slots are used by the pipeline
2770         auto & reqs = pipeline->active_slots[use.first.first][use.first.second];
2771         reqs = descriptor_req(reqs | descriptor_type_to_reqs(module, use.second.type_id));
2772 
2773         /* verify given pipelineLayout has requested setLayout with requested binding */
2774         const auto &binding = get_descriptor_binding(&pipelineLayout, use.first);
2775         unsigned required_descriptor_count;
2776 
2777         if (!binding) {
2778             if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2779                         __LINE__, SHADER_CHECKER_MISSING_DESCRIPTOR, "SC",
2780                         "Shader uses descriptor slot %u.%u (used as type `%s`) but not declared in pipeline layout",
2781                         use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str())) {
2782                 pass = false;
2783             }
2784         } else if (~binding->stageFlags & pStage->stage) {
2785             if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2786                         /*dev*/ 0, __LINE__, SHADER_CHECKER_DESCRIPTOR_NOT_ACCESSIBLE_FROM_STAGE, "SC",
2787                         "Shader uses descriptor slot %u.%u (used "
2788                         "as type `%s`) but descriptor not "
2789                         "accessible from stage %s",
2790                         use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
2791                         string_VkShaderStageFlagBits(pStage->stage))) {
2792                 pass = false;
2793             }
2794         } else if (!descriptor_type_match(module, use.second.type_id, binding->descriptorType,
2795                                           /*out*/ required_descriptor_count)) {
2796             if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2797                         SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC", "Type mismatch on descriptor slot "
2798                                                                        "%u.%u (used as type `%s`) but "
2799                                                                        "descriptor of type %s",
2800                         use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
2801                         string_VkDescriptorType(binding->descriptorType))) {
2802                 pass = false;
2803             }
2804         } else if (binding->descriptorCount < required_descriptor_count) {
2805             if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2806                         SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
2807                         "Shader expects at least %u descriptors for binding %u.%u (used as type `%s`) but only %u provided",
2808                         required_descriptor_count, use.first.first, use.first.second,
2809                         describe_type(module, use.second.type_id).c_str(), binding->descriptorCount)) {
2810                 pass = false;
2811             }
2812         }
2813     }
2814 
2815     /* validate use of input attachments against subpass structure */
2816     if (pStage->stage == VK_SHADER_STAGE_FRAGMENT_BIT) {
2817         auto input_attachment_uses = collect_interface_by_input_attachment_index(report_data, module, accessible_ids);
2818 
2819         auto rpci = pipeline->render_pass_ci.ptr();
2820         auto subpass = pipeline->graphicsPipelineCI.subpass;
2821 
2822         for (auto use : input_attachment_uses) {
2823             auto input_attachments = rpci->pSubpasses[subpass].pInputAttachments;
2824             auto index = (input_attachments && use.first < rpci->pSubpasses[subpass].inputAttachmentCount) ?
2825                     input_attachments[use.first].attachment : VK_ATTACHMENT_UNUSED;
2826 
2827             if (index == VK_ATTACHMENT_UNUSED) {
2828                 if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2829                             SHADER_CHECKER_MISSING_INPUT_ATTACHMENT, "SC",
2830                             "Shader consumes input attachment index %d but not provided in subpass",
2831                             use.first)) {
2832                     pass = false;
2833                 }
2834             }
2835             else if (get_format_type(rpci->pAttachments[index].format) !=
2836                     get_fundamental_type(module, use.second.type_id)) {
2837                 if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2838                             SHADER_CHECKER_INPUT_ATTACHMENT_TYPE_MISMATCH, "SC",
2839                             "Subpass input attachment %u format of %s does not match type used in shader `%s`",
2840                             use.first, string_VkFormat(rpci->pAttachments[index].format),
2841                             describe_type(module, use.second.type_id).c_str())) {
2842                     pass = false;
2843                 }
2844             }
2845         }
2846     }
2847 
2848     return pass;
2849 }
2850 
2851 
2852 // Validate the shaders used by the given pipeline and store the active_slots
2853 //  that are actually used by the pipeline into pPipeline->active_slots
2854 static bool
2855 validate_and_capture_pipeline_shader_state(debug_report_data *report_data, PIPELINE_STATE *pPipeline,
2856                                            VkPhysicalDeviceFeatures const *enabledFeatures,
2857                                            std::unordered_map<VkShaderModule, unique_ptr<shader_module>> const &shaderModuleMap) {
2858     auto pCreateInfo = pPipeline->graphicsPipelineCI.ptr();
2859     int vertex_stage = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
2860     int fragment_stage = get_shader_stage_id(VK_SHADER_STAGE_FRAGMENT_BIT);
2861 
2862     shader_module *shaders[5];
2863     memset(shaders, 0, sizeof(shaders));
2864     spirv_inst_iter entrypoints[5];
2865     memset(entrypoints, 0, sizeof(entrypoints));
2866     VkPipelineVertexInputStateCreateInfo const *vi = 0;
2867     bool pass = true;
2868 
2869     for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
2870         auto pStage = &pCreateInfo->pStages[i];
2871         auto stage_id = get_shader_stage_id(pStage->stage);
2872         pass &= validate_pipeline_shader_stage(report_data, pStage, pPipeline,
2873                                                &shaders[stage_id], &entrypoints[stage_id],
2874                                                enabledFeatures, shaderModuleMap);
2875     }
2876 
2877     // if the shader stages are no good individually, cross-stage validation is pointless.
2878     if (!pass)
2879         return false;
2880 
2881     vi = pCreateInfo->pVertexInputState;
2882 
2883     if (vi) {
2884         pass &= validate_vi_consistency(report_data, vi);
2885     }
2886 
2887     if (shaders[vertex_stage]) {
2888         pass &= validate_vi_against_vs_inputs(report_data, vi, shaders[vertex_stage], entrypoints[vertex_stage]);
2889     }
2890 
2891     int producer = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
2892     int consumer = get_shader_stage_id(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
2893 
2894     while (!shaders[producer] && producer != fragment_stage) {
2895         producer++;
2896         consumer++;
2897     }
2898 
2899     for (; producer != fragment_stage && consumer <= fragment_stage; consumer++) {
2900         assert(shaders[producer]);
2901         if (shaders[consumer]) {
2902             pass &= validate_interface_between_stages(report_data,
2903                                                       shaders[producer], entrypoints[producer], &shader_stage_attribs[producer],
2904                                                       shaders[consumer], entrypoints[consumer], &shader_stage_attribs[consumer]);
2905 
2906             producer = consumer;
2907         }
2908     }
2909 
2910     if (shaders[fragment_stage]) {
2911         pass &= validate_fs_outputs_against_render_pass(report_data, shaders[fragment_stage], entrypoints[fragment_stage],
2912                                                         pPipeline->render_pass_ci.ptr(), pCreateInfo->subpass);
2913     }
2914 
2915     return pass;
2916 }
2917 
2918 static bool validate_compute_pipeline(debug_report_data *report_data, PIPELINE_STATE *pPipeline,
2919                                       VkPhysicalDeviceFeatures const *enabledFeatures,
2920                                       std::unordered_map<VkShaderModule, unique_ptr<shader_module>> const &shaderModuleMap) {
2921     auto pCreateInfo = pPipeline->computePipelineCI.ptr();
2922 
2923     shader_module *module;
2924     spirv_inst_iter entrypoint;
2925 
2926     return validate_pipeline_shader_stage(report_data, &pCreateInfo->stage, pPipeline,
2927                                           &module, &entrypoint, enabledFeatures, shaderModuleMap);
2928 }
2929 // Return Set node ptr for specified set or else NULL
2930 cvdescriptorset::DescriptorSet *getSetNode(const layer_data *my_data, VkDescriptorSet set) {
2931     auto set_it = my_data->setMap.find(set);
2932     if (set_it == my_data->setMap.end()) {
2933         return NULL;
2934     }
2935     return set_it->second;
2936 }
2937 // For the given command buffer, verify and update the state for activeSetBindingsPairs
2938 //  This includes:
2939 //  1. Verifying that any dynamic descriptor in that set has a valid dynamic offset bound.
2940 //     To be valid, the dynamic offset combined with the offset and range from its
2941 //     descriptor update must not overflow the size of its buffer being updated
2942 //  2. Grow updateImages for given pCB to include any bound STORAGE_IMAGE descriptor images
2943 //  3. Grow updateBuffers for pCB to include buffers from STORAGE*_BUFFER descriptor buffers
2944 static bool validate_and_update_drawtime_descriptor_state(
2945     layer_data *dev_data, GLOBAL_CB_NODE *pCB,
2946     const vector<std::tuple<cvdescriptorset::DescriptorSet *, std::map<uint32_t, descriptor_req>, std::vector<uint32_t> const *>>
2947         &activeSetBindingsPairs,
2948     const char *function) {
2949     bool result = false;
2950     for (auto set_bindings_pair : activeSetBindingsPairs) {
2951         cvdescriptorset::DescriptorSet *set_node = std::get<0>(set_bindings_pair);
2952         std::string err_str;
2953         if (!set_node->ValidateDrawState(std::get<1>(set_bindings_pair), *std::get<2>(set_bindings_pair),
2954                                          &err_str)) {
2955             // Report error here
2956             auto set = set_node->GetSet();
2957             result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
2958                               reinterpret_cast<const uint64_t &>(set), __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
2959                               "Descriptor set 0x%" PRIxLEAST64 " encountered the following validation error at %s() time: %s",
2960                               reinterpret_cast<const uint64_t &>(set), function, err_str.c_str());
2961         }
2962         set_node->GetStorageUpdates(std::get<1>(set_bindings_pair), &pCB->updateBuffers, &pCB->updateImages);
2963     }
2964     return result;
2965 }
2966 
2967 // For given pipeline, return number of MSAA samples, or one if MSAA disabled
2968 static VkSampleCountFlagBits getNumSamples(PIPELINE_STATE const *pipe) {
2969     if (pipe->graphicsPipelineCI.pMultisampleState != NULL &&
2970         VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pipe->graphicsPipelineCI.pMultisampleState->sType) {
2971         return pipe->graphicsPipelineCI.pMultisampleState->rasterizationSamples;
2972     }
2973     return VK_SAMPLE_COUNT_1_BIT;
2974 }
2975 
2976 static void list_bits(std::ostream& s, uint32_t bits) {
2977     for (int i = 0; i < 32 && bits; i++) {
2978         if (bits & (1 << i)) {
2979             s << i;
2980             bits &= ~(1 << i);
2981             if (bits) {
2982                 s << ",";
2983             }
2984         }
2985     }
2986 }
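
// e.g. list_bits(ss, 0x0B) appends "0,1,3" -- used below to report exactly which dynamic
// viewports/scissors were never set on the command buffer.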
2987 
2988 // Validate draw-time state related to the PSO
2989 static bool validatePipelineDrawtimeState(layer_data const *my_data, LAST_BOUND_STATE const &state, const GLOBAL_CB_NODE *pCB,
2990                                           PIPELINE_STATE const *pPipeline) {
2991     bool skip_call = false;
2992 
2993     // Verify vertex binding
2994     if (pPipeline->vertexBindingDescriptions.size() > 0) {
2995         for (size_t i = 0; i < pPipeline->vertexBindingDescriptions.size(); i++) {
2996             auto vertex_binding = pPipeline->vertexBindingDescriptions[i].binding;
2997             if ((pCB->currentDrawData.buffers.size() < (vertex_binding + 1)) ||
2998                 (pCB->currentDrawData.buffers[vertex_binding] == VK_NULL_HANDLE)) {
2999                 skip_call |= log_msg(
3000                     my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3001                     DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
3002                     "The Pipeline State Object (0x%" PRIxLEAST64 ") expects that this Command Buffer's vertex binding Index %u "
3003                     "should be set via vkCmdBindVertexBuffers. This is because VkVertexInputBindingDescription struct "
3004                     "at index " PRINTF_SIZE_T_SPECIFIER " of pVertexBindingDescriptions has a binding value of %u.",
3005                     (uint64_t)state.pipeline_state->pipeline, vertex_binding, i, vertex_binding);
3006             }
3007         }
3008     } else {
3009         if (!pCB->currentDrawData.buffers.empty()) {
3010             skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
3011                                  0, __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
3012                                  "Vertex buffers are bound to command buffer (0x%" PRIxLEAST64
3013                                  ") but no vertex buffers are attached to this Pipeline State Object (0x%" PRIxLEAST64 ").",
3014                                  (uint64_t)pCB->commandBuffer, (uint64_t)state.pipeline_state->pipeline);
3015         }
3016     }
3017     // If Viewport or scissors are dynamic, verify that dynamic count matches PSO count.
3018     // Skip check if rasterization is disabled or there is no viewport.
3019     if ((!pPipeline->graphicsPipelineCI.pRasterizationState ||
3020          (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) &&
3021         pPipeline->graphicsPipelineCI.pViewportState) {
3022         bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
3023         bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
3024 
3025         if (dynViewport) {
3026             auto requiredViewportsMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->viewportCount) - 1;
3027             auto missingViewportMask = ~pCB->viewportMask & requiredViewportsMask;
3028             if (missingViewportMask) {
3029                 std::stringstream ss;
3030                 ss << "Dynamic viewport(s) ";
3031                 list_bits(ss, missingViewportMask);
3032                 ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetViewport().";
3033                 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
3034                                      __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3035                                      "%s", ss.str().c_str());
3036             }
3037         }
3038 
3039         if (dynScissor) {
3040             auto requiredScissorMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->scissorCount) - 1;
3041             auto missingScissorMask = ~pCB->scissorMask & requiredScissorMask;
3042             if (missingScissorMask) {
3043                 std::stringstream ss;
3044                 ss << "Dynamic scissor(s) ";
3045                 list_bits(ss, missingScissorMask);
3046                 ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetScissor().";
3047                 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
3048                                      __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3049                                      "%s", ss.str().c_str());
3050             }
3051         }
3052     }
3053 
3054     // Verify that any MSAA request in PSO matches sample# in bound FB
3055     // Skip the check if rasterization is disabled.
3056     if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
3057         (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
3058         VkSampleCountFlagBits pso_num_samples = getNumSamples(pPipeline);
3059         if (pCB->activeRenderPass) {
3060             auto const render_pass_info = pCB->activeRenderPass->createInfo.ptr();
3061             const VkSubpassDescription *subpass_desc = &render_pass_info->pSubpasses[pCB->activeSubpass];
3062             uint32_t i;
3063 
3064             const safe_VkPipelineColorBlendStateCreateInfo *color_blend_state = pPipeline->graphicsPipelineCI.pColorBlendState;
3065             if ((color_blend_state != NULL) && (pCB->activeSubpass == pPipeline->graphicsPipelineCI.subpass) &&
3066                 (color_blend_state->attachmentCount != subpass_desc->colorAttachmentCount)) {
3067                 skip_call |=
3068                         log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
3069                                 reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
3070                                 "Render pass subpass %u mismatch with blending state defined and blend state attachment "
3071                                 "count %u while subpass color attachment count %u in Pipeline (0x%" PRIxLEAST64 ")!  These "
3072                                 "must be the same at draw-time.",
3073                                 pCB->activeSubpass, color_blend_state->attachmentCount, subpass_desc->colorAttachmentCount,
3074                                 reinterpret_cast<const uint64_t &>(pPipeline->pipeline));
3075             }
3076 
3077             unsigned subpass_num_samples = 0;
3078 
3079             for (i = 0; i < subpass_desc->colorAttachmentCount; i++) {
3080                 auto attachment = subpass_desc->pColorAttachments[i].attachment;
3081                 if (attachment != VK_ATTACHMENT_UNUSED)
3082                     subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
3083             }
3084 
3085             if (subpass_desc->pDepthStencilAttachment &&
3086                 subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
3087                 auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
3088                 subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
3089             }
3090 
3091             if (subpass_num_samples && static_cast<unsigned>(pso_num_samples) != subpass_num_samples) {
3092                 skip_call |=
3093                         log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
3094                                 reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
3095                                 "Num samples mismatch! At draw-time in Pipeline (0x%" PRIxLEAST64
3096                                 ") with %u samples while current RenderPass (0x%" PRIxLEAST64 ") w/ %u samples!",
3097                                 reinterpret_cast<const uint64_t &>(pPipeline->pipeline), pso_num_samples,
3098                                 reinterpret_cast<const uint64_t &>(pCB->activeRenderPass->renderPass), subpass_num_samples);
3099             }
3100         } else {
3101             skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
3102                                  reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
3103                                  "No active render pass found at draw-time in Pipeline (0x%" PRIxLEAST64 ")!",
3104                                  reinterpret_cast<const uint64_t &>(pPipeline->pipeline));
3105         }
3106     }
3107     // Verify that PSO creation renderPass is compatible with active renderPass
3108     if (pCB->activeRenderPass) {
3109         std::string err_string;
3110         if ((pCB->activeRenderPass->renderPass != pPipeline->graphicsPipelineCI.renderPass) &&
3111             !verify_renderpass_compatibility(my_data, pCB->activeRenderPass->createInfo.ptr(), pPipeline->render_pass_ci.ptr(),
3112                                              err_string)) {
3113             // renderPass that PSO was created with must be compatible with active renderPass that PSO is being used with
3114             skip_call |=
3115                 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
3116                         reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
3117                         "At Draw time the active render pass (0x%" PRIxLEAST64 ") is incompatible w/ gfx pipeline "
3118                         "(0x%" PRIxLEAST64 ") that was created w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
3119                         reinterpret_cast<uint64_t &>(pCB->activeRenderPass->renderPass),
3120                         reinterpret_cast<uint64_t const &>(pPipeline->pipeline),
3121                         reinterpret_cast<const uint64_t &>(pPipeline->graphicsPipelineCI.renderPass), err_string.c_str());
3122         }
3123 
3124         if (pPipeline->graphicsPipelineCI.subpass != pCB->activeSubpass) {
3125             skip_call |=
3126                 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
3127                         reinterpret_cast<uint64_t const &>(pPipeline->pipeline), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
3128                         "Pipeline was built for subpass %u but used in subpass %u", pPipeline->graphicsPipelineCI.subpass,
3129                         pCB->activeSubpass);
3130         }
3131     }
3132     // TODO : Add more checks here
3133 
3134     return skip_call;
3135 }
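
// Illustrative application-side sketch (an assumption for context, not part of this layer):
// with dynamic viewport/scissor, the indices set on the command buffer must cover the PSO's
// viewportCount/scissorCount checked above, e.g. for a single-viewport pipeline
// (cmd_buf is a placeholder handle):
//
//     VkViewport viewport = {0.0f, 0.0f, 800.0f, 600.0f, 0.0f, 1.0f};
//     VkRect2D scissor = {{0, 0}, {800, 600}};
//     vkCmdSetViewport(cmd_buf, 0, 1, &viewport);
//     vkCmdSetScissor(cmd_buf, 0, 1, &scissor);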
3136 
3137 // Validate overall state at the time of a draw call
3138 static bool validate_and_update_draw_state(layer_data *my_data, GLOBAL_CB_NODE *cb_node, const bool indexedDraw,
3139                                            const VkPipelineBindPoint bindPoint, const char *function) {
3140     bool result = false;
3141     auto const &state = cb_node->lastBound[bindPoint];
3142     PIPELINE_STATE *pPipe = state.pipeline_state;
3143     if (nullptr == pPipe) {
3144         result |= log_msg(
3145             my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
3146             DRAWSTATE_INVALID_PIPELINE, "DS",
3147             "At Draw/Dispatch time no valid VkPipeline is bound! This is illegal. Please bind one with vkCmdBindPipeline().");
3148         // Early return unconditionally: every check below dereferences the pipeline pointer, so
3149         // continuing without one would crash regardless of what the debug callback requested.
3150         return result;
3151     }
3152     // First check flag states
3153     if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint)
3154         result = validate_draw_state_flags(my_data, cb_node, pPipe, indexedDraw);
3155 
3156     // Now complete other state checks
3157     if (VK_NULL_HANDLE != state.pipeline_layout.layout) {
3158         string errorString;
3159         auto pipeline_layout = pPipe->pipeline_layout;
3160 
3161         // Need a vector (vs. std::set) of active Sets for dynamicOffset validation in case same set bound w/ different offsets
3162         vector<std::tuple<cvdescriptorset::DescriptorSet *, std::map<uint32_t, descriptor_req>, std::vector<uint32_t> const *>>
3163             activeSetBindingsPairs;
3164         for (auto & setBindingPair : pPipe->active_slots) {
3165             uint32_t setIndex = setBindingPair.first;
3166             // If a valid set is not bound, flag an error
3167             if ((state.boundDescriptorSets.size() <= setIndex) || (!state.boundDescriptorSets[setIndex])) {
3168                 result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3169                                   DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND, "DS",
3170                                   "VkPipeline 0x%" PRIxLEAST64 " uses set #%u but that set is not bound.", (uint64_t)pPipe->pipeline,
3171                                   setIndex);
3172             } else if (!verify_set_layout_compatibility(my_data, state.boundDescriptorSets[setIndex], &pipeline_layout, setIndex,
3173                                                         errorString)) {
3174                 // Set is bound but not compatible w/ overlapping pipeline_layout from PSO
3175                 VkDescriptorSet setHandle = state.boundDescriptorSets[setIndex]->GetSet();
3176                 result |=
3177                     log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3178                             (uint64_t)setHandle, __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
3179                             "VkDescriptorSet (0x%" PRIxLEAST64
3180                             ") bound as set #%u is not compatible with overlapping VkPipelineLayout 0x%" PRIxLEAST64 " due to: %s",
3181                             reinterpret_cast<uint64_t &>(setHandle), setIndex, reinterpret_cast<uint64_t &>(pipeline_layout.layout),
3182                             errorString.c_str());
3183             } else { // Valid set is bound and layout compatible, validate that it's updated
3184                 // Pull the set node
3185                 cvdescriptorset::DescriptorSet *pSet = state.boundDescriptorSets[setIndex];
3186                 // Gather active bindings
3187                 std::unordered_set<uint32_t> bindings;
3188                 for (auto binding : setBindingPair.second) {
3189                     bindings.insert(binding.first);
3190                 }
3191                 // Bind this set and its active descriptor resources to the command buffer
3192                 pSet->BindCommandBuffer(cb_node, bindings);
3193                 // Save vector of all active sets to verify dynamicOffsets below
3194                 activeSetBindingsPairs.push_back(std::make_tuple(pSet, setBindingPair.second, &state.dynamicOffsets[setIndex]));
3195                 // Make sure set has been updated if it has no immutable samplers
3196                 //  If it has immutable samplers, we'll flag error later as needed depending on binding
3197                 if (!pSet->IsUpdated()) {
3198                     for (auto binding : bindings) {
3199                         if (!pSet->GetImmutableSamplerPtrFromBinding(binding)) {
3200                             result |= log_msg(
3201                                 my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3202                                 (uint64_t)pSet->GetSet(), __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
3203                                 "Descriptor Set 0x%" PRIxLEAST64 " bound but was never updated. It is now being used to draw so "
3204                                 "this will result in undefined behavior.",
3205                                 (uint64_t)pSet->GetSet());
3206                         }
3207                     }
3208                 }
3209             }
3210         }
3211         // For given active slots, verify any dynamic descriptors and record updated images & buffers
3212         result |= validate_and_update_drawtime_descriptor_state(my_data, cb_node, activeSetBindingsPairs, function);
3213     }
3214 
3215     // Check general pipeline state that needs to be validated at drawtime
3216     if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint)
3217         result |= validatePipelineDrawtimeState(my_data, state, cb_node, pPipe);
3218 
3219     return result;
3220 }
3221 
3222 // Validate HW line width capabilities prior to setting requested line width.
3223 static bool verifyLineWidth(layer_data *my_data, DRAW_STATE_ERROR dsError, const uint64_t &target, float lineWidth) {
3224     bool skip_call = false;
3225 
3226     // First check to see if the physical device supports wide lines.
3227     if ((VK_FALSE == my_data->enabled_features.wideLines) && (1.0f != lineWidth)) {
3228         skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, target, __LINE__,
3229                              dsError, "DS", "Attempt to set lineWidth to %f but physical device wideLines feature "
3230                                             "not supported/enabled so lineWidth must be 1.0f!",
3231                              lineWidth);
3232     } else {
3233         // Otherwise, make sure the width falls in the valid range.
3234         if ((my_data->phys_dev_properties.properties.limits.lineWidthRange[0] > lineWidth) ||
3235             (my_data->phys_dev_properties.properties.limits.lineWidthRange[1] < lineWidth)) {
3236             skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, target,
3237                                  __LINE__, dsError, "DS", "Attempt to set lineWidth to %f but physical device limits line width "
3238                                                           "to between [%f, %f]!",
3239                                  lineWidth, my_data->phys_dev_properties.properties.limits.lineWidthRange[0],
3240                                  my_data->phys_dev_properties.properties.limits.lineWidthRange[1]);
3241         }
3242     }
3243 
3244     return skip_call;
3245 }
3246 
3247 // Verify that create state for a pipeline is valid
3248 static bool verifyPipelineCreateState(layer_data *my_data, const VkDevice device, std::vector<PIPELINE_STATE *> pPipelines,
3249                                       int pipelineIndex) {
3250     bool skip_call = false;
3251 
3252     PIPELINE_STATE *pPipeline = pPipelines[pipelineIndex];
3253 
3254     // If create derivative bit is set, check that we've specified a base
3255     // pipeline correctly, and that the base pipeline was created to allow
3256     // derivatives.
3257     if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
3258         PIPELINE_STATE *pBasePipeline = nullptr;
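        // Exactly one of basePipelineHandle and basePipelineIndex may identify the base pipeline;
        // the XOR below is false, and the error fires, when both or neither are provided.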
3259         if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
3260               (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
3261             skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3262                                  DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3263                                  "Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified");
3264         } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
3265             if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
3266                 skip_call |=
3267                     log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3268                             DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3269                             "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline.");
3270             } else {
3271                 pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex];
3272             }
3273         } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
3274             pBasePipeline = getPipelineState(my_data, pPipeline->graphicsPipelineCI.basePipelineHandle);
3275         }
3276 
3277         if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
3278             skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3279                                  DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3280                                  "Invalid Pipeline CreateInfo: base pipeline does not allow derivatives.");
3281         }
3282     }
3283 
3284     if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
3285         if (!my_data->enabled_features.independentBlend) {
3286             if (pPipeline->attachments.size() > 1) {
3287                 VkPipelineColorBlendAttachmentState *pAttachments = &pPipeline->attachments[0];
3288                 for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
3289                     // Quoting the spec: "If [the independent blend] feature is not enabled, the VkPipelineColorBlendAttachmentState
3290                     // settings for all color attachments must be identical." VkPipelineColorBlendAttachmentState contains
3291                     // only attachment state, so memcmp is best suited for the comparison
3292                     if (memcmp(static_cast<const void *>(pAttachments), static_cast<const void *>(&pAttachments[i]),
3293                                sizeof(pAttachments[0]))) {
3294                         skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3295                                              __LINE__, DRAWSTATE_INDEPENDENT_BLEND, "DS",
3296                                              "Invalid Pipeline CreateInfo: If independent blend feature not "
3297                                              "enabled, all elements of pAttachments must be identical");
3298                         break;
3299                     }
3300                 }
3301             }
3302         }
3303         if (!my_data->enabled_features.logicOp &&
3304             (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
3305             skip_call |=
3306                 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3307                         DRAWSTATE_DISABLED_LOGIC_OP, "DS",
3308                         "Invalid Pipeline CreateInfo: If logic operations feature not enabled, logicOpEnable must be VK_FALSE");
3309         }
3310     }
3311 
3312     // Ensure the subpass index is valid. If not, then validate_and_capture_pipeline_shader_state
3313     // produces nonsense errors that confuse users. Other layers should already
3314     // emit errors for renderpass being invalid.
3315     auto renderPass = getRenderPassState(my_data, pPipeline->graphicsPipelineCI.renderPass);
3316     if (renderPass && pPipeline->graphicsPipelineCI.subpass >= renderPass->createInfo.subpassCount) {
3317         skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3318                              DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Subpass index %u "
3319                                                                             "is out of range for this renderpass (0..%u)",
3320                              pPipeline->graphicsPipelineCI.subpass, renderPass->createInfo.subpassCount - 1);
3321     }
3322 
3323     if (!validate_and_capture_pipeline_shader_state(my_data->report_data, pPipeline, &my_data->enabled_features,
3324                                                     my_data->shaderModuleMap)) {
3325         skip_call = true;
3326     }
3327     // Each shader's stage must be unique
3328     if (pPipeline->duplicate_shaders) {
3329         for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) {
3330             if (pPipeline->duplicate_shaders & stage) {
3331                 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
3332                                      __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3333                                      "Invalid Pipeline CreateInfo State: Multiple shaders provided for stage %s",
3334                                      string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage)));
3335             }
3336         }
3337     }
3338     // VS is required
3339     if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
3340         skip_call |=
3341             log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3342                     DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Vertex Shader required");
3343     }
3344     // Either both or neither TC/TE shaders should be defined
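    // (The boolean inequality below acts as XOR: the error fires only when exactly one of the
    //  tessellation control / evaluation stages is present.)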
3345     if (((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) == 0) !=
3346         ((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) == 0)) {
3347         skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3348                              DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3349                              "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair");
3350     }
3351     // Compute shaders should be specified independent of Gfx shaders
3352     if ((pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) &&
3353         (pPipeline->active_shaders &
3354          (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT |
3355           VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_FRAGMENT_BIT))) {
3356         skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3357                              DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3358                              "Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline");
3359     }
3360     // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
3361     // Mismatching primitive topology and tessellation fails graphics pipeline creation.
3362     if (pPipeline->active_shaders & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) &&
3363         (!pPipeline->graphicsPipelineCI.pInputAssemblyState ||
3364          pPipeline->graphicsPipelineCI.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
3365         skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3366                              DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
3367                                                                             "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA "
3368                                                                             "topology for tessellation pipelines");
3369     }
3370     if (pPipeline->graphicsPipelineCI.pInputAssemblyState &&
3371         pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
3372         if (~pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) {
3373             skip_call |=
3374                 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3375                         DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
3376                                                                        "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
3377                                                                        "topology is only valid for tessellation pipelines");
3378         }
3379         if (!pPipeline->graphicsPipelineCI.pTessellationState) {
3380             skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3381                                  DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3382                                  "Invalid Pipeline CreateInfo State: "
3383                                  "pTessellationState is NULL when VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
3384                                  "topology used. pTessellationState must not be NULL in this case.");
3385         } else if (!pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints ||
3386                    (pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints > 32)) {
3387             skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3388                                  DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
3389                                                                                 "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
3390                                                                                 "topology used with patchControlPoints value %u."
3391                                                                                 " patchControlPoints should be >0 and <=32.",
3392                                  pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints);
3393         }
3394     }
3395     // If a rasterization state is provided, make sure that the line width conforms to the HW.
3396     if (pPipeline->graphicsPipelineCI.pRasterizationState) {
3397         if (!isDynamic(pPipeline, VK_DYNAMIC_STATE_LINE_WIDTH)) {
3398             skip_call |= verifyLineWidth(my_data, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE,
3399                                          reinterpret_cast<uint64_t const &>(pPipeline->pipeline),
3400                                          pPipeline->graphicsPipelineCI.pRasterizationState->lineWidth);
3401         }
3402     }
3403     // Viewport state must be included if rasterization is enabled.
3404     // If the viewport state is included, the viewport and scissor counts should always match.
3405     // NOTE : Even if these are flagged as dynamic, counts need to be set correctly for shader compiler
3406     if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
3407         (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
3408         if (!pPipeline->graphicsPipelineCI.pViewportState) {
3409             skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3410                                  DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", "Gfx Pipeline pViewportState is null. Even if viewport "
3411                                                                             "and scissors are dynamic PSO must include "
3412                                                                             "viewportCount and scissorCount in pViewportState.");
3413         } else if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount !=
3414                    pPipeline->graphicsPipelineCI.pViewportState->viewportCount) {
3415             skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3416                                  DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3417                                  "Gfx Pipeline viewport count (%u) must match scissor count (%u).",
3418                                  pPipeline->graphicsPipelineCI.pViewportState->viewportCount,
3419                                  pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
3420         } else {
3421             // If viewport or scissor are not dynamic, then verify that data is appropriate for count
3422             bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
3423             bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
3424             if (!dynViewport) {
3425                 if (pPipeline->graphicsPipelineCI.pViewportState->viewportCount &&
3426                     !pPipeline->graphicsPipelineCI.pViewportState->pViewports) {
3427                     skip_call |=
3428                         log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3429                                 DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3430                                 "Gfx Pipeline viewportCount is %u, but pViewports is NULL. For non-zero viewportCount, you "
3431                                 "must either include pViewports data, or include viewport in pDynamicState and set it with "
3432                                 "vkCmdSetViewport().",
3433                                 pPipeline->graphicsPipelineCI.pViewportState->viewportCount);
3434                 }
3435             }
3436             if (!dynScissor) {
3437                 if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount &&
3438                     !pPipeline->graphicsPipelineCI.pViewportState->pScissors) {
3439                     skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3440                                          __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3441                                          "Gfx Pipeline scissorCount is %u, but pScissors is NULL. For non-zero scissorCount, you "
3442                                          "must either include pScissors data, or include scissor in pDynamicState and set it with "
3443                                          "vkCmdSetScissor().",
3444                                          pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
3445                 }
3446             }
3447         }
3448 
3449         // If rasterization is not disabled, and subpass uses a depth/stencil
3450         // attachment, pDepthStencilState must be a pointer to a valid structure
3451         auto subpass_desc = renderPass ? &renderPass->createInfo.pSubpasses[pPipeline->graphicsPipelineCI.subpass] : nullptr;
3452         if (subpass_desc && subpass_desc->pDepthStencilAttachment &&
3453             subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
3454             if (!pPipeline->graphicsPipelineCI.pDepthStencilState) {
3455                 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
3456                                      __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3457                                      "Invalid Pipeline CreateInfo State: "
3458                                      "pDepthStencilState is NULL when rasterization is enabled and subpass uses a "
3459                                      "depth/stencil attachment");
3460             }
3461         }
3462     }
3463     return skip_call;
3464 }
3465 
3466 // Free the Pipeline nodes
3467 static void deletePipelines(layer_data *my_data) {
3468     if (my_data->pipelineMap.size() <= 0)
3469         return;
3470     for (auto &pipe_map_pair : my_data->pipelineMap) {
3471         delete pipe_map_pair.second;
3472     }
3473     my_data->pipelineMap.clear();
3474 }
3475 
3476 // Block of code at start here specifically for managing/tracking DSs
3477 
3478 // Return Pool node ptr for specified pool or else NULL
3479 DESCRIPTOR_POOL_STATE *getDescriptorPoolState(const layer_data *dev_data, const VkDescriptorPool pool) {
3480     auto pool_it = dev_data->descriptorPoolMap.find(pool);
3481     if (pool_it == dev_data->descriptorPoolMap.end()) {
3482         return NULL;
3483     }
3484     return pool_it->second;
3485 }
3486 
3487 // Return false if update struct is of valid type, otherwise flag error and return code from callback
3488 static bool validUpdateStruct(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
3489     switch (pUpdateStruct->sType) {
3490     case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3491     case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3492         return false;
3493     default:
3494         return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3495                        DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3496                        "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
3497                        string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
3498     }
3499 }
3500 
3501 // Set count for given update struct in the last parameter
3502 static uint32_t getUpdateCount(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
3503     switch (pUpdateStruct->sType) {
3504     case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3505         return ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorCount;
3506     case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3507         // TODO : Need to understand this case better and make sure code is correct
3508         return ((VkCopyDescriptorSet *)pUpdateStruct)->descriptorCount;
3509     default:
3510         return 0;
3511     }
3512 }
3513 
3514 // For given layout and update, return the first overall index of the layout that is updated
3515 static uint32_t getUpdateStartIndex(layer_data *my_data, const VkDevice device, const uint32_t binding_start_index,
3516                                     const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
3517     return binding_start_index + arrayIndex;
3518 }
3519 // For given layout and update, return the last overall index of the layout that is updated
3520 static uint32_t getUpdateEndIndex(layer_data *my_data, const VkDevice device, const uint32_t binding_start_index,
3521                                   const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
3522     uint32_t count = getUpdateCount(my_data, device, pUpdateStruct);
3523     return binding_start_index + arrayIndex + count - 1;
3524 }
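// For example (illustrative values), using the two helpers above: a write with descriptorCount = 3 applied at
// arrayIndex = 1 to a binding whose first overall index is 4 updates indices 5 through 7
// (start = 4 + 1, end = 4 + 1 + 3 - 1).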
3525 // Verify that the descriptor type in the update struct matches what's expected by the layout
3526 static bool validateUpdateConsistency(layer_data *my_data, const VkDevice device, const VkDescriptorType layout_type,
3527                                       const GENERIC_HEADER *pUpdateStruct, uint32_t startIndex, uint32_t endIndex) {
3528     // First get actual type of update
3529     bool skip_call = false;
3530     VkDescriptorType actualType = VK_DESCRIPTOR_TYPE_MAX_ENUM;
3531     switch (pUpdateStruct->sType) {
3532     case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3533         actualType = ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorType;
3534         break;
3535     case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3536         /* no need to validate */
3537         return false;
3538         break;
3539     default:
3540         skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3541                              DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3542                              "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
3543                              string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
3544     }
3545     if (!skip_call) {
3546         if (layout_type != actualType) {
3547             skip_call |= log_msg(
3548                 my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3549                 DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS",
3550                 "Write descriptor update has descriptor type %s that does not match overlapping binding descriptor type of %s!",
3551                 string_VkDescriptorType(actualType), string_VkDescriptorType(layout_type));
3552         }
3553     }
3554     return skip_call;
3555 }
3556 //TODO: Consolidate functions
3557 bool FindLayout(const GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, IMAGE_CMD_BUF_LAYOUT_NODE &node, const VkImageAspectFlags aspectMask) {
3558     layer_data *my_data = get_my_data_ptr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
3559     if (!(imgpair.subresource.aspectMask & aspectMask)) {
3560         return false;
3561     }
3562     VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
3563     imgpair.subresource.aspectMask = aspectMask;
3564     auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
3565     if (imgsubIt == pCB->imageLayoutMap.end()) {
3566         return false;
3567     }
3568     if (node.layout != VK_IMAGE_LAYOUT_MAX_ENUM && node.layout != imgsubIt->second.layout) {
3569         log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3570                 reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
3571                 "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s",
3572                 reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(node.layout), string_VkImageLayout(imgsubIt->second.layout));
3573     }
3574     if (node.initialLayout != VK_IMAGE_LAYOUT_MAX_ENUM && node.initialLayout != imgsubIt->second.initialLayout) {
3575         log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3576                 reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
3577                 "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple initial layout types: %s and %s",
3578                 reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(node.initialLayout), string_VkImageLayout(imgsubIt->second.initialLayout));
3579     }
3580     node = imgsubIt->second;
3581     return true;
3582 }
3583 
3584 bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout, const VkImageAspectFlags aspectMask) {
3585     if (!(imgpair.subresource.aspectMask & aspectMask)) {
3586         return false;
3587     }
3588     VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
3589     imgpair.subresource.aspectMask = aspectMask;
3590     auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
3591     if (imgsubIt == my_data->imageLayoutMap.end()) {
3592         return false;
3593     }
3594     if (layout != VK_IMAGE_LAYOUT_MAX_ENUM && layout != imgsubIt->second.layout) {
3595         log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3596                 reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
3597                 "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s",
3598                 reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(layout), string_VkImageLayout(imgsubIt->second.layout));
3599     }
3600     layout = imgsubIt->second.layout;
3601     return true;
3602 }
3603 
3604 // find layout(s) on the cmd buf level
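// Probes each aspect (color, depth, stencil, metadata) in turn; if no per-subresource entry matches,
// falls back to the whole-image entry keyed with hasSubresource == false.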
3605 bool FindLayout(const GLOBAL_CB_NODE *pCB, VkImage image, VkImageSubresource range, IMAGE_CMD_BUF_LAYOUT_NODE &node) {
3606     ImageSubresourcePair imgpair = {image, true, range};
3607     node = IMAGE_CMD_BUF_LAYOUT_NODE(VK_IMAGE_LAYOUT_MAX_ENUM, VK_IMAGE_LAYOUT_MAX_ENUM);
3608     FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_COLOR_BIT);
3609     FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_DEPTH_BIT);
3610     FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_STENCIL_BIT);
3611     FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_METADATA_BIT);
3612     if (node.layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
3613         imgpair = {image, false, VkImageSubresource()};
3614         auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
3615         if (imgsubIt == pCB->imageLayoutMap.end())
3616             return false;
3617         node = imgsubIt->second;
3618     }
3619     return true;
3620 }
3621 
3622 // find layout(s) on the global level
3623 bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout) {
3624     layout = VK_IMAGE_LAYOUT_MAX_ENUM;
3625     FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
3626     FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
3627     FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
3628     FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
3629     if (layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
3630         imgpair = {imgpair.image, false, VkImageSubresource()};
3631         auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
3632         if (imgsubIt == my_data->imageLayoutMap.end())
3633             return false;
3634         layout = imgsubIt->second.layout;
3635     }
3636     return true;
3637 }
3638 
3639 bool FindLayout(const layer_data *my_data, VkImage image, VkImageSubresource range, VkImageLayout &layout) {
3640     ImageSubresourcePair imgpair = {image, true, range};
3641     return FindLayout(my_data, imgpair, layout);
3642 }
3643 
3644 bool FindLayouts(const layer_data *my_data, VkImage image, std::vector<VkImageLayout> &layouts) {
3645     auto sub_data = my_data->imageSubresourceMap.find(image);
3646     if (sub_data == my_data->imageSubresourceMap.end())
3647         return false;
3648     auto image_state = getImageState(my_data, image);
3649     if (!image_state)
3650         return false;
3651     bool ignoreGlobal = false;
3652     // TODO: Make this robust for >1 aspect mask. Now it will just say ignore
3653     // potential errors in this case.
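    // If every individual subresource already has its own entry (plus the whole-image entry), the
    // whole-image entry is stale, so skip it when collecting layouts.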
3654     if (sub_data->second.size() >= (image_state->createInfo.arrayLayers * image_state->createInfo.mipLevels + 1)) {
3655         ignoreGlobal = true;
3656     }
3657     for (auto imgsubpair : sub_data->second) {
3658         if (ignoreGlobal && !imgsubpair.hasSubresource)
3659             continue;
3660         auto img_data = my_data->imageLayoutMap.find(imgsubpair);
3661         if (img_data != my_data->imageLayoutMap.end()) {
3662             layouts.push_back(img_data->second.layout);
3663         }
3664     }
3665     return true;
3666 }
3667 
3668 // Set the layout on the global level
3669 void SetLayout(layer_data *my_data, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
3670     VkImage &image = imgpair.image;
3671     // TODO (mlentine): Maybe set format if new? Not used atm.
3672     my_data->imageLayoutMap[imgpair].layout = layout;
3673     // TODO (mlentine): Maybe make vector a set?
3674     auto subresource = std::find(my_data->imageSubresourceMap[image].begin(), my_data->imageSubresourceMap[image].end(), imgpair);
3675     if (subresource == my_data->imageSubresourceMap[image].end()) {
3676         my_data->imageSubresourceMap[image].push_back(imgpair);
3677     }
3678 }
3679 
3680 // Set the layout on the cmdbuf level
3681 void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const IMAGE_CMD_BUF_LAYOUT_NODE &node) {
3682     pCB->imageLayoutMap[imgpair] = node;
3683     // TODO (mlentine): Maybe make vector a set?
3684     auto subresource =
3685         std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair);
3686     if (subresource == pCB->imageSubresourceMap[imgpair.image].end()) {
3687         pCB->imageSubresourceMap[imgpair.image].push_back(imgpair);
3688     }
3689 }
3690 
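// Command-buffer-level layout update: the first time a subresource is seen in this command buffer, its
// initial layout is recorded (from any existing tracked entry, else the new layout) alongside the new layout.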
3691 void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
3692     // TODO (mlentine): Maybe make vector a set?
3693     if (std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair) !=
3694         pCB->imageSubresourceMap[imgpair.image].end()) {
3695         pCB->imageLayoutMap[imgpair].layout = layout;
3696     } else {
3697         // TODO (mlentine): Could be expensive and might need to be removed.
3698         assert(imgpair.hasSubresource);
3699         IMAGE_CMD_BUF_LAYOUT_NODE node;
3700         if (!FindLayout(pCB, imgpair.image, imgpair.subresource, node)) {
3701             node.initialLayout = layout;
3702         }
3703         SetLayout(pCB, imgpair, {node.initialLayout, layout});
3704     }
3705 }
3706 
3707 template <class OBJECT, class LAYOUT>
3708 void SetLayout(OBJECT *pObject, ImageSubresourcePair imgpair, const LAYOUT &layout, VkImageAspectFlags aspectMask) {
3709     if (imgpair.subresource.aspectMask & aspectMask) {
3710         imgpair.subresource.aspectMask = aspectMask;
3711         SetLayout(pObject, imgpair, layout);
3712     }
3713 }
3714 
3715 template <class OBJECT, class LAYOUT>
3716 void SetLayout(OBJECT *pObject, VkImage image, VkImageSubresource range, const LAYOUT &layout) {
3717     ImageSubresourcePair imgpair = {image, true, range};
3718     SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
3719     SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
3720     SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
3721     SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
3722 }
3723 
3724 template <class OBJECT, class LAYOUT> void SetLayout(OBJECT *pObject, VkImage image, const LAYOUT &layout) {
3725     ImageSubresourcePair imgpair = {image, false, VkImageSubresource()};
3726     SetLayout(pObject, image, imgpair, layout);
3727 }
3728 
3729 void SetLayout(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkImageView imageView, const VkImageLayout &layout) {
3730     auto view_state = getImageViewState(dev_data, imageView);
3731     assert(view_state);
3732     auto image = view_state->create_info.image;
3733     const VkImageSubresourceRange &subRange = view_state->create_info.subresourceRange;
3734     // TODO: Do not iterate over every possibility - consolidate where possible
3735     for (uint32_t j = 0; j < subRange.levelCount; j++) {
3736         uint32_t level = subRange.baseMipLevel + j;
3737         for (uint32_t k = 0; k < subRange.layerCount; k++) {
3738             uint32_t layer = subRange.baseArrayLayer + k;
3739             VkImageSubresource sub = {subRange.aspectMask, level, layer};
3740             // TODO: If ImageView was created with depth or stencil, transition both layouts as
3741             // the aspectMask is ignored and both are used. Verify that the extra implicit layout
3742             // is OK for descriptor set layout validation
3743             if (subRange.aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
3744                 if (vk_format_is_depth_and_stencil(view_state->create_info.format)) {
3745                     sub.aspectMask |= (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT);
3746                 }
3747             }
3748             SetLayout(pCB, image, sub, layout);
3749         }
3750     }
3751 }
3752 
3753 // Validate that given set is valid and that it's not being used by an in-flight CmdBuffer
3754 // func_str is the name of the calling function
3755 // Return false if no errors occur
3756 // Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain)
3757 static bool validateIdleDescriptorSet(const layer_data *dev_data, VkDescriptorSet set, std::string func_str) {
3758     if (dev_data->instance_data->disabled.idle_descriptor_set)
3759         return false;
3760     bool skip_call = false;
3761     auto set_node = dev_data->setMap.find(set);
3762     if (set_node == dev_data->setMap.end()) {
3763         skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3764                              (uint64_t)(set), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
3765                              "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that has not been allocated.", func_str.c_str(),
3766                              (uint64_t)(set));
3767     } else {
3768         // TODO : This covers various error cases so should pass error enum into this function and use passed in enum here
3769         if (set_node->second->in_use.load()) {
3770             skip_call |=
3771                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3772                         (uint64_t)(set), __LINE__, VALIDATION_ERROR_00919, "DS",
3773                         "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that is in use by a command buffer. %s",
3774                         func_str.c_str(), (uint64_t)(set), validation_error_map[VALIDATION_ERROR_00919]);
3775         }
3776     }
3777     return skip_call;
3778 }
3779 
3780 // Remove set from setMap and delete the set
3781 static void freeDescriptorSet(layer_data *dev_data, cvdescriptorset::DescriptorSet *descriptor_set) {
3782     dev_data->setMap.erase(descriptor_set->GetSet());
3783     delete descriptor_set;
3784 }
3785 // Free all DS Pools including their Sets & related sub-structs
3786 // NOTE : Calls to this function should be wrapped in mutex
3787 static void deletePools(layer_data *my_data) {
3788     if (my_data->descriptorPoolMap.size() <= 0)
3789         return;
3790     for (auto ii = my_data->descriptorPoolMap.begin(); ii != my_data->descriptorPoolMap.end(); ++ii) {
3791         // Remove this pool's sets from setMap and delete them
3792         for (auto ds : (*ii).second->sets) {
3793             freeDescriptorSet(my_data, ds);
3794         }
3795         (*ii).second->sets.clear();
3796     }
3797     my_data->descriptorPoolMap.clear();
3798 }
3799 
3800 static void clearDescriptorPool(layer_data *my_data, const VkDevice device, const VkDescriptorPool pool,
3801                                 VkDescriptorPoolResetFlags flags) {
3802     DESCRIPTOR_POOL_STATE *pPool = getDescriptorPoolState(my_data, pool);
3803     // TODO: validate flags
3804     // For every set in this pool, remove it from setMap and free its cvdescriptorset::DescriptorSet
3805     for (auto ds : pPool->sets) {
3806         freeDescriptorSet(my_data, ds);
3807     }
3808     pPool->sets.clear();
3809     // Reset available count for each type and available sets for this pool
3810     for (uint32_t i = 0; i < pPool->availableDescriptorTypeCount.size(); ++i) {
3811         pPool->availableDescriptorTypeCount[i] = pPool->maxDescriptorTypeCount[i];
3812     }
3813     pPool->availableSets = pPool->maxSets;
3814 }
3815 
3816 // For given CB object, fetch associated CB Node from map
3817 static GLOBAL_CB_NODE *getCBNode(layer_data const *my_data, const VkCommandBuffer cb) {
3818     auto it = my_data->commandBufferMap.find(cb);
3819     if (it == my_data->commandBufferMap.end()) {
3820         log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
3821                 reinterpret_cast<const uint64_t &>(cb), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3822                 "Attempt to use CommandBuffer 0x%" PRIxLEAST64 " that doesn't exist!", (uint64_t)(cb));
3823         return NULL;
3824     }
3825     return it->second;
3826 }
3827 // Free all CB Nodes
3828 // NOTE : Calls to this function should be wrapped in mutex
3829 static void deleteCommandBuffers(layer_data *my_data) {
3830     if (my_data->commandBufferMap.empty()) {
3831         return;
3832     }
3833     for (auto ii = my_data->commandBufferMap.begin(); ii != my_data->commandBufferMap.end(); ++ii) {
3834         delete (*ii).second;
3835     }
3836     my_data->commandBufferMap.clear();
3837 }
3838 
3839 static bool report_error_no_cb_begin(const layer_data *dev_data, const VkCommandBuffer cb, const char *caller_name) {
3840     return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
3841                    (uint64_t)cb, __LINE__, DRAWSTATE_NO_BEGIN_COMMAND_BUFFER, "DS",
3842                    "You must call vkBeginCommandBuffer() before this call to %s", caller_name);
3843 }
3844 
3845 bool validateCmdsInCmdBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) {
3846     if (!pCB->activeRenderPass)
3847         return false;
3848     bool skip_call = false;
3849     if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS &&
3850         (cmd_type != CMD_EXECUTECOMMANDS && cmd_type != CMD_NEXTSUBPASS && cmd_type != CMD_ENDRENDERPASS)) {
3851         skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3852                              DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3853                              "Commands cannot be called in a subpass using secondary command buffers.");
3854     } else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
3855         skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3856                              DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3857                              "vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
3858     }
3859     return skip_call;
3860 }
3861 
3862 static bool checkGraphicsBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
3863     if (!(flags & VK_QUEUE_GRAPHICS_BIT))
3864         return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3865                        DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3866                        "Cannot call %s on a command buffer allocated from a pool without graphics capabilities.", name);
3867     return false;
3868 }
3869 
3870 static bool checkComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
3871     if (!(flags & VK_QUEUE_COMPUTE_BIT))
3872         return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3873                        DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3874                        "Cannot call %s on a command buffer allocated from a pool without compute capabilities.", name);
3875     return false;
3876 }
3877 
3878 static bool checkGraphicsOrComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
3879     if (!((flags & VK_QUEUE_GRAPHICS_BIT) || (flags & VK_QUEUE_COMPUTE_BIT)))
3880         return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3881                        DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3882                        "Cannot call %s on a command buffer allocated from a pool without graphics or compute capabilities.", name);
3883     return false;
3884 }
3885 
3886 // Add specified CMD to the CmdBuffer in given pCB, flagging errors if CB is not
3887 //  in the recording state or if there's an issue with the Cmd ordering
3888 static bool addCmd(layer_data *my_data, GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd, const char *caller_name) {
3889     bool skip_call = false;
3890     auto pPool = getCommandPoolNode(my_data, pCB->createInfo.commandPool);
3891     if (pPool) {
3892         VkQueueFlags flags = my_data->phys_dev_properties.queue_family_properties[pPool->queueFamilyIndex].queueFlags;
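        // The cases below group commands by the minimum queue capability the pool must support:
        // graphics-or-compute, graphics-only, or compute-only; the remaining commands (copies,
        // barriers, etc.) are allowed on any queue family, so no check is made for them.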
3893         switch (cmd) {
3894         case CMD_BINDPIPELINE:
3895         case CMD_BINDPIPELINEDELTA:
3896         case CMD_BINDDESCRIPTORSETS:
3897         case CMD_FILLBUFFER:
3898         case CMD_CLEARCOLORIMAGE:
3899         case CMD_SETEVENT:
3900         case CMD_RESETEVENT:
3901         case CMD_WAITEVENTS:
3902         case CMD_BEGINQUERY:
3903         case CMD_ENDQUERY:
3904         case CMD_RESETQUERYPOOL:
3905         case CMD_COPYQUERYPOOLRESULTS:
3906         case CMD_WRITETIMESTAMP:
3907             skip_call |= checkGraphicsOrComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
3908             break;
3909         case CMD_SETVIEWPORTSTATE:
3910         case CMD_SETSCISSORSTATE:
3911         case CMD_SETLINEWIDTHSTATE:
3912         case CMD_SETDEPTHBIASSTATE:
3913         case CMD_SETBLENDSTATE:
3914         case CMD_SETDEPTHBOUNDSSTATE:
3915         case CMD_SETSTENCILREADMASKSTATE:
3916         case CMD_SETSTENCILWRITEMASKSTATE:
3917         case CMD_SETSTENCILREFERENCESTATE:
3918         case CMD_BINDINDEXBUFFER:
3919         case CMD_BINDVERTEXBUFFER:
3920         case CMD_DRAW:
3921         case CMD_DRAWINDEXED:
3922         case CMD_DRAWINDIRECT:
3923         case CMD_DRAWINDEXEDINDIRECT:
3924         case CMD_BLITIMAGE:
3925         case CMD_CLEARATTACHMENTS:
3926         case CMD_CLEARDEPTHSTENCILIMAGE:
3927         case CMD_RESOLVEIMAGE:
3928         case CMD_BEGINRENDERPASS:
3929         case CMD_NEXTSUBPASS:
3930         case CMD_ENDRENDERPASS:
3931             skip_call |= checkGraphicsBit(my_data, flags, cmdTypeToString(cmd).c_str());
3932             break;
3933         case CMD_DISPATCH:
3934         case CMD_DISPATCHINDIRECT:
3935             skip_call |= checkComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
3936             break;
3937         case CMD_COPYBUFFER:
3938         case CMD_COPYIMAGE:
3939         case CMD_COPYBUFFERTOIMAGE:
3940         case CMD_COPYIMAGETOBUFFER:
3941         case CMD_CLONEIMAGEDATA:
3942         case CMD_UPDATEBUFFER:
3943         case CMD_PIPELINEBARRIER:
3944         case CMD_EXECUTECOMMANDS:
3945         case CMD_END:
3946             break;
3947         default:
3948             break;
3949         }
3950     }
3951     if (pCB->state != CB_RECORDING) {
3952         skip_call |= report_error_no_cb_begin(my_data, pCB->commandBuffer, caller_name);
3953     } else {
3954         skip_call |= validateCmdsInCmdBuffer(my_data, pCB, cmd);
3955         CMD_NODE cmdNode = {};
3956         // Init cmd node and append it to the end of the command list
3957         cmdNode.cmdNumber = ++pCB->numCmds;
3958         cmdNode.type = cmd;
3959         pCB->cmds.push_back(cmdNode);
3960     }
3961     return skip_call;
3962 }
3963 // For given object struct return a ptr of BASE_NODE type for its wrapping struct
3964 BASE_NODE *GetStateStructPtrFromObject(layer_data *dev_data, VK_OBJECT object_struct) {
3965     BASE_NODE *base_ptr = nullptr;
3966     switch (object_struct.type) {
3967     case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT: {
3968         base_ptr = getSetNode(dev_data, reinterpret_cast<VkDescriptorSet &>(object_struct.handle));
3969         break;
3970     }
3971     case VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT: {
3972         base_ptr = getSamplerState(dev_data, reinterpret_cast<VkSampler &>(object_struct.handle));
3973         break;
3974     }
3975     case VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT: {
3976         base_ptr = getQueryPoolNode(dev_data, reinterpret_cast<VkQueryPool &>(object_struct.handle));
3977         break;
3978     }
3979     case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT: {
3980         base_ptr = getPipelineState(dev_data, reinterpret_cast<VkPipeline &>(object_struct.handle));
3981         break;
3982     }
3983     case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
3984         base_ptr = getBufferNode(dev_data, reinterpret_cast<VkBuffer &>(object_struct.handle));
3985         break;
3986     }
3987     case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT: {
3988         base_ptr = getBufferViewState(dev_data, reinterpret_cast<VkBufferView &>(object_struct.handle));
3989         break;
3990     }
3991     case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
3992         base_ptr = getImageState(dev_data, reinterpret_cast<VkImage &>(object_struct.handle));
3993         break;
3994     }
3995     case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT: {
3996         base_ptr = getImageViewState(dev_data, reinterpret_cast<VkImageView &>(object_struct.handle));
3997         break;
3998     }
3999     case VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT: {
4000         base_ptr = getEventNode(dev_data, reinterpret_cast<VkEvent &>(object_struct.handle));
4001         break;
4002     }
4003     case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT: {
4004         base_ptr = getDescriptorPoolState(dev_data, reinterpret_cast<VkDescriptorPool &>(object_struct.handle));
4005         break;
4006     }
4007     case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT: {
4008         base_ptr = getCommandPoolNode(dev_data, reinterpret_cast<VkCommandPool &>(object_struct.handle));
4009         break;
4010     }
4011     case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT: {
4012         base_ptr = getFramebufferState(dev_data, reinterpret_cast<VkFramebuffer &>(object_struct.handle));
4013         break;
4014     }
4015     case VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT: {
4016         base_ptr = getRenderPassState(dev_data, reinterpret_cast<VkRenderPass &>(object_struct.handle));
4017         break;
4018     }
4019     case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT: {
4020         base_ptr = getMemObjInfo(dev_data, reinterpret_cast<VkDeviceMemory &>(object_struct.handle));
4021         break;
4022     }
4023     default:
4024         // TODO : Any other objects to be handled here?
4025         assert(0);
4026         break;
4027     }
4028     return base_ptr;
4029 }
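// Editorial note (not in the original file): this generic mapping is what lets
// removeCommandBufferBinding() and DecrementBoundResources() below treat every bound
// object uniformly through its BASE_NODE (shared cb_bindings set and in_use counter)
// instead of switching on the object type at each call site.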
4030 
4031 // Tie the VK_OBJECT to the cmd buffer which includes:
4032 //  Add object_binding to cmd buffer
4033 //  Add cb_binding to object
4034 static void addCommandBufferBinding(std::unordered_set<GLOBAL_CB_NODE *> *cb_bindings, VK_OBJECT obj, GLOBAL_CB_NODE *cb_node) {
4035     cb_bindings->insert(cb_node);
4036     cb_node->object_bindings.insert(obj);
4037 }
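// Illustrative sketch (assumption, not from this file): a typical caller pairs the
// object's state-node cb_bindings set with a VK_OBJECT describing the handle, e.g. for
// a buffer recorded into the command buffer (VK_OBJECT field order assumed here):
//   VK_OBJECT obj = {reinterpret_cast<uint64_t &>(buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT};
//   addCommandBufferBinding(&buffer_node->cb_bindings, obj, cb_node);
// Keeping both directions of the link lets either side invalidate the other when it is
// destroyed or reset.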
4038 // For a given object, if cb_node is in that object's cb_bindings, remove cb_node
4039 static void removeCommandBufferBinding(layer_data *dev_data, VK_OBJECT const *object, GLOBAL_CB_NODE *cb_node) {
4040     BASE_NODE *base_obj = GetStateStructPtrFromObject(dev_data, *object);
4041     if (base_obj)
4042         base_obj->cb_bindings.erase(cb_node);
4043 }
4044 // Reset the command buffer state
4045 //  Maintain the createInfo and set state to CB_NEW, but clear all other state
4046 static void resetCB(layer_data *dev_data, const VkCommandBuffer cb) {
4047     GLOBAL_CB_NODE *pCB = dev_data->commandBufferMap[cb];
4048     if (pCB) {
4049         pCB->in_use.store(0);
4050         pCB->cmds.clear();
4051         // Reset CB state (note that createInfo is not cleared)
4052         pCB->commandBuffer = cb;
4053         memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
4054         memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
4055         pCB->numCmds = 0;
4056         memset(pCB->drawCount, 0, NUM_DRAW_TYPES * sizeof(uint64_t));
4057         pCB->state = CB_NEW;
4058         pCB->submitCount = 0;
4059         pCB->status = 0;
4060         pCB->viewportMask = 0;
4061         pCB->scissorMask = 0;
4062 
4063         for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
4064             pCB->lastBound[i].reset();
4065         }
4066 
4067         memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo));
4068         pCB->activeRenderPass = nullptr;
4069         pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE;
4070         pCB->activeSubpass = 0;
4071         pCB->broken_bindings.clear();
4072         pCB->waitedEvents.clear();
4073         pCB->events.clear();
4074         pCB->writeEventsBeforeWait.clear();
4075         pCB->waitedEventsBeforeQueryReset.clear();
4076         pCB->queryToStateMap.clear();
4077         pCB->activeQueries.clear();
4078         pCB->startedQueries.clear();
4079         pCB->imageSubresourceMap.clear();
4080         pCB->imageLayoutMap.clear();
4081         pCB->eventToStageMap.clear();
4082         pCB->drawData.clear();
4083         pCB->currentDrawData.buffers.clear();
4084         pCB->primaryCommandBuffer = VK_NULL_HANDLE;
4085         // Make sure any secondaryCommandBuffers are removed from globalInFlight
4086         for (auto secondary_cb : pCB->secondaryCommandBuffers) {
4087             dev_data->globalInFlightCmdBuffers.erase(secondary_cb);
4088         }
4089         pCB->secondaryCommandBuffers.clear();
4090         pCB->updateImages.clear();
4091         pCB->updateBuffers.clear();
4092         clear_cmd_buf_and_mem_references(dev_data, pCB);
4093         pCB->eventUpdates.clear();
4094         pCB->queryUpdates.clear();
4095 
4096         // Remove object bindings
4097         for (auto obj : pCB->object_bindings) {
4098             removeCommandBufferBinding(dev_data, &obj, pCB);
4099         }
4100         pCB->object_bindings.clear();
4101         // Remove this cmdBuffer's reference from each FrameBuffer's CB ref list
4102         for (auto framebuffer : pCB->framebuffers) {
4103             auto fb_state = getFramebufferState(dev_data, framebuffer);
4104             if (fb_state)
4105                 fb_state->cb_bindings.erase(pCB);
4106         }
4107         pCB->framebuffers.clear();
4108         pCB->activeFramebuffer = VK_NULL_HANDLE;
4109     }
4110 }
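// Editorial note (assumption about the surrounding layer flow): resetCB() is the
// state-tracking counterpart of vkResetCommandBuffer()/vkBeginCommandBuffer(); the
// allocation-time createInfo survives, everything recorded since then is dropped, and
// the CB returns to CB_NEW so a fresh recording can be validated from scratch.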
4111 
4112 // Set PSO-related status bits for CB, including dynamic state set via PSO
4113 static void set_cb_pso_status(GLOBAL_CB_NODE *pCB, const PIPELINE_STATE *pPipe) {
4114     // Account for any dynamic state not set via this PSO
4115     if (!pPipe->graphicsPipelineCI.pDynamicState ||
4116         !pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount) { // All state is static
4117         pCB->status |= CBSTATUS_ALL_STATE_SET;
4118     } else {
4119         // First consider all state on
4120         // Then unset any state that's noted as dynamic in PSO
4121         // Finally OR that into CB statemask
4122         CBStatusFlags psoDynStateMask = CBSTATUS_ALL_STATE_SET;
4123         for (uint32_t i = 0; i < pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
4124             switch (pPipe->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) {
4125             case VK_DYNAMIC_STATE_LINE_WIDTH:
4126                 psoDynStateMask &= ~CBSTATUS_LINE_WIDTH_SET;
4127                 break;
4128             case VK_DYNAMIC_STATE_DEPTH_BIAS:
4129                 psoDynStateMask &= ~CBSTATUS_DEPTH_BIAS_SET;
4130                 break;
4131             case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
4132                 psoDynStateMask &= ~CBSTATUS_BLEND_CONSTANTS_SET;
4133                 break;
4134             case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
4135                 psoDynStateMask &= ~CBSTATUS_DEPTH_BOUNDS_SET;
4136                 break;
4137             case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
4138                 psoDynStateMask &= ~CBSTATUS_STENCIL_READ_MASK_SET;
4139                 break;
4140             case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
4141                 psoDynStateMask &= ~CBSTATUS_STENCIL_WRITE_MASK_SET;
4142                 break;
4143             case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
4144                 psoDynStateMask &= ~CBSTATUS_STENCIL_REFERENCE_SET;
4145                 break;
4146             default:
4147                 // TODO : Flag error here
4148                 break;
4149             }
4150         }
4151         pCB->status |= psoDynStateMask;
4152     }
4153 }
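// Worked example (sketch, not part of the original source): binding a pipeline whose
// only dynamic state is VK_DYNAMIC_STATE_LINE_WIDTH clears just that bit from
// CBSTATUS_ALL_STATE_SET before the OR, so the command buffer still requires an
// explicit vkCmdSetLineWidth() before drawing:
//   set_cb_pso_status(pCB, pPipe);                                    // pPipe: line width dynamic
//   bool needs_line_width = !(pCB->status & CBSTATUS_LINE_WIDTH_SET); // true in this case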
4154 
4155 // Print the last bound Gfx Pipeline
4156 static bool printPipeline(layer_data *my_data, const VkCommandBuffer cb) {
4157     bool skip_call = false;
4158     GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
4159     if (pCB) {
4160         PIPELINE_STATE *pPipeTrav = pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline_state;
4161         if (!pPipeTrav) {
4162             // nothing to print
4163         } else {
4164             skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
4165                                  __LINE__, DRAWSTATE_NONE, "DS", "%s",
4166                                  vk_print_vkgraphicspipelinecreateinfo(
4167                                      reinterpret_cast<const VkGraphicsPipelineCreateInfo *>(&pPipeTrav->graphicsPipelineCI), "{DS}")
4168                                      .c_str());
4169         }
4170     }
4171     return skip_call;
4172 }
4173 
4174 static void printCB(layer_data *my_data, const VkCommandBuffer cb) {
4175     GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
4176     if (pCB && pCB->cmds.size() > 0) {
4177         log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4178                 DRAWSTATE_NONE, "DS", "Cmds in command buffer 0x%p", (void *)cb);
4179         vector<CMD_NODE> cmds = pCB->cmds;
4180         for (auto ii = cmds.begin(); ii != cmds.end(); ++ii) {
4181             // TODO : Need to pass cmdbuffer as srcObj here
4182             log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4183                     __LINE__, DRAWSTATE_NONE, "DS", "  CMD 0x%" PRIx64 ": %s", (*ii).cmdNumber, cmdTypeToString((*ii).type).c_str());
4184         }
4185     } else {
4186         // Nothing to print
4187     }
4188 }
4189 
4190 static bool synchAndPrintDSConfig(layer_data *my_data, const VkCommandBuffer cb) {
4191     bool skip_call = false;
4192     if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
4193         return skip_call;
4194     }
4195     skip_call |= printPipeline(my_data, cb);
4196     return skip_call;
4197 }
4198 
4199 // Flags validation error if the associated call is made inside a render pass. The apiName
4200 // routine should ONLY be called outside a render pass.
4201 static bool insideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
4202     bool inside = false;
4203     if (pCB->activeRenderPass) {
4204         inside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4205                          (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
4206                          "%s: It is invalid to issue this call inside an active render pass (0x%" PRIxLEAST64 ")", apiName,
4207                          (uint64_t)pCB->activeRenderPass->renderPass);
4208     }
4209     return inside;
4210 }
4211 
4212 // Flags validation error if the associated call is made outside a render pass. The apiName
4213 // routine should ONLY be called inside a render pass.
4214 static bool outsideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
4215     bool outside = false;
4216     if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
4217         ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
4218          !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
4219         outside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4220                           (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_NO_ACTIVE_RENDERPASS, "DS",
4221                           "%s: This call must be issued inside an active render pass.", apiName);
4222     }
4223     return outside;
4224 }
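// Usage sketch (assumption, mirroring how such checks are typically wired up): calls
// that must be outside a render pass use the first helper, calls that must be inside
// use the second, e.g.
//   skip_call |= insideRenderPass(dev_data, pCB, "vkCmdCopyBuffer()");   // error if inside an RP
//   skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdDraw()");        // error if outside an RP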
4225 
4226 static void init_core_validation(instance_layer_data *instance_data, const VkAllocationCallbacks *pAllocator) {
4227 
4228     layer_debug_actions(instance_data->report_data, instance_data->logging_callback, pAllocator, "lunarg_core_validation");
4229 
4230 }
4231 
4232 static void checkInstanceRegisterExtensions(const VkInstanceCreateInfo *pCreateInfo, instance_layer_data *instance_data) {
4233     for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
4234         if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SURFACE_EXTENSION_NAME))
4235             instance_data->surfaceExtensionEnabled = true;
4236         if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_DISPLAY_EXTENSION_NAME))
4237             instance_data->displayExtensionEnabled = true;
4238 #ifdef VK_USE_PLATFORM_ANDROID_KHR
4239         if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_ANDROID_SURFACE_EXTENSION_NAME))
4240             instance_data->androidSurfaceExtensionEnabled = true;
4241 #endif
4242 #ifdef VK_USE_PLATFORM_MIR_KHR
4243         if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_MIR_SURFACE_EXTENSION_NAME))
4244             instance_data->mirSurfaceExtensionEnabled = true;
4245 #endif
4246 #ifdef VK_USE_PLATFORM_WAYLAND_KHR
4247         if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME))
4248             instance_data->waylandSurfaceExtensionEnabled = true;
4249 #endif
4250 #ifdef VK_USE_PLATFORM_WIN32_KHR
4251         if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_WIN32_SURFACE_EXTENSION_NAME))
4252             instance_data->win32SurfaceExtensionEnabled = true;
4253 #endif
4254 #ifdef VK_USE_PLATFORM_XCB_KHR
4255         if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_XCB_SURFACE_EXTENSION_NAME))
4256             instance_data->xcbSurfaceExtensionEnabled = true;
4257 #endif
4258 #ifdef VK_USE_PLATFORM_XLIB_KHR
4259         if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_XLIB_SURFACE_EXTENSION_NAME))
4260             instance_data->xlibSurfaceExtensionEnabled = true;
4261 #endif
4262     }
4263 }
4264 
4265 VKAPI_ATTR VkResult VKAPI_CALL
4266 CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
4267     VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
4268 
4269     assert(chain_info->u.pLayerInfo);
4270     PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
4271     PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
4272     if (fpCreateInstance == NULL)
4273         return VK_ERROR_INITIALIZATION_FAILED;
4274 
4275     // Advance the link info for the next element on the chain
4276     chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
4277 
4278     VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
4279     if (result != VK_SUCCESS)
4280         return result;
4281 
4282     instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(*pInstance), instance_layer_data_map);
4283     instance_data->instance = *pInstance;
4284     layer_init_instance_dispatch_table(*pInstance, &instance_data->dispatch_table, fpGetInstanceProcAddr);
4285 
4286     instance_data->report_data = debug_report_create_instance(
4287         &instance_data->dispatch_table, *pInstance, pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames);
4288     checkInstanceRegisterExtensions(pCreateInfo, instance_data);
4289     init_core_validation(instance_data, pAllocator);
4290 
4291     ValidateLayerOrdering(*pCreateInfo);
4292 
4293     return result;
4294 }
4295 
4296 /* hook DestroyInstance to remove tableInstanceMap entry */
4297 VKAPI_ATTR void VKAPI_CALL DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
4298     // TODOSC : Shouldn't need any customization here
4299     dispatch_key key = get_dispatch_key(instance);
4300     // TBD: Need any locking this early, in case this function is called at the
4301     // same time by more than one thread?
4302     instance_layer_data *instance_data = get_my_data_ptr(key, instance_layer_data_map);
4303     instance_data->dispatch_table.DestroyInstance(instance, pAllocator);
4304 
4305     std::lock_guard<std::mutex> lock(global_lock);
4306     // Clean up logging callback, if any
4307     while (instance_data->logging_callback.size() > 0) {
4308         VkDebugReportCallbackEXT callback = instance_data->logging_callback.back();
4309         layer_destroy_msg_callback(instance_data->report_data, callback, pAllocator);
4310         instance_data->logging_callback.pop_back();
4311     }
4312 
4313     layer_debug_report_destroy_instance(instance_data->report_data);
4314     layer_data_map.erase(key);
4315 }
4316 
4317 static void checkDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
4318     uint32_t i;
4319     // TBD: Need any locking, in case this function is called at the same time
4320     // by more than one thread?
4321     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4322     dev_data->device_extensions.wsi_enabled = false;
4323     dev_data->device_extensions.wsi_display_swapchain_enabled = false;
4324 
4325     for (i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
4326         if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0)
4327             dev_data->device_extensions.wsi_enabled = true;
4328         if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_DISPLAY_SWAPCHAIN_EXTENSION_NAME) == 0)
4329             dev_data->device_extensions.wsi_display_swapchain_enabled = true;
4330     }
4331 }
4332 
4333 // Verify that queue family has been properly requested
4334 bool ValidateRequestedQueueFamilyProperties(instance_layer_data *instance_data, VkPhysicalDevice gpu, const VkDeviceCreateInfo *create_info) {
4335     bool skip_call = false;
4336     auto physical_device_state = getPhysicalDeviceState(instance_data, gpu);
4337     // First check whether the app has actually requested queueFamilyProperties
4338     if (!physical_device_state) {
4339         skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
4340                              0, __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
4341                              "Invalid call to vkCreateDevice() w/o first calling vkEnumeratePhysicalDevices().");
4342     } else if (QUERY_DETAILS != physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
4343         // TODO: This is not called out as an invalid use in the spec, so make a more informative recommendation.
4344         skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
4345                              VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST,
4346                              "DL", "Call to vkCreateDevice() w/o first calling vkGetPhysicalDeviceQueueFamilyProperties().");
4347     } else {
4348         // Check that the requested queue properties are valid
4349         for (uint32_t i = 0; i < create_info->queueCreateInfoCount; i++) {
4350             uint32_t requestedIndex = create_info->pQueueCreateInfos[i].queueFamilyIndex;
4351             if (requestedIndex >= physical_device_state->queue_family_properties.size()) {
4352                 skip_call |= log_msg(
4353                     instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
4354                     __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, "DL",
4355                     "Invalid queue create request in vkCreateDevice(). Invalid queueFamilyIndex %u requested.", requestedIndex);
4356             } else if (create_info->pQueueCreateInfos[i].queueCount >
4357                        physical_device_state->queue_family_properties[requestedIndex].queueCount) {
4358                 skip_call |=
4359                     log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
4360                             0, __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, "DL",
4361                             "Invalid queue create request in vkCreateDevice(). QueueFamilyIndex %u only has %u queues, but "
4362                             "requested queueCount is %u.",
4363                             requestedIndex, physical_device_state->queue_family_properties[requestedIndex].queueCount,
4364                             create_info->pQueueCreateInfos[i].queueCount);
4365             }
4366         }
4367     }
4368     return skip_call;
4369 }
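// App-side sketch of the call order that satisfies the checks above (illustrative;
// gpu is a hypothetical VkPhysicalDevice handle):
//   uint32_t qf_count = 0;
//   vkGetPhysicalDeviceQueueFamilyProperties(gpu, &qf_count, nullptr);
//   std::vector<VkQueueFamilyProperties> props(qf_count);
//   vkGetPhysicalDeviceQueueFamilyProperties(gpu, &qf_count, props.data());
//   // then request queueFamilyIndex < qf_count and queueCount <= props[index].queueCount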
4370 
4371 // Verify that features have been queried and that they are available
4372 static bool ValidateRequestedFeatures(instance_layer_data *dev_data, VkPhysicalDevice phys, const VkPhysicalDeviceFeatures *requested_features) {
4373     bool skip_call = false;
4374 
4375     auto phys_device_state = getPhysicalDeviceState(dev_data, phys);
4376     const VkBool32 *actual = reinterpret_cast<VkBool32 *>(&phys_device_state->features);
4377     const VkBool32 *requested = reinterpret_cast<const VkBool32 *>(requested_features);
4378     // TODO : This is a nice, compact way to loop through the struct, but a bad way to report issues.
4379     //  We need to provide the struct member name with the issue. To do that, it seems we'll
4380     //  have to loop through each struct member, which should be done w/ codegen to keep in sync.
4381     uint32_t errors = 0;
4382     uint32_t total_bools = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
4383     for (uint32_t i = 0; i < total_bools; i++) {
4384         if (requested[i] > actual[i]) {
4385             // TODO: Add index to struct member name helper to be able to include a feature name
4386             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4387                 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED,
4388                 "DL", "While calling vkCreateDevice(), requesting feature #%u in VkPhysicalDeviceFeatures struct, "
4389                 "which is not available on this device.",
4390                 i);
4391             errors++;
4392         }
4393     }
4394     if (errors && (UNCALLED == phys_device_state->vkGetPhysicalDeviceFeaturesState)) {
4395         // If user didn't request features, notify them that they should
4396         // TODO: Verify this against the spec. I believe this is an invalid use of the API and should return an error
4397         skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4398                              VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED,
4399                              "DL", "You requested features that are unavailable on this device. You should first query feature "
4400                                    "availability by calling vkGetPhysicalDeviceFeatures().");
4401     }
4402     return skip_call;
4403 }
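// Minimal standalone sketch of the same VkBool32-array trick (assumption; 'requested'
// and 'supported' are hypothetical locals, not names from this file):
//   VkPhysicalDeviceFeatures supported;
//   vkGetPhysicalDeviceFeatures(gpu, &supported);
//   const VkBool32 *req = reinterpret_cast<const VkBool32 *>(&requested);
//   const VkBool32 *sup = reinterpret_cast<const VkBool32 *>(&supported);
//   for (uint32_t i = 0; i < sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32); ++i)
//       if (req[i] && !sup[i]) { /* feature #i was requested but is unsupported */ }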
4404 
4405 VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
4406                                             const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
4407     instance_layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), instance_layer_data_map);
4408     bool skip_call = false;
4409 
4410     // Check that any requested features are available
4411     if (pCreateInfo->pEnabledFeatures) {
4412         skip_call |= ValidateRequestedFeatures(my_instance_data, gpu, pCreateInfo->pEnabledFeatures);
4413     }
4414     skip_call |= ValidateRequestedQueueFamilyProperties(my_instance_data, gpu, pCreateInfo);
4415 
4416     if (skip_call) {
4417         return VK_ERROR_VALIDATION_FAILED_EXT;
4418     }
4419 
4420     VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
4421 
4422     assert(chain_info->u.pLayerInfo);
4423     PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
4424     PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
4425     PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(my_instance_data->instance, "vkCreateDevice");
4426     if (fpCreateDevice == NULL) {
4427         return VK_ERROR_INITIALIZATION_FAILED;
4428     }
4429 
4430     // Advance the link info for the next element on the chain
4431     chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
4432 
4433     VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
4434     if (result != VK_SUCCESS) {
4435         return result;
4436     }
4437 
4438     std::unique_lock<std::mutex> lock(global_lock);
4439     layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map);
4440 
4441     my_device_data->instance_data = my_instance_data;
4442     // Setup device dispatch table
4443     layer_init_device_dispatch_table(*pDevice, &my_device_data->dispatch_table, fpGetDeviceProcAddr);
4444     my_device_data->device = *pDevice;
4445 
4446     my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);
4447     checkDeviceRegisterExtensions(pCreateInfo, *pDevice);
4448     // Get physical device limits for this device
4449     my_instance_data->dispatch_table.GetPhysicalDeviceProperties(gpu, &(my_device_data->phys_dev_properties.properties));
4450     uint32_t count;
4451     my_instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
4452     my_device_data->phys_dev_properties.queue_family_properties.resize(count);
4453     my_instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(
4454         gpu, &count, &my_device_data->phys_dev_properties.queue_family_properties[0]);
4455     // TODO: device limits should make sure these are compatible
4456     if (pCreateInfo->pEnabledFeatures) {
4457         my_device_data->enabled_features = *pCreateInfo->pEnabledFeatures;
4458     } else {
4459         memset(&my_device_data->enabled_features, 0, sizeof(VkPhysicalDeviceFeatures));
4460     }
4461     // Store physical device mem limits into device layer_data struct
4462     my_instance_data->dispatch_table.GetPhysicalDeviceMemoryProperties(gpu, &my_device_data->phys_dev_mem_props);
4463     lock.unlock();
4464 
4465     ValidateLayerOrdering(*pCreateInfo);
4466 
4467     return result;
4468 }
4469 
4470 // prototype
4471 VKAPI_ATTR void VKAPI_CALL DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
4472     // TODOSC : Shouldn't need any customization here
4473     bool skip = false;
4474     dispatch_key key = get_dispatch_key(device);
4475     layer_data *dev_data = get_my_data_ptr(key, layer_data_map);
4476     // Free all the memory
4477     std::unique_lock<std::mutex> lock(global_lock);
4478     deletePipelines(dev_data);
4479     dev_data->renderPassMap.clear();
4480     deleteCommandBuffers(dev_data);
4481     // This will also delete all sets in the pool & remove them from setMap
4482     deletePools(dev_data);
4483     // All sets should be removed
4484     assert(dev_data->setMap.empty());
4485     for (auto del_layout : dev_data->descriptorSetLayoutMap) {
4486         delete del_layout.second;
4487     }
4488     dev_data->descriptorSetLayoutMap.clear();
4489     dev_data->imageViewMap.clear();
4490     dev_data->imageMap.clear();
4491     dev_data->imageSubresourceMap.clear();
4492     dev_data->imageLayoutMap.clear();
4493     dev_data->bufferViewMap.clear();
4494     dev_data->bufferMap.clear();
4495     // Queues persist until device is destroyed
4496     dev_data->queueMap.clear();
4497     log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
4498             (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "Printing List details prior to vkDestroyDevice()");
4499     log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
4500             (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "================================================");
4501     print_mem_list(dev_data);
4502     printCBList(dev_data);
4503     // Report any memory leaks
4504     DEVICE_MEM_INFO *pInfo = NULL;
4505     if (!dev_data->memObjMap.empty()) {
4506         for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
4507             pInfo = (*ii).second.get();
4508             if (pInfo->alloc_info.allocationSize != 0) {
4509                 // Valid Usage: All child objects created on device must have been destroyed prior to destroying device
4510                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
4511                                 (uint64_t)pInfo->mem, __LINE__, MEMTRACK_MEMORY_LEAK, "MEM",
4512                                 "Mem Object 0x%" PRIx64 " has not been freed. You should clean up this memory by calling "
4513                                 "vkFreeMemory(0x%" PRIx64 ") prior to vkDestroyDevice().",
4514                                 (uint64_t)(pInfo->mem), (uint64_t)(pInfo->mem));
4515             }
4516         }
4517     }
4518     layer_debug_report_destroy_device(device);
4519     lock.unlock();
4520 
4521 #if DISPATCH_MAP_DEBUG
4522     fprintf(stderr, "Device: 0x%p, key: 0x%p\n", device, key);
4523 #endif
4524     if (!skip) {
4525         dev_data->dispatch_table.DestroyDevice(device, pAllocator);
4526         layer_data_map.erase(key);
4527     }
4528 }
4529 
4530 static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};
4531 
4532 // This validates that the initial layout specified in the command buffer for
4533 // the IMAGE is the same as the global IMAGE layout, and then updates the global
4534 // layout to the final layout the command buffer leaves the image in.
4535 static bool ValidateCmdBufImageLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
4536     bool skip_call = false;
4537     for (auto cb_image_data : pCB->imageLayoutMap) {
4538         VkImageLayout imageLayout;
4539         if (!FindLayout(dev_data, cb_image_data.first, imageLayout)) {
4540             skip_call |=
4541                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4542                         __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot submit cmd buffer using deleted image 0x%" PRIx64 ".",
4543                         reinterpret_cast<const uint64_t &>(cb_image_data.first));
4544         } else {
4545             if (cb_image_data.second.initialLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
4546                 // TODO: Set memory invalid which is in mem_tracker currently
4547             } else if (imageLayout != cb_image_data.second.initialLayout) {
4548                 if (cb_image_data.first.hasSubresource) {
4549                     skip_call |= log_msg(
4550                         dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4551                         reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
4552                         "Cannot submit cmd buffer using image (0x%" PRIx64 ") [sub-resource: aspectMask 0x%X array layer %u, mip level %u], "
4553                         "with layout %s when first use is %s.",
4554                         reinterpret_cast<const uint64_t &>(cb_image_data.first.image), cb_image_data.first.subresource.aspectMask,
4555                                 cb_image_data.first.subresource.arrayLayer,
4556                                 cb_image_data.first.subresource.mipLevel, string_VkImageLayout(imageLayout),
4557                         string_VkImageLayout(cb_image_data.second.initialLayout));
4558                 } else {
4559                     skip_call |= log_msg(
4560                         dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4561                         reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
4562                         "Cannot submit cmd buffer using image (0x%" PRIx64 ") with layout %s when "
4563                         "first use is %s.",
4564                         reinterpret_cast<const uint64_t &>(cb_image_data.first.image), string_VkImageLayout(imageLayout),
4565                         string_VkImageLayout(cb_image_data.second.initialLayout));
4566                 }
4567             }
4568             SetLayout(dev_data, cb_image_data.first, cb_image_data.second.layout);
4569         }
4570     }
4571     return skip_call;
4572 }
4573 
4574 // Loop through bound objects and increment their in_use counts
4575 //  For any unknown objects, flag an error
4576 static bool ValidateAndIncrementBoundObjects(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
4577     bool skip = false;
4578     DRAW_STATE_ERROR error_code = DRAWSTATE_NONE;
4579     BASE_NODE *base_obj = nullptr;
4580     for (auto obj : cb_node->object_bindings) {
4581         switch (obj.type) {
4582         case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT: {
4583             base_obj = getSetNode(dev_data, reinterpret_cast<VkDescriptorSet &>(obj.handle));
4584             error_code = DRAWSTATE_INVALID_DESCRIPTOR_SET;
4585             break;
4586         }
4587         case VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT: {
4588             base_obj = getSamplerState(dev_data, reinterpret_cast<VkSampler &>(obj.handle));
4589             error_code = DRAWSTATE_INVALID_SAMPLER;
4590             break;
4591         }
4592         case VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT: {
4593             base_obj = getQueryPoolNode(dev_data, reinterpret_cast<VkQueryPool &>(obj.handle));
4594             error_code = DRAWSTATE_INVALID_QUERY_POOL;
4595             break;
4596         }
4597         case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT: {
4598             base_obj = getPipelineState(dev_data, reinterpret_cast<VkPipeline &>(obj.handle));
4599             error_code = DRAWSTATE_INVALID_PIPELINE;
4600             break;
4601         }
4602         case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
4603             base_obj = getBufferNode(dev_data, reinterpret_cast<VkBuffer &>(obj.handle));
4604             error_code = DRAWSTATE_INVALID_BUFFER;
4605             break;
4606         }
4607         case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT: {
4608             base_obj = getBufferViewState(dev_data, reinterpret_cast<VkBufferView &>(obj.handle));
4609             error_code = DRAWSTATE_INVALID_BUFFER_VIEW;
4610             break;
4611         }
4612         case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
4613             base_obj = getImageState(dev_data, reinterpret_cast<VkImage &>(obj.handle));
4614             error_code = DRAWSTATE_INVALID_IMAGE;
4615             break;
4616         }
4617         case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT: {
4618             base_obj = getImageViewState(dev_data, reinterpret_cast<VkImageView &>(obj.handle));
4619             error_code = DRAWSTATE_INVALID_IMAGE_VIEW;
4620             break;
4621         }
4622         case VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT: {
4623             base_obj = getEventNode(dev_data, reinterpret_cast<VkEvent &>(obj.handle));
4624             error_code = DRAWSTATE_INVALID_EVENT;
4625             break;
4626         }
4627         case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT: {
4628             base_obj = getDescriptorPoolState(dev_data, reinterpret_cast<VkDescriptorPool &>(obj.handle));
4629             error_code = DRAWSTATE_INVALID_DESCRIPTOR_POOL;
4630             break;
4631         }
4632         case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT: {
4633             base_obj = getCommandPoolNode(dev_data, reinterpret_cast<VkCommandPool &>(obj.handle));
4634             error_code = DRAWSTATE_INVALID_COMMAND_POOL;
4635             break;
4636         }
4637         case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT: {
4638             base_obj = getFramebufferState(dev_data, reinterpret_cast<VkFramebuffer &>(obj.handle));
4639             error_code = DRAWSTATE_INVALID_FRAMEBUFFER;
4640             break;
4641         }
4642         case VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT: {
4643             base_obj = getRenderPassState(dev_data, reinterpret_cast<VkRenderPass &>(obj.handle));
4644             error_code = DRAWSTATE_INVALID_RENDERPASS;
4645             break;
4646         }
4647         case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT: {
4648             base_obj = getMemObjInfo(dev_data, reinterpret_cast<VkDeviceMemory &>(obj.handle));
4649             error_code = DRAWSTATE_INVALID_DEVICE_MEMORY;
4650             break;
4651         }
4652         default:
4653             // TODO : Merge handling of other objects types into this code
4654             break;
4655         }
4656         if (!base_obj) {
4657             skip |=
4658                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj.type, obj.handle, __LINE__, error_code, "DS",
4659                         "Cannot submit cmd buffer using deleted %s 0x%" PRIx64 ".", object_type_to_string(obj.type), obj.handle);
4660         } else {
4661             base_obj->in_use.fetch_add(1);
4662         }
4663     }
4664     return skip;
4665 }
4666 
4667 // Track which resources are in-flight by atomically incrementing their "in_use" count
4668 static bool validateAndIncrementResources(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
4669     bool skip_call = false;
4670 
4671     cb_node->in_use.fetch_add(1);
4672     dev_data->globalInFlightCmdBuffers.insert(cb_node->commandBuffer);
4673 
4674     // First Increment for all "generic" objects bound to cmd buffer, followed by special-case objects below
4675     skip_call |= ValidateAndIncrementBoundObjects(dev_data, cb_node);
4676     // TODO : We should be able to remove the NULL look-up checks from the code below as long as
4677     //  all the corresponding cases are verified to cause CB_INVALID state and the CB_INVALID state
4678     //  should then be flagged prior to calling this function
4679     for (auto drawDataElement : cb_node->drawData) {
4680         for (auto buffer : drawDataElement.buffers) {
4681             auto buffer_node = getBufferNode(dev_data, buffer);
4682             if (!buffer_node) {
4683                 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
4684                                      (uint64_t)(buffer), __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
4685                                      "Cannot submit cmd buffer using deleted buffer 0x%" PRIx64 ".", (uint64_t)(buffer));
4686             } else {
4687                 buffer_node->in_use.fetch_add(1);
4688             }
4689         }
4690     }
4691     for (auto event : cb_node->writeEventsBeforeWait) {
4692         auto event_state = getEventNode(dev_data, event);
4693         if (event_state)
4694             event_state->write_in_use++;
4695     }
4696     return skip_call;
4697 }
4698 
4699 // Note: This function assumes that the global lock is held by the calling
4700 // thread.
4701 // TODO: untangle this.
4702 static bool cleanInFlightCmdBuffer(layer_data *my_data, VkCommandBuffer cmdBuffer) {
4703     bool skip_call = false;
4704     GLOBAL_CB_NODE *pCB = getCBNode(my_data, cmdBuffer);
4705     if (pCB) {
4706         for (auto queryEventsPair : pCB->waitedEventsBeforeQueryReset) {
4707             for (auto event : queryEventsPair.second) {
4708                 if (my_data->eventMap[event].needsSignaled) {
4709                     skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4710                                          VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, 0, DRAWSTATE_INVALID_QUERY, "DS",
4711                                          "Cannot get query results on queryPool 0x%" PRIx64
4712                                          " with index %d which was guarded by unsignaled event 0x%" PRIx64 ".",
4713                                          (uint64_t)(queryEventsPair.first.pool), queryEventsPair.first.index, (uint64_t)(event));
4714                 }
4715             }
4716         }
4717     }
4718     return skip_call;
4719 }
4720 
4721 // TODO: nuke this completely.
4722 // Decrement cmd_buffer in_use and if it goes to 0 remove cmd_buffer from globalInFlightCmdBuffers
4723 static inline void removeInFlightCmdBuffer(layer_data *dev_data, VkCommandBuffer cmd_buffer) {
4724     // Pull it off of global list initially, but if we find it in any other queue list, add it back in
4725     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmd_buffer);
4726     pCB->in_use.fetch_sub(1);
4727     if (!pCB->in_use.load()) {
4728         dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
4729     }
4730 }
4731 
4732 // Decrement in-use count for objects bound to command buffer
4733 static void DecrementBoundResources(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
4734     BASE_NODE *base_obj = nullptr;
4735     for (auto obj : cb_node->object_bindings) {
4736         base_obj = GetStateStructPtrFromObject(dev_data, obj);
4737         if (base_obj) {
4738             base_obj->in_use.fetch_sub(1);
4739         }
4740     }
4741 }
4742 
4743 static bool RetireWorkOnQueue(layer_data *dev_data, QUEUE_NODE *pQueue, uint64_t seq)
4744 {
4745     bool skip_call = false; // TODO: extract everything that might fail to precheck
4746     std::unordered_map<VkQueue, uint64_t> otherQueueSeqs;
4747 
4748     // Roll this queue forward, one submission at a time.
4749     while (pQueue->seq < seq) {
4750         auto & submission = pQueue->submissions.front();
4751 
4752         for (auto & wait : submission.waitSemaphores) {
4753             auto pSemaphore = getSemaphoreNode(dev_data, wait.semaphore);
4754             pSemaphore->in_use.fetch_sub(1);
4755             auto & lastSeq = otherQueueSeqs[wait.queue];
4756             lastSeq = std::max(lastSeq, wait.seq);
4757         }
4758 
4759         for (auto & semaphore : submission.signalSemaphores) {
4760             auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
4761             pSemaphore->in_use.fetch_sub(1);
4762         }
4763 
4764         for (auto cb : submission.cbs) {
4765             auto cb_node = getCBNode(dev_data, cb);
4766             // First perform decrement on general case bound objects
4767             DecrementBoundResources(dev_data, cb_node);
4768             for (auto drawDataElement : cb_node->drawData) {
4769                 for (auto buffer : drawDataElement.buffers) {
4770                     auto buffer_node = getBufferNode(dev_data, buffer);
4771                     if (buffer_node) {
4772                         buffer_node->in_use.fetch_sub(1);
4773                     }
4774                 }
4775             }
4776             for (auto event : cb_node->writeEventsBeforeWait) {
4777                 auto eventNode = dev_data->eventMap.find(event);
4778                 if (eventNode != dev_data->eventMap.end()) {
4779                     eventNode->second.write_in_use--;
4780                 }
4781             }
4782             for (auto queryStatePair : cb_node->queryToStateMap) {
4783                 dev_data->queryToStateMap[queryStatePair.first] = queryStatePair.second;
4784             }
4785             for (auto eventStagePair : cb_node->eventToStageMap) {
4786                 dev_data->eventMap[eventStagePair.first].stageMask = eventStagePair.second;
4787             }
4788 
4789             skip_call |= cleanInFlightCmdBuffer(dev_data, cb);
4790             removeInFlightCmdBuffer(dev_data, cb);
4791         }
4792 
4793         auto pFence = getFenceNode(dev_data, submission.fence);
4794         if (pFence) {
4795             pFence->state = FENCE_RETIRED;
4796         }
4797 
4798         pQueue->submissions.pop_front();
4799         pQueue->seq++;
4800     }
4801 
4802     // Roll other queues forward to the highest seq we saw a wait for
4803     for (auto qs : otherQueueSeqs) {
4804         skip_call |= RetireWorkOnQueue(dev_data, getQueueNode(dev_data, qs.first), qs.second);
4805     }
4806 
4807     return skip_call;
4808 }
4809 
4810 
4811 // Submit a fence to a queue, delimiting previous fences and previous untracked
4812 // work by it.
4813 static void
4814 SubmitFence(QUEUE_NODE *pQueue, FENCE_NODE *pFence, uint64_t submitCount)
4815 {
4816     pFence->state = FENCE_INFLIGHT;
4817     pFence->signaler.first = pQueue->queue;
4818     pFence->signaler.second = pQueue->seq + pQueue->submissions.size() + submitCount;
4819 }
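// Worked example of the sequence bookkeeping (editorial, not in the original): if the
// queue has retired 10 submissions (pQueue->seq == 10), 2 are still pending, and this
// vkQueueSubmit() adds 3 more, the fence's signaler.second becomes 10 + 2 + 3 == 15;
// RetireWorkOnQueue() marks the fence FENCE_RETIRED once the queue rolls forward past
// that submission.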
4820 
4821 static bool validateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
4822     bool skip_call = false;
4823     if (dev_data->globalInFlightCmdBuffers.count(pCB->commandBuffer) &&
4824         !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
4825         skip_call |=
4826             log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4827                     __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
4828                     "Command Buffer 0x%" PRIx64 " is already in use and is not marked for simultaneous use.",
4829                     reinterpret_cast<uint64_t>(pCB->commandBuffer));
4830     }
4831     return skip_call;
4832 }
4833 
4834 static bool validateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const char *call_source) {
4835     bool skip = false;
4836     if (dev_data->instance_data->disabled.command_buffer_state)
4837         return skip;
4838     // Validate ONE_TIME_SUBMIT_BIT CB is not being submitted more than once
4839     if ((pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) && (pCB->submitCount > 1)) {
4840         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4841                         __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
4842                         "Commandbuffer 0x%" PRIxLEAST64 " was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT "
4843                         "set, but has been submitted 0x%" PRIxLEAST64 " times.",
4844                         (uint64_t)(pCB->commandBuffer), pCB->submitCount);
4845     }
4846     // Validate that cmd buffers have been updated
4847     if (CB_RECORDED != pCB->state) {
4848         if (CB_INVALID == pCB->state) {
4849             // Inform app of reason CB invalid
4850             for (auto obj : pCB->broken_bindings) {
4851                 const char *type_str = object_type_to_string(obj.type);
4852                 // Descriptor sets are a special case that can be either destroyed or updated to invalidate a CB
4853                 const char *cause_str =
4854                     (obj.type == VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT) ? "destroyed or updated" : "destroyed";
4855 
4856                 skip |=
4857                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4858                             reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4859                             "You are submitting command buffer 0x%" PRIxLEAST64 " that is invalid because bound %s 0x%" PRIxLEAST64
4860                             " was %s.",
4861                             reinterpret_cast<uint64_t &>(pCB->commandBuffer), type_str, obj.handle, cause_str);
4862             }
4863         } else { // Flag error for using CB w/o vkEndCommandBuffer() called
4864             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4865                             (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_NO_END_COMMAND_BUFFER, "DS",
4866                             "You must call vkEndCommandBuffer() on command buffer 0x%" PRIxLEAST64 " before this call to %s!",
4867                             reinterpret_cast<uint64_t &>(pCB->commandBuffer), call_source);
4868         }
4869     }
4870     return skip;
4871 }
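// Example of the ONE_TIME_SUBMIT check above (editorial): a command buffer begun with
// VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT and passed to vkQueueSubmit() a second
// time without re-recording trips DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION,
// since QueueSubmit() below increments submitCount on every submission.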
4872 
4873 // Validate that queueFamilyIndices of primary command buffers match this queue
4874 // Secondary command buffers were previously validated in vkCmdExecuteCommands().
4875 static bool validateQueueFamilyIndices(layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkQueue queue) {
4876     bool skip_call = false;
4877     auto pPool = getCommandPoolNode(dev_data, pCB->createInfo.commandPool);
4878     auto queue_node = getQueueNode(dev_data, queue);
4879 
4880     if (pPool && queue_node && (pPool->queueFamilyIndex != queue_node->queueFamilyIndex)) {
4881         skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4882             reinterpret_cast<uint64_t>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_QUEUE_FAMILY, "DS",
4883             "vkQueueSubmit: Primary command buffer 0x%" PRIxLEAST64
4884             " created in queue family %d is being submitted on queue 0x%" PRIxLEAST64 " from queue family %d.",
4885             reinterpret_cast<uint64_t>(pCB->commandBuffer), pPool->queueFamilyIndex,
4886             reinterpret_cast<uint64_t>(queue), queue_node->queueFamilyIndex);
4887     }
4888 
4889     return skip_call;
4890 }
4891 
4892 static bool validatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
4893     // Track in-use for resources off of primary and any secondary CBs
4894     bool skip_call = false;
4895 
4896     // If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing
4897     // on device
4898     skip_call |= validateCommandBufferSimultaneousUse(dev_data, pCB);
4899 
4900     skip_call |= validateAndIncrementResources(dev_data, pCB);
4901 
4902     if (!pCB->secondaryCommandBuffers.empty()) {
4903         for (auto secondaryCmdBuffer : pCB->secondaryCommandBuffers) {
4904             GLOBAL_CB_NODE *pSubCB = getCBNode(dev_data, secondaryCmdBuffer);
4905             skip_call |= validateAndIncrementResources(dev_data, pSubCB);
4906             if ((pSubCB->primaryCommandBuffer != pCB->commandBuffer) &&
4907                 !(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
4908                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4909                         __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
4910                         "Commandbuffer 0x%" PRIxLEAST64 " was submitted with secondary buffer 0x%" PRIxLEAST64
4911                         " but that buffer has subsequently been bound to "
4912                         "primary cmd buffer 0x%" PRIxLEAST64
4913                         " and it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.",
4914                         reinterpret_cast<uint64_t>(pCB->commandBuffer), reinterpret_cast<uint64_t>(secondaryCmdBuffer),
4915                         reinterpret_cast<uint64_t>(pSubCB->primaryCommandBuffer));
4916             }
4917         }
4918     }
4919 
4920     skip_call |= validateCommandBufferState(dev_data, pCB, "vkQueueSubmit()");
4921 
4922     return skip_call;
4923 }
4924 
4925 static bool
4926 ValidateFenceForSubmit(layer_data *dev_data, FENCE_NODE *pFence)
4927 {
4928     bool skip_call = false;
4929 
4930     if (pFence) {
4931         if (pFence->state == FENCE_INFLIGHT) {
4932             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
4933                                  (uint64_t)(pFence->fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
4934                                  "Fence 0x%" PRIx64 " is already in use by another submission.", (uint64_t)(pFence->fence));
4935         }
4936 
4937         else if (pFence->state == FENCE_RETIRED) {
4938             skip_call |=
4939                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
4940                         reinterpret_cast<uint64_t &>(pFence->fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
4941                         "Fence 0x%" PRIxLEAST64 " submitted in SIGNALED state.  Fences must be reset before being submitted",
4942                         reinterpret_cast<uint64_t &>(pFence->fence));
4943         }
4944     }
4945 
4946     return skip_call;
4947 }
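// App-side sketch that avoids both fence errors above (illustrative): a fence must be
// neither in flight nor still signaled when submitted, so a typical reuse pattern is
//   vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);
//   vkResetFences(device, 1, &fence);
//   vkQueueSubmit(queue, 1, &submit_info, fence);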
4948 
4949 
4950 VKAPI_ATTR VkResult VKAPI_CALL
4951 QueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
4952     bool skip_call = false;
4953     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
4954     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
4955     std::unique_lock<std::mutex> lock(global_lock);
4956 
4957     auto pQueue = getQueueNode(dev_data, queue);
4958     auto pFence = getFenceNode(dev_data, fence);
4959     skip_call |= ValidateFenceForSubmit(dev_data, pFence);
4960 
4961     if (skip_call) {
4962         return VK_ERROR_VALIDATION_FAILED_EXT;
4963     }
4964 
4965     // TODO : Review these old print functions and clean up as appropriate
4966     print_mem_list(dev_data);
4967     printCBList(dev_data);
4968 
4969     // Mark the fence in-use.
4970     if (pFence) {
4971         SubmitFence(pQueue, pFence, std::max(1u, submitCount));
4972     }
4973 
4974     // Now verify each individual submit
4975     for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
4976         const VkSubmitInfo *submit = &pSubmits[submit_idx];
4977         vector<SEMAPHORE_WAIT> semaphore_waits;
4978         vector<VkSemaphore> semaphore_signals;
4979         for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
4980             VkSemaphore semaphore = submit->pWaitSemaphores[i];
4981             auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
4982             if (pSemaphore) {
4983                 if (pSemaphore->signaled) {
4984                     if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
4985                         semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
4986                         pSemaphore->in_use.fetch_add(1);
4987                     }
4988                     pSemaphore->signaler.first = VK_NULL_HANDLE;
4989                     pSemaphore->signaled = false;
4990                 } else {
4991                     skip_call |=
4992                         log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
4993                                 reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
4994                                 "Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
4995                                 reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
4996                 }
4997             }
4998         }
4999         for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
5000             VkSemaphore semaphore = submit->pSignalSemaphores[i];
5001             auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
5002             if (pSemaphore) {
5003                 if (pSemaphore->signaled) {
5004                     skip_call |=
5005                         log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
5006                                 reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
5007                                 "Queue 0x%" PRIx64 " is signaling semaphore 0x%" PRIx64
5008                                 " that has already been signaled but not waited on by queue 0x%" PRIx64 ".",
5009                                 reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore),
5010                                 reinterpret_cast<uint64_t &>(pSemaphore->signaler.first));
5011                 } else {
5012                     pSemaphore->signaler.first = queue;
5013                     pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
5014                     pSemaphore->signaled = true;
5015                     pSemaphore->in_use.fetch_add(1);
5016                     semaphore_signals.push_back(semaphore);
5017                 }
5018             }
5019         }
5020 
5021         std::vector<VkCommandBuffer> cbs;
5022 
5023         for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
5024             auto cb_node = getCBNode(dev_data, submit->pCommandBuffers[i]);
5025             skip_call |= ValidateCmdBufImageLayouts(dev_data, cb_node);
5026             if (cb_node) {
5027                 cbs.push_back(submit->pCommandBuffers[i]);
5028                 for (auto secondaryCmdBuffer : cb_node->secondaryCommandBuffers) {
5029                     cbs.push_back(secondaryCmdBuffer);
5030                 }
5031 
5032                 cb_node->submitCount++; // increment submit count
5033                 skip_call |= validatePrimaryCommandBufferState(dev_data, cb_node);
5034                 skip_call |= validateQueueFamilyIndices(dev_data, cb_node, queue);
5035                 // Potential early exit here as bad object state may crash in delayed function calls
5036                 if (skip_call)
5037                     return result;
5038                 // Call submit-time functions to validate/update state
5039                 for (auto &function : cb_node->validate_functions) {
5040                     skip_call |= function();
5041                 }
5042                 for (auto &function : cb_node->eventUpdates) {
5043                     skip_call |= function(queue);
5044                 }
5045                 for (auto &function : cb_node->queryUpdates) {
5046                     skip_call |= function(queue);
5047                 }
5048             }
5049         }
5050 
5051         pQueue->submissions.emplace_back(cbs, semaphore_waits, semaphore_signals,
5052                                          submit_idx == submitCount - 1 ? fence : VK_NULL_HANDLE);
5053     }
5054 
5055     if (pFence && !submitCount) {
5056         // If no submissions, but just dropping a fence on the end of the queue,
5057         // record an empty submission with just the fence, so we can determine
5058         // its completion.
5059         pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(),
5060                                          std::vector<SEMAPHORE_WAIT>(),
5061                                          std::vector<VkSemaphore>(),
5062                                          fence);
5063     }
5064 
5065     lock.unlock();
5066     if (!skip_call)
5067         result = dev_data->dispatch_table.QueueSubmit(queue, submitCount, pSubmits, fence);
5068 
5069     return result;
5070 }
5071 
5072 VKAPI_ATTR VkResult VKAPI_CALL AllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
5073                                               const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
5074     layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5075     VkResult result = my_data->dispatch_table.AllocateMemory(device, pAllocateInfo, pAllocator, pMemory);
5076     // TODO : Track allocations and overall size here
5077     std::lock_guard<std::mutex> lock(global_lock);
5078     add_mem_obj_info(my_data, device, *pMemory, pAllocateInfo);
5079     print_mem_list(my_data);
5080     return result;
5081 }
5082 
5083 // For the given object node, if it is in use, flag a validation error and return the callback result, else return false
5084 bool ValidateObjectNotInUse(const layer_data *dev_data, BASE_NODE *obj_node, VK_OBJECT obj_struct,
5085                             UNIQUE_VALIDATION_ERROR_CODE error_code) {
5086     if (dev_data->instance_data->disabled.object_in_use)
5087         return false;
5088     bool skip = false;
5089     if (obj_node->in_use.load()) {
5090         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_struct.type, obj_struct.handle, __LINE__,
5091                         error_code, "DS", "Cannot delete %s 0x%" PRIx64 " that is currently in use by a command buffer. %s",
5092                         object_type_to_string(obj_struct.type), obj_struct.handle, validation_error_map[error_code]);
5093     }
5094     return skip;
5095 }
5096 
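// FreeMemory (and several of the Destroy* entry points further below) uses a Pre/Post helper pair: the
// PreCallValidate* helper looks up the state object and, unless the check is disabled, verifies it is not
// still in use; the call is then forwarded down the chain with the global lock released; finally the
// PostCallRecord* helper invalidates any command buffers bound to the object and erases the layer's bookkeeping.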
5097 static bool PreCallValidateFreeMemory(layer_data *dev_data, VkDeviceMemory mem, DEVICE_MEM_INFO **mem_info, VK_OBJECT *obj_struct) {
5098     *mem_info = getMemObjInfo(dev_data, mem);
5099     *obj_struct = {reinterpret_cast<uint64_t &>(mem), VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT};
5100     if (dev_data->instance_data->disabled.free_memory)
5101         return false;
5102     bool skip = false;
5103     if (*mem_info) {
5104         skip |= ValidateObjectNotInUse(dev_data, *mem_info, *obj_struct, VALIDATION_ERROR_00620);
5105     }
5106     return skip;
5107 }
5108 
5109 static void PostCallRecordFreeMemory(layer_data *dev_data, VkDeviceMemory mem, DEVICE_MEM_INFO *mem_info, VK_OBJECT obj_struct) {
5110     // Clear mem binding for any bound objects
5111     for (auto obj : mem_info->obj_bindings) {
5112         log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, obj.type, obj.handle, __LINE__, MEMTRACK_FREED_MEM_REF,
5113                 "MEM", "VK Object 0x%" PRIxLEAST64 " still has a reference to mem obj 0x%" PRIxLEAST64, obj.handle,
5114                 (uint64_t)mem_info->mem);
5115         switch (obj.type) {
5116         case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
5117             auto image_state = getImageState(dev_data, reinterpret_cast<VkImage &>(obj.handle));
5118             assert(image_state); // Any destroyed images should already be removed from bindings
5119             image_state->binding.mem = MEMORY_UNBOUND;
5120             break;
5121         }
5122         case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
5123             auto buff_node = getBufferNode(dev_data, reinterpret_cast<VkBuffer &>(obj.handle));
5124             assert(buff_node); // Any destroyed buffers should already be removed from bindings
5125             buff_node->binding.mem = MEMORY_UNBOUND;
5126             break;
5127         }
5128         default:
5129             // Should only have buffer or image objects bound to memory
5130             assert(0);
5131         }
5132     }
5133     // Any bound cmd buffers are now invalid
5134     invalidateCommandBuffers(mem_info->cb_bindings, obj_struct);
5135     dev_data->memObjMap.erase(mem);
5136 }
5137 
5138 VKAPI_ATTR void VKAPI_CALL FreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
5139     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5140     DEVICE_MEM_INFO *mem_info = nullptr;
5141     VK_OBJECT obj_struct;
5142     std::unique_lock<std::mutex> lock(global_lock);
5143     bool skip = PreCallValidateFreeMemory(dev_data, mem, &mem_info, &obj_struct);
5144     if (!skip) {
5145         lock.unlock();
5146         dev_data->dispatch_table.FreeMemory(device, mem, pAllocator);
5147         lock.lock();
5148         PostCallRecordFreeMemory(dev_data, mem, mem_info, obj_struct);
5149     }
5150 }
5151 
5152 // Validate that given Map memory range is valid. This means that the memory should not already be mapped,
5153 //  and that the size of the map range should be:
5154 //  1. Not zero
5155 //  2. Within the size of the memory allocation
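//  For example, with allocationSize = 256: (offset = 64, size = 192) and (offset = 0, size = VK_WHOLE_SIZE)
//  are accepted, while (offset = 0, size = 0), (offset = 128, size = 192) and (offset = 256, size = VK_WHOLE_SIZE)
//  are each flagged.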
5156 static bool ValidateMapMemRange(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
5157     bool skip_call = false;
5158 
5159     if (size == 0) {
5160         skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5161                             (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
5162                             "VkMapMemory: Attempting to map memory range of size zero");
5163     }
5164 
5165     auto mem_element = my_data->memObjMap.find(mem);
5166     if (mem_element != my_data->memObjMap.end()) {
5167         auto mem_info = mem_element->second.get();
5168         // It is an application error to call VkMapMemory on an object that is already mapped
5169         if (mem_info->mem_range.size != 0) {
5170             skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5171                                 (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
5172                                 "VkMapMemory: Attempting to map memory on an already-mapped object 0x%" PRIxLEAST64, (uint64_t)mem);
5173         }
5174 
5175         // Validate that offset + size is within object's allocationSize
5176         if (size == VK_WHOLE_SIZE) {
5177             if (offset >= mem_info->alloc_info.allocationSize) {
5178                 skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5179                                     VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP,
5180                                     "MEM", "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64
5181                                            " with size of VK_WHOLE_SIZE oversteps total array size 0x%" PRIx64,
5182                                     offset, mem_info->alloc_info.allocationSize, mem_info->alloc_info.allocationSize);
5183             }
5184         } else {
5185             if ((offset + size) > mem_info->alloc_info.allocationSize) {
5186                 skip_call =
5187                     log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5188                             (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
5189                             "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64 " oversteps total array size 0x%" PRIx64, offset,
5190                             size + offset, mem_info->alloc_info.allocationSize);
5191             }
5192         }
5193     }
5194     return skip_call;
5195 }
5196 
5197 static void storeMemRanges(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
5198     auto mem_info = getMemObjInfo(my_data, mem);
5199     if (mem_info) {
5200         mem_info->mem_range.offset = offset;
5201         mem_info->mem_range.size = size;
5202     }
5203 }
5204 
5205 static bool deleteMemRanges(layer_data *my_data, VkDeviceMemory mem) {
5206     bool skip_call = false;
5207     auto mem_info = getMemObjInfo(my_data, mem);
5208     if (mem_info) {
5209         if (!mem_info->mem_range.size) {
5210             // Valid Usage: memory must currently be mapped
5211             skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5212                                 (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
5213                                 "Unmapping Memory without memory being mapped: mem obj 0x%" PRIxLEAST64, (uint64_t)mem);
5214         }
5215         mem_info->mem_range.size = 0;
5216         if (mem_info->shadow_copy) {
5217             free(mem_info->shadow_copy_base);
5218             mem_info->shadow_copy_base = 0;
5219             mem_info->shadow_copy = 0;
5220         }
5221     }
5222     return skip_call;
5223 }
5224 
5225 // Guard value for pad data
5226 static char NoncoherentMemoryFillValue = 0xb;
5227 
5228 static void initializeAndTrackMemory(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size,
5229                                      void **ppData) {
5230     auto mem_info = getMemObjInfo(dev_data, mem);
5231     if (mem_info) {
5232         mem_info->p_driver_data = *ppData;
5233         uint32_t index = mem_info->alloc_info.memoryTypeIndex;
5234         if (dev_data->phys_dev_mem_props.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
5235             mem_info->shadow_copy = 0;
5236         } else {
5237             if (size == VK_WHOLE_SIZE) {
5238                 size = mem_info->alloc_info.allocationSize - offset;
5239             }
5240             mem_info->shadow_pad_size = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;
5241             assert(vk_safe_modulo(mem_info->shadow_pad_size,
5242                                   dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment) == 0);
5243             // Ensure start of mapped region reflects hardware alignment constraints
5244             uint64_t map_alignment = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;
5245 
5246             // From spec: (ppData - offset) must be aligned to at least limits::minMemoryMapAlignment.
5247             uint64_t start_offset = offset % map_alignment;
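            // Resulting layout of the shadow allocation:
            //   shadow_copy_base -> [alignment slack][pad: shadow_pad_size][user data: size][pad: shadow_pad_size]
            // shadow_copy points at the first pad byte, and the pointer returned to the app (*ppData, set below)
            // is shadow_copy + shadow_pad_size. E.g. with minMemoryMapAlignment = 64 and offset = 70,
            // start_offset = 6, which preserves the (pointer - offset) alignment relationship the spec requires.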
5248             // Data passed to driver will be wrapped by a guardband of data to detect over- or under-writes.
5249             mem_info->shadow_copy_base = malloc(static_cast<size_t>(2 * mem_info->shadow_pad_size + size + map_alignment + start_offset));
5250 
5251             mem_info->shadow_copy =
5252                 reinterpret_cast<char *>((reinterpret_cast<uintptr_t>(mem_info->shadow_copy_base) + map_alignment) &
5253                                          ~(map_alignment - 1)) + start_offset;
5254             assert(vk_safe_modulo(reinterpret_cast<uintptr_t>(mem_info->shadow_copy) + mem_info->shadow_pad_size - start_offset,
5255                                   map_alignment) == 0);
5256 
5257             memset(mem_info->shadow_copy, NoncoherentMemoryFillValue, static_cast<size_t>(2 * mem_info->shadow_pad_size + size));
5258             *ppData = static_cast<char *>(mem_info->shadow_copy) + mem_info->shadow_pad_size;
5259         }
5260     }
5261 }
5262 
5263 // Verify that state for fence being waited on is appropriate. That is,
5264 //  a fence being waited on should not already be signaled and
5265 //  it should have been submitted on a queue or during acquire next image
5266 static inline bool verifyWaitFenceState(layer_data *dev_data, VkFence fence, const char *apiCall) {
5267     bool skip_call = false;
5268 
5269     auto pFence = getFenceNode(dev_data, fence);
5270     if (pFence) {
5271         if (pFence->state == FENCE_UNSIGNALED) {
5272             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5273                                  reinterpret_cast<uint64_t &>(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
5274                                  "%s called for fence 0x%" PRIxLEAST64 " which has not been submitted on a Queue or during "
5275                                  "acquire next image.",
5276                                  apiCall, reinterpret_cast<uint64_t &>(fence));
5277         }
5278     }
5279     return skip_call;
5280 }
5281 
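// Retire a fence the driver reports as signaled: if a queue submission signaled it, retire all work on that
// queue up to the recorded sequence number; otherwise (e.g. a WSI acquire signaled it) just mark the fence retired.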
5282 static bool RetireFence(layer_data *dev_data, VkFence fence) {
5283     auto pFence = getFenceNode(dev_data, fence);
5284     if (pFence->signaler.first != VK_NULL_HANDLE) {
5285         /* Fence signaller is a queue -- use this as proof that prior operations
5286          * on that queue have completed.
5287          */
5288         return RetireWorkOnQueue(dev_data,
5289                                  getQueueNode(dev_data, pFence->signaler.first),
5290                                  pFence->signaler.second);
5291     }
5292     else {
5293         /* Fence signaller is the WSI. We're not tracking what the WSI op
5294          * actually /was/ in CV yet, but we need to mark the fence as retired.
5295          */
5296         pFence->state = FENCE_RETIRED;
5297         return false;
5298     }
5299 }
5300 
5301 VKAPI_ATTR VkResult VKAPI_CALL
5302 WaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll, uint64_t timeout) {
5303     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5304     bool skip_call = false;
5305     // Verify fence status of submitted fences
5306     std::unique_lock<std::mutex> lock(global_lock);
5307     for (uint32_t i = 0; i < fenceCount; i++) {
5308         skip_call |= verifyWaitFenceState(dev_data, pFences[i], "vkWaitForFences");
5309     }
5310     lock.unlock();
5311     if (skip_call)
5312         return VK_ERROR_VALIDATION_FAILED_EXT;
5313 
5314     VkResult result = dev_data->dispatch_table.WaitForFences(device, fenceCount, pFences, waitAll, timeout);
5315 
5316     if (result == VK_SUCCESS) {
5317         lock.lock();
5318         // When we know that all fences are complete we can clean/remove their CBs
5319         if (waitAll || fenceCount == 1) {
5320             for (uint32_t i = 0; i < fenceCount; i++) {
5321                 skip_call |= RetireFence(dev_data, pFences[i]);
5322             }
5323         }
5324         // NOTE : Alternate case not handled here is when some fences have completed. In
5325         //  this case for app to guarantee which fences completed it will have to call
5326         //  vkGetFenceStatus() at which point we'll clean/remove their CBs if complete.
5327         lock.unlock();
5328     }
5329     if (skip_call)
5330         return VK_ERROR_VALIDATION_FAILED_EXT;
5331     return result;
5332 }
5333 
5334 VKAPI_ATTR VkResult VKAPI_CALL GetFenceStatus(VkDevice device, VkFence fence) {
5335     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5336     bool skip_call = false;
5337     std::unique_lock<std::mutex> lock(global_lock);
5338     skip_call = verifyWaitFenceState(dev_data, fence, "vkGetFenceStatus");
5339     lock.unlock();
5340 
5341     if (skip_call)
5342         return VK_ERROR_VALIDATION_FAILED_EXT;
5343 
5344     VkResult result = dev_data->dispatch_table.GetFenceStatus(device, fence);
5345     lock.lock();
5346     if (result == VK_SUCCESS) {
5347         skip_call |= RetireFence(dev_data, fence);
5348     }
5349     lock.unlock();
5350     if (skip_call)
5351         return VK_ERROR_VALIDATION_FAILED_EXT;
5352     return result;
5353 }
5354 
5355 VKAPI_ATTR void VKAPI_CALL GetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex,
5356                                                             VkQueue *pQueue) {
5357     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5358     dev_data->dispatch_table.GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
5359     std::lock_guard<std::mutex> lock(global_lock);
5360 
5361     // Add queue to tracking set only if it is new
5362     auto result = dev_data->queues.emplace(*pQueue);
5363     if (result.second == true) {
5364         QUEUE_NODE *pQNode = &dev_data->queueMap[*pQueue];
5365         pQNode->queue = *pQueue;
5366         pQNode->queueFamilyIndex = queueFamilyIndex;
5367         pQNode->seq = 0;
5368     }
5369 }
5370 
5371 VKAPI_ATTR VkResult VKAPI_CALL QueueWaitIdle(VkQueue queue) {
5372     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
5373     bool skip_call = false;
5374     std::unique_lock<std::mutex> lock(global_lock);
5375     auto pQueue = getQueueNode(dev_data, queue);
5376     skip_call |= RetireWorkOnQueue(dev_data, pQueue, pQueue->seq + pQueue->submissions.size());
5377     lock.unlock();
5378     if (skip_call)
5379         return VK_ERROR_VALIDATION_FAILED_EXT;
5380     VkResult result = dev_data->dispatch_table.QueueWaitIdle(queue);
5381     return result;
5382 }
5383 
5384 VKAPI_ATTR VkResult VKAPI_CALL DeviceWaitIdle(VkDevice device) {
5385     bool skip_call = false;
5386     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5387     std::unique_lock<std::mutex> lock(global_lock);
5388     for (auto & queue : dev_data->queueMap) {
5389         skip_call |= RetireWorkOnQueue(dev_data, &queue.second, queue.second.seq + queue.second.submissions.size());
5390     }
5391     lock.unlock();
5392     if (skip_call)
5393         return VK_ERROR_VALIDATION_FAILED_EXT;
5394     VkResult result = dev_data->dispatch_table.DeviceWaitIdle(device);
5395     return result;
5396 }
5397 
5398 VKAPI_ATTR void VKAPI_CALL DestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
5399     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5400     bool skip_call = false;
5401     std::unique_lock<std::mutex> lock(global_lock);
5402     auto fence_pair = dev_data->fenceMap.find(fence);
5403     if (fence_pair != dev_data->fenceMap.end()) {
5404         if (fence_pair->second.state == FENCE_INFLIGHT) {
5405             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5406                                  (uint64_t)(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS", "Fence 0x%" PRIx64 " is in use.",
5407                                  (uint64_t)(fence));
5408         }
5409         dev_data->fenceMap.erase(fence_pair);
5410     }
5411     lock.unlock();
5412 
5413     if (!skip_call)
5414         dev_data->dispatch_table.DestroyFence(device, fence, pAllocator);
5415 }
5416 
5417 VKAPI_ATTR void VKAPI_CALL
5418 DestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
5419     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5420     bool skip = false;
5421     std::unique_lock<std::mutex> lock(global_lock);
5422     auto sema_node = getSemaphoreNode(dev_data, semaphore);
5423     if (sema_node) {
5424         skip |= ValidateObjectNotInUse(dev_data, sema_node,
5425                                        {reinterpret_cast<uint64_t &>(semaphore), VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT},
5426                                        VALIDATION_ERROR_00199);
5427     }
5428     if (!skip) {
5429         dev_data->semaphoreMap.erase(semaphore);
5430         lock.unlock();
5431         dev_data->dispatch_table.DestroySemaphore(device, semaphore, pAllocator);
5432     }
5433 }
5434 
5435 static bool PreCallValidateDestroyEvent(layer_data *dev_data, VkEvent event, EVENT_STATE **event_state, VK_OBJECT *obj_struct) {
5436     *event_state = getEventNode(dev_data, event);
5437     *obj_struct = {reinterpret_cast<uint64_t &>(event), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT};
5438     if (dev_data->instance_data->disabled.destroy_event)
5439         return false;
5440     bool skip = false;
5441     if (*event_state) {
5442         skip |= ValidateObjectNotInUse(dev_data, *event_state, *obj_struct, VALIDATION_ERROR_00213);
5443     }
5444     return skip;
5445 }
5446 
5447 static void PostCallRecordDestroyEvent(layer_data *dev_data, VkEvent event, EVENT_STATE *event_state, VK_OBJECT obj_struct) {
5448     invalidateCommandBuffers(event_state->cb_bindings, obj_struct);
5449     dev_data->eventMap.erase(event);
5450 }
5451 
5452 VKAPI_ATTR void VKAPI_CALL DestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
5453     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5454     EVENT_STATE *event_state = nullptr;
5455     VK_OBJECT obj_struct;
5456     std::unique_lock<std::mutex> lock(global_lock);
5457     bool skip = PreCallValidateDestroyEvent(dev_data, event, &event_state, &obj_struct);
5458     if (!skip) {
5459         lock.unlock();
5460         dev_data->dispatch_table.DestroyEvent(device, event, pAllocator);
5461         lock.lock();
5462         PostCallRecordDestroyEvent(dev_data, event, event_state, obj_struct);
5463     }
5464 }
5465 
5466 VKAPI_ATTR void VKAPI_CALL
5467 DestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
5468     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5469     bool skip = false;
5470     std::unique_lock<std::mutex> lock(global_lock);
5471     auto qp_node = getQueryPoolNode(dev_data, queryPool);
5472     if (qp_node) {
5473         VK_OBJECT obj_struct = {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT};
5474         skip |= ValidateObjectNotInUse(dev_data, qp_node, obj_struct, VALIDATION_ERROR_01012);
5475         // Any bound cmd buffers are now invalid
5476         invalidateCommandBuffers(qp_node->cb_bindings, obj_struct);
5477     }
5478     if (!skip) {
5479         dev_data->queryPoolMap.erase(queryPool);
5480         lock.unlock();
5481         dev_data->dispatch_table.DestroyQueryPool(device, queryPool, pAllocator);
5482     }
5483 }
5484 
5485 VKAPI_ATTR VkResult VKAPI_CALL GetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery,
5486                                                    uint32_t queryCount, size_t dataSize, void *pData, VkDeviceSize stride,
5487                                                    VkQueryResultFlags flags) {
5488     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5489     unordered_map<QueryObject, vector<VkCommandBuffer>> queriesInFlight;
5490     std::unique_lock<std::mutex> lock(global_lock);
5491     for (auto cmdBuffer : dev_data->globalInFlightCmdBuffers) {
5492         auto pCB = getCBNode(dev_data, cmdBuffer);
5493         for (auto queryStatePair : pCB->queryToStateMap) {
5494             queriesInFlight[queryStatePair.first].push_back(cmdBuffer);
5495         }
5496     }
5497     bool skip_call = false;
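    // Classify each requested query: available but still attached to an in-flight command buffer (an error
    // unless that command buffer waited on events before the query reset), unavailable while in flight,
    // unavailable, or never recorded at all; each case produces a distinct validation error below.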
5498     for (uint32_t i = 0; i < queryCount; ++i) {
5499         QueryObject query = {queryPool, firstQuery + i};
5500         auto queryElement = queriesInFlight.find(query);
5501         auto queryToStateElement = dev_data->queryToStateMap.find(query);
5502         if (queryToStateElement != dev_data->queryToStateMap.end()) {
5503             // Available and in flight
5504             if (queryElement != queriesInFlight.end() && queryToStateElement != dev_data->queryToStateMap.end() &&
5505                 queryToStateElement->second) {
5506                 for (auto cmdBuffer : queryElement->second) {
5507                     auto pCB = getCBNode(dev_data, cmdBuffer);
5508                     auto queryEventElement = pCB->waitedEventsBeforeQueryReset.find(query);
5509                     if (queryEventElement == pCB->waitedEventsBeforeQueryReset.end()) {
5510                         skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5511                                              VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5512                                              "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is in flight.",
5513                                              (uint64_t)(queryPool), firstQuery + i);
5514                     } else {
5515                         for (auto event : queryEventElement->second) {
5516                             dev_data->eventMap[event].needsSignaled = true;
5517                         }
5518                     }
5519                 }
5520                 // Unavailable and in flight
5521             } else if (queryElement != queriesInFlight.end() && queryToStateElement != dev_data->queryToStateMap.end() &&
5522                        !queryToStateElement->second) {
5523                 // TODO : Can there be the same query in use by multiple command buffers in flight?
5524                 bool make_available = false;
5525                 for (auto cmdBuffer : queryElement->second) {
5526                     auto pCB = getCBNode(dev_data, cmdBuffer);
5527                     make_available |= pCB->queryToStateMap[query];
5528                 }
5529                 if (!(((flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WAIT_BIT)) && make_available)) {
5530                     skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5531                                          VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5532                                          "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
5533                                          (uint64_t)(queryPool), firstQuery + i);
5534                 }
5535                 // Unavailable
5536             } else if (queryToStateElement != dev_data->queryToStateMap.end() && !queryToStateElement->second) {
5537                 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5538                                      VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5539                                      "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
5540                                      (uint64_t)(queryPool), firstQuery + i);
5541                 // Uninitialized
5542             } else if (queryToStateElement == dev_data->queryToStateMap.end()) {
5543                 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5544                                      VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5545                                      "Cannot get query results on queryPool 0x%" PRIx64
5546                                      " with index %d as data has not been collected for this index.",
5547                                      (uint64_t)(queryPool), firstQuery + i);
5548             }
5549         }
5550     }
5551     lock.unlock();
5552     if (skip_call)
5553         return VK_ERROR_VALIDATION_FAILED_EXT;
5554     return dev_data->dispatch_table.GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride, flags);
5555 }
5556 
5557 static bool validateIdleBuffer(const layer_data *my_data, VkBuffer buffer) {
5558     bool skip_call = false;
5559     auto buffer_node = getBufferNode(my_data, buffer);
5560     if (!buffer_node) {
5561         skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
5562                              (uint64_t)(buffer), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
5563                              "Cannot free buffer 0x%" PRIxLEAST64 " that has not been allocated.", (uint64_t)(buffer));
5564     } else {
5565         if (buffer_node->in_use.load()) {
5566             skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
5567                                  (uint64_t)(buffer), __LINE__, DRAWSTATE_OBJECT_INUSE, "DS",
5568                                  "Cannot free buffer 0x%" PRIxLEAST64 " that is in use by a command buffer.", (uint64_t)(buffer));
5569         }
5570     }
5571     return skip_call;
5572 }
5573 
5574 // Return true if given ranges intersect, else false
5575 // Prereq : For both ranges, range->end - range->start > 0. This case should have already resulted
5576 //  in an error so not checking that here
5577 // pad_ranges bool indicates a linear and non-linear comparison which requires padding
5578 // In the case where padding is required, if an alias is encountered then a validation error is reported and skip_call
5579 //  may be set by the callback function so caller should merge in skip_call value if padding case is possible.
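// Example: with bufferImageGranularity = 1024, a linear range [0, 100] and a non-linear range [512, 2000]
// round down to the same granularity page, so they are treated as intersecting and the aliasing error below
// fires; when both ranges have the same linearity, pad_align stays 1 and only true byte overlap intersects.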
5580 static bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, MEMORY_RANGE const *range2, bool *skip_call) {
5581     *skip_call = false;
5582     auto r1_start = range1->start;
5583     auto r1_end = range1->end;
5584     auto r2_start = range2->start;
5585     auto r2_end = range2->end;
5586     VkDeviceSize pad_align = 1;
5587     if (range1->linear != range2->linear) {
5588         pad_align = dev_data->phys_dev_properties.properties.limits.bufferImageGranularity;
5589     }
5590     if ((r1_end & ~(pad_align - 1)) < (r2_start & ~(pad_align - 1)))
5591         return false;
5592     if ((r1_start & ~(pad_align - 1)) > (r2_end & ~(pad_align - 1)))
5593         return false;
5594 
5595     if (range1->linear != range2->linear) {
5596         // In linear vs. non-linear case, it's an error to alias
5597         const char *r1_linear_str = range1->linear ? "Linear" : "Non-linear";
5598         const char *r1_type_str = range1->image ? "image" : "buffer";
5599         const char *r2_linear_str = range2->linear ? "linear" : "non-linear";
5600         const char *r2_type_str = range2->image ? "image" : "buffer";
5601         auto obj_type = range1->image ? VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT : VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT;
5602         *skip_call |=
5603             log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, range1->handle, 0, MEMTRACK_INVALID_ALIASING,
5604                     "MEM", "%s %s 0x%" PRIx64 " is aliased with %s %s 0x%" PRIx64
5605                            " which is in violation of the Buffer-Image Granularity section of the Vulkan specification.",
5606                     r1_linear_str, r1_type_str, range1->handle, r2_linear_str, r2_type_str, range2->handle);
5607     }
5608     // Ranges intersect
5609     return true;
5610 }
5611 // Simplified rangesIntersect that calls above function to check range1 for intersection with offset & end addresses
5612 static bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, VkDeviceSize offset, VkDeviceSize end) {
5613     // Create a local MEMORY_RANGE struct to wrap offset/size
5614     MEMORY_RANGE range_wrap;
5615     // Synch linear with range1 to avoid padding and potential validation error case
5616     range_wrap.linear = range1->linear;
5617     range_wrap.start = offset;
5618     range_wrap.end = end;
5619     bool tmp_bool;
5620     return rangesIntersect(dev_data, range1, &range_wrap, &tmp_bool);
5621 }
5622 // For given mem_info, set all ranges valid that intersect [offset-end] range
5623 // TODO : For ranges where there is no alias, we may want to create new buffer ranges that are valid
5624 static void SetMemRangesValid(layer_data const *dev_data, DEVICE_MEM_INFO *mem_info, VkDeviceSize offset, VkDeviceSize end) {
5625     bool tmp_bool = false;
5626     MEMORY_RANGE map_range;
5627     map_range.linear = true;
5628     map_range.start = offset;
5629     map_range.end = end;
5630     for (auto &handle_range_pair : mem_info->bound_ranges) {
5631         if (rangesIntersect(dev_data, &handle_range_pair.second, &map_range, &tmp_bool)) {
5632             // TODO : WARN here if tmp_bool true?
5633             handle_range_pair.second.valid = true;
5634         }
5635     }
5636 }
5637 // Object with given handle is being bound to memory w/ given mem_info struct.
5638 //  Track the newly bound memory range with given memoryOffset
5639 //  Also scan any previous ranges, track aliased ranges with new range, and flag an error if a linear
5640 //  and non-linear range incorrectly overlap.
5641 // Return true if an error is flagged and the user callback returns "true", otherwise false
5642 // is_image indicates an image object, otherwise handle is for a buffer
5643 // is_linear indicates a buffer or linear image
5644 static bool InsertMemoryRange(layer_data const *dev_data, uint64_t handle, DEVICE_MEM_INFO *mem_info, VkDeviceSize memoryOffset,
5645                               VkMemoryRequirements memRequirements, bool is_image, bool is_linear) {
5646     bool skip_call = false;
5647     MEMORY_RANGE range;
5648 
5649     range.image = is_image;
5650     range.handle = handle;
5651     range.linear = is_linear;
5652     range.valid = mem_info->global_valid;
5653     range.memory = mem_info->mem;
5654     range.start = memoryOffset;
5655     range.size = memRequirements.size;
5656     range.end = memoryOffset + memRequirements.size - 1;
5657     range.aliases.clear();
5658     // Update Memory aliasing
5659     // Save aliased ranges so we can copy them into the final map entry below. We can't do it in the loop because we don't yet
5660     // have the final pointer; inserting into the map before the loop to get it could enter the loop needlessly and check the range against itself
5661     std::unordered_set<MEMORY_RANGE *> tmp_alias_ranges;
5662     for (auto &obj_range_pair : mem_info->bound_ranges) {
5663         auto check_range = &obj_range_pair.second;
5664         bool intersection_error = false;
5665         if (rangesIntersect(dev_data, &range, check_range, &intersection_error)) {
5666             skip_call |= intersection_error;
5667             range.aliases.insert(check_range);
5668             tmp_alias_ranges.insert(check_range);
5669         }
5670     }
5671     mem_info->bound_ranges[handle] = std::move(range);
5672     for (auto tmp_range : tmp_alias_ranges) {
5673         tmp_range->aliases.insert(&mem_info->bound_ranges[handle]);
5674     }
5675     if (is_image)
5676         mem_info->bound_images.insert(handle);
5677     else
5678         mem_info->bound_buffers.insert(handle);
5679 
5680     return skip_call;
5681 }
5682 
5683 static bool InsertImageMemoryRange(layer_data const *dev_data, VkImage image, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
5684                                    VkMemoryRequirements mem_reqs, bool is_linear) {
5685     return InsertMemoryRange(dev_data, reinterpret_cast<uint64_t &>(image), mem_info, mem_offset, mem_reqs, true, is_linear);
5686 }
5687 
5688 static bool InsertBufferMemoryRange(layer_data const *dev_data, VkBuffer buffer, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
5689                                     VkMemoryRequirements mem_reqs) {
5690     return InsertMemoryRange(dev_data, reinterpret_cast<uint64_t &>(buffer), mem_info, mem_offset, mem_reqs, false, true);
5691 }
5692 
5693 // Remove MEMORY_RANGE struct for give handle from bound_ranges of mem_info
5694 //  is_image indicates if handle is for image or buffer
5695 //  This function will also remove the handle-to-index mapping from the appropriate
5696 //  map and clean up any aliases for range being removed.
5697 static void RemoveMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info, bool is_image) {
5698     auto erase_range = &mem_info->bound_ranges[handle];
5699     for (auto alias_range : erase_range->aliases) {
5700         alias_range->aliases.erase(erase_range);
5701     }
5702     erase_range->aliases.clear();
5703     mem_info->bound_ranges.erase(handle);
5704     if (is_image) {
5705         mem_info->bound_images.erase(handle);
5706     } else {
5707         mem_info->bound_buffers.erase(handle);
5708     }
5709 }
5710 
5711 static void RemoveBufferMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, false); }
5712 
5713 static void RemoveImageMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, true); }
5714 
5715 VKAPI_ATTR void VKAPI_CALL DestroyBuffer(VkDevice device, VkBuffer buffer,
5716                                          const VkAllocationCallbacks *pAllocator) {
5717     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5718     std::unique_lock<std::mutex> lock(global_lock);
5719     if (!validateIdleBuffer(dev_data, buffer)) {
5720         // Clean up memory binding and range information for buffer
5721         auto buff_node = getBufferNode(dev_data, buffer);
5722         if (buff_node) {
5723             // Any bound cmd buffers are now invalid
5724             invalidateCommandBuffers(buff_node->cb_bindings,
5725                                      {reinterpret_cast<uint64_t &>(buff_node->buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT});
5726             auto mem_info = getMemObjInfo(dev_data, buff_node->binding.mem);
5727             if (mem_info) {
5728                 RemoveBufferMemoryRange(reinterpret_cast<uint64_t &>(buffer), mem_info);
5729             }
5730             ClearMemoryObjectBindings(dev_data, reinterpret_cast<uint64_t &>(buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
5731             dev_data->bufferMap.erase(buff_node->buffer);
5732         }
5733         lock.unlock();
5734         dev_data->dispatch_table.DestroyBuffer(device, buffer, pAllocator);
5735     }
5736 }
5737 
5738 static bool PreCallValidateDestroyBufferView(layer_data *dev_data, VkBufferView buffer_view, BUFFER_VIEW_STATE **buffer_view_state,
5739                                              VK_OBJECT *obj_struct) {
5740     *buffer_view_state = getBufferViewState(dev_data, buffer_view);
5741     *obj_struct = {reinterpret_cast<uint64_t &>(buffer_view), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT};
5742     if (dev_data->instance_data->disabled.destroy_buffer_view)
5743         return false;
5744     bool skip = false;
5745     if (*buffer_view_state) {
5746         skip |= ValidateObjectNotInUse(dev_data, *buffer_view_state, *obj_struct, VALIDATION_ERROR_00701);
5747     }
5748     return skip;
5749 }
5750 
5751 static void PostCallRecordDestroyBufferView(layer_data *dev_data, VkBufferView buffer_view, BUFFER_VIEW_STATE *buffer_view_state,
5752                                             VK_OBJECT obj_struct) {
5753     // Any bound cmd buffers are now invalid
5754     invalidateCommandBuffers(buffer_view_state->cb_bindings, obj_struct);
5755     dev_data->bufferViewMap.erase(buffer_view);
5756 }
5757 
5758 VKAPI_ATTR void VKAPI_CALL
5759 DestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
5760     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5761     // Common data objects used pre & post call
5762     BUFFER_VIEW_STATE *buffer_view_state = nullptr;
5763     VK_OBJECT obj_struct;
5764     std::unique_lock<std::mutex> lock(global_lock);
5765     // Validate state before calling down chain, update common data if we'll be calling down chain
5766     bool skip = PreCallValidateDestroyBufferView(dev_data, bufferView, &buffer_view_state, &obj_struct);
5767     if (!skip) {
5768         lock.unlock();
5769         dev_data->dispatch_table.DestroyBufferView(device, bufferView, pAllocator);
5770         lock.lock();
5771         PostCallRecordDestroyBufferView(dev_data, bufferView, buffer_view_state, obj_struct);
5772     }
5773 }
5774 
5775 static bool PreCallValidateDestroyImage(layer_data *dev_data, VkImage image, IMAGE_STATE **image_state, VK_OBJECT *obj_struct) {
5776     *image_state = getImageState(dev_data, image);
5777     *obj_struct = {reinterpret_cast<uint64_t &>(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT};
5778     if (dev_data->instance_data->disabled.destroy_image)
5779         return false;
5780     bool skip = false;
5781     if (*image_state) {
5782         skip |= ValidateObjectNotInUse(dev_data, *image_state, *obj_struct, VALIDATION_ERROR_00743);
5783     }
5784     return skip;
5785 }
5786 
5787 static void PostCallRecordDestroyImage(layer_data *dev_data, VkImage image, IMAGE_STATE *image_state, VK_OBJECT obj_struct) {
5788     invalidateCommandBuffers(image_state->cb_bindings, obj_struct);
5789     // Clean up memory mapping, bindings and range references for image
5790     auto mem_info = getMemObjInfo(dev_data, image_state->binding.mem);
5791     if (mem_info) {
5792         RemoveImageMemoryRange(obj_struct.handle, mem_info);
5793     }
5794     ClearMemoryObjectBindings(dev_data, obj_struct.handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
5795     // Remove image from imageMap
5796     dev_data->imageMap.erase(image);
5797 
5798     const auto &sub_entry = dev_data->imageSubresourceMap.find(image);
5799     if (sub_entry != dev_data->imageSubresourceMap.end()) {
5800         for (const auto &pair : sub_entry->second) {
5801             dev_data->imageLayoutMap.erase(pair);
5802         }
5803         dev_data->imageSubresourceMap.erase(sub_entry);
5804     }
5805 }
5806 
5807 VKAPI_ATTR void VKAPI_CALL DestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
5808     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5809     IMAGE_STATE *image_state = nullptr;
5810     VK_OBJECT obj_struct;
5811     std::unique_lock<std::mutex> lock(global_lock);
5812     bool skip = PreCallValidateDestroyImage(dev_data, image, &image_state, &obj_struct);
5813     if (!skip) {
5814         lock.unlock();
5815         dev_data->dispatch_table.DestroyImage(device, image, pAllocator);
5816         lock.lock();
5817         PostCallRecordDestroyImage(dev_data, image, image_state, obj_struct);
5818     }
5819 }
5820 
5821 static bool ValidateMemoryTypes(const layer_data *dev_data, const DEVICE_MEM_INFO *mem_info, const uint32_t memory_type_bits,
5822                                   const char *funcName) {
5823     bool skip_call = false;
5824     if (((1 << mem_info->alloc_info.memoryTypeIndex) & memory_type_bits) == 0) {
5825         skip_call = log_msg(
5826             dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5827             reinterpret_cast<const uint64_t &>(mem_info->mem), __LINE__, MEMTRACK_INVALID_MEM_TYPE, "MT",
5828             "%s(): MemoryRequirements->memoryTypeBits (0x%X) for this object type are not compatible with the memory "
5829             "type (0x%X) of this memory object 0x%" PRIx64 ".",
5830             funcName, memory_type_bits, mem_info->alloc_info.memoryTypeIndex, reinterpret_cast<const uint64_t &>(mem_info->mem));
5831     }
5832     return skip_call;
5833 }
5834 
5835 VKAPI_ATTR VkResult VKAPI_CALL
5836 BindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
5837     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5838     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5839     std::unique_lock<std::mutex> lock(global_lock);
5840     // Track objects tied to memory
5841     uint64_t buffer_handle = reinterpret_cast<uint64_t &>(buffer);
5842     bool skip_call = SetMemBinding(dev_data, mem, buffer_handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "vkBindBufferMemory");
5843     auto buffer_node = getBufferNode(dev_data, buffer);
5844     if (buffer_node) {
5845         VkMemoryRequirements memRequirements;
5846         dev_data->dispatch_table.GetBufferMemoryRequirements(device, buffer, &memRequirements);
5847         buffer_node->binding.mem = mem;
5848         buffer_node->binding.offset = memoryOffset;
5849         buffer_node->binding.size = memRequirements.size;
5850 
5851         // Track and validate bound memory range information
5852         auto mem_info = getMemObjInfo(dev_data, mem);
5853         if (mem_info) {
5854             skip_call |= InsertBufferMemoryRange(dev_data, buffer, mem_info, memoryOffset, memRequirements);
5855             skip_call |= ValidateMemoryTypes(dev_data, mem_info, memRequirements.memoryTypeBits, "BindBufferMemory");
5856         }
5857 
5858         // Validate memory requirements alignment
5859         if (vk_safe_modulo(memoryOffset, memRequirements.alignment) != 0) {
5860             skip_call |=
5861                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
5862                         __LINE__, DRAWSTATE_INVALID_BUFFER_MEMORY_OFFSET, "DS",
5863                         "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64 " but must be an integer multiple of the "
5864                         "VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
5865                         ", returned from a call to vkGetBufferMemoryRequirements with buffer",
5866                         memoryOffset, memRequirements.alignment);
5867         }
5868 
5869         // Validate device limits alignments
5870         static const VkBufferUsageFlagBits usage_list[3] = {
5871             static_cast<VkBufferUsageFlagBits>(VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT),
5872             VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT,
5873             VK_BUFFER_USAGE_STORAGE_BUFFER_BIT};
5874         static const char *memory_type[3] = {"texel",
5875                                              "uniform",
5876                                              "storage"};
5877         static const char *offset_name[3] = {
5878             "minTexelBufferOffsetAlignment",
5879             "minUniformBufferOffsetAlignment",
5880             "minStorageBufferOffsetAlignment"
5881         };
5882 
5883         // Keep this one fresh!
5884         const VkDeviceSize offset_requirement[3] = {
5885             dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment,
5886             dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment,
5887             dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment
5888         };
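        // The three parallel arrays above pair a buffer usage class with the device limit governing its required
        // memoryOffset alignment: texel buffers -> minTexelBufferOffsetAlignment, uniform buffers ->
        // minUniformBufferOffsetAlignment, storage buffers -> minStorageBufferOffsetAlignment.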
5889         VkBufferUsageFlags usage = dev_data->bufferMap[buffer].get()->createInfo.usage;
5890 
5891         for (int i = 0; i < 3; i++) {
5892             if (usage & usage_list[i]) {
5893                 if (vk_safe_modulo(memoryOffset, offset_requirement[i]) != 0) {
5894                     skip_call |=
5895                         log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
5896                                 0, __LINE__, DRAWSTATE_INVALID_TEXEL_BUFFER_OFFSET, "DS",
5897                                 "vkBindBufferMemory(): %s memoryOffset is 0x%" PRIxLEAST64 " but must be a multiple of "
5898                                 "device limit %s 0x%" PRIxLEAST64,
5899                                 memory_type[i], memoryOffset, offset_name[i], offset_requirement[i]);
5900                 }
5901             }
5902         }
5903     }
5904     print_mem_list(dev_data);
5905     lock.unlock();
5906     if (!skip_call) {
5907         result = dev_data->dispatch_table.BindBufferMemory(device, buffer, mem, memoryOffset);
5908     }
5909     return result;
5910 }
5911 
5912 VKAPI_ATTR void VKAPI_CALL
5913 GetBufferMemoryRequirements(VkDevice device, VkBuffer buffer, VkMemoryRequirements *pMemoryRequirements) {
5914     layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5915     // TODO : What to track here?
5916     //   Could potentially save returned mem requirements and validate values passed into BindBufferMemory
5917     my_data->dispatch_table.GetBufferMemoryRequirements(device, buffer, pMemoryRequirements);
5918 }
5919 
5920 VKAPI_ATTR void VKAPI_CALL
5921 GetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) {
5922     layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5923     // TODO : What to track here?
5924     //   Could potentially save returned mem requirements and validate values passed into BindImageMemory
5925     my_data->dispatch_table.GetImageMemoryRequirements(device, image, pMemoryRequirements);
5926 }
5927 
5928 static bool PreCallValidateDestroyImageView(layer_data *dev_data, VkImageView image_view, IMAGE_VIEW_STATE **image_view_state,
5929                                             VK_OBJECT *obj_struct) {
5930     *image_view_state = getImageViewState(dev_data, image_view);
5931     *obj_struct = {reinterpret_cast<uint64_t &>(image_view), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT};
5932     if (dev_data->instance_data->disabled.destroy_image_view)
5933         return false;
5934     bool skip = false;
5935     if (*image_view_state) {
5936         skip |= ValidateObjectNotInUse(dev_data, *image_view_state, *obj_struct, VALIDATION_ERROR_00776);
5937     }
5938     return skip;
5939 }
5940 
5941 static void PostCallRecordDestroyImageView(layer_data *dev_data, VkImageView image_view, IMAGE_VIEW_STATE *image_view_state,
5942                                            VK_OBJECT obj_struct) {
5943     // Any bound cmd buffers are now invalid
5944     invalidateCommandBuffers(image_view_state->cb_bindings, obj_struct);
5945     dev_data->imageViewMap.erase(image_view);
5946 }
5947 
5948 VKAPI_ATTR void VKAPI_CALL
5949 DestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
5950     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5951     // Common data objects used pre & post call
5952     IMAGE_VIEW_STATE *image_view_state = nullptr;
5953     VK_OBJECT obj_struct;
5954     std::unique_lock<std::mutex> lock(global_lock);
5955     bool skip = PreCallValidateDestroyImageView(dev_data, imageView, &image_view_state, &obj_struct);
5956     if (!skip) {
5957         lock.unlock();
5958         dev_data->dispatch_table.DestroyImageView(device, imageView, pAllocator);
5959         lock.lock();
5960         PostCallRecordDestroyImageView(dev_data, imageView, image_view_state, obj_struct);
5961     }
5962 }
5963 
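// DestroyImageView above, and the other Destroy* entry points in this file, all follow the
// same validate / dispatch / record shape. A condensed restatement of that pattern, with
// purely generic placeholder names (VkObject, OBJECT_STATE, and the *DestroyObject helpers
// do not exist; they stand in for the concrete handle types used below):
#if 0
VKAPI_ATTR void VKAPI_CALL DestroyObject(VkDevice device, VkObject object, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    OBJECT_STATE *state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    // Pre-call: look up tracking state and verify the object is not still in use
    bool skip = PreCallValidateDestroyObject(dev_data, object, &state, &obj_struct);
    if (!skip) {
        lock.unlock();  // never hold global_lock across the down-chain call
        dev_data->dispatch_table.DestroyObject(device, object, pAllocator);
        lock.lock();
        // Post-call: invalidate any command buffers bound to the object and erase its map entry
        PostCallRecordDestroyObject(dev_data, object, state, obj_struct);
    }
}
#endif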
5964 VKAPI_ATTR void VKAPI_CALL
5965 DestroyShaderModule(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks *pAllocator) {
5966     layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5967 
5968     std::unique_lock<std::mutex> lock(global_lock);
5969     my_data->shaderModuleMap.erase(shaderModule);
5970     lock.unlock();
5971 
5972     my_data->dispatch_table.DestroyShaderModule(device, shaderModule, pAllocator);
5973 }
5974 
5975 static bool PreCallValidateDestroyPipeline(layer_data *dev_data, VkPipeline pipeline, PIPELINE_STATE **pipeline_state,
5976                                            VK_OBJECT *obj_struct) {
5977     *pipeline_state = getPipelineState(dev_data, pipeline);
5978     *obj_struct = {reinterpret_cast<uint64_t &>(pipeline), VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT};
5979     if (dev_data->instance_data->disabled.destroy_pipeline)
5980         return false;
5981     bool skip = false;
5982     if (*pipeline_state) {
5983         skip |= ValidateObjectNotInUse(dev_data, *pipeline_state, *obj_struct, VALIDATION_ERROR_00555);
5984     }
5985     return skip;
5986 }
5987 
5988 static void PostCallRecordDestroyPipeline(layer_data *dev_data, VkPipeline pipeline, PIPELINE_STATE *pipeline_state,
5989                                           VK_OBJECT obj_struct) {
5990     // Any bound cmd buffers are now invalid
5991     invalidateCommandBuffers(pipeline_state->cb_bindings, obj_struct);
5992     dev_data->pipelineMap.erase(pipeline);
5993 }
5994 
5995 VKAPI_ATTR void VKAPI_CALL
5996 DestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
5997     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5998     PIPELINE_STATE *pipeline_state = nullptr;
5999     VK_OBJECT obj_struct;
6000     std::unique_lock<std::mutex> lock(global_lock);
6001     bool skip = PreCallValidateDestroyPipeline(dev_data, pipeline, &pipeline_state, &obj_struct);
6002     if (!skip) {
6003         lock.unlock();
6004         dev_data->dispatch_table.DestroyPipeline(device, pipeline, pAllocator);
6005         lock.lock();
6006         PostCallRecordDestroyPipeline(dev_data, pipeline, pipeline_state, obj_struct);
6007     }
6008 }
6009 
6010 VKAPI_ATTR void VKAPI_CALL
6011 DestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks *pAllocator) {
6012     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6013     std::unique_lock<std::mutex> lock(global_lock);
6014     dev_data->pipelineLayoutMap.erase(pipelineLayout);
6015     lock.unlock();
6016 
6017     dev_data->dispatch_table.DestroyPipelineLayout(device, pipelineLayout, pAllocator);
6018 }
6019 
6020 static bool PreCallValidateDestroySampler(layer_data *dev_data, VkSampler sampler, SAMPLER_STATE **sampler_state,
6021                                           VK_OBJECT *obj_struct) {
6022     *sampler_state = getSamplerState(dev_data, sampler);
6023     *obj_struct = {reinterpret_cast<uint64_t &>(sampler), VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT};
6024     if (dev_data->instance_data->disabled.destroy_sampler)
6025         return false;
6026     bool skip = false;
6027     if (*sampler_state) {
6028         skip |= ValidateObjectNotInUse(dev_data, *sampler_state, *obj_struct, VALIDATION_ERROR_00837);
6029     }
6030     return skip;
6031 }
6032 
6033 static void PostCallRecordDestroySampler(layer_data *dev_data, VkSampler sampler, SAMPLER_STATE *sampler_state,
6034                                          VK_OBJECT obj_struct) {
6035     // Any bound cmd buffers are now invalid
6036     if (sampler_state)
6037         invalidateCommandBuffers(sampler_state->cb_bindings, obj_struct);
6038     dev_data->samplerMap.erase(sampler);
6039 }
6040 
6041 VKAPI_ATTR void VKAPI_CALL
6042 DestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
6043     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6044     SAMPLER_STATE *sampler_state = nullptr;
6045     VK_OBJECT obj_struct;
6046     std::unique_lock<std::mutex> lock(global_lock);
6047     bool skip = PreCallValidateDestroySampler(dev_data, sampler, &sampler_state, &obj_struct);
6048     if (!skip) {
6049         lock.unlock();
6050         dev_data->dispatch_table.DestroySampler(device, sampler, pAllocator);
6051         lock.lock();
6052         PostCallRecordDestroySampler(dev_data, sampler, sampler_state, obj_struct);
6053     }
6054 }
6055 
6056 VKAPI_ATTR void VKAPI_CALL
6057 DestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks *pAllocator) {
6058     // TODO : Clean up any internal data structures using this obj.
6059     get_my_data_ptr(get_dispatch_key(device), layer_data_map)
6060         ->dispatch_table.DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
6061 }
6062 
6063 static bool PreCallValidateDestroyDescriptorPool(layer_data *dev_data, VkDescriptorPool pool,
6064                                                  DESCRIPTOR_POOL_STATE **desc_pool_state, VK_OBJECT *obj_struct) {
6065     *desc_pool_state = getDescriptorPoolState(dev_data, pool);
6066     *obj_struct = {reinterpret_cast<uint64_t &>(pool), VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT};
6067     if (dev_data->instance_data->disabled.destroy_descriptor_pool)
6068         return false;
6069     bool skip = false;
6070     if (*desc_pool_state) {
6071         skip |= ValidateObjectNotInUse(dev_data, *desc_pool_state, *obj_struct, VALIDATION_ERROR_00901);
6072     }
6073     return skip;
6074 }
6075 
6076 static void PostCallRecordDestroyDescriptorPool(layer_data *dev_data, VkDescriptorPool descriptorPool,
6077                                                 DESCRIPTOR_POOL_STATE *desc_pool_state, VK_OBJECT obj_struct) {
6078     // Any bound cmd buffers are now invalid
6079     invalidateCommandBuffers(desc_pool_state->cb_bindings, obj_struct);
6080     // Free sets that were in this pool
6081     for (auto ds : desc_pool_state->sets) {
6082         freeDescriptorSet(dev_data, ds);
6083     }
6084     dev_data->descriptorPoolMap.erase(descriptorPool);
6085 }
6086 
6087 VKAPI_ATTR void VKAPI_CALL
6088 DestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) {
6089     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6090     DESCRIPTOR_POOL_STATE *desc_pool_state = nullptr;
6091     VK_OBJECT obj_struct;
6092     std::unique_lock<std::mutex> lock(global_lock);
6093     bool skip = PreCallValidateDestroyDescriptorPool(dev_data, descriptorPool, &desc_pool_state, &obj_struct);
6094     if (!skip) {
6095         lock.unlock();
6096         dev_data->dispatch_table.DestroyDescriptorPool(device, descriptorPool, pAllocator);
6097         lock.lock();
6098         PostCallRecordDestroyDescriptorPool(dev_data, descriptorPool, desc_pool_state, obj_struct);
6099     }
6100 }
6101 // Verify cmdBuffer in given cb_node is not in global in-flight set, and return skip_call result
6102 //  If this is a secondary command buffer, then make sure its primary is also in-flight
6103 //  If primary is not in-flight, then remove secondary from global in-flight set
6104 // This function is only valid at a point when cmdBuffer is being reset or freed
6105 static bool checkCommandBufferInFlight(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *action,
6106                                        UNIQUE_VALIDATION_ERROR_CODE error_code) {
6107     bool skip_call = false;
6108     if (dev_data->globalInFlightCmdBuffers.count(cb_node->commandBuffer)) {
6109         // Primary CB or secondary where primary is also in-flight is an error
6110         if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_SECONDARY) ||
6111             (dev_data->globalInFlightCmdBuffers.count(cb_node->primaryCommandBuffer))) {
6112             skip_call |=
6113                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6114                         reinterpret_cast<const uint64_t &>(cb_node->commandBuffer), __LINE__, error_code, "DS",
6115                         "Attempt to %s command buffer (0x%" PRIxLEAST64 ") which is in use. %s", action,
6116                         reinterpret_cast<const uint64_t &>(cb_node->commandBuffer), validation_error_map[error_code]);
6117         }
6118     }
6119     return skip_call;
6120 }
6121 
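// From the application's point of view, a command buffer enters the in-flight set at
// vkQueueSubmit and leaves it once the submission's fence signals; only then may it be
// freed or reset without triggering the check above. A hedged sketch (the helper name is
// hypothetical, error handling omitted):
#if 0
static void free_after_completion_example(VkDevice device, VkQueue queue, VkCommandPool pool, VkCommandBuffer cmd_buf,
                                           VkFence fence) {
    VkSubmitInfo submit_info = {};
    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info.commandBufferCount = 1;
    submit_info.pCommandBuffers = &cmd_buf;
    vkQueueSubmit(queue, 1, &submit_info, fence);

    // Freeing before this wait completes is exactly what VALIDATION_ERROR_00096 reports.
    vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);
    vkFreeCommandBuffers(device, pool, 1, &cmd_buf);
}
#endif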
6122 // Iterate over all cmdBuffers in given commandPool and verify that each is not in use
6123 static bool checkCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool, const char *action,
6124                                         UNIQUE_VALIDATION_ERROR_CODE error_code) {
6125     bool skip_call = false;
6126     for (auto cmd_buffer : pPool->commandBuffers) {
6127         if (dev_data->globalInFlightCmdBuffers.count(cmd_buffer)) {
6128             skip_call |= checkCommandBufferInFlight(dev_data, getCBNode(dev_data, cmd_buffer), action, error_code);
6129         }
6130     }
6131     return skip_call;
6132 }
6133 
6134 static void clearCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool) {
6135     for (auto cmd_buffer : pPool->commandBuffers) {
6136         dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
6137     }
6138 }
6139 
6140 VKAPI_ATTR void VKAPI_CALL
6141 FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers) {
6142     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6143     bool skip_call = false;
6144     std::unique_lock<std::mutex> lock(global_lock);
6145 
6146     for (uint32_t i = 0; i < commandBufferCount; i++) {
6147         auto cb_node = getCBNode(dev_data, pCommandBuffers[i]);
6148         // Verify the command buffer is not in-flight before it is freed
6149         if (cb_node) {
6150             skip_call |= checkCommandBufferInFlight(dev_data, cb_node, "free", VALIDATION_ERROR_00096);
6151         }
6152     }
6153 
6154     if (skip_call)
6155         return;
6156 
6157     auto pPool = getCommandPoolNode(dev_data, commandPool);
6158     for (uint32_t i = 0; i < commandBufferCount; i++) {
6159         auto cb_node = getCBNode(dev_data, pCommandBuffers[i]);
6160         // Delete CB information structure, and remove from commandBufferMap
6161         if (cb_node) {
6162             dev_data->globalInFlightCmdBuffers.erase(cb_node->commandBuffer);
6163             // reset prior to delete for data clean-up
6164             resetCB(dev_data, cb_node->commandBuffer);
6165             dev_data->commandBufferMap.erase(cb_node->commandBuffer);
6166             delete cb_node;
6167         }
6168 
6169         // Remove commandBuffer reference from commandPoolMap
6170         pPool->commandBuffers.remove(pCommandBuffers[i]);
6171     }
6172     printCBList(dev_data);
6173     lock.unlock();
6174 
6175     dev_data->dispatch_table.FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
6176 }
6177 
6178 VKAPI_ATTR VkResult VKAPI_CALL CreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
6179                                                  const VkAllocationCallbacks *pAllocator,
6180                                                  VkCommandPool *pCommandPool) {
6181     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6182 
6183     VkResult result = dev_data->dispatch_table.CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);
6184 
6185     if (VK_SUCCESS == result) {
6186         std::lock_guard<std::mutex> lock(global_lock);
6187         dev_data->commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags;
6188         dev_data->commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex;
6189     }
6190     return result;
6191 }
6192 
6193 VKAPI_ATTR VkResult VKAPI_CALL CreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
6194                                                const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {
6195 
6196     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6197     VkResult result = dev_data->dispatch_table.CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool);
6198     if (result == VK_SUCCESS) {
6199         std::lock_guard<std::mutex> lock(global_lock);
6200         QUERY_POOL_NODE *qp_node = &dev_data->queryPoolMap[*pQueryPool];
6201         qp_node->createInfo = *pCreateInfo;
6202     }
6203     return result;
6204 }
6205 
6206 static bool PreCallValidateDestroyCommandPool(layer_data *dev_data, VkCommandPool pool, COMMAND_POOL_NODE **cp_state) {
6207     *cp_state = getCommandPoolNode(dev_data, pool);
6208     if (dev_data->instance_data->disabled.destroy_command_pool)
6209         return false;
6210     bool skip = false;
6211     if (*cp_state) {
6212         // Verify that command buffers in pool are complete (not in-flight)
6213         skip |= checkCommandBuffersInFlight(dev_data, *cp_state, "destroy command pool with", VALIDATION_ERROR_00077);
6214     }
6215     return skip;
6216 }
6217 
6218 static void PostCallRecordDestroyCommandPool(layer_data *dev_data, VkCommandPool pool, COMMAND_POOL_NODE *cp_state) {
6219     // Must remove cmdpool from cmdpoolmap, after removing all cmdbuffers in its list from the commandBufferMap
6220     clearCommandBuffersInFlight(dev_data, cp_state);
6221     for (auto cb : cp_state->commandBuffers) {
6222         clear_cmd_buf_and_mem_references(dev_data, cb);
6223         auto cb_node = getCBNode(dev_data, cb);
6224         // Remove references to this cb_node prior to delete
6225         // TODO : Need better solution here, resetCB?
6226         for (auto obj : cb_node->object_bindings) {
6227             removeCommandBufferBinding(dev_data, &obj, cb_node);
6228         }
6229         for (auto framebuffer : cb_node->framebuffers) {
6230             auto fb_state = getFramebufferState(dev_data, framebuffer);
6231             if (fb_state)
6232                 fb_state->cb_bindings.erase(cb_node);
6233         }
6234         dev_data->commandBufferMap.erase(cb); // Remove this command buffer
6235         delete cb_node;                       // delete CB info structure
6236     }
6237     dev_data->commandPoolMap.erase(pool);
6238 }
6239 
6240 // Destroy commandPool along with all of the commandBuffers allocated from that pool
6241 VKAPI_ATTR void VKAPI_CALL DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
6242     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6243     COMMAND_POOL_NODE *cp_state = nullptr;
6244     std::unique_lock<std::mutex> lock(global_lock);
6245     bool skip = PreCallValidateDestroyCommandPool(dev_data, commandPool, &cp_state);
6246     if (!skip) {
6247         lock.unlock();
6248         dev_data->dispatch_table.DestroyCommandPool(device, commandPool, pAllocator);
6249         lock.lock();
6250         PostCallRecordDestroyCommandPool(dev_data, commandPool, cp_state);
6251     }
6252 }
6253 
6254 VKAPI_ATTR VkResult VKAPI_CALL
6255 ResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
6256     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6257     bool skip_call = false;
6258 
6259     std::unique_lock<std::mutex> lock(global_lock);
6260     auto pPool = getCommandPoolNode(dev_data, commandPool);
6261     skip_call |= checkCommandBuffersInFlight(dev_data, pPool, "reset command pool with", VALIDATION_ERROR_00072);
6262     lock.unlock();
6263 
6264     if (skip_call)
6265         return VK_ERROR_VALIDATION_FAILED_EXT;
6266 
6267     VkResult result = dev_data->dispatch_table.ResetCommandPool(device, commandPool, flags);
6268 
6269     // Reset all of the CBs allocated from this pool
6270     if (VK_SUCCESS == result) {
6271         lock.lock();
6272         clearCommandBuffersInFlight(dev_data, pPool);
6273         for (auto cmdBuffer : pPool->commandBuffers) {
6274             resetCB(dev_data, cmdBuffer);
6275         }
6276         lock.unlock();
6277     }
6278     return result;
6279 }
6280 
6281 VKAPI_ATTR VkResult VKAPI_CALL ResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
6282     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6283     bool skip_call = false;
6284     std::unique_lock<std::mutex> lock(global_lock);
6285     for (uint32_t i = 0; i < fenceCount; ++i) {
6286         auto pFence = getFenceNode(dev_data, pFences[i]);
6287         if (pFence && pFence->state == FENCE_INFLIGHT) {
6288             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
6289                                  reinterpret_cast<const uint64_t &>(pFences[i]), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
6290                                  "Fence 0x%" PRIx64 " is in use.", reinterpret_cast<const uint64_t &>(pFences[i]));
6291         }
6292     }
6293     lock.unlock();
6294 
6295     if (skip_call)
6296         return VK_ERROR_VALIDATION_FAILED_EXT;
6297 
6298     VkResult result = dev_data->dispatch_table.ResetFences(device, fenceCount, pFences);
6299 
6300     if (result == VK_SUCCESS) {
6301         lock.lock();
6302         for (uint32_t i = 0; i < fenceCount; ++i) {
6303             auto pFence = getFenceNode(dev_data, pFences[i]);
6304             if (pFence) {
6305                 pFence->state = FENCE_UNSIGNALED;
6306             }
6307         }
6308         lock.unlock();
6309     }
6310 
6311     return result;
6312 }
6313 
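// The fence lifecycle this entry point relies on, seen from the application side: a fence
// handed to vkQueueSubmit is tracked as FENCE_INFLIGHT until it signals, and resetting it in
// that window produces the DRAWSTATE_INVALID_FENCE message above. An illustrative sketch
// (hypothetical helper name, error handling omitted):
#if 0
static void reuse_fence_example(VkDevice device, VkFence fence) {
    vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);  // fence leaves the in-flight state here
    vkResetFences(device, 1, &fence);                         // now tracked as FENCE_UNSIGNALED again
}
#endif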
6314 // For given cb_nodes, invalidate them and track object causing invalidation
6315 void invalidateCommandBuffers(std::unordered_set<GLOBAL_CB_NODE *> cb_nodes, VK_OBJECT obj) {
6316     for (auto cb_node : cb_nodes) {
6317         cb_node->state = CB_INVALID;
6318         cb_node->broken_bindings.push_back(obj);
6319     }
6320 }
6321 
6322 static bool PreCallValidateDestroyFramebuffer(layer_data *dev_data, VkFramebuffer framebuffer,
6323                                               FRAMEBUFFER_STATE **framebuffer_state, VK_OBJECT *obj_struct) {
6324     *framebuffer_state = getFramebufferState(dev_data, framebuffer);
6325     *obj_struct = {reinterpret_cast<uint64_t &>(framebuffer), VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT};
6326     if (dev_data->instance_data->disabled.destroy_framebuffer)
6327         return false;
6328     bool skip = false;
6329     if (*framebuffer_state) {
6330         skip |= ValidateObjectNotInUse(dev_data, *framebuffer_state, *obj_struct, VALIDATION_ERROR_00422);
6331     }
6332     return skip;
6333 }
6334 
6335 static void PostCallRecordDestroyFramebuffer(layer_data *dev_data, VkFramebuffer framebuffer, FRAMEBUFFER_STATE *framebuffer_state,
6336                                              VK_OBJECT obj_struct) {
6337     invalidateCommandBuffers(framebuffer_state->cb_bindings, obj_struct);
6338     dev_data->frameBufferMap.erase(framebuffer);
6339 }
6340 
6341 VKAPI_ATTR void VKAPI_CALL
6342 DestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
6343     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6344     FRAMEBUFFER_STATE *framebuffer_state = nullptr;
6345     VK_OBJECT obj_struct;
6346     std::unique_lock<std::mutex> lock(global_lock);
6347     bool skip = PreCallValidateDestroyFramebuffer(dev_data, framebuffer, &framebuffer_state, &obj_struct);
6348     if (!skip) {
6349         lock.unlock();
6350         dev_data->dispatch_table.DestroyFramebuffer(device, framebuffer, pAllocator);
6351         lock.lock();
6352         PostCallRecordDestroyFramebuffer(dev_data, framebuffer, framebuffer_state, obj_struct);
6353     }
6354 }
6355 
6356 static bool PreCallValidateDestroyRenderPass(layer_data *dev_data, VkRenderPass render_pass, RENDER_PASS_STATE **rp_state,
6357                                              VK_OBJECT *obj_struct) {
6358     *rp_state = getRenderPassState(dev_data, render_pass);
6359     *obj_struct = {reinterpret_cast<uint64_t &>(render_pass), VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT};
6360     if (dev_data->instance_data->disabled.destroy_renderpass)
6361         return false;
6362     bool skip = false;
6363     if (*rp_state) {
6364         skip |= ValidateObjectNotInUse(dev_data, *rp_state, *obj_struct, VALIDATION_ERROR_00393);
6365     }
6366     return skip;
6367 }
6368 
6369 static void PostCallRecordDestroyRenderPass(layer_data *dev_data, VkRenderPass render_pass, RENDER_PASS_STATE *rp_state,
6370                                             VK_OBJECT obj_struct) {
6371     invalidateCommandBuffers(rp_state->cb_bindings, obj_struct);
6372     dev_data->renderPassMap.erase(render_pass);
6373 }
6374 
6375 VKAPI_ATTR void VKAPI_CALL
6376 DestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
6377     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6378     RENDER_PASS_STATE *rp_state = nullptr;
6379     VK_OBJECT obj_struct;
6380     std::unique_lock<std::mutex> lock(global_lock);
6381     bool skip = PreCallValidateDestroyRenderPass(dev_data, renderPass, &rp_state, &obj_struct);
6382     if (!skip) {
6383         lock.unlock();
6384         dev_data->dispatch_table.DestroyRenderPass(device, renderPass, pAllocator);
6385         lock.lock();
6386         PostCallRecordDestroyRenderPass(dev_data, renderPass, rp_state, obj_struct);
6387     }
6388 }
6389 
6390 VKAPI_ATTR VkResult VKAPI_CALL CreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
6391                                             const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
6392     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6393     // TODO: Add check for VALIDATION_ERROR_00658
6394     // TODO: Add check for VALIDATION_ERROR_00666
6395     // TODO: Add check for VALIDATION_ERROR_00667
6396     // TODO: Add check for VALIDATION_ERROR_00668
6397     // TODO: Add check for VALIDATION_ERROR_00669
6398     VkResult result = dev_data->dispatch_table.CreateBuffer(device, pCreateInfo, pAllocator, pBuffer);
6399 
6400     if (VK_SUCCESS == result) {
6401         std::lock_guard<std::mutex> lock(global_lock);
6402         // TODO : This doesn't create deep copy of pQueueFamilyIndices so need to fix that if/when we want that data to be valid
6403         dev_data->bufferMap.insert(std::make_pair(*pBuffer, unique_ptr<BUFFER_NODE>(new BUFFER_NODE(*pBuffer, pCreateInfo))));
6404     }
6405     return result;
6406 }
6407 
6408 static bool PreCallValidateCreateBufferView(layer_data *dev_data, const VkBufferViewCreateInfo *pCreateInfo) {
6409     bool skip_call = false;
6410     BUFFER_NODE *buf_node = getBufferNode(dev_data, pCreateInfo->buffer);
6411     // If this isn't a sparse buffer, it needs to have memory backing it at CreateBufferView time
6412     if (buf_node) {
6413         skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buf_node, "vkCreateBufferView()");
6414         // In order to create a valid buffer view, the buffer must have been created with at least one of the
6415         // following flags:  UNIFORM_TEXEL_BUFFER_BIT or STORAGE_TEXEL_BUFFER_BIT
6416         skip_call |= ValidateBufferUsageFlags(dev_data, buf_node,
6417                                               VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT,
6418                                               false, "vkCreateBufferView()", "VK_BUFFER_USAGE_[STORAGE|UNIFORM]_TEXEL_BUFFER_BIT");
6419     }
6420     return skip_call;
6421 }
6422 
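// To satisfy both checks above, the application must create the buffer with a texel-buffer
// usage bit and bind it to memory before creating the view. A minimal sketch, assuming
// 'memory' is an allocation already known to be compatible with the buffer's memory
// requirements; the helper name is hypothetical and error handling is omitted:
#if 0
static VkResult create_texel_buffer_view_example(VkDevice device, VkDeviceMemory memory, VkBufferView *out_view) {
    VkBufferCreateInfo buf_ci = {};
    buf_ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    buf_ci.size = 4096;
    buf_ci.usage = VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT;  // required for buffer views
    buf_ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    VkBuffer buffer;
    vkCreateBuffer(device, &buf_ci, nullptr, &buffer);

    vkBindBufferMemory(device, buffer, memory, 0);            // must happen before vkCreateBufferView

    VkBufferViewCreateInfo view_ci = {};
    view_ci.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO;
    view_ci.buffer = buffer;
    view_ci.format = VK_FORMAT_R32G32B32A32_SFLOAT;
    view_ci.offset = 0;
    view_ci.range = VK_WHOLE_SIZE;
    return vkCreateBufferView(device, &view_ci, nullptr, out_view);
}
#endif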
6423 VKAPI_ATTR VkResult VKAPI_CALL CreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
6424                                                 const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
6425     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6426     std::unique_lock<std::mutex> lock(global_lock);
6427     bool skip_call = PreCallValidateCreateBufferView(dev_data, pCreateInfo);
6428     lock.unlock();
6429     if (skip_call)
6430         return VK_ERROR_VALIDATION_FAILED_EXT;
6431     VkResult result = dev_data->dispatch_table.CreateBufferView(device, pCreateInfo, pAllocator, pView);
6432     if (VK_SUCCESS == result) {
6433         lock.lock();
6434         dev_data->bufferViewMap[*pView] = unique_ptr<BUFFER_VIEW_STATE>(new BUFFER_VIEW_STATE(*pView, pCreateInfo));
6435         lock.unlock();
6436     }
6437     return result;
6438 }
6439 
6440 VKAPI_ATTR VkResult VKAPI_CALL CreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
6441                                            const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
6442     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6443 
6444     VkResult result = dev_data->dispatch_table.CreateImage(device, pCreateInfo, pAllocator, pImage);
6445 
6446     if (VK_SUCCESS == result) {
6447         std::lock_guard<std::mutex> lock(global_lock);
6448         IMAGE_LAYOUT_NODE image_state;
6449         image_state.layout = pCreateInfo->initialLayout;
6450         image_state.format = pCreateInfo->format;
6451         dev_data->imageMap.insert(std::make_pair(*pImage, unique_ptr<IMAGE_STATE>(new IMAGE_STATE(*pImage, pCreateInfo))));
6452         ImageSubresourcePair subpair = {*pImage, false, VkImageSubresource()};
6453         dev_data->imageSubresourceMap[*pImage].push_back(subpair);
6454         dev_data->imageLayoutMap[subpair] = image_state;
6455     }
6456     return result;
6457 }
6458 
6459 static void ResolveRemainingLevelsLayers(layer_data *dev_data, VkImageSubresourceRange *range, VkImage image) {
6460     /* expects global_lock to be held by caller */
6461 
6462     auto image_state = getImageState(dev_data, image);
6463     if (image_state) {
6464         /* If the caller used the special values VK_REMAINING_MIP_LEVELS and
6465          * VK_REMAINING_ARRAY_LAYERS, resolve them now in our internal state to
6466          * the actual values.
6467          */
6468         if (range->levelCount == VK_REMAINING_MIP_LEVELS) {
6469             range->levelCount = image_state->createInfo.mipLevels - range->baseMipLevel;
6470         }
6471 
6472         if (range->layerCount == VK_REMAINING_ARRAY_LAYERS) {
6473             range->layerCount = image_state->createInfo.arrayLayers - range->baseArrayLayer;
6474         }
6475     }
6476 }
6477 
6478 // Return the correct layer/level counts if the caller used the special
6479 // values VK_REMAINING_MIP_LEVELS or VK_REMAINING_ARRAY_LAYERS.
6480 static void ResolveRemainingLevelsLayers(layer_data *dev_data, uint32_t *levels, uint32_t *layers, VkImageSubresourceRange range,
6481                                          VkImage image) {
6482     /* expects global_lock to be held by caller */
6483 
6484     *levels = range.levelCount;
6485     *layers = range.layerCount;
6486     auto image_state = getImageState(dev_data, image);
6487     if (image_state) {
6488         if (range.levelCount == VK_REMAINING_MIP_LEVELS) {
6489             *levels = image_state->createInfo.mipLevels - range.baseMipLevel;
6490         }
6491         if (range.layerCount == VK_REMAINING_ARRAY_LAYERS) {
6492             *layers = image_state->createInfo.arrayLayers - range.baseArrayLayer;
6493         }
6494     }
6495 }
6496 
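// Worked example of the resolution performed by both overloads above: for an image created
// with mipLevels = 10 and arrayLayers = 6, a subresource range of
//   { baseMipLevel = 2, levelCount = VK_REMAINING_MIP_LEVELS,
//     baseArrayLayer = 1, layerCount = VK_REMAINING_ARRAY_LAYERS }
// resolves to levelCount = 10 - 2 = 8 and layerCount = 6 - 1 = 5.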
6497 static bool PreCallValidateCreateImageView(layer_data *dev_data, const VkImageViewCreateInfo *pCreateInfo) {
6498     bool skip_call = false;
6499     IMAGE_STATE *image_state = getImageState(dev_data, pCreateInfo->image);
6500     if (image_state) {
6501         skip_call |= ValidateImageUsageFlags(
6502             dev_data, image_state, VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT |
6503                                        VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
6504             false, "vkCreateImageView()",
6505             "VK_IMAGE_USAGE_[SAMPLED|STORAGE|COLOR_ATTACHMENT|DEPTH_STENCIL_ATTACHMENT|INPUT_ATTACHMENT]_BIT");
6506         // If this isn't a sparse image, it needs to have memory backing it at CreateImageView time
6507         skip_call |= ValidateMemoryIsBoundToImage(dev_data, image_state, "vkCreateImageView()");
6508     }
6509     return skip_call;
6510 }
6511 
6512 static inline void PostCallRecordCreateImageView(layer_data *dev_data, const VkImageViewCreateInfo *pCreateInfo, VkImageView view) {
6513     dev_data->imageViewMap[view] = unique_ptr<IMAGE_VIEW_STATE>(new IMAGE_VIEW_STATE(view, pCreateInfo));
6514     ResolveRemainingLevelsLayers(dev_data, &dev_data->imageViewMap[view].get()->create_info.subresourceRange, pCreateInfo->image);
6515 }
6516 
6517 VKAPI_ATTR VkResult VKAPI_CALL CreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
6518                                                const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
6519     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6520     std::unique_lock<std::mutex> lock(global_lock);
6521     bool skip_call = PreCallValidateCreateImageView(dev_data, pCreateInfo);
6522     lock.unlock();
6523     if (skip_call)
6524         return VK_ERROR_VALIDATION_FAILED_EXT;
6525     VkResult result = dev_data->dispatch_table.CreateImageView(device, pCreateInfo, pAllocator, pView);
6526     if (VK_SUCCESS == result) {
6527         lock.lock();
6528         PostCallRecordCreateImageView(dev_data, pCreateInfo, *pView);
6529         lock.unlock();
6530     }
6531 
6532     return result;
6533 }
6534 
6535 VKAPI_ATTR VkResult VKAPI_CALL
6536 CreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkFence *pFence) {
6537     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6538     VkResult result = dev_data->dispatch_table.CreateFence(device, pCreateInfo, pAllocator, pFence);
6539     if (VK_SUCCESS == result) {
6540         std::lock_guard<std::mutex> lock(global_lock);
6541         auto &fence_node = dev_data->fenceMap[*pFence];
6542         fence_node.fence = *pFence;
6543         fence_node.createInfo = *pCreateInfo;
6544         fence_node.state = (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) ? FENCE_RETIRED : FENCE_UNSIGNALED;
6545     }
6546     return result;
6547 }
6548 
6549 // TODO handle pipeline caches
6550 VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
6551                                                    const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) {
6552     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6553     VkResult result = dev_data->dispatch_table.CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache);
6554     return result;
6555 }
6556 
6557 VKAPI_ATTR void VKAPI_CALL
6558 DestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks *pAllocator) {
6559     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6560     dev_data->dispatch_table.DestroyPipelineCache(device, pipelineCache, pAllocator);
6561 }
6562 
6563 VKAPI_ATTR VkResult VKAPI_CALL
6564 GetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize, void *pData) {
6565     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6566     VkResult result = dev_data->dispatch_table.GetPipelineCacheData(device, pipelineCache, pDataSize, pData);
6567     return result;
6568 }
6569 
6570 VKAPI_ATTR VkResult VKAPI_CALL
6571 MergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache *pSrcCaches) {
6572     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6573     VkResult result = dev_data->dispatch_table.MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches);
6574     return result;
6575 }
6576 
6577 // utility function to set collective state for pipeline
6578 void set_pipeline_state(PIPELINE_STATE *pPipe) {
6579     // If any attachment used by this pipeline has blendEnable, set top-level blendEnable
6580     if (pPipe->graphicsPipelineCI.pColorBlendState) {
6581         for (size_t i = 0; i < pPipe->attachments.size(); ++i) {
6582             if (VK_TRUE == pPipe->attachments[i].blendEnable) {
6583                 if (((pPipe->attachments[i].dstAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6584                      (pPipe->attachments[i].dstAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
6585                     ((pPipe->attachments[i].dstColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6586                      (pPipe->attachments[i].dstColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
6587                     ((pPipe->attachments[i].srcAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6588                      (pPipe->attachments[i].srcAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
6589                     ((pPipe->attachments[i].srcColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6590                      (pPipe->attachments[i].srcColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA))) {
6591                     pPipe->blendConstantsEnabled = true;
6592                 }
6593             }
6594         }
6595     }
6596 }
6597 
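// When set_pipeline_state() flags blendConstantsEnabled, the application has to supply blend
// constants, either statically in VkPipelineColorBlendStateCreateInfo::blendConstants or,
// if VK_DYNAMIC_STATE_BLEND_CONSTANTS is enabled, at record time. A sketch of the dynamic
// path (the helper name is hypothetical):
#if 0
static void set_blend_constants_example(VkCommandBuffer cmd_buf) {
    const float blend_constants[4] = {1.0f, 0.5f, 0.25f, 1.0f};  // RGBA values consumed by VK_BLEND_FACTOR_CONSTANT_COLOR
    vkCmdSetBlendConstants(cmd_buf, blend_constants);
}
#endif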
6598 VKAPI_ATTR VkResult VKAPI_CALL
6599 CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
6600                         const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
6601                         VkPipeline *pPipelines) {
6602     VkResult result = VK_SUCCESS;
6603     // TODO What to do with pipelineCache?
6604     // The order of operations here is a little convoluted but gets the job done
6605     //  1. Pipeline create state is first shadowed into PIPELINE_STATE struct
6606     //  2. Create state is then validated (which uses flags setup during shadowing)
6607     //  3. If everything looks good, we'll then create the pipeline and add NODE to pipelineMap
6608     bool skip_call = false;
6609     // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
6610     vector<PIPELINE_STATE *> pPipeState(count);
6611     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6612 
6613     uint32_t i = 0;
6614     std::unique_lock<std::mutex> lock(global_lock);
6615 
6616     for (i = 0; i < count; i++) {
6617         pPipeState[i] = new PIPELINE_STATE;
6618         pPipeState[i]->initGraphicsPipeline(&pCreateInfos[i]);
6619         pPipeState[i]->render_pass_ci.initialize(getRenderPassState(dev_data, pCreateInfos[i].renderPass)->createInfo.ptr());
6620         pPipeState[i]->pipeline_layout = *getPipelineLayout(dev_data, pCreateInfos[i].layout);
6621 
6622         skip_call |= verifyPipelineCreateState(dev_data, device, pPipeState, i);
6623     }
6624 
6625     if (!skip_call) {
6626         lock.unlock();
6627         result =
6628             dev_data->dispatch_table.CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
6629         lock.lock();
6630         for (i = 0; i < count; i++) {
6631             pPipeState[i]->pipeline = pPipelines[i];
6632             dev_data->pipelineMap[pPipeState[i]->pipeline] = pPipeState[i];
6633         }
6634         lock.unlock();
6635     } else {
6636         for (i = 0; i < count; i++) {
6637             delete pPipeState[i];
6638         }
6639         lock.unlock();
6640         return VK_ERROR_VALIDATION_FAILED_EXT;
6641     }
6642     return result;
6643 }
6644 
6645 VKAPI_ATTR VkResult VKAPI_CALL
6646 CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
6647                        const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
6648                        VkPipeline *pPipelines) {
6649     VkResult result = VK_SUCCESS;
6650     bool skip_call = false;
6651 
6652     // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
6653     vector<PIPELINE_STATE *> pPipeState(count);
6654     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6655 
6656     uint32_t i = 0;
6657     std::unique_lock<std::mutex> lock(global_lock);
6658     for (i = 0; i < count; i++) {
6659         // TODO: Verify compute stage bits
6660 
6661         // Create and initialize internal tracking data structure
6662         pPipeState[i] = new PIPELINE_STATE;
6663         pPipeState[i]->initComputePipeline(&pCreateInfos[i]);
6664         pPipeState[i]->pipeline_layout = *getPipelineLayout(dev_data, pCreateInfos[i].layout);
6665         // memcpy(&pPipeState[i]->computePipelineCI, (const void *)&pCreateInfos[i], sizeof(VkComputePipelineCreateInfo));
6666 
6667         // TODO: Add Compute Pipeline Verification
6668         skip_call |= !validate_compute_pipeline(dev_data->report_data, pPipeState[i], &dev_data->enabled_features,
6669                                                 dev_data->shaderModuleMap);
6670         // skip_call |= verifyPipelineCreateState(dev_data, device, pPipeState[i]);
6671     }
6672 
6673     if (!skip_call) {
6674         lock.unlock();
6675         result =
6676             dev_data->dispatch_table.CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
6677         lock.lock();
6678         for (i = 0; i < count; i++) {
6679             pPipeState[i]->pipeline = pPipelines[i];
6680             dev_data->pipelineMap[pPipeState[i]->pipeline] = pPipeState[i];
6681         }
6682         lock.unlock();
6683     } else {
6684         for (i = 0; i < count; i++) {
6685             // Clean up any locally allocated data structures
6686             delete pPipeState[i];
6687         }
6688         lock.unlock();
6689         return VK_ERROR_VALIDATION_FAILED_EXT;
6690     }
6691     return result;
6692 }
6693 
6694 VKAPI_ATTR VkResult VKAPI_CALL CreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
6695                                              const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) {
6696     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6697     VkResult result = dev_data->dispatch_table.CreateSampler(device, pCreateInfo, pAllocator, pSampler);
6698     if (VK_SUCCESS == result) {
6699         std::lock_guard<std::mutex> lock(global_lock);
6700         dev_data->samplerMap[*pSampler] = unique_ptr<SAMPLER_STATE>(new SAMPLER_STATE(pSampler, pCreateInfo));
6701     }
6702     return result;
6703 }
6704 
6705 static bool PreCallValidateCreateDescriptorSetLayout(layer_data *dev_data, const VkDescriptorSetLayoutCreateInfo *create_info) {
6706     if (dev_data->instance_data->disabled.create_descriptor_set_layout)
6707         return false;
6708     return cvdescriptorset::DescriptorSetLayout::ValidateCreateInfo(dev_data->report_data, create_info);
6709 }
6710 
6711 static void PostCallRecordCreateDescriptorSetLayout(layer_data *dev_data, const VkDescriptorSetLayoutCreateInfo *create_info,
6712                                                     VkDescriptorSetLayout set_layout) {
6713     // TODO: Convert this to unique_ptr to avoid leaks
6714     dev_data->descriptorSetLayoutMap[set_layout] = new cvdescriptorset::DescriptorSetLayout(create_info, set_layout);
6715 }
6716 
6717 VKAPI_ATTR VkResult VKAPI_CALL
6718 CreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
6719                           const VkAllocationCallbacks *pAllocator, VkDescriptorSetLayout *pSetLayout) {
6720     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6721     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
6722     std::unique_lock<std::mutex> lock(global_lock);
6723     bool skip = PreCallValidateCreateDescriptorSetLayout(dev_data, pCreateInfo);
6724     if (!skip) {
6725         lock.unlock();
6726         result = dev_data->dispatch_table.CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
6727         if (VK_SUCCESS == result) {
6728             lock.lock();
6729             PostCallRecordCreateDescriptorSetLayout(dev_data, pCreateInfo, *pSetLayout);
6730         }
6731     }
6732     return result;
6733 }
6734 
6735 // Used by CreatePipelineLayout and CmdPushConstants.
6736 // Note that the index argument is optional and only used by CreatePipelineLayout.
6737 static bool validatePushConstantRange(const layer_data *dev_data, const uint32_t offset, const uint32_t size,
6738                                       const char *caller_name, uint32_t index = 0) {
6739     if (dev_data->instance_data->disabled.push_constant_range)
6740         return false;
6741     uint32_t const maxPushConstantsSize = dev_data->phys_dev_properties.properties.limits.maxPushConstantsSize;
6742     bool skip_call = false;
6743     // Check that offset + size don't exceed the max.
6744     // Prevent arithmetic overflow here by avoiding addition and testing in this order.
6745     // TODO : This check combines VALIDATION_ERROR_00877 & 880, need to break out separately
6746     if ((offset >= maxPushConstantsSize) || (size > maxPushConstantsSize - offset)) {
6747         // This is a pain just to adapt the log message to the caller, but better to sort it out only when there is a problem.
6748         if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
6749             skip_call |=
6750                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6751                         VALIDATION_ERROR_00877, "DS", "%s call has push constants index %u with offset %u and size %u that "
6752                                                       "exceeds this device's maxPushConstantsSize of %u. %s",
6753                         caller_name, index, offset, size, maxPushConstantsSize, validation_error_map[VALIDATION_ERROR_00877]);
6754         } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
6755             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6756                                  DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with offset %u and size %u that "
6757                                                                        "exceeds this device's maxPushConstantsSize of %u.",
6758                                  caller_name, offset, size, maxPushConstantsSize);
6759         } else {
6760             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6761                                  DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
6762         }
6763     }
6764     // size needs to be non-zero and a multiple of 4.
6765     // TODO : This check combines VALIDATION_ERROR_00878 & 879, need to break out separately
6766     if ((size == 0) || ((size & 0x3) != 0)) {
6767         if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
6768             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6769                                  VALIDATION_ERROR_00878, "DS", "%s call has push constants index %u with "
6770                                                                "size %u. Size must be greater than zero and a multiple of 4. %s",
6771                                  caller_name, index, size, validation_error_map[VALIDATION_ERROR_00878]);
6772         } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
6773             skip_call |=
6774                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6775                         DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with "
6776                                                               "size %u. Size must be greater than zero and a multiple of 4.",
6777                         caller_name, size);
6778         } else {
6779             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6780                                  DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
6781         }
6782     }
6783     // offset needs to be a multiple of 4.
6784     if ((offset & 0x3) != 0) {
6785         if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
6786             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6787                                  DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants index %u with "
6788                                                                        "offset %u. Offset must be a multiple of 4.",
6789                                  caller_name, index, offset);
6790         } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
6791             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6792                                  DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with "
6793                                                                        "offset %u. Offset must be a multiple of 4.",
6794                                  caller_name, offset);
6795         } else {
6796             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6797                                  DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
6798         }
6799     }
6800     return skip_call;
6801 }
6802 
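// A push constant range that passes the offset/size checks above on a device whose
// maxPushConstantsSize is at least 128, and whose non-zero stageFlags also satisfies the
// CreatePipelineLayout check below: offset and size are multiples of 4, size is non-zero,
// and offset + size stays within the limit. The values are illustrative only:
#if 0
static const VkPushConstantRange example_range = {
    VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT,  // stageFlags (must not be 0)
    16,                                                         // offset: multiple of 4
    64,                                                         // size: non-zero multiple of 4
};
#endif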
6803 VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
6804                                                     const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
6805     bool skip_call = false;
6806     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6807     // TODO : Add checks for VALIDATION_ERRORS 865-871
6808     // Push Constant Range checks
6809     uint32_t i, j;
6810     for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
6811         skip_call |= validatePushConstantRange(dev_data, pCreateInfo->pPushConstantRanges[i].offset,
6812                                                pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()", i);
6813         if (0 == pCreateInfo->pPushConstantRanges[i].stageFlags) {
6814             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6815                                  DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCreatePipelineLayout() call has a push constant range (index %u) with no stageFlags set.", i);
6816         }
6817     }
6818     if (skip_call)
6819         return VK_ERROR_VALIDATION_FAILED_EXT;
6820 
6821     // Each range has been validated.  Now check for overlap between ranges (if they are good).
6822     // There's no explicit Valid Usage language against this, so issue a warning instead of an error.
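    // Example: ranges {offset=0, size=16} and {offset=8, size=8} give [0,16) and [8,16); since 0 <= 8 and 16 > 8,
    //  the intervals overlap and the warning below fires.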
6823     for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
6824         for (j = i + 1; j < pCreateInfo->pushConstantRangeCount; ++j) {
6825             const uint32_t minA = pCreateInfo->pPushConstantRanges[i].offset;
6826             const uint32_t maxA = minA + pCreateInfo->pPushConstantRanges[i].size;
6827             const uint32_t minB = pCreateInfo->pPushConstantRanges[j].offset;
6828             const uint32_t maxB = minB + pCreateInfo->pPushConstantRanges[j].size;
6829             if ((minA <= minB && maxA > minB) || (minB <= minA && maxB > minA)) {
6830                 skip_call |=
6831                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6832                             DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCreatePipelineLayout() call has push constants with "
6833                                                                   "overlapping ranges: %u:[%u, %u), %u:[%u, %u)",
6834                             i, minA, maxA, j, minB, maxB);
6835             }
6836         }
6837     }
6838 
6839     VkResult result = dev_data->dispatch_table.CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
6840     if (VK_SUCCESS == result) {
6841         std::lock_guard<std::mutex> lock(global_lock);
6842         PIPELINE_LAYOUT_NODE &plNode = dev_data->pipelineLayoutMap[*pPipelineLayout];
6843         plNode.layout = *pPipelineLayout;
6844         plNode.set_layouts.resize(pCreateInfo->setLayoutCount);
6845         for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
6846             plNode.set_layouts[i] = getDescriptorSetLayout(dev_data, pCreateInfo->pSetLayouts[i]);
6847         }
6848         plNode.push_constant_ranges.resize(pCreateInfo->pushConstantRangeCount);
6849         for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
6850             plNode.push_constant_ranges[i] = pCreateInfo->pPushConstantRanges[i];
6851         }
6852     }
6853     return result;
6854 }
6855 
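// Create the descriptor pool down the chain and, on success, track it with a new DESCRIPTOR_POOL_STATE entry.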
6856 VKAPI_ATTR VkResult VKAPI_CALL
6857 CreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
6858                      VkDescriptorPool *pDescriptorPool) {
6859     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6860     VkResult result = dev_data->dispatch_table.CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool);
6861     if (VK_SUCCESS == result) {
6862         if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
6863                     (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS", "Created Descriptor Pool 0x%" PRIxLEAST64,
6864                     (uint64_t)*pDescriptorPool))
6865             return VK_ERROR_VALIDATION_FAILED_EXT;
6866         DESCRIPTOR_POOL_STATE *pNewNode = new DESCRIPTOR_POOL_STATE(*pDescriptorPool, pCreateInfo);
6867         if (NULL == pNewNode) {
6868             if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
6869                         (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
6870                         "Out of memory while attempting to allocate DESCRIPTOR_POOL_STATE in vkCreateDescriptorPool()"))
6871                 return VK_ERROR_VALIDATION_FAILED_EXT;
6872         } else {
6873             std::lock_guard<std::mutex> lock(global_lock);
6874             dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode;
6875         }
6876     } else {
6877         // TODO: Determine whether any cleanup is needed if pool creation fails.
6878     }
6879     return result;
6880 }
6881 
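// Reset the pool down the chain, then clear the layer's tracking state for the pool's descriptor sets.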
6882 VKAPI_ATTR VkResult VKAPI_CALL
6883 ResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags) {
6884     // TODO : Add checks for VALIDATION_ERROR_00928
6885     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6886     VkResult result = dev_data->dispatch_table.ResetDescriptorPool(device, descriptorPool, flags);
6887     if (VK_SUCCESS == result) {
6888         std::lock_guard<std::mutex> lock(global_lock);
6889         clearDescriptorPool(dev_data, device, descriptorPool, flags);
6890     }
6891     return result;
6892 }
6893 // Ensure the pool contains enough descriptors and descriptor sets to satisfy
6894 // an allocation request. Fills common_data with the total number of descriptors of each type required,
6895 // as well as DescriptorSetLayout ptrs used for later update.
6896 static bool PreCallValidateAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
6897                                                   cvdescriptorset::AllocateDescriptorSetsData *common_data) {
6898     if (dev_data->instance_data->disabled.allocate_descriptor_sets)
6899         return false;
6900     // All state checks for AllocateDescriptorSets are done in a single function
6901     return cvdescriptorset::ValidateAllocateDescriptorSets(dev_data->report_data, pAllocateInfo, dev_data, common_data);
6902 }
6903 // Allocation state was good and call down chain was made so update state based on allocating descriptor sets
6904 static void PostCallRecordAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
6905                                                  VkDescriptorSet *pDescriptorSets,
6906                                                  const cvdescriptorset::AllocateDescriptorSetsData *common_data) {
6907     // All the updates are contained in a single cvdescriptorset function
6908     cvdescriptorset::PerformAllocateDescriptorSets(pAllocateInfo, pDescriptorSets, common_data, &dev_data->descriptorPoolMap,
6909                                                    &dev_data->setMap, dev_data);
6910 }
6911 
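// Validate the request against the pool's remaining capacity and set layouts, call down the chain, then record the new sets on success.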
6912 VKAPI_ATTR VkResult VKAPI_CALL
6913 AllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo, VkDescriptorSet *pDescriptorSets) {
6914     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6915     std::unique_lock<std::mutex> lock(global_lock);
6916     cvdescriptorset::AllocateDescriptorSetsData common_data(pAllocateInfo->descriptorSetCount);
6917     bool skip_call = PreCallValidateAllocateDescriptorSets(dev_data, pAllocateInfo, &common_data);
6918     lock.unlock();
6919 
6920     if (skip_call)
6921         return VK_ERROR_VALIDATION_FAILED_EXT;
6922 
6923     VkResult result = dev_data->dispatch_table.AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
6924 
6925     if (VK_SUCCESS == result) {
6926         lock.lock();
6927         PostCallRecordAllocateDescriptorSets(dev_data, pAllocateInfo, pDescriptorSets, &common_data);
6928         lock.unlock();
6929     }
6930     return result;
6931 }
6932 // Verify state before freeing DescriptorSets
6933 static bool PreCallValidateFreeDescriptorSets(const layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
6934                                               const VkDescriptorSet *descriptor_sets) {
6935     if (dev_data->instance_data->disabled.free_descriptor_sets)
6936         return false;
6937     bool skip_call = false;
6938     // First make sure sets being destroyed are not currently in-use
6939     for (uint32_t i = 0; i < count; ++i)
6940         skip_call |= validateIdleDescriptorSet(dev_data, descriptor_sets[i], "vkFreeDescriptorSets");
6941 
6942     DESCRIPTOR_POOL_STATE *pool_state = getDescriptorPoolState(dev_data, pool);
6943     if (pool_state && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pool_state->createInfo.flags)) {
6944         // Can't Free from a NON_FREE pool
6945         skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
6946                              reinterpret_cast<uint64_t &>(pool), __LINE__, VALIDATION_ERROR_00922, "DS",
6947                              "It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
6948                              "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT. %s",
6949                              validation_error_map[VALIDATION_ERROR_00922]);
6950     }
6951     return skip_call;
6952 }
6953 // Sets have been removed from the pool so update underlying state
6954 static void PostCallRecordFreeDescriptorSets(layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
6955                                              const VkDescriptorSet *descriptor_sets) {
6956     DESCRIPTOR_POOL_STATE *pool_state = getDescriptorPoolState(dev_data, pool);
6957     // Update available descriptor sets in pool
6958     pool_state->availableSets += count;
6959 
6960     // For each freed descriptor set, add its descriptors back into the pool as available and remove the set from the pool and setMap
6961     for (uint32_t i = 0; i < count; ++i) {
6962         auto set_state = dev_data->setMap[descriptor_sets[i]];
6963         uint32_t type_index = 0, descriptor_count = 0;
6964         for (uint32_t j = 0; j < set_state->GetBindingCount(); ++j) {
6965             type_index = static_cast<uint32_t>(set_state->GetTypeFromIndex(j));
6966             descriptor_count = set_state->GetDescriptorCountFromIndex(j);
6967             pool_state->availableDescriptorTypeCount[type_index] += descriptor_count;
6968         }
6969         freeDescriptorSet(dev_data, set_state);
6970         pool_state->sets.erase(set_state);
6971     }
6972 }
6973 
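// Verify the sets are idle and that the pool allows individual frees, call down the chain, then return the sets' descriptors to the pool.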
6974 VKAPI_ATTR VkResult VKAPI_CALL
6975 FreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count, const VkDescriptorSet *pDescriptorSets) {
6976     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6977     // Make sure that no sets being destroyed are in-flight
6978     std::unique_lock<std::mutex> lock(global_lock);
6979     bool skip_call = PreCallValidateFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
6980     lock.unlock();
6981 
6982     if (skip_call)
6983         return VK_ERROR_VALIDATION_FAILED_EXT;
6984     VkResult result = dev_data->dispatch_table.FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
6985     if (VK_SUCCESS == result) {
6986         lock.lock();
6987         PostCallRecordFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
6988         lock.unlock();
6989     }
6990     return result;
6991 }
6992 // TODO : This is a Proof-of-concept for core validation architecture
6993 //  Really we'll want to break out these functions to separate files but
6994 //  keeping it all together here to prove out design
6995 // PreCallValidate* handles validating all of the state prior to calling down chain to UpdateDescriptorSets()
6996 static bool PreCallValidateUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
6997                                                 const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
6998                                                 const VkCopyDescriptorSet *pDescriptorCopies) {
6999     if (dev_data->instance_data->disabled.update_descriptor_sets)
7000         return false;
7001     // First thing to do is perform map look-ups.
7002     // NOTE : UpdateDescriptorSets is somewhat unique in that it's operating on a number of DescriptorSets
7003     //  so we can't just do a single map look-up up-front, but do them individually in functions below
7004 
7005     // Now make call(s) that validate state, but don't perform state updates in this function
7006     // Note: DescriptorSets are unique here in that we don't yet have a specific instance, so use a helper function in the
7007     //  namespace that parses the params and makes calls into the specific class instances
7008     return cvdescriptorset::ValidateUpdateDescriptorSets(dev_data->report_data, dev_data, descriptorWriteCount, pDescriptorWrites,
7009                                                          descriptorCopyCount, pDescriptorCopies);
7010 }
7011 // PostCallRecord* handles recording state updates following call down chain to UpdateDescriptorSets()
7012 static void PostCallRecordUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
7013                                                const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
7014                                                const VkCopyDescriptorSet *pDescriptorCopies) {
7015     cvdescriptorset::PerformUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
7016                                                  pDescriptorCopies);
7017 }
7018 
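// Validate all writes and copies up front; only if validation passes is the call made down the chain and the state recorded.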
7019 VKAPI_ATTR void VKAPI_CALL
7020 UpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites,
7021                      uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies) {
7022     // Only map look-up at top level is for device-level layer_data
7023     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
7024     std::unique_lock<std::mutex> lock(global_lock);
7025     bool skip_call = PreCallValidateUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
7026                                                          pDescriptorCopies);
7027     lock.unlock();
7028     if (!skip_call) {
7029         dev_data->dispatch_table.UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
7030                                                       pDescriptorCopies);
7031         lock.lock();
7032         // Since UpdateDescriptorSets() is void, nothing to check prior to updating state
7033         PostCallRecordUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
7034                                            pDescriptorCopies);
7035     }
7036 }
7037 
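// Allocate command buffers down the chain and, on success, create GLOBAL_CB_NODE tracking state for each one
// and associate the buffers with their command pool.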
7038 VKAPI_ATTR VkResult VKAPI_CALL
7039 AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo, VkCommandBuffer *pCommandBuffer) {
7040     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
7041     VkResult result = dev_data->dispatch_table.AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
7042     if (VK_SUCCESS == result) {
7043         std::unique_lock<std::mutex> lock(global_lock);
7044         auto pPool = getCommandPoolNode(dev_data, pCreateInfo->commandPool);
7045 
7046         if (pPool) {
7047             for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) {
7048                 // Add command buffer to its commandPool map
7049                 pPool->commandBuffers.push_back(pCommandBuffer[i]);
7050                 GLOBAL_CB_NODE *pCB = new GLOBAL_CB_NODE;
7051                 // Add command buffer to map
7052                 dev_data->commandBufferMap[pCommandBuffer[i]] = pCB;
7053                 resetCB(dev_data, pCommandBuffer[i]);
7054                 pCB->createInfo = *pCreateInfo;
7055                 pCB->device = device;
7056             }
7057         }
7058         printCBList(dev_data);
7059         lock.unlock();
7060     }
7061     return result;
7062 }
7063 
7064 // Add bindings between the given cmd buffer & framebuffer and the framebuffer's children
7065 static void AddFramebufferBinding(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, FRAMEBUFFER_STATE *fb_state) {
7066     addCommandBufferBinding(&fb_state->cb_bindings,
7067                             {reinterpret_cast<uint64_t &>(fb_state->framebuffer), VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT},
7068                             cb_state);
7069     for (auto attachment : fb_state->attachments) {
7070         auto view_state = attachment.view_state;
7071         if (view_state) {
7072             AddCommandBufferBindingImageView(dev_data, cb_state, view_state);
7073         }
7074         auto rp_state = getRenderPassState(dev_data, fb_state->createInfo.renderPass);
7075         if (rp_state) {
7076             addCommandBufferBinding(
7077                 &rp_state->cb_bindings,
7078                 {reinterpret_cast<uint64_t &>(rp_state->renderPass), VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT}, cb_state);
7079         }
7080     }
7081 }
7082 
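// Validate begin-time rules (in-flight use, secondary-buffer inheritance info, implicit reset permission),
// then transition the command buffer into the RECORDING state before calling down the chain.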
7083 VKAPI_ATTR VkResult VKAPI_CALL
7084 BeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
7085     bool skip_call = false;
7086     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7087     std::unique_lock<std::mutex> lock(global_lock);
7088     // Validate command buffer level
7089     GLOBAL_CB_NODE *cb_node = getCBNode(dev_data, commandBuffer);
7090     if (cb_node) {
7091         // This implicitly resets the Cmd Buffer so make sure any fence is done and then clear memory references
7092         if (dev_data->globalInFlightCmdBuffers.count(commandBuffer)) {
7093             skip_call |=
7094                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7095                         (uint64_t)commandBuffer, __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
7096                         "Calling vkBeginCommandBuffer() on active command buffer 0x%p before it has completed. "
7097                         "You must check command buffer fence before this call.",
7098                         commandBuffer);
7099         }
7100         clear_cmd_buf_and_mem_references(dev_data, cb_node);
7101         if (cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
7102             // Secondary Command Buffer
7103             const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
7104             if (!pInfo) {
7105                 skip_call |=
7106                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7107                             reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
7108                             "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must have inheritance info.",
7109                             reinterpret_cast<void *>(commandBuffer));
7110             } else {
7111                 if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
7112                     if (!pInfo->renderPass) { // renderpass should NOT be null for a Secondary CB
7113                         skip_call |= log_msg(
7114                             dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7115                             reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
7116                             "vkBeginCommandBuffer(): Secondary Command Buffers (0x%p) must specify a valid renderpass parameter.",
7117                             reinterpret_cast<void *>(commandBuffer));
7118                     }
7119                     if (!pInfo->framebuffer) { // framebuffer may be null for a Secondary CB, but this affects perf
7120                         skip_call |= log_msg(
7121                             dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7122                             reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
7123                             "vkBeginCommandBuffer(): Secondary Command Buffers (0x%p) may perform better if a "
7124                             "valid framebuffer parameter is specified.",
7125                             reinterpret_cast<void *>(commandBuffer));
7126                     } else {
7127                         string errorString = "";
7128                         auto framebuffer = getFramebufferState(dev_data, pInfo->framebuffer);
7129                         if (framebuffer) {
7130                             if ((framebuffer->createInfo.renderPass != pInfo->renderPass) &&
7131                                 !verify_renderpass_compatibility(dev_data, framebuffer->renderPassCreateInfo.ptr(),
7132                                                                  getRenderPassState(dev_data, pInfo->renderPass)->createInfo.ptr(),
7133                                                                  errorString)) {
7134                                 // renderPass that framebuffer was created with must be compatible with local renderPass
7135                                 skip_call |= log_msg(
7136                                     dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7137                                     VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer),
7138                                     __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
7139                                     "vkBeginCommandBuffer(): Secondary Command "
7140                                     "Buffer (0x%p) renderPass (0x%" PRIxLEAST64 ") is incompatible w/ framebuffer "
7141                                     "(0x%" PRIxLEAST64 ") w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
7142                                     reinterpret_cast<void *>(commandBuffer), reinterpret_cast<const uint64_t &>(pInfo->renderPass),
7143                                     reinterpret_cast<const uint64_t &>(pInfo->framebuffer),
7144                                     reinterpret_cast<uint64_t &>(framebuffer->createInfo.renderPass), errorString.c_str());
7145                             }
7146                             // Connect this framebuffer and its children to this cmdBuffer
7147                             AddFramebufferBinding(dev_data, cb_node, framebuffer);
7148                         }
7149                     }
7150                 }
7151                 if ((pInfo->occlusionQueryEnable == VK_FALSE ||
7152                      dev_data->enabled_features.occlusionQueryPrecise == VK_FALSE) &&
7153                     (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
7154                     skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7155                                          VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer),
7156                                          __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
7157                                          "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must not have "
7158                                          "VK_QUERY_CONTROL_PRECISE_BIT if occlusionQuery is disabled or the device does not "
7159                                          "support precise occlusion queries.",
7160                                          reinterpret_cast<void *>(commandBuffer));
7161                 }
7162             }
7163             if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) {
7164                 auto renderPass = getRenderPassState(dev_data, pInfo->renderPass);
7165                 if (renderPass) {
7166                     if (pInfo->subpass >= renderPass->createInfo.subpassCount) {
7167                         skip_call |= log_msg(
7168                             dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7169                             (uint64_t)commandBuffer, __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
7170                             "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must have a subpass index (%d) "
7171                             "that is less than the number of subpasses (%d).",
7172                             (void *)commandBuffer, pInfo->subpass, renderPass->createInfo.subpassCount);
7173                     }
7174                 }
7175             }
7176         }
7177         if (CB_RECORDING == cb_node->state) {
7178             skip_call |=
7179                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7180                         (uint64_t)commandBuffer, __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
7181                         "vkBeginCommandBuffer(): Cannot call Begin on command buffer (0x%" PRIxLEAST64
7182                         ") in the RECORDING state. Must first call vkEndCommandBuffer().",
7183                         (uint64_t)commandBuffer);
7184         } else if (CB_RECORDED == cb_node->state || (CB_INVALID == cb_node->state && CMD_END == cb_node->cmds.back().type)) {
7185             VkCommandPool cmdPool = cb_node->createInfo.commandPool;
7186             auto pPool = getCommandPoolNode(dev_data, cmdPool);
7187             if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
7188                 skip_call |=
7189                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7190                             (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
7191                             "Call to vkBeginCommandBuffer() on command buffer (0x%" PRIxLEAST64
7192                             ") attempts to implicitly reset cmdBuffer created from command pool (0x%" PRIxLEAST64
7193                             ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
7194                             (uint64_t)commandBuffer, (uint64_t)cmdPool);
7195             }
7196             resetCB(dev_data, commandBuffer);
7197         }
7198         // Set updated state here in case implicit reset occurs above
7199         cb_node->state = CB_RECORDING;
7200         cb_node->beginInfo = *pBeginInfo;
7201         if (cb_node->beginInfo.pInheritanceInfo) {
7202             cb_node->inheritanceInfo = *(cb_node->beginInfo.pInheritanceInfo);
7203             cb_node->beginInfo.pInheritanceInfo = &cb_node->inheritanceInfo;
7204             // If we are a secondary command buffer and inheriting, update the items we should inherit.
7205             if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) &&
7206                 (cb_node->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
7207                 cb_node->activeRenderPass = getRenderPassState(dev_data, cb_node->beginInfo.pInheritanceInfo->renderPass);
7208                 cb_node->activeSubpass = cb_node->beginInfo.pInheritanceInfo->subpass;
7209                 cb_node->framebuffers.insert(cb_node->beginInfo.pInheritanceInfo->framebuffer);
7210             }
7211         }
7212     } else {
7213         skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7214                              (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
7215                              "vkBeginCommandBuffer(): Unable to find CommandBuffer Node for command buffer 0x%p!",
7216                              (void *)commandBuffer);
7217     }
7218     lock.unlock();
7219     if (skip_call) {
7220         return VK_ERROR_VALIDATION_FAILED_EXT;
7221     }
7222     VkResult result = dev_data->dispatch_table.BeginCommandBuffer(commandBuffer, pBeginInfo);
7223 
7224     return result;
7225 }
7226 
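// Check for an active render pass (where disallowed) and unfinished queries, then mark the buffer RECORDED on a successful call down the chain.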
7227 VKAPI_ATTR VkResult VKAPI_CALL EndCommandBuffer(VkCommandBuffer commandBuffer) {
7228     bool skip_call = false;
7229     VkResult result = VK_SUCCESS;
7230     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7231     std::unique_lock<std::mutex> lock(global_lock);
7232     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7233     if (pCB) {
7234         if ((VK_COMMAND_BUFFER_LEVEL_PRIMARY == pCB->createInfo.level) || !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
7235             // This needs spec clarification to update valid usage, see comments in PR:
7236             // https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/pull/516#discussion_r63013756
7237             skip_call |= insideRenderPass(dev_data, pCB, "vkEndCommandBuffer");
7238         }
7239         skip_call |= addCmd(dev_data, pCB, CMD_END, "vkEndCommandBuffer()");
7240         for (auto query : pCB->activeQueries) {
7241             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7242                                  DRAWSTATE_INVALID_QUERY, "DS",
7243                                  "Ending command buffer with in progress query: queryPool 0x%" PRIx64 ", index %d",
7244                                  (uint64_t)(query.pool), query.index);
7245         }
7246     }
7247     if (!skip_call) {
7248         lock.unlock();
7249         result = dev_data->dispatch_table.EndCommandBuffer(commandBuffer);
7250         lock.lock();
7251         if (VK_SUCCESS == result) {
7252             pCB->state = CB_RECORDED;
7253             // Reset CB status flags
7254             pCB->status = 0;
7255             printCB(dev_data, commandBuffer);
7256         }
7257     } else {
7258         result = VK_ERROR_VALIDATION_FAILED_EXT;
7259     }
7260     lock.unlock();
7261     return result;
7262 }
7263 
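// A reset requires the pool's RESET_COMMAND_BUFFER flag and an idle (not in-flight) command buffer; on success the tracked state is cleared.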
7264 VKAPI_ATTR VkResult VKAPI_CALL
7265 ResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
7266     bool skip_call = false;
7267     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7268     std::unique_lock<std::mutex> lock(global_lock);
7269     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7270     VkCommandPool cmdPool = pCB->createInfo.commandPool;
7271     auto pPool = getCommandPoolNode(dev_data, cmdPool);
7272     if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
7273         skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7274                              (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
7275                              "Attempt to reset command buffer (0x%" PRIxLEAST64 ") created from command pool (0x%" PRIxLEAST64
7276                              ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
7277                              (uint64_t)commandBuffer, (uint64_t)cmdPool);
7278     }
7279     skip_call |= checkCommandBufferInFlight(dev_data, pCB, "reset", VALIDATION_ERROR_00092);
7280     lock.unlock();
7281     if (skip_call)
7282         return VK_ERROR_VALIDATION_FAILED_EXT;
7283     VkResult result = dev_data->dispatch_table.ResetCommandBuffer(commandBuffer, flags);
7284     if (VK_SUCCESS == result) {
7285         lock.lock();
7286         dev_data->globalInFlightCmdBuffers.erase(commandBuffer);
7287         resetCB(dev_data, commandBuffer);
7288         lock.unlock();
7289     }
7290     return result;
7291 }
7292 
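// Record the bind command, flag binding a compute pipeline inside an active render pass as an error, and track the bound pipeline state.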
7293 VKAPI_ATTR void VKAPI_CALL
7294 CmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) {
7295     bool skip_call = false;
7296     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7297     std::unique_lock<std::mutex> lock(global_lock);
7298     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7299     if (pCB) {
7300         skip_call |= addCmd(dev_data, pCB, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
7301         if ((VK_PIPELINE_BIND_POINT_COMPUTE == pipelineBindPoint) && (pCB->activeRenderPass)) {
7302             skip_call |=
7303                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
7304                         (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
7305                         "Incorrectly binding compute pipeline (0x%" PRIxLEAST64 ") during active RenderPass (0x%" PRIxLEAST64 ")",
7306                         (uint64_t)pipeline, (uint64_t)pCB->activeRenderPass->renderPass);
7307         }
7308 
7309         PIPELINE_STATE *pPN = getPipelineState(dev_data, pipeline);
7310         if (pPN) {
7311             pCB->lastBound[pipelineBindPoint].pipeline_state = pPN;
7312             set_cb_pso_status(pCB, pPN);
7313             set_pipeline_state(pPN);
7314         } else {
7315             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
7316                                  (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_PIPELINE, "DS",
7317                                  "Attempt to bind Pipeline 0x%" PRIxLEAST64 " that doesn't exist!", (uint64_t)(pipeline));
7318         }
7319         // Only add the binding when the pipeline state exists; avoids dereferencing null for an unknown handle
7320         if (pPN) addCommandBufferBinding(&pPN->cb_bindings,
                                         {reinterpret_cast<uint64_t &>(pipeline), VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT}, pCB);
7321     }
7322     lock.unlock();
7323     if (!skip_call)
7324         dev_data->dispatch_table.CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
7325 }
7326 
7327 VKAPI_ATTR void VKAPI_CALL
7328 CmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport *pViewports) {
7329     bool skip_call = false;
7330     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7331     std::unique_lock<std::mutex> lock(global_lock);
7332     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7333     if (pCB) {
7334         skip_call |= addCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE, "vkCmdSetViewport()");
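        // Record which viewport slots have been set dynamically; e.g. firstViewport=1, viewportCount=2 sets bits 1 and 2 (0b110)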
7335         pCB->viewportMask |= ((1u<<viewportCount) - 1u) << firstViewport;
7336     }
7337     lock.unlock();
7338     if (!skip_call)
7339         dev_data->dispatch_table.CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
7340 }
7341 
7342 VKAPI_ATTR void VKAPI_CALL
7343 CmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D *pScissors) {
7344     bool skip_call = false;
7345     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7346     std::unique_lock<std::mutex> lock(global_lock);
7347     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7348     if (pCB) {
7349         skip_call |= addCmd(dev_data, pCB, CMD_SETSCISSORSTATE, "vkCmdSetScissor()");
7350         pCB->scissorMask |= ((1u<<scissorCount) - 1u) << firstScissor;
7351     }
7352     lock.unlock();
7353     if (!skip_call)
7354         dev_data->dispatch_table.CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors);
7355 }
7356 
7357 VKAPI_ATTR void VKAPI_CALL CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
7358     bool skip_call = false;
7359     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7360     std::unique_lock<std::mutex> lock(global_lock);
7361     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7362     if (pCB) {
7363         skip_call |= addCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE, "vkCmdSetLineWidth()");
7364         pCB->status |= CBSTATUS_LINE_WIDTH_SET;
7365 
7366         PIPELINE_STATE *pPipeTrav = pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline_state;
7367         if (pPipeTrav != NULL && !isDynamic(pPipeTrav, VK_DYNAMIC_STATE_LINE_WIDTH)) {
7368             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
7369                                  reinterpret_cast<uint64_t &>(commandBuffer), __LINE__, DRAWSTATE_INVALID_SET, "DS",
7370                                  "vkCmdSetLineWidth called but pipeline was created without VK_DYNAMIC_STATE_LINE_WIDTH "
7371                                  "flag.  This is undefined behavior and could be ignored.");
7372         } else {
7373             skip_call |= verifyLineWidth(dev_data, DRAWSTATE_INVALID_SET, reinterpret_cast<uint64_t &>(commandBuffer), lineWidth);
7374         }
7375     }
7376     lock.unlock();
7377     if (!skip_call)
7378         dev_data->dispatch_table.CmdSetLineWidth(commandBuffer, lineWidth);
7379 }
7380 
7381 VKAPI_ATTR void VKAPI_CALL
7382 CmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor) {
7383     bool skip_call = false;
7384     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7385     std::unique_lock<std::mutex> lock(global_lock);
7386     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7387     if (pCB) {
7388         skip_call |= addCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE, "vkCmdSetDepthBias()");
7389         pCB->status |= CBSTATUS_DEPTH_BIAS_SET;
7390     }
7391     lock.unlock();
7392     if (!skip_call)
7393         dev_data->dispatch_table.CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor);
7394 }
7395 
7396 VKAPI_ATTR void VKAPI_CALL CmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
7397     bool skip_call = false;
7398     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7399     std::unique_lock<std::mutex> lock(global_lock);
7400     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7401     if (pCB) {
7402         skip_call |= addCmd(dev_data, pCB, CMD_SETBLENDSTATE, "vkCmdSetBlendConstants()");
7403         pCB->status |= CBSTATUS_BLEND_CONSTANTS_SET;
7404     }
7405     lock.unlock();
7406     if (!skip_call)
7407         dev_data->dispatch_table.CmdSetBlendConstants(commandBuffer, blendConstants);
7408 }
7409 
7410 VKAPI_ATTR void VKAPI_CALL
7411 CmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
7412     bool skip_call = false;
7413     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7414     std::unique_lock<std::mutex> lock(global_lock);
7415     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7416     if (pCB) {
7417         skip_call |= addCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE, "vkCmdSetDepthBounds()");
7418         pCB->status |= CBSTATUS_DEPTH_BOUNDS_SET;
7419     }
7420     lock.unlock();
7421     if (!skip_call)
7422         dev_data->dispatch_table.CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds);
7423 }
7424 
7425 VKAPI_ATTR void VKAPI_CALL
7426 CmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask) {
7427     bool skip_call = false;
7428     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7429     std::unique_lock<std::mutex> lock(global_lock);
7430     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7431     if (pCB) {
7432         skip_call |= addCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE, "vkCmdSetStencilCompareMask()");
7433         pCB->status |= CBSTATUS_STENCIL_READ_MASK_SET;
7434     }
7435     lock.unlock();
7436     if (!skip_call)
7437         dev_data->dispatch_table.CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask);
7438 }
7439 
7440 VKAPI_ATTR void VKAPI_CALL
7441 CmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) {
7442     bool skip_call = false;
7443     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7444     std::unique_lock<std::mutex> lock(global_lock);
7445     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7446     if (pCB) {
7447         skip_call |= addCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE, "vkCmdSetStencilWriteMask()");
7448         pCB->status |= CBSTATUS_STENCIL_WRITE_MASK_SET;
7449     }
7450     lock.unlock();
7451     if (!skip_call)
7452         dev_data->dispatch_table.CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask);
7453 }
7454 
7455 VKAPI_ATTR void VKAPI_CALL
7456 CmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
7457     bool skip_call = false;
7458     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7459     std::unique_lock<std::mutex> lock(global_lock);
7460     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7461     if (pCB) {
7462         skip_call |= addCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE, "vkCmdSetStencilReference()");
7463         pCB->status |= CBSTATUS_STENCIL_REFERENCE_SET;
7464     }
7465     lock.unlock();
7466     if (!skip_call)
7467         dev_data->dispatch_table.CmdSetStencilReference(commandBuffer, faceMask, reference);
7468 }
7469 
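// Validate compatibility of each bound set with the pipeline layout, check dynamic offset counts and alignment,
// and invalidate previously bound sets that the new binding disturbs.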
7470 VKAPI_ATTR void VKAPI_CALL
7471 CmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout,
7472                       uint32_t firstSet, uint32_t setCount, const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
7473                       const uint32_t *pDynamicOffsets) {
7474     bool skip_call = false;
7475     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7476     std::unique_lock<std::mutex> lock(global_lock);
7477     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7478     if (pCB) {
7479         if (pCB->state == CB_RECORDING) {
7480             // Track total count of dynamic descriptors to make sure we have an offset for each one
7481             uint32_t totalDynamicDescriptors = 0;
7482             string errorString = "";
7483             uint32_t lastSetIndex = firstSet + setCount - 1;
7484             if (lastSetIndex >= pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size()) {
7485                 pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
7486                 pCB->lastBound[pipelineBindPoint].dynamicOffsets.resize(lastSetIndex + 1);
7487             }
7488             auto oldFinalBoundSet = pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex];
7489             auto pipeline_layout = getPipelineLayout(dev_data, layout);
7490             for (uint32_t i = 0; i < setCount; i++) {
7491                 cvdescriptorset::DescriptorSet *pSet = getSetNode(dev_data, pDescriptorSets[i]);
7492                 if (pSet) {
7493                     pCB->lastBound[pipelineBindPoint].pipeline_layout = *pipeline_layout;
7494                     pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i + firstSet] = pSet;
7495                     skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
7496                                          VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7497                                          DRAWSTATE_NONE, "DS", "Descriptor Set 0x%" PRIxLEAST64 " bound on pipeline %s",
7498                                          (uint64_t)pDescriptorSets[i], string_VkPipelineBindPoint(pipelineBindPoint));
7499                     if (!pSet->IsUpdated() && (pSet->GetTotalDescriptorCount() != 0)) {
7500                         skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
7501                                              VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7502                                              DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
7503                                              "Descriptor Set 0x%" PRIxLEAST64
7504                                              " bound but it was never updated. You may want to either update it or not bind it.",
7505                                              (uint64_t)pDescriptorSets[i]);
7506                     }
7507                     // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
7508                     if (!verify_set_layout_compatibility(dev_data, pSet, pipeline_layout, i + firstSet, errorString)) {
7509                         skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7510                                              VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7511                                              DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
7512                                              "descriptorSet #%u being bound is not compatible with overlapping descriptorSetLayout "
7513                                              "at index %u of pipelineLayout 0x%" PRIxLEAST64 " due to: %s",
7514                                              i, i + firstSet, reinterpret_cast<uint64_t &>(layout), errorString.c_str());
7515                     }
7516 
7517                     auto setDynamicDescriptorCount = pSet->GetDynamicDescriptorCount();
7518 
7519                     pCB->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + i].clear();
7520 
7521                     if (setDynamicDescriptorCount) {
7522                         // First make sure we won't overstep bounds of pDynamicOffsets array
7523                         if ((totalDynamicDescriptors + setDynamicDescriptorCount) > dynamicOffsetCount) {
7524                             skip_call |=
7525                                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7526                                         VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7527                                         DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
7528                                         "descriptorSet #%u (0x%" PRIxLEAST64
7529                                         ") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets "
7530                                         "array. There must be one dynamic offset for each dynamic descriptor being bound.",
7531                                         i, (uint64_t)pDescriptorSets[i], pSet->GetDynamicDescriptorCount(),
7532                                         (dynamicOffsetCount - totalDynamicDescriptors));
7533                         } else { // Validate and store dynamic offsets with the set
7534                             // Validate Dynamic Offset Minimums
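                            // Example: with minUniformBufferOffsetAlignment = 256, a dynamic offset of 260 fails (260 % 256 != 0) while 512 passes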
7535                             uint32_t cur_dyn_offset = totalDynamicDescriptors;
7536                             for (uint32_t d = 0; d < pSet->GetTotalDescriptorCount(); d++) {
7537                                 if (pSet->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
7538                                     if (vk_safe_modulo(
7539                                             pDynamicOffsets[cur_dyn_offset],
7540                                             dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) != 0) {
7541                                         skip_call |= log_msg(
7542                                             dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7543                                             VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
7544                                             DRAWSTATE_INVALID_UNIFORM_BUFFER_OFFSET, "DS",
7545                                             "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u but must be a multiple of "
7546                                             "device limit minUniformBufferOffsetAlignment 0x%" PRIxLEAST64,
7547                                             cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
7548                                             dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment);
7549                                     }
7550                                     cur_dyn_offset++;
7551                                 } else if (pSet->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
7552                                     if (vk_safe_modulo(
7553                                             pDynamicOffsets[cur_dyn_offset],
7554                                             dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) != 0) {
7555                                         skip_call |= log_msg(
7556                                             dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7557                                             VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
7558                                             DRAWSTATE_INVALID_STORAGE_BUFFER_OFFSET, "DS",
7559                                             "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u but must be a multiple of "
7560                                             "device limit minStorageBufferOffsetAlignment 0x%" PRIxLEAST64,
7561                                             cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
7562                                             dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment);
7563                                     }
7564                                     cur_dyn_offset++;
7565                                 }
7566                             }
7567 
7568                             pCB->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + i] =
7569                                 std::vector<uint32_t>(pDynamicOffsets + totalDynamicDescriptors,
7570                                                       pDynamicOffsets + totalDynamicDescriptors + setDynamicDescriptorCount);
7571                             // Keep running total of dynamic descriptor count to verify at the end
7572                             totalDynamicDescriptors += setDynamicDescriptorCount;
7573 
7574                         }
7575                     }
7576                 } else {
7577                     skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7578                                          VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7579                                          DRAWSTATE_INVALID_SET, "DS", "Attempt to bind descriptor set 0x%" PRIxLEAST64
7580                                          " that doesn't exist!",
7581                                          (uint64_t)pDescriptorSets[i]);
7582                 }
7583                 skip_call |= addCmd(dev_data, pCB, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
7584                 // For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
7585                 if (firstSet > 0) { // Check set #s below the first bound set
7586                     for (uint32_t i = 0; i < firstSet; ++i) {
7587                         if (pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] &&
7588                             !verify_set_layout_compatibility(dev_data, pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i],
7589                                                              pipeline_layout, i, errorString)) {
7590                             skip_call |= log_msg(
7591                                 dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7592                                 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
7593                                 (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS",
7594                                 "DescriptorSet 0x%" PRIxLEAST64
7595                                 " previously bound as set #%u was disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
7596                                 (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], i, (uint64_t)layout);
7597                             pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] = VK_NULL_HANDLE;
7598                         }
7599                     }
7600                 }
7601                 // Check if newly last bound set invalidates any remaining bound sets
7602                 if ((pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size() - 1) > (lastSetIndex)) {
7603                     if (oldFinalBoundSet &&
7604                         !verify_set_layout_compatibility(dev_data, oldFinalBoundSet, pipeline_layout, lastSetIndex, errorString)) {
7605                         auto old_set = oldFinalBoundSet->GetSet();
7606                         skip_call |=
7607                             log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7608                                     VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, reinterpret_cast<uint64_t &>(old_set), __LINE__,
7609                                     DRAWSTATE_NONE, "DS", "DescriptorSet 0x%" PRIxLEAST64
7610                                                           " previously bound as set #%u is incompatible with set 0x%" PRIxLEAST64
7611                                                           " newly bound as set #%u so set #%u and any subsequent sets were "
7612                                                           "disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
7613                                     reinterpret_cast<uint64_t &>(old_set), lastSetIndex,
7614                                     (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex], lastSetIndex,
7615                                     lastSetIndex + 1, (uint64_t)layout);
7616                         pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
7617                     }
7618                 }
7619             }
7620             //  dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
7621             if (totalDynamicDescriptors != dynamicOffsetCount) {
7622                 skip_call |=
7623                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7624                             (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
7625                             "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount "
7626                             "is %u. It should exactly match the number of dynamic descriptors.",
7627                             setCount, totalDynamicDescriptors, dynamicOffsetCount);
7628             }
7629         } else {
7630             skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindDescriptorSets()");
7631         }
7632     }
7633     lock.unlock();
7634     if (!skip_call)
7635         dev_data->dispatch_table.CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount,
7636                                                        pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
7637 }
7638 
7639 VKAPI_ATTR void VKAPI_CALL
7640 CmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) {
7641     bool skip_call = false;
7642     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7643     // TODO : Somewhere need to verify that IBs have correct usage state flagged
7644     std::unique_lock<std::mutex> lock(global_lock);
7645 
7646     auto buff_node = getBufferNode(dev_data, buffer);
7647     auto cb_node = getCBNode(dev_data, commandBuffer);
7648     if (cb_node && buff_node) {
7649         skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buff_node, "vkCmdBindIndexBuffer()");
7650         std::function<bool()> function = [=]() {
7651             return ValidateBufferMemoryIsValid(dev_data, buff_node, "vkCmdBindIndexBuffer()");
7652         };
7653         cb_node->validate_functions.push_back(function);
7654         skip_call |= addCmd(dev_data, cb_node, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
7655         VkDeviceSize offset_align = 0;
7656         switch (indexType) {
7657         case VK_INDEX_TYPE_UINT16:
7658             offset_align = 2;
7659             break;
7660         case VK_INDEX_TYPE_UINT32:
7661             offset_align = 4;
7662             break;
7663         default:
7664             // ParamChecker should catch bad enum, we'll also throw alignment error below if offset_align stays 0
7665             break;
7666         }
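        // For example, an offset of 6 is valid for VK_INDEX_TYPE_UINT16 (6 % 2 == 0) but not for VK_INDEX_TYPE_UINT32 (6 % 4 != 0)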
7667         if (!offset_align || (offset % offset_align)) {
7668             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7669                                  DRAWSTATE_VTX_INDEX_ALIGNMENT_ERROR, "DS",
7670                                  "vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") does not fall on alignment (%s) boundary.",
7671                                  offset, string_VkIndexType(indexType));
7672         }
7673         cb_node->status |= CBSTATUS_INDEX_BUFFER_BOUND;
7674     } else {
7675         assert(0);
7676     }
7677     lock.unlock();
7678     if (!skip_call)
7679         dev_data->dispatch_table.CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
7680 }
7681 
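// Record the vertex buffers bound by vkCmdBindVertexBuffers into the command buffer's current draw data,
// growing the tracking vector if the binding range extends past its current size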
7682 void updateResourceTracking(GLOBAL_CB_NODE *pCB, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers) {
7683     uint32_t end = firstBinding + bindingCount;
7684     if (pCB->currentDrawData.buffers.size() < end) {
7685         pCB->currentDrawData.buffers.resize(end);
7686     }
7687     for (uint32_t i = 0; i < bindingCount; ++i) {
7688         pCB->currentDrawData.buffers[i + firstBinding] = pBuffers[i];
7689     }
7690 }
7691 
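// Snapshot the currently bound vertex buffers for this draw so their usage can be checked when the command buffer is validated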
7692 static inline void updateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->drawData.push_back(pCB->currentDrawData); }
7693 
7694 VKAPI_ATTR void VKAPI_CALL CmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding,
7695                                                 uint32_t bindingCount, const VkBuffer *pBuffers,
7696                                                 const VkDeviceSize *pOffsets) {
7697     bool skip_call = false;
7698     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7699     // TODO : Somewhere need to verify that VBs have correct usage state flagged
7700     std::unique_lock<std::mutex> lock(global_lock);
7701 
7702     auto cb_node = getCBNode(dev_data, commandBuffer);
7703     if (cb_node) {
7704         for (uint32_t i = 0; i < bindingCount; ++i) {
7705             auto buff_node = getBufferNode(dev_data, pBuffers[i]);
7706             assert(buff_node);
7707             skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buff_node, "vkCmdBindVertexBuffers()");
7708             std::function<bool()> function = [=]() {
7709                 return ValidateBufferMemoryIsValid(dev_data, buff_node, "vkCmdBindVertexBuffers()");
7710             };
7711             cb_node->validate_functions.push_back(function);
7712         }
7713         skip_call |= addCmd(dev_data, cb_node, CMD_BINDVERTEXBUFFER, "vkCmdBindVertexBuffers()");
7714         updateResourceTracking(cb_node, firstBinding, bindingCount, pBuffers);
7715     } else {
7716         skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindVertexBuffers()");
7717     }
7718     lock.unlock();
7719     if (!skip_call)
7720         dev_data->dispatch_table.CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
7721 }
7722 
7723 /* expects global_lock to be held by caller */
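// For each storage image and buffer written by this command buffer, queue a deferred callback that marks its backing memory as valid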
7724 static bool markStoreImagesAndBuffersAsWritten(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
7725     bool skip_call = false;
7726 
7727     for (auto imageView : pCB->updateImages) {
7728         auto view_state = getImageViewState(dev_data, imageView);
7729         if (!view_state)
7730             continue;
7731 
7732         auto image_state = getImageState(dev_data, view_state->create_info.image);
7733         assert(image_state);
7734         std::function<bool()> function = [=]() {
7735             SetImageMemoryValid(dev_data, image_state, true);
7736             return false;
7737         };
7738         pCB->validate_functions.push_back(function);
7739     }
7740     for (auto buffer : pCB->updateBuffers) {
7741         auto buff_node = getBufferNode(dev_data, buffer);
7742         assert(buff_node);
7743         std::function<bool()> function = [=]() {
7744             SetBufferMemoryValid(dev_data, buff_node, true);
7745             return false;
7746         };
7747         pCB->validate_functions.push_back(function);
7748     }
7749     return skip_call;
7750 }
7751 
7752 VKAPI_ATTR void VKAPI_CALL CmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
7753                                    uint32_t firstVertex, uint32_t firstInstance) {
7754     bool skip_call = false;
7755     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7756     std::unique_lock<std::mutex> lock(global_lock);
7757     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7758     if (pCB) {
7759         skip_call |= addCmd(dev_data, pCB, CMD_DRAW, "vkCmdDraw()");
7760         pCB->drawCount[DRAW]++;
7761         skip_call |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDraw");
7762         skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
7763         // TODO : Need to pass commandBuffer as srcObj here
7764         skip_call |=
7765             log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7766                     __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDraw() call 0x%" PRIx64 ", reporting descriptor set state:",
7767                     g_drawCount[DRAW]++);
7768         skip_call |= synchAndPrintDSConfig(dev_data, commandBuffer);
7769         if (!skip_call) {
7770             updateResourceTrackingOnDraw(pCB);
7771         }
7772         skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdDraw");
7773     }
7774     lock.unlock();
7775     if (!skip_call)
7776         dev_data->dispatch_table.CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
7777 }
7778 
7779 VKAPI_ATTR void VKAPI_CALL CmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount,
7780                                           uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset,
7781                                           uint32_t firstInstance) {
7782     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7783     bool skip_call = false;
7784     std::unique_lock<std::mutex> lock(global_lock);
7785     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7786     if (pCB) {
7787         skip_call |= addCmd(dev_data, pCB, CMD_DRAWINDEXED, "vkCmdDrawIndexed()");
7788         pCB->drawCount[DRAW_INDEXED]++;
7789         skip_call |= validate_and_update_draw_state(dev_data, pCB, true, VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndexed");
7790         skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
7791         // TODO : Need to pass commandBuffer as srcObj here
7792         skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
7793                              VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
7794                              "vkCmdDrawIndexed() call 0x%" PRIx64 ", reporting descriptor set state:",
7795                              g_drawCount[DRAW_INDEXED]++);
7796         skip_call |= synchAndPrintDSConfig(dev_data, commandBuffer);
7797         if (!skip_call) {
7798             updateResourceTrackingOnDraw(pCB);
7799         }
7800         skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexed");
7801     }
7802     lock.unlock();
7803     if (!skip_call)
7804         dev_data->dispatch_table.CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance);
7805 }
7806 
7807 VKAPI_ATTR void VKAPI_CALL
7808 CmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
7809     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7810     bool skip_call = false;
7811     std::unique_lock<std::mutex> lock(global_lock);
7812 
7813     auto cb_node = getCBNode(dev_data, commandBuffer);
7814     auto buff_node = getBufferNode(dev_data, buffer);
7815     if (cb_node && buff_node) {
7816         skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buff_node, "vkCmdDrawIndirect()");
7817         AddCommandBufferBindingBuffer(dev_data, cb_node, buff_node);
7818         skip_call |= addCmd(dev_data, cb_node, CMD_DRAWINDIRECT, "vkCmdDrawIndirect()");
7819         cb_node->drawCount[DRAW_INDIRECT]++;
7820         skip_call |= validate_and_update_draw_state(dev_data, cb_node, false, VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndirect");
7821         skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, cb_node);
7822         // TODO : Need to pass commandBuffer as srcObj here
7823         skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
7824                              VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
7825                              "vkCmdDrawIndirect() call 0x%" PRIx64 ", reporting descriptor set state:",
7826                              g_drawCount[DRAW_INDIRECT]++);
7827         skip_call |= synchAndPrintDSConfig(dev_data, commandBuffer);
7828         if (!skip_call) {
7829             updateResourceTrackingOnDraw(cb_node);
7830         }
7831         skip_call |= outsideRenderPass(dev_data, cb_node, "vkCmdDrawIndirect()");
7832     } else {
7833         assert(0);
7834     }
7835     lock.unlock();
7836     if (!skip_call)
7837         dev_data->dispatch_table.CmdDrawIndirect(commandBuffer, buffer, offset, count, stride);
7838 }
7839 
7840 VKAPI_ATTR void VKAPI_CALL
7841 CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
7842     bool skip_call = false;
7843     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7844     std::unique_lock<std::mutex> lock(global_lock);
7845 
7846     auto cb_node = getCBNode(dev_data, commandBuffer);
7847     auto buff_node = getBufferNode(dev_data, buffer);
7848     if (cb_node && buff_node) {
7849         skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buff_node, "vkCmdDrawIndexedIndirect()");
7850         AddCommandBufferBindingBuffer(dev_data, cb_node, buff_node);
7851         skip_call |= addCmd(dev_data, cb_node, CMD_DRAWINDEXEDINDIRECT, "vkCmdDrawIndexedIndirect()");
7852         cb_node->drawCount[DRAW_INDEXED_INDIRECT]++;
7853         skip_call |=
7854             validate_and_update_draw_state(dev_data, cb_node, true, VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndexedIndirect");
7855         skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, cb_node);
7856         // TODO : Need to pass commandBuffer as srcObj here
7857         skip_call |=
7858             log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7859                     __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDrawIndexedIndirect() call 0x%" PRIx64 ", reporting descriptor set state:",
7860                     g_drawCount[DRAW_INDEXED_INDIRECT]++);
7861         skip_call |= synchAndPrintDSConfig(dev_data, commandBuffer);
7862         if (!skip_call) {
7863             updateResourceTrackingOnDraw(cb_node);
7864         }
7865         skip_call |= outsideRenderPass(dev_data, cb_node, "vkCmdDrawIndexedIndirect()");
7866     } else {
7867         assert(0);
7868     }
7869     lock.unlock();
7870     if (!skip_call)
7871         dev_data->dispatch_table.CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride);
7872 }
7873 
7874 VKAPI_ATTR void VKAPI_CALL CmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
7875     bool skip_call = false;
7876     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7877     std::unique_lock<std::mutex> lock(global_lock);
7878     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7879     if (pCB) {
7880         skip_call |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_COMPUTE, "vkCmdDispatch");
7881         skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
7882         skip_call |= addCmd(dev_data, pCB, CMD_DISPATCH, "vkCmdDispatch()");
7883         skip_call |= insideRenderPass(dev_data, pCB, "vkCmdDispatch");
7884     }
7885     lock.unlock();
7886     if (!skip_call)
7887         dev_data->dispatch_table.CmdDispatch(commandBuffer, x, y, z);
7888 }
7889 
7890 VKAPI_ATTR void VKAPI_CALL
7891 CmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
7892     bool skip_call = false;
7893     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7894     std::unique_lock<std::mutex> lock(global_lock);
7895 
7896     auto cb_node = getCBNode(dev_data, commandBuffer);
7897     auto buff_node = getBufferNode(dev_data, buffer);
7898     if (cb_node && buff_node) {
7899         skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buff_node, "vkCmdDispatchIndirect()");
7900         AddCommandBufferBindingBuffer(dev_data, cb_node, buff_node);
7901         skip_call |=
7902             validate_and_update_draw_state(dev_data, cb_node, false, VK_PIPELINE_BIND_POINT_COMPUTE, "vkCmdDispatchIndirect");
7903         skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, cb_node);
7904         skip_call |= addCmd(dev_data, cb_node, CMD_DISPATCHINDIRECT, "vkCmdDispatchIndirect()");
7905         skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdDispatchIndirect()");
7906     }
7907     lock.unlock();
7908     if (!skip_call)
7909         dev_data->dispatch_table.CmdDispatchIndirect(commandBuffer, buffer, offset);
7910 }
7911 
7912 VKAPI_ATTR void VKAPI_CALL CmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
7913                                          uint32_t regionCount, const VkBufferCopy *pRegions) {
7914     bool skip_call = false;
7915     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7916     std::unique_lock<std::mutex> lock(global_lock);
7917 
7918     auto cb_node = getCBNode(dev_data, commandBuffer);
7919     auto src_buff_node = getBufferNode(dev_data, srcBuffer);
7920     auto dst_buff_node = getBufferNode(dev_data, dstBuffer);
7921     if (cb_node && src_buff_node && dst_buff_node) {
7922         skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, src_buff_node, "vkCmdCopyBuffer()");
7923         skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_node, "vkCmdCopyBuffer()");
7924         // Update bindings between buffers and cmd buffer
7925         AddCommandBufferBindingBuffer(dev_data, cb_node, src_buff_node);
7926         AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_node);
7927         // Validate that SRC & DST buffers have correct usage flags set
7928         skip_call |= ValidateBufferUsageFlags(dev_data, src_buff_node, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true, "vkCmdCopyBuffer()",
7929                                               "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
7930         skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_node, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "vkCmdCopyBuffer()",
7931                                               "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
7932 
7933         std::function<bool()> function = [=]() {
7934             return ValidateBufferMemoryIsValid(dev_data, src_buff_node, "vkCmdCopyBuffer()");
7935         };
7936         cb_node->validate_functions.push_back(function);
7937         function = [=]() {
7938             SetBufferMemoryValid(dev_data, dst_buff_node, true);
7939             return false;
7940         };
7941         cb_node->validate_functions.push_back(function);
7942 
7943         skip_call |= addCmd(dev_data, cb_node, CMD_COPYBUFFER, "vkCmdCopyBuffer()");
7944         skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyBuffer()");
7945     } else {
7946         // Param_checker will flag errors on invalid objects, just assert here as debugging aid
7947         assert(0);
7948     }
7949     lock.unlock();
7950     if (!skip_call)
7951         dev_data->dispatch_table.CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
7952 }
7953 
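// Verify that each source subresource in the copy is in the layout the caller specified; subresources with no
// previously tracked layout in this command buffer adopt the specified layout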
7954 static bool VerifySourceImageLayout(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, VkImage srcImage,
7955                                     VkImageSubresourceLayers subLayers, VkImageLayout srcImageLayout) {
7956     bool skip_call = false;
7957 
7958     for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
7959         uint32_t layer = i + subLayers.baseArrayLayer;
7960         VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
7961         IMAGE_CMD_BUF_LAYOUT_NODE node;
7962         if (!FindLayout(cb_node, srcImage, sub, node)) {
7963             SetLayout(cb_node, srcImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(srcImageLayout, srcImageLayout));
7964             continue;
7965         }
7966         if (node.layout != srcImageLayout) {
7967             // TODO: Improve log message in the next pass
7968             skip_call |=
7969                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7970                         __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy from an image whose source layout is %s "
7971                                                                         "when its current layout is %s.",
7972                         string_VkImageLayout(srcImageLayout), string_VkImageLayout(node.layout));
7973         }
7974     }
7975     if (srcImageLayout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) {
7976         if (srcImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
7977             // TODO : Can we deal with image node from the top of call tree and avoid map look-up here?
7978             auto image_state = getImageState(dev_data, srcImage);
7979             if (image_state->createInfo.tiling != VK_IMAGE_TILING_LINEAR) {
7980                 // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
7981                 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7982                                      (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
7983                                      "Layout for input image should be TRANSFER_SRC_OPTIMAL instead of GENERAL.");
7984             }
7985         } else {
7986             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7987                                  DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for input image is %s but can only be "
7988                                                                        "TRANSFER_SRC_OPTIMAL or GENERAL.",
7989                                  string_VkImageLayout(srcImageLayout));
7990         }
7991     }
7992     return skip_call;
7993 }
7994 
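// Verify that each destination subresource in the copy is in the layout the caller specified; subresources with no
// previously tracked layout in this command buffer adopt the specified layout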
7995 static bool VerifyDestImageLayout(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, VkImage destImage,
7996                                   VkImageSubresourceLayers subLayers, VkImageLayout destImageLayout) {
7997     bool skip_call = false;
7998 
7999     for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
8000         uint32_t layer = i + subLayers.baseArrayLayer;
8001         VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
8002         IMAGE_CMD_BUF_LAYOUT_NODE node;
8003         if (!FindLayout(cb_node, destImage, sub, node)) {
8004             SetLayout(cb_node, destImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(destImageLayout, destImageLayout));
8005             continue;
8006         }
8007         if (node.layout != destImageLayout) {
8008             skip_call |=
8009                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
8010                         __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy to an image whose destination layout is %s "
8011                                                                         "when its current layout is %s.",
8012                         string_VkImageLayout(destImageLayout), string_VkImageLayout(node.layout));
8013         }
8014     }
8015     if (destImageLayout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
8016         if (destImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
8017             auto image_state = getImageState(dev_data, destImage);
8018             if (image_state->createInfo.tiling != VK_IMAGE_TILING_LINEAR) {
8019                 // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
8020                 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
8021                                      (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8022                                      "Layout for output image should be TRANSFER_DST_OPTIMAL instead of GENERAL.");
8023             }
8024         } else {
8025             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8026                                  DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for output image is %s but can only be "
8027                                                                        "TRANSFER_DST_OPTIMAL or GENERAL.",
8028                                  string_VkImageLayout(destImageLayout));
8029         }
8030     }
8031     return skip_call;
8032 }
8033 
8034 // Test if two VkExtent3D structs are equivalent
8035 static inline bool IsExtentEqual(const VkExtent3D *extent, const VkExtent3D *other_extent) {
8036     bool result = true;
8037     if ((extent->width != other_extent->width) || (extent->height != other_extent->height) ||
8038         (extent->depth != other_extent->depth)) {
8039         result = false;
8040     }
8041     return result;
8042 }
8043 
8044 // Returns the image extent of a specific subresource.
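// For example, a 64x64x1 image at mipLevel 3 has a subresource extent of (8, 8, 1)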
8045 static inline VkExtent3D GetImageSubresourceExtent(const IMAGE_STATE *img, const VkImageSubresourceLayers *subresource) {
8046     const uint32_t mip = subresource->mipLevel;
8047     VkExtent3D extent = img->createInfo.extent;
8048     extent.width = std::max(1U, extent.width >> mip);
8049     extent.height = std::max(1U, extent.height >> mip);
8050     extent.depth = std::max(1U, extent.depth >> mip);
8051     return extent;
8052 }
8053 
8054 // Test if the extent argument has all dimensions set to 0.
8055 static inline bool IsExtentZero(const VkExtent3D *extent) {
8056     return ((extent->width == 0) && (extent->height == 0) && (extent->depth == 0));
8057 }
8058 
8059 // Returns the image transfer granularity for a specific image scaled by compressed block size if necessary.
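// For example, assuming BC1's 4x4 texel blocks, a queue family granularity of (4, 4, 1) scales to (16, 16, 1) for a BC1 image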
8060 static inline VkExtent3D GetScaledItg(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const IMAGE_STATE *img) {
8061     // Default to (0, 0, 0) granularity in case we can't find the real granularity for the physical device.
8062     VkExtent3D granularity = { 0, 0, 0 };
8063     auto pPool = getCommandPoolNode(dev_data, cb_node->createInfo.commandPool);
8064     if (pPool) {
8065         granularity = dev_data->phys_dev_properties.queue_family_properties[pPool->queueFamilyIndex].minImageTransferGranularity;
8066         if (vk_format_is_compressed(img->createInfo.format)) {
8067             auto block_size = vk_format_compressed_block_size(img->createInfo.format);
8068             granularity.width *= block_size.width;
8069             granularity.height *= block_size.height;
8070         }
8071     }
8072     return granularity;
8073 }
8074 
8075 // Test elements of a VkExtent3D structure against alignment constraints contained in another VkExtent3D structure
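// For example, an extent of (32, 16, 1) is aligned to a granularity of (8, 8, 1), while (20, 16, 1) is not (20 % 8 != 0)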
8076 static inline bool IsExtentAligned(const VkExtent3D *extent, const VkExtent3D *granularity) {
8077     bool valid = true;
8078     if ((vk_safe_modulo(extent->depth, granularity->depth) != 0) || (vk_safe_modulo(extent->width, granularity->width) != 0) ||
8079         (vk_safe_modulo(extent->height, granularity->height) != 0)) {
8080         valid = false;
8081     }
8082     return valid;
8083 }
8084 
8085 // Check elements of a VkOffset3D structure against a queue family's Image Transfer Granularity values
8086 static inline bool CheckItgOffset(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const VkOffset3D *offset,
8087                                   const VkExtent3D *granularity, const uint32_t i, const char *function, const char *member) {
8088     bool skip = false;
8089     VkExtent3D offset_extent = {};
8090     offset_extent.width = static_cast<uint32_t>(abs(offset->x));
8091     offset_extent.height = static_cast<uint32_t>(abs(offset->y));
8092     offset_extent.depth = static_cast<uint32_t>(abs(offset->z));
8093     if (IsExtentZero(granularity)) {
8094         // If the queue family image transfer granularity is (0, 0, 0), then the offset must always be (0, 0, 0)
8095         if (IsExtentZero(&offset_extent) == false) {
8096             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8097                             DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
8098                             "%s: pRegion[%d].%s (x=%d, y=%d, z=%d) must be (x=0, y=0, z=0) "
8099                             "when the command buffer's queue family image transfer granularity is (w=0, h=0, d=0).",
8100                             function, i, member, offset->x, offset->y, offset->z);
8101         }
8102     } else {
8103         // If the queue family image transfer granularity is not (0, 0, 0), then the offset dimensions must always be even
8104         // integer multiples of the image transfer granularity.
8105         if (IsExtentAligned(&offset_extent, granularity) == false) {
8106             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8107                             DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
8108                             "%s: pRegion[%d].%s (x=%d, y=%d, z=%d) dimensions must be even integer "
8109                             "multiples of this command buffer's queue family image transfer granularity (w=%d, h=%d, d=%d).",
8110                             function, i, member, offset->x, offset->y, offset->z, granularity->width, granularity->height,
8111                             granularity->depth);
8112         }
8113     }
8114     return skip;
8115 }
8116 
8117 // Check elements of a VkExtent3D structure against a queue family's Image Transfer Granularity values
8118 static inline bool CheckItgExtent(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const VkExtent3D *extent,
8119                                   const VkOffset3D *offset, const VkExtent3D *granularity, const VkExtent3D *subresource_extent,
8120                                   const uint32_t i, const char *function, const char *member) {
8121     bool skip = false;
8122     if (IsExtentZero(granularity)) {
8123         // If the queue family image transfer granularity is (0, 0, 0), then the extent must always match the image
8124         // subresource extent.
8125         if (IsExtentEqual(extent, subresource_extent) == false) {
8126             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8127                             DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
8128                             "%s: pRegion[%d].%s (w=%d, h=%d, d=%d) must match the image subresource extents (w=%d, h=%d, d=%d) "
8129                             "when the command buffer's queue family image transfer granularity is (w=0, h=0, d=0).",
8130                             function, i, member, extent->width, extent->height, extent->depth, subresource_extent->width,
8131                             subresource_extent->height, subresource_extent->depth);
8132         }
8133     } else {
8134         // If the queue family image transfer granularity is not (0, 0, 0), then the extent dimensions must always be even
8135         // integer multiples of the image transfer granularity or the offset + extent dimensions must always match the image
8136         // subresource extent dimensions.
8137         VkExtent3D offset_extent_sum = {};
8138         offset_extent_sum.width = static_cast<uint32_t>(abs(offset->x)) + extent->width;
8139         offset_extent_sum.height = static_cast<uint32_t>(abs(offset->y)) + extent->height;
8140         offset_extent_sum.depth = static_cast<uint32_t>(abs(offset->z)) + extent->depth;
8141         if ((IsExtentAligned(extent, granularity) == false) && (IsExtentEqual(&offset_extent_sum, subresource_extent) == false)) {
8142             skip |=
8143                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8144                         DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
8145                         "%s: pRegion[%d].%s (w=%d, h=%d, d=%d) dimensions must be even integer multiples of this command buffer's "
8146                         "queue family image transfer granularity (w=%d, h=%d, d=%d) or offset (x=%d, y=%d, z=%d) + "
8147                         "extent (w=%d, h=%d, d=%d) must match the image subresource extents (w=%d, h=%d, d=%d).",
8148                         function, i, member, extent->width, extent->height, extent->depth, granularity->width, granularity->height,
8149                         granularity->depth, offset->x, offset->y, offset->z, extent->width, extent->height, extent->depth,
8150                         subresource_extent->width, subresource_extent->height, subresource_extent->depth);
8151         }
8152     }
8153     return skip;
8154 }
8155 
8156 // Check a uint32_t width or stride value against a queue family's Image Transfer Granularity width value
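// For example, with a granularity width of 8, a bufferRowLength of 24 passes (24 % 8 == 0) while 20 does not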
8157 static inline bool CheckItgInt(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const uint32_t value,
8158                                const uint32_t granularity, const uint32_t i, const char *function, const char *member) {
8159     bool skip = false;
8160     if (vk_safe_modulo(value, granularity) != 0) {
8161         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8162                         DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
8163                         "%s: pRegion[%d].%s (%d) must be an even integer multiple of this command buffer's queue family image "
8164                         "transfer granularity width (%d).",
8165                         function, i, member, value, granularity);
8166     }
8167     return skip;
8168 }
8169 
8170 // Check a VkDeviceSize value against a queue family's Image Transfer Granularity width value
8171 static inline bool CheckItgSize(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const VkDeviceSize value,
8172                                 const uint32_t granularity, const uint32_t i, const char *function, const char *member) {
8173     bool skip = false;
8174     if (vk_safe_modulo(value, granularity) != 0) {
8175         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8176                         DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
8177                         "%s: pRegion[%d].%s (%" PRIdLEAST64
8178                         ") must be an even integer multiple of this command buffer's queue family image transfer "
8179                         "granularity width (%d).",
8180                         function, i, member, value, granularity);
8181     }
8182     return skip;
8183 }
8184 
8185 // Check valid usage Image Transfer Granularity requirements for elements of a VkImageCopy structure
8186 static inline bool ValidateCopyImageTransferGranularityRequirements(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node,
8187                                                                     const IMAGE_STATE *img, const VkImageCopy *region,
8188                                                                     const uint32_t i, const char *function) {
8189     bool skip = false;
8190     VkExtent3D granularity = GetScaledItg(dev_data, cb_node, img);
8191     skip |= CheckItgOffset(dev_data, cb_node, &region->srcOffset, &granularity, i, function, "srcOffset");
8192     skip |= CheckItgOffset(dev_data, cb_node, &region->dstOffset, &granularity, i, function, "dstOffset");
8193     VkExtent3D subresource_extent = GetImageSubresourceExtent(img, &region->dstSubresource);
8194     skip |= CheckItgExtent(dev_data, cb_node, &region->extent, &region->dstOffset, &granularity, &subresource_extent, i, function,
8195                            "extent");
8196     return skip;
8197 }
8198 
8199 // Check valid usage Image Transfer Granularity requirements for elements of a VkBufferImageCopy structure
8200 static inline bool ValidateCopyBufferImageTransferGranularityRequirements(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node,
8201                                                                           const IMAGE_STATE *img, const VkBufferImageCopy *region,
8202                                                                           const uint32_t i, const char *function) {
8203     bool skip = false;
8204     VkExtent3D granularity = GetScaledItg(dev_data, cb_node, img);
8205     skip |= CheckItgSize(dev_data, cb_node, region->bufferOffset, granularity.width, i, function, "bufferOffset");
8206     skip |= CheckItgInt(dev_data, cb_node, region->bufferRowLength, granularity.width, i, function, "bufferRowLength");
8207     skip |= CheckItgInt(dev_data, cb_node, region->bufferImageHeight, granularity.width, i, function, "bufferImageHeight");
8208     skip |= CheckItgOffset(dev_data, cb_node, &region->imageOffset, &granularity, i, function, "imageOffset");
8209     VkExtent3D subresource_extent = GetImageSubresourceExtent(img, &region->imageSubresource);
8210     skip |= CheckItgExtent(dev_data, cb_node, &region->imageExtent, &region->imageOffset, &granularity, &subresource_extent, i,
8211                            function, "imageExtent");
8212     return skip;
8213 }
8214 
8215 VKAPI_ATTR void VKAPI_CALL
8216 CmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
8217              VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions) {
8218     bool skip_call = false;
8219     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8220     std::unique_lock<std::mutex> lock(global_lock);
8221 
8222     auto cb_node = getCBNode(dev_data, commandBuffer);
8223     auto src_image_state = getImageState(dev_data, srcImage);
8224     auto dst_image_state = getImageState(dev_data, dstImage);
8225     if (cb_node && src_image_state && dst_image_state) {
8226         skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_image_state, "vkCmdCopyImage()");
8227         skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_image_state, "vkCmdCopyImage()");
8228         // Update bindings between images and cmd buffer
8229         AddCommandBufferBindingImage(dev_data, cb_node, src_image_state);
8230         AddCommandBufferBindingImage(dev_data, cb_node, dst_image_state);
8231         // Validate that SRC & DST images have correct usage flags set
8232         skip_call |= ValidateImageUsageFlags(dev_data, src_image_state, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true, "vkCmdCopyImage()",
8233                                              "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
8234         skip_call |= ValidateImageUsageFlags(dev_data, dst_image_state, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true, "vkCmdCopyImage()",
8235                                              "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
8236         std::function<bool()> function = [=]() {
8237             return ValidateImageMemoryIsValid(dev_data, src_image_state, "vkCmdCopyImage()");
8238         };
8239         cb_node->validate_functions.push_back(function);
8240         function = [=]() {
8241             SetImageMemoryValid(dev_data, dst_image_state, true);
8242             return false;
8243         };
8244         cb_node->validate_functions.push_back(function);
8245 
8246         skip_call |= addCmd(dev_data, cb_node, CMD_COPYIMAGE, "vkCmdCopyImage()");
8247         skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyImage()");
8248         for (uint32_t i = 0; i < regionCount; ++i) {
8249             skip_call |= VerifySourceImageLayout(dev_data, cb_node, srcImage, pRegions[i].srcSubresource, srcImageLayout);
8250             skip_call |= VerifyDestImageLayout(dev_data, cb_node, dstImage, pRegions[i].dstSubresource, dstImageLayout);
8251             skip_call |= ValidateCopyImageTransferGranularityRequirements(dev_data, cb_node, dst_image_state, &pRegions[i], i,
8252                                                                           "vkCmdCopyImage()");
8253         }
8254     } else {
8255         assert(0);
8256     }
8257     lock.unlock();
8258     if (!skip_call)
8259         dev_data->dispatch_table.CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
8260                                               pRegions);
8261 }
8262 
8263 // Validate that an image's sampleCount matches the requirement for a specific API call
8264 static inline bool ValidateImageSampleCount(layer_data *dev_data, IMAGE_STATE *image_state, VkSampleCountFlagBits sample_count,
8265                                             const char *location) {
8266     bool skip = false;
8267     if (image_state->createInfo.samples != sample_count) {
8268         skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
8269                        reinterpret_cast<uint64_t &>(image_state->image), 0, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
8270                        "%s for image 0x%" PRIxLEAST64 " was created with a sample count of %s but must be %s.", location,
8271                        reinterpret_cast<uint64_t &>(image_state->image),
8272                        string_VkSampleCountFlagBits(image_state->createInfo.samples), string_VkSampleCountFlagBits(sample_count));
8273     }
8274     return skip;
8275 }
8276 
8277 VKAPI_ATTR void VKAPI_CALL
8278 CmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
8279              VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit *pRegions, VkFilter filter) {
8280     bool skip_call = false;
8281     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8282     std::unique_lock<std::mutex> lock(global_lock);
8283 
8284     auto cb_node = getCBNode(dev_data, commandBuffer);
8285     auto src_image_state = getImageState(dev_data, srcImage);
8286     auto dst_image_state = getImageState(dev_data, dstImage);
8287     if (cb_node && src_image_state && dst_image_state) {
8288         skip_call |= ValidateImageSampleCount(dev_data, src_image_state, VK_SAMPLE_COUNT_1_BIT, "vkCmdBlitImage(): srcImage");
8289         skip_call |= ValidateImageSampleCount(dev_data, dst_image_state, VK_SAMPLE_COUNT_1_BIT, "vkCmdBlitImage(): dstImage");
8290         skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_image_state, "vkCmdBlitImage()");
8291         skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_image_state, "vkCmdBlitImage()");
8292         // Update bindings between images and cmd buffer
8293         AddCommandBufferBindingImage(dev_data, cb_node, src_image_state);
8294         AddCommandBufferBindingImage(dev_data, cb_node, dst_image_state);
8295         // Validate that SRC & DST images have correct usage flags set
8296         skip_call |= ValidateImageUsageFlags(dev_data, src_image_state, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true, "vkCmdBlitImage()",
8297                                              "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
8298         skip_call |= ValidateImageUsageFlags(dev_data, dst_image_state, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true, "vkCmdBlitImage()",
8299                                              "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
8300         std::function<bool()> function = [=]() {
8301             return ValidateImageMemoryIsValid(dev_data, src_image_state, "vkCmdBlitImage()");
8302         };
8303         cb_node->validate_functions.push_back(function);
8304         function = [=]() {
8305             SetImageMemoryValid(dev_data, dst_image_state, true);
8306             return false;
8307         };
8308         cb_node->validate_functions.push_back(function);
8309 
8310         skip_call |= addCmd(dev_data, cb_node, CMD_BLITIMAGE, "vkCmdBlitImage()");
8311         skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdBlitImage()");
8312     } else {
8313         assert(0);
8314     }
8315     lock.unlock();
8316     if (!skip_call)
8317         dev_data->dispatch_table.CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
8318                                               pRegions, filter);
8319 }
8320 
8321 VKAPI_ATTR void VKAPI_CALL CmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer,
8322                                                 VkImage dstImage, VkImageLayout dstImageLayout,
8323                                                 uint32_t regionCount, const VkBufferImageCopy *pRegions) {
8324     bool skip_call = false;
8325     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8326     std::unique_lock<std::mutex> lock(global_lock);
8327 
8328     auto cb_node = getCBNode(dev_data, commandBuffer);
8329     auto src_buff_node = getBufferNode(dev_data, srcBuffer);
8330     auto dst_image_state = getImageState(dev_data, dstImage);
8331     if (cb_node && src_buff_node && dst_image_state) {
8332         skip_call |=
8333             ValidateImageSampleCount(dev_data, dst_image_state, VK_SAMPLE_COUNT_1_BIT, "vkCmdCopyBufferToImage(): dstImage");
8334         skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, src_buff_node, "vkCmdCopyBufferToImage()");
8335         skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_image_state, "vkCmdCopyBufferToImage()");
8336         AddCommandBufferBindingBuffer(dev_data, cb_node, src_buff_node);
8337         AddCommandBufferBindingImage(dev_data, cb_node, dst_image_state);
8338         skip_call |= ValidateBufferUsageFlags(dev_data, src_buff_node, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
8339                                               "vkCmdCopyBufferToImage()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
8340         skip_call |= ValidateImageUsageFlags(dev_data, dst_image_state, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
8341                                              "vkCmdCopyBufferToImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
8342         std::function<bool()> function = [=]() {
8343             SetImageMemoryValid(dev_data, dst_image_state, true);
8344             return false;
8345         };
8346         cb_node->validate_functions.push_back(function);
8347         function = [=]() { return ValidateBufferMemoryIsValid(dev_data, src_buff_node, "vkCmdCopyBufferToImage()"); };
8348         cb_node->validate_functions.push_back(function);
8349 
8350         skip_call |= addCmd(dev_data, cb_node, CMD_COPYBUFFERTOIMAGE, "vkCmdCopyBufferToImage()");
8351         skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyBufferToImage()");
8352         for (uint32_t i = 0; i < regionCount; ++i) {
8353             skip_call |= VerifyDestImageLayout(dev_data, cb_node, dstImage, pRegions[i].imageSubresource, dstImageLayout);
8354             skip_call |= ValidateCopyBufferImageTransferGranularityRequirements(dev_data, cb_node, dst_image_state, &pRegions[i], i,
8355                                                                                 "vkCmdCopyBufferToImage()");
8356         }
8357     } else {
8358         assert(0);
8359     }
8360     lock.unlock();
8361     if (!skip_call)
8362         dev_data->dispatch_table.CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions);
8363 }
8364 
8365 VKAPI_ATTR void VKAPI_CALL CmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
8366                                                 VkImageLayout srcImageLayout, VkBuffer dstBuffer,
8367                                                 uint32_t regionCount, const VkBufferImageCopy *pRegions) {
8368     bool skip_call = false;
8369     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8370     std::unique_lock<std::mutex> lock(global_lock);
8371 
8372     auto cb_node = getCBNode(dev_data, commandBuffer);
8373     auto src_image_state = getImageState(dev_data, srcImage);
8374     auto dst_buff_node = getBufferNode(dev_data, dstBuffer);
8375     if (cb_node && src_image_state && dst_buff_node) {
8376         skip_call |=
8377             ValidateImageSampleCount(dev_data, src_image_state, VK_SAMPLE_COUNT_1_BIT, "vkCmdCopyImageToBuffer(): srcImage");
8378         skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_image_state, "vkCmdCopyImageToBuffer()");
8379         skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_node, "vkCmdCopyImageToBuffer()");
8380         // Update bindings between buffer/image and cmd buffer
8381         AddCommandBufferBindingImage(dev_data, cb_node, src_image_state);
8382         AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_node);
8383         // Validate that SRC image & DST buffer have correct usage flags set
8384         skip_call |= ValidateImageUsageFlags(dev_data, src_image_state, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
8385                                              "vkCmdCopyImageToBuffer()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
8386         skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_node, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
8387                                               "vkCmdCopyImageToBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8388         std::function<bool()> function = [=]() {
8389             return ValidateImageMemoryIsValid(dev_data, src_image_state, "vkCmdCopyImageToBuffer()");
8390         };
8391         cb_node->validate_functions.push_back(function);
8392         function = [=]() {
8393             SetBufferMemoryValid(dev_data, dst_buff_node, true);
8394             return false;
8395         };
8396         cb_node->validate_functions.push_back(function);
8397 
8398         skip_call |= addCmd(dev_data, cb_node, CMD_COPYIMAGETOBUFFER, "vkCmdCopyImageToBuffer()");
8399         skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyImageToBuffer()");
8400         for (uint32_t i = 0; i < regionCount; ++i) {
8401             skip_call |= VerifySourceImageLayout(dev_data, cb_node, srcImage, pRegions[i].imageSubresource, srcImageLayout);
8402             skip_call |= ValidateCopyBufferImageTransferGranularityRequirements(dev_data, cb_node, src_image_state, &pRegions[i], i,
8403                                                                                 "vkCmdCopyImageToBuffer()");
8404         }
8405     } else {
8406         assert(0);
8407     }
8408     lock.unlock();
8409     if (!skip_call)
8410         dev_data->dispatch_table.CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions);
8411 }
8412 
8413 VKAPI_ATTR void VKAPI_CALL CmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer,
8414                                            VkDeviceSize dstOffset, VkDeviceSize dataSize, const uint32_t *pData) {
8415     bool skip_call = false;
8416     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8417     std::unique_lock<std::mutex> lock(global_lock);
8418 
8419     auto cb_node = getCBNode(dev_data, commandBuffer);
8420     auto dst_buff_node = getBufferNode(dev_data, dstBuffer);
8421     if (cb_node && dst_buff_node) {
8422         skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_node, "vkCmdUpdateBuffer()");
8423         // Update bindings between buffer and cmd buffer
8424         AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_node);
8425         // Validate that DST buffer has correct usage flags set
8426         skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_node, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
8427                                               "vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8428         std::function<bool()> function = [=]() {
8429             SetBufferMemoryValid(dev_data, dst_buff_node, true);
8430             return false;
8431         };
8432         cb_node->validate_functions.push_back(function);
8433 
8434         skip_call |= addCmd(dev_data, cb_node, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
8435         skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdUpdateBuffer()");
8436     } else {
8437         assert(0);
8438     }
8439     lock.unlock();
8440     if (!skip_call)
8441         dev_data->dispatch_table.CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
8442 }
8443 
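// Validate dst buffer binding/usage for vkCmdFillBuffer, record the command, and defer marking its memory valid to submit time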
8444 VKAPI_ATTR void VKAPI_CALL
8445 CmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data) {
8446     bool skip_call = false;
8447     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8448     std::unique_lock<std::mutex> lock(global_lock);
8449 
8450     auto cb_node = getCBNode(dev_data, commandBuffer);
8451     auto dst_buff_node = getBufferNode(dev_data, dstBuffer);
8452     if (cb_node && dst_buff_node) {
8453         skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_node, "vkCmdFillBuffer()");
8454         // Update bindings between buffer and cmd buffer
8455         AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_node);
8456         // Validate that DST buffer has correct usage flags set
8457         skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_node, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "vkCmdFillBuffer()",
8458                                               "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8459         std::function<bool()> function = [=]() {
8460             SetBufferMemoryValid(dev_data, dst_buff_node, true);
8461             return false;
8462         };
8463         cb_node->validate_functions.push_back(function);
8464 
8465         skip_call |= addCmd(dev_data, cb_node, CMD_FILLBUFFER, "vkCmdFillBuffer()");
8466         skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdFillBuffer()");
8467     } else {
8468         assert(0);
8469     }
8470     lock.unlock();
8471     if (!skip_call)
8472         dev_data->dispatch_table.CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
8473 }
8474 
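// Record vkCmdClearAttachments, warn on full-area clears issued before any draw, and check that cleared attachments are referenced by the active subpass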
8475 VKAPI_ATTR void VKAPI_CALL CmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
8476                                                const VkClearAttachment *pAttachments, uint32_t rectCount,
8477                                                const VkClearRect *pRects) {
8478     bool skip_call = false;
8479     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8480     std::unique_lock<std::mutex> lock(global_lock);
8481     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8482     if (pCB) {
8483         skip_call |= addCmd(dev_data, pCB, CMD_CLEARATTACHMENTS, "vkCmdClearAttachments()");
8484         // Warn if this is issued prior to Draw Cmd and clearing the entire attachment
8485         if (!hasDrawCmd(pCB) && (pCB->activeRenderPassBeginInfo.renderArea.extent.width == pRects[0].rect.extent.width) &&
8486             (pCB->activeRenderPassBeginInfo.renderArea.extent.height == pRects[0].rect.extent.height)) {
8487             // There are times where app needs to use ClearAttachments (generally when reusing a buffer inside of a render pass)
8488             // Can we make this warning more specific? I'd like to avoid triggering this test if we can tell it's a use that must
8489             // call CmdClearAttachments
8490             // Otherwise this seems more like a performance warning.
8491             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
8492                                  VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t &>(commandBuffer),
8493                                  __LINE__, DRAWSTATE_CLEAR_CMD_BEFORE_DRAW, "DS",
8494                                  "vkCmdClearAttachments() issued on command buffer object 0x%" PRIxLEAST64 " prior to any Draw Cmds."
8495                                  " It is recommended you use RenderPass LOAD_OP_CLEAR on Attachments prior to any Draw.",
8496                                  (uint64_t)(commandBuffer));
8497         }
8498         skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdClearAttachments()");
8499     }
8500 
8501     // Validate that attachment is in reference list of active subpass
8502     if (pCB && pCB->activeRenderPass) {
8503         const VkRenderPassCreateInfo *pRPCI = pCB->activeRenderPass->createInfo.ptr();
8504         const VkSubpassDescription *pSD = &pRPCI->pSubpasses[pCB->activeSubpass];
8505 
8506         for (uint32_t attachment_idx = 0; attachment_idx < attachmentCount; attachment_idx++) {
8507             const VkClearAttachment *attachment = &pAttachments[attachment_idx];
8508             if (attachment->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
8509                 if (attachment->colorAttachment >= pSD->colorAttachmentCount) {
8510                     skip_call |= log_msg(
8511                         dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8512                         (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
8513                         "vkCmdClearAttachments() color attachment index %d out of range for active subpass %d; ignored",
8514                         attachment->colorAttachment, pCB->activeSubpass);
8515                 }
8516                 else if (pSD->pColorAttachments[attachment->colorAttachment].attachment == VK_ATTACHMENT_UNUSED) {
8517                     skip_call |= log_msg(
8518                         dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8519                         (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
8520                         "vkCmdClearAttachments() color attachment index %d is VK_ATTACHMENT_UNUSED; ignored",
8521                         attachment->colorAttachment);
8522                 }
8523             } else if (attachment->aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
8524                 if (!pSD->pDepthStencilAttachment || // Says no DS will be used in active subpass
8525                     (pSD->pDepthStencilAttachment->attachment ==
8526                      VK_ATTACHMENT_UNUSED)) { // Says no DS will be used in active subpass
8527 
8528                     skip_call |= log_msg(
8529                         dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8530                         (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
8531                         "vkCmdClearAttachments() depth/stencil clear with no depth/stencil attachment in subpass; ignored");
8532                 }
8533             }
8534         }
8535     }
8536     lock.unlock();
8537     if (!skip_call)
8538         dev_data->dispatch_table.CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
8539 }
8540 
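// Validate image memory binding for vkCmdClearColorImage, record the command, and defer marking the image valid to submit time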
8541 VKAPI_ATTR void VKAPI_CALL CmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image,
8542                                               VkImageLayout imageLayout, const VkClearColorValue *pColor,
8543                                               uint32_t rangeCount, const VkImageSubresourceRange *pRanges) {
8544     bool skip_call = false;
8545     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8546     std::unique_lock<std::mutex> lock(global_lock);
8547     // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
8548 
8549     auto cb_node = getCBNode(dev_data, commandBuffer);
8550     auto image_state = getImageState(dev_data, image);
8551     if (cb_node && image_state) {
8552         skip_call |= ValidateMemoryIsBoundToImage(dev_data, image_state, "vkCmdClearColorImage()");
8553         AddCommandBufferBindingImage(dev_data, cb_node, image_state);
8554         std::function<bool()> function = [=]() {
8555             SetImageMemoryValid(dev_data, image_state, true);
8556             return false;
8557         };
8558         cb_node->validate_functions.push_back(function);
8559 
8560         skip_call |= addCmd(dev_data, cb_node, CMD_CLEARCOLORIMAGE, "vkCmdClearColorImage()");
8561         skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdClearColorImage()");
8562     } else {
8563         assert(0);
8564     }
8565     lock.unlock();
8566     if (!skip_call)
8567         dev_data->dispatch_table.CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
8568 }
8569 
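// Validate image memory binding for vkCmdClearDepthStencilImage, record the command, and defer marking the image valid to submit time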
8570 VKAPI_ATTR void VKAPI_CALL
8571 CmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
8572                           const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
8573                           const VkImageSubresourceRange *pRanges) {
8574     bool skip_call = false;
8575     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8576     std::unique_lock<std::mutex> lock(global_lock);
8577     // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
8578 
8579     auto cb_node = getCBNode(dev_data, commandBuffer);
8580     auto image_state = getImageState(dev_data, image);
8581     if (cb_node && image_state) {
8582         skip_call |= ValidateMemoryIsBoundToImage(dev_data, image_state, "vkCmdClearDepthStencilImage()");
8583         AddCommandBufferBindingImage(dev_data, cb_node, image_state);
8584         std::function<bool()> function = [=]() {
8585             SetImageMemoryValid(dev_data, image_state, true);
8586             return false;
8587         };
8588         cb_node->validate_functions.push_back(function);
8589 
8590         skip_call |= addCmd(dev_data, cb_node, CMD_CLEARDEPTHSTENCILIMAGE, "vkCmdClearDepthStencilImage()");
8591         skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdClearDepthStencilImage()");
8592     } else {
8593         assert(0);
8594     }
8595     lock.unlock();
8596     if (!skip_call)
8597         dev_data->dispatch_table.CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges);
8598 }
8599 
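// Validate src/dst image memory bindings for vkCmdResolveImage, record the command, and defer src-content validation and dst-valid marking to submit time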
8600 VKAPI_ATTR void VKAPI_CALL
8601 CmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
8602                 VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve *pRegions) {
8603     bool skip_call = false;
8604     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8605     std::unique_lock<std::mutex> lock(global_lock);
8606 
8607     auto cb_node = getCBNode(dev_data, commandBuffer);
8608     auto src_image_state = getImageState(dev_data, srcImage);
8609     auto dst_image_state = getImageState(dev_data, dstImage);
8610     if (cb_node && src_image_state && dst_image_state) {
8611         skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_image_state, "vkCmdResolveImage()");
8612         skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_image_state, "vkCmdResolveImage()");
8613         // Update bindings between images and cmd buffer
8614         AddCommandBufferBindingImage(dev_data, cb_node, src_image_state);
8615         AddCommandBufferBindingImage(dev_data, cb_node, dst_image_state);
8616         std::function<bool()> function = [=]() {
8617             return ValidateImageMemoryIsValid(dev_data, src_image_state, "vkCmdResolveImage()");
8618         };
8619         cb_node->validate_functions.push_back(function);
8620         function = [=]() {
8621             SetImageMemoryValid(dev_data, dst_image_state, true);
8622             return false;
8623         };
8624         cb_node->validate_functions.push_back(function);
8625 
8626         skip_call |= addCmd(dev_data, cb_node, CMD_RESOLVEIMAGE, "vkCmdResolveImage()");
8627         skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdResolveImage()");
8628     } else {
8629         assert(0);
8630     }
8631     lock.unlock();
8632     if (!skip_call)
8633         dev_data->dispatch_table.CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
8634                                                  pRegions);
8635 }
8636 
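// Queue-time callback: record the stage mask an event was last set with, in both command-buffer and queue state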
8637 bool setEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
8638     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8639     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8640     if (pCB) {
8641         pCB->eventToStageMap[event] = stageMask;
8642     }
8643     auto queue_data = dev_data->queueMap.find(queue);
8644     if (queue_data != dev_data->queueMap.end()) {
8645         queue_data->second.eventToStageMap[event] = stageMask;
8646     }
8647     return false;
8648 }
8649 
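// Record vkCmdSetEvent, bind the event to this command buffer, and defer stage-mask bookkeeping to queue submission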
8650 VKAPI_ATTR void VKAPI_CALL
8651 CmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
8652     bool skip_call = false;
8653     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8654     std::unique_lock<std::mutex> lock(global_lock);
8655     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8656     if (pCB) {
8657         skip_call |= addCmd(dev_data, pCB, CMD_SETEVENT, "vkCmdSetEvent()");
8658         skip_call |= insideRenderPass(dev_data, pCB, "vkCmdSetEvent()");
8659         auto event_state = getEventNode(dev_data, event);
8660         if (event_state) {
8661             addCommandBufferBinding(&event_state->cb_bindings,
8662                                     {reinterpret_cast<uint64_t &>(event), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT}, pCB);
8663             event_state->cb_bindings.insert(pCB);
8664         }
8665         pCB->events.push_back(event);
8666         if (!pCB->waitedEvents.count(event)) {
8667             pCB->writeEventsBeforeWait.push_back(event);
8668         }
8669         std::function<bool(VkQueue)> eventUpdate =
8670             std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, stageMask);
8671         pCB->eventUpdates.push_back(eventUpdate);
8672     }
8673     lock.unlock();
8674     if (!skip_call)
8675         dev_data->dispatch_table.CmdSetEvent(commandBuffer, event, stageMask);
8676 }
8677 
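// Record vkCmdResetEvent, bind the event to this command buffer, and defer clearing its stage mask to queue submission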
8678 VKAPI_ATTR void VKAPI_CALL
8679 CmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
8680     bool skip_call = false;
8681     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8682     std::unique_lock<std::mutex> lock(global_lock);
8683     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8684     if (pCB) {
8685         skip_call |= addCmd(dev_data, pCB, CMD_RESETEVENT, "vkCmdResetEvent()");
8686         skip_call |= insideRenderPass(dev_data, pCB, "vkCmdResetEvent()");
8687         auto event_state = getEventNode(dev_data, event);
8688         if (event_state) {
8689             addCommandBufferBinding(&event_state->cb_bindings,
8690                                     {reinterpret_cast<uint64_t &>(event), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT}, pCB);
8691             event_state->cb_bindings.insert(pCB);
8692         }
8693         pCB->events.push_back(event);
8694         if (!pCB->waitedEvents.count(event)) {
8695             pCB->writeEventsBeforeWait.push_back(event);
8696         }
8697         std::function<bool(VkQueue)> eventUpdate =
8698             std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, VkPipelineStageFlags(0));
8699         pCB->eventUpdates.push_back(eventUpdate);
8700     }
8701     lock.unlock();
8702     if (!skip_call)
8703         dev_data->dispatch_table.CmdResetEvent(commandBuffer, event, stageMask);
8704 }
8705 
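// Apply image-barrier layout transitions to the command buffer's per-subresource layout tracking, flagging transitions whose oldLayout does not match the tracked layout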
8706 static bool TransitionImageLayouts(VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
8707                                    const VkImageMemoryBarrier *pImgMemBarriers) {
8708     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
8709     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
8710     bool skip = false;
8711     uint32_t levelCount = 0;
8712     uint32_t layerCount = 0;
8713 
8714     for (uint32_t i = 0; i < memBarrierCount; ++i) {
8715         auto mem_barrier = &pImgMemBarriers[i];
8716         if (!mem_barrier)
8717             continue;
8718         // TODO: Do not iterate over every possibility - consolidate where
8719         // possible
8720         ResolveRemainingLevelsLayers(dev_data, &levelCount, &layerCount, mem_barrier->subresourceRange, mem_barrier->image);
8721 
8722         for (uint32_t j = 0; j < levelCount; j++) {
8723             uint32_t level = mem_barrier->subresourceRange.baseMipLevel + j;
8724             for (uint32_t k = 0; k < layerCount; k++) {
8725                 uint32_t layer = mem_barrier->subresourceRange.baseArrayLayer + k;
8726                 VkImageSubresource sub = {mem_barrier->subresourceRange.aspectMask, level, layer};
8727                 IMAGE_CMD_BUF_LAYOUT_NODE node;
8728                 if (!FindLayout(pCB, mem_barrier->image, sub, node)) {
8729                     SetLayout(pCB, mem_barrier->image, sub,
8730                               IMAGE_CMD_BUF_LAYOUT_NODE(mem_barrier->oldLayout, mem_barrier->newLayout));
8731                     continue;
8732                 }
8733                 if (mem_barrier->oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
8734                     // TODO: Set memory invalid which is in mem_tracker currently
8735                 } else if (node.layout != mem_barrier->oldLayout) {
8736                     skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8737                                     __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "You cannot transition the layout from %s "
8738                                                                                     "when current layout is %s.",
8739                                     string_VkImageLayout(mem_barrier->oldLayout), string_VkImageLayout(node.layout));
8740                 }
8741                 SetLayout(pCB, mem_barrier->image, sub, mem_barrier->newLayout);
8742             }
8743         }
8744     }
8745     return skip;
8746 }
8747 
8748 // Print readable FlagBits in FlagMask
8749 static std::string string_VkAccessFlags(VkAccessFlags accessMask) {
8750     std::string result;
8751     std::string separator;
8752 
8753     if (accessMask == 0) {
8754         result = "[None]";
8755     } else {
8756         result = "[";
8757         for (auto i = 0; i < 32; i++) {
8758             if (accessMask & (1 << i)) {
8759                 result = result + separator + string_VkAccessFlagBits((VkAccessFlagBits)(1 << i));
8760                 separator = " | ";
8761             }
8762         }
8763         result = result + "]";
8764     }
8765     return result;
8766 }
8767 
8768 // AccessFlags MUST have 'required_bit' set, and may have one or more of 'optional_bits' set.
8769 // If required_bit is zero, accessMask must have at least one of 'optional_bits' set
8770 // TODO: Add tracking to ensure that at least one barrier has been set for these layout transitions
8771 static bool ValidateMaskBits(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
8772                              const VkImageLayout &layout, VkAccessFlags required_bit, VkAccessFlags optional_bits,
8773                              const char *type) {
8774     bool skip_call = false;
8775 
8776     if ((accessMask & required_bit) || (!required_bit && (accessMask & optional_bits))) {
8777         if (accessMask & ~(required_bit | optional_bits)) {
8778             // TODO: Verify against Valid Use
8779             skip_call |=
8780                 log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8781                         DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask 0x%X %s are specified when layout is %s.",
8782                         type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
8783         }
8784     } else {
8785         if (!required_bit) {
8786             skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8787                                  DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must contain at least one of access bits %d "
8788                                                                   "%s when layout is %s, unless the app has previously added a "
8789                                                                   "barrier for this transition.",
8790                                  type, accessMask, string_VkAccessFlags(accessMask).c_str(), optional_bits,
8791                                  string_VkAccessFlags(optional_bits).c_str(), string_VkImageLayout(layout));
8792         } else {
8793             std::string opt_bits;
8794             if (optional_bits != 0) {
8795                 std::stringstream ss;
8796                 ss << optional_bits;
8797                 opt_bits = "and may have optional bits " + ss.str() + ' ' + string_VkAccessFlags(optional_bits);
8798             }
8799             skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8800                                  DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must have required access bit %d %s %s when "
8801                                                                   "layout is %s, unless the app has previously added a barrier for "
8802                                                                   "this transition.",
8803                                  type, accessMask, string_VkAccessFlags(accessMask).c_str(), required_bit,
8804                                  string_VkAccessFlags(required_bit).c_str(), opt_bits.c_str(), string_VkImageLayout(layout));
8805         }
8806     }
8807     return skip_call;
8808 }
8809 
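// Validate a barrier's access mask against the access bits required or allowed by the given image layout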
8810 static bool ValidateMaskBitsFromLayouts(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
8811                                         const VkImageLayout &layout, const char *type) {
8812     bool skip_call = false;
8813     switch (layout) {
8814     case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: {
8815         skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
8816                                       VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_INPUT_ATTACHMENT_READ_BIT, type);
8817         break;
8818     }
8819     case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: {
8820         skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
8821                                       VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_INPUT_ATTACHMENT_READ_BIT, type);
8822         break;
8823     }
8824     case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: {
8825         skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_WRITE_BIT, 0, type);
8826         break;
8827     }
8828     case VK_IMAGE_LAYOUT_PREINITIALIZED: {
8829         skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_HOST_WRITE_BIT, 0, type);
8830         break;
8831     }
8832     case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: {
8833         skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
8834                                       VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
8835                                       VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_INPUT_ATTACHMENT_READ_BIT, type);
8836         break;
8837     }
8838     case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: {
8839         skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
8840                                       VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
8841         break;
8842     }
8843     case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: {
8844         skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_READ_BIT, 0, type);
8845         break;
8846     }
8847     case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR: {
8848         skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_MEMORY_READ_BIT, 0, type);
8849         break;
8850     }
8851     case VK_IMAGE_LAYOUT_UNDEFINED: {
8852         if (accessMask != 0) {
8853             // TODO: Verify against Valid Use section spec
8854             skip_call |=
8855                 log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8856                         DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask 0x%X %s are specified when layout is %s.",
8857                         type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
8858         }
8859         break;
8860     }
8861     case VK_IMAGE_LAYOUT_GENERAL:
8862     default: { break; }
8863     }
8864     return skip_call;
8865 }
8866 
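// Validate memory, buffer, and image barriers: subpass self-dependency, queue family indices, access masks vs. layouts, aspect masks, and subresource ranges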
8867 static bool ValidateBarriers(const char *funcName, VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
8868                              const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
8869                              const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
8870                              const VkImageMemoryBarrier *pImageMemBarriers) {
8871     bool skip_call = false;
8872     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
8873     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
8874     if (pCB->activeRenderPass && memBarrierCount) {
8875         if (!pCB->activeRenderPass->hasSelfDependency[pCB->activeSubpass]) {
8876             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8877                                  DRAWSTATE_INVALID_BARRIER, "DS", "%s: Barriers cannot be set during subpass %d "
8878                                                                   "with no self dependency specified.",
8879                                  funcName, pCB->activeSubpass);
8880         }
8881     }
8882     for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
8883         auto mem_barrier = &pImageMemBarriers[i];
8884         auto image_data = getImageState(dev_data, mem_barrier->image);
8885         if (image_data) {
8886             uint32_t src_q_f_index = mem_barrier->srcQueueFamilyIndex;
8887             uint32_t dst_q_f_index = mem_barrier->dstQueueFamilyIndex;
8888             if (image_data->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
8889                 // srcQueueFamilyIndex and dstQueueFamilyIndex must both
8890                 // be VK_QUEUE_FAMILY_IGNORED
8891                 if ((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) {
8892                     skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8893                                          __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
8894                                          "%s: Image Barrier for image 0x%" PRIx64 " was created with sharingMode of "
8895                                          "VK_SHARING_MODE_CONCURRENT.  Src and dst "
8896                                          " queueFamilyIndices must be VK_QUEUE_FAMILY_IGNORED.",
8897                                          funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
8898                 }
8899             } else {
8900                 // Sharing mode is VK_SHARING_MODE_EXCLUSIVE. srcQueueFamilyIndex and
8901                 // dstQueueFamilyIndex must either both be VK_QUEUE_FAMILY_IGNORED,
8902                 // or both be a valid queue family
8903                 if (((src_q_f_index == VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index == VK_QUEUE_FAMILY_IGNORED)) &&
8904                     (src_q_f_index != dst_q_f_index)) {
8905                     skip_call |=
8906                         log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8907                                 DRAWSTATE_INVALID_QUEUE_INDEX, "DS", "%s: Image 0x%" PRIx64 " was created with sharingMode "
8908                                                                      "of VK_SHARING_MODE_EXCLUSIVE. If one of src- or "
8909                                                                      "dstQueueFamilyIndex is VK_QUEUE_FAMILY_IGNORED, both "
8910                                                                      "must be.",
8911                                 funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
8912                 } else if (((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) && (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) &&
8913                            ((src_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
8914                             (dst_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()))) {
8915                     skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8916                                          __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
8917                                          "%s: Image 0x%" PRIx64 " was created with sharingMode "
8918                                          "of VK_SHARING_MODE_EXCLUSIVE, but srcQueueFamilyIndex %d"
8919                                          " or dstQueueFamilyIndex %d is greater than " PRINTF_SIZE_T_SPECIFIER
8920                                          "queueFamilies crated for this device.",
8921                                          funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image), src_q_f_index,
8922                                          dst_q_f_index, dev_data->phys_dev_properties.queue_family_properties.size());
8923                 }
8924             }
8925         }
8926 
8927         if (mem_barrier) {
8928             if (mem_barrier->oldLayout != mem_barrier->newLayout) {
8929                 skip_call |=
8930                     ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->srcAccessMask, mem_barrier->oldLayout, "Source");
8931                 skip_call |=
8932                     ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->dstAccessMask, mem_barrier->newLayout, "Dest");
8933             }
8934             if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
8935                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8936                         DRAWSTATE_INVALID_BARRIER, "DS", "%s: Image Layout cannot be transitioned to UNDEFINED or "
8937                                                          "PREINITIALIZED.",
8938                         funcName);
8939             }
8940             auto image_data = getImageState(dev_data, mem_barrier->image);
8941             VkFormat format = VK_FORMAT_UNDEFINED;
8942             uint32_t arrayLayers = 0, mipLevels = 0;
8943             bool imageFound = false;
8944             if (image_data) {
8945                 format = image_data->createInfo.format;
8946                 arrayLayers = image_data->createInfo.arrayLayers;
8947                 mipLevels = image_data->createInfo.mipLevels;
8948                 imageFound = true;
8949             } else if (dev_data->device_extensions.wsi_enabled) {
8950                 auto imageswap_data = getSwapchainFromImage(dev_data, mem_barrier->image);
8951                 if (imageswap_data) {
8952                     auto swapchain_data = getSwapchainNode(dev_data, imageswap_data);
8953                     if (swapchain_data) {
8954                         format = swapchain_data->createInfo.imageFormat;
8955                         arrayLayers = swapchain_data->createInfo.imageArrayLayers;
8956                         mipLevels = 1;
8957                         imageFound = true;
8958                     }
8959                 }
8960             }
8961             if (imageFound) {
8962                 auto aspect_mask = mem_barrier->subresourceRange.aspectMask;
8963                 if (vk_format_is_depth_or_stencil(format)) {
8964                     if (vk_format_is_depth_and_stencil(format)) {
8965                         if (!(aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) && !(aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT)) {
8966                             log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8967                                     __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
8968                                     "%s: Image is a depth and stencil format and thus must "
8969                                     "have either one or both of VK_IMAGE_ASPECT_DEPTH_BIT and "
8970                                     "VK_IMAGE_ASPECT_STENCIL_BIT set.",
8971                                     funcName);
8972                         }
8973                     } else if (vk_format_is_depth_only(format)) {
8974                         if (!(aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT)) {
8975                             log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8976                                     __LINE__, DRAWSTATE_INVALID_BARRIER, "DS", "%s: Image is a depth-only format and thus must "
8977                                                                                "have VK_IMAGE_ASPECT_DEPTH_BIT set.",
8978                                     funcName);
8979                         }
8980                     } else { // stencil-only case
8981                         if (!(aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT)) {
8982                             log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8983                                     __LINE__, DRAWSTATE_INVALID_BARRIER, "DS", "%s: Image is a stencil-only format and thus must "
8984                                                                                "have VK_IMAGE_ASPECT_STENCIL_BIT set.",
8985                                     funcName);
8986                         }
8987                     }
8988                 } else { // image is a color format
8989                     if (!(aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT)) {
8990                         log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8991                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s: Image is a color format and thus must "
8992                                                                  "have VK_IMAGE_ASPECT_COLOR_BIT set.",
8993                                 funcName);
8994                     }
8995                 }
8996                 int layerCount = (mem_barrier->subresourceRange.layerCount == VK_REMAINING_ARRAY_LAYERS)
8997                                      ? 1
8998                                      : mem_barrier->subresourceRange.layerCount;
8999                 if ((mem_barrier->subresourceRange.baseArrayLayer + layerCount) > arrayLayers) {
9000                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9001                             DRAWSTATE_INVALID_BARRIER, "DS", "%s: Subresource must have the sum of the "
9002                                                              "baseArrayLayer (%d) and layerCount (%d) be less "
9003                                                              "than or equal to the total number of layers (%d).",
9004                             funcName, mem_barrier->subresourceRange.baseArrayLayer, mem_barrier->subresourceRange.layerCount,
9005                             arrayLayers);
9006                 }
9007                 int levelCount = (mem_barrier->subresourceRange.levelCount == VK_REMAINING_MIP_LEVELS)
9008                                      ? 1
9009                                      : mem_barrier->subresourceRange.levelCount;
9010                 if ((mem_barrier->subresourceRange.baseMipLevel + levelCount) > mipLevels) {
9011                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9012                             DRAWSTATE_INVALID_BARRIER, "DS", "%s: Subresource must have the sum of the baseMipLevel "
9013                                                              "(%d) and levelCount (%d) be less than or equal to "
9014                                                              "the total number of levels (%d).",
9015                             funcName, mem_barrier->subresourceRange.baseMipLevel, mem_barrier->subresourceRange.levelCount,
9016                             mipLevels);
9017                 }
9018             }
9019         }
9020     }
9021     for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
9022         auto mem_barrier = &pBufferMemBarriers[i];
9023         if (pCB->activeRenderPass) {
9024             skip_call |=
9025                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9026                         DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barriers cannot be used during a render pass.", funcName);
9027         }
9028         if (!mem_barrier)
9029             continue;
9030 
9031         // Validate buffer barrier queue family indices
9032         if ((mem_barrier->srcQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
9033              mem_barrier->srcQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
9034             (mem_barrier->dstQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
9035              mem_barrier->dstQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size())) {
9036             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9037                                  DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
9038                                  "%s: Buffer Barrier 0x%" PRIx64 " has QueueFamilyIndex greater "
9039                                  "than the number of QueueFamilies (" PRINTF_SIZE_T_SPECIFIER ") for this device.",
9040                                  funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
9041                                  dev_data->phys_dev_properties.queue_family_properties.size());
9042         }
9043 
9044         auto buffer_node = getBufferNode(dev_data, mem_barrier->buffer);
9045         if (buffer_node) {
9046             auto buffer_size = buffer_node->binding.size;
9047             if (mem_barrier->offset >= buffer_size) {
9048                 skip_call |= log_msg(
9049                     dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9050                     DRAWSTATE_INVALID_BARRIER, "DS",
9051                     "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " which is not less than total size 0x%" PRIx64 ".",
9052                     funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
9053                     reinterpret_cast<const uint64_t &>(mem_barrier->offset), reinterpret_cast<const uint64_t &>(buffer_size));
9054             } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) {
9055                 skip_call |= log_msg(
9056                     dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9057                     DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " and size 0x%" PRIx64
9058                                                      " whose sum is greater than total size 0x%" PRIx64 ".",
9059                     funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
9060                     reinterpret_cast<const uint64_t &>(mem_barrier->offset), reinterpret_cast<const uint64_t &>(mem_barrier->size),
9061                     reinterpret_cast<const uint64_t &>(buffer_size));
9062             }
9063         }
9064     }
9065     return skip_call;
9066 }
9067 
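// Queue-time callback for vkCmdWaitEvents: verify srcStageMask matches the OR of the stage masks the waited events were set with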
9068 bool validateEventStageMask(VkQueue queue, GLOBAL_CB_NODE *pCB, uint32_t eventCount, size_t firstEventIndex, VkPipelineStageFlags sourceStageMask) {
9069     bool skip_call = false;
9070     VkPipelineStageFlags stageMask = 0;
9071     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
9072     for (uint32_t i = 0; i < eventCount; ++i) {
9073         auto event = pCB->events[firstEventIndex + i];
9074         auto queue_data = dev_data->queueMap.find(queue);
9075         if (queue_data == dev_data->queueMap.end())
9076             return false;
9077         auto event_data = queue_data->second.eventToStageMap.find(event);
9078         if (event_data != queue_data->second.eventToStageMap.end()) {
9079             stageMask |= event_data->second;
9080         } else {
9081             auto global_event_data = getEventNode(dev_data, event);
9082             if (!global_event_data) {
9083                 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
9084                                      reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
9085                                      "Event 0x%" PRIx64 " cannot be waited on if it has never been set.",
9086                                      reinterpret_cast<const uint64_t &>(event));
9087             } else {
9088                 stageMask |= global_event_data->stageMask;
9089             }
9090         }
9091     }
9092     // TODO: Need to validate that host_bit is only set if set event is called
9093     // but set event can be called at any time.
9094     if (sourceStageMask != stageMask && sourceStageMask != (stageMask | VK_PIPELINE_STAGE_HOST_BIT)) {
9095         skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9096                              DRAWSTATE_INVALID_EVENT, "DS", "Submitting cmdbuffer with call to VkCmdWaitEvents "
9097                                                             "using srcStageMask 0x%X which must be the bitwise "
9098                                                             "OR of the stageMask parameters used in calls to "
9099                                                             "vkCmdSetEvent and VK_PIPELINE_STAGE_HOST_BIT if "
9100                                                             "used with vkSetEvent but instead is 0x%X.",
9101                              sourceStageMask, stageMask);
9102     }
9103     return skip_call;
9104 }
9105 
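// Record vkCmdWaitEvents, track waited events, defer stage-mask validation to submit time, and validate image layouts and barriers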
9106 VKAPI_ATTR void VKAPI_CALL
9107 CmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags sourceStageMask,
9108               VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
9109               uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
9110               uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
9111     bool skip_call = false;
9112     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9113     std::unique_lock<std::mutex> lock(global_lock);
9114     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9115     if (pCB) {
9116         auto firstEventIndex = pCB->events.size();
9117         for (uint32_t i = 0; i < eventCount; ++i) {
9118             auto event_state = getEventNode(dev_data, pEvents[i]);
9119             if (event_state) {
9120                 addCommandBufferBinding(&event_state->cb_bindings,
9121                                         {reinterpret_cast<const uint64_t &>(pEvents[i]), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT},
9122                                         pCB);
9123                 event_state->cb_bindings.insert(pCB);
9124             }
9125             pCB->waitedEvents.insert(pEvents[i]);
9126             pCB->events.push_back(pEvents[i]);
9127         }
9128         std::function<bool(VkQueue)> eventUpdate =
9129             std::bind(validateEventStageMask, std::placeholders::_1, pCB, eventCount, firstEventIndex, sourceStageMask);
9130         pCB->eventUpdates.push_back(eventUpdate);
9131         if (pCB->state == CB_RECORDING) {
9132             skip_call |= addCmd(dev_data, pCB, CMD_WAITEVENTS, "vkCmdWaitEvents()");
9133         } else {
9134             skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWaitEvents()");
9135         }
9136         skip_call |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
9137         skip_call |=
9138             ValidateBarriers("vkCmdWaitEvents", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
9139                              pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
9140     }
9141     lock.unlock();
9142     if (!skip_call)
9143         dev_data->dispatch_table.CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
9144                                                memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
9145                                                imageMemoryBarrierCount, pImageMemoryBarriers);
9146 }
9147 
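// Record vkCmdPipelineBarrier, update tracked image layouts, and validate the supplied barriers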
9148 VKAPI_ATTR void VKAPI_CALL
9149 CmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
9150                    VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
9151                    uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
9152                    uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
9153     bool skip_call = false;
9154     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9155     std::unique_lock<std::mutex> lock(global_lock);
9156     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9157     if (pCB) {
9158         skip_call |= addCmd(dev_data, pCB, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
9159         skip_call |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
9160         skip_call |=
9161             ValidateBarriers("vkCmdPipelineBarrier", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
9162                              pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
9163     }
9164     lock.unlock();
9165     if (!skip_call)
9166         dev_data->dispatch_table.CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount,
9167                                                     pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
9168                                                     imageMemoryBarrierCount, pImageMemoryBarriers);
9169 }
9170 
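// Queue-time callback: mark a query as available (or not) in both command-buffer and queue state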
9171 bool setQueryState(VkQueue queue, VkCommandBuffer commandBuffer, QueryObject object, bool value) {
9172     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9173     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9174     if (pCB) {
9175         pCB->queryToStateMap[object] = value;
9176     }
9177     auto queue_data = dev_data->queueMap.find(queue);
9178     if (queue_data != dev_data->queueMap.end()) {
9179         queue_data->second.queryToStateMap[object] = value;
9180     }
9181     return false;
9182 }
9183 
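// Record vkCmdBeginQuery and track the query as active and started on this command buffer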
9184 VKAPI_ATTR void VKAPI_CALL
9185 CmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
9186     bool skip_call = false;
9187     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9188     std::unique_lock<std::mutex> lock(global_lock);
9189     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9190     if (pCB) {
9191         QueryObject query = {queryPool, slot};
9192         pCB->activeQueries.insert(query);
9193         if (!pCB->startedQueries.count(query)) {
9194             pCB->startedQueries.insert(query);
9195         }
9196         skip_call |= addCmd(dev_data, pCB, CMD_BEGINQUERY, "vkCmdBeginQuery()");
9197         addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings,
9198                                 {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, pCB);
9199     }
9200     lock.unlock();
9201     if (!skip_call)
9202         dev_data->dispatch_table.CmdBeginQuery(commandBuffer, queryPool, slot, flags);
9203 }
9204 
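// Record vkCmdEndQuery, flag ending a query that was never begun, and defer marking it available to submit time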
9205 VKAPI_ATTR void VKAPI_CALL CmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
9206     bool skip_call = false;
9207     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9208     std::unique_lock<std::mutex> lock(global_lock);
9209     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9210     if (pCB) {
9211         QueryObject query = {queryPool, slot};
9212         if (!pCB->activeQueries.count(query)) {
9213             skip_call |=
9214                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9215                         DRAWSTATE_INVALID_QUERY, "DS", "Ending a query before it was started: queryPool 0x%" PRIx64 ", index %d",
9216                         (uint64_t)(queryPool), slot);
9217         } else {
9218             pCB->activeQueries.erase(query);
9219         }
9220         std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, true);
9221         pCB->queryUpdates.push_back(queryUpdate);
9222         if (pCB->state == CB_RECORDING) {
9223             skip_call |= addCmd(dev_data, pCB, CMD_ENDQUERY, "vkCmdEndQuery()");
9224         } else {
9225             skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdEndQuery()");
9226         }
9227         addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings,
9228                                 {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, pCB);
9229     }
9230     lock.unlock();
9231     if (!skip_call)
9232         dev_data->dispatch_table.CmdEndQuery(commandBuffer, queryPool, slot);
9233 }
9234 
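// Record vkCmdResetQueryPool and defer marking the affected queries unavailable to submit time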
9235 VKAPI_ATTR void VKAPI_CALL
9236 CmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) {
9237     bool skip_call = false;
9238     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9239     std::unique_lock<std::mutex> lock(global_lock);
9240     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9241     if (pCB) {
9242         for (uint32_t i = 0; i < queryCount; i++) {
9243             QueryObject query = {queryPool, firstQuery + i};
9244             pCB->waitedEventsBeforeQueryReset[query] = pCB->waitedEvents;
9245             std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, false);
9246             pCB->queryUpdates.push_back(queryUpdate);
9247         }
9248         if (pCB->state == CB_RECORDING) {
9249             skip_call |= addCmd(dev_data, pCB, CMD_RESETQUERYPOOL, "vkCmdResetQueryPool()");
9250         } else {
9251             skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdResetQueryPool()");
9252         }
9253         skip_call |= insideRenderPass(dev_data, pCB, "vkCmdResetQueryPool()");
9254         addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings,
9255                                 {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, pCB);
9256     }
9257     lock.unlock();
9258     if (!skip_call)
9259         dev_data->dispatch_table.CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);
9260 }
9261 
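// Deferred check run at queue submit time: verify that each query in [firstQuery, firstQuery + queryCount) has
// completed (its state marked available) before its results are copied to a buffer. Returns true if an error was logged.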
9262 bool validateQuery(VkQueue queue, GLOBAL_CB_NODE *pCB, VkQueryPool queryPool, uint32_t queryCount, uint32_t firstQuery) {
9263     bool skip_call = false;
9264     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
9265     auto queue_data = dev_data->queueMap.find(queue);
9266     if (queue_data == dev_data->queueMap.end())
9267         return false;
9268     for (uint32_t i = 0; i < queryCount; i++) {
9269         QueryObject query = {queryPool, firstQuery + i};
9270         auto query_data = queue_data->second.queryToStateMap.find(query);
9271         bool fail = false;
9272         if (query_data != queue_data->second.queryToStateMap.end()) {
9273             if (!query_data->second) {
9274                 fail = true;
9275             }
9276         } else {
9277             auto global_query_data = dev_data->queryToStateMap.find(query);
9278             if (global_query_data != dev_data->queryToStateMap.end()) {
9279                 if (!global_query_data->second) {
9280                     fail = true;
9281                 }
9282             } else {
9283                 fail = true;
9284             }
9285         }
9286         if (fail) {
9287             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9288                                  DRAWSTATE_INVALID_QUERY, "DS",
9289                                  "Requesting a copy from query to buffer with invalid query: queryPool 0x%" PRIx64 ", index %d",
9290                                  reinterpret_cast<uint64_t &>(queryPool), firstQuery + i);
9291         }
9292     }
9293     return skip_call;
9294 }
9295 
9296 VKAPI_ATTR void VKAPI_CALL
9297 CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
9298                         VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags) {
9299     bool skip_call = false;
9300     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9301     std::unique_lock<std::mutex> lock(global_lock);
9302 
9303     auto cb_node = getCBNode(dev_data, commandBuffer);
9304     auto dst_buff_node = getBufferNode(dev_data, dstBuffer);
9305     if (cb_node && dst_buff_node) {
9306         skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_node, "vkCmdCopyQueryPoolResults()");
9307         // Update bindings between buffer and cmd buffer
9308         AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_node);
9309         // Validate that DST buffer has correct usage flags set
9310         skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_node, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
9311                                               "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
9312         std::function<bool()> function = [=]() {
9313             SetBufferMemoryValid(dev_data, dst_buff_node, true);
9314             return false;
9315         };
9316         cb_node->validate_functions.push_back(function);
9317         std::function<bool(VkQueue)> queryUpdate =
9318             std::bind(validateQuery, std::placeholders::_1, cb_node, queryPool, queryCount, firstQuery);
9319         cb_node->queryUpdates.push_back(queryUpdate);
9320         if (cb_node->state == CB_RECORDING) {
9321             skip_call |= addCmd(dev_data, cb_node, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
9322         } else {
9323             skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdCopyQueryPoolResults()");
9324         }
9325         skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyQueryPoolResults()");
9326         addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings,
9327                                 {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, cb_node);
9328     } else {
9329         assert(0);
9330     }
9331     lock.unlock();
9332     if (!skip_call)
9333         dev_data->dispatch_table.CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer, dstOffset,
9334                                                          stride, flags);
9335 }
9336 
9337 VKAPI_ATTR void VKAPI_CALL CmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout,
9338                                             VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size,
9339                                             const void *pValues) {
9340     bool skip_call = false;
9341     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9342     std::unique_lock<std::mutex> lock(global_lock);
9343     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9344     if (pCB) {
9345         if (pCB->state == CB_RECORDING) {
9346             skip_call |= addCmd(dev_data, pCB, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
9347         } else {
9348             skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdPushConstants()");
9349         }
9350     }
9351     skip_call |= validatePushConstantRange(dev_data, offset, size, "vkCmdPushConstants()");
9352     if (0 == stageFlags) {
9353         skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9354                              DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCmdPushConstants() call has no stageFlags set.");
9355     }
9356 
9357     // Check if push constant update is within any of the ranges with the same stage flags specified in pipeline layout.
9358     auto pipeline_layout = getPipelineLayout(dev_data, layout);
9359     // Coalesce adjacent/overlapping pipeline ranges before checking to see if incoming range is
9360     // contained in the pipeline ranges.
9361     // Build a {start, end} span list for ranges with matching stage flags.
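    // For example (illustrative values only): two matching ranges {offset 0, size 16} and {offset 8, size 24}
    // produce spans [0, 16) and [8, 32), which the coalescing loop below merges into the single span [0, 32).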
9362     const auto &ranges = pipeline_layout->push_constant_ranges;
9363     struct span {
9364         uint32_t start;
9365         uint32_t end;
9366     };
9367     std::vector<span> spans;
9368     spans.reserve(ranges.size());
9369     for (const auto &iter : ranges) {
9370         if (iter.stageFlags == stageFlags) {
9371             spans.push_back({iter.offset, iter.offset + iter.size});
9372         }
9373     }
9374     if (spans.size() == 0) {
9375         // There were no ranges that matched the stageFlags.
9376         skip_call |=
9377             log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9378                     DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCmdPushConstants() stageFlags = 0x%" PRIx32 " do not match "
9379                                                           "the stageFlags in any of the ranges in pipeline layout 0x%" PRIx64 ".",
9380                     (uint32_t)stageFlags, (uint64_t)layout);
9381     } else {
9382         // Sort span list by start value.
9383         struct comparer {
9384             bool operator()(struct span i, struct span j) { return i.start < j.start; }
9385         } my_comparer;
9386         std::sort(spans.begin(), spans.end(), my_comparer);
9387 
9388         // Examine two spans at a time.
9389         std::vector<span>::iterator current = spans.begin();
9390         std::vector<span>::iterator next = current + 1;
9391         while (next != spans.end()) {
9392             if (current->end < next->start) {
9393                 // There is a gap; cannot coalesce. Move to the next two spans.
9394                 ++current;
9395                 ++next;
9396             } else {
9397                 // Coalesce the two spans.  The start of the next span
9398                 // is within the current span, so pick the larger of
9399                 // the end values to extend the current span.
9400                 // Then delete the next span and set next to the span after it.
9401                 current->end = max(current->end, next->end);
9402                 next = spans.erase(next);
9403             }
9404         }
9405 
9406         // Now we can check if the incoming range is within any of the spans.
9407         bool contained_in_a_range = false;
9408         for (uint32_t i = 0; i < spans.size(); ++i) {
9409             if ((offset >= spans[i].start) && ((uint64_t)offset + (uint64_t)size <= (uint64_t)spans[i].end)) {
9410                 contained_in_a_range = true;
9411                 break;
9412             }
9413         }
9414         if (!contained_in_a_range) {
9415             skip_call |=
9416                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9417                         DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCmdPushConstants() Push constant range [%d, %d) "
9418                                                               "with stageFlags = 0x%" PRIx32 " "
9419                                                               "not within flag-matching ranges in pipeline layout 0x%" PRIx64 ".",
9420                         offset, offset + size, (uint32_t)stageFlags, (uint64_t)layout);
9421         }
9422     }
9423     lock.unlock();
9424     if (!skip_call)
9425         dev_data->dispatch_table.CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
9426 }
9427 
9428 VKAPI_ATTR void VKAPI_CALL
9429 CmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t slot) {
9430     bool skip_call = false;
9431     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9432     std::unique_lock<std::mutex> lock(global_lock);
9433     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9434     if (pCB) {
9435         QueryObject query = {queryPool, slot};
9436         std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, true);
9437         pCB->queryUpdates.push_back(queryUpdate);
9438         if (pCB->state == CB_RECORDING) {
9439             skip_call |= addCmd(dev_data, pCB, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
9440         } else {
9441             skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWriteTimestamp()");
9442         }
9443     }
9444     lock.unlock();
9445     if (!skip_call)
9446         dev_data->dispatch_table.CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot);
9447 }
9448 
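// Check that the image backing each referenced framebuffer attachment was created with usage flags that include
// usage_flag; used below to verify color/input/depth-stencil attachments carry the matching usage bit.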
9449 static bool MatchUsage(layer_data *dev_data, uint32_t count, const VkAttachmentReference *attachments,
9450                        const VkFramebufferCreateInfo *fbci, VkImageUsageFlagBits usage_flag) {
9451     bool skip_call = false;
9452 
9453     for (uint32_t attach = 0; attach < count; attach++) {
9454         if (attachments[attach].attachment != VK_ATTACHMENT_UNUSED) {
9455             // Attachment counts are verified elsewhere, but prevent an invalid access
9456             if (attachments[attach].attachment < fbci->attachmentCount) {
9457                 const VkImageView *image_view = &fbci->pAttachments[attachments[attach].attachment];
9458                 auto view_state = getImageViewState(dev_data, *image_view);
9459                 if (view_state) {
9460                     const VkImageCreateInfo *ici = &getImageState(dev_data, view_state->create_info.image)->createInfo;
9461                     if (ici != nullptr) {
9462                         if ((ici->usage & usage_flag) == 0) {
9463                             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9464                                                  (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_USAGE, "DS",
9465                                                  "vkCreateFramebuffer:  Framebuffer Attachment (%d) conflicts with the image's "
9466                                                  "IMAGE_USAGE flags (%s).",
9467                                                  attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag));
9468                         }
9469                     }
9470                 }
9471             }
9472         }
9473     }
9474     return skip_call;
9475 }
9476 
9477 // Validate VkFramebufferCreateInfo which includes:
9478 // 1. attachmentCount equals renderPass attachmentCount
9479 // 2. corresponding framebuffer and renderpass attachments have matching formats
9480 // 3. corresponding framebuffer and renderpass attachments have matching sample counts
9481 // 4. fb attachments only have a single mip level
9482 // 5. fb attachment dimensions are each at least as large as the fb
9483 // 6. fb attachments use identity swizzle
9484 // 7. fb attachments used by renderPass for color/input/ds have correct usage bit set
9485 // 8. fb dimensions are within physical device limits
9486 static bool ValidateFramebufferCreateInfo(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
9487     bool skip_call = false;
9488 
9489     auto rp_state = getRenderPassState(dev_data, pCreateInfo->renderPass);
9490     if (rp_state) {
9491         const VkRenderPassCreateInfo *rpci = rp_state->createInfo.ptr();
9492         if (rpci->attachmentCount != pCreateInfo->attachmentCount) {
9493             skip_call |= log_msg(
9494                 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
9495                 reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
9496                 "vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount of %u does not match attachmentCount of %u of "
9497                 "renderPass (0x%" PRIxLEAST64 ") being used to create Framebuffer.",
9498                 pCreateInfo->attachmentCount, rpci->attachmentCount, reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass));
9499         } else {
9500             // attachmentCounts match, so make sure corresponding attachment details line up
9501             const VkImageView *image_views = pCreateInfo->pAttachments;
9502             for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
9503                 auto view_state = getImageViewState(dev_data, image_views[i]);
9504                 auto &ivci = view_state->create_info;
9505                 if (ivci.format != rpci->pAttachments[i].format) {
9506                     skip_call |= log_msg(
9507                         dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
9508                         reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE,
9509                         "DS", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has format of %s that does not match "
9510                               "the format of "
9511                               "%s used by the corresponding attachment for renderPass (0x%" PRIxLEAST64 ").",
9512                         i, string_VkFormat(ivci.format), string_VkFormat(rpci->pAttachments[i].format),
9513                         reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass));
9514                 }
9515                 const VkImageCreateInfo *ici = &getImageState(dev_data, ivci.image)->createInfo;
9516                 if (ici->samples != rpci->pAttachments[i].samples) {
9517                     skip_call |= log_msg(
9518                         dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
9519                         reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE,
9520                         "DS", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has %s samples that do not match "
9521                               "the %s samples used by the corresponding attachment for renderPass (0x%" PRIxLEAST64 ").",
9522                         i, string_VkSampleCountFlagBits(ici->samples), string_VkSampleCountFlagBits(rpci->pAttachments[i].samples),
9523                         reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass));
9524                 }
9525                 // Verify that view only has a single mip level
9526                 if (ivci.subresourceRange.levelCount != 1) {
9527                     skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
9528                                          __LINE__, DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
9529                                          "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has mip levelCount of %u "
9530                                          "but only a single mip level (levelCount == 1) is allowed when creating a Framebuffer.",
9531                                          i, ivci.subresourceRange.levelCount);
9532                 }
9533                 const uint32_t mip_level = ivci.subresourceRange.baseMipLevel;
9534                 uint32_t mip_width = max(1u, ici->extent.width >> mip_level);
9535                 uint32_t mip_height = max(1u, ici->extent.height >> mip_level);
9536                 if ((ivci.subresourceRange.layerCount < pCreateInfo->layers) || (mip_width < pCreateInfo->width) ||
9537                     (mip_height < pCreateInfo->height)) {
9538                     skip_call |=
9539                         log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
9540                                 DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
9541                                 "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has dimensions smaller "
9542                                 "than the corresponding "
9543                                 "framebuffer dimensions. Attachment dimensions must be at least as large. Here are the respective "
9544                                 "dimensions for "
9545                                 "attachment #%u, framebuffer:\n"
9546                                 "width: %u, %u\n"
9547                                 "height: %u, %u\n"
9548                                 "layerCount: %u, %u\n",
9549                                 i, ivci.subresourceRange.baseMipLevel, i, mip_width, pCreateInfo->width, mip_height,
9550                                 pCreateInfo->height, ivci.subresourceRange.layerCount, pCreateInfo->layers);
9551                 }
9552                 if (((ivci.components.r != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.r != VK_COMPONENT_SWIZZLE_R)) ||
9553                     ((ivci.components.g != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.g != VK_COMPONENT_SWIZZLE_G)) ||
9554                     ((ivci.components.b != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.b != VK_COMPONENT_SWIZZLE_B)) ||
9555                     ((ivci.components.a != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.a != VK_COMPONENT_SWIZZLE_A))) {
9556                     skip_call |= log_msg(
9557                         dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
9558                         DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
9559                         "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has non-identity swizzle. All framebuffer "
9560                         "attachments must have been created with the identity swizzle. Here are the actual swizzle values:\n"
9561                         "r swizzle = %s\n"
9562                         "g swizzle = %s\n"
9563                         "b swizzle = %s\n"
9564                         "a swizzle = %s\n",
9565                         i, string_VkComponentSwizzle(ivci.components.r), string_VkComponentSwizzle(ivci.components.g),
9566                         string_VkComponentSwizzle(ivci.components.b), string_VkComponentSwizzle(ivci.components.a));
9567                 }
9568             }
9569         }
9570         // Verify correct attachment usage flags
9571         for (uint32_t subpass = 0; subpass < rpci->subpassCount; subpass++) {
9572             // Verify input attachments:
9573             skip_call |= MatchUsage(dev_data, rpci->pSubpasses[subpass].inputAttachmentCount,
9574                                     rpci->pSubpasses[subpass].pInputAttachments, pCreateInfo, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT);
9575             // Verify color attachments:
9576             skip_call |= MatchUsage(dev_data, rpci->pSubpasses[subpass].colorAttachmentCount,
9577                                     rpci->pSubpasses[subpass].pColorAttachments, pCreateInfo, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT);
9578             // Verify depth/stencil attachments:
9579             if (rpci->pSubpasses[subpass].pDepthStencilAttachment != nullptr) {
9580                 skip_call |= MatchUsage(dev_data, 1, rpci->pSubpasses[subpass].pDepthStencilAttachment, pCreateInfo,
9581                                         VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT);
9582             }
9583         }
9584     } else {
9585         skip_call |=
9586             log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
9587                     reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9588                     "vkCreateFramebuffer(): Attempt to create framebuffer with invalid renderPass (0x%" PRIxLEAST64 ").",
9589                     reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass));
9590     }
9591     // Verify FB dimensions are within physical device limits
9592     if ((pCreateInfo->height > dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight) ||
9593         (pCreateInfo->width > dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth) ||
9594         (pCreateInfo->layers > dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers)) {
9595         skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
9596                              DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
9597                              "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo dimensions exceed physical device limits. "
9598                              "Here are the respective dimensions: requested, device max:\n"
9599                              "width: %u, %u\n"
9600                              "height: %u, %u\n"
9601                              "layerCount: %u, %u\n",
9602                              pCreateInfo->width, dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth,
9603                              pCreateInfo->height, dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight,
9604                              pCreateInfo->layers, dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers);
9605     }
9606     return skip_call;
9607 }
9608 
9609 // Validate VkFramebufferCreateInfo state prior to calling down chain to create Framebuffer object
9610 //  Return true if an error is encountered and callback returns true to skip call down chain
9611 //   false indicates that call down chain should proceed
9612 static bool PreCallValidateCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
9613     // TODO : Verify that the renderPass this FB is created with is compatible with the FB
9614     bool skip_call = false;
9615     skip_call |= ValidateFramebufferCreateInfo(dev_data, pCreateInfo);
9616     return skip_call;
9617 }
9618 
9619 // CreateFramebuffer state has been validated and call down chain completed so record new framebuffer object
9620 static void PostCallRecordCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo, VkFramebuffer fb) {
9621     // Shadow create info and store in map
9622     std::unique_ptr<FRAMEBUFFER_STATE> fb_state(
9623         new FRAMEBUFFER_STATE(fb, pCreateInfo, dev_data->renderPassMap[pCreateInfo->renderPass]->createInfo.ptr()));
9624 
9625     for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
9626         VkImageView view = pCreateInfo->pAttachments[i];
9627         auto view_state = getImageViewState(dev_data, view);
9628         if (!view_state) {
9629             continue;
9630         }
9631         MT_FB_ATTACHMENT_INFO fb_info;
9632         fb_info.mem = getImageState(dev_data, view_state->create_info.image)->binding.mem;
9633         fb_info.view_state = view_state;
9634         fb_info.image = view_state->create_info.image;
9635         fb_state->attachments.push_back(fb_info);
9636     }
9637     dev_data->frameBufferMap[fb] = std::move(fb_state);
9638 }
9639 
9640 VKAPI_ATTR VkResult VKAPI_CALL CreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
9641                                                  const VkAllocationCallbacks *pAllocator,
9642                                                  VkFramebuffer *pFramebuffer) {
9643     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9644     std::unique_lock<std::mutex> lock(global_lock);
9645     bool skip_call = PreCallValidateCreateFramebuffer(dev_data, pCreateInfo);
9646     lock.unlock();
9647 
9648     if (skip_call)
9649         return VK_ERROR_VALIDATION_FAILED_EXT;
9650 
9651     VkResult result = dev_data->dispatch_table.CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);
9652 
9653     if (VK_SUCCESS == result) {
9654         lock.lock();
9655         PostCallRecordCreateFramebuffer(dev_data, pCreateInfo, *pFramebuffer);
9656         lock.unlock();
9657     }
9658     return result;
9659 }
9660 
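// Depth-first search over the subpass DAG: return true if a dependency path from 'index' back to 'dependent' exists,
// using 'processed_nodes' to avoid revisiting subpasses.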
9661 static bool FindDependency(const int index, const int dependent, const std::vector<DAGNode> &subpass_to_node,
9662                            std::unordered_set<uint32_t> &processed_nodes) {
9663     // If we have already checked this node we have not found a dependency path so return false.
9664     if (processed_nodes.count(index))
9665         return false;
9666     processed_nodes.insert(index);
9667     const DAGNode &node = subpass_to_node[index];
9668     // Look for a dependency path. If one exists return true else recurse on the previous nodes.
9669     if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
9670         for (auto elem : node.prev) {
9671             if (FindDependency(elem, dependent, subpass_to_node, processed_nodes))
9672                 return true;
9673         }
9674     } else {
9675         return true;
9676     }
9677     return false;
9678 }
9679 
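// For every subpass in dependent_subpasses that shares an attachment with 'subpass', require an explicit or
// transitive dependency between the two; log an error and return false if none can be found.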
9680 static bool CheckDependencyExists(const layer_data *dev_data, const int subpass, const std::vector<uint32_t> &dependent_subpasses,
9681                                   const std::vector<DAGNode> &subpass_to_node, bool &skip_call) {
9682     bool result = true;
9683     // Loop through all subpasses that share the same attachment and make sure a dependency exists
9684     for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
9685         if (static_cast<uint32_t>(subpass) == dependent_subpasses[k])
9686             continue;
9687         const DAGNode &node = subpass_to_node[subpass];
9688         // Check for a specified dependency between the two nodes. If one exists we are done.
9689         auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]);
9690         auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]);
9691         if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
9692             // If no explicit dependency exists, an implicit one still might. If not, throw an error.
9693             std::unordered_set<uint32_t> processed_nodes;
9694             if (!(FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) ||
9695                 FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes))) {
9696                 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9697                                      __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9698                                      "A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
9699                                      dependent_subpasses[k]);
9700                 result = false;
9701             }
9702         }
9703     }
9704     return result;
9705 }
9706 
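// Recursively determine whether 'attachment' is written by an earlier subpass; if so, any intermediate subpass that
// neither writes the attachment nor lists it in pPreserveAttachments generates an error.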
9707 static bool CheckPreserved(const layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo, const int index,
9708                            const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, bool &skip_call) {
9709     const DAGNode &node = subpass_to_node[index];
9710     // If this node writes to the attachment return true as next nodes need to preserve the attachment.
9711     const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
9712     for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9713         if (attachment == subpass.pColorAttachments[j].attachment)
9714             return true;
9715     }
9716     if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9717         if (attachment == subpass.pDepthStencilAttachment->attachment)
9718             return true;
9719     }
9720     bool result = false;
9721     // Loop through previous nodes and see if any of them write to the attachment.
9722     for (auto elem : node.prev) {
9723         result |= CheckPreserved(dev_data, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip_call);
9724     }
9725     // If the attachment was written to by a previous node, then this node needs to preserve it.
9726     if (result && depth > 0) {
9727         const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
9728         bool has_preserved = false;
9729         for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
9730             if (subpass.pPreserveAttachments[j] == attachment) {
9731                 has_preserved = true;
9732                 break;
9733             }
9734         }
9735         if (!has_preserved) {
9736             skip_call |=
9737                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9738                         DRAWSTATE_INVALID_RENDERPASS, "DS",
9739                         "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
9740         }
9741     }
9742     return result;
9743 }
9744 
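// Return true if the two [offset, offset + size) ranges intersect.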
9745 template <class T> bool isRangeOverlapping(T offset1, T size1, T offset2, T size2) {
9746     return (((offset1 + size1) > offset2) && ((offset1 + size1) < (offset2 + size2))) ||
9747            ((offset1 > offset2) && (offset1 < (offset2 + size2)));
9748 }
9749 
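// Return true if two image subresource ranges overlap in both their mip-level and array-layer ranges.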
9750 bool isRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
9751     return (isRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
9752             isRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
9753 }
9754 
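// Validate that the render pass's subpass dependencies cover every case where subpasses read or write the same
// (or an aliasing) framebuffer attachment, and that aliasing attachments declare MAY_ALIAS.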
9755 static bool ValidateDependencies(const layer_data *dev_data, FRAMEBUFFER_STATE const *framebuffer,
9756                                  RENDER_PASS_STATE const *renderPass) {
9757     bool skip_call = false;
9758     auto const pFramebufferInfo = framebuffer->createInfo.ptr();
9759     auto const pCreateInfo = renderPass->createInfo.ptr();
9760     auto const & subpass_to_node = renderPass->subpassToNode;
9761     std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount);
9762     std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount);
9763     std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount);
9764     // Find overlapping attachments
9765     for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
9766         for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
9767             VkImageView viewi = pFramebufferInfo->pAttachments[i];
9768             VkImageView viewj = pFramebufferInfo->pAttachments[j];
9769             if (viewi == viewj) {
9770                 overlapping_attachments[i].push_back(j);
9771                 overlapping_attachments[j].push_back(i);
9772                 continue;
9773             }
9774             auto view_state_i = getImageViewState(dev_data, viewi);
9775             auto view_state_j = getImageViewState(dev_data, viewj);
9776             if (!view_state_i || !view_state_j) {
9777                 continue;
9778             }
9779             auto view_ci_i = view_state_i->create_info;
9780             auto view_ci_j = view_state_j->create_info;
9781             if (view_ci_i.image == view_ci_j.image && isRegionOverlapping(view_ci_i.subresourceRange, view_ci_j.subresourceRange)) {
9782                 overlapping_attachments[i].push_back(j);
9783                 overlapping_attachments[j].push_back(i);
9784                 continue;
9785             }
9786             auto image_data_i = getImageState(dev_data, view_ci_i.image);
9787             auto image_data_j = getImageState(dev_data, view_ci_j.image);
9788             if (!image_data_i || !image_data_j) {
9789                 continue;
9790             }
9791             if (image_data_i->binding.mem == image_data_j->binding.mem &&
9792                 isRangeOverlapping(image_data_i->binding.offset, image_data_i->binding.size, image_data_j->binding.offset,
9793                                    image_data_j->binding.size)) {
9794                 overlapping_attachments[i].push_back(j);
9795                 overlapping_attachments[j].push_back(i);
9796             }
9797         }
9798     }
9799     for (uint32_t i = 0; i < overlapping_attachments.size(); ++i) {
9800         uint32_t attachment = i;
9801         for (auto other_attachment : overlapping_attachments[i]) {
9802             if (!(pCreateInfo->pAttachments[attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
9803                 skip_call |=
9804                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9805                             DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
9806                                                                 "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
9807                             attachment, other_attachment);
9808             }
9809             if (!(pCreateInfo->pAttachments[other_attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
9810                 skip_call |=
9811                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9812                             DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
9813                                                                 "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
9814                             other_attachment, attachment);
9815             }
9816         }
9817     }
9818     // For each attachment, find the subpasses that use it.
9819     unordered_set<uint32_t> attachmentIndices;
9820     for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9821         const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9822         attachmentIndices.clear();
9823         for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9824             uint32_t attachment = subpass.pInputAttachments[j].attachment;
9825             if (attachment == VK_ATTACHMENT_UNUSED)
9826                 continue;
9827             input_attachment_to_subpass[attachment].push_back(i);
9828             for (auto overlapping_attachment : overlapping_attachments[attachment]) {
9829                 input_attachment_to_subpass[overlapping_attachment].push_back(i);
9830             }
9831         }
9832         for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9833             uint32_t attachment = subpass.pColorAttachments[j].attachment;
9834             if (attachment == VK_ATTACHMENT_UNUSED)
9835                 continue;
9836             output_attachment_to_subpass[attachment].push_back(i);
9837             for (auto overlapping_attachment : overlapping_attachments[attachment]) {
9838                 output_attachment_to_subpass[overlapping_attachment].push_back(i);
9839             }
9840             attachmentIndices.insert(attachment);
9841         }
9842         if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9843             uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
9844             output_attachment_to_subpass[attachment].push_back(i);
9845             for (auto overlapping_attachment : overlapping_attachments[attachment]) {
9846                 output_attachment_to_subpass[overlapping_attachment].push_back(i);
9847             }
9848 
9849             if (attachmentIndices.count(attachment)) {
9850                 skip_call |=
9851                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9852                             DRAWSTATE_INVALID_RENDERPASS, "DS",
9853                             "Cannot use same attachment (%u) as both color and depth output in same subpass (%u).", attachment, i);
9854             }
9855         }
9856     }
9857     // If a dependency is needed, make sure one exists
9858     for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9859         const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9860         // If the attachment is an input then all subpasses that output must have a dependency relationship
9861         for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9862             uint32_t attachment = subpass.pInputAttachments[j].attachment;
9863             if (attachment == VK_ATTACHMENT_UNUSED)
9864                 continue;
9865             CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9866         }
9867         // If the attachment is an output then all subpasses that use the attachment must have a dependency relationship
9868         for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9869             uint32_t attachment = subpass.pColorAttachments[j].attachment;
9870             if (attachment == VK_ATTACHMENT_UNUSED)
9871                 continue;
9872             CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9873             CheckDependencyExists(dev_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9874         }
9875         if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9876             const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
9877             CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9878             CheckDependencyExists(dev_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9879         }
9880     }
9881     // Loop through implicit dependencies: if a subpass reads an attachment, make sure that attachment is preserved by every
9882     // subpass between the one that wrote it and the one that reads it.
9883     for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9884         const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9885         for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9886             CheckPreserved(dev_data, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip_call);
9887         }
9888     }
9889     return skip_call;
9890 }
9891 // ValidateLayoutVsAttachmentDescription is a general function for validating state associated with the
9892 // VkAttachmentDescription structs used by a render pass's subpasses. The initial check verifies that attachments whose
9893 // first-use layout is READ_ONLY do not use CLEAR as their loadOp.
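// For example, an attachment whose first use specifies VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL but whose
// description uses VK_ATTACHMENT_LOAD_OP_CLEAR is flagged by this check.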
9894 static bool ValidateLayoutVsAttachmentDescription(debug_report_data *report_data, const VkImageLayout first_layout,
9895                                                   const uint32_t attachment,
9896                                                   const VkAttachmentDescription &attachment_description) {
9897     bool skip_call = false;
9898     // Verify that initial loadOp on READ_ONLY attachments is not CLEAR
9899     if (attachment_description.loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR) {
9900         if ((first_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) ||
9901             (first_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL)) {
9902             skip_call |=
9903                 log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
9904                         VkDebugReportObjectTypeEXT(0), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9905                         "Cannot clear attachment %d with invalid first layout %s.", attachment, string_VkImageLayout(first_layout));
9906         }
9907     }
9908     return skip_call;
9909 }
9910 
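// Check that every color, depth/stencil, and input attachment reference in each subpass uses a layout that is legal
// for that usage, warn when GENERAL is used, and validate each attachment's first-use layout against its description.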
9911 static bool ValidateLayouts(const layer_data *dev_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo) {
9912     bool skip = false;
9913 
9914     // Track when we're observing the first use of an attachment
9915     std::vector<bool> attach_first_use(pCreateInfo->attachmentCount, true);
9916     for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9917         const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9918         for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9919             auto attach_index = subpass.pColorAttachments[j].attachment;
9920             if (attach_index == VK_ATTACHMENT_UNUSED)
9921                 continue;
9922 
9923             switch (subpass.pColorAttachments[j].layout) {
9924             case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
9925                 /* This is ideal. */
9926                 break;
9927 
9928             case VK_IMAGE_LAYOUT_GENERAL:
9929                 /* May not be optimal; TODO: reconsider this warning based on
9930                  * other constraints?
9931                  */
9932                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
9933                                 VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9934                                 "Layout for color attachment is GENERAL but should be COLOR_ATTACHMENT_OPTIMAL.");
9935                 break;
9936 
9937             default:
9938                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
9939                                 __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9940                                 "Layout for color attachment is %s but can only be COLOR_ATTACHMENT_OPTIMAL or GENERAL.",
9941                                 string_VkImageLayout(subpass.pColorAttachments[j].layout));
9942             }
9943 
9944             if (attach_first_use[attach_index]) {
9945                 skip |= ValidateLayoutVsAttachmentDescription(dev_data->report_data, subpass.pColorAttachments[j].layout,
9946                                                               attach_index, pCreateInfo->pAttachments[attach_index]);
9947             }
9948             attach_first_use[attach_index] = false;
9949         }
9950         if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9951             switch (subpass.pDepthStencilAttachment->layout) {
9952             case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
9953             case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
9954                 /* These are ideal. */
9955                 break;
9956 
9957             case VK_IMAGE_LAYOUT_GENERAL:
9958                 /* May not be optimal; TODO: reconsider this warning based on
9959                  * other constraints? GENERAL can be better than doing a bunch
9960                  * of transitions.
9961                  */
9962                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
9963                                 VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9964                                 "GENERAL layout for depth attachment may not give optimal performance.");
9965                 break;
9966 
9967             default:
9968                 /* No other layouts are acceptable */
9969                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
9970                                 __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9971                                 "Layout for depth attachment is %s but can only be DEPTH_STENCIL_ATTACHMENT_OPTIMAL, "
9972                                 "DEPTH_STENCIL_READ_ONLY_OPTIMAL or GENERAL.",
9973                                 string_VkImageLayout(subpass.pDepthStencilAttachment->layout));
9974             }
9975 
9976             auto attach_index = subpass.pDepthStencilAttachment->attachment;
9977             if (attach_first_use[attach_index]) {
9978                 skip |= ValidateLayoutVsAttachmentDescription(dev_data->report_data, subpass.pDepthStencilAttachment->layout,
9979                                                               attach_index, pCreateInfo->pAttachments[attach_index]);
9980             }
9981             attach_first_use[attach_index] = false;
9982         }
9983         for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9984             auto attach_index = subpass.pInputAttachments[j].attachment;
9985             if (attach_index == VK_ATTACHMENT_UNUSED)
9986                 continue;
9987 
9988             switch (subpass.pInputAttachments[j].layout) {
9989             case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
9990             case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
9991                 /* These are ideal. */
9992                 break;
9993 
9994             case VK_IMAGE_LAYOUT_GENERAL:
9995                 /* May not be optimal. TODO: reconsider this warning based on
9996                  * other constraints.
9997                  */
9998                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
9999                                 VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
10000                                 "Layout for input attachment is GENERAL but should be READ_ONLY_OPTIMAL.");
10001                 break;
10002 
10003             default:
10004                 /* No other layouts are acceptable */
10005                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10006                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
10007                                 "Layout for input attachment is %s but can only be READ_ONLY_OPTIMAL or GENERAL.",
10008                                 string_VkImageLayout(subpass.pInputAttachments[j].layout));
10009             }
10010 
10011             if (attach_first_use[attach_index]) {
10012                 skip |= ValidateLayoutVsAttachmentDescription(dev_data->report_data, subpass.pInputAttachments[j].layout,
10013                                                               attach_index, pCreateInfo->pAttachments[attach_index]);
10014             }
10015             attach_first_use[attach_index] = false;
10016         }
10017     }
10018     return skip;
10019 }
10020 
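// Build the subpass dependency DAG: record prev/next edges for each VkSubpassDependency, flag self-dependencies,
// and reject dependencies that point from a later subpass back to an earlier one (external dependencies are skipped).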
10021 static bool CreatePassDAG(const layer_data *dev_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
10022                           std::vector<DAGNode> &subpass_to_node, std::vector<bool> &has_self_dependency) {
10023     bool skip_call = false;
10024     for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
10025         DAGNode &subpass_node = subpass_to_node[i];
10026         subpass_node.pass = i;
10027     }
10028     for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
10029         const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i];
10030         if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL || dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
10031             if (dependency.srcSubpass == dependency.dstSubpass) {
10032                 skip_call |=
10033                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10034                             DRAWSTATE_INVALID_RENDERPASS, "DS", "The src and dest subpasses cannot both be external.");
10035             }
10036 
10037             // We don't want to add edges to the DAG for dependencies to/from
10038             // VK_SUBPASS_EXTERNAL. We don't use them for anything, and their
10039             // presence complicates other code.
10040             continue;
10041         } else if (dependency.srcSubpass > dependency.dstSubpass) {
10042             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10043                                  DRAWSTATE_INVALID_RENDERPASS, "DS",
10044                                  "Dependency graph must be specified such that an earlier pass cannot depend on a later pass.");
10045         } else if (dependency.srcSubpass == dependency.dstSubpass) {
10046             has_self_dependency[dependency.srcSubpass] = true;
10047         }
10048 
10049         subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass);
10050         subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass);
10051     }
10052     return skip_call;
10053 }
10054 
10055 
10056 VKAPI_ATTR VkResult VKAPI_CALL CreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
10057                                                   const VkAllocationCallbacks *pAllocator,
10058                                                   VkShaderModule *pShaderModule) {
10059     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10060     bool skip_call = false;
10061 
10062     /* Use SPIRV-Tools validator to try and catch any issues with the module itself */
10063     spv_context ctx = spvContextCreate(SPV_ENV_VULKAN_1_0);
10064     spv_const_binary_t binary { pCreateInfo->pCode, pCreateInfo->codeSize / sizeof(uint32_t) };
10065     spv_diagnostic diag = nullptr;
10066 
10067     auto result = spvValidate(ctx, &binary, &diag);
10068     if (result != SPV_SUCCESS) {
10069         skip_call |=
10070             log_msg(dev_data->report_data, result == SPV_WARNING ? VK_DEBUG_REPORT_WARNING_BIT_EXT : VK_DEBUG_REPORT_ERROR_BIT_EXT,
10071                     VkDebugReportObjectTypeEXT(0), 0, __LINE__, SHADER_CHECKER_INCONSISTENT_SPIRV, "SC",
10072                     "SPIR-V module not valid: %s", diag && diag->error ? diag->error : "(no error text)");
10073     }
10074 
10075     spvDiagnosticDestroy(diag);
10076     spvContextDestroy(ctx);
10077 
10078     if (skip_call)
10079         return VK_ERROR_VALIDATION_FAILED_EXT;
10080 
10081     VkResult res = dev_data->dispatch_table.CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);
10082 
10083     if (res == VK_SUCCESS) {
10084         std::lock_guard<std::mutex> lock(global_lock);
10085         dev_data->shaderModuleMap[*pShaderModule] = unique_ptr<shader_module>(new shader_module(pCreateInfo));
10086     }
10087     return res;
10088 }
10089 
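// Verify that an attachment reference is either VK_ATTACHMENT_UNUSED or a valid index into the render pass's attachments.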
10090 static bool ValidateAttachmentIndex(layer_data *dev_data, uint32_t attachment, uint32_t attachment_count, const char *type) {
10091     bool skip_call = false;
10092     if (attachment >= attachment_count && attachment != VK_ATTACHMENT_UNUSED) {
10093         skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10094                              DRAWSTATE_INVALID_ATTACHMENT_INDEX, "DS",
10095                              "CreateRenderPass: %s attachment %d must be less than the total number of attachments %d.",
10096                              type, attachment, attachment_count);
10097     }
10098     return skip_call;
10099 }
10100 
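// Return true if x is a non-zero power of two.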
10101 static bool IsPowerOfTwo(unsigned x) {
10102     return x && !(x & (x-1));
10103 }
10104 
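// Validate the attachment references of every subpass: graphics bind point, preserve/resolve/color/depth-stencil
// indices, and the sample-count rules for multisample resolve attachments.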
10105 static bool ValidateRenderpassAttachmentUsage(layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo) {
10106     bool skip_call = false;
10107     for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
10108         const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
10109         if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS) {
10110             skip_call |=
10111                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10112                         DRAWSTATE_INVALID_RENDERPASS, "DS",
10113                         "CreateRenderPass: Pipeline bind point for subpass %d must be VK_PIPELINE_BIND_POINT_GRAPHICS.", i);
10114         }
10115         for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
10116             uint32_t attachment = subpass.pPreserveAttachments[j];
10117             if (attachment == VK_ATTACHMENT_UNUSED) {
10118                 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
10119                                      __LINE__, DRAWSTATE_INVALID_ATTACHMENT_INDEX, "DS",
10120                                      "CreateRenderPass:  Preserve attachment (%d) must not be VK_ATTACHMENT_UNUSED.", j);
10121             } else {
10122                 skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Preserve");
10123             }
10124         }
10125 
10126         auto subpass_performs_resolve = subpass.pResolveAttachments && std::any_of(
10127             subpass.pResolveAttachments, subpass.pResolveAttachments + subpass.colorAttachmentCount,
10128             [](VkAttachmentReference ref) { return ref.attachment != VK_ATTACHMENT_UNUSED; });
10129 
10130         unsigned sample_count = 0;
10131 
10132         for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
10133             uint32_t attachment;
10134             if (subpass.pResolveAttachments) {
10135                 attachment = subpass.pResolveAttachments[j].attachment;
10136                 skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Resolve");
10137 
10138                 if (!skip_call && attachment != VK_ATTACHMENT_UNUSED &&
10139                     pCreateInfo->pAttachments[attachment].samples != VK_SAMPLE_COUNT_1_BIT) {
10140                     skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
10141                                          __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
10142                                          "CreateRenderPass:  Subpass %u requests multisample resolve into attachment %u, "
10143                                          "which must have VK_SAMPLE_COUNT_1_BIT but has %s",
10144                                          i, attachment, string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment].samples));
10145                 }
10146             }
10147             attachment = subpass.pColorAttachments[j].attachment;
10148             skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Color");
10149 
10150             if (!skip_call && attachment != VK_ATTACHMENT_UNUSED) {
10151                 sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples;
10152 
10153                 if (subpass_performs_resolve &&
10154                     pCreateInfo->pAttachments[attachment].samples == VK_SAMPLE_COUNT_1_BIT) {
10155                     skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
10156                                          __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
10157                                          "CreateRenderPass:  Subpass %u requests multisample resolve from attachment %u "
10158                                          "which has VK_SAMPLE_COUNT_1_BIT",
10159                                          i, attachment);
10160                 }
10161             }
10162         }
10163 
10164         if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
10165             uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
10166             skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Depth stencil");
10167 
10168             if (!skip_call && attachment != VK_ATTACHMENT_UNUSED) {
10169                 sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples;
10170             }
10171         }
10172 
10173         for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
10174             uint32_t attachment = subpass.pInputAttachments[j].attachment;
10175             skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Input");
10176         }
10177 
10178         if (sample_count && !IsPowerOfTwo(sample_count)) {
10179             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
10180                                  __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
10181                                  "CreateRenderPass:  Subpass %u attempts to render to "
10182                                  "attachments with inconsistent sample counts",
10183                                  i);
10184         }
10185     }
10186     return skip_call;
10187 }
10188 
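// CreateRenderPass: validate layouts and attachment usage up front, then (once the driver succeeds)
// record per-render-pass state -- the subpass DAG, self-dependency flags, and the first read/layout
// seen for each attachment -- which later drives the load-op and layout checks at vkCmdBeginRenderPass
// time.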
VKAPI_ATTR VkResult VKAPI_CALL CreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
                                                const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    std::unique_lock<std::mutex> lock(global_lock);

    skip_call |= ValidateLayouts(dev_data, device, pCreateInfo);
    // TODO: As part of wrapping up the mem_tracker/core_validation merge the following routine should be consolidated with
    //       ValidateLayouts.
    skip_call |= ValidateRenderpassAttachmentUsage(dev_data, pCreateInfo);
    lock.unlock();

    if (skip_call) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VkResult result = dev_data->dispatch_table.CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);

    if (VK_SUCCESS == result) {
        lock.lock();

        std::vector<bool> has_self_dependency(pCreateInfo->subpassCount);
        std::vector<DAGNode> subpass_to_node(pCreateInfo->subpassCount);
        skip_call |= CreatePassDAG(dev_data, device, pCreateInfo, subpass_to_node, has_self_dependency);

        auto render_pass = unique_ptr<RENDER_PASS_STATE>(new RENDER_PASS_STATE(pCreateInfo));
        render_pass->renderPass = *pRenderPass;
        render_pass->hasSelfDependency = has_self_dependency;
        render_pass->subpassToNode = subpass_to_node;

        // TODO: Maybe fill list and then copy instead of locking
        std::unordered_map<uint32_t, bool> &attachment_first_read = render_pass->attachment_first_read;
        std::unordered_map<uint32_t, VkImageLayout> &attachment_first_layout = render_pass->attachment_first_layout;
        for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
            const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
            for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
                uint32_t attachment = subpass.pColorAttachments[j].attachment;
                if (!attachment_first_read.count(attachment)) {
                    attachment_first_read.insert(std::make_pair(attachment, false));
                    attachment_first_layout.insert(std::make_pair(attachment, subpass.pColorAttachments[j].layout));
                }
            }
            if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
                uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
                if (!attachment_first_read.count(attachment)) {
                    attachment_first_read.insert(std::make_pair(attachment, false));
                    attachment_first_layout.insert(std::make_pair(attachment, subpass.pDepthStencilAttachment->layout));
                }
            }
            for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
                uint32_t attachment = subpass.pInputAttachments[j].attachment;
                if (!attachment_first_read.count(attachment)) {
                    attachment_first_read.insert(std::make_pair(attachment, true));
                    attachment_first_layout.insert(std::make_pair(attachment, subpass.pInputAttachments[j].layout));
                }
            }
        }

        dev_data->renderPassMap[*pRenderPass] = std::move(render_pass);
    }
    return result;
}

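// VerifyFramebufferAndRenderPassLayouts: at vkCmdBeginRenderPass time, compare each framebuffer
// attachment's last-known per-subresource layout in this command buffer against the render pass
// initialLayout; the two must match unless the initial layout is VK_IMAGE_LAYOUT_UNDEFINED.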
static bool VerifyFramebufferAndRenderPassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin) {
    bool skip_call = false;
    auto const pRenderPassInfo = getRenderPassState(dev_data, pRenderPassBegin->renderPass)->createInfo.ptr();
    auto const &framebufferInfo = dev_data->frameBufferMap[pRenderPassBegin->framebuffer]->createInfo;
    if (pRenderPassInfo->attachmentCount != framebufferInfo.attachmentCount) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot start a render pass using a framebuffer "
                                                                 "with a different number of attachments.");
    }
    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
        const VkImageView &image_view = framebufferInfo.pAttachments[i];
        auto view_state = getImageViewState(dev_data, image_view);
        assert(view_state);
        const VkImage &image = view_state->create_info.image;
        const VkImageSubresourceRange &subRange = view_state->create_info.subresourceRange;
        IMAGE_CMD_BUF_LAYOUT_NODE newNode = {pRenderPassInfo->pAttachments[i].initialLayout,
                                             pRenderPassInfo->pAttachments[i].initialLayout};
        // TODO: Do not iterate over every possibility - consolidate where possible
        for (uint32_t j = 0; j < subRange.levelCount; j++) {
            uint32_t level = subRange.baseMipLevel + j;
            for (uint32_t k = 0; k < subRange.layerCount; k++) {
                uint32_t layer = subRange.baseArrayLayer + k;
                VkImageSubresource sub = {subRange.aspectMask, level, layer};
                IMAGE_CMD_BUF_LAYOUT_NODE node;
                if (!FindLayout(pCB, image, sub, node)) {
                    SetLayout(pCB, image, sub, newNode);
                    continue;
                }
                if (newNode.layout != VK_IMAGE_LAYOUT_UNDEFINED &&
                    newNode.layout != node.layout) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_RENDERPASS, "DS",
                                "You cannot start a render pass using attachment %u "
                                "where the render pass initial layout is %s and the previous "
                                "known layout of the attachment is %s. The layouts must match, or "
                                "the render pass initial layout for the attachment must be "
                                "VK_IMAGE_LAYOUT_UNDEFINED",
                                i, string_VkImageLayout(newNode.layout), string_VkImageLayout(node.layout));
                }
            }
        }
    }
    return skip_call;
}

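// TransitionAttachmentRefLayout/TransitionSubpassLayouts: update the tracked layout of every image
// view referenced by a subpass (input, color, depth/stencil) to the layout declared in its
// VkAttachmentReference, mirroring the implicit transitions the render pass performs.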
static void TransitionAttachmentRefLayout(layer_data *dev_data, GLOBAL_CB_NODE *pCB, FRAMEBUFFER_STATE *pFramebuffer,
                                          VkAttachmentReference ref) {
    if (ref.attachment != VK_ATTACHMENT_UNUSED) {
        auto image_view = pFramebuffer->createInfo.pAttachments[ref.attachment];
        SetLayout(dev_data, pCB, image_view, ref.layout);
    }
}

static void TransitionSubpassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin,
                                     const int subpass_index) {
    auto renderPass = getRenderPassState(dev_data, pRenderPassBegin->renderPass);
    if (!renderPass)
        return;

    auto framebuffer = getFramebufferState(dev_data, pRenderPassBegin->framebuffer);
    if (!framebuffer)
        return;

    auto const &subpass = renderPass->createInfo.pSubpasses[subpass_index];
    for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
        TransitionAttachmentRefLayout(dev_data, pCB, framebuffer, subpass.pInputAttachments[j]);
    }
    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
        TransitionAttachmentRefLayout(dev_data, pCB, framebuffer, subpass.pColorAttachments[j]);
    }
    if (subpass.pDepthStencilAttachment) {
        TransitionAttachmentRefLayout(dev_data, pCB, framebuffer, *subpass.pDepthStencilAttachment);
    }
}

static bool validatePrimaryCommandBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const std::string &cmd_name) {
    bool skip_call = false;
    if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", "Cannot execute command %s on a secondary command buffer.",
                             cmd_name.c_str());
    }
    return skip_call;
}

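// TransitionFinalSubpassLayouts: when the render pass ends, record each framebuffer attachment as
// being in its declared finalLayout.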
static void TransitionFinalSubpassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin) {
    auto renderPass = getRenderPassState(dev_data, pRenderPassBegin->renderPass);
    if (!renderPass)
        return;

    const VkRenderPassCreateInfo *pRenderPassInfo = renderPass->createInfo.ptr();
    auto framebuffer = getFramebufferState(dev_data, pRenderPassBegin->framebuffer);
    if (!framebuffer)
        return;

    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
        auto image_view = framebuffer->createInfo.pAttachments[i];
        SetLayout(dev_data, pCB, image_view, pRenderPassInfo->pAttachments[i].finalLayout);
    }
}

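// VerifyRenderAreaBounds: the renderArea of a VkRenderPassBeginInfo must lie entirely within the
// framebuffer's width and height.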
static bool VerifyRenderAreaBounds(const layer_data *dev_data, const VkRenderPassBeginInfo *pRenderPassBegin) {
    bool skip_call = false;
    const safe_VkFramebufferCreateInfo *pFramebufferInfo =
        &getFramebufferState(dev_data, pRenderPassBegin->framebuffer)->createInfo;
    if (pRenderPassBegin->renderArea.offset.x < 0 ||
        (pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > pFramebufferInfo->width ||
        pRenderPassBegin->renderArea.offset.y < 0 ||
        (pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > pFramebufferInfo->height) {
        skip_call |= static_cast<bool>(log_msg(
            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
            DRAWSTATE_INVALID_RENDER_AREA, "CORE",
            "Cannot execute a render pass with renderArea not within the bounds of the "
            "framebuffer. RenderArea: x %d, y %d, width %d, height %d. Framebuffer: width %d, "
            "height %d.",
            pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width,
            pRenderPassBegin->renderArea.extent.height, pFramebufferInfo->width, pFramebufferInfo->height));
    }
    return skip_call;
}

// Return true when the given op applies to this attachment's format: for a stencil-only format only
// the stencil[Load|Store]Op matters, for a color or depth-only format only the [load|store]Op
// matters, and depth/stencil formats consider both.
// TODO: The memory valid flag in DEVICE_MEM_INFO should probably be split to track the validity of stencil memory separately.
template <typename T> static bool FormatSpecificLoadAndStoreOpSettings(VkFormat format, T color_depth_op, T stencil_op, T op) {
    if (color_depth_op != op && stencil_op != op) {
        return false;
    }
    bool check_color_depth_load_op = !vk_format_is_stencil_only(format);
    bool check_stencil_load_op = vk_format_is_depth_and_stencil(format) || !check_color_depth_load_op;

    return (((check_color_depth_load_op == true) && (color_depth_op == op)) ||
            ((check_stencil_load_op == true) && (stencil_op == op)));
}

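// Illustrative example (not exercised by the layer itself): for a VK_FORMAT_D24_UNORM_S8_UINT
// attachment, FormatSpecificLoadAndStoreOpSettings(format, loadOp, stencilLoadOp,
// VK_ATTACHMENT_LOAD_OP_CLEAR) returns true if either the depth loadOp or the stencil loadOp is
// CLEAR, so CmdBeginRenderPass below treats the attachment as cleared.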
VKAPI_ATTR void VKAPI_CALL
CmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, VkSubpassContents contents) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *cb_node = getCBNode(dev_data, commandBuffer);
    auto renderPass = pRenderPassBegin ? getRenderPassState(dev_data, pRenderPassBegin->renderPass) : nullptr;
    auto framebuffer = pRenderPassBegin ? getFramebufferState(dev_data, pRenderPassBegin->framebuffer) : nullptr;
    if (cb_node) {
        if (renderPass) {
            uint32_t clear_op_size = 0; // Make sure pClearValues is at least as large as last LOAD_OP_CLEAR
            cb_node->activeFramebuffer = pRenderPassBegin->framebuffer;
            for (uint32_t i = 0; i < renderPass->createInfo.attachmentCount; ++i) {
                MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
                auto pAttachment = &renderPass->createInfo.pAttachments[i];
                if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp,
                                                         pAttachment->stencilLoadOp,
                                                         VK_ATTACHMENT_LOAD_OP_CLEAR)) {
                    clear_op_size = static_cast<uint32_t>(i) + 1;
                    std::function<bool()> function = [=]() {
                        SetImageMemoryValid(dev_data, getImageState(dev_data, fb_info.image), true);
                        return false;
                    };
                    cb_node->validate_functions.push_back(function);
                } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp,
                                                                pAttachment->stencilLoadOp,
                                                                VK_ATTACHMENT_LOAD_OP_DONT_CARE)) {
                    std::function<bool()> function = [=]() {
                        SetImageMemoryValid(dev_data, getImageState(dev_data, fb_info.image), false);
                        return false;
                    };
                    cb_node->validate_functions.push_back(function);
                } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp,
                                                                pAttachment->stencilLoadOp,
                                                                VK_ATTACHMENT_LOAD_OP_LOAD)) {
                    std::function<bool()> function = [=]() {
                        return ValidateImageMemoryIsValid(dev_data, getImageState(dev_data, fb_info.image),
                                                          "vkCmdBeginRenderPass()");
                    };
                    cb_node->validate_functions.push_back(function);
                }
                if (renderPass->attachment_first_read[i]) {
                    std::function<bool()> function = [=]() {
                        return ValidateImageMemoryIsValid(dev_data, getImageState(dev_data, fb_info.image),
                                                          "vkCmdBeginRenderPass()");
                    };
                    cb_node->validate_functions.push_back(function);
                }
            }
            if (clear_op_size > pRenderPassBegin->clearValueCount) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                            reinterpret_cast<uint64_t &>(renderPass), __LINE__, VALIDATION_ERROR_00442, "DS",
                            "In vkCmdBeginRenderPass() the VkRenderPassBeginInfo struct has a clearValueCount of %u but there must "
                            "be at least %u "
                            "entries in pClearValues array to account for the highest index attachment in renderPass 0x%" PRIx64
                            " that uses VK_ATTACHMENT_LOAD_OP_CLEAR, which is %u. Note that the pClearValues array "
                            "is indexed by attachment number so even if some pClearValues entries between 0 and %u correspond to "
                            "attachments that aren't cleared they will be ignored. %s",
                            pRenderPassBegin->clearValueCount, clear_op_size, reinterpret_cast<uint64_t &>(renderPass),
                            clear_op_size, clear_op_size - 1, validation_error_map[VALIDATION_ERROR_00442]);
            }
            skip_call |= VerifyRenderAreaBounds(dev_data, pRenderPassBegin);
            skip_call |= VerifyFramebufferAndRenderPassLayouts(dev_data, cb_node, pRenderPassBegin);
            skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdBeginRenderPass");
            skip_call |= ValidateDependencies(dev_data, framebuffer, renderPass);
            skip_call |= validatePrimaryCommandBuffer(dev_data, cb_node, "vkCmdBeginRenderPass");
            skip_call |= addCmd(dev_data, cb_node, CMD_BEGINRENDERPASS, "vkCmdBeginRenderPass()");
            cb_node->activeRenderPass = renderPass;
            // This is a shallow copy as that is all that is needed for now
            cb_node->activeRenderPassBeginInfo = *pRenderPassBegin;
            cb_node->activeSubpass = 0;
            cb_node->activeSubpassContents = contents;
            cb_node->framebuffers.insert(pRenderPassBegin->framebuffer);
            // Connect this framebuffer and its children to this cmdBuffer
            AddFramebufferBinding(dev_data, cb_node, framebuffer);
            // transition attachments to the correct layouts for the first subpass
            TransitionSubpassLayouts(dev_data, cb_node, &cb_node->activeRenderPassBeginInfo, cb_node->activeSubpass);
        } else {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot use a NULL RenderPass object in vkCmdBeginRenderPass()");
        }
    }
    lock.unlock();
    if (!skip_call) {
        dev_data->dispatch_table.CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
    }
}

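// CmdNextSubpass: must be recorded on a primary command buffer inside a render pass and must not
// advance beyond the final subpass; on success the layer bumps activeSubpass and applies that
// subpass's attachment layout transitions.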
VKAPI_ATTR void VKAPI_CALL CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdNextSubpass");
        skip_call |= addCmd(dev_data, pCB, CMD_NEXTSUBPASS, "vkCmdNextSubpass()");
        skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdNextSubpass");

        auto subpassCount = pCB->activeRenderPass->createInfo.subpassCount;
        if (pCB->activeSubpass == subpassCount - 1) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_INVALID_SUBPASS_INDEX, "DS",
                        "vkCmdNextSubpass(): Attempted to advance beyond final subpass");
        }
    }
    lock.unlock();

    if (skip_call)
        return;

    dev_data->dispatch_table.CmdNextSubpass(commandBuffer, contents);

    if (pCB) {
        lock.lock();
        pCB->activeSubpass++;
        pCB->activeSubpassContents = contents;
        TransitionSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo, pCB->activeSubpass);
    }
}

VKAPI_ATTR void VKAPI_CALL CmdEndRenderPass(VkCommandBuffer commandBuffer) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    auto pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        RENDER_PASS_STATE *rp_state = pCB->activeRenderPass;
        auto framebuffer = getFramebufferState(dev_data, pCB->activeFramebuffer);
        if (rp_state) {
            if (pCB->activeSubpass != rp_state->createInfo.subpassCount - 1) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_INVALID_SUBPASS_INDEX, "DS",
                            "vkCmdEndRenderPass(): Called before reaching final subpass");
            }

            for (size_t i = 0; i < rp_state->createInfo.attachmentCount; ++i) {
                MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
                auto pAttachment = &rp_state->createInfo.pAttachments[i];
                if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->storeOp,
                                                         pAttachment->stencilStoreOp, VK_ATTACHMENT_STORE_OP_STORE)) {
                    std::function<bool()> function = [=]() {
                        SetImageMemoryValid(dev_data, getImageState(dev_data, fb_info.image), true);
                        return false;
                    };
                    pCB->validate_functions.push_back(function);
                } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->storeOp,
                                                                pAttachment->stencilStoreOp,
                                                                VK_ATTACHMENT_STORE_OP_DONT_CARE)) {
                    std::function<bool()> function = [=]() {
                        SetImageMemoryValid(dev_data, getImageState(dev_data, fb_info.image), false);
                        return false;
                    };
                    pCB->validate_functions.push_back(function);
                }
            }
        }
        skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdEndRenderPass");
        skip_call |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdEndRenderPass");
        skip_call |= addCmd(dev_data, pCB, CMD_ENDRENDERPASS, "vkCmdEndRenderPass()");
    }
    lock.unlock();

    if (skip_call)
        return;

    dev_data->dispatch_table.CmdEndRenderPass(commandBuffer);

    if (pCB) {
        lock.lock();
        TransitionFinalSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo);
        pCB->activeRenderPass = nullptr;
        pCB->activeSubpass = 0;
        pCB->activeFramebuffer = VK_NULL_HANDLE;
    }
}

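// The compatibility helpers below implement the render pass compatibility rules used by
// vkCmdExecuteCommands: corresponding attachments must agree in format and sample count (and in
// flags when the passes have more than one subpass), with VK_ATTACHMENT_UNUSED matching only
// VK_ATTACHMENT_UNUSED.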
static bool logInvalidAttachmentMessage(layer_data *dev_data, VkCommandBuffer secondaryBuffer, uint32_t primaryAttach,
                                        uint32_t secondaryAttach, const char *msg) {
    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                   DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                   "vkCmdExecuteCommands() called w/ invalid Secondary Cmd Buffer 0x%" PRIx64 " which has a render pass "
                   "that is not compatible with the Primary Cmd Buffer current render pass. "
                   "Attachment %u is not compatible with %u: %s",
                   reinterpret_cast<uint64_t &>(secondaryBuffer), primaryAttach, secondaryAttach, msg);
}

static bool validateAttachmentCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
                                            VkRenderPassCreateInfo const *primaryPassCI, uint32_t primaryAttach,
                                            VkCommandBuffer secondaryBuffer, VkRenderPassCreateInfo const *secondaryPassCI,
                                            uint32_t secondaryAttach, bool is_multi) {
    bool skip_call = false;
    if (primaryPassCI->attachmentCount <= primaryAttach) {
        primaryAttach = VK_ATTACHMENT_UNUSED;
    }
    if (secondaryPassCI->attachmentCount <= secondaryAttach) {
        secondaryAttach = VK_ATTACHMENT_UNUSED;
    }
    if (primaryAttach == VK_ATTACHMENT_UNUSED && secondaryAttach == VK_ATTACHMENT_UNUSED) {
        return skip_call;
    }
    if (primaryAttach == VK_ATTACHMENT_UNUSED) {
        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach,
                                                 "The first is unused while the second is not.");
        return skip_call;
    }
    if (secondaryAttach == VK_ATTACHMENT_UNUSED) {
        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach,
                                                 "The second is unused while the first is not.");
        return skip_call;
    }
    if (primaryPassCI->pAttachments[primaryAttach].format != secondaryPassCI->pAttachments[secondaryAttach].format) {
        skip_call |=
            logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different formats.");
    }
    if (primaryPassCI->pAttachments[primaryAttach].samples != secondaryPassCI->pAttachments[secondaryAttach].samples) {
        skip_call |=
            logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different samples.");
    }
    if (is_multi && primaryPassCI->pAttachments[primaryAttach].flags != secondaryPassCI->pAttachments[secondaryAttach].flags) {
        skip_call |=
            logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different flags.");
    }
    return skip_call;
}

static bool validateSubpassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
                                         VkRenderPassCreateInfo const *primaryPassCI, VkCommandBuffer secondaryBuffer,
                                         VkRenderPassCreateInfo const *secondaryPassCI, const int subpass, bool is_multi) {
    bool skip_call = false;
    const VkSubpassDescription &primary_desc = primaryPassCI->pSubpasses[subpass];
    const VkSubpassDescription &secondary_desc = secondaryPassCI->pSubpasses[subpass];
    uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
    for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
        uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.inputAttachmentCount) {
            primary_input_attach = primary_desc.pInputAttachments[i].attachment;
        }
        if (i < secondary_desc.inputAttachmentCount) {
            secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
        }
        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_input_attach, secondaryBuffer,
                                                     secondaryPassCI, secondary_input_attach, is_multi);
    }
    uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
    for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
        uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.colorAttachmentCount) {
            primary_color_attach = primary_desc.pColorAttachments[i].attachment;
        }
        if (i < secondary_desc.colorAttachmentCount) {
            secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
        }
        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_color_attach, secondaryBuffer,
                                                     secondaryPassCI, secondary_color_attach, is_multi);
        uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
            primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
        }
        if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
            secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
        }
        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_resolve_attach,
                                                     secondaryBuffer, secondaryPassCI, secondary_resolve_attach, is_multi);
    }
    uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
    if (primary_desc.pDepthStencilAttachment) {
        primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
    }
    if (secondary_desc.pDepthStencilAttachment) {
        secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
    }
    skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_depthstencil_attach,
                                                 secondaryBuffer, secondaryPassCI, secondary_depthstencil_attach, is_multi);
    return skip_call;
}

// Verify that the given renderPass CreateInfos for primary and secondary command buffers are compatible.
//  This function deals directly with the CreateInfo, there are overloaded versions below that can take the renderPass handle and
//  will then feed into this function
static bool validateRenderPassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
                                            VkRenderPassCreateInfo const *primaryPassCI, VkCommandBuffer secondaryBuffer,
                                            VkRenderPassCreateInfo const *secondaryPassCI) {
    bool skip_call = false;

    if (primaryPassCI->subpassCount != secondaryPassCI->subpassCount) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                             "vkCmdExecuteCommands() called w/ invalid secondary Cmd Buffer 0x%" PRIx64
                             " that has a subpassCount of %u that is incompatible with the primary Cmd Buffer 0x%" PRIx64
                             " that has a subpassCount of %u.",
                             reinterpret_cast<uint64_t &>(secondaryBuffer), secondaryPassCI->subpassCount,
                             reinterpret_cast<uint64_t &>(primaryBuffer), primaryPassCI->subpassCount);
    } else {
        for (uint32_t i = 0; i < primaryPassCI->subpassCount; ++i) {
            skip_call |= validateSubpassCompatibility(dev_data, primaryBuffer, primaryPassCI, secondaryBuffer, secondaryPassCI, i,
                                                      primaryPassCI->subpassCount > 1);
        }
    }
    return skip_call;
}

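// validateFramebuffer: if a secondary command buffer's inheritance info names a framebuffer, it must
// be the primary's currently active framebuffer, it must be a known framebuffer, and its render pass
// must be compatible with the secondary's inherited render pass.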
static bool validateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
                                VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB) {
    bool skip_call = false;
    if (!pSubCB->beginInfo.pInheritanceInfo) {
        return skip_call;
    }
    VkFramebuffer primary_fb = pCB->activeFramebuffer;
    VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
    if (secondary_fb != VK_NULL_HANDLE) {
        if (primary_fb != secondary_fb) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_FRAMEBUFFER_INCOMPATIBLE, "DS",
                                 "vkCmdExecuteCommands() called w/ invalid secondary command buffer 0x%" PRIx64
                                 " which has a framebuffer 0x%" PRIx64
                                 " that is not the same as the primary command buffer's current active framebuffer 0x%" PRIx64 ".",
                                 reinterpret_cast<uint64_t &>(secondaryBuffer), reinterpret_cast<uint64_t &>(secondary_fb),
                                 reinterpret_cast<uint64_t &>(primary_fb));
        }
        auto fb = getFramebufferState(dev_data, secondary_fb);
        if (!fb) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS", "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
                                                                          "which has invalid framebuffer 0x%" PRIx64 ".",
                        (void *)secondaryBuffer, (uint64_t)(secondary_fb));
            return skip_call;
        }
        auto cb_renderpass = getRenderPassState(dev_data, pSubCB->beginInfo.pInheritanceInfo->renderPass);
        if (cb_renderpass->renderPass != fb->createInfo.renderPass) {
            skip_call |= validateRenderPassCompatibility(dev_data, secondaryBuffer, fb->renderPassCreateInfo.ptr(), secondaryBuffer,
                                                         cb_renderpass->createInfo.ptr());
        }
    }
    return skip_call;
}

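// validateSecondaryCommandBufferState: checks that pipeline-statistics queries active in the primary
// are compatible with the secondary's inherited pipelineStatistics, that the secondary does not start
// a query of a type already active in the primary, and that both command buffers were allocated from
// pools created for the same queue family.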
static bool validateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) {
    bool skip_call = false;
    unordered_set<int> activeTypes;
    for (auto queryObject : pCB->activeQueries) {
        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
        if (queryPoolData != dev_data->queryPoolMap.end()) {
            if (queryPoolData->second.createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
                pSubCB->beginInfo.pInheritanceInfo) {
                VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
                if ((cmdBufStatistics & queryPoolData->second.createInfo.pipelineStatistics) != cmdBufStatistics) {
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
                        "which has invalid active query pool 0x%" PRIx64 ". Pipeline statistics is being queried so the command "
                        "buffer must have all bits set on the queryPool.",
                        reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first));
                }
            }
            activeTypes.insert(queryPoolData->second.createInfo.queryType);
        }
    }
    for (auto queryObject : pSubCB->startedQueries) {
        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
        if (queryPoolData != dev_data->queryPoolMap.end() && activeTypes.count(queryPoolData->second.createInfo.queryType)) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
                        "which has invalid active query pool 0x%" PRIx64 " of type %d but a query of that type has been started on "
                        "secondary Cmd Buffer 0x%p.",
                        reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first),
                        queryPoolData->second.createInfo.queryType, reinterpret_cast<void *>(pSubCB->commandBuffer));
        }
    }

    auto primary_pool = getCommandPoolNode(dev_data, pCB->createInfo.commandPool);
    auto secondary_pool = getCommandPoolNode(dev_data, pSubCB->createInfo.commandPool);
    if (primary_pool && secondary_pool && (primary_pool->queueFamilyIndex != secondary_pool->queueFamilyIndex)) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                             reinterpret_cast<uint64_t>(pSubCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_QUEUE_FAMILY, "DS",
                             "vkCmdExecuteCommands(): Primary command buffer 0x%" PRIxLEAST64
                             " created in queue family %d has secondary command buffer 0x%" PRIxLEAST64 " created in queue family %d.",
                             reinterpret_cast<uint64_t>(pCB->commandBuffer), primary_pool->queueFamilyIndex,
                             reinterpret_cast<uint64_t>(pSubCB->commandBuffer), secondary_pool->queueFamilyIndex);
    }

    return skip_call;
}

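// CmdExecuteCommands: every element of pCommandBuffers must be a known secondary command buffer; when
// recorded inside a render pass each secondary needs RENDER_PASS_CONTINUE_BIT plus a compatible render
// pass and framebuffer, simultaneous-use and inherited-query rules are enforced, and each secondary is
// linked to this primary for in-flight tracking.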
VKAPI_ATTR void VKAPI_CALL
CmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount, const VkCommandBuffer *pCommandBuffers) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        GLOBAL_CB_NODE *pSubCB = NULL;
        for (uint32_t i = 0; i < commandBuffersCount; i++) {
            pSubCB = getCBNode(dev_data, pCommandBuffers[i]);
            if (!pSubCB) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                            "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p in element %u of pCommandBuffers array.",
                            (void *)pCommandBuffers[i], i);
            } else if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == pSubCB->createInfo.level) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                                     "vkCmdExecuteCommands() called w/ Primary Cmd Buffer 0x%p in element %u of pCommandBuffers "
                                     "array. All cmd buffers in pCommandBuffers array must be secondary.",
                                     (void *)pCommandBuffers[i], i);
            } else if (pCB->activeRenderPass) { // Secondary CB w/i RenderPass must have *CONTINUE_BIT set
                auto secondary_rp_state = getRenderPassState(dev_data, pSubCB->beginInfo.pInheritanceInfo->renderPass);
                if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) executed within render pass (0x%" PRIxLEAST64
                        ") must have had vkBeginCommandBuffer() called w/ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set.",
                        (void *)pCommandBuffers[i], (uint64_t)pCB->activeRenderPass->renderPass);
                } else {
                    // Make sure render pass is compatible with parent command buffer pass if has continue
                    if (pCB->activeRenderPass->renderPass != secondary_rp_state->renderPass) {
                        skip_call |=
                            validateRenderPassCompatibility(dev_data, commandBuffer, pCB->activeRenderPass->createInfo.ptr(),
                                                            pCommandBuffers[i], secondary_rp_state->createInfo.ptr());
                    }
                    //  If framebuffer for secondary CB is not NULL, then it must match active FB from primaryCB
                    skip_call |= validateFramebuffer(dev_data, commandBuffer, pCB, pCommandBuffers[i], pSubCB);
                }
                string errorString = "";
                // secondaryCB must have been created w/ RP compatible w/ primaryCB active renderpass
                if ((pCB->activeRenderPass->renderPass != secondary_rp_state->renderPass) &&
                    !verify_renderpass_compatibility(dev_data, pCB->activeRenderPass->createInfo.ptr(),
                                                     secondary_rp_state->createInfo.ptr(), errorString)) {
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) w/ render pass (0x%" PRIxLEAST64
                        ") is incompatible w/ primary command buffer (0x%p) w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
                        (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->renderPass, (void *)commandBuffer,
                        (uint64_t)pCB->activeRenderPass->renderPass, errorString.c_str());
                }
            }
            // TODO(mlentine): Move more logic into this method
            skip_call |= validateSecondaryCommandBufferState(dev_data, pCB, pSubCB);
            skip_call |= validateCommandBufferState(dev_data, pSubCB, "vkCmdExecuteCommands()");
            // Secondary cmdBuffers are considered pending execution starting w/
            // being recorded
            if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
                if (dev_data->globalInFlightCmdBuffers.find(pSubCB->commandBuffer) != dev_data->globalInFlightCmdBuffers.end()) {
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
                        "Attempt to simultaneously execute command buffer 0x%" PRIxLEAST64
                        " without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set!",
                        (uint64_t)(pCB->commandBuffer));
                }
                if (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
                    // Warn that non-simultaneous secondary cmd buffer renders primary non-simultaneous
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%" PRIxLEAST64
                        ") does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary command buffer "
                        "(0x%" PRIxLEAST64 ") to be treated as if it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
                        "set, even though it does.",
                        (uint64_t)(pCommandBuffers[i]), (uint64_t)(pCB->commandBuffer));
                    pCB->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
                }
            }
            if (!pCB->activeQueries.empty() && !dev_data->enabled_features.inheritedQueries) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t>(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                            "vkCmdExecuteCommands(): Secondary Command Buffer "
                            "(0x%" PRIxLEAST64 ") cannot be submitted with a query in "
                            "flight and inherited queries not "
                            "supported on this device.",
                            reinterpret_cast<uint64_t>(pCommandBuffers[i]));
            }
            pSubCB->primaryCommandBuffer = pCB->commandBuffer;
            pCB->secondaryCommandBuffers.insert(pSubCB->commandBuffer);
            dev_data->globalInFlightCmdBuffers.insert(pSubCB->commandBuffer);
            for (auto &function : pSubCB->queryUpdates) {
                pCB->queryUpdates.push_back(function);
            }
        }
        skip_call |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdExecuteCommands");
        skip_call |= addCmd(dev_data, pCB, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdExecuteCommands(commandBuffer, commandBuffersCount, pCommandBuffers);
}

// For any image objects that overlap mapped memory, verify that their layouts are PREINIT or GENERAL
static bool ValidateMapImageLayouts(VkDevice device, DEVICE_MEM_INFO const *mem_info, VkDeviceSize offset,
                                    VkDeviceSize end_offset) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // Iterate over all bound image ranges and verify that for any that overlap the
    //  map ranges, the layouts are VK_IMAGE_LAYOUT_PREINITIALIZED or VK_IMAGE_LAYOUT_GENERAL
    // TODO : This can be optimized if we store ranges based on starting address and early exit when we pass our range
    for (auto image_handle : mem_info->bound_images) {
        auto img_it = mem_info->bound_ranges.find(image_handle);
        if (img_it != mem_info->bound_ranges.end()) {
            if (rangesIntersect(dev_data, &img_it->second, offset, end_offset)) {
                std::vector<VkImageLayout> layouts;
                if (FindLayouts(dev_data, VkImage(image_handle), layouts)) {
                    for (auto layout : layouts) {
                        if (layout != VK_IMAGE_LAYOUT_PREINITIALIZED && layout != VK_IMAGE_LAYOUT_GENERAL) {
                            skip_call |=
                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot map an image with layout %s. Only "
                                                                                        "GENERAL or PREINITIALIZED are supported.",
                                        string_VkImageLayout(layout));
                        }
                    }
                }
            }
        }
    }
    return skip_call;
}

10917 VKAPI_ATTR VkResult VKAPI_CALL
10918 MapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags, void **ppData) {
10919     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10920 
10921     bool skip_call = false;
10922     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10923     std::unique_lock<std::mutex> lock(global_lock);
10924     DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
10925     if (mem_info) {
10926         // TODO : This could be more fine-grained to track just the region that is valid
10927         mem_info->global_valid = true;
10928         auto end_offset = (VK_WHOLE_SIZE == size) ? mem_info->alloc_info.allocationSize - 1 : offset + size - 1;
10929         skip_call |= ValidateMapImageLayouts(device, mem_info, offset, end_offset);
10930         // TODO : Do we need to create new "bound_range" for the mapped range?
10931         SetMemRangesValid(dev_data, mem_info, offset, end_offset);
10932         if ((dev_data->phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags &
10933              VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
10934             skip_call |=
10935                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
10936                         (uint64_t)mem, __LINE__, MEMTRACK_INVALID_STATE, "MEM",
10937                         "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj 0x%" PRIxLEAST64, (uint64_t)mem);
10938         }
10939     }
10940     skip_call |= ValidateMapMemRange(dev_data, mem, offset, size);
10941     lock.unlock();
10942 
10943     if (!skip_call) {
10944         result = dev_data->dispatch_table.MapMemory(device, mem, offset, size, flags, ppData);
10945         if (VK_SUCCESS == result) {
10946             lock.lock();
10947             // TODO : What's the point of this range? See comment on creating new "bound_range" above, which may replace this
10948             storeMemRanges(dev_data, mem, offset, size);
10949             initializeAndTrackMemory(dev_data, mem, offset, size, ppData);
10950             lock.unlock();
10951         }
10952     }
10953     return result;
10954 }
10955 
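// [Editor's note: illustrative application-side usage, not part of this layer; device, mem, src and
// copy_size are assumed.] vkMapMemory is only valid on memory allocated from a HOST_VISIBLE memory
// type, and offset/size must stay inside the allocation -- exactly what MapMemory() above validates:
//
//     void *data = nullptr;
//     // mem: VkDeviceMemory whose memoryTypeIndex has VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT
//     VkResult err = vkMapMemory(device, mem, 0 /*offset*/, VK_WHOLE_SIZE, 0 /*flags*/, &data);
//     if (err == VK_SUCCESS) {
//         memcpy(data, src, static_cast<size_t>(copy_size));  // copy_size <= allocationSize
//         vkUnmapMemory(device, mem);
//     }
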
10956 VKAPI_ATTR void VKAPI_CALL UnmapMemory(VkDevice device, VkDeviceMemory mem) {
10957     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10958     bool skip_call = false;
10959 
10960     std::unique_lock<std::mutex> lock(global_lock);
10961     skip_call |= deleteMemRanges(dev_data, mem);
10962     lock.unlock();
10963     if (!skip_call) {
10964         dev_data->dispatch_table.UnmapMemory(device, mem);
10965     }
10966 }
10967 
10968 static bool validateMemoryIsMapped(layer_data *dev_data, const char *funcName, uint32_t memRangeCount,
10969                                    const VkMappedMemoryRange *pMemRanges) {
10970     bool skip_call = false;
10971     for (uint32_t i = 0; i < memRangeCount; ++i) {
10972         auto mem_info = getMemObjInfo(dev_data, pMemRanges[i].memory);
10973         if (mem_info) {
10974             if (mem_info->mem_range.offset > pMemRanges[i].offset) {
10975                 skip_call |=
10976                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
10977                             (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
10978                             "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER ") is less than Memory Object's offset "
10979                             "(" PRINTF_SIZE_T_SPECIFIER ").",
10980                             funcName, static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(mem_info->mem_range.offset));
10981             }
10982 
10983             const uint64_t dev_dataTerminus = (mem_info->mem_range.size == VK_WHOLE_SIZE)
10984                                                   ? mem_info->alloc_info.allocationSize
10985                                                   : (mem_info->mem_range.offset + mem_info->mem_range.size);
10986             if (pMemRanges[i].size != VK_WHOLE_SIZE && (dev_dataTerminus < (pMemRanges[i].offset + pMemRanges[i].size))) {
10987                 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
10988                                      VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
10989                                      MEMTRACK_INVALID_MAP, "MEM", "%s: Flush/Invalidate upper-bound (" PRINTF_SIZE_T_SPECIFIER
10990                                                                   ") exceeds the Memory Object's upper-bound "
10991                                                                   "(" PRINTF_SIZE_T_SPECIFIER ").",
10992                                      funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size),
10993                                      static_cast<size_t>(dev_dataTerminus));
10994             }
10995         }
10996     }
10997     return skip_call;
10998 }
10999 
11000 static bool ValidateAndCopyNoncoherentMemoryToDriver(layer_data *dev_data, uint32_t memRangeCount,
11001                                                      const VkMappedMemoryRange *pMemRanges) {
11002     bool skip_call = false;
11003     for (uint32_t i = 0; i < memRangeCount; ++i) {
11004         auto mem_info = getMemObjInfo(dev_data, pMemRanges[i].memory);
11005         if (mem_info) {
11006             if (mem_info->shadow_copy) {
11007                 VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
11008                                         ? mem_info->mem_range.size
11009                                         : (mem_info->alloc_info.allocationSize - mem_info->mem_range.offset);
11010                 char *data = static_cast<char *>(mem_info->shadow_copy);
11011                 for (uint64_t j = 0; j < mem_info->shadow_pad_size; ++j) {
11012                     if (data[j] != NoncoherentMemoryFillValue) {
11013                         skip_call |= log_msg(
11014                             dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
11015                             (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
11016                             "Memory underflow was detected on mem obj 0x%" PRIxLEAST64, (uint64_t)pMemRanges[i].memory);
11017                     }
11018                 }
11019                 for (uint64_t j = (size + mem_info->shadow_pad_size); j < (2 * mem_info->shadow_pad_size + size); ++j) {
11020                     if (data[j] != NoncoherentMemoryFillValue) {
11021                         skip_call |= log_msg(
11022                             dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
11023                             (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
11024                             "Memory overflow was detected on mem obj 0x%" PRIxLEAST64, (uint64_t)pMemRanges[i].memory);
11025                     }
11026                 }
11027                 memcpy(mem_info->p_driver_data, static_cast<void *>(data + mem_info->shadow_pad_size), (size_t)(size));
11028             }
11029         }
11030     }
11031     return skip_call;
11032 }
11033 
11034 static void CopyNoncoherentMemoryFromDriver(layer_data *dev_data, uint32_t memory_range_count,
11035                                             const VkMappedMemoryRange *mem_ranges) {
11036     for (uint32_t i = 0; i < memory_range_count; ++i) {
11037         auto mem_info = getMemObjInfo(dev_data, mem_ranges[i].memory);
11038         if (mem_info && mem_info->shadow_copy) {
11039             VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
11040                                     ? mem_info->mem_range.size
11041                                     : (mem_info->alloc_info.allocationSize - mem_ranges[i].offset);
11042             char *data = static_cast<char *>(mem_info->shadow_copy);
11043             memcpy(data + mem_info->shadow_pad_size, mem_info->p_driver_data, (size_t)(size));
11044         }
11045     }
11046 }
11047 
11048 VKAPI_ATTR VkResult VKAPI_CALL
11049 FlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
11050     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
11051     bool skip_call = false;
11052     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11053 
11054     std::unique_lock<std::mutex> lock(global_lock);
11055     skip_call |= ValidateAndCopyNoncoherentMemoryToDriver(dev_data, memRangeCount, pMemRanges);
11056     skip_call |= validateMemoryIsMapped(dev_data, "vkFlushMappedMemoryRanges", memRangeCount, pMemRanges);
11057     lock.unlock();
11058     if (!skip_call) {
11059         result = dev_data->dispatch_table.FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
11060     }
11061     return result;
11062 }
11063 
11064 VKAPI_ATTR VkResult VKAPI_CALL
11065 InvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
11066     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
11067     bool skip_call = false;
11068     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11069 
11070     std::unique_lock<std::mutex> lock(global_lock);
11071     skip_call |= validateMemoryIsMapped(dev_data, "vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges);
11072     lock.unlock();
11073     if (!skip_call) {
11074         result = dev_data->dispatch_table.InvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges);
11075         // Update our shadow copy with modified driver data
11076         CopyNoncoherentMemoryFromDriver(dev_data, memRangeCount, pMemRanges);
11077     }
11078     return result;
11079 }
11080 
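// [Editor's note: illustrative application-side usage, not part of this layer; device and mem are
// assumed.] For memory types without HOST_COHERENT, host writes must be flushed and device writes
// invalidated, and the ranges passed in must lie within the currently mapped region (see
// validateMemoryIsMapped above):
//
//     VkMappedMemoryRange range = {VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE};
//     range.memory = mem;                  // the mapped VkDeviceMemory
//     range.offset = 0;
//     range.size = VK_WHOLE_SIZE;          // or a nonCoherentAtomSize-aligned sub-range
//     vkFlushMappedMemoryRanges(device, 1, &range);       // after CPU writes, before GPU reads
//     vkInvalidateMappedMemoryRanges(device, 1, &range);  // after GPU writes, before CPU reads
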
11081 VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
11082     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11083     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
11084     bool skip_call = false;
11085     std::unique_lock<std::mutex> lock(global_lock);
11086     auto image_state = getImageState(dev_data, image);
11087     if (image_state) {
11088         // Track objects tied to memory
11089         uint64_t image_handle = reinterpret_cast<uint64_t &>(image);
11090         skip_call = SetMemBinding(dev_data, mem, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "vkBindImageMemory");
11091         VkMemoryRequirements memRequirements;
11092         lock.unlock();
11093         dev_data->dispatch_table.GetImageMemoryRequirements(device, image, &memRequirements);
11094         lock.lock();
11095 
11096         // Track and validate bound memory range information
11097         auto mem_info = getMemObjInfo(dev_data, mem);
11098         if (mem_info) {
11099             skip_call |= InsertImageMemoryRange(dev_data, image, mem_info, memoryOffset, memRequirements,
11100                                                 image_state->createInfo.tiling == VK_IMAGE_TILING_LINEAR);
11101             skip_call |= ValidateMemoryTypes(dev_data, mem_info, memRequirements.memoryTypeBits, "vkBindImageMemory");
11102         }
11103 
11104         print_mem_list(dev_data);
11105         lock.unlock();
11106         if (!skip_call) {
11107             result = dev_data->dispatch_table.BindImageMemory(device, image, mem, memoryOffset);
11108             lock.lock();
11109             image_state->binding.mem = mem;
11110             image_state->binding.offset = memoryOffset;
11111             image_state->binding.size = memRequirements.size;
11112             lock.unlock();
11113         }
11114     } else {
11115         log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
11116                 reinterpret_cast<const uint64_t &>(image), __LINE__, MEMTRACK_INVALID_OBJECT, "MT",
11117                 "vkBindImageMemory: Cannot find image 0x%" PRIx64 "; has it already been deleted?",
11118                 reinterpret_cast<const uint64_t &>(image));
11119     }
11120     return result;
11121 }
11122 
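// [Editor's note: illustrative application-side usage, not part of this layer; device and image are
// assumed, and ChooseMemoryType is a hypothetical helper.] The memory bound to an image must come
// from one of the types in VkMemoryRequirements::memoryTypeBits and the bound range must fit the
// allocation -- the checks performed by BindImageMemory() above:
//
//     VkMemoryRequirements reqs;
//     vkGetImageMemoryRequirements(device, image, &reqs);
//     VkMemoryAllocateInfo alloc_info = {VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO};
//     alloc_info.allocationSize = reqs.size;
//     alloc_info.memoryTypeIndex = ChooseMemoryType(reqs.memoryTypeBits);  // hypothetical helper
//     VkDeviceMemory image_mem;
//     vkAllocateMemory(device, &alloc_info, nullptr, &image_mem);
//     vkBindImageMemory(device, image, image_mem, 0 /*memoryOffset*/);
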
11123 VKAPI_ATTR VkResult VKAPI_CALL SetEvent(VkDevice device, VkEvent event) {
11124     bool skip_call = false;
11125     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
11126     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11127     std::unique_lock<std::mutex> lock(global_lock);
11128     auto event_state = getEventNode(dev_data, event);
11129     if (event_state) {
11130         event_state->needsSignaled = false;
11131         event_state->stageMask = VK_PIPELINE_STAGE_HOST_BIT;
11132         if (event_state->write_in_use) {
11133             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
11134                                  reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
11135                                  "Cannot call vkSetEvent() on event 0x%" PRIxLEAST64 " that is already in use by a command buffer.",
11136                                  reinterpret_cast<const uint64_t &>(event));
11137         }
11138     }
11139     lock.unlock();
11140     // Host setting event is visible to all queues immediately so update stageMask for any queue that's seen this event
11141     // TODO : For correctness this needs separate fix to verify that app doesn't make incorrect assumptions about the
11142     // ordering of this command in relation to vkCmd[Set|Reset]Events (see GH297)
11143     for (auto queue_data : dev_data->queueMap) {
11144         auto event_entry = queue_data.second.eventToStageMap.find(event);
11145         if (event_entry != queue_data.second.eventToStageMap.end()) {
11146             event_entry->second |= VK_PIPELINE_STAGE_HOST_BIT;
11147         }
11148     }
11149     if (!skip_call)
11150         result = dev_data->dispatch_table.SetEvent(device, event);
11151     return result;
11152 }
11153 
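// [Editor's note: illustrative application-side usage, not part of this layer; device and cb are
// assumed.] A host signal is visible to all queues immediately; command buffers can wait on it with
// vkCmdWaitEvents using VK_PIPELINE_STAGE_HOST_BIT as the source stage:
//
//     VkEventCreateInfo event_ci = {VK_STRUCTURE_TYPE_EVENT_CREATE_INFO};
//     VkEvent event;
//     vkCreateEvent(device, &event_ci, nullptr, &event);
//     // ... record vkCmdWaitEvents(cb, 1, &event, VK_PIPELINE_STAGE_HOST_BIT, ...) and submit ...
//     vkSetEvent(device, event);  // host-side signal; an error if the event is still in use for writes
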
11154 VKAPI_ATTR VkResult VKAPI_CALL
11155 QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) {
11156     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
11157     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
11158     bool skip_call = false;
11159     std::unique_lock<std::mutex> lock(global_lock);
11160     auto pFence = getFenceNode(dev_data, fence);
11161     auto pQueue = getQueueNode(dev_data, queue);
11162 
11163     // First verify that fence is not in use
11164     skip_call |= ValidateFenceForSubmit(dev_data, pFence);
11165 
11166     if (pFence) {
11167         SubmitFence(pQueue, pFence, bindInfoCount);
11168     }
11169 
11170     for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
11171         const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
11172         // Track objects tied to memory
11173         for (uint32_t j = 0; j < bindInfo.bufferBindCount; j++) {
11174             for (uint32_t k = 0; k < bindInfo.pBufferBinds[j].bindCount; k++) {
11175                 auto sparse_binding = bindInfo.pBufferBinds[j].pBinds[k];
11176                 if (SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size},
11177                                         (uint64_t)bindInfo.pBufferBinds[j].buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
11178                                         "vkQueueBindSparse"))
11179                     skip_call = true;
11180             }
11181         }
11182         for (uint32_t j = 0; j < bindInfo.imageOpaqueBindCount; j++) {
11183             for (uint32_t k = 0; k < bindInfo.pImageOpaqueBinds[j].bindCount; k++) {
11184                 auto sparse_binding = bindInfo.pImageOpaqueBinds[j].pBinds[k];
11185                 if (SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size},
11186                                         (uint64_t)bindInfo.pImageOpaqueBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
11187                                         "vkQueueBindSparse"))
11188                     skip_call = true;
11189             }
11190         }
11191         for (uint32_t j = 0; j < bindInfo.imageBindCount; j++) {
11192             for (uint32_t k = 0; k < bindInfo.pImageBinds[j].bindCount; k++) {
11193                 auto sparse_binding = bindInfo.pImageBinds[j].pBinds[k];
11194                 // TODO: This size is broken for non-opaque bindings, need to update to comprehend full sparse binding data
11195                 VkDeviceSize size = sparse_binding.extent.depth * sparse_binding.extent.height * sparse_binding.extent.width * 4;
11196                 if (SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, size},
11197                                         (uint64_t)bindInfo.pImageBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
11198                                         "vkQueueBindSparse"))
11199                     skip_call = true;
11200             }
11201         }
11202 
11203         std::vector<SEMAPHORE_WAIT> semaphore_waits;
11204         std::vector<VkSemaphore> semaphore_signals;
11205         for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
11206             VkSemaphore semaphore = bindInfo.pWaitSemaphores[i];
11207             auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
11208             if (pSemaphore) {
11209                 if (pSemaphore->signaled) {
11210                     if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
11211                         semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
11212                         pSemaphore->in_use.fetch_add(1);
11213                     }
11214                     pSemaphore->signaler.first = VK_NULL_HANDLE;
11215                     pSemaphore->signaled = false;
11216                 } else {
11217                     skip_call |=
11218                         log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
11219                                 reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
11220                                 "vkQueueBindSparse: Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64
11221                                 " that has no way to be signaled.",
11222                                 reinterpret_cast<const uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
11223                 }
11224             }
11225         }
11226         for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
11227             VkSemaphore semaphore = bindInfo.pSignalSemaphores[i];
11228             auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
11229             if (pSemaphore) {
11230                 if (pSemaphore->signaled) {
11231                     skip_call |=
11232                         log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
11233                                 reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
11234                                 "vkQueueBindSparse: Queue 0x%" PRIx64 " is signaling semaphore 0x%" PRIx64
11235                                 ", but that semaphore is already signaled.",
11236                                 reinterpret_cast<const uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
11237                 }
11238                 else {
11239                     pSemaphore->signaler.first = queue;
11240                     pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
11241                     pSemaphore->signaled = true;
11242                     pSemaphore->in_use.fetch_add(1);
11243                     semaphore_signals.push_back(semaphore);
11244                 }
11245             }
11246         }
11247 
11248         pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(),
11249                                          semaphore_waits,
11250                                          semaphore_signals,
11251                                          bindIdx == bindInfoCount - 1 ? fence : VK_NULL_HANDLE);
11252     }
11253 
11254     if (pFence && !bindInfoCount) {
11255         // No work to do, just dropping a fence in the queue by itself.
11256         pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(),
11257                                          std::vector<SEMAPHORE_WAIT>(),
11258                                          std::vector<VkSemaphore>(),
11259                                          fence);
11260     }
11261 
11262     print_mem_list(dev_data);
11263     lock.unlock();
11264 
11265     if (!skip_call)
11266         return dev_data->dispatch_table.QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
11267 
11268     return result;
11269 }
11270 
11271 VKAPI_ATTR VkResult VKAPI_CALL CreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
11272                                                const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) {
11273     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11274     VkResult result = dev_data->dispatch_table.CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);
11275     if (result == VK_SUCCESS) {
11276         std::lock_guard<std::mutex> lock(global_lock);
11277         SEMAPHORE_NODE* sNode = &dev_data->semaphoreMap[*pSemaphore];
11278         sNode->signaler.first = VK_NULL_HANDLE;
11279         sNode->signaler.second = 0;
11280         sNode->signaled = false;
11281     }
11282     return result;
11283 }
11284 
11285 VKAPI_ATTR VkResult VKAPI_CALL
11286 CreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) {
11287     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11288     VkResult result = dev_data->dispatch_table.CreateEvent(device, pCreateInfo, pAllocator, pEvent);
11289     if (result == VK_SUCCESS) {
11290         std::lock_guard<std::mutex> lock(global_lock);
11291         dev_data->eventMap[*pEvent].needsSignaled = false;
11292         dev_data->eventMap[*pEvent].write_in_use = 0;
11293         dev_data->eventMap[*pEvent].stageMask = VkPipelineStageFlags(0);
11294     }
11295     return result;
11296 }
11297 
11298 static bool PreCallValidateCreateSwapchainKHR(layer_data *dev_data, VkSwapchainCreateInfoKHR const *pCreateInfo,
11299                                               SURFACE_STATE *surface_state, SWAPCHAIN_NODE *old_swapchain_state) {
11300     auto most_recent_swapchain = surface_state->swapchain ? surface_state->swapchain : surface_state->old_swapchain;
11301 
11302     if (most_recent_swapchain != old_swapchain_state || (surface_state->old_swapchain && surface_state->swapchain)) {
11303         if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11304                     reinterpret_cast<uint64_t>(dev_data->device), __LINE__, DRAWSTATE_SWAPCHAIN_ALREADY_EXISTS, "DS",
11305                     "vkCreateSwapchainKHR(): surface has an existing swapchain other than oldSwapchain"))
11306             return true;
11307     }
11308     if (old_swapchain_state && old_swapchain_state->createInfo.surface != pCreateInfo->surface) {
11309         if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
11310                     reinterpret_cast<uint64_t const &>(pCreateInfo->oldSwapchain), __LINE__, DRAWSTATE_SWAPCHAIN_WRONG_SURFACE,
11311                     "DS", "vkCreateSwapchainKHR(): pCreateInfo->oldSwapchain's surface is not pCreateInfo->surface"))
11312             return true;
11313     }
11314 
11315     return false;
11316 }
11317 
11318 VKAPI_ATTR VkResult VKAPI_CALL CreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
11319                                                   const VkAllocationCallbacks *pAllocator,
11320                                                   VkSwapchainKHR *pSwapchain) {
11321     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11322     auto surface_state = getSurfaceState(dev_data->instance_data, pCreateInfo->surface);
11323     auto old_swapchain_state = getSwapchainNode(dev_data, pCreateInfo->oldSwapchain);
11324 
11325     if (PreCallValidateCreateSwapchainKHR(dev_data, pCreateInfo, surface_state, old_swapchain_state))
11326         return VK_ERROR_VALIDATION_FAILED_EXT;
11327 
11328     VkResult result = dev_data->dispatch_table.CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);
11329 
11330     if (VK_SUCCESS == result) {
11331         std::lock_guard<std::mutex> lock(global_lock);
11332         auto swapchain_state = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(pCreateInfo, *pSwapchain));
11333         surface_state->swapchain = swapchain_state.get();
11334         dev_data->device_extensions.swapchainMap[*pSwapchain] = std::move(swapchain_state);
11335     } else {
11336         surface_state->swapchain = nullptr;
11337     }
11338 
11339     // Spec requires that even if CreateSwapchainKHR fails, oldSwapchain behaves as replaced.
11340     surface_state->old_swapchain = old_swapchain_state;
11341 
11342     return result;
11343 }
11344 
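// [Editor's note: illustrative application-side usage, not part of this layer; device, surface and
// old_swapchain are assumed.] A surface may only have one non-retired swapchain; when recreating
// (e.g. after a resize), pass the previous handle as oldSwapchain so the checks in
// PreCallValidateCreateSwapchainKHR() above are satisfied:
//
//     VkSwapchainCreateInfoKHR sc_ci = {VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR};
//     sc_ci.surface = surface;
//     // ... fill imageFormat, imageExtent, presentMode, etc. from the surface capabilities ...
//     sc_ci.oldSwapchain = old_swapchain;                      // VK_NULL_HANDLE on first creation
//     VkSwapchainKHR new_swapchain;
//     vkCreateSwapchainKHR(device, &sc_ci, nullptr, &new_swapchain);
//     vkDestroySwapchainKHR(device, old_swapchain, nullptr);   // retire the old swapchain when safe
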
11345 VKAPI_ATTR void VKAPI_CALL
11346 DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
11347     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11348     bool skip_call = false;
11349 
11350     std::unique_lock<std::mutex> lock(global_lock);
11351     auto swapchain_data = getSwapchainNode(dev_data, swapchain);
11352     if (swapchain_data) {
11353         if (swapchain_data->images.size() > 0) {
11354             for (auto swapchain_image : swapchain_data->images) {
11355                 auto image_sub = dev_data->imageSubresourceMap.find(swapchain_image);
11356                 if (image_sub != dev_data->imageSubresourceMap.end()) {
11357                     for (auto imgsubpair : image_sub->second) {
11358                         auto image_item = dev_data->imageLayoutMap.find(imgsubpair);
11359                         if (image_item != dev_data->imageLayoutMap.end()) {
11360                             dev_data->imageLayoutMap.erase(image_item);
11361                         }
11362                     }
11363                     dev_data->imageSubresourceMap.erase(image_sub);
11364                 }
11365                 skip_call |=
11366                     ClearMemoryObjectBindings(dev_data, (uint64_t)swapchain_image, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT);
11367                 dev_data->imageMap.erase(swapchain_image);
11368             }
11369         }
11370 
11371         auto surface_state = getSurfaceState(dev_data->instance_data, swapchain_data->createInfo.surface);
11372         if (surface_state) {
11373             if (surface_state->swapchain == swapchain_data)
11374                 surface_state->swapchain = nullptr;
11375             if (surface_state->old_swapchain == swapchain_data)
11376                 surface_state->old_swapchain = nullptr;
11377         }
11378 
11379         dev_data->device_extensions.swapchainMap.erase(swapchain);
11380     }
11381     lock.unlock();
11382     if (!skip_call)
11383         dev_data->dispatch_table.DestroySwapchainKHR(device, swapchain, pAllocator);
11384 }
11385 
11386 VKAPI_ATTR VkResult VKAPI_CALL
11387 GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pCount, VkImage *pSwapchainImages) {
11388     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11389     VkResult result = dev_data->dispatch_table.GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages);
11390 
11391     if (result == VK_SUCCESS && pSwapchainImages != NULL) {
11392         // This should never happen and is checked by param checker.
11393         if (!pCount)
11394             return result;
11395         std::lock_guard<std::mutex> lock(global_lock);
11396         const size_t count = *pCount;
11397         auto swapchain_node = getSwapchainNode(dev_data, swapchain);
11398         if (swapchain_node && !swapchain_node->images.empty()) {
11399             // TODO : Not sure I like the memcmp here, but it works
11400             const bool mismatch = (swapchain_node->images.size() != count ||
11401                                    memcmp(&swapchain_node->images[0], pSwapchainImages, sizeof(swapchain_node->images[0]) * count));
11402             if (mismatch) {
11403                 // TODO: Verify against Valid Usage section of extension
11404                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
11405                         (uint64_t)swapchain, __LINE__, MEMTRACK_NONE, "SWAP_CHAIN",
11406                         "vkGetSwapchainImagesKHR(0x%" PRIx64
11407                         ", pSwapchainImages) returned mismatching data",
11408                         (uint64_t)(swapchain));
11409             }
11410         }
11411         for (uint32_t i = 0; i < *pCount; ++i) {
11412             IMAGE_LAYOUT_NODE image_layout_node;
11413             image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED;
11414             image_layout_node.format = swapchain_node->createInfo.imageFormat;
11415             // Add imageMap entries for each swapchain image
11416             VkImageCreateInfo image_ci = {};
11417             image_ci.mipLevels = 1;
11418             image_ci.arrayLayers = swapchain_node->createInfo.imageArrayLayers;
11419             image_ci.usage = swapchain_node->createInfo.imageUsage;
11420             image_ci.format = swapchain_node->createInfo.imageFormat;
11421             image_ci.samples = VK_SAMPLE_COUNT_1_BIT;
11422             image_ci.extent.width = swapchain_node->createInfo.imageExtent.width;
11423             image_ci.extent.height = swapchain_node->createInfo.imageExtent.height;
11424             image_ci.sharingMode = swapchain_node->createInfo.imageSharingMode;
11425             dev_data->imageMap[pSwapchainImages[i]] = unique_ptr<IMAGE_STATE>(new IMAGE_STATE(pSwapchainImages[i], &image_ci));
11426             auto &image_state = dev_data->imageMap[pSwapchainImages[i]];
11427             image_state->valid = false;
11428             image_state->binding.mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY;
11429             swapchain_node->images.push_back(pSwapchainImages[i]);
11430             ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()};
11431             dev_data->imageSubresourceMap[pSwapchainImages[i]].push_back(subpair);
11432             dev_data->imageLayoutMap[subpair] = image_layout_node;
11433             dev_data->device_extensions.imageToSwapchainMap[pSwapchainImages[i]] = swapchain;
11434         }
11435     }
11436     return result;
11437 }
11438 
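// [Editor's note: illustrative application-side usage, not part of this layer; device and swapchain
// are assumed.] The usual two-call pattern: query the image count first, then retrieve the handles
// that the layer tracks above:
//
//     uint32_t image_count = 0;
//     vkGetSwapchainImagesKHR(device, swapchain, &image_count, nullptr);
//     std::vector<VkImage> images(image_count);
//     vkGetSwapchainImagesKHR(device, swapchain, &image_count, images.data());
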
11439 VKAPI_ATTR VkResult VKAPI_CALL QueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
11440     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
11441     bool skip_call = false;
11442 
11443     std::lock_guard<std::mutex> lock(global_lock);
11444     for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
11445         auto pSemaphore = getSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
11446         if (pSemaphore && !pSemaphore->signaled) {
11447             skip_call |=
11448                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
11449                             VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
11450                             "Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
11451                             reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(pPresentInfo->pWaitSemaphores[i]));
11452         }
11453     }
11454 
11455     for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
11456         auto swapchain_data = getSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
11457         if (swapchain_data) {
11458             if (pPresentInfo->pImageIndices[i] >= swapchain_data->images.size()) {
11459                 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
11460                                      reinterpret_cast<uint64_t const &>(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_INVALID_IMAGE,
11461                                      "DS", "vkQueuePresentKHR: Swapchain image index too large (%u). There are only %u images in this swapchain.",
11462                                      pPresentInfo->pImageIndices[i], (uint32_t)swapchain_data->images.size());
11463             }
11464             else {
11465                 auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
11466                 auto image_state = getImageState(dev_data, image);
11467                 skip_call |= ValidateImageMemoryIsValid(dev_data, image_state, "vkQueuePresentKHR()");
11468 
11469                 if (!image_state->acquired) {
11470                     skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
11471                                          reinterpret_cast<uint64_t const &>(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_IMAGE_NOT_ACQUIRED,
11472                                          "DS", "vkQueuePresentKHR: Swapchain image index %u has not been acquired.",
11473                                          pPresentInfo->pImageIndices[i]);
11474                 }
11475 
11476                 vector<VkImageLayout> layouts;
11477                 if (FindLayouts(dev_data, image, layouts)) {
11478                     for (auto layout : layouts) {
11479                         if (layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
11480                             skip_call |=
11481                                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
11482                                             reinterpret_cast<uint64_t &>(queue), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
11483                                             "Images passed to present must be in layout "
11484                                             "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR but this image is in %s.",
11485                                             string_VkImageLayout(layout));
11486                         }
11487                     }
11488                 }
11489             }
11490         }
11491     }
11492 
11493     if (skip_call) {
11494         return VK_ERROR_VALIDATION_FAILED_EXT;
11495     }
11496 
11497     VkResult result = dev_data->dispatch_table.QueuePresentKHR(queue, pPresentInfo);
11498 
11499     if (result != VK_ERROR_VALIDATION_FAILED_EXT) {
11500         // Semaphore waits occur before error generation, if the call reached
11501         // the ICD. (Confirm?)
11502         for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
11503             auto pSemaphore = getSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
11504             if (pSemaphore) {
11505                 pSemaphore->signaler.first = VK_NULL_HANDLE;
11506                 pSemaphore->signaled = false;
11507             }
11508         }
11509 
11510         for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
11511             // Note: this is imperfect, in that we can get confused about what
11512             // did or didn't succeed-- but if the app does that, it's confused
11513             // itself just as much.
11514             auto local_result = pPresentInfo->pResults ? pPresentInfo->pResults[i] : result;
11515 
11516             if (local_result != VK_SUCCESS && local_result != VK_SUBOPTIMAL_KHR)
11517                 continue; // this present didn't actually happen.
11518 
11519             // Mark the image as having been released to the WSI
11520             auto swapchain_data = getSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
11521             auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
11522             auto image_state = getImageState(dev_data, image);
11523             image_state->acquired = false;
11524         }
11525 
11526         // Note: even though presentation is directed to a queue, there is no
11527         // direct ordering between QP and subsequent work, so QP (and its
11528         // semaphore waits) /never/ participate in any completion proof.
11529     }
11530 
11531     return result;
11532 }
11533 
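// [Editor's note: illustrative application-side usage, not part of this layer; present_queue,
// swapchain, image_index and render_finished_semaphore are assumed.] Presenting requires that the
// image was acquired, is in VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, and that the wait semaphore has a
// pending signal -- the conditions QueuePresentKHR() above validates:
//
//     VkPresentInfoKHR present_info = {VK_STRUCTURE_TYPE_PRESENT_INFO_KHR};
//     present_info.waitSemaphoreCount = 1;
//     present_info.pWaitSemaphores = &render_finished_semaphore;  // signaled by the rendering submit
//     present_info.swapchainCount = 1;
//     present_info.pSwapchains = &swapchain;
//     present_info.pImageIndices = &image_index;                  // from vkAcquireNextImageKHR
//     vkQueuePresentKHR(present_queue, &present_info);
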
11534 VKAPI_ATTR VkResult VKAPI_CALL CreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount,
11535                                                          const VkSwapchainCreateInfoKHR *pCreateInfos,
11536                                                          const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchains) {
11537     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11538     std::unique_lock<std::mutex> lock(global_lock);
11539     VkResult result =
11540         dev_data->dispatch_table.CreateSharedSwapchainsKHR(device, swapchainCount, pCreateInfos, pAllocator, pSwapchains);
11541     return result;
11542 }
11543 
11544 VKAPI_ATTR VkResult VKAPI_CALL AcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
11545                                                    VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
11546     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11547     bool skip_call = false;
11548 
11549     std::unique_lock<std::mutex> lock(global_lock);
11550 
11551     if (fence == VK_NULL_HANDLE && semaphore == VK_NULL_HANDLE) {
11552         skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11553                              reinterpret_cast<uint64_t &>(device), __LINE__, DRAWSTATE_SWAPCHAIN_NO_SYNC_FOR_ACQUIRE, "DS",
11554                              "vkAcquireNextImageKHR: Semaphore and fence cannot both be VK_NULL_HANDLE. There would be no way "
11555                              "to determine the completion of this operation.");
11556     }
11557 
11558     auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
11559     if (pSemaphore && pSemaphore->signaled) {
11560         skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
11561                              reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
11562                              "vkAcquireNextImageKHR: Semaphore must not be currently signaled or in a wait state");
11563     }
11564 
11565     auto pFence = getFenceNode(dev_data, fence);
11566     if (pFence) {
11567         skip_call |= ValidateFenceForSubmit(dev_data, pFence);
11568     }
11569     lock.unlock();
11570 
11571     if (skip_call)
11572         return VK_ERROR_VALIDATION_FAILED_EXT;
11573 
11574     VkResult result = dev_data->dispatch_table.AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);
11575 
11576     lock.lock();
11577     if (result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR) {
11578         if (pFence) {
11579             pFence->state = FENCE_INFLIGHT;
11580             pFence->signaler.first = VK_NULL_HANDLE;   // ANI isn't on a queue, so this can't participate in a completion proof.
11581         }
11582 
11583         // A successful call to AcquireNextImageKHR counts as a signal operation on semaphore
11584         if (pSemaphore) {
11585             pSemaphore->signaled = true;
11586             pSemaphore->signaler.first = VK_NULL_HANDLE;
11587         }
11588 
11589         // Mark the image as acquired.
11590         auto swapchain_data = getSwapchainNode(dev_data, swapchain);
11591         auto image = swapchain_data->images[*pImageIndex];
11592         auto image_state = getImageState(dev_data, image);
11593         image_state->acquired = true;
11594     }
11595     lock.unlock();
11596 
11597     return result;
11598 }
11599 
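// [Editor's note: illustrative application-side usage, not part of this layer; device, swapchain and
// image_available_semaphore are assumed.] At least one of semaphore/fence must be provided so
// completion of the acquire can be observed, as checked above:
//
//     uint32_t image_index = 0;
//     vkAcquireNextImageKHR(device, swapchain, UINT64_MAX /*timeout*/,
//                           image_available_semaphore, VK_NULL_HANDLE /*fence*/, &image_index);
//     // the semaphore must be waited on by the first submission that uses the acquired image
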
11600 VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount,
11601                                                         VkPhysicalDevice *pPhysicalDevices) {
11602     bool skip_call = false;
11603     instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
11604 
11605     if (instance_data) {
11606         // For this instance, flag when vkEnumeratePhysicalDevices goes to QUERY_COUNT and then QUERY_DETAILS
11607         if (NULL == pPhysicalDevices) {
11608             instance_data->vkEnumeratePhysicalDevicesState = QUERY_COUNT;
11609         } else {
11610             if (UNCALLED == instance_data->vkEnumeratePhysicalDevicesState) {
11611                 // Flag warning here. You can call this without having queried the count, but it may not be
11612                 // robust on platforms with multiple physical devices.
11613                 skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
11614                                      0, __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL",
11615                                      "Call sequence has vkEnumeratePhysicalDevices() w/ non-NULL pPhysicalDevices. You should first "
11616                                      "call vkEnumeratePhysicalDevices() w/ NULL pPhysicalDevices to query pPhysicalDeviceCount.");
11617             } // TODO : Could also flag a warning if re-calling this function in QUERY_DETAILS state
11618             else if (instance_data->physical_devices_count != *pPhysicalDeviceCount) {
11619                 // Having actual count match count from app is not a requirement, so this can be a warning
11620                 skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11621                                      VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
11622                                      "Call to vkEnumeratePhysicalDevices() w/ pPhysicalDeviceCount value %u, but actual count "
11623                                      "supported by this instance is %u.",
11624                                      *pPhysicalDeviceCount, instance_data->physical_devices_count);
11625             }
11626             instance_data->vkEnumeratePhysicalDevicesState = QUERY_DETAILS;
11627         }
11628         if (skip_call) {
11629             return VK_ERROR_VALIDATION_FAILED_EXT;
11630         }
11631         VkResult result = instance_data->dispatch_table.EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
11632         if (NULL == pPhysicalDevices) {
11633             instance_data->physical_devices_count = *pPhysicalDeviceCount;
11634         } else if (result == VK_SUCCESS) { // Save physical devices
11635             for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) {
11636                 auto & phys_device_state = instance_data->physical_device_map[pPhysicalDevices[i]];
11637                 phys_device_state.phys_device = pPhysicalDevices[i];
11638                 // Init actual features for each physical device
11639                 instance_data->dispatch_table.GetPhysicalDeviceFeatures(pPhysicalDevices[i], &phys_device_state.features);
11640             }
11641         }
11642         return result;
11643     } else {
11644         log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, 0, __LINE__,
11645                 DEVLIMITS_INVALID_INSTANCE, "DL", "Invalid instance (0x%" PRIxLEAST64 ") passed into vkEnumeratePhysicalDevices().",
11646                 (uint64_t)instance);
11647     }
11648     return VK_ERROR_VALIDATION_FAILED_EXT;
11649 }
11650 
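// [Editor's note: illustrative application-side usage, not part of this layer; instance is assumed.]
// Querying the count first avoids the DEVLIMITS_MISSING_QUERY_COUNT warning emitted above:
//
//     uint32_t gpu_count = 0;
//     vkEnumeratePhysicalDevices(instance, &gpu_count, nullptr);
//     std::vector<VkPhysicalDevice> gpus(gpu_count);
//     vkEnumeratePhysicalDevices(instance, &gpu_count, gpus.data());
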
11651 VKAPI_ATTR void VKAPI_CALL
11652 GetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
11653     VkQueueFamilyProperties *pQueueFamilyProperties) {
11654     bool skip_call = false;
11655     instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11656     auto physical_device_state = getPhysicalDeviceState(instance_data, physicalDevice);
11657     if (physical_device_state) {
11658         if (!pQueueFamilyProperties) {
11659             physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_COUNT;
11660         }
11661         else {
11662             // Verify that for each physical device, this function is called first with NULL pQueueFamilyProperties ptr in order to
11663             // get count
11664             if (UNCALLED == physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
11665                 skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11666                     VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL",
11667                     "Call sequence has vkGetPhysicalDeviceQueueFamilyProperties() w/ non-NULL "
11668                     "pQueueFamilyProperties. You should first call vkGetPhysicalDeviceQueueFamilyProperties() w/ "
11669                     "NULL pQueueFamilyProperties to query pCount.");
11670             }
11671             // Then verify that pCount that is passed in on second call matches what was returned
11672             if (physical_device_state->queueFamilyPropertiesCount != *pCount) {
11673 
11674                 // TODO: this is not a requirement of the Valid Usage section for vkGetPhysicalDeviceQueueFamilyProperties, so
11675                 // provide as warning
11676                 skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11677                     VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
11678                     "Call to vkGetPhysicalDeviceQueueFamilyProperties() w/ pCount value %u, but actual count "
11679                     "supported by this physicalDevice is %u.",
11680                     *pCount, physical_device_state->queueFamilyPropertiesCount);
11681             }
11682             physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_DETAILS;
11683         }
11684         if (skip_call) {
11685             return;
11686         }
11687         instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(physicalDevice, pCount, pQueueFamilyProperties);
11688         if (!pQueueFamilyProperties) {
11689             physical_device_state->queueFamilyPropertiesCount = *pCount;
11690         }
11691         else { // Save queue family properties
11692             if (physical_device_state->queue_family_properties.size() < *pCount)
11693                 physical_device_state->queue_family_properties.resize(*pCount);
11694             for (uint32_t i = 0; i < *pCount; i++) {
11695                 physical_device_state->queue_family_properties[i] = pQueueFamilyProperties[i];
11696             }
11697         }
11698     }
11699     else {
11700         log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
11701             __LINE__, DEVLIMITS_INVALID_PHYSICAL_DEVICE, "DL",
11702             "Invalid physicalDevice (0x%" PRIxLEAST64 ") passed into vkGetPhysicalDeviceQueueFamilyProperties().",
11703             (uint64_t)physicalDevice);
11704     }
11705 }
11706 
11707 template<typename TCreateInfo, typename FPtr>
11708 static VkResult CreateSurface(VkInstance instance, TCreateInfo const *pCreateInfo,
11709                               VkAllocationCallbacks const *pAllocator, VkSurfaceKHR *pSurface,
11710                               FPtr fptr)
11711 {
11712     instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
11713 
11714     // Call down the call chain:
11715     VkResult result = (instance_data->dispatch_table.*fptr)(instance, pCreateInfo, pAllocator, pSurface);
11716 
11717     if (result == VK_SUCCESS) {
11718         std::unique_lock<std::mutex> lock(global_lock);
11719         instance_data->surface_map[*pSurface] = SURFACE_STATE(*pSurface);
11720         lock.unlock();
11721     }
11722 
11723     return result;
11724 }
11725 
11726 VKAPI_ATTR void VKAPI_CALL DestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks *pAllocator) {
11727     bool skip_call = false;
11728     instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
11729     std::unique_lock<std::mutex> lock(global_lock);
11730     auto surface_state = getSurfaceState(instance_data, surface);
11731 
11732     if (surface_state) {
11733         // TODO: track swapchains created from this surface.
11734         instance_data->surface_map.erase(surface);
11735     }
11736     lock.unlock();
11737 
11738     if (!skip_call) {
11739         // Call down the call chain:
11740         instance_data->dispatch_table.DestroySurfaceKHR(instance, surface, pAllocator);
11741     }
11742 }
11743 
11744 #ifdef VK_USE_PLATFORM_ANDROID_KHR
11745 VKAPI_ATTR VkResult VKAPI_CALL CreateAndroidSurfaceKHR(VkInstance instance, const VkAndroidSurfaceCreateInfoKHR *pCreateInfo,
11746                                                        const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11747     return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateAndroidSurfaceKHR);
11748 }
11749 #endif // VK_USE_PLATFORM_ANDROID_KHR
11750 
11751 #ifdef VK_USE_PLATFORM_MIR_KHR
11752 VKAPI_ATTR VkResult VKAPI_CALL CreateMirSurfaceKHR(VkInstance instance, const VkMirSurfaceCreateInfoKHR *pCreateInfo,
11753                                                    const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11754     return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateMirSurfaceKHR);
11755 }
11756 #endif // VK_USE_PLATFORM_MIR_KHR
11757 
11758 #ifdef VK_USE_PLATFORM_WAYLAND_KHR
11759 VKAPI_ATTR VkResult VKAPI_CALL CreateWaylandSurfaceKHR(VkInstance instance, const VkWaylandSurfaceCreateInfoKHR *pCreateInfo,
11760                                                        const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11761     return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateWaylandSurfaceKHR);
11762 }
11763 #endif // VK_USE_PLATFORM_WAYLAND_KHR
11764 
11765 #ifdef VK_USE_PLATFORM_WIN32_KHR
11766 VKAPI_ATTR VkResult VKAPI_CALL CreateWin32SurfaceKHR(VkInstance instance, const VkWin32SurfaceCreateInfoKHR *pCreateInfo,
11767                                                      const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11768     return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateWin32SurfaceKHR);
11769 }
11770 #endif // VK_USE_PLATFORM_WIN32_KHR
11771 
11772 #ifdef VK_USE_PLATFORM_XCB_KHR
11773 VKAPI_ATTR VkResult VKAPI_CALL CreateXcbSurfaceKHR(VkInstance instance, const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
11774                                                    const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11775     return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateXcbSurfaceKHR);
11776 }
11777 #endif // VK_USE_PLATFORM_XCB_KHR
11778 
11779 #ifdef VK_USE_PLATFORM_XLIB_KHR
CreateXlibSurfaceKHR(VkInstance instance,const VkXlibSurfaceCreateInfoKHR * pCreateInfo,const VkAllocationCallbacks * pAllocator,VkSurfaceKHR * pSurface)11780 VKAPI_ATTR VkResult VKAPI_CALL CreateXlibSurfaceKHR(VkInstance instance, const VkXlibSurfaceCreateInfoKHR *pCreateInfo,
11781                                                    const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11782     return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateXlibSurfaceKHR);
11783 }
11784 #endif // VK_USE_PLATFORM_XLIB_KHR
11785 
11786 
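// Debug report callback creation is passed down the chain first; on success the callback is
// also registered with this layer's report_data (via layer_create_msg_callback) so that
// validation messages generated by the layer itself reach the application.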
VKAPI_ATTR VkResult VKAPI_CALL
CreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
                             const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
    VkResult res = instance_data->dispatch_table.CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
    if (VK_SUCCESS == res) {
        std::lock_guard<std::mutex> lock(global_lock);
        res = layer_create_msg_callback(instance_data->report_data, false, pCreateInfo, pAllocator, pMsgCallback);
    }
    return res;
}

VKAPI_ATTR void VKAPI_CALL DestroyDebugReportCallbackEXT(VkInstance instance,
                                                         VkDebugReportCallbackEXT msgCallback,
                                                         const VkAllocationCallbacks *pAllocator) {
    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
    instance_data->dispatch_table.DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
    std::lock_guard<std::mutex> lock(global_lock);
    layer_destroy_msg_callback(instance_data->report_data, msgCallback, pAllocator);
}

VKAPI_ATTR void VKAPI_CALL
DebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
                      size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
    instance_data->dispatch_table.DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
}

VKAPI_ATTR VkResult VKAPI_CALL
EnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
    return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
}

VKAPI_ATTR VkResult VKAPI_CALL
EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
    return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
}

VKAPI_ATTR VkResult VKAPI_CALL
EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
    if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
        return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);

    return VK_ERROR_LAYER_NOT_PRESENT;
}

VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                                                  const char *pLayerName, uint32_t *pCount,
                                                                  VkExtensionProperties *pProperties) {
    if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
        return util_GetExtensionProperties(0, NULL, pCount, pProperties);

    assert(physicalDevice);

    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    return instance_data->dispatch_table.EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
}

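// Name-to-function lookup helpers used by GetInstanceProcAddr / GetDeviceProcAddr below.
// GetInstanceProcAddr tries core instance, core device, KHR swapchain, and KHR surface
// commands in turn; GetDeviceProcAddr tries core device and KHR swapchain commands. Any
// name that is not intercepted is forwarded to the next layer's GetProcAddr in the chain.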
static PFN_vkVoidFunction
intercept_core_instance_command(const char *name);

static PFN_vkVoidFunction
intercept_core_device_command(const char *name);

static PFN_vkVoidFunction
intercept_khr_swapchain_command(const char *name, VkDevice dev);

static PFN_vkVoidFunction
intercept_khr_surface_command(const char *name, VkInstance instance);

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice dev, const char *funcName) {
    PFN_vkVoidFunction proc = intercept_core_device_command(funcName);
    if (proc)
        return proc;

    assert(dev);

    proc = intercept_khr_swapchain_command(funcName, dev);
    if (proc)
        return proc;

    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);

    auto &table = dev_data->dispatch_table;
    if (!table.GetDeviceProcAddr)
        return nullptr;
    return table.GetDeviceProcAddr(dev, funcName);
}

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName) {
    PFN_vkVoidFunction proc = intercept_core_instance_command(funcName);
    if (!proc)
        proc = intercept_core_device_command(funcName);
    if (!proc)
        proc = intercept_khr_swapchain_command(funcName, VK_NULL_HANDLE);
    if (!proc)
        proc = intercept_khr_surface_command(funcName, instance);
    if (proc)
        return proc;

    assert(instance);

    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
    proc = debug_report_get_instance_proc_addr(instance_data->report_data, funcName);
    if (proc)
        return proc;

    auto &table = instance_data->dispatch_table;
    if (!table.GetInstanceProcAddr)
        return nullptr;
    return table.GetInstanceProcAddr(instance, funcName);
}

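// Usage sketch (hypothetical application code, not part of this layer): entry points are
// resolved through the functions above, e.g.
//     auto pfn = reinterpret_cast<PFN_vkQueuePresentKHR>(
//         vkGetDeviceProcAddr(device, "vkQueuePresentKHR"));
// which lands in intercept_khr_swapchain_command() and returns this layer's QueuePresentKHR
// when VK_KHR_swapchain was enabled on the device.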
static PFN_vkVoidFunction
intercept_core_instance_command(const char *name) {
    static const struct {
        const char *name;
        PFN_vkVoidFunction proc;
    } core_instance_commands[] = {
        { "vkGetInstanceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetInstanceProcAddr) },
        { "vkGetDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceProcAddr) },
        { "vkCreateInstance", reinterpret_cast<PFN_vkVoidFunction>(CreateInstance) },
        { "vkCreateDevice", reinterpret_cast<PFN_vkVoidFunction>(CreateDevice) },
        { "vkEnumeratePhysicalDevices", reinterpret_cast<PFN_vkVoidFunction>(EnumeratePhysicalDevices) },
        { "vkGetPhysicalDeviceQueueFamilyProperties", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceQueueFamilyProperties) },
        { "vkDestroyInstance", reinterpret_cast<PFN_vkVoidFunction>(DestroyInstance) },
        { "vkEnumerateInstanceLayerProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateInstanceLayerProperties) },
        { "vkEnumerateDeviceLayerProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateDeviceLayerProperties) },
        { "vkEnumerateInstanceExtensionProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateInstanceExtensionProperties) },
        { "vkEnumerateDeviceExtensionProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateDeviceExtensionProperties) },
    };

    for (size_t i = 0; i < ARRAY_SIZE(core_instance_commands); i++) {
        if (!strcmp(core_instance_commands[i].name, name))
            return core_instance_commands[i].proc;
    }

    return nullptr;
}

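// Core device-level commands intercepted by this layer; each name maps directly to the
// corresponding handler implemented earlier in this translation unit.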
static PFN_vkVoidFunction
intercept_core_device_command(const char *name) {
    static const struct {
        const char *name;
        PFN_vkVoidFunction proc;
    } core_device_commands[] = {
        {"vkGetDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceProcAddr)},
        {"vkQueueSubmit", reinterpret_cast<PFN_vkVoidFunction>(QueueSubmit)},
        {"vkWaitForFences", reinterpret_cast<PFN_vkVoidFunction>(WaitForFences)},
        {"vkGetFenceStatus", reinterpret_cast<PFN_vkVoidFunction>(GetFenceStatus)},
        {"vkQueueWaitIdle", reinterpret_cast<PFN_vkVoidFunction>(QueueWaitIdle)},
        {"vkDeviceWaitIdle", reinterpret_cast<PFN_vkVoidFunction>(DeviceWaitIdle)},
        {"vkGetDeviceQueue", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceQueue)},
        {"vkDestroyInstance", reinterpret_cast<PFN_vkVoidFunction>(DestroyInstance)},
        {"vkDestroyDevice", reinterpret_cast<PFN_vkVoidFunction>(DestroyDevice)},
        {"vkDestroyFence", reinterpret_cast<PFN_vkVoidFunction>(DestroyFence)},
        {"vkResetFences", reinterpret_cast<PFN_vkVoidFunction>(ResetFences)},
        {"vkDestroySemaphore", reinterpret_cast<PFN_vkVoidFunction>(DestroySemaphore)},
        {"vkDestroyEvent", reinterpret_cast<PFN_vkVoidFunction>(DestroyEvent)},
        {"vkDestroyQueryPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyQueryPool)},
        {"vkDestroyBuffer", reinterpret_cast<PFN_vkVoidFunction>(DestroyBuffer)},
        {"vkDestroyBufferView", reinterpret_cast<PFN_vkVoidFunction>(DestroyBufferView)},
        {"vkDestroyImage", reinterpret_cast<PFN_vkVoidFunction>(DestroyImage)},
        {"vkDestroyImageView", reinterpret_cast<PFN_vkVoidFunction>(DestroyImageView)},
        {"vkDestroyShaderModule", reinterpret_cast<PFN_vkVoidFunction>(DestroyShaderModule)},
        {"vkDestroyPipeline", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipeline)},
        {"vkDestroyPipelineLayout", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipelineLayout)},
        {"vkDestroySampler", reinterpret_cast<PFN_vkVoidFunction>(DestroySampler)},
        {"vkDestroyDescriptorSetLayout", reinterpret_cast<PFN_vkVoidFunction>(DestroyDescriptorSetLayout)},
        {"vkDestroyDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyDescriptorPool)},
        {"vkDestroyFramebuffer", reinterpret_cast<PFN_vkVoidFunction>(DestroyFramebuffer)},
        {"vkDestroyRenderPass", reinterpret_cast<PFN_vkVoidFunction>(DestroyRenderPass)},
        {"vkCreateBuffer", reinterpret_cast<PFN_vkVoidFunction>(CreateBuffer)},
        {"vkCreateBufferView", reinterpret_cast<PFN_vkVoidFunction>(CreateBufferView)},
        {"vkCreateImage", reinterpret_cast<PFN_vkVoidFunction>(CreateImage)},
        {"vkCreateImageView", reinterpret_cast<PFN_vkVoidFunction>(CreateImageView)},
        {"vkCreateFence", reinterpret_cast<PFN_vkVoidFunction>(CreateFence)},
        {"vkCreatePipelineCache", reinterpret_cast<PFN_vkVoidFunction>(CreatePipelineCache)},
        {"vkDestroyPipelineCache", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipelineCache)},
        {"vkGetPipelineCacheData", reinterpret_cast<PFN_vkVoidFunction>(GetPipelineCacheData)},
        {"vkMergePipelineCaches", reinterpret_cast<PFN_vkVoidFunction>(MergePipelineCaches)},
        {"vkCreateGraphicsPipelines", reinterpret_cast<PFN_vkVoidFunction>(CreateGraphicsPipelines)},
        {"vkCreateComputePipelines", reinterpret_cast<PFN_vkVoidFunction>(CreateComputePipelines)},
        {"vkCreateSampler", reinterpret_cast<PFN_vkVoidFunction>(CreateSampler)},
        {"vkCreateDescriptorSetLayout", reinterpret_cast<PFN_vkVoidFunction>(CreateDescriptorSetLayout)},
        {"vkCreatePipelineLayout", reinterpret_cast<PFN_vkVoidFunction>(CreatePipelineLayout)},
        {"vkCreateDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(CreateDescriptorPool)},
        {"vkResetDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(ResetDescriptorPool)},
        {"vkAllocateDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(AllocateDescriptorSets)},
        {"vkFreeDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(FreeDescriptorSets)},
        {"vkUpdateDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(UpdateDescriptorSets)},
        {"vkCreateCommandPool", reinterpret_cast<PFN_vkVoidFunction>(CreateCommandPool)},
        {"vkDestroyCommandPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyCommandPool)},
        {"vkResetCommandPool", reinterpret_cast<PFN_vkVoidFunction>(ResetCommandPool)},
        {"vkCreateQueryPool", reinterpret_cast<PFN_vkVoidFunction>(CreateQueryPool)},
        {"vkAllocateCommandBuffers", reinterpret_cast<PFN_vkVoidFunction>(AllocateCommandBuffers)},
        {"vkFreeCommandBuffers", reinterpret_cast<PFN_vkVoidFunction>(FreeCommandBuffers)},
        {"vkBeginCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(BeginCommandBuffer)},
        {"vkEndCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(EndCommandBuffer)},
        {"vkResetCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(ResetCommandBuffer)},
        {"vkCmdBindPipeline", reinterpret_cast<PFN_vkVoidFunction>(CmdBindPipeline)},
        {"vkCmdSetViewport", reinterpret_cast<PFN_vkVoidFunction>(CmdSetViewport)},
        {"vkCmdSetScissor", reinterpret_cast<PFN_vkVoidFunction>(CmdSetScissor)},
        {"vkCmdSetLineWidth", reinterpret_cast<PFN_vkVoidFunction>(CmdSetLineWidth)},
        {"vkCmdSetDepthBias", reinterpret_cast<PFN_vkVoidFunction>(CmdSetDepthBias)},
        {"vkCmdSetBlendConstants", reinterpret_cast<PFN_vkVoidFunction>(CmdSetBlendConstants)},
        {"vkCmdSetDepthBounds", reinterpret_cast<PFN_vkVoidFunction>(CmdSetDepthBounds)},
        {"vkCmdSetStencilCompareMask", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilCompareMask)},
        {"vkCmdSetStencilWriteMask", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilWriteMask)},
        {"vkCmdSetStencilReference", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilReference)},
        {"vkCmdBindDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(CmdBindDescriptorSets)},
        {"vkCmdBindVertexBuffers", reinterpret_cast<PFN_vkVoidFunction>(CmdBindVertexBuffers)},
        {"vkCmdBindIndexBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdBindIndexBuffer)},
        {"vkCmdDraw", reinterpret_cast<PFN_vkVoidFunction>(CmdDraw)},
        {"vkCmdDrawIndexed", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndexed)},
        {"vkCmdDrawIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndirect)},
        {"vkCmdDrawIndexedIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndexedIndirect)},
        {"vkCmdDispatch", reinterpret_cast<PFN_vkVoidFunction>(CmdDispatch)},
        {"vkCmdDispatchIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDispatchIndirect)},
        {"vkCmdCopyBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyBuffer)},
        {"vkCmdCopyImage", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyImage)},
        {"vkCmdBlitImage", reinterpret_cast<PFN_vkVoidFunction>(CmdBlitImage)},
        {"vkCmdCopyBufferToImage", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyBufferToImage)},
        {"vkCmdCopyImageToBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyImageToBuffer)},
        {"vkCmdUpdateBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdUpdateBuffer)},
        {"vkCmdFillBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdFillBuffer)},
        {"vkCmdClearColorImage", reinterpret_cast<PFN_vkVoidFunction>(CmdClearColorImage)},
        {"vkCmdClearDepthStencilImage", reinterpret_cast<PFN_vkVoidFunction>(CmdClearDepthStencilImage)},
        {"vkCmdClearAttachments", reinterpret_cast<PFN_vkVoidFunction>(CmdClearAttachments)},
        {"vkCmdResolveImage", reinterpret_cast<PFN_vkVoidFunction>(CmdResolveImage)},
        {"vkCmdSetEvent", reinterpret_cast<PFN_vkVoidFunction>(CmdSetEvent)},
        {"vkCmdResetEvent", reinterpret_cast<PFN_vkVoidFunction>(CmdResetEvent)},
        {"vkCmdWaitEvents", reinterpret_cast<PFN_vkVoidFunction>(CmdWaitEvents)},
        {"vkCmdPipelineBarrier", reinterpret_cast<PFN_vkVoidFunction>(CmdPipelineBarrier)},
        {"vkCmdBeginQuery", reinterpret_cast<PFN_vkVoidFunction>(CmdBeginQuery)},
        {"vkCmdEndQuery", reinterpret_cast<PFN_vkVoidFunction>(CmdEndQuery)},
        {"vkCmdResetQueryPool", reinterpret_cast<PFN_vkVoidFunction>(CmdResetQueryPool)},
        {"vkCmdCopyQueryPoolResults", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyQueryPoolResults)},
        {"vkCmdPushConstants", reinterpret_cast<PFN_vkVoidFunction>(CmdPushConstants)},
        {"vkCmdWriteTimestamp", reinterpret_cast<PFN_vkVoidFunction>(CmdWriteTimestamp)},
        {"vkCreateFramebuffer", reinterpret_cast<PFN_vkVoidFunction>(CreateFramebuffer)},
        {"vkCreateShaderModule", reinterpret_cast<PFN_vkVoidFunction>(CreateShaderModule)},
        {"vkCreateRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CreateRenderPass)},
        {"vkCmdBeginRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CmdBeginRenderPass)},
        {"vkCmdNextSubpass", reinterpret_cast<PFN_vkVoidFunction>(CmdNextSubpass)},
        {"vkCmdEndRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CmdEndRenderPass)},
        {"vkCmdExecuteCommands", reinterpret_cast<PFN_vkVoidFunction>(CmdExecuteCommands)},
        {"vkSetEvent", reinterpret_cast<PFN_vkVoidFunction>(SetEvent)},
        {"vkMapMemory", reinterpret_cast<PFN_vkVoidFunction>(MapMemory)},
        {"vkUnmapMemory", reinterpret_cast<PFN_vkVoidFunction>(UnmapMemory)},
        {"vkFlushMappedMemoryRanges", reinterpret_cast<PFN_vkVoidFunction>(FlushMappedMemoryRanges)},
        {"vkInvalidateMappedMemoryRanges", reinterpret_cast<PFN_vkVoidFunction>(InvalidateMappedMemoryRanges)},
        {"vkAllocateMemory", reinterpret_cast<PFN_vkVoidFunction>(AllocateMemory)},
        {"vkFreeMemory", reinterpret_cast<PFN_vkVoidFunction>(FreeMemory)},
        {"vkBindBufferMemory", reinterpret_cast<PFN_vkVoidFunction>(BindBufferMemory)},
        {"vkGetBufferMemoryRequirements", reinterpret_cast<PFN_vkVoidFunction>(GetBufferMemoryRequirements)},
        {"vkGetImageMemoryRequirements", reinterpret_cast<PFN_vkVoidFunction>(GetImageMemoryRequirements)},
        {"vkGetQueryPoolResults", reinterpret_cast<PFN_vkVoidFunction>(GetQueryPoolResults)},
        {"vkBindImageMemory", reinterpret_cast<PFN_vkVoidFunction>(BindImageMemory)},
        {"vkQueueBindSparse", reinterpret_cast<PFN_vkVoidFunction>(QueueBindSparse)},
        {"vkCreateSemaphore", reinterpret_cast<PFN_vkVoidFunction>(CreateSemaphore)},
        {"vkCreateEvent", reinterpret_cast<PFN_vkVoidFunction>(CreateEvent)},
    };

    for (size_t i = 0; i < ARRAY_SIZE(core_device_commands); i++) {
        if (!strcmp(core_device_commands[i].name, name))
            return core_device_commands[i].proc;
    }

    return nullptr;
}

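// WSI swapchain commands are only handed out when the corresponding device extension was
// enabled: the VK_KHR_swapchain entry points require wsi_enabled, and
// vkCreateSharedSwapchainsKHR additionally requires wsi_display_swapchain_enabled. When no
// device is supplied (the GetInstanceProcAddr path) the names are matched unconditionally.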
static PFN_vkVoidFunction
intercept_khr_swapchain_command(const char *name, VkDevice dev) {
    static const struct {
        const char *name;
        PFN_vkVoidFunction proc;
    } khr_swapchain_commands[] = {
        { "vkCreateSwapchainKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateSwapchainKHR) },
        { "vkDestroySwapchainKHR", reinterpret_cast<PFN_vkVoidFunction>(DestroySwapchainKHR) },
        { "vkGetSwapchainImagesKHR", reinterpret_cast<PFN_vkVoidFunction>(GetSwapchainImagesKHR) },
        { "vkAcquireNextImageKHR", reinterpret_cast<PFN_vkVoidFunction>(AcquireNextImageKHR) },
        { "vkQueuePresentKHR", reinterpret_cast<PFN_vkVoidFunction>(QueuePresentKHR) },
    };
    layer_data *dev_data = nullptr;

    if (dev) {
        dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);
        if (!dev_data->device_extensions.wsi_enabled)
            return nullptr;
    }

    for (size_t i = 0; i < ARRAY_SIZE(khr_swapchain_commands); i++) {
        if (!strcmp(khr_swapchain_commands[i].name, name))
            return khr_swapchain_commands[i].proc;
    }

    if (dev_data) {
        if (!dev_data->device_extensions.wsi_display_swapchain_enabled)
            return nullptr;
    }

    if (!strcmp("vkCreateSharedSwapchainsKHR", name))
        return reinterpret_cast<PFN_vkVoidFunction>(CreateSharedSwapchainsKHR);

    return nullptr;
}

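// Each surface command below is paired with a pointer-to-member enable flag in
// instance_layer_data; a command is only returned when the matching surface extension was
// enabled on the instance (or when no instance is available to check against).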
static PFN_vkVoidFunction
intercept_khr_surface_command(const char *name, VkInstance instance) {
    static const struct {
        const char *name;
        PFN_vkVoidFunction proc;
        bool instance_layer_data::*enable;
    } khr_surface_commands[] = {
#ifdef VK_USE_PLATFORM_ANDROID_KHR
        {"vkCreateAndroidSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateAndroidSurfaceKHR),
            &instance_layer_data::androidSurfaceExtensionEnabled},
#endif // VK_USE_PLATFORM_ANDROID_KHR
#ifdef VK_USE_PLATFORM_MIR_KHR
        {"vkCreateMirSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateMirSurfaceKHR),
            &instance_layer_data::mirSurfaceExtensionEnabled},
#endif // VK_USE_PLATFORM_MIR_KHR
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
        {"vkCreateWaylandSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateWaylandSurfaceKHR),
            &instance_layer_data::waylandSurfaceExtensionEnabled},
#endif // VK_USE_PLATFORM_WAYLAND_KHR
#ifdef VK_USE_PLATFORM_WIN32_KHR
        {"vkCreateWin32SurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateWin32SurfaceKHR),
            &instance_layer_data::win32SurfaceExtensionEnabled},
#endif // VK_USE_PLATFORM_WIN32_KHR
#ifdef VK_USE_PLATFORM_XCB_KHR
        {"vkCreateXcbSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateXcbSurfaceKHR),
            &instance_layer_data::xcbSurfaceExtensionEnabled},
#endif // VK_USE_PLATFORM_XCB_KHR
#ifdef VK_USE_PLATFORM_XLIB_KHR
        {"vkCreateXlibSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateXlibSurfaceKHR),
            &instance_layer_data::xlibSurfaceExtensionEnabled},
#endif // VK_USE_PLATFORM_XLIB_KHR
        {"vkDestroySurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(DestroySurfaceKHR),
            &instance_layer_data::surfaceExtensionEnabled},
    };

    instance_layer_data *instance_data = nullptr;
    if (instance) {
        instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
    }

    for (size_t i = 0; i < ARRAY_SIZE(khr_surface_commands); i++) {
        if (!strcmp(khr_surface_commands[i].name, name)) {
            if (instance_data && !(instance_data->*(khr_surface_commands[i].enable)))
                return nullptr;
            return khr_surface_commands[i].proc;
        }
    }

    return nullptr;
}

} // namespace core_validation

// vk_layer_logging.h expects these to be defined

VKAPI_ATTR VkResult VKAPI_CALL
vkCreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
                               const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
    return core_validation::CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
}

VKAPI_ATTR void VKAPI_CALL
vkDestroyDebugReportCallbackEXT(VkInstance instance,
                                VkDebugReportCallbackEXT msgCallback,
                                const VkAllocationCallbacks *pAllocator) {
    core_validation::DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
}

VKAPI_ATTR void VKAPI_CALL
vkDebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
                        size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
    core_validation::DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
}

// loader-layer interface v0, just wrappers since there is only a layer

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
    return core_validation::EnumerateInstanceExtensionProperties(pLayerName, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
    return core_validation::EnumerateInstanceLayerProperties(pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
    // the layer command handles VK_NULL_HANDLE just fine internally
    assert(physicalDevice == VK_NULL_HANDLE);
    return core_validation::EnumerateDeviceLayerProperties(VK_NULL_HANDLE, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                                                                    const char *pLayerName, uint32_t *pCount,
                                                                                    VkExtensionProperties *pProperties) {
    // the layer command handles VK_NULL_HANDLE just fine internally
    assert(physicalDevice == VK_NULL_HANDLE);
    return core_validation::EnumerateDeviceExtensionProperties(VK_NULL_HANDLE, pLayerName, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
    return core_validation::GetDeviceProcAddr(dev, funcName);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
    return core_validation::GetInstanceProcAddr(instance, funcName);
}