/* Copyright (c) 2015-2019 The Khronos Group Inc.
 * Copyright (c) 2015-2019 Valve Corporation
 * Copyright (c) 2015-2019 LunarG, Inc.
 * Copyright (C) 2015-2019 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Cody Northrop <cnorthrop@google.com>
 * Author: Michael Lentine <mlentine@google.com>
 * Author: Tobin Ehlis <tobine@google.com>
 * Author: Chia-I Wu <olv@google.com>
 * Author: Chris Forbes <chrisf@ijw.co.nz>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Ian Elliott <ianelliott@google.com>
 * Author: Dave Houlton <daveh@lunarg.com>
 * Author: Dustin Graves <dustin@lunarg.com>
 * Author: Jeremy Hayes <jeremy@lunarg.com>
 * Author: Jon Ashburn <jon@lunarg.com>
 * Author: Karl Schultz <karl@lunarg.com>
 * Author: Mark Young <marky@lunarg.com>
 * Author: Mike Schuchardt <mikes@lunarg.com>
 * Author: Mike Weiblen <mikew@lunarg.com>
 * Author: Tony Barbour <tony@LunarG.com>
 * Author: John Zulauf <jzulauf@lunarg.com>
 * Author: Shannon McPherson <shannon@lunarg.com>
 */

// Allow use of STL min and max functions in Windows
#define NOMINMAX

#include <algorithm>
#include <array>
#include <assert.h>
#include <cmath>
#include <iostream>
#include <list>
#include <map>
#include <memory>
#include <mutex>
#include <set>
#include <sstream>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <valarray>

#include "vk_loader_platform.h"
#include "vk_dispatch_table_helper.h"
#include "vk_enum_string_helper.h"
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wwrite-strings"
#endif
#if defined(__GNUC__)
#pragma GCC diagnostic warning "-Wwrite-strings"
#endif
#include "chassis.h"
#include "convert_to_renderpass2.h"
#include "core_validation.h"
#include "buffer_validation.h"
#include "shader_validation.h"
#include "vk_layer_utils.h"

// These functions are defined *outside* the core_validation namespace as their type
// is also defined outside that namespace
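//
// Per the Vulkan spec's "Pipeline Layout Compatibility" rules, two pipeline layouts
// are compatible for descriptor set N when they were created with identical push
// constant ranges and with the same descriptor set layouts for sets 0 through N.
// PipelineLayoutCompatDef captures exactly that tuple, so hash() and operator==
// below only consider sets 0..set.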
size_t PipelineLayoutCompatDef::hash() const {
    hash_util::HashCombiner hc;
    // The set number is integral to the CompatDef's distinctiveness
    hc << set << push_constant_ranges.get();
    const auto &descriptor_set_layouts = *set_layouts_id.get();
    for (uint32_t i = 0; i <= set; i++) {
        hc << descriptor_set_layouts[i].get();
    }
    return hc.Value();
}

bool PipelineLayoutCompatDef::operator==(const PipelineLayoutCompatDef &other) const {
    if ((set != other.set) || (push_constant_ranges != other.push_constant_ranges)) {
        return false;
    }

    if (set_layouts_id == other.set_layouts_id) {
        // if it's the same set_layouts_id, then *any* subset will match
        return true;
    }

    // They aren't exactly the same PipelineLayoutSetLayouts, so we need to check if the required subsets match
    const auto &descriptor_set_layouts = *set_layouts_id.get();
    assert(set < descriptor_set_layouts.size());
    const auto &other_ds_layouts = *other.set_layouts_id.get();
    assert(set < other_ds_layouts.size());
    for (uint32_t i = 0; i <= set; i++) {
        if (descriptor_set_layouts[i] != other_ds_layouts[i]) {
            return false;
        }
    }
    return true;
}

using std::max;
using std::string;
using std::stringstream;
using std::unique_ptr;
using std::unordered_map;
using std::unordered_set;
using std::vector;

// WSI Image Objects bypass usual Image Object creation methods.  A special Memory
// Object value will be used to identify them internally.
static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);
// 2nd special memory handle used to flag object as unbound from memory
static const VkDeviceMemory MEMORY_UNBOUND = VkDeviceMemory(~((uint64_t)(0)) - 1);
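// Both sentinels sit at the very top of the 64-bit handle space, on the assumption
// (not guaranteed by the spec) that no driver hands out these values as real
// VkDeviceMemory handles.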

// Return buffer state ptr for specified buffer or else NULL
BUFFER_STATE *CoreChecks::GetBufferState(VkBuffer buffer) {
    auto buff_it = bufferMap.find(buffer);
    if (buff_it == bufferMap.end()) {
        return nullptr;
    }
    return buff_it->second.get();
}

// Return IMAGE_VIEW_STATE ptr for specified imageView or else NULL
IMAGE_VIEW_STATE *CoreChecks::GetImageViewState(VkImageView image_view) {
    auto iv_it = imageViewMap.find(image_view);
    if (iv_it == imageViewMap.end()) {
        return nullptr;
    }
    return iv_it->second.get();
}

// Get the global map of pending releases
GlobalQFOTransferBarrierMap<VkImageMemoryBarrier> &CoreChecks::GetGlobalQFOReleaseBarrierMap(
    const QFOTransferBarrier<VkImageMemoryBarrier>::Tag &type_tag) {
    return qfo_release_image_barrier_map;
}
GlobalQFOTransferBarrierMap<VkBufferMemoryBarrier> &CoreChecks::GetGlobalQFOReleaseBarrierMap(
    const QFOTransferBarrier<VkBufferMemoryBarrier>::Tag &type_tag) {
    return qfo_release_buffer_barrier_map;
}
// Get the image view state for a given framebuffer attachment
IMAGE_VIEW_STATE *CoreChecks::GetAttachmentImageViewState(FRAMEBUFFER_STATE *framebuffer, uint32_t index) {
    assert(framebuffer && (index < framebuffer->createInfo.attachmentCount));
#ifdef FRAMEBUFFER_ATTACHMENT_STATE_CACHE
    return framebuffer->attachments[index].view_state;
#else
    const VkImageView &image_view = framebuffer->createInfo.pAttachments[index];
    return GetImageViewState(image_view);
#endif
}

// Return sampler node ptr for specified sampler or else NULL
SAMPLER_STATE *CoreChecks::GetSamplerState(VkSampler sampler) {
    auto sampler_it = samplerMap.find(sampler);
    if (sampler_it == samplerMap.end()) {
        return nullptr;
    }
    return sampler_it->second.get();
}
// Return image state ptr for specified image or else NULL
IMAGE_STATE *CoreChecks::GetImageState(VkImage image) {
    auto img_it = imageMap.find(image);
    if (img_it == imageMap.end()) {
        return nullptr;
    }
    return img_it->second.get();
}
// Return swapchain node for specified swapchain or else NULL
SWAPCHAIN_NODE *CoreChecks::GetSwapchainNode(VkSwapchainKHR swapchain) {
    auto swp_it = swapchainMap.find(swapchain);
    if (swp_it == swapchainMap.end()) {
        return nullptr;
    }
    return swp_it->second.get();
}
// Return buffer view state ptr for specified buffer view or else NULL
BUFFER_VIEW_STATE *CoreChecks::GetBufferViewState(VkBufferView buffer_view) {
    auto bv_it = bufferViewMap.find(buffer_view);
    if (bv_it == bufferViewMap.end()) {
        return nullptr;
    }
    return bv_it->second.get();
}

FENCE_NODE *CoreChecks::GetFenceNode(VkFence fence) {
    auto it = fenceMap.find(fence);
    if (it == fenceMap.end()) {
        return nullptr;
    }
    return &it->second;
}

EVENT_STATE *CoreChecks::GetEventNode(VkEvent event) {
    auto it = eventMap.find(event);
    if (it == eventMap.end()) {
        return nullptr;
    }
    return &it->second;
}

QUERY_POOL_NODE *CoreChecks::GetQueryPoolNode(VkQueryPool query_pool) {
    auto it = queryPoolMap.find(query_pool);
    if (it == queryPoolMap.end()) {
        return nullptr;
    }
    return &it->second;
}

QUEUE_STATE *CoreChecks::GetQueueState(VkQueue queue) {
    auto it = queueMap.find(queue);
    if (it == queueMap.end()) {
        return nullptr;
    }
    return &it->second;
}

SEMAPHORE_NODE *CoreChecks::GetSemaphoreNode(VkSemaphore semaphore) {
    auto it = semaphoreMap.find(semaphore);
    if (it == semaphoreMap.end()) {
        return nullptr;
    }
    return &it->second;
}

COMMAND_POOL_NODE *CoreChecks::GetCommandPoolNode(VkCommandPool pool) {
    auto it = commandPoolMap.find(pool);
    if (it == commandPoolMap.end()) {
        return nullptr;
    }
    return &it->second;
}

PHYSICAL_DEVICE_STATE *CoreChecks::GetPhysicalDeviceState(VkPhysicalDevice phys) {
    auto *phys_dev_map = ((physical_device_map.size() > 0) ? &physical_device_map : &instance_state->physical_device_map);
    auto it = phys_dev_map->find(phys);
    if (it == phys_dev_map->end()) {
        return nullptr;
    }
    return &it->second;
}

PHYSICAL_DEVICE_STATE *CoreChecks::GetPhysicalDeviceState() { return physical_device_state; }

SURFACE_STATE *CoreChecks::GetSurfaceState(VkSurfaceKHR surface) {
    auto *surf_map = ((surface_map.size() > 0) ? &surface_map : &instance_state->surface_map);
    auto it = surf_map->find(surface);
    if (it == surf_map->end()) {
        return nullptr;
    }
    return &it->second;
}

// Return ptr to memory binding for given handle of specified type
BINDABLE *CoreChecks::GetObjectMemBinding(uint64_t handle, VulkanObjectType type) {
    switch (type) {
        case kVulkanObjectTypeImage:
            return GetImageState(VkImage(handle));
        case kVulkanObjectTypeBuffer:
            return GetBufferState(VkBuffer(handle));
        default:
            break;
    }
    return nullptr;
}

std::unordered_map<VkSamplerYcbcrConversion, uint64_t> *CoreChecks::GetYcbcrConversionFormatMap() {
    return &ycbcr_conversion_ahb_fmt_map;
}

std::unordered_set<uint64_t> *CoreChecks::GetAHBExternalFormatsSet() { return &ahb_ext_formats_set; }

// prototype
GLOBAL_CB_NODE *GetCBNode(layer_data const *, const VkCommandBuffer);

// Return ptr to info in map container containing mem, or NULL if not found
//  Calls to this function should be wrapped in mutex
DEVICE_MEM_INFO *CoreChecks::GetMemObjInfo(const VkDeviceMemory mem) {
    auto mem_it = memObjMap.find(mem);
    if (mem_it == memObjMap.end()) {
        return NULL;
    }
    return mem_it->second.get();
}

void CoreChecks::AddMemObjInfo(layer_data *dev_data, void *object, const VkDeviceMemory mem,
                               const VkMemoryAllocateInfo *pAllocateInfo) {
    assert(object != NULL);

    auto *mem_info = new DEVICE_MEM_INFO(object, mem, pAllocateInfo);
    dev_data->memObjMap[mem] = unique_ptr<DEVICE_MEM_INFO>(mem_info);

    auto dedicated = lvl_find_in_chain<VkMemoryDedicatedAllocateInfoKHR>(pAllocateInfo->pNext);
    if (dedicated) {
        mem_info->is_dedicated = true;
        mem_info->dedicated_buffer = dedicated->buffer;
        mem_info->dedicated_image = dedicated->image;
    }
    auto export_info = lvl_find_in_chain<VkExportMemoryAllocateInfo>(pAllocateInfo->pNext);
    if (export_info) {
        mem_info->is_export = true;
        mem_info->export_handle_type_flags = export_info->handleTypes;
    }
}
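
// A sketch (hypothetical, not part of this layer's current bookkeeping) of how the
// pNext walk above could be extended for another extension struct using the same
// lvl_find_in_chain<> helper:
//     auto import_info = lvl_find_in_chain<VkImportMemoryFdInfoKHR>(pAllocateInfo->pNext);
//     if (import_info) {
//         // e.g. record that this allocation was imported from a POSIX fd
//     }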

// Create binding link between given sampler and command buffer node
void CoreChecks::AddCommandBufferBindingSampler(GLOBAL_CB_NODE *cb_node, SAMPLER_STATE *sampler_state) {
    sampler_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert({HandleToUint64(sampler_state->sampler), kVulkanObjectTypeSampler});
}

// Create binding link between given image node and command buffer node
void CoreChecks::AddCommandBufferBindingImage(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_STATE *image_state) {
    // Skip validation if this image was created through WSI
    if (image_state->binding.mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        // First update CB binding in MemObj mini CB list
        for (auto mem_binding : image_state->GetBoundMemory()) {
            DEVICE_MEM_INFO *pMemInfo = GetMemObjInfo(mem_binding);
            if (pMemInfo) {
                pMemInfo->cb_bindings.insert(cb_node);
                // Now update CBInfo's Mem reference list
                cb_node->memObjs.insert(mem_binding);
            }
        }
        // Now update cb binding for image
        cb_node->object_bindings.insert({HandleToUint64(image_state->image), kVulkanObjectTypeImage});
        image_state->cb_bindings.insert(cb_node);
    }
}

// Create binding link between given image view node and its image with command buffer node
void CoreChecks::AddCommandBufferBindingImageView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node,
                                                  IMAGE_VIEW_STATE *view_state) {
    // First add bindings for imageView
    view_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert({HandleToUint64(view_state->image_view), kVulkanObjectTypeImageView});
    auto image_state = GetImageState(view_state->create_info.image);
    // Add bindings for image within imageView
    if (image_state) {
        AddCommandBufferBindingImage(dev_data, cb_node, image_state);
    }
}

// Create binding link between given buffer node and command buffer node
void CoreChecks::AddCommandBufferBindingBuffer(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_STATE *buffer_state) {
    // First update CB binding in MemObj mini CB list
    for (auto mem_binding : buffer_state->GetBoundMemory()) {
        DEVICE_MEM_INFO *pMemInfo = GetMemObjInfo(mem_binding);
        if (pMemInfo) {
            pMemInfo->cb_bindings.insert(cb_node);
            // Now update CBInfo's Mem reference list
            cb_node->memObjs.insert(mem_binding);
        }
    }
    // Now update cb binding for buffer
    cb_node->object_bindings.insert({HandleToUint64(buffer_state->buffer), kVulkanObjectTypeBuffer});
    buffer_state->cb_bindings.insert(cb_node);
}

// Create binding link between given buffer view node and its buffer with command buffer node
void CoreChecks::AddCommandBufferBindingBufferView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node,
                                                   BUFFER_VIEW_STATE *view_state) {
    // First add bindings for bufferView
    view_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert({HandleToUint64(view_state->buffer_view), kVulkanObjectTypeBufferView});
    auto buffer_state = GetBufferState(view_state->create_info.buffer);
    // Add bindings for buffer within bufferView
    if (buffer_state) {
        AddCommandBufferBindingBuffer(dev_data, cb_node, buffer_state);
    }
}
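
// Taken together, the AddCommandBufferBinding* helpers above maintain a two-way
// reference graph: each state object records the command buffers that use it
// (cb_bindings), and each command buffer records the objects and memory it uses
// (object_bindings / memObjs). Later validation can then walk either direction,
// e.g. when memory is freed or a command buffer is reset.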

// For every mem obj bound to particular CB, free bindings related to that CB
void CoreChecks::ClearCmdBufAndMemReferences(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
    if (cb_node) {
        if (cb_node->memObjs.size() > 0) {
            for (auto mem : cb_node->memObjs) {
                DEVICE_MEM_INFO *pInfo = GetMemObjInfo(mem);
                if (pInfo) {
                    pInfo->cb_bindings.erase(cb_node);
                }
            }
            cb_node->memObjs.clear();
        }
    }
}

// Clear a single object binding from given memory object
void CoreChecks::ClearMemoryObjectBinding(uint64_t handle, VulkanObjectType type, VkDeviceMemory mem) {
    DEVICE_MEM_INFO *mem_info = GetMemObjInfo(mem);
    // This obj is bound to a memory object. Remove the reference to this object in that memory object's list
    if (mem_info) {
        mem_info->obj_bindings.erase({handle, type});
    }
}

// ClearMemoryObjectBindings clears the binding of objects to memory
//  For the given object it pulls the memory bindings and makes sure that the bindings
//  no longer refer to the object being cleared. This occurs when objects are destroyed.
void CoreChecks::ClearMemoryObjectBindings(uint64_t handle, VulkanObjectType type) {
    BINDABLE *mem_binding = GetObjectMemBinding(handle, type);
    if (mem_binding) {
        if (!mem_binding->sparse) {
            ClearMemoryObjectBinding(handle, type, mem_binding->binding.mem);
        } else {  // Sparse, clear all bindings
            for (auto &sparse_mem_binding : mem_binding->sparse_bindings) {
                ClearMemoryObjectBinding(handle, type, sparse_mem_binding.mem);
            }
        }
    }
}

// For given mem object, verify that it is not null or UNBOUND; if it is, report an error. Return skip value.
bool VerifyBoundMemoryIsValid(const layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, const char *api_name,
                              const char *type_name, const char *error_code) {
    bool result = false;
    if (VK_NULL_HANDLE == mem) {
        result =
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle, error_code,
                    "%s: Vk%s object %s used with no memory bound. Memory should be bound by calling vkBind%sMemory().", api_name,
                    type_name, dev_data->report_data->FormatHandle(handle).c_str(), type_name);
    } else if (MEMORY_UNBOUND == mem) {
        result =
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle, error_code,
                    "%s: Vk%s object %s used with no memory bound and previously bound memory was freed. Memory must not be freed "
                    "prior to this operation.",
                    api_name, type_name, dev_data->report_data->FormatHandle(handle).c_str());
    }
    return result;
}

// Check to see if memory was ever bound to this image
bool CoreChecks::ValidateMemoryIsBoundToImage(const layer_data *dev_data, const IMAGE_STATE *image_state, const char *api_name,
                                              const char *error_code) {
    bool result = false;
    if (0 == (static_cast<uint32_t>(image_state->createInfo.flags) & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)) {
        result = VerifyBoundMemoryIsValid(dev_data, image_state->binding.mem, HandleToUint64(image_state->image), api_name, "Image",
                                          error_code);
    }
    return result;
}

// Check to see if memory was bound to this buffer
bool CoreChecks::ValidateMemoryIsBoundToBuffer(const layer_data *dev_data, const BUFFER_STATE *buffer_state, const char *api_name,
                                               const char *error_code) {
    bool result = false;
    if (0 == (static_cast<uint32_t>(buffer_state->createInfo.flags) & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)) {
        result = VerifyBoundMemoryIsValid(dev_data, buffer_state->binding.mem, HandleToUint64(buffer_state->buffer), api_name,
                                          "Buffer", error_code);
    }
    return result;
}

// SetMemBinding is used to establish immutable, non-sparse binding between a single image/buffer object and memory object.
// Corresponding valid usage checks are in ValidateSetMemBinding().
void CoreChecks::SetMemBinding(layer_data *dev_data, VkDeviceMemory mem, BINDABLE *mem_binding, VkDeviceSize memory_offset,
                               uint64_t handle, VulkanObjectType type) {
    assert(mem_binding);
    mem_binding->binding.mem = mem;
    mem_binding->UpdateBoundMemorySet();  // force recreation of cached set
    mem_binding->binding.offset = memory_offset;
    mem_binding->binding.size = mem_binding->requirements.size;

    if (mem != VK_NULL_HANDLE) {
        DEVICE_MEM_INFO *mem_info = GetMemObjInfo(mem);
        if (mem_info) {
            mem_info->obj_bindings.insert({handle, type});
            // For image objects, make sure default memory state is correctly set
            // TODO : What's the best/correct way to handle this?
            if (kVulkanObjectTypeImage == type) {
                auto const image_state = reinterpret_cast<const IMAGE_STATE *>(mem_binding);
                if (image_state) {
                    VkImageCreateInfo ici = image_state->createInfo;
                    if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
                        // TODO::  More memory state transition stuff.
                    }
                }
            }
        }
    }
}

// Valid usage checks for a call to SetMemBinding().
// For the NULL mem case, no check is performed here. Otherwise:
//  Make sure the given object is in the global object map
//  If the object was created with sparse flags, report an error
//  If a previous binding existed, or the previously bound memory was freed, report an error
// TODO: We may need to refactor or pass in multiple valid usage statements to handle multiple valid usage conditions.
bool CoreChecks::ValidateSetMemBinding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, VulkanObjectType type,
                                       const char *apiName) {
    bool skip = false;
    // It's an error to bind an object to NULL memory
    if (mem != VK_NULL_HANDLE) {
        BINDABLE *mem_binding = GetObjectMemBinding(handle, type);
        assert(mem_binding);
        if (mem_binding->sparse) {
            const char *error_code = "VUID-vkBindImageMemory-image-01045";
            const char *handle_type = "IMAGE";
            if (type == kVulkanObjectTypeBuffer) {
                error_code = "VUID-vkBindBufferMemory-buffer-01030";
                handle_type = "BUFFER";
            } else {
                assert(type == kVulkanObjectTypeImage);
            }
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            HandleToUint64(mem), error_code,
                            "In %s, attempting to bind memory (%s) to object (%s) which was created with sparse memory flags "
                            "(VK_%s_CREATE_SPARSE_*_BIT).",
                            apiName, dev_data->report_data->FormatHandle(mem).c_str(),
                            dev_data->report_data->FormatHandle(handle).c_str(), handle_type);
        }
        DEVICE_MEM_INFO *mem_info = GetMemObjInfo(mem);
        if (mem_info) {
            DEVICE_MEM_INFO *prev_binding = GetMemObjInfo(mem_binding->binding.mem);
            if (prev_binding) {
                const char *error_code = "VUID-vkBindImageMemory-image-01044";
                if (type == kVulkanObjectTypeBuffer) {
                    error_code = "VUID-vkBindBufferMemory-buffer-01029";
                } else {
                    assert(type == kVulkanObjectTypeImage);
                }
                skip |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                    HandleToUint64(mem), error_code,
                    "In %s, attempting to bind memory (%s) to object (%s) which has already been bound to mem object %s.", apiName,
                    dev_data->report_data->FormatHandle(mem).c_str(), dev_data->report_data->FormatHandle(handle).c_str(),
                    dev_data->report_data->FormatHandle(prev_binding->mem).c_str());
            } else if (mem_binding->binding.mem == MEMORY_UNBOUND) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                HandleToUint64(mem), kVUID_Core_MemTrack_RebindObject,
                                "In %s, attempting to bind memory (%s) to object (%s) which was previously bound to memory that "
                                "has since been freed. Memory bindings are immutable in Vulkan, so this attempt to bind to new "
                                "memory is not allowed.",
                                apiName, dev_data->report_data->FormatHandle(mem).c_str(),
                                dev_data->report_data->FormatHandle(handle).c_str());
            }
        }
    }
    return skip;
}

// For the NULL mem case, clear any previous binding. Otherwise:
//  Make sure the given object is in its object map
//  Add a reference from the memory object's binding list to this object
//  Record the sparse binding on the object's binding info
// Returns a skip value (currently always false; this function emits no validation errors)
bool CoreChecks::SetSparseMemBinding(layer_data *dev_data, MEM_BINDING binding, uint64_t handle, VulkanObjectType type) {
    bool skip = false;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (binding.mem == VK_NULL_HANDLE) {
        // TODO : This should cause the range of the resource to be unbound according to spec
    } else {
        BINDABLE *mem_binding = GetObjectMemBinding(handle, type);
        assert(mem_binding);
        if (mem_binding) {  // Invalid handles are reported by object tracker, but Get returns NULL for them, so avoid SEGV here
            assert(mem_binding->sparse);
            DEVICE_MEM_INFO *mem_info = GetMemObjInfo(binding.mem);
            if (mem_info) {
                mem_info->obj_bindings.insert({handle, type});
                // Need to set mem binding for this object
                mem_binding->sparse_bindings.insert(binding);
                mem_binding->UpdateBoundMemorySet();
            }
        }
    }
    return skip;
}
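
// Note: sparse bindings originate from vkQueueBindSparse(), where each
// VkSparseMemoryBind region supplies the memory/offset/size recorded above, so a
// sparse resource can accumulate many bindings, unlike the single immutable
// binding established by SetMemBinding().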

bool CoreChecks::ValidateDeviceQueueFamily(layer_data *device_data, uint32_t queue_family, const char *cmd_name,
                                           const char *parameter_name, const char *error_code, bool optional = false) {
    bool skip = false;
    if (!optional && queue_family == VK_QUEUE_FAMILY_IGNORED) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(device_data->device), error_code,
                        "%s: %s is VK_QUEUE_FAMILY_IGNORED, but it is required to provide a valid queue family index value.",
                        cmd_name, parameter_name);
    } else if (device_data->queue_family_index_map.find(queue_family) == device_data->queue_family_index_map.end()) {
        skip |=
            log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                    HandleToUint64(device_data->device), error_code,
                    "%s: %s (= %" PRIu32
                    ") is not one of the queue families given via VkDeviceQueueCreateInfo structures when the device was created.",
                    cmd_name, parameter_name, queue_family);
    }

    return skip;
}

bool CoreChecks::ValidateQueueFamilies(layer_data *device_data, uint32_t queue_family_count, const uint32_t *queue_families,
                                       const char *cmd_name, const char *array_parameter_name, const char *unique_error_code,
                                       const char *valid_error_code, bool optional = false) {
    bool skip = false;
    if (queue_families) {
        std::unordered_set<uint32_t> set;
        for (uint32_t i = 0; i < queue_family_count; ++i) {
            std::string parameter_name = std::string(array_parameter_name) + "[" + std::to_string(i) + "]";

            if (set.count(queue_families[i])) {
                skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                                HandleToUint64(device_data->device), unique_error_code,
                                "%s: %s (=%" PRIu32 ") is not unique within %s array.", cmd_name, parameter_name.c_str(),
                                queue_families[i], array_parameter_name);
            } else {
                set.insert(queue_families[i]);
                skip |= ValidateDeviceQueueFamily(device_data, queue_families[i], cmd_name, parameter_name.c_str(),
                                                  valid_error_code, optional);
            }
        }
    }
    return skip;
}
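
// Typical call shape (a sketch; the command name, parameter name, and VUID strings
// here are illustrative only):
//     skip |= ValidateQueueFamilies(dev_data, info->queueFamilyIndexCount, info->pQueueFamilyIndices,
//                                   "vkCreateBuffer", "pCreateInfo->pQueueFamilyIndices",
//                                   "VUID-...uniqueness...", "VUID-...validity...");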

// Check object status for selected flag state
bool CoreChecks::ValidateStatus(layer_data *dev_data, GLOBAL_CB_NODE *pNode, CBStatusFlags status_mask, VkFlags msg_flags,
                                const char *fail_msg, const char *msg_code) {
    if (!(pNode->status & status_mask)) {
        return log_msg(dev_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                       HandleToUint64(pNode->commandBuffer), msg_code, "command buffer object %s: %s.",
                       dev_data->report_data->FormatHandle(pNode->commandBuffer).c_str(), fail_msg);
    }
    return false;
}

// Retrieve pipeline node ptr for given pipeline object
PIPELINE_STATE *CoreChecks::GetPipelineState(VkPipeline pipeline) {
    auto it = pipelineMap.find(pipeline);
    if (it == pipelineMap.end()) {
        return nullptr;
    }
    return it->second.get();
}

RENDER_PASS_STATE *CoreChecks::GetRenderPassState(VkRenderPass renderpass) {
    auto it = renderPassMap.find(renderpass);
    if (it == renderPassMap.end()) {
        return nullptr;
    }
    return it->second.get();
}

std::shared_ptr<RENDER_PASS_STATE> CoreChecks::GetRenderPassStateSharedPtr(VkRenderPass renderpass) {
    auto it = renderPassMap.find(renderpass);
    if (it == renderPassMap.end()) {
        return nullptr;
    }
    return it->second;
}

FRAMEBUFFER_STATE *CoreChecks::GetFramebufferState(VkFramebuffer framebuffer) {
    auto it = frameBufferMap.find(framebuffer);
    if (it == frameBufferMap.end()) {
        return nullptr;
    }
    return it->second.get();
}

std::shared_ptr<cvdescriptorset::DescriptorSetLayout const> const GetDescriptorSetLayout(layer_data const *dev_data,
                                                                                         VkDescriptorSetLayout dsLayout) {
    auto it = dev_data->descriptorSetLayoutMap.find(dsLayout);
    if (it == dev_data->descriptorSetLayoutMap.end()) {
        return nullptr;
    }
    return it->second;
}

PIPELINE_LAYOUT_NODE const *CoreChecks::GetPipelineLayout(layer_data const *dev_data, VkPipelineLayout pipeLayout) {
    auto it = dev_data->pipelineLayoutMap.find(pipeLayout);
    if (it == dev_data->pipelineLayoutMap.end()) {
        return nullptr;
    }
    return &it->second;
}

shader_module const *CoreChecks::GetShaderModuleState(VkShaderModule module) {
    auto it = shaderModuleMap.find(module);
    if (it == shaderModuleMap.end()) {
        return nullptr;
    }
    return it->second.get();
}

const TEMPLATE_STATE *CoreChecks::GetDescriptorTemplateState(const layer_data *dev_data,
                                                             VkDescriptorUpdateTemplateKHR descriptor_update_template) {
    const auto it = dev_data->desc_template_map.find(descriptor_update_template);
    if (it == dev_data->desc_template_map.cend()) {
        return nullptr;
    }
    return it->second.get();
}

// Return true if for a given PSO, the given state enum is dynamic, else return false
static bool IsDynamic(const PIPELINE_STATE *pPipeline, const VkDynamicState state) {
    if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
        for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
            if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) return true;
        }
    }
    return false;
}
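// For example, IsDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT) is true only when
// VK_DYNAMIC_STATE_VIEWPORT was listed in pDynamicStates at pipeline creation time;
// the viewport must then come from vkCmdSetViewport() rather than the PSO itself.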

// Validate state stored as flags at time of draw call
bool CoreChecks::ValidateDrawStateFlags(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const PIPELINE_STATE *pPipe, bool indexed,
                                        const char *msg_code) {
    bool result = false;
    if (pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_LIST ||
        pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP) {
        result |= ValidateStatus(dev_data, pCB, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                 "Dynamic line width state not set for this command buffer", msg_code);
    }
    if (pPipe->graphicsPipelineCI.pRasterizationState &&
        (pPipe->graphicsPipelineCI.pRasterizationState->depthBiasEnable == VK_TRUE)) {
        result |= ValidateStatus(dev_data, pCB, CBSTATUS_DEPTH_BIAS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                 "Dynamic depth bias state not set for this command buffer", msg_code);
    }
    if (pPipe->blendConstantsEnabled) {
        result |= ValidateStatus(dev_data, pCB, CBSTATUS_BLEND_CONSTANTS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                 "Dynamic blend constants state not set for this command buffer", msg_code);
    }
    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
        (pPipe->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) {
        result |= ValidateStatus(dev_data, pCB, CBSTATUS_DEPTH_BOUNDS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                 "Dynamic depth bounds state not set for this command buffer", msg_code);
    }
    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
        (pPipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE)) {
        result |= ValidateStatus(dev_data, pCB, CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                 "Dynamic stencil read mask state not set for this command buffer", msg_code);
        result |= ValidateStatus(dev_data, pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                 "Dynamic stencil write mask state not set for this command buffer", msg_code);
        result |= ValidateStatus(dev_data, pCB, CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                 "Dynamic stencil reference state not set for this command buffer", msg_code);
    }
    if (indexed) {
        result |= ValidateStatus(dev_data, pCB, CBSTATUS_INDEX_BUFFER_BOUND, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                 "Index buffer object not bound to this command buffer when Indexed Draw attempted", msg_code);
    }

    return result;
}

bool CoreChecks::LogInvalidAttachmentMessage(layer_data const *dev_data, const char *type1_string,
                                             const RENDER_PASS_STATE *rp1_state, const char *type2_string,
                                             const RENDER_PASS_STATE *rp2_state, uint32_t primary_attach, uint32_t secondary_attach,
                                             const char *msg, const char *caller, const char *error_code) {
    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                   HandleToUint64(rp1_state->renderPass), error_code,
                   "%s: RenderPasses incompatible between %s w/ renderPass %s and %s w/ renderPass %s. Attachment %u is not "
                   "compatible with %u: %s.",
                   caller, type1_string, dev_data->report_data->FormatHandle(rp1_state->renderPass).c_str(), type2_string,
                   dev_data->report_data->FormatHandle(rp2_state->renderPass).c_str(), primary_attach, secondary_attach, msg);
}

bool CoreChecks::ValidateAttachmentCompatibility(layer_data const *dev_data, const char *type1_string,
                                                 const RENDER_PASS_STATE *rp1_state, const char *type2_string,
                                                 const RENDER_PASS_STATE *rp2_state, uint32_t primary_attach,
                                                 uint32_t secondary_attach, const char *caller, const char *error_code) {
    bool skip = false;
    const auto &primaryPassCI = rp1_state->createInfo;
    const auto &secondaryPassCI = rp2_state->createInfo;
    if (primaryPassCI.attachmentCount <= primary_attach) {
        primary_attach = VK_ATTACHMENT_UNUSED;
    }
    if (secondaryPassCI.attachmentCount <= secondary_attach) {
        secondary_attach = VK_ATTACHMENT_UNUSED;
    }
    if (primary_attach == VK_ATTACHMENT_UNUSED && secondary_attach == VK_ATTACHMENT_UNUSED) {
        return skip;
    }
    if (primary_attach == VK_ATTACHMENT_UNUSED) {
        skip |= LogInvalidAttachmentMessage(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_attach,
                                            secondary_attach, "The first is unused while the second is not.", caller, error_code);
        return skip;
    }
    if (secondary_attach == VK_ATTACHMENT_UNUSED) {
        skip |= LogInvalidAttachmentMessage(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_attach,
                                            secondary_attach, "The second is unused while the first is not.", caller, error_code);
        return skip;
    }
    if (primaryPassCI.pAttachments[primary_attach].format != secondaryPassCI.pAttachments[secondary_attach].format) {
        skip |= LogInvalidAttachmentMessage(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_attach,
                                            secondary_attach, "They have different formats.", caller, error_code);
    }
    if (primaryPassCI.pAttachments[primary_attach].samples != secondaryPassCI.pAttachments[secondary_attach].samples) {
        skip |= LogInvalidAttachmentMessage(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_attach,
                                            secondary_attach, "They have different samples.", caller, error_code);
    }
    if (primaryPassCI.pAttachments[primary_attach].flags != secondaryPassCI.pAttachments[secondary_attach].flags) {
        skip |= LogInvalidAttachmentMessage(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_attach,
                                            secondary_attach, "They have different flags.", caller, error_code);
    }

    return skip;
}

bool CoreChecks::ValidateSubpassCompatibility(layer_data const *dev_data, const char *type1_string,
                                              const RENDER_PASS_STATE *rp1_state, const char *type2_string,
                                              const RENDER_PASS_STATE *rp2_state, const int subpass, const char *caller,
                                              const char *error_code) {
    bool skip = false;
    const auto &primary_desc = rp1_state->createInfo.pSubpasses[subpass];
    const auto &secondary_desc = rp2_state->createInfo.pSubpasses[subpass];
    uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
    for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
        uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.inputAttachmentCount) {
            primary_input_attach = primary_desc.pInputAttachments[i].attachment;
        }
        if (i < secondary_desc.inputAttachmentCount) {
            secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
        }
        skip |= ValidateAttachmentCompatibility(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_input_attach,
                                                secondary_input_attach, caller, error_code);
    }
    uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
    for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
        uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.colorAttachmentCount) {
            primary_color_attach = primary_desc.pColorAttachments[i].attachment;
        }
        if (i < secondary_desc.colorAttachmentCount) {
            secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
        }
        skip |= ValidateAttachmentCompatibility(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_color_attach,
                                                secondary_color_attach, caller, error_code);
        if (rp1_state->createInfo.subpassCount > 1) {
            uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
            if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
                primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
            }
            if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
                secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
            }
            skip |= ValidateAttachmentCompatibility(dev_data, type1_string, rp1_state, type2_string, rp2_state,
                                                    primary_resolve_attach, secondary_resolve_attach, caller, error_code);
        }
    }
    uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
    if (primary_desc.pDepthStencilAttachment) {
        primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
    }
    if (secondary_desc.pDepthStencilAttachment) {
        secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
    }
    skip |= ValidateAttachmentCompatibility(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_depthstencil_attach,
                                            secondary_depthstencil_attach, caller, error_code);
    return skip;
}

// Verify that given renderPass CreateInfo for primary and secondary command buffers are compatible.
//  This function deals directly with the CreateInfo; there are overloaded versions below that can take the renderPass handle and
//  will then feed into this function
bool CoreChecks::ValidateRenderPassCompatibility(layer_data const *dev_data, const char *type1_string,
                                                 const RENDER_PASS_STATE *rp1_state, const char *type2_string,
                                                 const RENDER_PASS_STATE *rp2_state, const char *caller, const char *error_code) {
    bool skip = false;

    if (rp1_state->createInfo.subpassCount != rp2_state->createInfo.subpassCount) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                        HandleToUint64(rp1_state->renderPass), error_code,
                        "%s: RenderPasses incompatible between %s w/ renderPass %s with a subpassCount of %u and %s w/ renderPass "
                        "%s with a subpassCount of %u.",
                        caller, type1_string, dev_data->report_data->FormatHandle(rp1_state->renderPass).c_str(),
                        rp1_state->createInfo.subpassCount, type2_string,
                        dev_data->report_data->FormatHandle(rp2_state->renderPass).c_str(), rp2_state->createInfo.subpassCount);
    } else {
        for (uint32_t i = 0; i < rp1_state->createInfo.subpassCount; ++i) {
            skip |= ValidateSubpassCompatibility(dev_data, type1_string, rp1_state, type2_string, rp2_state, i, caller, error_code);
        }
    }
    return skip;
}

// Return Set node ptr for specified set or else NULL
cvdescriptorset::DescriptorSet *CoreChecks::GetSetNode(VkDescriptorSet set) {
    auto set_it = setMap.find(set);
    if (set_it == setMap.end()) {
        return NULL;
    }
    return set_it->second;
}

// For given pipeline, return number of MSAA samples, or one if MSAA disabled
static VkSampleCountFlagBits GetNumSamples(PIPELINE_STATE const *pipe) {
    if (pipe->graphicsPipelineCI.pMultisampleState != NULL &&
        VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pipe->graphicsPipelineCI.pMultisampleState->sType) {
        return pipe->graphicsPipelineCI.pMultisampleState->rasterizationSamples;
    }
    return VK_SAMPLE_COUNT_1_BIT;
}
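// Note: VkSampleCountFlagBits values are powers of two equal to the count itself
// (e.g. VK_SAMPLE_COUNT_4_BIT == 4), so the returned bit doubles as a sample count.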

static void ListBits(std::ostream &s, uint32_t bits) {
    for (int i = 0; i < 32 && bits; i++) {
        if (bits & (1 << i)) {
            s << i;
            bits &= ~(1 << i);
            if (bits) {
                s << ",";
            }
        }
    }
}
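// For example, ListBits(ss, 0x0B) writes "0,1,3": bits 0, 1, and 3 are set in 0x0B.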

// Validate draw-time state related to the PSO
bool CoreChecks::ValidatePipelineDrawtimeState(layer_data const *dev_data, LAST_BOUND_STATE const &state, const GLOBAL_CB_NODE *pCB,
                                               CMD_TYPE cmd_type, PIPELINE_STATE const *pPipeline, const char *caller) {
    bool skip = false;

    // Verify vertex binding
    if (pPipeline->vertex_binding_descriptions_.size() > 0) {
        for (size_t i = 0; i < pPipeline->vertex_binding_descriptions_.size(); i++) {
            const auto vertex_binding = pPipeline->vertex_binding_descriptions_[i].binding;
            if ((pCB->current_draw_data.vertex_buffer_bindings.size() < (vertex_binding + 1)) ||
                (pCB->current_draw_data.vertex_buffer_bindings[vertex_binding].buffer == VK_NULL_HANDLE)) {
                skip |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    HandleToUint64(pCB->commandBuffer), kVUID_Core_DrawState_VtxIndexOutOfBounds,
                    "The Pipeline State Object (%s) expects that this Command Buffer's vertex binding Index %u should be set via "
                    "vkCmdBindVertexBuffers. This is because VkVertexInputBindingDescription struct at "
                    "index " PRINTF_SIZE_T_SPECIFIER " of pVertexBindingDescriptions has a binding value of %u.",
                    dev_data->report_data->FormatHandle(state.pipeline_state->pipeline).c_str(), vertex_binding, i, vertex_binding);
            }
        }

        // Verify vertex attribute address alignment
        for (size_t i = 0; i < pPipeline->vertex_attribute_descriptions_.size(); i++) {
            const auto &attribute_description = pPipeline->vertex_attribute_descriptions_[i];
            const auto vertex_binding = attribute_description.binding;
            const auto attribute_offset = attribute_description.offset;
            const auto attribute_format = attribute_description.format;

            const auto &vertex_binding_map_it = pPipeline->vertex_binding_to_index_map_.find(vertex_binding);
            if ((vertex_binding_map_it != pPipeline->vertex_binding_to_index_map_.cend()) &&
                (vertex_binding < pCB->current_draw_data.vertex_buffer_bindings.size()) &&
                (pCB->current_draw_data.vertex_buffer_bindings[vertex_binding].buffer != VK_NULL_HANDLE)) {
                const auto vertex_buffer_stride = pPipeline->vertex_binding_descriptions_[vertex_binding_map_it->second].stride;
                const auto vertex_buffer_offset = pCB->current_draw_data.vertex_buffer_bindings[vertex_binding].offset;
                const auto buffer_state = GetBufferState(pCB->current_draw_data.vertex_buffer_bindings[vertex_binding].buffer);

                // Use only memory binding offset as base memory should be properly aligned by the driver
                const auto buffer_binding_address = buffer_state->binding.offset + vertex_buffer_offset;
                // Use 1 as vertex/instance index to use buffer stride as well
                const auto attrib_address = buffer_binding_address + vertex_buffer_stride + attribute_offset;

                uint32_t vtx_attrib_req_alignment = FormatElementSize(attribute_format);
                if (FormatElementIsTexel(attribute_format)) {
                    vtx_attrib_req_alignment /= FormatChannelCount(attribute_format);
                }

                if (SafeModulo(attrib_address, vtx_attrib_req_alignment) != 0) {
                    skip |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                        HandleToUint64(pCB->current_draw_data.vertex_buffer_bindings[vertex_binding].buffer),
                        kVUID_Core_DrawState_InvalidVtxAttributeAlignment,
                        "Invalid attribAddress alignment for vertex attribute " PRINTF_SIZE_T_SPECIFIER
                        " from pipeline (%s) and vertex buffer (%s).",
                        i, dev_data->report_data->FormatHandle(state.pipeline_state->pipeline).c_str(),
                        dev_data->report_data->FormatHandle(pCB->current_draw_data.vertex_buffer_bindings[vertex_binding].buffer)
                            .c_str());
                }
            }
        }
    } else {
        if ((!pCB->current_draw_data.vertex_buffer_bindings.empty()) && (!pCB->vertex_buffer_used)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCB->commandBuffer),
                            kVUID_Core_DrawState_VtxIndexOutOfBounds,
                            "Vertex buffers are bound to command buffer (%s) but no vertex buffers are attached to this Pipeline "
                            "State Object (%s).",
                            dev_data->report_data->FormatHandle(pCB->commandBuffer).c_str(),
                            dev_data->report_data->FormatHandle(state.pipeline_state->pipeline).c_str());
        }
    }

    // If Viewport or scissors are dynamic, verify that dynamic count matches PSO count.
    // Skip check if rasterization is disabled or there is no viewport.
    if ((!pPipeline->graphicsPipelineCI.pRasterizationState ||
         (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) &&
        pPipeline->graphicsPipelineCI.pViewportState) {
        bool dynViewport = IsDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
        bool dynScissor = IsDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);

        if (dynViewport) {
995             const auto requiredViewportsMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->viewportCount) - 1;
996             const auto missingViewportMask = ~pCB->viewportMask & requiredViewportsMask;
997             if (missingViewportMask) {
998                 std::stringstream ss;
999                 ss << "Dynamic viewport(s) ";
1000                 ListBits(ss, missingViewportMask);
1001                 ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetViewport().";
1002                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
1003                                 kVUID_Core_DrawState_ViewportScissorMismatch, "%s", ss.str().c_str());
1004             }
1005         }
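
        // Worked example of the mask arithmetic above (illustrative): with viewportCount = 3 the
        // required mask is (1 << 3) - 1 = 0b111. If the app has only called vkCmdSetViewport()
        // for viewports 0 and 2, pCB->viewportMask is 0b101, so
        // missingViewportMask = ~0b101 & 0b111 = 0b010, and the message lists viewport 1 as
        // missing. The scissor check below is the same computation against pCB->scissorMask.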

        if (dynScissor) {
            const auto requiredScissorMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->scissorCount) - 1;
            const auto missingScissorMask = ~pCB->scissorMask & requiredScissorMask;
            if (missingScissorMask) {
                std::stringstream ss;
                ss << "Dynamic scissor(s) ";
                ListBits(ss, missingScissorMask);
                ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetScissor().";
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                kVUID_Core_DrawState_ViewportScissorMismatch, "%s", ss.str().c_str());
            }
        }
    }

    // Verify that any MSAA request in the PSO matches the sample count of the bound framebuffer.
    // Skip the check if rasterization is disabled.
    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
        (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
        VkSampleCountFlagBits pso_num_samples = GetNumSamples(pPipeline);
        if (pCB->activeRenderPass) {
            const auto render_pass_info = pCB->activeRenderPass->createInfo.ptr();
            const VkSubpassDescription2KHR *subpass_desc = &render_pass_info->pSubpasses[pCB->activeSubpass];
            uint32_t i;
            unsigned subpass_num_samples = 0;

            for (i = 0; i < subpass_desc->colorAttachmentCount; i++) {
                const auto attachment = subpass_desc->pColorAttachments[i].attachment;
                if (attachment != VK_ATTACHMENT_UNUSED)
                    subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
            }

            if (subpass_desc->pDepthStencilAttachment &&
                subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
                const auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
                subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
            }
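
            // Illustrative subset check below: VkSampleCountFlagBits values are single bits
            // (4x == 0b100), so ORing a 4x color attachment with a 4x depth attachment yields
            // 0b100, and a 4x PSO satisfies (0b100 & 0b100) == 0b100. Mixing a 2x color with a
            // 4x depth attachment yields 0b110, which can never be a subset of a single PSO
            // sample bit, so it is rejected unless a mixed-samples extension is enabled.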

            if (!(dev_data->device_extensions.vk_amd_mixed_attachment_samples ||
                  dev_data->device_extensions.vk_nv_framebuffer_mixed_samples) &&
                ((subpass_num_samples & static_cast<unsigned>(pso_num_samples)) != subpass_num_samples)) {
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            HandleToUint64(pPipeline->pipeline), kVUID_Core_DrawState_NumSamplesMismatch,
                            "Num samples mismatch! At draw-time in Pipeline (%s) with %u samples while current RenderPass (%s) "
                            "uses %u samples!",
                            dev_data->report_data->FormatHandle(pPipeline->pipeline).c_str(), pso_num_samples,
                            dev_data->report_data->FormatHandle(pCB->activeRenderPass->renderPass).c_str(), subpass_num_samples);
            }
        } else {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            HandleToUint64(pPipeline->pipeline), kVUID_Core_DrawState_NoActiveRenderpass,
                            "No active render pass found at draw-time in Pipeline (%s)!",
                            dev_data->report_data->FormatHandle(pPipeline->pipeline).c_str());
        }
    }
    // Verify that the renderPass the PSO was created with is compatible with the active renderPass
    if (pCB->activeRenderPass) {
        // TODO: Move all of the error codes common across different Draws into a LUT accessed by cmd_type
        // TODO: AMD extension codes are included here, but actual function entrypoints are not yet intercepted
        // Error codes for renderpass and subpass mismatches
        auto rp_error = "VUID-vkCmdDraw-renderPass-00435", sp_error = "VUID-vkCmdDraw-subpass-00436";
        switch (cmd_type) {
            case CMD_DRAWINDEXED:
                rp_error = "VUID-vkCmdDrawIndexed-renderPass-00454";
                sp_error = "VUID-vkCmdDrawIndexed-subpass-00455";
                break;
            case CMD_DRAWINDIRECT:
                rp_error = "VUID-vkCmdDrawIndirect-renderPass-00479";
                sp_error = "VUID-vkCmdDrawIndirect-subpass-00480";
                break;
            case CMD_DRAWINDIRECTCOUNTAMD:
                rp_error = "VUID-vkCmdDrawIndirectCountAMD-renderPass-00507";
                sp_error = "VUID-vkCmdDrawIndirectCountAMD-subpass-00508";
                break;
            case CMD_DRAWINDIRECTCOUNTKHR:
                rp_error = "VUID-vkCmdDrawIndirectCountKHR-renderPass-03113";
                sp_error = "VUID-vkCmdDrawIndirectCountKHR-subpass-03114";
                break;
            case CMD_DRAWINDEXEDINDIRECT:
                rp_error = "VUID-vkCmdDrawIndexedIndirect-renderPass-00531";
                sp_error = "VUID-vkCmdDrawIndexedIndirect-subpass-00532";
                break;
            case CMD_DRAWINDEXEDINDIRECTCOUNTAMD:
                rp_error = "VUID-vkCmdDrawIndexedIndirectCountAMD-renderPass-00560";
                sp_error = "VUID-vkCmdDrawIndexedIndirectCountAMD-subpass-00561";
                break;
            case CMD_DRAWINDEXEDINDIRECTCOUNTKHR:
                rp_error = "VUID-vkCmdDrawIndexedIndirectCountKHR-renderPass-03145";
                sp_error = "VUID-vkCmdDrawIndexedIndirectCountKHR-subpass-03146";
                break;
            case CMD_DRAWMESHTASKSNV:
                rp_error = "VUID-vkCmdDrawMeshTasksNV-renderPass-02120";
                sp_error = "VUID-vkCmdDrawMeshTasksNV-subpass-02121";
                break;
            case CMD_DRAWMESHTASKSINDIRECTNV:
                rp_error = "VUID-vkCmdDrawMeshTasksIndirectNV-renderPass-02148";
                sp_error = "VUID-vkCmdDrawMeshTasksIndirectNV-subpass-02149";
                break;
            case CMD_DRAWMESHTASKSINDIRECTCOUNTNV:
                rp_error = "VUID-vkCmdDrawMeshTasksIndirectCountNV-renderPass-02184";
                sp_error = "VUID-vkCmdDrawMeshTasksIndirectCountNV-subpass-02185";
                break;
            default:
                assert(CMD_DRAW == cmd_type);
                break;
        }
        if (pCB->activeRenderPass->renderPass != pPipeline->rp_state->renderPass) {
            // The renderPass the PSO was created with must be compatible with the active renderPass the PSO is being used with
            skip |= ValidateRenderPassCompatibility(dev_data, "active render pass", pCB->activeRenderPass, "pipeline state object",
                                                    pPipeline->rp_state.get(), caller, rp_error);
        }
        if (pPipeline->graphicsPipelineCI.subpass != pCB->activeSubpass) {
            skip |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        HandleToUint64(pPipeline->pipeline), sp_error, "Pipeline was built for subpass %u but used in subpass %u.",
                        pPipeline->graphicsPipelineCI.subpass, pCB->activeSubpass);
        }
    }

    return skip;
}

// For a given cvdescriptorset::DescriptorSet, verify that its Set is compatible with the setLayout corresponding to
// pipelineLayout[layoutIndex]
static bool VerifySetLayoutCompatibility(const cvdescriptorset::DescriptorSet *descriptor_set,
                                         PIPELINE_LAYOUT_NODE const *pipeline_layout, const uint32_t layoutIndex,
                                         string &errorMsg) {
    auto num_sets = pipeline_layout->set_layouts.size();
    if (layoutIndex >= num_sets) {
        stringstream errorStr;
        errorStr << "VkPipelineLayout (" << pipeline_layout->layout << ") only contains " << num_sets
                 << " setLayouts corresponding to sets 0-" << num_sets - 1 << ", but you're attempting to bind a set to index "
                 << layoutIndex;
        errorMsg = errorStr.str();
        return false;
    }
    if (descriptor_set->IsPushDescriptor()) return true;
    auto layout_node = pipeline_layout->set_layouts[layoutIndex];
    return descriptor_set->IsCompatible(layout_node.get(), &errorMsg);
}
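
// Hedged usage sketch (illustrative, not a call site from this file): a caller about to bind a
// descriptor set at index N against a pipeline layout node would typically drive the helper
// above like so --
//
//     std::string error;
//     if (!VerifySetLayoutCompatibility(descriptor_set, pipeline_layout_node, N, error)) {
//         // report error.c_str() through log_msg() with an appropriate VUID
//     }
//
// On the failure path, error carries a human-readable reason such as the out-of-range message
// constructed above.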

// Validate overall state at the time of a draw call
bool CoreChecks::ValidateCmdBufDrawState(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, CMD_TYPE cmd_type, const bool indexed,
                                         const VkPipelineBindPoint bind_point, const char *function, const char *pipe_err_code,
                                         const char *state_err_code) {
    bool result = false;
    auto const &state = cb_node->lastBound[bind_point];
    PIPELINE_STATE *pPipe = state.pipeline_state;
    if (nullptr == pPipe) {
        return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                       HandleToUint64(cb_node->commandBuffer), pipe_err_code,
                       "Must not call %s on this command buffer while there is no %s pipeline bound.", function,
                       bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS ? "Graphics" : "Compute");
    }

    // First check flag states
    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point)
        result = ValidateDrawStateFlags(dev_data, cb_node, pPipe, indexed, state_err_code);

    // Now complete other state checks
    string errorString;
    auto const &pipeline_layout = pPipe->pipeline_layout;

    for (const auto &set_binding_pair : pPipe->active_slots) {
        uint32_t setIndex = set_binding_pair.first;
        // If a valid set is not bound, throw an error
        if ((state.boundDescriptorSets.size() <= setIndex) || (!state.boundDescriptorSets[setIndex])) {
            result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                              HandleToUint64(cb_node->commandBuffer), kVUID_Core_DrawState_DescriptorSetNotBound,
                              "VkPipeline %s uses set #%u but that set is not bound.",
                              dev_data->report_data->FormatHandle(pPipe->pipeline).c_str(), setIndex);
        } else if (!VerifySetLayoutCompatibility(state.boundDescriptorSets[setIndex], &pipeline_layout, setIndex, errorString)) {
            // Set is bound but not compatible with the overlapping pipeline_layout from the PSO
            VkDescriptorSet setHandle = state.boundDescriptorSets[setIndex]->GetSet();
            result |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                        HandleToUint64(setHandle), kVUID_Core_DrawState_PipelineLayoutsIncompatible,
                        "VkDescriptorSet (%s) bound as set #%u is not compatible with overlapping VkPipelineLayout %s due to: %s",
                        dev_data->report_data->FormatHandle(setHandle).c_str(), setIndex,
                        dev_data->report_data->FormatHandle(pipeline_layout.layout).c_str(), errorString.c_str());
        } else {  // Valid set is bound and layout compatible, validate that it's updated
            // Pull the set node
            cvdescriptorset::DescriptorSet *descriptor_set = state.boundDescriptorSets[setIndex];
            // Validate the draw-time state for this descriptor set
            std::string err_str;
            if (!descriptor_set->IsPushDescriptor()) {
                // For the "bindless" style of resource usage with many descriptors, command <-> descriptor binding
                // validation needs to be optimized. Take the requested binding set and prefilter it to eliminate
                // redundant validation checks. Here, the currently bound pipeline determines whether an image
                // validation check is redundant: for images, the "req" portion of the binding_req is indirectly
                // (but tightly) coupled to the pipeline.
                const cvdescriptorset::PrefilterBindRequestMap reduced_map(*descriptor_set, set_binding_pair.second, cb_node,
                                                                           pPipe);
                const auto &binding_req_map = reduced_map.Map();

                if (!descriptor_set->ValidateDrawState(binding_req_map, state.dynamicOffsets[setIndex], cb_node, function,
                                                       &err_str)) {
                    auto set = descriptor_set->GetSet();
                    result |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                        HandleToUint64(set), kVUID_Core_DrawState_DescriptorSetNotUpdated,
                        "Descriptor set %s bound as set #%u encountered the following validation error at %s time: %s",
                        dev_data->report_data->FormatHandle(set).c_str(), setIndex, function, err_str.c_str());
                }
            }
        }
    }

    // Check general pipeline state that needs to be validated at draw time
    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point)
        result |= ValidatePipelineDrawtimeState(dev_data, state, cb_node, cmd_type, pPipe, function);

    return result;
}
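
// Hedged call-flow sketch (illustrative; the actual hooks live elsewhere in this layer): each
// draw entrypoint's PreCallValidate hook is expected to route through the function above, along
// the lines of --
//
//     skip |= ValidateCmdBufDrawState(dev_data, cb_state, CMD_DRAW, /*indexed=*/false,
//                                     VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDraw()",
//                                     pipe_err_code, state_err_code);
//
// where pipe_err_code and state_err_code are the draw-specific VUID strings.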

void CoreChecks::UpdateDrawState(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, const VkPipelineBindPoint bind_point) {
    auto const &state = cb_state->lastBound[bind_point];
    PIPELINE_STATE *pPipe = state.pipeline_state;
    if (VK_NULL_HANDLE != state.pipeline_layout) {
        for (const auto &set_binding_pair : pPipe->active_slots) {
            uint32_t setIndex = set_binding_pair.first;
            // Pull the set node
            cvdescriptorset::DescriptorSet *descriptor_set = state.boundDescriptorSets[setIndex];
            if (!descriptor_set->IsPushDescriptor()) {
                // For the "bindless" style of resource usage with many descriptors, optimize command <-> descriptor binding
                const cvdescriptorset::PrefilterBindRequestMap reduced_map(*descriptor_set, set_binding_pair.second, cb_state);
                const auto &binding_req_map = reduced_map.Map();

                // Bind this set and its active descriptor resources to the command buffer
                descriptor_set->UpdateDrawState(cb_state, binding_req_map);
                // For the given active slots, record updated images & buffers
                descriptor_set->GetStorageUpdates(binding_req_map, &cb_state->updateBuffers, &cb_state->updateImages);
            }
        }
    }
    if (!pPipe->vertex_binding_descriptions_.empty()) {
        cb_state->vertex_buffer_used = true;
    }
}

bool CoreChecks::ValidatePipelineLocked(layer_data *dev_data, std::vector<std::unique_ptr<PIPELINE_STATE>> const &pPipelines,
                                        int pipelineIndex) {
    bool skip = false;

    PIPELINE_STATE *pPipeline = pPipelines[pipelineIndex].get();

    // If the create derivative bit is set, check that we've specified a base
    // pipeline correctly, and that the base pipeline was created to allow
    // derivatives.
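    // The two base-pipeline selectors are mutually exclusive: exactly one of
    // basePipelineHandle (!= VK_NULL_HANDLE) or basePipelineIndex (!= -1) may be set.
    // The XOR below therefore rejects both the "neither set" and "both set" cases.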
    if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
        PIPELINE_STATE *pBasePipeline = nullptr;
        if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
              (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
            // This check is a superset of "VUID-VkGraphicsPipelineCreateInfo-flags-00724" and
            // "VUID-VkGraphicsPipelineCreateInfo-flags-00725"
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            HandleToUint64(pPipeline->pipeline), kVUID_Core_DrawState_InvalidPipelineCreateState,
                            "Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified");
        } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
            if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                                HandleToUint64(pPipeline->pipeline), "VUID-vkCreateGraphicsPipelines-flags-00720",
                                "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline.");
            } else {
                pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex].get();
            }
        } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
            pBasePipeline = GetPipelineState(pPipeline->graphicsPipelineCI.basePipelineHandle);
        }

        if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            HandleToUint64(pPipeline->pipeline), kVUID_Core_DrawState_InvalidPipelineCreateState,
                            "Invalid Pipeline CreateInfo: base pipeline does not allow derivatives.");
        }
    }

    return skip;
}
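
// Hedged example of a pCreateInfos pair that satisfies the derivative checks above (illustrative
// values only, not taken from this file):
//
//     VkGraphicsPipelineCreateInfo infos[2] = { /* ... common state ... */ };
//     infos[0].flags             = VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT;
//     infos[0].basePipelineIndex = -1;                // the base has no base of its own
//     infos[1].flags              = VK_PIPELINE_CREATE_DERIVATIVE_BIT;
//     infos[1].basePipelineHandle = VK_NULL_HANDLE;   // exactly one selector is used...
//     infos[1].basePipelineIndex  = 0;                // ...and the base comes earlier in the array
//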

// UNLOCKED pipeline validation. DO NOT look up objects in the layer_data->* maps in this function.
bool CoreChecks::ValidatePipelineUnlocked(layer_data *dev_data, std::vector<std::unique_ptr<PIPELINE_STATE>> const &pPipelines,
                                          int pipelineIndex) {
    bool skip = false;

    PIPELINE_STATE *pPipeline = pPipelines[pipelineIndex].get();

    // Ensure the subpass index is valid. If not, then ValidateAndCapturePipelineShaderState
    // produces nonsense errors that confuse users. Other layers should already emit errors for
    // the renderpass being invalid. subpass_desc is only computed after the bounds check and
    // stays null on failure, so later checks that dereference it can be skipped.
    auto subpass_desc = (pPipeline->graphicsPipelineCI.subpass < pPipeline->rp_state->createInfo.subpassCount)
                            ? &pPipeline->rp_state->createInfo.pSubpasses[pPipeline->graphicsPipelineCI.subpass]
                            : nullptr;
    if (!subpass_desc) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        HandleToUint64(pPipeline->pipeline), "VUID-VkGraphicsPipelineCreateInfo-subpass-00759",
                        "Invalid Pipeline CreateInfo State: Subpass index %u is out of range for this renderpass (0..%u).",
                        pPipeline->graphicsPipelineCI.subpass, pPipeline->rp_state->createInfo.subpassCount - 1);
    }

    if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
        const safe_VkPipelineColorBlendStateCreateInfo *color_blend_state = pPipeline->graphicsPipelineCI.pColorBlendState;
        if (subpass_desc && (color_blend_state->attachmentCount != subpass_desc->colorAttachmentCount)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            HandleToUint64(pPipeline->pipeline), "VUID-VkGraphicsPipelineCreateInfo-attachmentCount-00746",
                            "vkCreateGraphicsPipelines(): Render pass (%s) subpass %u has colorAttachmentCount of %u which doesn't "
                            "match the pColorBlendState->attachmentCount of %u.",
                            dev_data->report_data->FormatHandle(pPipeline->rp_state->renderPass).c_str(),
                            pPipeline->graphicsPipelineCI.subpass, subpass_desc->colorAttachmentCount,
                            color_blend_state->attachmentCount);
        }
        if (!dev_data->enabled_features.core.independentBlend) {
            if (pPipeline->attachments.size() > 1) {
                VkPipelineColorBlendAttachmentState *pAttachments = &pPipeline->attachments[0];
                for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
                    // Quoting the spec: "If [the independent blend] feature is not enabled, the VkPipelineColorBlendAttachmentState
                    // settings for all color attachments must be identical." VkPipelineColorBlendAttachmentState contains
                    // only attachment state, so memcmp is best suited for the comparison
                    if (memcmp(static_cast<const void *>(pAttachments), static_cast<const void *>(&pAttachments[i]),
                               sizeof(pAttachments[0]))) {
                        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, HandleToUint64(pPipeline->pipeline),
                                        "VUID-VkPipelineColorBlendStateCreateInfo-pAttachments-00605",
                                        "Invalid Pipeline CreateInfo: If independent blend feature not enabled, all elements of "
                                        "pAttachments must be identical.");
                        break;
                    }
                }
            }
        }
        if (!dev_data->enabled_features.core.logicOp &&
            (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
            skip |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        HandleToUint64(pPipeline->pipeline), "VUID-VkPipelineColorBlendStateCreateInfo-logicOpEnable-00606",
                        "Invalid Pipeline CreateInfo: If logic operations feature not enabled, logicOpEnable must be VK_FALSE.");
        }
        for (size_t i = 0; i < pPipeline->attachments.size(); i++) {
            if ((pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
                (pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
                (pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
                (pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) {
                if (!dev_data->enabled_features.core.dualSrcBlend) {
                    skip |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        HandleToUint64(pPipeline->pipeline), "VUID-VkPipelineColorBlendAttachmentState-srcColorBlendFactor-00608",
                        "vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER
                        "].srcColorBlendFactor uses a dual-source blend factor (%d), but this device feature is not "
                        "enabled.",
                        pipelineIndex, i, pPipeline->attachments[i].srcColorBlendFactor);
                }
            }
            if ((pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
                (pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
                (pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
                (pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) {
                if (!dev_data->enabled_features.core.dualSrcBlend) {
                    skip |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        HandleToUint64(pPipeline->pipeline), "VUID-VkPipelineColorBlendAttachmentState-dstColorBlendFactor-00609",
                        "vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER
                        "].dstColorBlendFactor uses a dual-source blend factor (%d), but this device feature is not "
                        "enabled.",
                        pipelineIndex, i, pPipeline->attachments[i].dstColorBlendFactor);
                }
            }
            if ((pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
                (pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
                (pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
                (pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) {
                if (!dev_data->enabled_features.core.dualSrcBlend) {
                    skip |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        HandleToUint64(pPipeline->pipeline), "VUID-VkPipelineColorBlendAttachmentState-srcAlphaBlendFactor-00610",
                        "vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER
                        "].srcAlphaBlendFactor uses a dual-source blend factor (%d), but this device feature is not "
                        "enabled.",
                        pipelineIndex, i, pPipeline->attachments[i].srcAlphaBlendFactor);
                }
            }
            if ((pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
                (pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
                (pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
                (pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) {
                if (!dev_data->enabled_features.core.dualSrcBlend) {
                    skip |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        HandleToUint64(pPipeline->pipeline), "VUID-VkPipelineColorBlendAttachmentState-dstAlphaBlendFactor-00611",
                        "vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER
                        "].dstAlphaBlendFactor uses a dual-source blend factor (%d), but this device feature is not "
                        "enabled.",
                        pipelineIndex, i, pPipeline->attachments[i].dstAlphaBlendFactor);
                }
            }
        }
    }

    if (ValidateAndCapturePipelineShaderState(dev_data, pPipeline)) {
        skip = true;
    }
    // Each shader stage must be unique
    if (pPipeline->duplicate_shaders) {
        for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) {
            if (pPipeline->duplicate_shaders & stage) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                                HandleToUint64(pPipeline->pipeline), kVUID_Core_DrawState_InvalidPipelineCreateState,
                                "Invalid Pipeline CreateInfo State: Multiple shaders provided for stage %s",
                                string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage)));
            }
        }
    }
    if (dev_data->device_extensions.vk_nv_mesh_shader) {
        // VS or mesh is required
        if (!(pPipeline->active_shaders & (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_MESH_BIT_NV))) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            HandleToUint64(pPipeline->pipeline), "VUID-VkGraphicsPipelineCreateInfo-stage-02096",
                            "Invalid Pipeline CreateInfo State: Vertex Shader or Mesh Shader required.");
        }
        // Can't mix mesh and VTG
        if ((pPipeline->active_shaders & (VK_SHADER_STAGE_MESH_BIT_NV | VK_SHADER_STAGE_TASK_BIT_NV)) &&
            (pPipeline->active_shaders &
             (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT |
              VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT))) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            HandleToUint64(pPipeline->pipeline), "VUID-VkGraphicsPipelineCreateInfo-pStages-02095",
                            "Invalid Pipeline CreateInfo State: Geometric shader stages must either be all mesh (mesh | task) "
                            "or all VTG (vertex, tess control, tess eval, geom).");
        }
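        // Illustrative stage sets for the check above: {task, mesh, fragment} and
        // {vertex, tess control, tess eval, geometry, fragment} are each internally consistent,
        // while {mesh, vertex, fragment} mixes the mesh and VTG pipelines and is rejected.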
    } else {
        // VS is required
        if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            HandleToUint64(pPipeline->pipeline), "VUID-VkGraphicsPipelineCreateInfo-stage-00727",
                            "Invalid Pipeline CreateInfo State: Vertex Shader required.");
        }
    }

    if (!dev_data->enabled_features.mesh_shader.meshShader && (pPipeline->active_shaders & VK_SHADER_STAGE_MESH_BIT_NV)) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        HandleToUint64(pPipeline->pipeline), "VUID-VkPipelineShaderStageCreateInfo-stage-02091",
                        "Invalid Pipeline CreateInfo State: Mesh Shader not supported.");
    }

    if (!dev_data->enabled_features.mesh_shader.taskShader && (pPipeline->active_shaders & VK_SHADER_STAGE_TASK_BIT_NV)) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        HandleToUint64(pPipeline->pipeline), "VUID-VkPipelineShaderStageCreateInfo-stage-02092",
                        "Invalid Pipeline CreateInfo State: Task Shader not supported.");
    }

    // Either both or neither TC/TE shaders should be defined
    bool has_control = (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) != 0;
    bool has_eval = (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) != 0;
    if (has_control && !has_eval) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        HandleToUint64(pPipeline->pipeline), "VUID-VkGraphicsPipelineCreateInfo-pStages-00729",
                        "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair.");
    }
    if (!has_control && has_eval) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        HandleToUint64(pPipeline->pipeline), "VUID-VkGraphicsPipelineCreateInfo-pStages-00730",
                        "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair.");
    }
    // Compute shaders should be specified independently of Gfx shaders
    if (pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        HandleToUint64(pPipeline->pipeline), "VUID-VkGraphicsPipelineCreateInfo-stage-00728",
                        "Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline.");
    }

    if ((pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT) && !pPipeline->graphicsPipelineCI.pInputAssemblyState) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        HandleToUint64(pPipeline->pipeline), "VUID-VkGraphicsPipelineCreateInfo-pStages-02098",
                        "Invalid Pipeline CreateInfo State: Missing pInputAssemblyState.");
    }

    // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
    // A mismatched primitive topology and tessellation pair fails graphics pipeline creation.
    if (has_control && has_eval &&
        (!pPipeline->graphicsPipelineCI.pInputAssemblyState ||
         pPipeline->graphicsPipelineCI.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        HandleToUint64(pPipeline->pipeline), "VUID-VkGraphicsPipelineCreateInfo-pStages-00736",
                        "Invalid Pipeline CreateInfo State: VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA topology for "
                        "tessellation pipelines.");
    }
    if (pPipeline->graphicsPipelineCI.pInputAssemblyState &&
        pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
        if (!has_control || !has_eval) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            HandleToUint64(pPipeline->pipeline), "VUID-VkGraphicsPipelineCreateInfo-topology-00737",
                            "Invalid Pipeline CreateInfo State: VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid "
                            "for tessellation pipelines.");
        }
    }

    // If a rasterization state is provided...
    if (pPipeline->graphicsPipelineCI.pRasterizationState) {
        if ((pPipeline->graphicsPipelineCI.pRasterizationState->depthClampEnable == VK_TRUE) &&
            (!dev_data->enabled_features.core.depthClamp)) {
            skip |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        HandleToUint64(pPipeline->pipeline), "VUID-VkPipelineRasterizationStateCreateInfo-depthClampEnable-00782",
                        "vkCreateGraphicsPipelines(): the depthClamp device feature is disabled: the depthClampEnable member "
                        "of the VkPipelineRasterizationStateCreateInfo structure must be set to VK_FALSE.");
        }

        if (!IsDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_BIAS) &&
            (pPipeline->graphicsPipelineCI.pRasterizationState->depthBiasClamp != 0.0) &&
            (!dev_data->enabled_features.core.depthBiasClamp)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            HandleToUint64(pPipeline->pipeline), kVUID_Core_DrawState_InvalidFeature,
                            "vkCreateGraphicsPipelines(): the depthBiasClamp device feature is disabled: the depthBiasClamp member "
                            "of the VkPipelineRasterizationStateCreateInfo structure must be set to 0.0 unless the "
                            "VK_DYNAMIC_STATE_DEPTH_BIAS dynamic state is enabled");
        }

        // If rasterization is enabled...
        if (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE) {
            // Note: the pMultisampleState null check is a defensive guard; other layers validate that the
            // pointer is valid when rasterization is enabled.
            if (pPipeline->graphicsPipelineCI.pMultisampleState &&
                (pPipeline->graphicsPipelineCI.pMultisampleState->alphaToOneEnable == VK_TRUE) &&
                (!dev_data->enabled_features.core.alphaToOne)) {
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            HandleToUint64(pPipeline->pipeline), "VUID-VkPipelineMultisampleStateCreateInfo-alphaToOneEnable-00785",
                            "vkCreateGraphicsPipelines(): the alphaToOne device feature is disabled: the alphaToOneEnable "
                            "member of the VkPipelineMultisampleStateCreateInfo structure must be set to VK_FALSE.");
            }

            // If the subpass uses a depth/stencil attachment, pDepthStencilState must point to a valid structure
            if (subpass_desc && subpass_desc->pDepthStencilAttachment &&
                subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
                if (!pPipeline->graphicsPipelineCI.pDepthStencilState) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                                    HandleToUint64(pPipeline->pipeline),
                                    "VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00752",
                                    "Invalid Pipeline CreateInfo State: pDepthStencilState is NULL when rasterization is enabled "
                                    "and subpass uses a depth/stencil attachment.");

                } else if ((pPipeline->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE) &&
                           (!dev_data->enabled_features.core.depthBounds)) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                                    HandleToUint64(pPipeline->pipeline),
                                    "VUID-VkPipelineDepthStencilStateCreateInfo-depthBoundsTestEnable-00598",
                                    "vkCreateGraphicsPipelines(): the depthBounds device feature is disabled: the "
                                    "depthBoundsTestEnable member of the VkPipelineDepthStencilStateCreateInfo structure must be "
                                    "set to VK_FALSE.");
                }
            }

            // If the subpass uses color attachments, pColorBlendState must be a valid pointer
            if (subpass_desc) {
                uint32_t color_attachment_count = 0;
                for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; ++i) {
                    if (subpass_desc->pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) {
                        ++color_attachment_count;
                    }
                }
                if (color_attachment_count > 0 && pPipeline->graphicsPipelineCI.pColorBlendState == nullptr) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                                    HandleToUint64(pPipeline->pipeline),
                                    "VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00753",
                                    "Invalid Pipeline CreateInfo State: pColorBlendState is NULL when rasterization is enabled and "
                                    "subpass uses color attachments.");
                }
            }
        }
    }

    if ((pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT) && !pPipeline->graphicsPipelineCI.pVertexInputState) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        HandleToUint64(pPipeline->pipeline), "VUID-VkGraphicsPipelineCreateInfo-pStages-02097",
                        "Invalid Pipeline CreateInfo State: Missing pVertexInputState.");
    }

    auto vi = pPipeline->graphicsPipelineCI.pVertexInputState;
    if (vi != NULL) {
        for (uint32_t j = 0; j < vi->vertexAttributeDescriptionCount; j++) {
            VkFormat format = vi->pVertexAttributeDescriptions[j].format;
            // Internal call to get format info. Still goes through layers; could potentially go directly to the ICD.
            VkFormatProperties properties;
            dev_data->instance_dispatch_table.GetPhysicalDeviceFormatProperties(dev_data->physical_device, format, &properties);
            if ((properties.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) == 0) {
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            "VUID-VkVertexInputAttributeDescription-format-00623",
                            "vkCreateGraphicsPipelines: pCreateInfo[%d].pVertexInputState->vertexAttributeDescriptions[%d].format "
                            "(%s) is not a supported vertex buffer format.",
                            pipelineIndex, j, string_VkFormat(format));
            }
        }
    }
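
    // Hedged illustration for the format check above: VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT is
    // optional for many formats (e.g. the R64* family is commonly unsupported as vertex input),
    // so an attribute declared with such a format fails here even if the same format is fine for
    // sampled images; support is queried per device via vkGetPhysicalDeviceFormatProperties().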

    auto accumColorSamples = [subpass_desc, pPipeline](uint32_t &samples) {
        for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; i++) {
            const auto attachment = subpass_desc->pColorAttachments[i].attachment;
            if (attachment != VK_ATTACHMENT_UNUSED) {
                samples |= static_cast<uint32_t>(pPipeline->rp_state->createInfo.pAttachments[attachment].samples);
            }
        }
    };

    // The subpass_desc guard skips these checks when the subpass index was out of range (already reported above)
    if (subpass_desc && !(dev_data->device_extensions.vk_amd_mixed_attachment_samples ||
                          dev_data->device_extensions.vk_nv_framebuffer_mixed_samples)) {
        uint32_t raster_samples = static_cast<uint32_t>(GetNumSamples(pPipeline));
        uint32_t subpass_num_samples = 0;

        accumColorSamples(subpass_num_samples);

        if (subpass_desc->pDepthStencilAttachment && subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            const auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
            subpass_num_samples |= static_cast<uint32_t>(pPipeline->rp_state->createInfo.pAttachments[attachment].samples);
        }

        // subpass_num_samples is 0 when the subpass has no attachments or if all attachments are VK_ATTACHMENT_UNUSED.
        // Only validate the value of subpass_num_samples if the subpass has attachments that are not VK_ATTACHMENT_UNUSED.
        if (subpass_num_samples && (!IsPowerOfTwo(subpass_num_samples) || (subpass_num_samples != raster_samples))) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            HandleToUint64(pPipeline->pipeline), "VUID-VkGraphicsPipelineCreateInfo-subpass-00757",
                            "vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->rasterizationSamples (%u) "
                            "does not match the number of samples of the RenderPass color and/or depth attachment.",
                            pipelineIndex, raster_samples);
        }
    }
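
    // Worked example for the power-of-two test above (illustrative): sample counts are single
    // bits, so a 4x color attachment ORed with a 2x depth attachment gives
    // 0b100 | 0b010 = 0b110 (6). That is not a power of two, meaning the attachments themselves
    // disagree, and the pipeline is rejected unless a mixed-samples extension is enabled.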

    if (subpass_desc && dev_data->device_extensions.vk_amd_mixed_attachment_samples) {
        VkSampleCountFlagBits max_sample_count = static_cast<VkSampleCountFlagBits>(0);
        for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; ++i) {
            if (subpass_desc->pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) {
                max_sample_count =
                    std::max(max_sample_count,
                             pPipeline->rp_state->createInfo.pAttachments[subpass_desc->pColorAttachments[i].attachment].samples);
            }
        }
        if (subpass_desc->pDepthStencilAttachment && subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            max_sample_count =
                std::max(max_sample_count,
                         pPipeline->rp_state->createInfo.pAttachments[subpass_desc->pDepthStencilAttachment->attachment].samples);
        }
        if ((pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE) &&
            (pPipeline->graphicsPipelineCI.pMultisampleState->rasterizationSamples != max_sample_count)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            HandleToUint64(pPipeline->pipeline), "VUID-VkGraphicsPipelineCreateInfo-subpass-01505",
                            "vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->rasterizationSamples (%s) != max "
                            "attachment samples (%s) used in subpass %u.",
                            pipelineIndex,
                            string_VkSampleCountFlagBits(pPipeline->graphicsPipelineCI.pMultisampleState->rasterizationSamples),
                            string_VkSampleCountFlagBits(max_sample_count), pPipeline->graphicsPipelineCI.subpass);
        }
    }

    if (subpass_desc && dev_data->device_extensions.vk_nv_framebuffer_mixed_samples) {
        uint32_t raster_samples = static_cast<uint32_t>(GetNumSamples(pPipeline));
        uint32_t subpass_color_samples = 0;

        accumColorSamples(subpass_color_samples);

        if (subpass_desc->pDepthStencilAttachment && subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            const auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
            const uint32_t subpass_depth_samples =
                static_cast<uint32_t>(pPipeline->rp_state->createInfo.pAttachments[attachment].samples);

            if (pPipeline->graphicsPipelineCI.pDepthStencilState) {
                const bool ds_test_enabled = (pPipeline->graphicsPipelineCI.pDepthStencilState->depthTestEnable == VK_TRUE) ||
                                             (pPipeline->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE) ||
                                             (pPipeline->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE);

                if (ds_test_enabled && (!IsPowerOfTwo(subpass_depth_samples) || (raster_samples != subpass_depth_samples))) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                                    HandleToUint64(pPipeline->pipeline), "VUID-VkGraphicsPipelineCreateInfo-subpass-01411",
                                    "vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->rasterizationSamples (%u) "
                                    "does not match the number of samples of the RenderPass depth attachment (%u).",
                                    pipelineIndex, raster_samples, subpass_depth_samples);
                }
            }
        }

        if (IsPowerOfTwo(subpass_color_samples)) {
            if (raster_samples < subpass_color_samples) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                                HandleToUint64(pPipeline->pipeline), "VUID-VkGraphicsPipelineCreateInfo-subpass-01412",
                                "vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->rasterizationSamples (%u) "
                                "is not greater than or equal to the number of samples of the RenderPass color attachment (%u).",
                                pipelineIndex, raster_samples, subpass_color_samples);
            }

            if (pPipeline->graphicsPipelineCI.pMultisampleState) {
                if ((raster_samples > subpass_color_samples) &&
                    (pPipeline->graphicsPipelineCI.pMultisampleState->sampleShadingEnable == VK_TRUE)) {
                    skip |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        HandleToUint64(pPipeline->pipeline), "VUID-VkPipelineMultisampleStateCreateInfo-rasterizationSamples-01415",
                        "vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->sampleShadingEnable must be VK_FALSE when "
                        "pCreateInfo[%d].pMultisampleState->rasterizationSamples (%u) is greater than the number of samples of the "
                        "subpass color attachment (%u).",
                        pipelineIndex, pipelineIndex, raster_samples, subpass_color_samples);
                }

                const auto *coverage_modulation_state = lvl_find_in_chain<VkPipelineCoverageModulationStateCreateInfoNV>(
                    pPipeline->graphicsPipelineCI.pMultisampleState->pNext);

                if (coverage_modulation_state && (coverage_modulation_state->coverageModulationTableEnable == VK_TRUE)) {
                    if (coverage_modulation_state->coverageModulationTableCount != (raster_samples / subpass_color_samples)) {
                        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, HandleToUint64(pPipeline->pipeline),
                                        "VUID-VkPipelineCoverageModulationStateCreateInfoNV-coverageModulationTableEnable-01405",
                                        "vkCreateGraphicsPipelines: pCreateInfos[%d] VkPipelineCoverageModulationStateCreateInfoNV "
                                        "coverageModulationTableCount of %u is invalid.",
                                        pipelineIndex, coverage_modulation_state->coverageModulationTableCount);
                    }
                }
            }
        }
    }
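
    // Worked example for the NV mixed-samples rules above (illustrative): with
    // rasterizationSamples = 8 and 2x color attachments, raster_samples (8) >= color samples (2)
    // passes, sample shading must then be disabled, and an enabled coverage modulation table must
    // have exactly raster_samples / subpass_color_samples = 8 / 2 = 4 entries.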
1717 
1718     if (dev_data->device_extensions.vk_nv_fragment_coverage_to_color) {
1719         const auto coverage_to_color_state =
1720             lvl_find_in_chain<VkPipelineCoverageToColorStateCreateInfoNV>(pPipeline->graphicsPipelineCI.pMultisampleState);
1721 
1722         if (coverage_to_color_state && coverage_to_color_state->coverageToColorEnable == VK_TRUE) {
1723             bool attachment_is_valid = false;
1724             std::string error_detail;
1725 
1726             if (coverage_to_color_state->coverageToColorLocation < subpass_desc->colorAttachmentCount) {
1727                 const auto color_attachment_ref = subpass_desc->pColorAttachments[coverage_to_color_state->coverageToColorLocation];
1728                 if (color_attachment_ref.attachment != VK_ATTACHMENT_UNUSED) {
1729                     const auto color_attachment = pPipeline->rp_state->createInfo.pAttachments[color_attachment_ref.attachment];
1730 
1731                     switch (color_attachment.format) {
1732                         case VK_FORMAT_R8_UINT:
1733                         case VK_FORMAT_R8_SINT:
1734                         case VK_FORMAT_R16_UINT:
1735                         case VK_FORMAT_R16_SINT:
1736                         case VK_FORMAT_R32_UINT:
1737                         case VK_FORMAT_R32_SINT:
1738                             attachment_is_valid = true;
1739                             break;
1740                         default:
1741                             string_sprintf(&error_detail, "references an attachment with an invalid format (%s).",
1742                                            string_VkFormat(color_attachment.format));
1743                             break;
1744                     }
1745                 } else {
1746                     string_sprintf(&error_detail,
1747                                    "references an invalid attachment. The subpass pColorAttachments[%" PRIu32
1748                                    "].attachment has the value "
1749                                    "VK_ATTACHMENT_UNUSED.",
1750                                    coverage_to_color_state->coverageToColorLocation);
1751                 }
            } else {
                string_sprintf(&error_detail,
                               "references a non-existent attachment because the subpass colorAttachmentCount is %" PRIu32 ".",
                               subpass_desc->colorAttachmentCount);
            }

            if (!attachment_is_valid) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                                HandleToUint64(pPipeline->pipeline),
                                "VUID-VkPipelineCoverageToColorStateCreateInfoNV-coverageToColorEnable-01404",
                                "vkCreateGraphicsPipelines: pCreateInfos[%" PRId32
                                "].pMultisampleState VkPipelineCoverageToColorStateCreateInfoNV "
                                "coverageToColorLocation = %" PRIu32 " %s",
                                pipelineIndex, coverage_to_color_state->coverageToColorLocation, error_detail.c_str());
            }
        }
    }

    return skip;
}
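
// Illustrative pNext chaining (a sketch, not code from this layer; field values are examples only):
// the NV coverage-to-color state validated above arrives on the multisample state's pNext chain at
// pipeline-creation time, e.g.:
//
//   VkPipelineCoverageToColorStateCreateInfoNV ctc = {};
//   ctc.sType = VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_TO_COLOR_STATE_CREATE_INFO_NV;
//   ctc.coverageToColorEnable = VK_TRUE;
//   ctc.coverageToColorLocation = 0;  // must reference an R{8,16,32}_{UINT,SINT} color attachment
//   VkPipelineMultisampleStateCreateInfo ms = {};
//   ms.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
//   ms.pNext = &ctc;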

// The block of code below is specifically for managing/tracking descriptor sets

// Return the DESCRIPTOR_POOL_STATE ptr for the specified pool, or else NULL
DESCRIPTOR_POOL_STATE *CoreChecks::GetDescriptorPoolState(const VkDescriptorPool pool) {
    auto pool_it = descriptorPoolMap.find(pool);
    if (pool_it == descriptorPoolMap.end()) {
        return NULL;
    }
    return pool_it->second;
}

// Validate that the given set is valid and that it's not being used by an in-flight CmdBuffer
// func_str is the name of the calling function
// Return false if no errors occur
// Return true if a validation error occurs and the callback returns true (to skip the upcoming API call down the chain)
bool CoreChecks::ValidateIdleDescriptorSet(const layer_data *dev_data, VkDescriptorSet set, const char *func_str) {
    if (dev_data->disabled.idle_descriptor_set) return false;
    bool skip = false;
    auto set_node = dev_data->setMap.find(set);
    if (set_node == dev_data->setMap.end()) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                        HandleToUint64(set), kVUID_Core_DrawState_DoubleDestroy,
                        "Cannot call %s() on descriptor set %s that has not been allocated.", func_str,
                        dev_data->report_data->FormatHandle(set).c_str());
    } else {
        // TODO: This covers various error cases, so an error enum should be passed into this function and used here
        if (set_node->second->in_use.load()) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                            HandleToUint64(set), "VUID-vkFreeDescriptorSets-pDescriptorSets-00309",
                            "Cannot call %s() on descriptor set %s that is in use by a command buffer.", func_str,
                            dev_data->report_data->FormatHandle(set).c_str());
        }
    }
    return skip;
}
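
// Illustrative call site (a sketch, not code from this file; the caller name is an assumption):
// a hypothetical PreCallValidateFreeDescriptorSets would run this check once per set:
//
//   bool skip = false;
//   for (uint32_t i = 0; i < count; i++) {
//       if (pDescriptorSets[i] != VK_NULL_HANDLE) {
//           skip |= ValidateIdleDescriptorSet(dev_data, pDescriptorSets[i], "vkFreeDescriptorSets");
//       }
//   }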

// Remove the set from setMap and delete the set
void CoreChecks::FreeDescriptorSet(layer_data *dev_data, cvdescriptorset::DescriptorSet *descriptor_set) {
    dev_data->setMap.erase(descriptor_set->GetSet());
    delete descriptor_set;
}
// Free all DS Pools including their Sets & related sub-structs
// NOTE: Calls to this function should be wrapped in a mutex
void CoreChecks::DeletePools(layer_data *dev_data) {
    for (auto ii = dev_data->descriptorPoolMap.begin(); ii != dev_data->descriptorPoolMap.end();) {
        // Remove this pool's sets from setMap and delete them
        for (auto ds : ii->second->sets) {
            FreeDescriptorSet(dev_data, ds);
        }
        ii->second->sets.clear();
        delete ii->second;
        ii = dev_data->descriptorPoolMap.erase(ii);
    }
}
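
// Expected calling convention (illustrative sketch; the lock name is an assumption):
// because DeletePools walks and mutates shared maps, callers are expected to hold the
// global lock for the duration of the call, e.g.:
//
//   {
//       std::lock_guard<std::mutex> lock(global_lock);
//       DeletePools(device_data);
//   }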

// For given CB object, fetch associated CB Node from map
GLOBAL_CB_NODE *CoreChecks::GetCBNode(const VkCommandBuffer cb) {
    auto it = commandBufferMap.find(cb);
    if (it == commandBufferMap.end()) {
        return NULL;
    }
    return it->second;
}
// If a render pass is active, verify that the given command type is appropriate for the current subpass state
bool CoreChecks::ValidateCmdSubpassState(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) {
    if (!pCB->activeRenderPass) return false;
    bool skip = false;
    if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS &&
        (cmd_type != CMD_EXECUTECOMMANDS && cmd_type != CMD_NEXTSUBPASS && cmd_type != CMD_ENDRENDERPASS &&
         cmd_type != CMD_NEXTSUBPASS2KHR && cmd_type != CMD_ENDRENDERPASS2KHR)) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(pCB->commandBuffer), kVUID_Core_DrawState_InvalidCommandBuffer,
                        "Commands cannot be called in a subpass using secondary command buffers.");
    } else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(pCB->commandBuffer), kVUID_Core_DrawState_InvalidCommandBuffer,
                        "vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
    }
    return skip;
}

bool CoreChecks::ValidateCmdQueueFlags(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *caller_name,
                                       VkQueueFlags required_flags, const char *error_code) {
    auto pool = GetCommandPoolNode(cb_node->createInfo.commandPool);
    if (pool) {
        VkQueueFlags queue_flags = GetPhysicalDeviceState()->queue_family_properties[pool->queueFamilyIndex].queueFlags;
        if (!(required_flags & queue_flags)) {
            string required_flags_string;
            for (auto flag : {VK_QUEUE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT, VK_QUEUE_COMPUTE_BIT}) {
                if (flag & required_flags) {
                    if (required_flags_string.size()) {
                        required_flags_string += " or ";
                    }
                    required_flags_string += string_VkQueueFlagBits(flag);
                }
            }
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                           HandleToUint64(cb_node->commandBuffer), error_code,
                           "Cannot call %s on a command buffer allocated from a pool without %s capabilities.", caller_name,
                           required_flags_string.c_str());
        }
    }
    return false;
}
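
// Illustrative call site (sketch only; the VUID string shown is an assumption): a compute
// dispatch requires a command buffer whose pool has compute capability:
//
//   skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdDispatch()", VK_QUEUE_COMPUTE_BIT,
//                                 "VUID-vkCmdDispatch-commandBuffer-cmdpool");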

static char const *GetCauseStr(VK_OBJECT obj) {
    if (obj.type == kVulkanObjectTypeDescriptorSet) return "destroyed or updated";
    if (obj.type == kVulkanObjectTypeCommandBuffer) return "destroyed or rerecorded";
    return "destroyed";
}

bool CoreChecks::ReportInvalidCommandBuffer(layer_data *dev_data, const GLOBAL_CB_NODE *cb_state, const char *call_source) {
    bool skip = false;
    for (auto obj : cb_state->broken_bindings) {
        const char *type_str = object_string[obj.type];
        const char *cause_str = GetCauseStr(obj);
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(cb_state->commandBuffer), kVUID_Core_DrawState_InvalidCommandBuffer,
                        "You are adding %s to command buffer %s that is invalid because bound %s %s was %s.", call_source,
                        dev_data->report_data->FormatHandle(cb_state->commandBuffer).c_str(), type_str,
                        dev_data->report_data->FormatHandle(obj.handle).c_str(), cause_str);
    }
    return skip;
}

// 'commandBuffer must be in the recording state' valid usage error code for each command
// Note: grepping for ^^^^^^^^^ in vk_validation_database is easily massaged into the following list
// Note: C++11 doesn't automatically devolve enum types to the underlying type for hash traits purposes (fixed in C++14)
using CmdTypeHashType = std::underlying_type<CMD_TYPE>::type;
static const std::unordered_map<CmdTypeHashType, std::string> must_be_recording_map = {
    {CMD_NONE, kVUIDUndefined},  // UNMATCHED
    {CMD_BEGINQUERY, "VUID-vkCmdBeginQuery-commandBuffer-recording"},
    {CMD_BEGINRENDERPASS, "VUID-vkCmdBeginRenderPass-commandBuffer-recording"},
    {CMD_BEGINRENDERPASS2KHR, "VUID-vkCmdBeginRenderPass2KHR-commandBuffer-recording"},
    {CMD_BINDDESCRIPTORSETS, "VUID-vkCmdBindDescriptorSets-commandBuffer-recording"},
    {CMD_BINDINDEXBUFFER, "VUID-vkCmdBindIndexBuffer-commandBuffer-recording"},
    {CMD_BINDPIPELINE, "VUID-vkCmdBindPipeline-commandBuffer-recording"},
    {CMD_BINDSHADINGRATEIMAGE, "VUID-vkCmdBindShadingRateImageNV-commandBuffer-recording"},
    {CMD_BINDVERTEXBUFFERS, "VUID-vkCmdBindVertexBuffers-commandBuffer-recording"},
    {CMD_BLITIMAGE, "VUID-vkCmdBlitImage-commandBuffer-recording"},
    {CMD_CLEARATTACHMENTS, "VUID-vkCmdClearAttachments-commandBuffer-recording"},
    {CMD_CLEARCOLORIMAGE, "VUID-vkCmdClearColorImage-commandBuffer-recording"},
    {CMD_CLEARDEPTHSTENCILIMAGE, "VUID-vkCmdClearDepthStencilImage-commandBuffer-recording"},
    {CMD_COPYBUFFER, "VUID-vkCmdCopyBuffer-commandBuffer-recording"},
    {CMD_COPYBUFFERTOIMAGE, "VUID-vkCmdCopyBufferToImage-commandBuffer-recording"},
    {CMD_COPYIMAGE, "VUID-vkCmdCopyImage-commandBuffer-recording"},
    {CMD_COPYIMAGETOBUFFER, "VUID-vkCmdCopyImageToBuffer-commandBuffer-recording"},
    {CMD_COPYQUERYPOOLRESULTS, "VUID-vkCmdCopyQueryPoolResults-commandBuffer-recording"},
    {CMD_DEBUGMARKERBEGINEXT, "VUID-vkCmdDebugMarkerBeginEXT-commandBuffer-recording"},
    {CMD_DEBUGMARKERENDEXT, "VUID-vkCmdDebugMarkerEndEXT-commandBuffer-recording"},
    {CMD_DEBUGMARKERINSERTEXT, "VUID-vkCmdDebugMarkerInsertEXT-commandBuffer-recording"},
    {CMD_DISPATCH, "VUID-vkCmdDispatch-commandBuffer-recording"},
    // Exclude KHX (if not already present) { CMD_DISPATCHBASEKHX, "VUID-vkCmdDispatchBase-commandBuffer-recording" },
    {CMD_DISPATCHINDIRECT, "VUID-vkCmdDispatchIndirect-commandBuffer-recording"},
    {CMD_DRAW, "VUID-vkCmdDraw-commandBuffer-recording"},
    {CMD_DRAWINDEXED, "VUID-vkCmdDrawIndexed-commandBuffer-recording"},
    {CMD_DRAWINDEXEDINDIRECT, "VUID-vkCmdDrawIndexedIndirect-commandBuffer-recording"},
    // Exclude vendor ext (if not already present) { CMD_DRAWINDEXEDINDIRECTCOUNTAMD,
    // "VUID-vkCmdDrawIndexedIndirectCountAMD-commandBuffer-recording" },
    {CMD_DRAWINDEXEDINDIRECTCOUNTKHR, "VUID-vkCmdDrawIndexedIndirectCountKHR-commandBuffer-recording"},
    {CMD_DRAWINDIRECT, "VUID-vkCmdDrawIndirect-commandBuffer-recording"},
    // Exclude vendor ext (if not already present) { CMD_DRAWINDIRECTCOUNTAMD,
    // "VUID-vkCmdDrawIndirectCountAMD-commandBuffer-recording" },
    {CMD_DRAWINDIRECTCOUNTKHR, "VUID-vkCmdDrawIndirectCountKHR-commandBuffer-recording"},
    {CMD_DRAWMESHTASKSNV, "VUID-vkCmdDrawMeshTasksNV-commandBuffer-recording"},
    {CMD_DRAWMESHTASKSINDIRECTNV, "VUID-vkCmdDrawMeshTasksIndirectNV-commandBuffer-recording"},
    {CMD_DRAWMESHTASKSINDIRECTCOUNTNV, "VUID-vkCmdDrawMeshTasksIndirectCountNV-commandBuffer-recording"},
    {CMD_ENDCOMMANDBUFFER, "VUID-vkEndCommandBuffer-commandBuffer-00059"},
    {CMD_ENDQUERY, "VUID-vkCmdEndQuery-commandBuffer-recording"},
    {CMD_ENDRENDERPASS, "VUID-vkCmdEndRenderPass-commandBuffer-recording"},
    {CMD_ENDRENDERPASS2KHR, "VUID-vkCmdEndRenderPass2KHR-commandBuffer-recording"},
    {CMD_EXECUTECOMMANDS, "VUID-vkCmdExecuteCommands-commandBuffer-recording"},
    {CMD_FILLBUFFER, "VUID-vkCmdFillBuffer-commandBuffer-recording"},
    {CMD_NEXTSUBPASS, "VUID-vkCmdNextSubpass-commandBuffer-recording"},
    {CMD_NEXTSUBPASS2KHR, "VUID-vkCmdNextSubpass2KHR-commandBuffer-recording"},
    {CMD_PIPELINEBARRIER, "VUID-vkCmdPipelineBarrier-commandBuffer-recording"},
    // Exclude vendor ext (if not already present) { CMD_PROCESSCOMMANDSNVX, "VUID-vkCmdProcessCommandsNVX-commandBuffer-recording"
    // },
    {CMD_PUSHCONSTANTS, "VUID-vkCmdPushConstants-commandBuffer-recording"},
    {CMD_PUSHDESCRIPTORSETKHR, "VUID-vkCmdPushDescriptorSetKHR-commandBuffer-recording"},
    {CMD_PUSHDESCRIPTORSETWITHTEMPLATEKHR, "VUID-vkCmdPushDescriptorSetWithTemplateKHR-commandBuffer-recording"},
    // Exclude vendor ext (if not already present) { CMD_RESERVESPACEFORCOMMANDSNVX,
    // "VUID-vkCmdReserveSpaceForCommandsNVX-commandBuffer-recording" },
    {CMD_RESETEVENT, "VUID-vkCmdResetEvent-commandBuffer-recording"},
    {CMD_RESETQUERYPOOL, "VUID-vkCmdResetQueryPool-commandBuffer-recording"},
    {CMD_RESOLVEIMAGE, "VUID-vkCmdResolveImage-commandBuffer-recording"},
    {CMD_SETBLENDCONSTANTS, "VUID-vkCmdSetBlendConstants-commandBuffer-recording"},
    {CMD_SETDEPTHBIAS, "VUID-vkCmdSetDepthBias-commandBuffer-recording"},
    {CMD_SETDEPTHBOUNDS, "VUID-vkCmdSetDepthBounds-commandBuffer-recording"},
    // Exclude KHX (if not already present) { CMD_SETDEVICEMASKKHX, "VUID-vkCmdSetDeviceMask-commandBuffer-recording" },
    {CMD_SETDISCARDRECTANGLEEXT, "VUID-vkCmdSetDiscardRectangleEXT-commandBuffer-recording"},
    {CMD_SETEVENT, "VUID-vkCmdSetEvent-commandBuffer-recording"},
    {CMD_SETEXCLUSIVESCISSOR, "VUID-vkCmdSetExclusiveScissorNV-commandBuffer-recording"},
    {CMD_SETLINEWIDTH, "VUID-vkCmdSetLineWidth-commandBuffer-recording"},
    {CMD_SETSAMPLELOCATIONSEXT, "VUID-vkCmdSetSampleLocationsEXT-commandBuffer-recording"},
    {CMD_SETSCISSOR, "VUID-vkCmdSetScissor-commandBuffer-recording"},
    {CMD_SETSTENCILCOMPAREMASK, "VUID-vkCmdSetStencilCompareMask-commandBuffer-recording"},
    {CMD_SETSTENCILREFERENCE, "VUID-vkCmdSetStencilReference-commandBuffer-recording"},
    {CMD_SETSTENCILWRITEMASK, "VUID-vkCmdSetStencilWriteMask-commandBuffer-recording"},
    {CMD_SETVIEWPORT, "VUID-vkCmdSetViewport-commandBuffer-recording"},
    {CMD_SETVIEWPORTSHADINGRATEPALETTE, "VUID-vkCmdSetViewportShadingRatePaletteNV-commandBuffer-recording"},
    // Exclude vendor ext (if not already present) { CMD_SETVIEWPORTWSCALINGNV,
    // "VUID-vkCmdSetViewportWScalingNV-commandBuffer-recording" },
    {CMD_UPDATEBUFFER, "VUID-vkCmdUpdateBuffer-commandBuffer-recording"},
    {CMD_WAITEVENTS, "VUID-vkCmdWaitEvents-commandBuffer-recording"},
    {CMD_WRITETIMESTAMP, "VUID-vkCmdWriteTimestamp-commandBuffer-recording"},
};
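
// Illustrative lookup (sketch only): given a CMD_TYPE, the map yields the command's
// "must be recording" VUID, with CMD_NONE/kVUIDUndefined as the fallback:
//
//   auto it = must_be_recording_map.find(CMD_DRAW);
//   const char *vuid = (it != must_be_recording_map.cend()) ? it->second.c_str() : kVUIDUndefined;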

// Validate the given command being added to the specified cmd buffer, flagging errors if CB is not in the recording state or if
// there's an issue with the Cmd ordering
bool CoreChecks::ValidateCmd(layer_data *dev_data, const GLOBAL_CB_NODE *cb_state, const CMD_TYPE cmd, const char *caller_name) {
    switch (cb_state->state) {
        case CB_RECORDING:
            return ValidateCmdSubpassState(dev_data, cb_state, cmd);

        case CB_INVALID_COMPLETE:
        case CB_INVALID_INCOMPLETE:
            return ReportInvalidCommandBuffer(dev_data, cb_state, caller_name);

        default:
            auto error_it = must_be_recording_map.find(cmd);
            // This assert lets us know that a vkCmd.* entrypoint has been added without enabling it in the map
            assert(error_it != must_be_recording_map.cend());
            if (error_it == must_be_recording_map.cend()) {
                error_it = must_be_recording_map.find(CMD_NONE);  // But we'll handle the asserting case, in case of a test gap
            }
            const auto error = error_it->second;
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                           HandleToUint64(cb_state->commandBuffer), error,
                           "You must call vkBeginCommandBuffer() before this call to %s.", caller_name);
    }
}
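
// Typical usage (illustrative sketch; caller names are assumptions): each vkCmd*
// validation entry point funnels through ValidateCmd with its own CMD_TYPE and
// display name, e.g.:
//
//   bool skip = ValidateCmd(dev_data, cb_state, CMD_DRAW, "vkCmdDraw()");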

// For given object struct return a ptr of BASE_NODE type for its wrapping struct
BASE_NODE *CoreChecks::GetStateStructPtrFromObject(layer_data *dev_data, VK_OBJECT object_struct) {
    BASE_NODE *base_ptr = nullptr;
    switch (object_struct.type) {
        case kVulkanObjectTypeDescriptorSet: {
            base_ptr = GetSetNode(reinterpret_cast<VkDescriptorSet &>(object_struct.handle));
            break;
        }
        case kVulkanObjectTypeSampler: {
            base_ptr = GetSamplerState(reinterpret_cast<VkSampler &>(object_struct.handle));
            break;
        }
        case kVulkanObjectTypeQueryPool: {
            base_ptr = GetQueryPoolNode(reinterpret_cast<VkQueryPool &>(object_struct.handle));
            break;
        }
        case kVulkanObjectTypePipeline: {
            base_ptr = GetPipelineState(reinterpret_cast<VkPipeline &>(object_struct.handle));
            break;
        }
        case kVulkanObjectTypeBuffer: {
            base_ptr = GetBufferState(reinterpret_cast<VkBuffer &>(object_struct.handle));
            break;
        }
        case kVulkanObjectTypeBufferView: {
            base_ptr = GetBufferViewState(reinterpret_cast<VkBufferView &>(object_struct.handle));
            break;
        }
        case kVulkanObjectTypeImage: {
            base_ptr = GetImageState(reinterpret_cast<VkImage &>(object_struct.handle));
            break;
        }
        case kVulkanObjectTypeImageView: {
            base_ptr = GetImageViewState(reinterpret_cast<VkImageView &>(object_struct.handle));
            break;
        }
        case kVulkanObjectTypeEvent: {
            base_ptr = GetEventNode(reinterpret_cast<VkEvent &>(object_struct.handle));
            break;
        }
        case kVulkanObjectTypeDescriptorPool: {
            base_ptr = GetDescriptorPoolState(reinterpret_cast<VkDescriptorPool &>(object_struct.handle));
            break;
        }
        case kVulkanObjectTypeCommandPool: {
            base_ptr = GetCommandPoolNode(reinterpret_cast<VkCommandPool &>(object_struct.handle));
            break;
        }
        case kVulkanObjectTypeFramebuffer: {
            base_ptr = GetFramebufferState(reinterpret_cast<VkFramebuffer &>(object_struct.handle));
            break;
        }
        case kVulkanObjectTypeRenderPass: {
            base_ptr = GetRenderPassState(reinterpret_cast<VkRenderPass &>(object_struct.handle));
            break;
        }
        case kVulkanObjectTypeDeviceMemory: {
            base_ptr = GetMemObjInfo(reinterpret_cast<VkDeviceMemory &>(object_struct.handle));
            break;
        }
        default:
            // TODO : Any other objects to be handled here?
            assert(0);
            break;
    }
    return base_ptr;
}

// Tie the VK_OBJECT to the cmd buffer which includes:
//  Add object_binding to cmd buffer
//  Add cb_binding to object
static void AddCommandBufferBinding(std::unordered_set<GLOBAL_CB_NODE *> *cb_bindings, VK_OBJECT obj, GLOBAL_CB_NODE *cb_node) {
    cb_bindings->insert(cb_node);
    cb_node->object_bindings.insert(obj);
}
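// Illustrative use (sketch; buffer and buffer_state are assumed locals): binding a buffer
// to a command buffer records the association in both directions:
//
//   AddCommandBufferBinding(&buffer_state->cb_bindings,
//                           {HandleToUint64(buffer), kVulkanObjectTypeBuffer}, cb_node);
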
// For a given object, if cb_node is in that object's cb_bindings, remove cb_node
void CoreChecks::RemoveCommandBufferBinding(layer_data *dev_data, VK_OBJECT const *object, GLOBAL_CB_NODE *cb_node) {
    BASE_NODE *base_obj = GetStateStructPtrFromObject(dev_data, *object);
    if (base_obj) base_obj->cb_bindings.erase(cb_node);
}
// Reset the command buffer state
//  Maintain the createInfo and set state to CB_NEW, but clear all other state
void CoreChecks::ResetCommandBufferState(layer_data *dev_data, const VkCommandBuffer cb) {
    GLOBAL_CB_NODE *pCB = dev_data->commandBufferMap[cb];
    if (pCB) {
        pCB->in_use.store(0);
        // Reset CB state (note that createInfo is not cleared)
        pCB->commandBuffer = cb;
        memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
        memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
        pCB->hasDrawCmd = false;
        pCB->state = CB_NEW;
        pCB->submitCount = 0;
        pCB->image_layout_change_count = 1;  // Start at 1. 0 is insert value for validation cache versions, s.t. new == dirty
        pCB->status = 0;
        pCB->static_status = 0;
        pCB->viewportMask = 0;
        pCB->scissorMask = 0;

        for (auto &item : pCB->lastBound) {
            item.second.reset();
        }

        memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo));
        pCB->activeRenderPass = nullptr;
        pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE;
        pCB->activeSubpass = 0;
        pCB->broken_bindings.clear();
        pCB->waitedEvents.clear();
        pCB->events.clear();
        pCB->writeEventsBeforeWait.clear();
        pCB->waitedEventsBeforeQueryReset.clear();
        pCB->queryToStateMap.clear();
        pCB->activeQueries.clear();
        pCB->startedQueries.clear();
        pCB->imageLayoutMap.clear();
        pCB->eventToStageMap.clear();
        pCB->draw_data.clear();
        pCB->current_draw_data.vertex_buffer_bindings.clear();
        pCB->vertex_buffer_used = false;
        pCB->primaryCommandBuffer = VK_NULL_HANDLE;
        // If secondary, invalidate any primary command buffer that may call us.
        if (pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
            InvalidateCommandBuffers(dev_data, pCB->linkedCommandBuffers, {HandleToUint64(cb), kVulkanObjectTypeCommandBuffer});
        }

        // Remove reverse command buffer links.
        for (auto pSubCB : pCB->linkedCommandBuffers) {
            pSubCB->linkedCommandBuffers.erase(pCB);
        }
        pCB->linkedCommandBuffers.clear();
        pCB->updateImages.clear();
        pCB->updateBuffers.clear();
        ClearCmdBufAndMemReferences(dev_data, pCB);
        pCB->queue_submit_functions.clear();
        pCB->cmd_execute_commands_functions.clear();
        pCB->eventUpdates.clear();
        pCB->queryUpdates.clear();

        // Remove object bindings
        for (auto obj : pCB->object_bindings) {
            RemoveCommandBufferBinding(dev_data, &obj, pCB);
        }
        pCB->object_bindings.clear();
        // Remove this cmdBuffer's reference from each FrameBuffer's CB ref list
        for (auto framebuffer : pCB->framebuffers) {
            auto fb_state = GetFramebufferState(framebuffer);
            if (fb_state) fb_state->cb_bindings.erase(pCB);
        }
        pCB->framebuffers.clear();
        pCB->activeFramebuffer = VK_NULL_HANDLE;
        memset(&pCB->index_buffer_binding, 0, sizeof(pCB->index_buffer_binding));

        pCB->qfo_transfer_image_barriers.Reset();
        pCB->qfo_transfer_buffer_barriers.Reset();
    }
}

CBStatusFlags MakeStaticStateMask(VkPipelineDynamicStateCreateInfo const *ds) {
    // initially assume everything is static state
    CBStatusFlags flags = CBSTATUS_ALL_STATE_SET;

    if (ds) {
        for (uint32_t i = 0; i < ds->dynamicStateCount; i++) {
            switch (ds->pDynamicStates[i]) {
                case VK_DYNAMIC_STATE_LINE_WIDTH:
                    flags &= ~CBSTATUS_LINE_WIDTH_SET;
                    break;
                case VK_DYNAMIC_STATE_DEPTH_BIAS:
                    flags &= ~CBSTATUS_DEPTH_BIAS_SET;
                    break;
                case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
                    flags &= ~CBSTATUS_BLEND_CONSTANTS_SET;
                    break;
                case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
                    flags &= ~CBSTATUS_DEPTH_BOUNDS_SET;
                    break;
                case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
                    flags &= ~CBSTATUS_STENCIL_READ_MASK_SET;
                    break;
                case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
                    flags &= ~CBSTATUS_STENCIL_WRITE_MASK_SET;
                    break;
                case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
                    flags &= ~CBSTATUS_STENCIL_REFERENCE_SET;
                    break;
                case VK_DYNAMIC_STATE_SCISSOR:
                    flags &= ~CBSTATUS_SCISSOR_SET;
                    break;
                case VK_DYNAMIC_STATE_VIEWPORT:
                    flags &= ~CBSTATUS_VIEWPORT_SET;
                    break;
                case VK_DYNAMIC_STATE_EXCLUSIVE_SCISSOR_NV:
                    flags &= ~CBSTATUS_EXCLUSIVE_SCISSOR_SET;
                    break;
                case VK_DYNAMIC_STATE_VIEWPORT_SHADING_RATE_PALETTE_NV:
                    flags &= ~CBSTATUS_SHADING_RATE_PALETTE_SET;
                    break;
                default:
                    break;
            }
        }
    }

    return flags;
}
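
// Worked example (illustrative sketch; ds_ci and its values are examples only): a pipeline
// that declares viewport and scissor as dynamic clears exactly those bits, so draw-time
// validation still requires vkCmdSetViewport/vkCmdSetScissor to have been recorded:
//
//   VkDynamicState dynamic[] = {VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR};
//   VkPipelineDynamicStateCreateInfo ds_ci = {};
//   ds_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
//   ds_ci.dynamicStateCount = 2;
//   ds_ci.pDynamicStates = dynamic;
//   CBStatusFlags static_mask = MakeStaticStateMask(&ds_ci);
//   // static_mask == CBSTATUS_ALL_STATE_SET & ~(CBSTATUS_VIEWPORT_SET | CBSTATUS_SCISSOR_SET)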

// Flags validation error if the associated call is made inside a render pass. The apiName routine should ONLY be called outside a
// render pass.
bool CoreChecks::InsideRenderPass(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const char *apiName, const char *msgCode) {
    bool inside = false;
    if (pCB->activeRenderPass) {
        inside = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                         HandleToUint64(pCB->commandBuffer), msgCode,
                         "%s: It is invalid to issue this call inside an active render pass (%s).", apiName,
                         dev_data->report_data->FormatHandle(pCB->activeRenderPass->renderPass).c_str());
    }
    return inside;
}

// Flags validation error if the associated call is made outside a render pass. The apiName
// routine should ONLY be called inside a render pass.
bool CoreChecks::OutsideRenderPass(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, const char *apiName, const char *msgCode) {
    bool outside = false;
    if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
        ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
         !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
        outside = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                          HandleToUint64(pCB->commandBuffer), msgCode, "%s: This call must be issued inside an active render pass.",
                          apiName);
    }
    return outside;
}
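
// Illustrative call sites (sketch only; the VUID strings shown are assumptions): commands
// restricted to one side of a render pass use these helpers symmetrically:
//
//   skip |= OutsideRenderPass(dev_data, cb_state, "vkCmdDraw()", "VUID-vkCmdDraw-renderpass");
//   skip |= InsideRenderPass(dev_data, cb_state, "vkCmdCopyBuffer()", "VUID-vkCmdCopyBuffer-renderpass");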

void CoreChecks::InitGpuValidation(instance_layer_data *instance_data) {
    // Process the layer settings file.
    enum CoreValidationGpuFlagBits {
        CORE_VALIDATION_GPU_VALIDATION_ALL_BIT = 0x00000001,
        CORE_VALIDATION_GPU_VALIDATION_RESERVE_BINDING_SLOT_BIT = 0x00000002,
    };
    typedef VkFlags CoreGPUFlags;
    static const std::unordered_map<std::string, VkFlags> gpu_flags_option_definitions = {
        {std::string("all"), CORE_VALIDATION_GPU_VALIDATION_ALL_BIT},
        {std::string("reserve_binding_slot"), CORE_VALIDATION_GPU_VALIDATION_RESERVE_BINDING_SLOT_BIT},
    };
    std::string gpu_flags_key = "lunarg_core_validation.gpu_validation";
    CoreGPUFlags gpu_flags = GetLayerOptionFlags(gpu_flags_key, gpu_flags_option_definitions, 0);
    if (gpu_flags & CORE_VALIDATION_GPU_VALIDATION_ALL_BIT) {
        instance_data->instance_state->enabled.gpu_validation = true;
    }
    if (gpu_flags & CORE_VALIDATION_GPU_VALIDATION_RESERVE_BINDING_SLOT_BIT) {
        instance_data->instance_state->enabled.gpu_validation_reserve_binding_slot = true;
    }
}

void CoreChecks::PostCallRecordCreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
                                              VkInstance *pInstance, VkResult result) {
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(*pInstance), instance_layer_data_map);
    if (VK_SUCCESS != result) return;
    InitGpuValidation(instance_data);
}

static bool ValidatePhysicalDeviceQueueFamily(instance_layer_data *instance_data, const PHYSICAL_DEVICE_STATE *pd_state,
                                              uint32_t requested_queue_family, const char *err_code, const char *cmd_name,
                                              const char *queue_family_var_name) {
    bool skip = false;

    const char *conditional_ext_cmd = instance_data->instance_extensions.vk_khr_get_physical_device_properties_2
                                          ? " or vkGetPhysicalDeviceQueueFamilyProperties2[KHR]"
                                          : "";

    std::string count_note = (UNCALLED == pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState)
                                 ? "the pQueueFamilyPropertyCount was never obtained"
                                 : "i.e. is not less than " + std::to_string(pd_state->queue_family_count);

    if (requested_queue_family >= pd_state->queue_family_count) {
        skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                        HandleToUint64(pd_state->phys_device), err_code,
                        "%s: %s (= %" PRIu32
                        ") is not less than any previously obtained pQueueFamilyPropertyCount from "
                        "vkGetPhysicalDeviceQueueFamilyProperties%s (%s).",
                        cmd_name, queue_family_var_name, requested_queue_family, conditional_ext_cmd, count_note.c_str());
    }
    return skip;
}

// Verify VkDeviceQueueCreateInfos
static bool ValidateDeviceQueueCreateInfos(instance_layer_data *instance_data, const PHYSICAL_DEVICE_STATE *pd_state,
                                           uint32_t info_count, const VkDeviceQueueCreateInfo *infos) {
    bool skip = false;

    std::unordered_set<uint32_t> queue_family_set;

    for (uint32_t i = 0; i < info_count; ++i) {
        const auto requested_queue_family = infos[i].queueFamilyIndex;

        // Verify that the requested queue family is known to be valid at this point in time
        std::string queue_family_var_name = "pCreateInfo->pQueueCreateInfos[" + std::to_string(i) + "].queueFamilyIndex";
        skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, requested_queue_family,
                                                  "VUID-VkDeviceQueueCreateInfo-queueFamilyIndex-00381", "vkCreateDevice",
                                                  queue_family_var_name.c_str());
        if (queue_family_set.count(requested_queue_family)) {
            skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                            HandleToUint64(pd_state->phys_device), "VUID-VkDeviceCreateInfo-queueFamilyIndex-00372",
                            "CreateDevice(): %s (=%" PRIu32 ") is not unique within pQueueCreateInfos.",
                            queue_family_var_name.c_str(), requested_queue_family);
        } else {
            queue_family_set.insert(requested_queue_family);
        }

        // Verify that the requested queue count of the queue family is known to be valid at this point in time
        if (requested_queue_family < pd_state->queue_family_count) {
            const auto requested_queue_count = infos[i].queueCount;
            const auto queue_family_props_count = pd_state->queue_family_properties.size();
            const bool queue_family_has_props = requested_queue_family < queue_family_props_count;
            const char *conditional_ext_cmd = instance_data->instance_extensions.vk_khr_get_physical_device_properties_2
                                                  ? " or vkGetPhysicalDeviceQueueFamilyProperties2[KHR]"
                                                  : "";
            std::string count_note =
                !queue_family_has_props
                    ? "the pQueueFamilyProperties[" + std::to_string(requested_queue_family) + "] was never obtained"
                    : "i.e. is not less than or equal to " +
                          std::to_string(pd_state->queue_family_properties[requested_queue_family].queueCount);

            if (!queue_family_has_props ||
                requested_queue_count > pd_state->queue_family_properties[requested_queue_family].queueCount) {
                skip |= log_msg(
                    instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                    HandleToUint64(pd_state->phys_device), "VUID-VkDeviceQueueCreateInfo-queueCount-00382",
                    "vkCreateDevice: pCreateInfo->pQueueCreateInfos[%" PRIu32 "].queueCount (=%" PRIu32
                    ") is not less than or equal to available queue count for this pCreateInfo->pQueueCreateInfos[%" PRIu32
                    "].queueFamilyIndex (=%" PRIu32 ") obtained previously from vkGetPhysicalDeviceQueueFamilyProperties%s (%s).",
                    i, requested_queue_count, i, requested_queue_family, conditional_ext_cmd, count_note.c_str());
            }
        }
    }

    return skip;
}

bool CoreChecks::PreCallValidateCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
                                             const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(gpu), instance_layer_data_map);
    bool skip = false;
    auto pd_state = GetPhysicalDeviceState(gpu);

    // TODO: object_tracker should perhaps do this instead,
    //       and it does not seem to currently work anyway -- the loader just crashes before this point
    if (!pd_state) {
        skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                        0, kVUID_Core_DevLimit_MustQueryCount,
                        "Invalid call to vkCreateDevice() without first calling vkEnumeratePhysicalDevices().");
    }
    skip |=
        ValidateDeviceQueueCreateInfos(instance_data, pd_state, pCreateInfo->queueCreateInfoCount, pCreateInfo->pQueueCreateInfos);
    return skip;
}

void CoreChecks::PreCallRecordCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
                                           const VkAllocationCallbacks *pAllocator, VkDevice *pDevice,
                                           std::unique_ptr<safe_VkDeviceCreateInfo> &modified_create_info) {
    // GPU Validation can possibly turn on device features, so give it a chance to change the create info.
    if (GetEnables()->gpu_validation) {
        VkPhysicalDeviceFeatures supported_features;
        instance_dispatch_table.GetPhysicalDeviceFeatures(gpu, &supported_features);
        GpuPreCallRecordCreateDevice(gpu, modified_create_info, &supported_features);
    }
}

void CoreChecks::PostCallRecordCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
                                            const VkAllocationCallbacks *pAllocator, VkDevice *pDevice, VkResult result) {
    if (VK_SUCCESS != result) return;

    const VkPhysicalDeviceFeatures *enabled_features_found = pCreateInfo->pEnabledFeatures;
    if (nullptr == enabled_features_found) {
        const auto *features2 = lvl_find_in_chain<VkPhysicalDeviceFeatures2KHR>(pCreateInfo->pNext);
        if (features2) {
            enabled_features_found = &(features2->features);
        }
    }

    ValidationObject *device_object = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);
    ValidationObject *validation_data = GetValidationObject(device_object->object_dispatch, LayerObjectTypeCoreValidation);
    CoreChecks *core_checks = static_cast<CoreChecks *>(validation_data);

    if (nullptr == enabled_features_found) {
        core_checks->enabled_features.core = {};
    } else {
        core_checks->enabled_features.core = *enabled_features_found;
    }

    // Make sure that queue_family_properties are obtained for this device's physical_device, even if the app has not
    // previously set them through an explicit API call.
    uint32_t count;
    auto pd_state = GetPhysicalDeviceState(gpu);
    instance_dispatch_table.GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
    pd_state->queue_family_count = std::max(pd_state->queue_family_count, count);
    pd_state->queue_family_properties.resize(std::max(static_cast<uint32_t>(pd_state->queue_family_properties.size()), count));
    instance_dispatch_table.GetPhysicalDeviceQueueFamilyProperties(gpu, &count, &pd_state->queue_family_properties[0]);
    // Save local link to this device's physical device state
    core_checks->physical_device_state = pd_state;

    const auto *device_group_ci = lvl_find_in_chain<VkDeviceGroupDeviceCreateInfo>(pCreateInfo->pNext);
    core_checks->physical_device_count =
        device_group_ci && device_group_ci->physicalDeviceCount > 0 ? device_group_ci->physicalDeviceCount : 1;

    const auto *descriptor_indexing_features = lvl_find_in_chain<VkPhysicalDeviceDescriptorIndexingFeaturesEXT>(pCreateInfo->pNext);
    if (descriptor_indexing_features) {
        core_checks->enabled_features.descriptor_indexing = *descriptor_indexing_features;
    }

    const auto *eight_bit_storage_features = lvl_find_in_chain<VkPhysicalDevice8BitStorageFeaturesKHR>(pCreateInfo->pNext);
    if (eight_bit_storage_features) {
        core_checks->enabled_features.eight_bit_storage = *eight_bit_storage_features;
    }

    const auto *exclusive_scissor_features = lvl_find_in_chain<VkPhysicalDeviceExclusiveScissorFeaturesNV>(pCreateInfo->pNext);
    if (exclusive_scissor_features) {
        core_checks->enabled_features.exclusive_scissor = *exclusive_scissor_features;
    }

    const auto *shading_rate_image_features = lvl_find_in_chain<VkPhysicalDeviceShadingRateImageFeaturesNV>(pCreateInfo->pNext);
    if (shading_rate_image_features) {
        core_checks->enabled_features.shading_rate_image = *shading_rate_image_features;
    }

    const auto *mesh_shader_features = lvl_find_in_chain<VkPhysicalDeviceMeshShaderFeaturesNV>(pCreateInfo->pNext);
    if (mesh_shader_features) {
        core_checks->enabled_features.mesh_shader = *mesh_shader_features;
    }

    const auto *inline_uniform_block_features =
        lvl_find_in_chain<VkPhysicalDeviceInlineUniformBlockFeaturesEXT>(pCreateInfo->pNext);
    if (inline_uniform_block_features) {
        core_checks->enabled_features.inline_uniform_block = *inline_uniform_block_features;
    }

    const auto *transform_feedback_features = lvl_find_in_chain<VkPhysicalDeviceTransformFeedbackFeaturesEXT>(pCreateInfo->pNext);
    if (transform_feedback_features) {
        core_checks->enabled_features.transform_feedback_features = *transform_feedback_features;
    }

    const auto *float16_int8_features = lvl_find_in_chain<VkPhysicalDeviceFloat16Int8FeaturesKHR>(pCreateInfo->pNext);
    if (float16_int8_features) {
        core_checks->enabled_features.float16_int8 = *float16_int8_features;
    }

    const auto *vtx_attrib_div_features = lvl_find_in_chain<VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT>(pCreateInfo->pNext);
    if (vtx_attrib_div_features) {
        core_checks->enabled_features.vtx_attrib_divisor_features = *vtx_attrib_div_features;
    }

    const auto *scalar_block_layout_features = lvl_find_in_chain<VkPhysicalDeviceScalarBlockLayoutFeaturesEXT>(pCreateInfo->pNext);
    if (scalar_block_layout_features) {
        core_checks->enabled_features.scalar_block_layout_features = *scalar_block_layout_features;
    }

    const auto *buffer_address = lvl_find_in_chain<VkPhysicalDeviceBufferAddressFeaturesEXT>(pCreateInfo->pNext);
    if (buffer_address) {
        core_checks->enabled_features.buffer_address = *buffer_address;
    }

    // Store physical device properties and physical device mem limits into device layer_data structs
    instance_dispatch_table.GetPhysicalDeviceMemoryProperties(gpu, &core_checks->phys_dev_mem_props);
    instance_dispatch_table.GetPhysicalDeviceProperties(gpu, &core_checks->phys_dev_props);

    if (core_checks->device_extensions.vk_khr_push_descriptor) {
        // Get the needed push_descriptor limits
        auto push_descriptor_prop = lvl_init_struct<VkPhysicalDevicePushDescriptorPropertiesKHR>();
        auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&push_descriptor_prop);
        instance_dispatch_table.GetPhysicalDeviceProperties2KHR(gpu, &prop2);
        core_checks->phys_dev_ext_props.max_push_descriptors = push_descriptor_prop.maxPushDescriptors;
    }
    if (core_checks->device_extensions.vk_ext_descriptor_indexing) {
        // Get the needed descriptor_indexing limits
        auto descriptor_indexing_props = lvl_init_struct<VkPhysicalDeviceDescriptorIndexingPropertiesEXT>();
        auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&descriptor_indexing_props);
        instance_dispatch_table.GetPhysicalDeviceProperties2KHR(gpu, &prop2);
        core_checks->phys_dev_ext_props.descriptor_indexing_props = descriptor_indexing_props;
    }
    if (core_checks->device_extensions.vk_nv_shading_rate_image) {
        // Get the needed shading rate image limits
        auto shading_rate_image_props = lvl_init_struct<VkPhysicalDeviceShadingRateImagePropertiesNV>();
        auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&shading_rate_image_props);
        instance_dispatch_table.GetPhysicalDeviceProperties2KHR(gpu, &prop2);
        core_checks->phys_dev_ext_props.shading_rate_image_props = shading_rate_image_props;
    }
    if (core_checks->device_extensions.vk_nv_mesh_shader) {
        // Get the needed mesh shader limits
        auto mesh_shader_props = lvl_init_struct<VkPhysicalDeviceMeshShaderPropertiesNV>();
        auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&mesh_shader_props);
        instance_dispatch_table.GetPhysicalDeviceProperties2KHR(gpu, &prop2);
        core_checks->phys_dev_ext_props.mesh_shader_props = mesh_shader_props;
    }
    if (core_checks->device_extensions.vk_ext_inline_uniform_block) {
        // Get the needed inline uniform block limits
        auto inline_uniform_block_props = lvl_init_struct<VkPhysicalDeviceInlineUniformBlockPropertiesEXT>();
        auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&inline_uniform_block_props);
        instance_dispatch_table.GetPhysicalDeviceProperties2KHR(gpu, &prop2);
        core_checks->phys_dev_ext_props.inline_uniform_block_props = inline_uniform_block_props;
    }
    if (core_checks->device_extensions.vk_ext_vertex_attribute_divisor) {
        // Get the needed vertex attribute divisor limits
        auto vtx_attrib_divisor_props = lvl_init_struct<VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT>();
        auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&vtx_attrib_divisor_props);
        instance_dispatch_table.GetPhysicalDeviceProperties2KHR(gpu, &prop2);
        core_checks->phys_dev_ext_props.vtx_attrib_divisor_props = vtx_attrib_divisor_props;
    }
    if (core_checks->device_extensions.vk_khr_depth_stencil_resolve) {
        // Get the needed depth and stencil resolve modes
        auto depth_stencil_resolve_props = lvl_init_struct<VkPhysicalDeviceDepthStencilResolvePropertiesKHR>();
        auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&depth_stencil_resolve_props);
        instance_dispatch_table.GetPhysicalDeviceProperties2KHR(gpu, &prop2);
        core_checks->phys_dev_ext_props.depth_stencil_resolve_props = depth_stencil_resolve_props;
    }
    if (GetEnables()->gpu_validation) {
        // Copy any needed instance data into the gpu validation state
        core_checks->gpu_validation_state.reserve_binding_slot = GetEnables()->gpu_validation_reserve_binding_slot;
        core_checks->GpuPostCallRecordCreateDevice(core_checks);
    }

    // Store queue family data
    if ((pCreateInfo != nullptr) && (pCreateInfo->pQueueCreateInfos != nullptr)) {
        for (uint32_t i = 0; i < pCreateInfo->queueCreateInfoCount; ++i) {
            core_checks->queue_family_index_map.insert(
                std::make_pair(pCreateInfo->pQueueCreateInfos[i].queueFamilyIndex, pCreateInfo->pQueueCreateInfos[i].queueCount));
        }
    }
}

void CoreChecks::PreCallRecordDestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
    if (!device) return;
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (GetEnables()->gpu_validation) {
        GpuPreCallRecordDestroyDevice(device_data);
    }
    device_data->pipelineMap.clear();
    device_data->renderPassMap.clear();
    for (auto ii = device_data->commandBufferMap.begin(); ii != device_data->commandBufferMap.end(); ++ii) {
        delete (*ii).second;
    }
    device_data->commandBufferMap.clear();
    // This will also delete all sets in the pool & remove them from setMap
    DeletePools(device_data);
    // All sets should be removed
    assert(device_data->setMap.empty());
    device_data->descriptorSetLayoutMap.clear();
    device_data->imageViewMap.clear();
    device_data->imageMap.clear();
    device_data->imageSubresourceMap.clear();
    device_data->imageLayoutMap.clear();
    device_data->bufferViewMap.clear();
    device_data->bufferMap.clear();
    // Queues persist until device is destroyed
    device_data->queueMap.clear();
    layer_debug_utils_destroy_device(device);
}

// For the given stage mask: if the Geometry shader stage is on without the geometryShader feature enabled, report geo_error_id;
//   if the Tessellation Control or Evaluation shader stages are on without the tessellationShader feature enabled, report
//   tess_error_id. Similarly for mesh and task shaders.
static bool ValidateStageMaskGsTsEnables(const layer_data *dev_data, VkPipelineStageFlags stageMask, const char *caller,
                                         const char *geo_error_id, const char *tess_error_id, const char *mesh_error_id,
                                         const char *task_error_id) {
    bool skip = false;
    if (!dev_data->enabled_features.core.geometryShader && (stageMask & VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT)) {
        skip |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, geo_error_id,
                    "%s call includes a stageMask with VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT bit set when device does not have "
                    "geometryShader feature enabled.",
                    caller);
    }
    if (!dev_data->enabled_features.core.tessellationShader &&
        (stageMask & (VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT))) {
        skip |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, tess_error_id,
                    "%s call includes a stageMask with VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT and/or "
                    "VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT bit(s) set when device does not have "
                    "tessellationShader feature enabled.",
                    caller);
    }
    if (!dev_data->enabled_features.mesh_shader.meshShader && (stageMask & VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV)) {
        skip |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, mesh_error_id,
                    "%s call includes a stageMask with VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV bit set when device does not have "
                    "VkPhysicalDeviceMeshShaderFeaturesNV::meshShader feature enabled.",
                    caller);
    }
    if (!dev_data->enabled_features.mesh_shader.taskShader && (stageMask & VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV)) {
        skip |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, task_error_id,
                    "%s call includes a stageMask with VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV bit set when device does not have "
                    "VkPhysicalDeviceMeshShaderFeaturesNV::taskShader feature enabled.",
                    caller);
    }
    return skip;
}
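
// Illustrative call site (sketch only; the VUID strings shown are assumptions): barrier and
// event commands validate each supplied stage mask this way, one error code per feature:
//
//   skip |= ValidateStageMaskGsTsEnables(dev_data, srcStageMask, "vkCmdSetEvent()",
//                                        "VUID-vkCmdSetEvent-stageMask-01150",
//                                        "VUID-vkCmdSetEvent-stageMask-01151",
//                                        "VUID-vkCmdSetEvent-stageMask-02107",
//                                        "VUID-vkCmdSetEvent-stageMask-02108");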

// Loop through bound objects and increment their in_use counts.
void CoreChecks::IncrementBoundObjects(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
    for (auto obj : cb_node->object_bindings) {
        auto base_obj = GetStateStructPtrFromObject(dev_data, obj);
        if (base_obj) {
            base_obj->in_use.fetch_add(1);
        }
    }
}
// Track which resources are in-flight by atomically incrementing their "in_use" count
void CoreChecks::IncrementResources(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
    cb_node->submitCount++;
    cb_node->in_use.fetch_add(1);

    // First increment for all "generic" objects bound to cmd buffer, followed by special-case objects below
    IncrementBoundObjects(dev_data, cb_node);
    // TODO : We should be able to remove the NULL look-up checks from the code below as long as
    //  all the corresponding cases are verified to cause CB_INVALID state and the CB_INVALID state
    //  should then be flagged prior to calling this function
    for (auto draw_data_element : cb_node->draw_data) {
        for (auto &vertex_buffer : draw_data_element.vertex_buffer_bindings) {
            auto buffer_state = GetBufferState(vertex_buffer.buffer);
            if (buffer_state) {
                buffer_state->in_use.fetch_add(1);
            }
        }
    }
    for (auto event : cb_node->writeEventsBeforeWait) {
        auto event_state = GetEventNode(event);
        if (event_state) event_state->write_in_use++;
    }
}
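// Each increment above is matched by a decrement in DecrementBoundResources()/RetireWorkOnQueue() when the submission
// retires, so a nonzero in_use count means the object is still referenced by work the GPU may be executing.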

// Note: This function assumes that the global lock is held by the calling thread.
// For the given queue, verify the queue state up to the given seq number.
// Currently the only check is that any events to be waited on prior to a QueryReset have been signalled.
bool CoreChecks::VerifyQueueStateToSeq(layer_data *dev_data, QUEUE_STATE *initial_queue, uint64_t initial_seq) {
    bool skip = false;

    // sequence number we want to validate up to, per queue
    std::unordered_map<QUEUE_STATE *, uint64_t> target_seqs{{initial_queue, initial_seq}};
    // sequence number we've completed validation for, per queue
    std::unordered_map<QUEUE_STATE *, uint64_t> done_seqs;
    std::vector<QUEUE_STATE *> worklist{initial_queue};

    while (worklist.size()) {
        auto queue = worklist.back();
        worklist.pop_back();

        auto target_seq = target_seqs[queue];
        auto seq = std::max(done_seqs[queue], queue->seq);
        auto sub_it = queue->submissions.begin() + int(seq - queue->seq);  // seq >= queue->seq

        for (; seq < target_seq; ++sub_it, ++seq) {
            for (auto &wait : sub_it->waitSemaphores) {
                auto other_queue = GetQueueState(wait.queue);

                if (other_queue == queue) continue;  // semaphores /always/ point backwards, so no point here.

                auto other_target_seq = std::max(target_seqs[other_queue], wait.seq);
                auto other_done_seq = std::max(done_seqs[other_queue], other_queue->seq);

                // if this wait is for another queue, and covers new sequence
                // numbers beyond what we've already validated, mark the new
                // target seq and (possibly re-)add the queue to the worklist.
                if (other_done_seq < other_target_seq) {
                    target_seqs[other_queue] = other_target_seq;
                    worklist.push_back(other_queue);
                }
            }
        }

        // finally mark the point we've now validated this queue to.
        done_seqs[queue] = seq;
    }

    return skip;
}
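// Illustrative walk-through: if queue A's submission #5 waits on a semaphore signaled by queue B's submission #3,
// validating A up to seq 5 adds B to the worklist with a target_seq of 3, and B is then validated up to that point
// (possibly enqueueing further queues it waits on). done_seqs keeps a re-added queue from re-validating ranges it has
// already covered.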

// When the given fence is retired, verify outstanding queue operations through the point of the fence
bool CoreChecks::VerifyQueueStateToFence(layer_data *dev_data, VkFence fence) {
    auto fence_state = GetFenceNode(fence);
    if (fence_state && fence_state->scope == kSyncScopeInternal && VK_NULL_HANDLE != fence_state->signaler.first) {
        return VerifyQueueStateToSeq(dev_data, GetQueueState(fence_state->signaler.first), fence_state->signaler.second);
    }
    return false;
}

// Decrement in-use count for objects bound to command buffer
void CoreChecks::DecrementBoundResources(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
    BASE_NODE *base_obj = nullptr;
    for (auto obj : cb_node->object_bindings) {
        base_obj = GetStateStructPtrFromObject(dev_data, obj);
        if (base_obj) {
            base_obj->in_use.fetch_sub(1);
        }
    }
}

void CoreChecks::RetireWorkOnQueue(layer_data *dev_data, QUEUE_STATE *pQueue, uint64_t seq) {
    std::unordered_map<VkQueue, uint64_t> otherQueueSeqs;

    // Roll this queue forward, one submission at a time.
    while (pQueue->seq < seq) {
        auto &submission = pQueue->submissions.front();

        for (auto &wait : submission.waitSemaphores) {
            auto pSemaphore = GetSemaphoreNode(wait.semaphore);
            if (pSemaphore) {
                pSemaphore->in_use.fetch_sub(1);
            }
            auto &lastSeq = otherQueueSeqs[wait.queue];
            lastSeq = std::max(lastSeq, wait.seq);
        }

        for (auto &semaphore : submission.signalSemaphores) {
            auto pSemaphore = GetSemaphoreNode(semaphore);
            if (pSemaphore) {
                pSemaphore->in_use.fetch_sub(1);
            }
        }

        for (auto &semaphore : submission.externalSemaphores) {
            auto pSemaphore = GetSemaphoreNode(semaphore);
            if (pSemaphore) {
                pSemaphore->in_use.fetch_sub(1);
            }
        }

        for (auto cb : submission.cbs) {
            auto cb_node = GetCBNode(cb);
            if (!cb_node) {
                continue;
            }
            // First perform decrement on general case bound objects
            DecrementBoundResources(dev_data, cb_node);
            for (auto draw_data_element : cb_node->draw_data) {
                for (auto &vertex_buffer_binding : draw_data_element.vertex_buffer_bindings) {
                    auto buffer_state = GetBufferState(vertex_buffer_binding.buffer);
                    if (buffer_state) {
                        buffer_state->in_use.fetch_sub(1);
                    }
                }
            }
            for (auto event : cb_node->writeEventsBeforeWait) {
                auto eventNode = dev_data->eventMap.find(event);
                if (eventNode != dev_data->eventMap.end()) {
                    eventNode->second.write_in_use--;
                }
            }
            for (auto queryStatePair : cb_node->queryToStateMap) {
                dev_data->queryToStateMap[queryStatePair.first] = queryStatePair.second;
            }
            for (auto eventStagePair : cb_node->eventToStageMap) {
                dev_data->eventMap[eventStagePair.first].stageMask = eventStagePair.second;
            }

            cb_node->in_use.fetch_sub(1);
        }

        auto pFence = GetFenceNode(submission.fence);
        if (pFence && pFence->scope == kSyncScopeInternal) {
            pFence->state = FENCE_RETIRED;
        }

        pQueue->submissions.pop_front();
        pQueue->seq++;
    }

    // Roll other queues forward to the highest seq we saw a wait for
    for (auto qs : otherQueueSeqs) {
        RetireWorkOnQueue(dev_data, GetQueueState(qs.first), qs.second);
    }
}
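// Note that retiring one queue can retire work on other queues as well: each semaphore wait recorded above proves the
// signaling queue progressed at least to wait.seq, so those queues are rolled forward recursively afterward.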

// Submit a fence to a queue, delimiting previous fences and previous untracked
// work by it.
static void SubmitFence(QUEUE_STATE *pQueue, FENCE_NODE *pFence, uint64_t submitCount) {
    pFence->state = FENCE_INFLIGHT;
    pFence->signaler.first = pQueue->queue;
    pFence->signaler.second = pQueue->seq + pQueue->submissions.size() + submitCount;
}
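// Illustrative arithmetic: with pQueue->seq == 10, four submissions already pending, and submitCount == 2, the fence's
// signaler.second becomes 16 (10 + 4 + 2), i.e. the fence retires when the queue's seq reaches 16.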

bool CoreChecks::ValidateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB, int current_submit_count) {
    bool skip = false;
    if ((pCB->in_use.load() || current_submit_count > 1) &&
        !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                        "VUID-vkQueueSubmit-pCommandBuffers-00071",
                        "Command buffer %s is already in use and is not marked for simultaneous use.",
                        dev_data->report_data->FormatHandle(pCB->commandBuffer).c_str());
    }
    return skip;
}

bool CoreChecks::ValidateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, const char *call_source,
                                            int current_submit_count, const char *vu_id) {
    bool skip = false;
    if (dev_data->instance_data->disabled.command_buffer_state) return skip;
    // Validate that a ONE_TIME_SUBMIT_BIT CB is not being submitted more than once
    if ((cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) &&
        (cb_state->submitCount + current_submit_count > 1)) {
        skip |= log_msg(
            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
            kVUID_Core_DrawState_CommandBufferSingleSubmitViolation,
            "Command buffer %s was begun with VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT set, but has been submitted 0x%" PRIxLEAST64
            " times.",
            dev_data->report_data->FormatHandle(cb_state->commandBuffer).c_str(), cb_state->submitCount + current_submit_count);
    }

    // Validate that cmd buffers have been updated
    switch (cb_state->state) {
        case CB_INVALID_INCOMPLETE:
        case CB_INVALID_COMPLETE:
            skip |= ReportInvalidCommandBuffer(dev_data, cb_state, call_source);
            break;

        case CB_NEW:
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)(cb_state->commandBuffer), vu_id,
                            "Command buffer %s used in the call to %s is unrecorded and contains no commands.",
                            dev_data->report_data->FormatHandle(cb_state->commandBuffer).c_str(), call_source);
            break;

        case CB_RECORDING:
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_state->commandBuffer), kVUID_Core_DrawState_NoEndCommandBuffer,
                            "You must call vkEndCommandBuffer() on command buffer %s before this call to %s!",
                            dev_data->report_data->FormatHandle(cb_state->commandBuffer).c_str(), call_source);
            break;

        default: /* recorded */
            break;
    }
    return skip;
}

bool CoreChecks::ValidateResources(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
    bool skip = false;

    // TODO : We should be able to remove the NULL look-up checks from the code below as long as
    //  all the corresponding cases are verified to cause CB_INVALID state and the CB_INVALID state
    //  should then be flagged prior to calling this function
    for (const auto &draw_data_element : cb_node->draw_data) {
        for (const auto &vertex_buffer_binding : draw_data_element.vertex_buffer_bindings) {
            auto buffer_state = GetBufferState(vertex_buffer_binding.buffer);
            if ((vertex_buffer_binding.buffer != VK_NULL_HANDLE) && (!buffer_state)) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                HandleToUint64(vertex_buffer_binding.buffer), kVUID_Core_DrawState_InvalidBuffer,
                                "Cannot submit cmd buffer using deleted buffer %s.",
                                dev_data->report_data->FormatHandle(vertex_buffer_binding.buffer).c_str());
            }
        }
    }
    return skip;
}

// Check that the queue family index of 'queue' matches one of the entries in pQueueFamilyIndices
bool CoreChecks::ValidImageBufferQueue(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, const VK_OBJECT *object, VkQueue queue,
                                       uint32_t count, const uint32_t *indices) {
    bool found = false;
    bool skip = false;
    auto queue_state = GetQueueState(queue);
    if (queue_state) {
        for (uint32_t i = 0; i < count; i++) {
            if (indices[i] == queue_state->queueFamilyIndex) {
                found = true;
                break;
            }
        }

        if (!found) {
            skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[object->type],
                           object->handle, kVUID_Core_DrawState_InvalidQueueFamily,
                           "vkQueueSubmit: Command buffer %s contains %s %s which was not created allowing concurrent access to "
                           "queue family %d.",
                           dev_data->report_data->FormatHandle(cb_node->commandBuffer).c_str(), object_string[object->type],
                           dev_data->report_data->FormatHandle(object->handle).c_str(), queue_state->queueFamilyIndex);
        }
    }
    return skip;
}

// Validate that queueFamilyIndices of primary command buffers match this queue
// Secondary command buffers were previously validated in vkCmdExecuteCommands().
bool CoreChecks::ValidateQueueFamilyIndices(layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkQueue queue) {
    bool skip = false;
    auto pPool = GetCommandPoolNode(pCB->createInfo.commandPool);
    auto queue_state = GetQueueState(queue);

    if (pPool && queue_state) {
        if (pPool->queueFamilyIndex != queue_state->queueFamilyIndex) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(pCB->commandBuffer), "VUID-vkQueueSubmit-pCommandBuffers-00074",
                            "vkQueueSubmit: Primary command buffer %s created in queue family %d is being submitted on queue %s "
                            "from queue family %d.",
                            dev_data->report_data->FormatHandle(pCB->commandBuffer).c_str(), pPool->queueFamilyIndex,
                            dev_data->report_data->FormatHandle(queue).c_str(), queue_state->queueFamilyIndex);
        }

        // Ensure that any bound images or buffers created with SHARING_MODE_CONCURRENT have access to the current queue family
        for (auto object : pCB->object_bindings) {
            if (object.type == kVulkanObjectTypeImage) {
                auto image_state = GetImageState(reinterpret_cast<VkImage &>(object.handle));
                if (image_state && image_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
                    skip |= ValidImageBufferQueue(dev_data, pCB, &object, queue, image_state->createInfo.queueFamilyIndexCount,
                                                  image_state->createInfo.pQueueFamilyIndices);
                }
            } else if (object.type == kVulkanObjectTypeBuffer) {
                auto buffer_state = GetBufferState(reinterpret_cast<VkBuffer &>(object.handle));
                if (buffer_state && buffer_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
                    skip |= ValidImageBufferQueue(dev_data, pCB, &object, queue, buffer_state->createInfo.queueFamilyIndexCount,
                                                  buffer_state->createInfo.pQueueFamilyIndices);
                }
            }
        }
    }

    return skip;
}
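// Example: a primary command buffer allocated from a pool created for queue family 1 but submitted on a queue from
// family 0 triggers "VUID-vkQueueSubmit-pCommandBuffers-00074"; bound VK_SHARING_MODE_CONCURRENT images and buffers
// must also list the submitting queue family in their pQueueFamilyIndices.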

bool CoreChecks::ValidatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, int current_submit_count,
                                                   QFOTransferCBScoreboards<VkImageMemoryBarrier> *qfo_image_scoreboards,
                                                   QFOTransferCBScoreboards<VkBufferMemoryBarrier> *qfo_buffer_scoreboards) {
    // Track in-use for resources off of primary and any secondary CBs
    bool skip = false;

    // If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing on device
    skip |= ValidateCommandBufferSimultaneousUse(dev_data, pCB, current_submit_count);

    skip |= ValidateResources(dev_data, pCB);
    skip |= ValidateQueuedQFOTransfers(dev_data, pCB, qfo_image_scoreboards, qfo_buffer_scoreboards);

    for (auto pSubCB : pCB->linkedCommandBuffers) {
        skip |= ValidateResources(dev_data, pSubCB);
        skip |= ValidateQueuedQFOTransfers(dev_data, pSubCB, qfo_image_scoreboards, qfo_buffer_scoreboards);
        // TODO: replace with InvalidateCommandBuffers() at recording.
        if ((pSubCB->primaryCommandBuffer != pCB->commandBuffer) &&
            !(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                    "VUID-vkQueueSubmit-pCommandBuffers-00073",
                    "Command buffer %s was submitted with secondary buffer %s, but that buffer has subsequently been bound to "
                    "primary cmd buffer %s and it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.",
                    dev_data->report_data->FormatHandle(pCB->commandBuffer).c_str(),
                    dev_data->report_data->FormatHandle(pSubCB->commandBuffer).c_str(),
                    dev_data->report_data->FormatHandle(pSubCB->primaryCommandBuffer).c_str());
        }
    }

    skip |= ValidateCommandBufferState(dev_data, pCB, "vkQueueSubmit()", current_submit_count,
                                       "VUID-vkQueueSubmit-pCommandBuffers-00072");

    return skip;
}

bool CoreChecks::ValidateFenceForSubmit(layer_data *dev_data, FENCE_NODE *pFence) {
    bool skip = false;

    if (pFence && pFence->scope == kSyncScopeInternal) {
        if (pFence->state == FENCE_INFLIGHT) {
            // TODO: opportunities for "VUID-vkQueueSubmit-fence-00064", "VUID-vkQueueBindSparse-fence-01114",
            // "VUID-vkAcquireNextImageKHR-fence-01287"
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                            HandleToUint64(pFence->fence), kVUID_Core_DrawState_InvalidFence,
                            "Fence %s is already in use by another submission.",
                            dev_data->report_data->FormatHandle(pFence->fence).c_str());
        }

        else if (pFence->state == FENCE_RETIRED) {
            // TODO: opportunities for "VUID-vkQueueSubmit-fence-00063", "VUID-vkQueueBindSparse-fence-01113",
            // "VUID-vkAcquireNextImageKHR-fence-01287"
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                            HandleToUint64(pFence->fence), kVUID_Core_MemTrack_FenceState,
                            "Fence %s submitted in SIGNALED state. Fences must be reset before being submitted.",
                            dev_data->report_data->FormatHandle(pFence->fence).c_str());
        }
    }

    return skip;
}
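// Fence lifecycle summary: an unsignaled fence with no pending signaler passes this check; FENCE_INFLIGHT means an
// earlier submission will signal the fence and it cannot be reused yet; FENCE_RETIRED means it is already signaled
// and must be reset before it can be submitted again.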

void CoreChecks::PostCallRecordQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence,
                                           VkResult result) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
    uint64_t early_retire_seq = 0;
    auto pQueue = GetQueueState(queue);
    auto pFence = GetFenceNode(fence);

    if (pFence) {
        if (pFence->scope == kSyncScopeInternal) {
            // Mark fence in use
            SubmitFence(pQueue, pFence, std::max(1u, submitCount));
            if (!submitCount) {
                // If no submissions, but just dropping a fence on the end of the queue,
                // record an empty submission with just the fence, so we can determine
                // its completion.
                pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), std::vector<SEMAPHORE_WAIT>(),
                                                 std::vector<VkSemaphore>(), std::vector<VkSemaphore>(), fence);
            }
        } else {
            // Retire work up until this fence early; we will not see the wait that corresponds to this signal
            early_retire_seq = pQueue->seq + pQueue->submissions.size();
            if (!device_data->external_sync_warning) {
                device_data->external_sync_warning = true;
                log_msg(device_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                        HandleToUint64(fence), kVUID_Core_DrawState_QueueForwardProgress,
                        "vkQueueSubmit(): Signaling external fence %s on queue %s will disable validation of preceding command "
                        "buffer lifecycle states and the in-use status of associated objects.",
                        device_data->report_data->FormatHandle(fence).c_str(),
                        device_data->report_data->FormatHandle(queue).c_str());
            }
        }
    }

    // Now process each individual submit
    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
        std::vector<VkCommandBuffer> cbs;
        const VkSubmitInfo *submit = &pSubmits[submit_idx];
        vector<SEMAPHORE_WAIT> semaphore_waits;
        vector<VkSemaphore> semaphore_signals;
        vector<VkSemaphore> semaphore_externals;
        for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
            VkSemaphore semaphore = submit->pWaitSemaphores[i];
            auto pSemaphore = GetSemaphoreNode(semaphore);
            if (pSemaphore) {
                if (pSemaphore->scope == kSyncScopeInternal) {
                    if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
                        semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
                        pSemaphore->in_use.fetch_add(1);
                    }
                    pSemaphore->signaler.first = VK_NULL_HANDLE;
                    pSemaphore->signaled = false;
                } else {
                    semaphore_externals.push_back(semaphore);
                    pSemaphore->in_use.fetch_add(1);
                    if (pSemaphore->scope == kSyncScopeExternalTemporary) {
                        pSemaphore->scope = kSyncScopeInternal;
                    }
                }
            }
        }
        for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
            VkSemaphore semaphore = submit->pSignalSemaphores[i];
            auto pSemaphore = GetSemaphoreNode(semaphore);
            if (pSemaphore) {
                if (pSemaphore->scope == kSyncScopeInternal) {
                    pSemaphore->signaler.first = queue;
                    pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
                    pSemaphore->signaled = true;
                    pSemaphore->in_use.fetch_add(1);
                    semaphore_signals.push_back(semaphore);
                } else {
                    // Retire work up until this submit early; we will not see the wait that corresponds to this signal
                    early_retire_seq = std::max(early_retire_seq, pQueue->seq + pQueue->submissions.size() + 1);
                    if (!device_data->external_sync_warning) {
                        device_data->external_sync_warning = true;
                        log_msg(device_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, HandleToUint64(semaphore),
                                kVUID_Core_DrawState_QueueForwardProgress,
                                "vkQueueSubmit(): Signaling external semaphore %s on queue %s will disable validation of preceding "
                                "command buffer lifecycle states and the in-use status of associated objects.",
                                device_data->report_data->FormatHandle(semaphore).c_str(),
                                device_data->report_data->FormatHandle(queue).c_str());
                    }
                }
            }
        }
        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
            auto cb_node = GetCBNode(submit->pCommandBuffers[i]);
            if (cb_node) {
                cbs.push_back(submit->pCommandBuffers[i]);
                for (auto secondaryCmdBuffer : cb_node->linkedCommandBuffers) {
                    cbs.push_back(secondaryCmdBuffer->commandBuffer);
                    UpdateCmdBufImageLayouts(device_data, secondaryCmdBuffer);
                    IncrementResources(device_data, secondaryCmdBuffer);
                    RecordQueuedQFOTransfers(device_data, secondaryCmdBuffer);
                }
                UpdateCmdBufImageLayouts(device_data, cb_node);
                IncrementResources(device_data, cb_node);
                RecordQueuedQFOTransfers(device_data, cb_node);
            }
        }
        pQueue->submissions.emplace_back(cbs, semaphore_waits, semaphore_signals, semaphore_externals,
                                         submit_idx == submitCount - 1 ? fence : VK_NULL_HANDLE);
    }

    if (early_retire_seq) {
        RetireWorkOnQueue(device_data, pQueue, early_retire_seq);
    }

    if (GetEnables()->gpu_validation) {
        GpuPostCallQueueSubmit(device_data, queue, submitCount, pSubmits, fence);
    }
}

bool CoreChecks::PreCallValidateQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
    auto pFence = GetFenceNode(fence);
    bool skip = ValidateFenceForSubmit(device_data, pFence);
    if (skip) {
        return true;
    }

    unordered_set<VkSemaphore> signaled_semaphores;
    unordered_set<VkSemaphore> unsignaled_semaphores;
    unordered_set<VkSemaphore> internal_semaphores;
    vector<VkCommandBuffer> current_cmds;
    unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> localImageLayoutMap;
    // Now verify each individual submit
    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
        const VkSubmitInfo *submit = &pSubmits[submit_idx];
        for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
            skip |= ValidateStageMaskGsTsEnables(
                device_data, submit->pWaitDstStageMask[i], "vkQueueSubmit()", "VUID-VkSubmitInfo-pWaitDstStageMask-00076",
                "VUID-VkSubmitInfo-pWaitDstStageMask-00077", "VUID-VkSubmitInfo-pWaitDstStageMask-02089",
                "VUID-VkSubmitInfo-pWaitDstStageMask-02090");
            VkSemaphore semaphore = submit->pWaitSemaphores[i];
            auto pSemaphore = GetSemaphoreNode(semaphore);
            if (pSemaphore && (pSemaphore->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) {
                if (unsignaled_semaphores.count(semaphore) ||
                    (!(signaled_semaphores.count(semaphore)) && !(pSemaphore->signaled))) {
                    skip |=
                        log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                HandleToUint64(semaphore), kVUID_Core_DrawState_QueueForwardProgress,
                                "Queue %s is waiting on semaphore %s that has no way to be signaled.",
                                device_data->report_data->FormatHandle(queue).c_str(),
                                device_data->report_data->FormatHandle(semaphore).c_str());
                } else {
                    signaled_semaphores.erase(semaphore);
                    unsignaled_semaphores.insert(semaphore);
                }
            }
            if (pSemaphore && pSemaphore->scope == kSyncScopeExternalTemporary) {
                internal_semaphores.insert(semaphore);
            }
        }
        for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
            VkSemaphore semaphore = submit->pSignalSemaphores[i];
            auto pSemaphore = GetSemaphoreNode(semaphore);
            if (pSemaphore && (pSemaphore->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) {
                if (signaled_semaphores.count(semaphore) || (!(unsignaled_semaphores.count(semaphore)) && pSemaphore->signaled)) {
                    skip |=
                        log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                HandleToUint64(semaphore), kVUID_Core_DrawState_QueueForwardProgress,
                                "Queue %s is signaling semaphore %s that was previously signaled by queue %s but has not since "
                                "been waited on by any queue.",
                                device_data->report_data->FormatHandle(queue).c_str(),
                                device_data->report_data->FormatHandle(semaphore).c_str(),
                                device_data->report_data->FormatHandle(pSemaphore->signaler.first).c_str());
                } else {
                    unsignaled_semaphores.erase(semaphore);
                    signaled_semaphores.insert(semaphore);
                }
            }
        }
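        // The signaled/unsignaled sets simulate semaphore state across all submits in this vkQueueSubmit call: a wait
        // flips a semaphore to unsignaled and a signal flips it back, so waiting on a semaphore that nothing can
        // signal, or re-signaling one that has not been waited on, is flagged as a forward-progress violation above.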
        QFOTransferCBScoreboards<VkImageMemoryBarrier> qfo_image_scoreboards;
        QFOTransferCBScoreboards<VkBufferMemoryBarrier> qfo_buffer_scoreboards;

        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
            auto cb_node = GetCBNode(submit->pCommandBuffers[i]);
            if (cb_node) {
                skip |= ValidateCmdBufImageLayouts(device_data, cb_node, device_data->imageLayoutMap, localImageLayoutMap);
                current_cmds.push_back(submit->pCommandBuffers[i]);
                skip |= ValidatePrimaryCommandBufferState(
                    device_data, cb_node, (int)std::count(current_cmds.begin(), current_cmds.end(), submit->pCommandBuffers[i]),
                    &qfo_image_scoreboards, &qfo_buffer_scoreboards);
                skip |= ValidateQueueFamilyIndices(device_data, cb_node, queue);

                // Potential early exit here as bad object state may crash in delayed function calls
                if (skip) {
                    return true;
                }

                // Call submit-time functions to validate/update state
                for (auto &function : cb_node->queue_submit_functions) {
                    skip |= function();
                }
                for (auto &function : cb_node->eventUpdates) {
                    skip |= function(queue);
                }
                for (auto &function : cb_node->queryUpdates) {
                    skip |= function(queue);
                }
            }
        }
    }
    return skip;
}

#ifdef VK_USE_PLATFORM_ANDROID_KHR
// Android-specific validation that uses types defined only on Android and only for NDK versions
// that support the VK_ANDROID_external_memory_android_hardware_buffer extension.
// This chunk could move into a separate core_validation_android.cpp file.

// clang-format off

// Map external format and usage flags to/from equivalent Vulkan flags
// (Tables as of v1.1.92)

// AHardwareBuffer Format                       Vulkan Format
// ======================                       =============
// AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM        VK_FORMAT_R8G8B8A8_UNORM
// AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM        VK_FORMAT_R8G8B8A8_UNORM
// AHARDWAREBUFFER_FORMAT_R8G8B8_UNORM          VK_FORMAT_R8G8B8_UNORM
// AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM          VK_FORMAT_R5G6B5_UNORM_PACK16
// AHARDWAREBUFFER_FORMAT_R16G16B16A16_FLOAT    VK_FORMAT_R16G16B16A16_SFLOAT
// AHARDWAREBUFFER_FORMAT_R10G10B10A2_UNORM     VK_FORMAT_A2B10G10R10_UNORM_PACK32
// AHARDWAREBUFFER_FORMAT_D16_UNORM             VK_FORMAT_D16_UNORM
// AHARDWAREBUFFER_FORMAT_D24_UNORM             VK_FORMAT_X8_D24_UNORM_PACK32
// AHARDWAREBUFFER_FORMAT_D24_UNORM_S8_UINT     VK_FORMAT_D24_UNORM_S8_UINT
// AHARDWAREBUFFER_FORMAT_D32_FLOAT             VK_FORMAT_D32_SFLOAT
// AHARDWAREBUFFER_FORMAT_D32_FLOAT_S8_UINT     VK_FORMAT_D32_SFLOAT_S8_UINT
// AHARDWAREBUFFER_FORMAT_S8_UINT               VK_FORMAT_S8_UINT

// The AHARDWAREBUFFER_FORMAT_* values are an enum in the NDK headers, but get passed in to Vulkan
// as uint32_t. Casting the enums here avoids scattering casts around in the code.
std::map<uint32_t, VkFormat> ahb_format_map_a2v = {
    { (uint32_t)AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM,        VK_FORMAT_R8G8B8A8_UNORM },
    { (uint32_t)AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM,        VK_FORMAT_R8G8B8A8_UNORM },
    { (uint32_t)AHARDWAREBUFFER_FORMAT_R8G8B8_UNORM,          VK_FORMAT_R8G8B8_UNORM },
    { (uint32_t)AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM,          VK_FORMAT_R5G6B5_UNORM_PACK16 },
    { (uint32_t)AHARDWAREBUFFER_FORMAT_R16G16B16A16_FLOAT,    VK_FORMAT_R16G16B16A16_SFLOAT },
    { (uint32_t)AHARDWAREBUFFER_FORMAT_R10G10B10A2_UNORM,     VK_FORMAT_A2B10G10R10_UNORM_PACK32 },
    { (uint32_t)AHARDWAREBUFFER_FORMAT_D16_UNORM,             VK_FORMAT_D16_UNORM },
    { (uint32_t)AHARDWAREBUFFER_FORMAT_D24_UNORM,             VK_FORMAT_X8_D24_UNORM_PACK32 },
    { (uint32_t)AHARDWAREBUFFER_FORMAT_D24_UNORM_S8_UINT,     VK_FORMAT_D24_UNORM_S8_UINT },
    { (uint32_t)AHARDWAREBUFFER_FORMAT_D32_FLOAT,             VK_FORMAT_D32_SFLOAT },
    { (uint32_t)AHARDWAREBUFFER_FORMAT_D32_FLOAT_S8_UINT,     VK_FORMAT_D32_SFLOAT_S8_UINT },
    { (uint32_t)AHARDWAREBUFFER_FORMAT_S8_UINT,               VK_FORMAT_S8_UINT }
};

// AHardwareBuffer Usage                        Vulkan Usage or Creation Flag (Intermixed - Aargh!)
// =====================                        ===================================================
// None                                         VK_IMAGE_USAGE_TRANSFER_SRC_BIT
// None                                         VK_IMAGE_USAGE_TRANSFER_DST_BIT
// AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE      VK_IMAGE_USAGE_SAMPLED_BIT
// AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE      VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT
// AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT       VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT
// AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP           VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT
// AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE    None
// AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT      VK_IMAGE_CREATE_PROTECTED_BIT
// None                                         VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT
// None                                         VK_IMAGE_CREATE_EXTENDED_USAGE_BIT

// Same casting rationale. De-mixing the table to prevent type confusion and aliasing
std::map<uint64_t, VkImageUsageFlags> ahb_usage_map_a2v = {
    { (uint64_t)AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE,    (VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT) },
    { (uint64_t)AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT,     VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT },
    { (uint64_t)AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE,  0 },   // No equivalent
};

std::map<uint64_t, VkImageCreateFlags> ahb_create_map_a2v = {
    { (uint64_t)AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP,         VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT },
    { (uint64_t)AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT,    VK_IMAGE_CREATE_PROTECTED_BIT },
    { (uint64_t)AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE,  0 },   // No equivalent
};

std::map<VkImageUsageFlags, uint64_t> ahb_usage_map_v2a = {
    { VK_IMAGE_USAGE_SAMPLED_BIT,           (uint64_t)AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE },
    { VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT,  (uint64_t)AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE },
    { VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,  (uint64_t)AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT  },
};

std::map<VkImageCreateFlags, uint64_t> ahb_create_map_v2a = {
    { VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT,  (uint64_t)AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP },
    { VK_IMAGE_CREATE_PROTECTED_BIT,        (uint64_t)AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT },
};
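
// Usage sketch: ahb_format_map_a2v[(uint32_t)AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM] yields VK_FORMAT_R8G8B8A8_UNORM,
// while the *_v2a maps answer the reverse question when deriving AHardwareBuffer usage bits from Vulkan usage or
// create flags.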

// clang-format on

//
// AHB-extension new APIs
//
bool CoreChecks::PreCallValidateGetAndroidHardwareBufferProperties(VkDevice device, const struct AHardwareBuffer *buffer,
                                                                   VkAndroidHardwareBufferPropertiesANDROID *pProperties) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    //  buffer must be a valid Android hardware buffer object with at least one of the AHARDWAREBUFFER_USAGE_GPU_* usage flags.
    AHardwareBuffer_Desc ahb_desc;
    AHardwareBuffer_describe(buffer, &ahb_desc);
    uint32_t required_flags = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE | AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT |
                              AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP | AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE |
                              AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER;
    if (0 == (ahb_desc.usage & required_flags)) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(device_data->device), "VUID-vkGetAndroidHardwareBufferPropertiesANDROID-buffer-01884",
                        "vkGetAndroidHardwareBufferPropertiesANDROID: The AHardwareBuffer's AHardwareBuffer_Desc.usage (0x%" PRIx64
                        ") does not have any AHARDWAREBUFFER_USAGE_GPU_* flags set.",
                        ahb_desc.usage);
    }
    return skip;
}

void CoreChecks::PostCallRecordGetAndroidHardwareBufferProperties(VkDevice device, const struct AHardwareBuffer *buffer,
                                                                  VkAndroidHardwareBufferPropertiesANDROID *pProperties,
                                                                  VkResult result) {
    if (VK_SUCCESS != result) return;
    auto ahb_format_props = lvl_find_in_chain<VkAndroidHardwareBufferFormatPropertiesANDROID>(pProperties->pNext);
    if (ahb_format_props) {
        auto ext_formats = GetAHBExternalFormatsSet();
        ext_formats->insert(ahb_format_props->externalFormat);
    }
}

bool CoreChecks::PreCallValidateGetMemoryAndroidHardwareBuffer(VkDevice device,
                                                               const VkMemoryGetAndroidHardwareBufferInfoANDROID *pInfo,
                                                               struct AHardwareBuffer **pBuffer) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    DEVICE_MEM_INFO *mem_info = GetMemObjInfo(pInfo->memory);

    // VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID must have been included in
    // VkExportMemoryAllocateInfoKHR::handleTypes when memory was created.
    if (!mem_info->is_export ||
        (0 == (mem_info->export_handle_type_flags & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID))) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(device), "VUID-VkMemoryGetAndroidHardwareBufferInfoANDROID-handleTypes-01882",
                        "vkGetMemoryAndroidHardwareBufferANDROID: The VkDeviceMemory (%s) was not allocated for export, or the "
                        "export handleTypes (0x%" PRIx32
                        ") did not contain VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID.",
                        device_data->report_data->FormatHandle(pInfo->memory).c_str(), mem_info->export_handle_type_flags);
    }

    // If the pNext chain of the VkMemoryAllocateInfo used to allocate memory included a VkMemoryDedicatedAllocateInfo
    // with non-NULL image member, then that image must already be bound to memory.
    if (mem_info->is_dedicated && (VK_NULL_HANDLE != mem_info->dedicated_image)) {
        auto image_state = GetImageState(mem_info->dedicated_image);
        if ((nullptr == image_state) || (0 == (image_state->GetBoundMemory().count(pInfo->memory)))) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                            HandleToUint64(device), "VUID-VkMemoryGetAndroidHardwareBufferInfoANDROID-pNext-01883",
                            "vkGetMemoryAndroidHardwareBufferANDROID: The VkDeviceMemory (%s) was allocated using a dedicated "
                            "image (%s), but that image is not bound to the VkDeviceMemory object.",
                            device_data->report_data->FormatHandle(pInfo->memory).c_str(),
                            device_data->report_data->FormatHandle(mem_info->dedicated_image).c_str());
        }
    }

    return skip;
}

//
// AHB-specific validation within non-AHB APIs
//
bool CoreChecks::ValidateAllocateMemoryANDROID(layer_data *dev_data, const VkMemoryAllocateInfo *alloc_info) {
    bool skip = false;
    auto import_ahb_info = lvl_find_in_chain<VkImportAndroidHardwareBufferInfoANDROID>(alloc_info->pNext);
    auto exp_mem_alloc_info = lvl_find_in_chain<VkExportMemoryAllocateInfo>(alloc_info->pNext);
    auto mem_ded_alloc_info = lvl_find_in_chain<VkMemoryDedicatedAllocateInfo>(alloc_info->pNext);

    if ((import_ahb_info) && (NULL != import_ahb_info->buffer)) {
        // This is an import with handleType of VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID
        AHardwareBuffer_Desc ahb_desc = {};
        AHardwareBuffer_describe(import_ahb_info->buffer, &ahb_desc);

        //  If buffer is not NULL, it must be a valid Android hardware buffer object with AHardwareBuffer_Desc::format and
        //  AHardwareBuffer_Desc::usage compatible with Vulkan as described in Android Hardware Buffers.
        //
        //  BLOB & GPU_DATA_BUFFER combo specifically allowed
        if ((AHARDWAREBUFFER_FORMAT_BLOB != ahb_desc.format) || (0 == (ahb_desc.usage & AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER))) {
            // Otherwise, must be a combination from the AHardwareBuffer Format and Usage Equivalence tables
            // Usage must have at least one bit from the table. It may have additional bits not in the table
            uint64_t ahb_equiv_usage_bits = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE | AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT |
                                            AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP | AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE |
                                            AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT;
            if ((0 == (ahb_desc.usage & ahb_equiv_usage_bits)) || (0 == ahb_format_map_a2v.count(ahb_desc.format))) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                                HandleToUint64(dev_data->device), "VUID-VkImportAndroidHardwareBufferInfoANDROID-buffer-01881",
                                "vkAllocateMemory: The AHardwareBuffer_Desc's format ( %u ) and/or usage ( 0x%" PRIx64
                                " ) are not compatible with Vulkan.",
                                ahb_desc.format, ahb_desc.usage);
            }
        }

        // Collect external buffer info
        VkPhysicalDeviceExternalBufferInfo pdebi = {};
        pdebi.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO;
        pdebi.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
        if (AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE & ahb_desc.usage) {
            pdebi.usage |= ahb_usage_map_a2v[AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE];
        }
        if (AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT & ahb_desc.usage) {
            pdebi.usage |= ahb_usage_map_a2v[AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT];
        }
        VkExternalBufferProperties ext_buf_props = {};
        ext_buf_props.sType = VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES;

        instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(dev_data->instance_data->instance), layer_data_map);
        instance_data->instance_dispatch_table.GetPhysicalDeviceExternalBufferProperties(dev_data->physical_device, &pdebi,
                                                                                         &ext_buf_props);

        // Collect external format info
        VkPhysicalDeviceExternalImageFormatInfo pdeifi = {};
        pdeifi.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO;
        pdeifi.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
        VkPhysicalDeviceImageFormatInfo2 pdifi2 = {};
        pdifi2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2;
        pdifi2.pNext = &pdeifi;
        if (0 < ahb_format_map_a2v.count(ahb_desc.format)) pdifi2.format = ahb_format_map_a2v[ahb_desc.format];
        pdifi2.type = VK_IMAGE_TYPE_2D;           // Seems likely
        pdifi2.tiling = VK_IMAGE_TILING_OPTIMAL;  // Ditto
        if (AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE & ahb_desc.usage) {
            pdifi2.usage |= ahb_usage_map_a2v[AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE];
        }
        if (AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT & ahb_desc.usage) {
            pdifi2.usage |= ahb_usage_map_a2v[AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT];
        }
        if (AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP & ahb_desc.usage) {
            pdifi2.flags |= ahb_create_map_a2v[AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP];
        }
        if (AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT & ahb_desc.usage) {
            pdifi2.flags |= ahb_create_map_a2v[AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT];
        }

        VkExternalImageFormatProperties ext_img_fmt_props = {};
        ext_img_fmt_props.sType = VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES;
        VkImageFormatProperties2 ifp2 = {};
        ifp2.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2;
        ifp2.pNext = &ext_img_fmt_props;

        VkResult fmt_lookup_result = GetPDImageFormatProperties2(&pdifi2, &ifp2);

        //  If buffer is not NULL, Android hardware buffers must be supported for import, as reported by
        //  VkExternalImageFormatProperties or VkExternalBufferProperties.
        if (0 == (ext_buf_props.externalMemoryProperties.externalMemoryFeatures & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT)) {
            if ((VK_SUCCESS != fmt_lookup_result) || (0 == (ext_img_fmt_props.externalMemoryProperties.externalMemoryFeatures &
                                                            VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT))) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                                HandleToUint64(dev_data->device), "VUID-VkImportAndroidHardwareBufferInfoANDROID-buffer-01880",
                                "vkAllocateMemory: Neither the VkExternalImageFormatProperties nor the VkExternalBufferProperties "
                                "structs for the AHardwareBuffer include the VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT flag.");
            }
        }

        // Retrieve buffer and format properties of the provided AHardwareBuffer
        VkAndroidHardwareBufferFormatPropertiesANDROID ahb_format_props = {};
        ahb_format_props.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID;
        VkAndroidHardwareBufferPropertiesANDROID ahb_props = {};
        ahb_props.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID;
        ahb_props.pNext = &ahb_format_props;
        dev_data->device_dispatch_table.GetAndroidHardwareBufferPropertiesANDROID(dev_data->device, import_ahb_info->buffer,
                                                                                  &ahb_props);

        // allocationSize must be the size returned by vkGetAndroidHardwareBufferPropertiesANDROID for the Android hardware buffer
        if (alloc_info->allocationSize != ahb_props.allocationSize) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                            HandleToUint64(dev_data->device), "VUID-VkMemoryAllocateInfo-allocationSize-02383",
                            "vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID "
                            "struct, allocationSize (%" PRId64
                            ") does not match the AHardwareBuffer's reported allocationSize (%" PRId64 ").",
                            alloc_info->allocationSize, ahb_props.allocationSize);
        }

        // memoryTypeIndex must be one of those returned by vkGetAndroidHardwareBufferPropertiesANDROID for the AHardwareBuffer
        // Note: memoryTypeIndex is an index, memoryTypeBits is a bitmask
        uint32_t mem_type_bitmask = 1 << alloc_info->memoryTypeIndex;
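        // Illustrative arithmetic: memoryTypeIndex == 2 yields mem_type_bitmask == 0x4, which must intersect
        // ahb_props.memoryTypeBits for the allocation to be valid.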
3462         if (0 == (mem_type_bitmask & ahb_props.memoryTypeBits)) {
3463             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
3464                             HandleToUint64(dev_data->device), "VUID-VkMemoryAllocateInfo-memoryTypeIndex-02385",
3465                             "vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID "
3466                             "struct, memoryTypeIndex (%" PRId32
3467                             ") does not correspond to a bit set in AHardwareBuffer's reported "
3468                             "memoryTypeBits bitmask (0x%" PRIx32 ").",
3469                             alloc_info->memoryTypeIndex, ahb_props.memoryTypeBits);
3470         }

        // Checks for allocations without a dedicated allocation requirement
        if ((nullptr == mem_ded_alloc_info) || (VK_NULL_HANDLE == mem_ded_alloc_info->image)) {
            // The Android hardware buffer must have a format of AHARDWAREBUFFER_FORMAT_BLOB and a usage that includes
            // AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER
            if (((uint64_t)AHARDWAREBUFFER_FORMAT_BLOB != ahb_format_props.externalFormat) ||
                (0 == (ahb_desc.usage & AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER))) {
                skip |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                    HandleToUint64(dev_data->device), "VUID-VkMemoryAllocateInfo-pNext-02384",
                    "vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID "
                    "struct without a dedicated allocation requirement, while the AHardwareBuffer's external format (0x%" PRIx64
                    ") is not AHARDWAREBUFFER_FORMAT_BLOB or usage (0x%" PRIx64
                    ") does not include AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER.",
                    ahb_format_props.externalFormat, ahb_desc.usage);
            }
        } else {  // Checks specific to import with a dedicated allocation requirement
            VkImageCreateInfo *ici = &(GetImageState(mem_ded_alloc_info->image)->createInfo);

            // The Android hardware buffer's usage must include at least one of AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT or
            // AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE
            if (0 == (ahb_desc.usage & (AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT | AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE))) {
                skip |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                    HandleToUint64(dev_data->device), "VUID-VkMemoryAllocateInfo-pNext-02386",
                    "vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID and a "
                    "dedicated allocation requirement, while the AHardwareBuffer's usage (0x%" PRIx64
                    ") contains neither AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT nor AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE.",
                    ahb_desc.usage);
            }

            // The format of image must be VK_FORMAT_UNDEFINED or the format returned by
            // vkGetAndroidHardwareBufferPropertiesANDROID
            if ((ici->format != ahb_format_props.format) && (VK_FORMAT_UNDEFINED != ici->format)) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                                HandleToUint64(dev_data->device), "VUID-VkMemoryAllocateInfo-pNext-02387",
                                "vkAllocateMemory: VkMemoryAllocateInfo struct with chained "
                                "VkImportAndroidHardwareBufferInfoANDROID, the dedicated allocation image's "
                                "format (%s) is not VK_FORMAT_UNDEFINED and does not match the AHardwareBuffer's format (%s).",
                                string_VkFormat(ici->format), string_VkFormat(ahb_format_props.format));
            }

            // The width, height, and array layer dimensions of image and the Android hardware buffer must be identical
            if ((ici->extent.width != ahb_desc.width) || (ici->extent.height != ahb_desc.height) ||
                (ici->arrayLayers != ahb_desc.layers)) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                                HandleToUint64(dev_data->device), "VUID-VkMemoryAllocateInfo-pNext-02388",
                                "vkAllocateMemory: VkMemoryAllocateInfo struct with chained "
                                "VkImportAndroidHardwareBufferInfoANDROID, the dedicated allocation image's "
                                "width, height, and arrayLayers (%" PRIu32 " %" PRIu32 " %" PRIu32
                                ") do not match those of the AHardwareBuffer (%" PRIu32 " %" PRIu32 " %" PRIu32 ").",
                                ici->extent.width, ici->extent.height, ici->arrayLayers, ahb_desc.width, ahb_desc.height,
                                ahb_desc.layers);
            }

            // If the Android hardware buffer's usage includes AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE, the image must
            // have either a full mipmap chain or exactly 1 mip level.
            //
            // NOTE! The language of this VUID contradicts the language in the spec (1.1.93), which says "The
            // AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE flag does not correspond to a Vulkan image usage or creation flag. Instead,
            // its presence indicates that the Android hardware buffer contains a complete mipmap chain, and its absence indicates
            // that the Android hardware buffer contains only a single mip level."
            //
            // TODO: This code implements the VUID's meaning, but it seems likely that the spec text is actually correct.
            // Clarification requested.
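            // e.g., for a 64x64 image a full chain is 7 levels (64, 32, 16, 8, 4, 2, 1), so mipLevels must be 1 or 7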
            if ((ahb_desc.usage & AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE) && (ici->mipLevels != 1) &&
                (ici->mipLevels != FullMipChainLevels(ici->extent))) {
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                            HandleToUint64(dev_data->device), "VUID-VkMemoryAllocateInfo-pNext-02389",
                            "vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID, "
                            "usage includes AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE but mipLevels (%" PRIu32
                            ") is neither 1 nor full mip "
                            "chain levels (%" PRIu32 ").",
                            ici->mipLevels, FullMipChainLevels(ici->extent));
            }

            // Each bit set in the usage of image must be listed in AHardwareBuffer Usage Equivalence, and if there is a
            // corresponding AHARDWAREBUFFER_USAGE bit listed that bit must be included in the Android hardware buffer's
            // AHardwareBuffer_Desc::usage
            if (ici->usage &
                ~(VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
                  VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT)) {
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                            HandleToUint64(dev_data->device), "VUID-VkMemoryAllocateInfo-pNext-02390",
                            "vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID, "
                            "dedicated image usage bits include one or more with no AHardwareBuffer equivalent.");
            }

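            // ahb_usage_map_v2a translates each Vulkan image usage bit to its AHardwareBuffer usage equivalent
            // (e.g., VK_IMAGE_USAGE_SAMPLED_BIT is expected to map to AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE)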
            bool illegal_usage = false;
            std::vector<VkImageUsageFlags> usages = {VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT,
                                                     VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT};
            for (VkImageUsageFlags ubit : usages) {
                if (ici->usage & ubit) {
                    uint64_t ahb_usage = ahb_usage_map_v2a[ubit];
                    if (0 == (ahb_usage & ahb_desc.usage)) illegal_usage = true;
                }
            }
            if (illegal_usage) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                                HandleToUint64(dev_data->device), "VUID-VkMemoryAllocateInfo-pNext-02390",
                                "vkAllocateMemory: VkMemoryAllocateInfo struct with chained "
                                "VkImportAndroidHardwareBufferInfoANDROID, one or more AHardwareBuffer usage bits equivalent to "
                                "the provided image's usage bits are missing from AHardwareBuffer_Desc.usage.");
            }
        }
    } else {  // Not an import
        if ((exp_mem_alloc_info) && (mem_ded_alloc_info) &&
            (0 != (VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID & exp_mem_alloc_info->handleTypes)) &&
            (VK_NULL_HANDLE != mem_ded_alloc_info->image)) {
            // This is an Android HW Buffer export
            if (0 != alloc_info->allocationSize) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                                HandleToUint64(dev_data->device), "VUID-VkMemoryAllocateInfo-pNext-01874",
                                "vkAllocateMemory: pNext chain indicates a dedicated Android Hardware Buffer export allocation, "
                                "but allocationSize is non-zero.");
            }
        } else {
            if (0 == alloc_info->allocationSize) {
                skip |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                    HandleToUint64(dev_data->device), "VUID-VkMemoryAllocateInfo-pNext-01874",
                    "vkAllocateMemory: pNext chain does not indicate a dedicated export allocation, but allocationSize is 0.");
            }
        }
    }
    return skip;
}
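
// For reference, a minimal AHardwareBuffer import allocation that satisfies the checks above (sketch only;
// assumes `ahb` is an AHardwareBuffer* provided by the application and `ahb_props` was filled in by
// vkGetAndroidHardwareBufferPropertiesANDROID, with `chosen_memory_type_index` picked from its memoryTypeBits):
//     VkImportAndroidHardwareBufferInfoANDROID import_info = {
//         VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID, nullptr, ahb};
//     VkMemoryAllocateInfo alloc_info = {VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, &import_info,
//                                        ahb_props.allocationSize, chosen_memory_type_index};
//     vkAllocateMemory(device, &alloc_info, nullptr, &memory);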

bool CoreChecks::ValidateGetImageMemoryRequirements2ANDROID(layer_data *dev_data, const VkImage image) {
    bool skip = false;

    IMAGE_STATE *image_state = GetImageState(image);
    if (image_state->imported_ahb && (0 == image_state->GetBoundMemory().size())) {
        skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image),
                        "VUID-VkImageMemoryRequirementsInfo2-image-01897",
                        "vkGetImageMemoryRequirements2: Attempt to query layout from an image created with "
                        "VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID handleType, which has not yet been "
                        "bound to memory.");
    }
    return skip;
}

static bool ValidateGetPhysicalDeviceImageFormatProperties2ANDROID(const debug_report_data *report_data,
                                                                   const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo,
                                                                   const VkImageFormatProperties2 *pImageFormatProperties) {
    bool skip = false;
    const VkAndroidHardwareBufferUsageANDROID *ahb_usage =
        lvl_find_in_chain<VkAndroidHardwareBufferUsageANDROID>(pImageFormatProperties->pNext);
    if (nullptr != ahb_usage) {
        const VkPhysicalDeviceExternalImageFormatInfo *pdeifi =
            lvl_find_in_chain<VkPhysicalDeviceExternalImageFormatInfo>(pImageFormatInfo->pNext);
        if ((nullptr == pdeifi) || (VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID != pdeifi->handleType)) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            "VUID-vkGetPhysicalDeviceImageFormatProperties2-pNext-01868",
                            "vkGetPhysicalDeviceImageFormatProperties2: pImageFormatProperties includes a chained "
                            "VkAndroidHardwareBufferUsageANDROID struct, but pImageFormatInfo does not include a chained "
                            "VkPhysicalDeviceExternalImageFormatInfo struct with handleType "
                            "VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID.");
        }
    }
    return skip;
}

bool CoreChecks::ValidateCreateSamplerYcbcrConversionANDROID(const layer_data *dev_data,
                                                             const VkSamplerYcbcrConversionCreateInfo *create_info) {
    const VkExternalFormatANDROID *ext_format_android = lvl_find_in_chain<VkExternalFormatANDROID>(create_info->pNext);
    if ((nullptr != ext_format_android) && (0 != ext_format_android->externalFormat)) {
        if (VK_FORMAT_UNDEFINED != create_info->format) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                           VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT, 0,
                           "VUID-VkSamplerYcbcrConversionCreateInfo-format-01904",
                           "vkCreateSamplerYcbcrConversion[KHR]: CreateInfo format is not VK_FORMAT_UNDEFINED while "
                           "there is a chained VkExternalFormatANDROID struct.");
        }
    } else if (VK_FORMAT_UNDEFINED == create_info->format) {
        return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                       VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT, 0,
                       "VUID-VkSamplerYcbcrConversionCreateInfo-format-01904",
                       "vkCreateSamplerYcbcrConversion[KHR]: CreateInfo format is VK_FORMAT_UNDEFINED with no chained "
                       "VkExternalFormatANDROID struct.");
    }
    return false;
}

void CoreChecks::RecordCreateSamplerYcbcrConversionANDROID(layer_data *dev_data,
                                                           const VkSamplerYcbcrConversionCreateInfo *create_info,
                                                           VkSamplerYcbcrConversion ycbcr_conversion) {
    const VkExternalFormatANDROID *ext_format_android = lvl_find_in_chain<VkExternalFormatANDROID>(create_info->pNext);
    if (ext_format_android && (0 != ext_format_android->externalFormat)) {
        dev_data->ycbcr_conversion_ahb_fmt_map.emplace(ycbcr_conversion, ext_format_android->externalFormat);
    }
}

void CoreChecks::RecordDestroySamplerYcbcrConversionANDROID(layer_data *dev_data, VkSamplerYcbcrConversion ycbcr_conversion) {
    dev_data->ycbcr_conversion_ahb_fmt_map.erase(ycbcr_conversion);
}

#else  // !VK_USE_PLATFORM_ANDROID_KHR

bool CoreChecks::ValidateAllocateMemoryANDROID(layer_data *dev_data, const VkMemoryAllocateInfo *alloc_info) { return false; }

static bool ValidateGetPhysicalDeviceImageFormatProperties2ANDROID(const debug_report_data *report_data,
                                                                   const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo,
                                                                   const VkImageFormatProperties2 *pImageFormatProperties) {
    return false;
}

bool CoreChecks::ValidateCreateSamplerYcbcrConversionANDROID(const layer_data *dev_data,
                                                             const VkSamplerYcbcrConversionCreateInfo *create_info) {
    return false;
}

bool CoreChecks::ValidateGetImageMemoryRequirements2ANDROID(layer_data *dev_data, const VkImage image) { return false; }

void CoreChecks::RecordCreateSamplerYcbcrConversionANDROID(layer_data *dev_data,
                                                           const VkSamplerYcbcrConversionCreateInfo *create_info,
                                                           VkSamplerYcbcrConversion ycbcr_conversion) {}

void CoreChecks::RecordDestroySamplerYcbcrConversionANDROID(layer_data *dev_data, VkSamplerYcbcrConversion ycbcr_conversion) {}

#endif  // VK_USE_PLATFORM_ANDROID_KHR

bool CoreChecks::PreCallValidateAllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
                                               const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    if (device_data->memObjMap.size() >= device_data->phys_dev_props.limits.maxMemoryAllocationCount) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(device), kVUIDUndefined,
                        "Number of currently valid memory objects is not less than the maximum allowed (%u).",
                        device_data->phys_dev_props.limits.maxMemoryAllocationCount);
    }

    if (GetDeviceExtensions()->vk_android_external_memory_android_hardware_buffer) {
        skip |= ValidateAllocateMemoryANDROID(device_data, pAllocateInfo);
    } else {
        if (0 == pAllocateInfo->allocationSize) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                            HandleToUint64(device), "VUID-VkMemoryAllocateInfo-allocationSize-00638",
                            "vkAllocateMemory: allocationSize is 0.");
        }
    }
    // TODO: VUIDs ending in 00643, 00644, 00646, 00647, 01742, 01743, 01745, 00645, 00648, 01744
    return skip;
}

void CoreChecks::PostCallRecordAllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
                                              const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory, VkResult result) {
    if (VK_SUCCESS == result) {
        layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
        AddMemObjInfo(device_data, device, *pMemory, pAllocateInfo);
    }
    return;
}

// For given obj node, if it is in use, flag a validation error and return callback result, else return false
bool CoreChecks::ValidateObjectNotInUse(const layer_data *dev_data, BASE_NODE *obj_node, VK_OBJECT obj_struct,
                                        const char *caller_name, const char *error_code) {
    if (dev_data->instance_data->disabled.object_in_use) return false;
    bool skip = false;
    if (obj_node->in_use.load()) {
        skip |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[obj_struct.type], obj_struct.handle,
                    error_code, "Cannot call %s on %s %s that is currently in use by a command buffer.", caller_name,
                    object_string[obj_struct.type], dev_data->report_data->FormatHandle(obj_struct.handle).c_str());
    }
    return skip;
}

bool CoreChecks::PreCallValidateFreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    DEVICE_MEM_INFO *mem_info = GetMemObjInfo(mem);
    VK_OBJECT obj_struct = {HandleToUint64(mem), kVulkanObjectTypeDeviceMemory};
    bool skip = false;
    if (mem_info) {
        skip |= ValidateObjectNotInUse(device_data, mem_info, obj_struct, "vkFreeMemory", "VUID-vkFreeMemory-memory-00677");
    }
    return skip;
}

void CoreChecks::PreCallRecordFreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (!mem) return;
    DEVICE_MEM_INFO *mem_info = GetMemObjInfo(mem);
    VK_OBJECT obj_struct = {HandleToUint64(mem), kVulkanObjectTypeDeviceMemory};

    // Clear mem binding for any bound objects
    for (auto obj : mem_info->obj_bindings) {
        log_msg(device_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, get_debug_report_enum[obj.type], obj.handle,
                kVUID_Core_MemTrack_FreedMemRef, "VK Object %s still has a reference to mem obj %s.",
                device_data->report_data->FormatHandle(obj.handle).c_str(),
                device_data->report_data->FormatHandle(mem_info->mem).c_str());
        BINDABLE *bindable_state = nullptr;
        switch (obj.type) {
            case kVulkanObjectTypeImage:
                bindable_state = GetImageState(reinterpret_cast<VkImage &>(obj.handle));
                break;
            case kVulkanObjectTypeBuffer:
                bindable_state = GetBufferState(reinterpret_cast<VkBuffer &>(obj.handle));
                break;
            default:
                // Should only have buffer or image objects bound to memory
                assert(0);
        }

        assert(bindable_state);
        bindable_state->binding.mem = MEMORY_UNBOUND;
        bindable_state->UpdateBoundMemorySet();
    }
    // Any bound cmd buffers are now invalid
    InvalidateCommandBuffers(device_data, mem_info->cb_bindings, obj_struct);
    device_data->memObjMap.erase(mem);
}

// Validate that given Map memory range is valid. This means that the memory should not already be mapped,
//  and that the size of the map range should be:
//  1. Not zero
//  2. Within the size of the memory allocation
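// e.g., with allocationSize 0x1000, mapping offset 0xC00 with size 0x800 is rejected (0xC00 + 0x800 > 0x1000)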
static bool ValidateMapMemRange(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
    bool skip = false;

    if (size == 0) {
        skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                       HandleToUint64(mem), kVUID_Core_MemTrack_InvalidMap,
                       "vkMapMemory: Attempting to map memory range of size zero.");
    }

    auto mem_element = dev_data->memObjMap.find(mem);
    if (mem_element != dev_data->memObjMap.end()) {
        auto mem_info = mem_element->second.get();
        // It is an application error to call vkMapMemory on an object that is already mapped
        if (mem_info->mem_range.size != 0) {
            skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           HandleToUint64(mem), kVUID_Core_MemTrack_InvalidMap,
                           "vkMapMemory: Attempting to map memory on an already-mapped object %s.",
                           dev_data->report_data->FormatHandle(mem).c_str());
        }

        // Validate that offset + size is within object's allocationSize
        if (size == VK_WHOLE_SIZE) {
            if (offset >= mem_info->alloc_info.allocationSize) {
                skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                               HandleToUint64(mem), kVUID_Core_MemTrack_InvalidMap,
                               "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64
                               " with size of VK_WHOLE_SIZE oversteps total array size 0x%" PRIx64 ".",
                               offset, mem_info->alloc_info.allocationSize, mem_info->alloc_info.allocationSize);
            }
        } else {
            if ((offset + size) > mem_info->alloc_info.allocationSize) {
                skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                               HandleToUint64(mem), "VUID-vkMapMemory-size-00681",
                               "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64 " oversteps total array size 0x%" PRIx64 ".",
                               offset, size + offset, mem_info->alloc_info.allocationSize);
            }
        }
    }
    return skip;
}

void CoreChecks::StoreMemRanges(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
    auto mem_info = GetMemObjInfo(mem);
    if (mem_info) {
        mem_info->mem_range.offset = offset;
        mem_info->mem_range.size = size;
    }
}

// Guard value for pad data
static char NoncoherentMemoryFillValue = 0xb;
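// (A pad byte that no longer equals this value after unmap indicates an out-of-bounds write by the app.)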

void CoreChecks::InitializeAndTrackMemory(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size,
                                          void **ppData) {
    auto mem_info = GetMemObjInfo(mem);
    if (mem_info) {
        mem_info->p_driver_data = *ppData;
        uint32_t index = mem_info->alloc_info.memoryTypeIndex;
        if (dev_data->phys_dev_mem_props.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
            mem_info->shadow_copy = 0;
        } else {
            if (size == VK_WHOLE_SIZE) {
                size = mem_info->alloc_info.allocationSize - offset;
            }
            mem_info->shadow_pad_size = dev_data->phys_dev_props.limits.minMemoryMapAlignment;
            assert(SafeModulo(mem_info->shadow_pad_size, dev_data->phys_dev_props.limits.minMemoryMapAlignment) == 0);
            // Ensure start of mapped region reflects hardware alignment constraints
            uint64_t map_alignment = dev_data->phys_dev_props.limits.minMemoryMapAlignment;

            // From spec: (ppData - offset) must be aligned to at least limits::minMemoryMapAlignment.
            uint64_t start_offset = offset % map_alignment;
            // Data passed to driver will be wrapped by a guardband of data to detect over- or under-writes.
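            // Resulting shadow buffer layout (sketch):
            //   [alignment slack][front pad: shadow_pad_size][user data: size][back pad: shadow_pad_size]
            // The pointer handed back to the app points at the start of the user-data region.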
            mem_info->shadow_copy_base =
                malloc(static_cast<size_t>(2 * mem_info->shadow_pad_size + size + map_alignment + start_offset));

            mem_info->shadow_copy =
                reinterpret_cast<char *>((reinterpret_cast<uintptr_t>(mem_info->shadow_copy_base) + map_alignment) &
                                         ~(map_alignment - 1)) +
                start_offset;
            assert(SafeModulo(reinterpret_cast<uintptr_t>(mem_info->shadow_copy) + mem_info->shadow_pad_size - start_offset,
                              map_alignment) == 0);

            memset(mem_info->shadow_copy, NoncoherentMemoryFillValue, static_cast<size_t>(2 * mem_info->shadow_pad_size + size));
            *ppData = static_cast<char *>(mem_info->shadow_copy) + mem_info->shadow_pad_size;
        }
    }
}

// Verify that state for fence being waited on is appropriate. That is,
//  a fence being waited on should not already be signaled and
//  it should have been submitted on a queue or during acquire next image
bool CoreChecks::VerifyWaitFenceState(layer_data *dev_data, VkFence fence, const char *apiCall) {
    bool skip = false;

    auto pFence = GetFenceNode(fence);
    if (pFence && pFence->scope == kSyncScopeInternal) {
        if (pFence->state == FENCE_UNSIGNALED) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                            HandleToUint64(fence), kVUID_Core_MemTrack_FenceState,
                            "%s called for fence %s which has not been submitted on a Queue or during acquire next image.", apiCall,
                            dev_data->report_data->FormatHandle(fence).c_str());
        }
    }
    return skip;
}

void CoreChecks::RetireFence(layer_data *dev_data, VkFence fence) {
    auto pFence = GetFenceNode(fence);
    if (pFence && pFence->scope == kSyncScopeInternal) {
        if (pFence->signaler.first != VK_NULL_HANDLE) {
            // Fence signaler is a queue -- use this as proof that prior operations on that queue have completed.
            RetireWorkOnQueue(dev_data, GetQueueState(pFence->signaler.first), pFence->signaler.second);
        } else {
            // Fence signaler is the WSI. We're not tracking what the WSI op actually /was/ in CV yet, but we need to mark
            // the fence as retired.
            pFence->state = FENCE_RETIRED;
        }
    }
}

bool CoreChecks::PreCallValidateWaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll,
                                              uint64_t timeout) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    // Verify fence status of submitted fences
    if (device_data->instance_data->disabled.wait_for_fences) return false;
    bool skip = false;
    for (uint32_t i = 0; i < fenceCount; i++) {
        skip |= VerifyWaitFenceState(device_data, pFences[i], "vkWaitForFences");
        skip |= VerifyQueueStateToFence(device_data, pFences[i]);
    }
    return skip;
}

void CoreChecks::PostCallRecordWaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll,
                                             uint64_t timeout, VkResult result) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (VK_SUCCESS != result) return;

    // When we know that all fences are complete we can clean/remove their CBs
    if ((VK_TRUE == waitAll) || (1 == fenceCount)) {
        for (uint32_t i = 0; i < fenceCount; i++) {
            RetireFence(device_data, pFences[i]);
        }
    }
    // NOTE: The alternate case, where only some fences have completed, is not handled here. In that case,
    //  for the app to determine which fences completed, it will have to call vkGetFenceStatus(), at which
    //  point we'll clean/remove their CBs if complete.
}

bool CoreChecks::PreCallValidateGetFenceStatus(VkDevice device, VkFence fence) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    return VerifyWaitFenceState(device_data, fence, "vkGetFenceStatus()");
}

void CoreChecks::PostCallRecordGetFenceStatus(VkDevice device, VkFence fence, VkResult result) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (VK_SUCCESS != result) return;
    RetireFence(device_data, fence);
}

void CoreChecks::RecordGetDeviceQueueState(layer_data *device_data, uint32_t queue_family_index, VkQueue queue) {
    // Add queue to tracking set only if it is new
    auto queue_is_new = device_data->queues.emplace(queue);
    if (queue_is_new.second) {
        QUEUE_STATE *queue_state = &device_data->queueMap[queue];
        queue_state->queue = queue;
        queue_state->queueFamilyIndex = queue_family_index;
        queue_state->seq = 0;
    }
}

bool CoreChecks::ValidateGetDeviceQueue(layer_data *device_data, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue,
                                        const char *valid_qfi_vuid, const char *qfi_in_range_vuid) {
    bool skip = false;

    skip |= ValidateDeviceQueueFamily(device_data, queueFamilyIndex, "vkGetDeviceQueue", "queueFamilyIndex", valid_qfi_vuid);
    const auto &queue_data = device_data->queue_family_index_map.find(queueFamilyIndex);
    if (queue_data != device_data->queue_family_index_map.end() && queue_data->second <= queueIndex) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(device_data->device), qfi_in_range_vuid,
                        "vkGetDeviceQueue: queueIndex (=%" PRIu32
                        ") is not less than the number of queues requested from queueFamilyIndex (=%" PRIu32
                        ") when the device was created (i.e. is not less than %" PRIu32 ").",
                        queueIndex, queueFamilyIndex, queue_data->second);
    }
    return skip;
}

bool CoreChecks::PreCallValidateGetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    return ValidateGetDeviceQueue(device_data, queueFamilyIndex, queueIndex, pQueue, "VUID-vkGetDeviceQueue-queueFamilyIndex-00384",
                                  "VUID-vkGetDeviceQueue-queueIndex-00385");
}

void CoreChecks::PostCallRecordGetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    RecordGetDeviceQueueState(device_data, queueFamilyIndex, *pQueue);
}

void CoreChecks::PostCallRecordGetDeviceQueue2(VkDevice device, const VkDeviceQueueInfo2 *pQueueInfo, VkQueue *pQueue) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    RecordGetDeviceQueueState(device_data, pQueueInfo->queueFamilyIndex, *pQueue);
}

bool CoreChecks::PreCallValidateQueueWaitIdle(VkQueue queue) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
    QUEUE_STATE *queue_state = GetQueueState(queue);
    if (device_data->instance_data->disabled.queue_wait_idle) return false;
    return VerifyQueueStateToSeq(device_data, queue_state, queue_state->seq + queue_state->submissions.size());
}

void CoreChecks::PostCallRecordQueueWaitIdle(VkQueue queue, VkResult result) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
    if (VK_SUCCESS != result) return;
    QUEUE_STATE *queue_state = GetQueueState(queue);
    RetireWorkOnQueue(device_data, queue_state, queue_state->seq + queue_state->submissions.size());
}

bool CoreChecks::PreCallValidateDeviceWaitIdle(VkDevice device) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (device_data->instance_data->disabled.device_wait_idle) return false;
    bool skip = false;
    for (auto &queue : device_data->queueMap) {
        skip |= VerifyQueueStateToSeq(device_data, &queue.second, queue.second.seq + queue.second.submissions.size());
    }
    return skip;
}

void CoreChecks::PostCallRecordDeviceWaitIdle(VkDevice device, VkResult result) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (VK_SUCCESS != result) return;
    for (auto &queue : device_data->queueMap) {
        RetireWorkOnQueue(device_data, &queue.second, queue.second.seq + queue.second.submissions.size());
    }
}

bool CoreChecks::PreCallValidateDestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    FENCE_NODE *fence_node = GetFenceNode(fence);
    bool skip = false;
    if (fence_node) {
        if (fence_node->scope == kSyncScopeInternal && fence_node->state == FENCE_INFLIGHT) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                            HandleToUint64(fence), "VUID-vkDestroyFence-fence-01120", "Fence %s is in use.",
                            device_data->report_data->FormatHandle(fence).c_str());
        }
    }
    return skip;
}

void CoreChecks::PreCallRecordDestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (!fence) return;
    device_data->fenceMap.erase(fence);
}

bool CoreChecks::PreCallValidateDestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    SEMAPHORE_NODE *sema_node = GetSemaphoreNode(semaphore);
    VK_OBJECT obj_struct = {HandleToUint64(semaphore), kVulkanObjectTypeSemaphore};
    if (device_data->instance_data->disabled.destroy_semaphore) return false;
    bool skip = false;
    if (sema_node) {
        skip |= ValidateObjectNotInUse(device_data, sema_node, obj_struct, "vkDestroySemaphore",
                                       "VUID-vkDestroySemaphore-semaphore-01137");
    }
    return skip;
}

void CoreChecks::PreCallRecordDestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (!semaphore) return;
    device_data->semaphoreMap.erase(semaphore);
}

bool CoreChecks::PreCallValidateDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    EVENT_STATE *event_state = GetEventNode(event);
    VK_OBJECT obj_struct = {HandleToUint64(event), kVulkanObjectTypeEvent};
    bool skip = false;
    if (event_state) {
        skip |= ValidateObjectNotInUse(device_data, event_state, obj_struct, "vkDestroyEvent", "VUID-vkDestroyEvent-event-01145");
    }
    return skip;
}

void CoreChecks::PreCallRecordDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (!event) return;
    EVENT_STATE *event_state = GetEventNode(event);
    VK_OBJECT obj_struct = {HandleToUint64(event), kVulkanObjectTypeEvent};
    InvalidateCommandBuffers(device_data, event_state->cb_bindings, obj_struct);
    device_data->eventMap.erase(event);
}

bool CoreChecks::PreCallValidateDestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    QUERY_POOL_NODE *qp_state = GetQueryPoolNode(queryPool);
    VK_OBJECT obj_struct = {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool};
    bool skip = false;
    if (qp_state) {
        skip |= ValidateObjectNotInUse(device_data, qp_state, obj_struct, "vkDestroyQueryPool",
                                       "VUID-vkDestroyQueryPool-queryPool-00793");
    }
    return skip;
}

void CoreChecks::PreCallRecordDestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (!queryPool) return;
    QUERY_POOL_NODE *qp_state = GetQueryPoolNode(queryPool);
    VK_OBJECT obj_struct = {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool};
    InvalidateCommandBuffers(device_data, qp_state->cb_bindings, obj_struct);
    device_data->queryPoolMap.erase(queryPool);
}

bool CoreChecks::PreCallValidateGetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery,
                                                    uint32_t queryCount, size_t dataSize, void *pData, VkDeviceSize stride,
                                                    VkQueryResultFlags flags) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    auto query_pool_state = device_data->queryPoolMap.find(queryPool);
    if (query_pool_state != device_data->queryPoolMap.end()) {
        if ((query_pool_state->second.createInfo.queryType == VK_QUERY_TYPE_TIMESTAMP) && (flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
                            "VUID-vkGetQueryPoolResults-queryType-00818",
                            "QueryPool %s was created with a queryType of VK_QUERY_TYPE_TIMESTAMP but flags contains "
                            "VK_QUERY_RESULT_PARTIAL_BIT.",
                            device_data->report_data->FormatHandle(queryPool).c_str());
        }
    }
    return skip;
}

void CoreChecks::PostCallRecordGetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
                                                   size_t dataSize, void *pData, VkDeviceSize stride, VkQueryResultFlags flags,
                                                   VkResult result) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    if ((VK_SUCCESS != result) && (VK_NOT_READY != result)) return;
    // TODO: clean this up, it's insanely wasteful.
    unordered_map<QueryObject, std::vector<VkCommandBuffer>> queries_in_flight;
    for (auto cmd_buffer : device_data->commandBufferMap) {
        if (cmd_buffer.second->in_use.load()) {
            for (auto query_state_pair : cmd_buffer.second->queryToStateMap) {
                queries_in_flight[query_state_pair.first].push_back(cmd_buffer.first);
            }
        }
    }
    for (uint32_t i = 0; i < queryCount; ++i) {
        QueryObject query = {queryPool, firstQuery + i};
        auto qif_pair = queries_in_flight.find(query);
        auto query_state_pair = device_data->queryToStateMap.find(query);
        if (query_state_pair != device_data->queryToStateMap.end()) {
            // Available and in flight
            if (qif_pair != queries_in_flight.end() && query_state_pair->second) {
                for (auto cmd_buffer : qif_pair->second) {
                    auto cb = GetCBNode(cmd_buffer);
                    auto query_event_pair = cb->waitedEventsBeforeQueryReset.find(query);
                    if (query_event_pair != cb->waitedEventsBeforeQueryReset.end()) {
                        for (auto event : query_event_pair->second) {
                            device_data->eventMap[event].needsSignaled = true;
                        }
                    }
                }
            }
        }
    }
}

// Return true if given ranges intersect, else false
// Prereq: For both ranges, range->end - range->start > 0. That case should have already resulted
//  in an error, so it is not checked here.
// When a linear and a non-linear range are compared, both are padded out to bufferImageGranularity.
// In the padded case, if an alias is encountered then a validation error is reported and skip may be set
//  by the callback function, so the caller should merge in the skip value if the padding case is possible.
// This check can be skipped by passing skip_checks=true, for call sites outside the validation path.
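// Example: with bufferImageGranularity 0x400, a linear range ending at 0x401 and a non-linear range
// starting at 0x7FF both mask down to block 0x400, so they are treated as intersecting and the
// aliasing warning below fires.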
bool CoreChecks::RangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, MEMORY_RANGE const *range2, bool *skip,
                                 bool skip_checks) {
    *skip = false;
    auto r1_start = range1->start;
    auto r1_end = range1->end;
    auto r2_start = range2->start;
    auto r2_end = range2->end;
    VkDeviceSize pad_align = 1;
    if (range1->linear != range2->linear) {
        pad_align = dev_data->phys_dev_props.limits.bufferImageGranularity;
    }
    if ((r1_end & ~(pad_align - 1)) < (r2_start & ~(pad_align - 1))) return false;
    if ((r1_start & ~(pad_align - 1)) > (r2_end & ~(pad_align - 1))) return false;

    if (!skip_checks && (range1->linear != range2->linear)) {
        // In linear vs. non-linear case, warn of aliasing
        const char *r1_linear_str = range1->linear ? "Linear" : "Non-linear";
        const char *r1_type_str = range1->image ? "image" : "buffer";
        const char *r2_linear_str = range2->linear ? "linear" : "non-linear";
        const char *r2_type_str = range2->image ? "image" : "buffer";
        auto obj_type = range1->image ? VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT : VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT;
        *skip |= log_msg(
            dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, obj_type, range1->handle, kVUID_Core_MemTrack_InvalidAliasing,
            "%s %s %s is aliased with %s %s %s which may indicate a bug. For further info refer to the Buffer-Image Granularity "
            "section of the Vulkan specification. "
            "(https://www.khronos.org/registry/vulkan/specs/1.0-extensions/xhtml/vkspec.html#resources-bufferimagegranularity)",
            r1_linear_str, r1_type_str, dev_data->report_data->FormatHandle(range1->handle).c_str(), r2_linear_str, r2_type_str,
            dev_data->report_data->FormatHandle(range2->handle).c_str());
    }
    // Ranges intersect
    return true;
}
// Simplified overload of RangesIntersect that checks range1 for intersection with an [offset, end] address range
bool CoreChecks::RangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, VkDeviceSize offset, VkDeviceSize end) {
    // Create a local MEMORY_RANGE struct to wrap offset/size
    MEMORY_RANGE range_wrap;
    // Sync linear with range1 to avoid padding and the potential validation error case
    range_wrap.linear = range1->linear;
    range_wrap.start = offset;
    range_wrap.end = end;
    bool tmp_bool;
    return RangesIntersect(dev_data, range1, &range_wrap, &tmp_bool, true);
}

bool CoreChecks::ValidateInsertMemoryRange(layer_data const *dev_data, uint64_t handle, DEVICE_MEM_INFO *mem_info,
                                           VkDeviceSize memoryOffset, VkMemoryRequirements memRequirements, bool is_image,
                                           bool is_linear, const char *api_name) {
    bool skip = false;

    MEMORY_RANGE range;
    range.image = is_image;
    range.handle = handle;
    range.linear = is_linear;
    range.memory = mem_info->mem;
    range.start = memoryOffset;
    range.size = memRequirements.size;
    range.end = memoryOffset + memRequirements.size - 1;
    range.aliases.clear();

    // Check for aliasing problems.
    for (auto &obj_range_pair : mem_info->bound_ranges) {
        auto check_range = &obj_range_pair.second;
        bool intersection_error = false;
        if (RangesIntersect(dev_data, &range, check_range, &intersection_error, false)) {
            skip |= intersection_error;
            range.aliases.insert(check_range);
        }
    }

    if (memoryOffset >= mem_info->alloc_info.allocationSize) {
        const char *error_code =
            is_image ? "VUID-vkBindImageMemory-memoryOffset-01046" : "VUID-vkBindBufferMemory-memoryOffset-01031";
        skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                       HandleToUint64(mem_info->mem), error_code,
                       "In %s, attempting to bind memory (%s) to object (%s), memoryOffset=0x%" PRIxLEAST64
                       " must be less than the memory allocation size 0x%" PRIxLEAST64 ".",
                       api_name, dev_data->report_data->FormatHandle(mem_info->mem).c_str(),
                       dev_data->report_data->FormatHandle(handle).c_str(), memoryOffset, mem_info->alloc_info.allocationSize);
    }

    return skip;
}

// Object with given handle is being bound to memory with given mem_info struct.
//  Track the newly bound memory range with given memoryOffset.
//  Also scan any previous ranges and track aliased ranges with the new range. Linear vs. non-linear
//  overlap errors are raised by ValidateInsertMemoryRange above; this record-side function only
//  updates the tracking state.
// is_image indicates an image object, otherwise handle is for a buffer
// is_linear indicates a buffer or linear image
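// Example: binding a non-linear (optimally tiled) image over pages already occupied by a buffer bound to the
// same VkDeviceMemory records each range in the other's alias set, so a later unbind can unlink them cleanly.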
void CoreChecks::InsertMemoryRange(layer_data const *dev_data, uint64_t handle, DEVICE_MEM_INFO *mem_info,
                                   VkDeviceSize memoryOffset, VkMemoryRequirements memRequirements, bool is_image, bool is_linear) {
    MEMORY_RANGE range;

    range.image = is_image;
    range.handle = handle;
    range.linear = is_linear;
    range.memory = mem_info->mem;
    range.start = memoryOffset;
    range.size = memRequirements.size;
    range.end = memoryOffset + memRequirements.size - 1;
    range.aliases.clear();
    // Update memory aliasing
    // Save aliased ranges so we can copy them into the final map entry below. We can't do it in the loop because we don't
    // yet have the final ptr. If we inserted into the map before the loop to get the final ptr, then we might enter the loop
    // when not needed, and we would check the range against itself.
    std::unordered_set<MEMORY_RANGE *> tmp_alias_ranges;
    for (auto &obj_range_pair : mem_info->bound_ranges) {
        auto check_range = &obj_range_pair.second;
        bool intersection_error = false;
        if (RangesIntersect(dev_data, &range, check_range, &intersection_error, true)) {
            range.aliases.insert(check_range);
            tmp_alias_ranges.insert(check_range);
        }
    }
    mem_info->bound_ranges[handle] = std::move(range);
    for (auto tmp_range : tmp_alias_ranges) {
        tmp_range->aliases.insert(&mem_info->bound_ranges[handle]);
    }
    if (is_image)
        mem_info->bound_images.insert(handle);
    else
        mem_info->bound_buffers.insert(handle);
}

bool CoreChecks::ValidateInsertImageMemoryRange(layer_data const *dev_data, VkImage image, DEVICE_MEM_INFO *mem_info,
                                                VkDeviceSize mem_offset, VkMemoryRequirements mem_reqs, bool is_linear,
                                                const char *api_name) {
    return ValidateInsertMemoryRange(dev_data, HandleToUint64(image), mem_info, mem_offset, mem_reqs, true, is_linear, api_name);
}

void CoreChecks::InsertImageMemoryRange(layer_data const *dev_data, VkImage image, DEVICE_MEM_INFO *mem_info,
                                        VkDeviceSize mem_offset, VkMemoryRequirements mem_reqs, bool is_linear) {
    InsertMemoryRange(dev_data, HandleToUint64(image), mem_info, mem_offset, mem_reqs, true, is_linear);
}

bool CoreChecks::ValidateInsertBufferMemoryRange(layer_data const *dev_data, VkBuffer buffer, DEVICE_MEM_INFO *mem_info,
                                                 VkDeviceSize mem_offset, VkMemoryRequirements mem_reqs, const char *api_name) {
    return ValidateInsertMemoryRange(dev_data, HandleToUint64(buffer), mem_info, mem_offset, mem_reqs, false, true, api_name);
}

void CoreChecks::InsertBufferMemoryRange(layer_data const *dev_data, VkBuffer buffer, DEVICE_MEM_INFO *mem_info,
                                         VkDeviceSize mem_offset, VkMemoryRequirements mem_reqs) {
    InsertMemoryRange(dev_data, HandleToUint64(buffer), mem_info, mem_offset, mem_reqs, false, true);
}

// Remove MEMORY_RANGE struct for given handle from bound_ranges of mem_info
//  is_image indicates if handle is for image or buffer
//  This function will also remove the handle-to-index mapping from the appropriate
//  map and clean up any aliases for the range being removed.
static void RemoveMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info, bool is_image) {
    auto erase_range = &mem_info->bound_ranges[handle];
    for (auto alias_range : erase_range->aliases) {
        alias_range->aliases.erase(erase_range);
    }
    erase_range->aliases.clear();
    mem_info->bound_ranges.erase(handle);
    if (is_image) {
        mem_info->bound_images.erase(handle);
    } else {
        mem_info->bound_buffers.erase(handle);
    }
}

void CoreChecks::RemoveBufferMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, false); }

void CoreChecks::RemoveImageMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, true); }

bool CoreChecks::ValidateMemoryTypes(const layer_data *dev_data, const DEVICE_MEM_INFO *mem_info, const uint32_t memory_type_bits,
                                     const char *funcName, const char *msgCode) {
    bool skip = false;
    // Use an unsigned shift to avoid shifting into the sign bit for high memory type indices
    if (((1u << mem_info->alloc_info.memoryTypeIndex) & memory_type_bits) == 0) {
        skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                       HandleToUint64(mem_info->mem), msgCode,
                       "%s(): MemoryRequirements->memoryTypeBits (0x%X) for this object type are not compatible with the memory "
                       "type (0x%X) of this memory object %s.",
                       funcName, memory_type_bits, mem_info->alloc_info.memoryTypeIndex,
                       dev_data->report_data->FormatHandle(mem_info->mem).c_str());
    }
    return skip;
}
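
// Illustrative sketch of the application-side counterpart of the check above: picking a
// memoryTypeIndex whose bit is set in VkMemoryRequirements::memoryTypeBits and whose
// propertyFlags satisfy the request. A minimal example assuming a populated
// VkPhysicalDeviceMemoryProperties; hypothetical helper, not used by the layer.
static inline uint32_t FindMemoryTypeSketch(const VkPhysicalDeviceMemoryProperties &props, uint32_t type_bits,
                                            VkMemoryPropertyFlags required) {
    for (uint32_t i = 0; i < props.memoryTypeCount; ++i) {
        const bool type_allowed = (type_bits & (1u << i)) != 0;
        const bool has_required_props = (props.memoryTypes[i].propertyFlags & required) == required;
        if (type_allowed && has_required_props) return i;
    }
    return UINT32_MAX;  // No compatible memory type; the caller must handle this case.
}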

bool CoreChecks::ValidateBindBufferMemory(layer_data *device_data, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset,
                                          const char *api_name) {
    BUFFER_STATE *buffer_state = GetBufferState(buffer);

    bool skip = false;
    if (buffer_state) {
        // Track objects tied to memory
        uint64_t buffer_handle = HandleToUint64(buffer);
        skip = ValidateSetMemBinding(device_data, mem, buffer_handle, kVulkanObjectTypeBuffer, api_name);
        if (!buffer_state->memory_requirements_checked) {
            // There's no explicit requirement in the spec to call vkGetBufferMemoryRequirements() prior to calling
            // vkBindBufferMemory, but it's implied in that the memory being bound must conform to the VkMemoryRequirements
            // returned by vkGetBufferMemoryRequirements()
            skip |=
                log_msg(device_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                        buffer_handle, kVUID_Core_DrawState_InvalidBuffer,
                        "%s: Binding memory to buffer %s but vkGetBufferMemoryRequirements() has not been called on that buffer.",
                        api_name, device_data->report_data->FormatHandle(buffer_handle).c_str());
            // Make the call for them so we can verify the state
            device_data->device_dispatch_table.GetBufferMemoryRequirements(device_data->device, buffer,
                                                                           &buffer_state->requirements);
        }

        // Validate bound memory range information
        const auto mem_info = GetMemObjInfo(mem);
        if (mem_info) {
            skip |=
                ValidateInsertBufferMemoryRange(device_data, buffer, mem_info, memoryOffset, buffer_state->requirements, api_name);
            skip |= ValidateMemoryTypes(device_data, mem_info, buffer_state->requirements.memoryTypeBits, api_name,
                                        "VUID-vkBindBufferMemory-memory-01035");
        }

        // Validate memory requirements alignment
        if (SafeModulo(memoryOffset, buffer_state->requirements.alignment) != 0) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                            buffer_handle, "VUID-vkBindBufferMemory-memoryOffset-01036",
                            "%s: memoryOffset is 0x%" PRIxLEAST64
                            " but must be an integer multiple of the VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
                            ", returned from a call to vkGetBufferMemoryRequirements with buffer.",
                            api_name, memoryOffset, buffer_state->requirements.alignment);
        }

        if (mem_info) {
            // Validate memory requirements size
            if (buffer_state->requirements.size > (mem_info->alloc_info.allocationSize - memoryOffset)) {
                skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                buffer_handle, "VUID-vkBindBufferMemory-size-01037",
                                "%s: memory size minus memoryOffset is 0x%" PRIxLEAST64
                                " but must be at least as large as VkMemoryRequirements::size value 0x%" PRIxLEAST64
                                ", returned from a call to vkGetBufferMemoryRequirements with buffer.",
                                api_name, mem_info->alloc_info.allocationSize - memoryOffset, buffer_state->requirements.size);
            }

            // Validate dedicated allocation
            if (mem_info->is_dedicated && ((mem_info->dedicated_buffer != buffer) || (memoryOffset != 0))) {
                // TODO: Add vkBindBufferMemory2KHR error message when added to spec.
                auto validation_error = kVUIDUndefined;
                if (strcmp(api_name, "vkBindBufferMemory()") == 0) {
                    validation_error = "VUID-vkBindBufferMemory-memory-01508";
                }
                skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                buffer_handle, validation_error,
                                "%s: for dedicated memory allocation %s, VkMemoryDedicatedAllocateInfoKHR::buffer %s must be equal "
                                "to buffer %s and memoryOffset 0x%" PRIxLEAST64 " must be zero.",
                                api_name, device_data->report_data->FormatHandle(mem).c_str(),
                                device_data->report_data->FormatHandle(mem_info->dedicated_buffer).c_str(),
                                device_data->report_data->FormatHandle(buffer_handle).c_str(), memoryOffset);
            }
        }
    }
    return skip;
}
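
// Illustrative sketch of how an application avoids the alignment error validated above:
// round a candidate offset up to VkMemoryRequirements::alignment, which the spec
// guarantees is a power of two. Hypothetical helper, not used by the layer.
static inline VkDeviceSize AlignUpSketch(VkDeviceSize offset, VkDeviceSize alignment) {
    // For power-of-two alignments, (offset + alignment - 1) & ~(alignment - 1) rounds up.
    return (offset + alignment - 1) & ~(alignment - 1);
}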

bool CoreChecks::PreCallValidateBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    const char *api_name = "vkBindBufferMemory()";
    return ValidateBindBufferMemory(device_data, buffer, mem, memoryOffset, api_name);
}

void CoreChecks::UpdateBindBufferMemoryState(layer_data *device_data, VkBuffer buffer, VkDeviceMemory mem,
                                             VkDeviceSize memoryOffset) {
    BUFFER_STATE *buffer_state = GetBufferState(buffer);
    if (buffer_state) {
        // Track bound memory range information
        auto mem_info = GetMemObjInfo(mem);
        if (mem_info) {
            InsertBufferMemoryRange(device_data, buffer, mem_info, memoryOffset, buffer_state->requirements);
        }
        // Track objects tied to memory
        uint64_t buffer_handle = HandleToUint64(buffer);
        SetMemBinding(device_data, mem, buffer_state, memoryOffset, buffer_handle, kVulkanObjectTypeBuffer);
    }
}

void CoreChecks::PostCallRecordBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset,
                                                VkResult result) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (VK_SUCCESS != result) return;
    UpdateBindBufferMemoryState(device_data, buffer, mem, memoryOffset);
}

bool CoreChecks::PreCallValidateBindBufferMemory2(VkDevice device, uint32_t bindInfoCount,
                                                  const VkBindBufferMemoryInfoKHR *pBindInfos) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    char api_name[64];
    bool skip = false;

    for (uint32_t i = 0; i < bindInfoCount; i++) {
        snprintf(api_name, sizeof(api_name), "vkBindBufferMemory2() pBindInfos[%u]", i);
        skip |=
            ValidateBindBufferMemory(device_data, pBindInfos[i].buffer, pBindInfos[i].memory, pBindInfos[i].memoryOffset, api_name);
    }
    return skip;
}

bool CoreChecks::PreCallValidateBindBufferMemory2KHR(VkDevice device, uint32_t bindInfoCount,
                                                     const VkBindBufferMemoryInfoKHR *pBindInfos) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    char api_name[64];
    bool skip = false;

    for (uint32_t i = 0; i < bindInfoCount; i++) {
        snprintf(api_name, sizeof(api_name), "vkBindBufferMemory2KHR() pBindInfos[%u]", i);
        skip |=
            ValidateBindBufferMemory(device_data, pBindInfos[i].buffer, pBindInfos[i].memory, pBindInfos[i].memoryOffset, api_name);
    }
    return skip;
}

void CoreChecks::PostCallRecordBindBufferMemory2(VkDevice device, uint32_t bindInfoCount,
                                                 const VkBindBufferMemoryInfoKHR *pBindInfos, VkResult result) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (VK_SUCCESS != result) return;
    for (uint32_t i = 0; i < bindInfoCount; i++) {
        UpdateBindBufferMemoryState(device_data, pBindInfos[i].buffer, pBindInfos[i].memory, pBindInfos[i].memoryOffset);
    }
}

void CoreChecks::PostCallRecordBindBufferMemory2KHR(VkDevice device, uint32_t bindInfoCount,
                                                    const VkBindBufferMemoryInfoKHR *pBindInfos, VkResult result) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (VK_SUCCESS != result) return;
    for (uint32_t i = 0; i < bindInfoCount; i++) {
        UpdateBindBufferMemoryState(device_data, pBindInfos[i].buffer, pBindInfos[i].memory, pBindInfos[i].memoryOffset);
    }
}

void CoreChecks::RecordGetBufferMemoryRequirementsState(layer_data *device_data, VkBuffer buffer,
                                                        VkMemoryRequirements *pMemoryRequirements) {
    BUFFER_STATE *buffer_state = GetBufferState(buffer);
    if (buffer_state) {
        buffer_state->requirements = *pMemoryRequirements;
        buffer_state->memory_requirements_checked = true;
    }
}

void CoreChecks::PostCallRecordGetBufferMemoryRequirements(VkDevice device, VkBuffer buffer,
                                                           VkMemoryRequirements *pMemoryRequirements) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    RecordGetBufferMemoryRequirementsState(device_data, buffer, pMemoryRequirements);
}

void CoreChecks::PostCallRecordGetBufferMemoryRequirements2(VkDevice device, const VkBufferMemoryRequirementsInfo2KHR *pInfo,
                                                            VkMemoryRequirements2KHR *pMemoryRequirements) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    RecordGetBufferMemoryRequirementsState(device_data, pInfo->buffer, &pMemoryRequirements->memoryRequirements);
}

void CoreChecks::PostCallRecordGetBufferMemoryRequirements2KHR(VkDevice device, const VkBufferMemoryRequirementsInfo2KHR *pInfo,
                                                               VkMemoryRequirements2KHR *pMemoryRequirements) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    RecordGetBufferMemoryRequirementsState(device_data, pInfo->buffer, &pMemoryRequirements->memoryRequirements);
}

bool CoreChecks::ValidateGetImageMemoryRequirements2(layer_data *dev_data, const VkImageMemoryRequirementsInfo2 *pInfo) {
    bool skip = false;
    if (GetDeviceExtensions()->vk_android_external_memory_android_hardware_buffer) {
        skip |= ValidateGetImageMemoryRequirements2ANDROID(dev_data, pInfo->image);
    }
    return skip;
}

bool CoreChecks::PreCallValidateGetImageMemoryRequirements2(VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo,
                                                            VkMemoryRequirements2 *pMemoryRequirements) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    return ValidateGetImageMemoryRequirements2(device_data, pInfo);
}

bool CoreChecks::PreCallValidateGetImageMemoryRequirements2KHR(VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo,
                                                               VkMemoryRequirements2 *pMemoryRequirements) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    return ValidateGetImageMemoryRequirements2(device_data, pInfo);
}

void CoreChecks::RecordGetImageMemoryRequiementsState(layer_data *device_data, VkImage image,
                                                      VkMemoryRequirements *pMemoryRequirements) {
    IMAGE_STATE *image_state = GetImageState(image);
    if (image_state) {
        image_state->requirements = *pMemoryRequirements;
        image_state->memory_requirements_checked = true;
    }
}

void CoreChecks::PostCallRecordGetImageMemoryRequirements(VkDevice device, VkImage image,
                                                          VkMemoryRequirements *pMemoryRequirements) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    RecordGetImageMemoryRequiementsState(device_data, image, pMemoryRequirements);
}

void CoreChecks::PostCallRecordGetImageMemoryRequirements2(VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo,
                                                           VkMemoryRequirements2 *pMemoryRequirements) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    RecordGetImageMemoryRequiementsState(device_data, pInfo->image, &pMemoryRequirements->memoryRequirements);
}

void CoreChecks::PostCallRecordGetImageMemoryRequirements2KHR(VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo,
                                                              VkMemoryRequirements2 *pMemoryRequirements) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    RecordGetImageMemoryRequiementsState(device_data, pInfo->image, &pMemoryRequirements->memoryRequirements);
}

static void RecordGetImageSparseMemoryRequirementsState(IMAGE_STATE *image_state,
                                                        VkSparseImageMemoryRequirements *sparse_image_memory_requirements) {
    image_state->sparse_requirements.emplace_back(*sparse_image_memory_requirements);
    if (sparse_image_memory_requirements->formatProperties.aspectMask & VK_IMAGE_ASPECT_METADATA_BIT) {
        image_state->sparse_metadata_required = true;
    }
}

void CoreChecks::PostCallRecordGetImageSparseMemoryRequirements(VkDevice device, VkImage image,
                                                                uint32_t *pSparseMemoryRequirementCount,
                                                                VkSparseImageMemoryRequirements *pSparseMemoryRequirements) {
    auto image_state = GetImageState(image);
    if (!image_state) return;  // Guard against unknown image handles
    image_state->get_sparse_reqs_called = true;
    if (!pSparseMemoryRequirements) return;
    for (uint32_t i = 0; i < *pSparseMemoryRequirementCount; i++) {
        RecordGetImageSparseMemoryRequirementsState(image_state, &pSparseMemoryRequirements[i]);
    }
}

void CoreChecks::PostCallRecordGetImageSparseMemoryRequirements2(VkDevice device,
                                                                 const VkImageSparseMemoryRequirementsInfo2KHR *pInfo,
                                                                 uint32_t *pSparseMemoryRequirementCount,
                                                                 VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements) {
    auto image_state = GetImageState(pInfo->image);
    if (!image_state) return;  // Guard against unknown image handles
    image_state->get_sparse_reqs_called = true;
    if (!pSparseMemoryRequirements) return;
    for (uint32_t i = 0; i < *pSparseMemoryRequirementCount; i++) {
        assert(!pSparseMemoryRequirements[i].pNext);  // TODO: If an extension is ever added here we need to handle it
        RecordGetImageSparseMemoryRequirementsState(image_state, &pSparseMemoryRequirements[i].memoryRequirements);
    }
}

void CoreChecks::PostCallRecordGetImageSparseMemoryRequirements2KHR(
    VkDevice device, const VkImageSparseMemoryRequirementsInfo2KHR *pInfo, uint32_t *pSparseMemoryRequirementCount,
    VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements) {
    auto image_state = GetImageState(pInfo->image);
    if (!image_state) return;  // Guard against unknown image handles
    image_state->get_sparse_reqs_called = true;
    if (!pSparseMemoryRequirements) return;
    for (uint32_t i = 0; i < *pSparseMemoryRequirementCount; i++) {
        assert(!pSparseMemoryRequirements[i].pNext);  // TODO: If an extension is ever added here we need to handle it
        RecordGetImageSparseMemoryRequirementsState(image_state, &pSparseMemoryRequirements[i].memoryRequirements);
    }
}
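
// Illustrative sketch of the two-call enumeration pattern these hooks observe: the first
// call passes NULL to query only the count (which is why the recorders above return early
// on a NULL pSparseMemoryRequirements), and the second call fills the array. A minimal
// application-side example assuming valid handles; not part of the layer itself.
static inline std::vector<VkSparseImageMemoryRequirements> GetSparseReqsSketch(VkDevice device, VkImage image) {
    uint32_t count = 0;
    vkGetImageSparseMemoryRequirements(device, image, &count, nullptr);      // First call: count only
    std::vector<VkSparseImageMemoryRequirements> reqs(count);
    vkGetImageSparseMemoryRequirements(device, image, &count, reqs.data());  // Second call: fill the array
    return reqs;
}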

bool CoreChecks::PreCallValidateGetPhysicalDeviceImageFormatProperties2(VkPhysicalDevice physicalDevice,
                                                                        const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo,
                                                                        VkImageFormatProperties2 *pImageFormatProperties) {
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    // Can't wrap AHB-specific validation in a device extension check here, but no harm
    bool skip = ValidateGetPhysicalDeviceImageFormatProperties2ANDROID(instance_data->report_data, pImageFormatInfo,
                                                                       pImageFormatProperties);
    return skip;
}

bool CoreChecks::PreCallValidateGetPhysicalDeviceImageFormatProperties2KHR(VkPhysicalDevice physicalDevice,
                                                                           const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo,
                                                                           VkImageFormatProperties2 *pImageFormatProperties) {
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    // Can't wrap AHB-specific validation in a device extension check here, but no harm
    bool skip = ValidateGetPhysicalDeviceImageFormatProperties2ANDROID(instance_data->report_data, pImageFormatInfo,
                                                                       pImageFormatProperties);
    return skip;
}

void CoreChecks::PreCallRecordDestroyShaderModule(VkDevice device, VkShaderModule shaderModule,
                                                  const VkAllocationCallbacks *pAllocator) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (!shaderModule) return;
    device_data->shaderModuleMap.erase(shaderModule);
}

bool CoreChecks::PreCallValidateDestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    PIPELINE_STATE *pipeline_state = GetPipelineState(pipeline);
    VK_OBJECT obj_struct = {HandleToUint64(pipeline), kVulkanObjectTypePipeline};
    if (device_data->instance_data->disabled.destroy_pipeline) return false;
    bool skip = false;
    if (pipeline_state) {
        skip |= ValidateObjectNotInUse(device_data, pipeline_state, obj_struct, "vkDestroyPipeline",
                                       "VUID-vkDestroyPipeline-pipeline-00765");
    }
    return skip;
}

void CoreChecks::PreCallRecordDestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (!pipeline) return;
    PIPELINE_STATE *pipeline_state = GetPipelineState(pipeline);
    VK_OBJECT obj_struct = {HandleToUint64(pipeline), kVulkanObjectTypePipeline};
    // Any bound cmd buffers are now invalid; guard against an unknown pipeline handle
    if (pipeline_state) {
        InvalidateCommandBuffers(device_data, pipeline_state->cb_bindings, obj_struct);
    }
    if (GetEnables()->gpu_validation) {
        GpuPreCallRecordDestroyPipeline(device_data, pipeline);
    }
    device_data->pipelineMap.erase(pipeline);
}

void CoreChecks::PreCallRecordDestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout,
                                                    const VkAllocationCallbacks *pAllocator) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (!pipelineLayout) return;
    device_data->pipelineLayoutMap.erase(pipelineLayout);
}

bool CoreChecks::PreCallValidateDestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    SAMPLER_STATE *sampler_state = GetSamplerState(sampler);
    VK_OBJECT obj_struct = {HandleToUint64(sampler), kVulkanObjectTypeSampler};
    if (device_data->instance_data->disabled.destroy_sampler) return false;
    bool skip = false;
    if (sampler_state) {
        skip |= ValidateObjectNotInUse(device_data, sampler_state, obj_struct, "vkDestroySampler",
                                       "VUID-vkDestroySampler-sampler-01082");
    }
    return skip;
}

void CoreChecks::PreCallRecordDestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (!sampler) return;
    SAMPLER_STATE *sampler_state = GetSamplerState(sampler);
    VK_OBJECT obj_struct = {HandleToUint64(sampler), kVulkanObjectTypeSampler};
    // Any bound cmd buffers are now invalid
    if (sampler_state) {
        InvalidateCommandBuffers(device_data, sampler_state->cb_bindings, obj_struct);
    }
    device_data->samplerMap.erase(sampler);
}

void CoreChecks::PreCallRecordDestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout,
                                                         const VkAllocationCallbacks *pAllocator) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (!descriptorSetLayout) return;
    auto layout_it = device_data->descriptorSetLayoutMap.find(descriptorSetLayout);
    if (layout_it != device_data->descriptorSetLayoutMap.end()) {
        layout_it->second.get()->MarkDestroyed();
        device_data->descriptorSetLayoutMap.erase(layout_it);
    }
}

bool CoreChecks::PreCallValidateDestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
                                                      const VkAllocationCallbacks *pAllocator) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    DESCRIPTOR_POOL_STATE *desc_pool_state = GetDescriptorPoolState(descriptorPool);
    VK_OBJECT obj_struct = {HandleToUint64(descriptorPool), kVulkanObjectTypeDescriptorPool};
    if (device_data->instance_data->disabled.destroy_descriptor_pool) return false;
    bool skip = false;
    if (desc_pool_state) {
        skip |= ValidateObjectNotInUse(device_data, desc_pool_state, obj_struct, "vkDestroyDescriptorPool",
                                       "VUID-vkDestroyDescriptorPool-descriptorPool-00303");
    }
    return skip;
}

void CoreChecks::PreCallRecordDestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
                                                    const VkAllocationCallbacks *pAllocator) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (!descriptorPool) return;
    DESCRIPTOR_POOL_STATE *desc_pool_state = GetDescriptorPoolState(descriptorPool);
    VK_OBJECT obj_struct = {HandleToUint64(descriptorPool), kVulkanObjectTypeDescriptorPool};
    if (desc_pool_state) {
        // Any bound cmd buffers are now invalid
        InvalidateCommandBuffers(device_data, desc_pool_state->cb_bindings, obj_struct);
        // Free sets that were in this pool
        for (auto ds : desc_pool_state->sets) {
            FreeDescriptorSet(device_data, ds);
        }
        device_data->descriptorPoolMap.erase(descriptorPool);
        delete desc_pool_state;
    }
}

// Verify cmdBuffer in given cb_node is not in global in-flight set, and return skip result
//  If this is a secondary command buffer, then make sure its primary is also in-flight
//  If primary is not in-flight, then remove secondary from global in-flight set
// This function is only valid at a point when cmdBuffer is being reset or freed
bool CoreChecks::CheckCommandBufferInFlight(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *action,
                                            const char *error_code) {
    bool skip = false;
    if (cb_node->in_use.load()) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(cb_node->commandBuffer), error_code, "Attempt to %s command buffer (%s) which is in use.",
                        action, dev_data->report_data->FormatHandle(cb_node->commandBuffer).c_str());
    }
    return skip;
}

// Iterate over all cmdBuffers in given commandPool and verify that each is not in use
bool CoreChecks::CheckCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool, const char *action,
                                             const char *error_code) {
    bool skip = false;
    for (auto cmd_buffer : pPool->commandBuffers) {
        skip |= CheckCommandBufferInFlight(dev_data, GetCBNode(cmd_buffer), action, error_code);
    }
    return skip;
}

// Free all command buffers in given list, removing all references/links to them using ResetCommandBufferState
void CoreChecks::FreeCommandBufferStates(layer_data *dev_data, COMMAND_POOL_NODE *pool_state, const uint32_t command_buffer_count,
                                         const VkCommandBuffer *command_buffers) {
    if (GetEnables()->gpu_validation) {
        GpuPreCallRecordFreeCommandBuffers(dev_data, command_buffer_count, command_buffers);
    }
    for (uint32_t i = 0; i < command_buffer_count; i++) {
        auto cb_state = GetCBNode(command_buffers[i]);
        // Remove references to command buffer's state and delete
        if (cb_state) {
            // reset prior to delete, removing various references to it.
            // TODO: fix this, it's insane.
            ResetCommandBufferState(dev_data, cb_state->commandBuffer);
            // Remove the cb_state's references from layer_data and COMMAND_POOL_NODE
            dev_data->commandBufferMap.erase(cb_state->commandBuffer);
            pool_state->commandBuffers.erase(command_buffers[i]);
            delete cb_state;
        }
    }
}

bool CoreChecks::PreCallValidateFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
                                                   const VkCommandBuffer *pCommandBuffers) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    for (uint32_t i = 0; i < commandBufferCount; i++) {
        auto cb_node = GetCBNode(pCommandBuffers[i]);
        // Verify that the command buffer is not in flight before it can be freed
        if (cb_node) {
            skip |= CheckCommandBufferInFlight(device_data, cb_node, "free", "VUID-vkFreeCommandBuffers-pCommandBuffers-00047");
        }
    }
    return skip;
}

void CoreChecks::PreCallRecordFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
                                                 const VkCommandBuffer *pCommandBuffers) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    auto pPool = GetCommandPoolNode(commandPool);
    FreeCommandBufferStates(device_data, pPool, commandBufferCount, pCommandBuffers);
}

bool CoreChecks::PreCallValidateCreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
                                                  const VkAllocationCallbacks *pAllocator, VkCommandPool *pCommandPool) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    return ValidateDeviceQueueFamily(device_data, pCreateInfo->queueFamilyIndex, "vkCreateCommandPool",
                                     "pCreateInfo->queueFamilyIndex", "VUID-vkCreateCommandPool-queueFamilyIndex-01937");
}

void CoreChecks::PostCallRecordCreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
                                                 const VkAllocationCallbacks *pAllocator, VkCommandPool *pCommandPool,
                                                 VkResult result) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (VK_SUCCESS != result) return;
    device_data->commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags;
    device_data->commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex;
}

bool CoreChecks::PreCallValidateCreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
                                                const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    if (pCreateInfo && pCreateInfo->queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
        if (!device_data->enabled_features.core.pipelineStatisticsQuery) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
                            "VUID-VkQueryPoolCreateInfo-queryType-00791",
                            "Query pool with type VK_QUERY_TYPE_PIPELINE_STATISTICS created on a device with "
                            "VkDeviceCreateInfo.pEnabledFeatures.pipelineStatisticsQuery == VK_FALSE.");
        }
    }
    return skip;
}

void CoreChecks::PostCallRecordCreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
                                               const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool, VkResult result) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (VK_SUCCESS != result) return;
    QUERY_POOL_NODE *qp_node = &device_data->queryPoolMap[*pQueryPool];
    qp_node->createInfo = *pCreateInfo;
}

bool CoreChecks::PreCallValidateDestroyCommandPool(VkDevice device, VkCommandPool commandPool,
                                                   const VkAllocationCallbacks *pAllocator) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    COMMAND_POOL_NODE *cp_state = GetCommandPoolNode(commandPool);
    if (device_data->instance_data->disabled.destroy_command_pool) return false;
    bool skip = false;
    if (cp_state) {
        // Verify that command buffers in pool are complete (not in-flight)
        skip |= CheckCommandBuffersInFlight(device_data, cp_state, "destroy command pool with",
                                            "VUID-vkDestroyCommandPool-commandPool-00041");
    }
    return skip;
}

void CoreChecks::PreCallRecordDestroyCommandPool(VkDevice device, VkCommandPool commandPool,
                                                 const VkAllocationCallbacks *pAllocator) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (!commandPool) return;
    COMMAND_POOL_NODE *cp_state = GetCommandPoolNode(commandPool);
    // Remove cmdpool from cmdpoolmap, after freeing layer data for the command buffers
    // "When a pool is destroyed, all command buffers allocated from the pool are freed."
    if (cp_state) {
        // Create a vector, as FreeCommandBufferStates deletes from cp_state->commandBuffers during iteration.
        std::vector<VkCommandBuffer> cb_vec{cp_state->commandBuffers.begin(), cp_state->commandBuffers.end()};
        FreeCommandBufferStates(device_data, cp_state, static_cast<uint32_t>(cb_vec.size()), cb_vec.data());
        device_data->commandPoolMap.erase(commandPool);
    }
}

bool CoreChecks::PreCallValidateResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    auto command_pool_state = GetCommandPoolNode(commandPool);
    return CheckCommandBuffersInFlight(device_data, command_pool_state, "reset command pool with",
                                       "VUID-vkResetCommandPool-commandPool-00040");
}

void CoreChecks::PostCallRecordResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags,
                                                VkResult result) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (VK_SUCCESS != result) return;
    // Reset all of the CBs allocated from this pool
    auto command_pool_state = GetCommandPoolNode(commandPool);
    for (auto cmdBuffer : command_pool_state->commandBuffers) {
        ResetCommandBufferState(device_data, cmdBuffer);
    }
}

bool CoreChecks::PreCallValidateResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    for (uint32_t i = 0; i < fenceCount; ++i) {
        auto pFence = GetFenceNode(pFences[i]);
        if (pFence && pFence->scope == kSyncScopeInternal && pFence->state == FENCE_INFLIGHT) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                            HandleToUint64(pFences[i]), "VUID-vkResetFences-pFences-01123", "Fence %s is in use.",
                            device_data->report_data->FormatHandle(pFences[i]).c_str());
        }
    }
    return skip;
}

void CoreChecks::PostCallRecordResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkResult result) {
    for (uint32_t i = 0; i < fenceCount; ++i) {
        auto pFence = GetFenceNode(pFences[i]);
        if (pFence) {
            if (pFence->scope == kSyncScopeInternal) {
                pFence->state = FENCE_UNSIGNALED;
            } else if (pFence->scope == kSyncScopeExternalTemporary) {
                // Resetting a fence with a temporarily-imported external payload restores its internal scope
                pFence->scope = kSyncScopeInternal;
            }
        }
    }
}

// For given cb_nodes, invalidate them and track object causing invalidation
void CoreChecks::InvalidateCommandBuffers(const layer_data *dev_data, std::unordered_set<GLOBAL_CB_NODE *> const &cb_nodes,
                                          VK_OBJECT obj) {
    for (auto cb_node : cb_nodes) {
        if (cb_node->state == CB_RECORDING) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    HandleToUint64(cb_node->commandBuffer), kVUID_Core_DrawState_InvalidCommandBuffer,
                    "Invalidating a command buffer that's currently being recorded: %s.",
                    dev_data->report_data->FormatHandle(cb_node->commandBuffer).c_str());
            cb_node->state = CB_INVALID_INCOMPLETE;
        } else if (cb_node->state == CB_RECORDED) {
            cb_node->state = CB_INVALID_COMPLETE;
        }
        cb_node->broken_bindings.push_back(obj);

        // if secondary, then propagate the invalidation to the primaries that will call us.
        if (cb_node->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
            InvalidateCommandBuffers(dev_data, cb_node->linkedCommandBuffers, obj);
        }
    }
}
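
// Illustrative sketch of the invalidation state machine applied above (hypothetical
// helper, not used by the layer): recording command buffers become invalid-incomplete,
// fully recorded ones become invalid-complete, and all other states are left alone.
static inline CB_STATE InvalidatedStateSketch(CB_STATE current) {
    if (current == CB_RECORDING) return CB_INVALID_INCOMPLETE;
    if (current == CB_RECORDED) return CB_INVALID_COMPLETE;
    return current;  // Other states are not changed by invalidation
}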

bool CoreChecks::PreCallValidateDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer,
                                                   const VkAllocationCallbacks *pAllocator) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    FRAMEBUFFER_STATE *framebuffer_state = GetFramebufferState(framebuffer);
    VK_OBJECT obj_struct = {HandleToUint64(framebuffer), kVulkanObjectTypeFramebuffer};
    bool skip = false;
    if (framebuffer_state) {
        skip |= ValidateObjectNotInUse(device_data, framebuffer_state, obj_struct, "vkDestroyFramebuffer",
                                       "VUID-vkDestroyFramebuffer-framebuffer-00892");
    }
    return skip;
}

void CoreChecks::PreCallRecordDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer,
                                                 const VkAllocationCallbacks *pAllocator) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (!framebuffer) return;
    FRAMEBUFFER_STATE *framebuffer_state = GetFramebufferState(framebuffer);
    VK_OBJECT obj_struct = {HandleToUint64(framebuffer), kVulkanObjectTypeFramebuffer};
    // Guard against an unknown framebuffer handle
    if (framebuffer_state) {
        InvalidateCommandBuffers(device_data, framebuffer_state->cb_bindings, obj_struct);
    }
    device_data->frameBufferMap.erase(framebuffer);
}

bool CoreChecks::PreCallValidateDestroyRenderPass(VkDevice device, VkRenderPass renderPass,
                                                  const VkAllocationCallbacks *pAllocator) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    RENDER_PASS_STATE *rp_state = GetRenderPassState(renderPass);
    VK_OBJECT obj_struct = {HandleToUint64(renderPass), kVulkanObjectTypeRenderPass};
    bool skip = false;
    if (rp_state) {
        skip |= ValidateObjectNotInUse(device_data, rp_state, obj_struct, "vkDestroyRenderPass",
                                       "VUID-vkDestroyRenderPass-renderPass-00873");
    }
    return skip;
}

void CoreChecks::PreCallRecordDestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (!renderPass) return;
    RENDER_PASS_STATE *rp_state = GetRenderPassState(renderPass);
    VK_OBJECT obj_struct = {HandleToUint64(renderPass), kVulkanObjectTypeRenderPass};
    // Guard against an unknown render pass handle
    if (rp_state) {
        InvalidateCommandBuffers(device_data, rp_state->cb_bindings, obj_struct);
    }
    device_data->renderPassMap.erase(renderPass);
}

// Access helper functions for external modules
VkFormatProperties CoreChecks::GetPDFormatProperties(const VkFormat format) {
    VkFormatProperties format_properties;
    instance_dispatch_table.GetPhysicalDeviceFormatProperties(physical_device, format, &format_properties);
    return format_properties;
}

VkResult CoreChecks::GetPDImageFormatProperties(const VkImageCreateInfo *image_ci,
                                                VkImageFormatProperties *pImageFormatProperties) {
    return instance_dispatch_table.GetPhysicalDeviceImageFormatProperties(physical_device, image_ci->format, image_ci->imageType,
                                                                          image_ci->tiling, image_ci->usage, image_ci->flags,
                                                                          pImageFormatProperties);
}

VkResult CoreChecks::GetPDImageFormatProperties2(const VkPhysicalDeviceImageFormatInfo2 *phys_dev_image_fmt_info,
                                                 VkImageFormatProperties2 *pImageFormatProperties) {
    if (!instance_extensions.vk_khr_get_physical_device_properties_2) return VK_ERROR_EXTENSION_NOT_PRESENT;
    return instance_dispatch_table.GetPhysicalDeviceImageFormatProperties2(physical_device, phys_dev_image_fmt_info,
                                                                           pImageFormatProperties);
}

const debug_report_data *CoreChecks::GetReportData() { return report_data; }

const VkLayerDispatchTable *CoreChecks::GetDispatchTable() { return &device_dispatch_table; }

const VkPhysicalDeviceProperties *CoreChecks::GetPDProperties() { return &phys_dev_props; }

const VkPhysicalDeviceMemoryProperties *CoreChecks::GetPhysicalDeviceMemoryProperties() { return &phys_dev_mem_props; }

const CHECK_DISABLED *CoreChecks::GetDisables() { return &instance_state->disabled; }

const CHECK_ENABLED *CoreChecks::GetEnables() { return &instance_state->enabled; }

std::unordered_map<VkImage, std::unique_ptr<IMAGE_STATE>> *CoreChecks::GetImageMap() { return &imageMap; }

std::unordered_map<VkImage, std::vector<ImageSubresourcePair>> *CoreChecks::GetImageSubresourceMap() {
    return &imageSubresourceMap;
}

std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> *CoreChecks::GetImageLayoutMap() { return &imageLayoutMap; }

std::unordered_map<VkBuffer, std::unique_ptr<BUFFER_STATE>> *CoreChecks::GetBufferMap() { return &bufferMap; }

std::unordered_map<VkBufferView, std::unique_ptr<BUFFER_VIEW_STATE>> *CoreChecks::GetBufferViewMap() { return &bufferViewMap; }

std::unordered_map<VkImageView, std::unique_ptr<IMAGE_VIEW_STATE>> *CoreChecks::GetImageViewMap() { return &imageViewMap; }

const DeviceFeatures *CoreChecks::GetEnabledFeatures() { return &enabled_features; }

const DeviceExtensions *CoreChecks::GetDeviceExtensions() { return &device_extensions; }

GpuValidationState *CoreChecks::GetGpuValidationState() { return &gpu_validation_state; }

VkDevice CoreChecks::GetDevice() { return device; }

uint32_t CoreChecks::GetApiVersion() { return api_version; }

void CoreChecks::PostCallRecordCreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo,
                                           const VkAllocationCallbacks *pAllocator, VkFence *pFence, VkResult result) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (VK_SUCCESS != result) return;
    auto &fence_node = device_data->fenceMap[*pFence];
    fence_node.fence = *pFence;
    fence_node.createInfo = *pCreateInfo;
    fence_node.state = (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) ? FENCE_RETIRED : FENCE_UNSIGNALED;
}

// Validation cache:
// CV is the bottommost implementor of this extension. Don't pass calls down.
// Utility function to set collective state for pipeline
void SetPipelineState(PIPELINE_STATE *pPipe) {
    // If any attachment used by this pipeline has blendEnable and uses a constant blend factor,
    // set the pipeline-level blendConstantsEnabled flag
    if (pPipe->graphicsPipelineCI.pColorBlendState) {
        for (size_t i = 0; i < pPipe->attachments.size(); ++i) {
            if (VK_TRUE == pPipe->attachments[i].blendEnable) {
                if (((pPipe->attachments[i].dstAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].dstAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
                    ((pPipe->attachments[i].dstColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].dstColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
                    ((pPipe->attachments[i].srcAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].srcAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
                    ((pPipe->attachments[i].srcColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].srcColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA))) {
                    pPipe->blendConstantsEnabled = true;
                }
            }
        }
    }
}
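
// Illustrative sketch: blendConstantsEnabled is latched above because any
// VK_BLEND_FACTOR_CONSTANT_* factor makes the pipeline consume the blend
// constants. An attachment state that trips the flag (application side):
//
//   VkPipelineColorBlendAttachmentState att = {};
//   att.blendEnable = VK_TRUE;
//   att.srcColorBlendFactor = VK_BLEND_FACTOR_CONSTANT_COLOR;           // in the flagged range
//   att.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR;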

bool CoreChecks::ValidatePipelineVertexDivisors(layer_data *dev_data,
                                                std::vector<std::unique_ptr<PIPELINE_STATE>> const &pipe_state_vec,
                                                const uint32_t count, const VkGraphicsPipelineCreateInfo *pipe_cis) {
    bool skip = false;
    const VkPhysicalDeviceLimits *device_limits = &(GetPDProperties()->limits);

    for (uint32_t i = 0; i < count; i++) {
        auto pvids_ci = lvl_find_in_chain<VkPipelineVertexInputDivisorStateCreateInfoEXT>(pipe_cis[i].pVertexInputState->pNext);
        if (nullptr == pvids_ci) continue;

        const PIPELINE_STATE *pipe_state = pipe_state_vec[i].get();
        for (uint32_t j = 0; j < pvids_ci->vertexBindingDivisorCount; j++) {
            const VkVertexInputBindingDivisorDescriptionEXT *vibdd = &(pvids_ci->pVertexBindingDivisors[j]);
            if (vibdd->binding >= device_limits->maxVertexInputBindings) {
                skip |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                    HandleToUint64(pipe_state->pipeline), "VUID-VkVertexInputBindingDivisorDescriptionEXT-binding-01869",
                    "vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, "
                    "pVertexBindingDivisors[%1u] binding index of (%1u) exceeds device maxVertexInputBindings (%1u).",
                    i, j, vibdd->binding, device_limits->maxVertexInputBindings);
            }
            if (vibdd->divisor > dev_data->phys_dev_ext_props.vtx_attrib_divisor_props.maxVertexAttribDivisor) {
                skip |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                    HandleToUint64(pipe_state->pipeline), "VUID-VkVertexInputBindingDivisorDescriptionEXT-divisor-01870",
                    "vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, "
                    "pVertexBindingDivisors[%1u] divisor of (%1u) exceeds extension maxVertexAttribDivisor (%1u).",
                    i, j, vibdd->divisor, dev_data->phys_dev_ext_props.vtx_attrib_divisor_props.maxVertexAttribDivisor);
            }
            if ((0 == vibdd->divisor) &&
                !dev_data->enabled_features.vtx_attrib_divisor_features.vertexAttributeInstanceRateZeroDivisor) {
                skip |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                    HandleToUint64(pipe_state->pipeline),
                    "VUID-VkVertexInputBindingDivisorDescriptionEXT-vertexAttributeInstanceRateZeroDivisor-02228",
                    "vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, "
                    "pVertexBindingDivisors[%1u] divisor must not be 0 when vertexAttributeInstanceRateZeroDivisor feature is not "
                    "enabled.",
                    i, j);
            }
            if ((1 != vibdd->divisor) &&
                !dev_data->enabled_features.vtx_attrib_divisor_features.vertexAttributeInstanceRateDivisor) {
                skip |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                    HandleToUint64(pipe_state->pipeline),
                    "VUID-VkVertexInputBindingDivisorDescriptionEXT-vertexAttributeInstanceRateDivisor-02229",
                    "vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, "
                    "pVertexBindingDivisors[%1u] divisor (%1u) must be 1 when vertexAttributeInstanceRateDivisor feature is not "
                    "enabled.",
                    i, j, vibdd->divisor);
            }

            // Find the corresponding binding description and validate input rate setting
            bool failed_01871 = true;
            for (size_t k = 0; k < pipe_state->vertex_binding_descriptions_.size(); k++) {
                if ((vibdd->binding == pipe_state->vertex_binding_descriptions_[k].binding) &&
                    (VK_VERTEX_INPUT_RATE_INSTANCE == pipe_state->vertex_binding_descriptions_[k].inputRate)) {
                    failed_01871 = false;
                    break;
                }
            }
            if (failed_01871) {  // Description not found, or has incorrect inputRate value
                skip |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                    HandleToUint64(pipe_state->pipeline), "VUID-VkVertexInputBindingDivisorDescriptionEXT-inputRate-01871",
                    "vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, "
                    "pVertexBindingDivisors[%1u] specifies binding index (%1u), but that binding index's "
                    "VkVertexInputBindingDescription.inputRate member is not VK_VERTEX_INPUT_RATE_INSTANCE.",
                    i, j, vibdd->binding);
            }
        }
    }
    return skip;
}
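
// Illustrative sketch: the divisor description below is flagged by the
// inputRate check above unless binding 0 is declared with
// VK_VERTEX_INPUT_RATE_INSTANCE in its VkVertexInputBindingDescription:
//
//   VkVertexInputBindingDivisorDescriptionEXT divisor = {};
//   divisor.binding = 0;
//   divisor.divisor = 4;  // advance the per-instance attribute every 4 instances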

bool CoreChecks::PreCallValidateCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                                                        const VkGraphicsPipelineCreateInfo *pCreateInfos,
                                                        const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
                                                        void *cgpl_state_data) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    bool skip = false;
    create_graphics_pipeline_api_state *cgpl_state = reinterpret_cast<create_graphics_pipeline_api_state *>(cgpl_state_data);
    cgpl_state->pipe_state.reserve(count);
    for (uint32_t i = 0; i < count; i++) {
        cgpl_state->pipe_state.push_back(std::unique_ptr<PIPELINE_STATE>(new PIPELINE_STATE));
        (cgpl_state->pipe_state)[i]->initGraphicsPipeline(&pCreateInfos[i],
                                                          GetRenderPassStateSharedPtr(pCreateInfos[i].renderPass));
        (cgpl_state->pipe_state)[i]->pipeline_layout = *GetPipelineLayout(device_data, pCreateInfos[i].layout);
    }

    for (uint32_t i = 0; i < count; i++) {
        skip |= ValidatePipelineLocked(device_data, cgpl_state->pipe_state, i);
    }

    for (uint32_t i = 0; i < count; i++) {
        skip |= ValidatePipelineUnlocked(device_data, cgpl_state->pipe_state, i);
    }

    if (device_data->device_extensions.vk_ext_vertex_attribute_divisor) {
        skip |= ValidatePipelineVertexDivisors(device_data, cgpl_state->pipe_state, count, pCreateInfos);
    }

    return skip;
}
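
// Note (a sketch of the surrounding dispatch, assuming the usual chassis flow):
// for vkCreateGraphicsPipelines the chassis calls PreCallValidate* above, then
// PreCallRecord* below, then the down-chain create, then PostCallRecord* with
// the down-chain VkResult; the pipe_state shadow objects built here travel
// through cgpl_state_data across all three phases.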

// GPU validation may replace pCreateInfos for the down-chain call
void CoreChecks::PreCallRecordCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                                                      const VkGraphicsPipelineCreateInfo *pCreateInfos,
                                                      const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
                                                      void *cgpl_state_data) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    create_graphics_pipeline_api_state *cgpl_state = reinterpret_cast<create_graphics_pipeline_api_state *>(cgpl_state_data);
    cgpl_state->pCreateInfos = pCreateInfos;
    // GPU Validation may replace instrumented shaders with non-instrumented ones, so allow it to modify the createinfos.
    if (GetEnables()->gpu_validation) {
        cgpl_state->gpu_create_infos = GpuPreCallRecordCreateGraphicsPipelines(device_data, pipelineCache, count, pCreateInfos,
                                                                               pAllocator, pPipelines, cgpl_state->pipe_state);
        cgpl_state->pCreateInfos = reinterpret_cast<VkGraphicsPipelineCreateInfo *>(cgpl_state->gpu_create_infos.data());
    }
}

void CoreChecks::PostCallRecordCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                                                       const VkGraphicsPipelineCreateInfo *pCreateInfos,
                                                       const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
                                                       VkResult result, void *cgpl_state_data) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    create_graphics_pipeline_api_state *cgpl_state = reinterpret_cast<create_graphics_pipeline_api_state *>(cgpl_state_data);
    // This API may create pipelines regardless of the return value
    for (uint32_t i = 0; i < count; i++) {
        if (pPipelines[i] != VK_NULL_HANDLE) {
            (cgpl_state->pipe_state)[i]->pipeline = pPipelines[i];
            device_data->pipelineMap[pPipelines[i]] = std::move((cgpl_state->pipe_state)[i]);
        }
    }
    // GPU val needs clean up regardless of result
    if (GetEnables()->gpu_validation) {
        GpuPostCallRecordCreateGraphicsPipelines(device_data, count, pCreateInfos, pAllocator, pPipelines);
        cgpl_state->gpu_create_infos.clear();
    }
    cgpl_state->pipe_state.clear();
}

bool CoreChecks::PreCallValidateCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                                                       const VkComputePipelineCreateInfo *pCreateInfos,
                                                       const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
                                                       void *pipe_state_data) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    std::vector<std::unique_ptr<PIPELINE_STATE>> *pipe_state =
        reinterpret_cast<std::vector<std::unique_ptr<PIPELINE_STATE>> *>(pipe_state_data);
    pipe_state->reserve(count);
    for (uint32_t i = 0; i < count; i++) {
        // Create and initialize internal tracking data structure
        pipe_state->push_back(unique_ptr<PIPELINE_STATE>(new PIPELINE_STATE));
        (*pipe_state)[i]->initComputePipeline(&pCreateInfos[i]);
        (*pipe_state)[i]->pipeline_layout = *GetPipelineLayout(device_data, pCreateInfos[i].layout);

        // TODO: Add Compute Pipeline Verification
        skip |= ValidateComputePipeline(device_data, (*pipe_state)[i].get());
    }
    return skip;
}

void CoreChecks::PostCallRecordCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                                                      const VkComputePipelineCreateInfo *pCreateInfos,
                                                      const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
                                                      VkResult result, void *pipe_state_data) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    std::vector<std::unique_ptr<PIPELINE_STATE>> *pipe_state =
        reinterpret_cast<std::vector<std::unique_ptr<PIPELINE_STATE>> *>(pipe_state_data);

    // This API may create pipelines regardless of the return value
    for (uint32_t i = 0; i < count; i++) {
        if (pPipelines[i] != VK_NULL_HANDLE) {
            (*pipe_state)[i]->pipeline = pPipelines[i];
            device_data->pipelineMap[pPipelines[i]] = std::move((*pipe_state)[i]);
        }
    }
}

bool CoreChecks::PreCallValidateCreateRayTracingPipelinesNV(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                                                            const VkRayTracingPipelineCreateInfoNV *pCreateInfos,
                                                            const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
                                                            void *pipe_state_data) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    // The order of operations here is a little convoluted but gets the job done
    //  1. Pipeline create state is first shadowed into PIPELINE_STATE struct
    //  2. Create state is then validated (which uses flags setup during shadowing)
    //  3. If everything looks good, we'll then create the pipeline and add NODE to pipelineMap
    uint32_t i = 0;
    vector<std::unique_ptr<PIPELINE_STATE>> *pipe_state =
        reinterpret_cast<vector<std::unique_ptr<PIPELINE_STATE>> *>(pipe_state_data);
    pipe_state->reserve(count);
    for (i = 0; i < count; i++) {
        pipe_state->push_back(std::unique_ptr<PIPELINE_STATE>(new PIPELINE_STATE));
        (*pipe_state)[i]->initRayTracingPipelineNV(&pCreateInfos[i]);
        (*pipe_state)[i]->pipeline_layout = *GetPipelineLayout(device_data, pCreateInfos[i].layout);
    }

    for (i = 0; i < count; i++) {
        skip |= ValidateRayTracingPipelineNV(device_data, (*pipe_state)[i].get());
    }

    return skip;
}

void CoreChecks::PostCallRecordCreateRayTracingPipelinesNV(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                                                           const VkRayTracingPipelineCreateInfoNV *pCreateInfos,
                                                           const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
                                                           VkResult result, void *pipe_state_data) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    vector<std::unique_ptr<PIPELINE_STATE>> *pipe_state =
        reinterpret_cast<vector<std::unique_ptr<PIPELINE_STATE>> *>(pipe_state_data);
    // This API may create pipelines regardless of the return value
    for (uint32_t i = 0; i < count; i++) {
        if (pPipelines[i] != VK_NULL_HANDLE) {
            (*pipe_state)[i]->pipeline = pPipelines[i];
            device_data->pipelineMap[pPipelines[i]] = std::move((*pipe_state)[i]);
        }
    }
}

void CoreChecks::PostCallRecordCreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
                                             const VkAllocationCallbacks *pAllocator, VkSampler *pSampler, VkResult result) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (VK_SUCCESS != result) return;  // don't track samplers whose creation failed
    device_data->samplerMap[*pSampler] = unique_ptr<SAMPLER_STATE>(new SAMPLER_STATE(pSampler, pCreateInfo));
}

bool CoreChecks::PreCallValidateCreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
                                                          const VkAllocationCallbacks *pAllocator,
                                                          VkDescriptorSetLayout *pSetLayout) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (device_data->instance_data->disabled.create_descriptor_set_layout) return false;
    return cvdescriptorset::DescriptorSetLayout::ValidateCreateInfo(
        device_data->report_data, pCreateInfo, device_data->device_extensions.vk_khr_push_descriptor,
        device_data->phys_dev_ext_props.max_push_descriptors, device_data->device_extensions.vk_ext_descriptor_indexing,
        &device_data->enabled_features.descriptor_indexing, &device_data->enabled_features.inline_uniform_block,
        &device_data->phys_dev_ext_props.inline_uniform_block_props);
}

void CoreChecks::PostCallRecordCreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
                                                         const VkAllocationCallbacks *pAllocator, VkDescriptorSetLayout *pSetLayout,
                                                         VkResult result) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (VK_SUCCESS != result) return;
    device_data->descriptorSetLayoutMap[*pSetLayout] =
        std::make_shared<cvdescriptorset::DescriptorSetLayout>(pCreateInfo, *pSetLayout);
}

// Used by CreatePipelineLayout and CmdPushConstants.
// Note that the index argument is optional and only used by CreatePipelineLayout.
static bool ValidatePushConstantRange(const layer_data *dev_data, const uint32_t offset, const uint32_t size,
                                      const char *caller_name, uint32_t index = 0) {
    if (dev_data->instance_data->disabled.push_constant_range) return false;
    uint32_t const maxPushConstantsSize = dev_data->phys_dev_props.limits.maxPushConstantsSize;
    bool skip = false;
    // Check that offset + size don't exceed the max.
    // Prevent arithmetic overflow here by avoiding addition and testing in this order.
    if ((offset >= maxPushConstantsSize) || (size > maxPushConstantsSize - offset)) {
        // This is a pain just to adapt the log message to the caller, but better to sort it out only when there is a problem.
        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
            if (offset >= maxPushConstantsSize) {
                skip |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                    "VUID-VkPushConstantRange-offset-00294",
                    "%s call has push constants index %u with offset %u that exceeds this device's maxPushConstantsSize of %u.",
                    caller_name, index, offset, maxPushConstantsSize);
            }
            if (size > maxPushConstantsSize - offset) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                "VUID-VkPushConstantRange-size-00298",
                                "%s call has push constants index %u with offset %u and size %u that exceeds this device's "
                                "maxPushConstantsSize of %u.",
                                caller_name, index, offset, size, maxPushConstantsSize);
            }
        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
            if (offset >= maxPushConstantsSize) {
                skip |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                    "VUID-vkCmdPushConstants-offset-00370",
                    "%s call has push constants index %u with offset %u that exceeds this device's maxPushConstantsSize of %u.",
                    caller_name, index, offset, maxPushConstantsSize);
            }
            if (size > maxPushConstantsSize - offset) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                "VUID-vkCmdPushConstants-size-00371",
                                "%s call has push constants index %u with offset %u and size %u that exceeds this device's "
                                "maxPushConstantsSize of %u.",
                                caller_name, index, offset, size, maxPushConstantsSize);
            }
        } else {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            kVUID_Core_DrawState_InternalError, "%s caller not supported.", caller_name);
        }
    }
    // size needs to be non-zero and a multiple of 4.
    if ((size == 0) || ((size & 0x3) != 0)) {
        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
            if (size == 0) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                "VUID-VkPushConstantRange-size-00296",
                                "%s call has push constants index %u with size %u. Size must be greater than zero.", caller_name,
                                index, size);
            }
            if (size & 0x3) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                "VUID-VkPushConstantRange-size-00297",
                                "%s call has push constants index %u with size %u. Size must be a multiple of 4.", caller_name,
                                index, size);
            }
        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
            if (size == 0) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                "VUID-vkCmdPushConstants-size-arraylength",
                                "%s call has push constants index %u with size %u. Size must be greater than zero.", caller_name,
                                index, size);
            }
            if (size & 0x3) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                "VUID-vkCmdPushConstants-size-00369",
                                "%s call has push constants index %u with size %u. Size must be a multiple of 4.", caller_name,
                                index, size);
            }
        } else {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            kVUID_Core_DrawState_InternalError, "%s caller not supported.", caller_name);
        }
    }
    // offset needs to be a multiple of 4.
    if ((offset & 0x3) != 0) {
        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            "VUID-VkPushConstantRange-offset-00295",
                            "%s call has push constants index %u with offset %u. Offset must be a multiple of 4.", caller_name,
                            index, offset);
        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            "VUID-vkCmdPushConstants-offset-00368",
                            "%s call has push constants with offset %u. Offset must be a multiple of 4.", caller_name, offset);
        } else {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            kVUID_Core_DrawState_InternalError, "%s caller not supported.", caller_name);
        }
    }
    return skip;
}
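
// Illustrative sketch: with maxPushConstantsSize == 128, each of the ranges
// below trips at least one of the checks above (offset/size in bytes):
//
//   VkPushConstantRange bad_ranges[] = {
//       {VK_SHADER_STAGE_VERTEX_BIT, 130, 4},   // offset >= limit            (00294)
//       {VK_SHADER_STAGE_VERTEX_BIT, 120, 16},  // offset + size > limit      (00298)
//       {VK_SHADER_STAGE_VERTEX_BIT, 0, 0},     // zero size                  (00296)
//       {VK_SHADER_STAGE_VERTEX_BIT, 0, 6},     // size not a multiple of 4   (00297)
//       {VK_SHADER_STAGE_VERTEX_BIT, 2, 4},     // offset not a multiple of 4 (00295)
//   };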

enum DSL_DESCRIPTOR_GROUPS {
    DSL_TYPE_SAMPLERS = 0,
    DSL_TYPE_UNIFORM_BUFFERS,
    DSL_TYPE_STORAGE_BUFFERS,
    DSL_TYPE_SAMPLED_IMAGES,
    DSL_TYPE_STORAGE_IMAGES,
    DSL_TYPE_INPUT_ATTACHMENTS,
    DSL_TYPE_INLINE_UNIFORM_BLOCK,
    DSL_NUM_DESCRIPTOR_GROUPS
};

// Used by PreCallValidateCreatePipelineLayout.
// Returns an array of size DSL_NUM_DESCRIPTOR_GROUPS of the maximum number of descriptors used in any single pipeline stage
std::valarray<uint32_t> GetDescriptorCountMaxPerStage(
    const layer_data *dev_data, const std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> set_layouts,
    bool skip_update_after_bind) {
    // Identify active pipeline stages
    std::vector<VkShaderStageFlags> stage_flags = {VK_SHADER_STAGE_VERTEX_BIT, VK_SHADER_STAGE_FRAGMENT_BIT,
                                                   VK_SHADER_STAGE_COMPUTE_BIT};
    if (dev_data->enabled_features.core.geometryShader) {
        stage_flags.push_back(VK_SHADER_STAGE_GEOMETRY_BIT);
    }
    if (dev_data->enabled_features.core.tessellationShader) {
        stage_flags.push_back(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
        stage_flags.push_back(VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT);
    }

    // Allow iteration over enum values
    std::vector<DSL_DESCRIPTOR_GROUPS> dsl_groups = {
        DSL_TYPE_SAMPLERS,       DSL_TYPE_UNIFORM_BUFFERS,   DSL_TYPE_STORAGE_BUFFERS,     DSL_TYPE_SAMPLED_IMAGES,
        DSL_TYPE_STORAGE_IMAGES, DSL_TYPE_INPUT_ATTACHMENTS, DSL_TYPE_INLINE_UNIFORM_BLOCK};

    // Sum by layouts per stage, then pick max of stages per type
    std::valarray<uint32_t> max_sum(0U, DSL_NUM_DESCRIPTOR_GROUPS);  // max descriptor sum among all pipeline stages
    for (auto stage : stage_flags) {
        std::valarray<uint32_t> stage_sum(0U, DSL_NUM_DESCRIPTOR_GROUPS);  // per-stage sums
        for (auto dsl : set_layouts) {
            if (skip_update_after_bind &&
                (dsl->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT)) {
                continue;
            }

            for (uint32_t binding_idx = 0; binding_idx < dsl->GetBindingCount(); binding_idx++) {
                const VkDescriptorSetLayoutBinding *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx);
                // Bindings with a descriptorCount of 0 are "reserved" and should be skipped
                if (0 != (stage & binding->stageFlags) && binding->descriptorCount > 0) {
                    switch (binding->descriptorType) {
                        case VK_DESCRIPTOR_TYPE_SAMPLER:
                            stage_sum[DSL_TYPE_SAMPLERS] += binding->descriptorCount;
                            break;
                        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
                        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
                            stage_sum[DSL_TYPE_UNIFORM_BUFFERS] += binding->descriptorCount;
                            break;
                        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
                        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
                            stage_sum[DSL_TYPE_STORAGE_BUFFERS] += binding->descriptorCount;
                            break;
                        case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
                        case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
                            stage_sum[DSL_TYPE_SAMPLED_IMAGES] += binding->descriptorCount;
                            break;
                        case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
                        case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
                            stage_sum[DSL_TYPE_STORAGE_IMAGES] += binding->descriptorCount;
                            break;
                        case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
                            stage_sum[DSL_TYPE_SAMPLED_IMAGES] += binding->descriptorCount;
                            stage_sum[DSL_TYPE_SAMPLERS] += binding->descriptorCount;
                            break;
                        case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
                            stage_sum[DSL_TYPE_INPUT_ATTACHMENTS] += binding->descriptorCount;
                            break;
                        case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT:
                            // count one block per binding. descriptorCount is number of bytes
                            stage_sum[DSL_TYPE_INLINE_UNIFORM_BLOCK]++;
                            break;
                        default:
                            break;
                    }
                }
            }
        }
        for (auto type : dsl_groups) {
            max_sum[type] = std::max(stage_sum[type], max_sum[type]);
        }
    }
    return max_sum;
}
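
// Illustrative sketch of the counting rules above: a single binding of
// VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER with descriptorCount == 8 adds 8 to
// both DSL_TYPE_SAMPLED_IMAGES and DSL_TYPE_SAMPLERS for each stage in its
// stageFlags, while an inline uniform block contributes 1 per binding no matter
// how many bytes its descriptorCount declares.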

// Used by PreCallValidateCreatePipelineLayout.
// Returns a map indexed by VK_DESCRIPTOR_TYPE_* enum of the summed descriptors by type.
// Note: descriptors only count against the limit once even if used by multiple stages.
std::map<uint32_t, uint32_t> GetDescriptorSum(
    const layer_data *dev_data, const std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> &set_layouts,
    bool skip_update_after_bind) {
    std::map<uint32_t, uint32_t> sum_by_type;
    for (auto dsl : set_layouts) {
        if (skip_update_after_bind && (dsl->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT)) {
            continue;
        }

        for (uint32_t binding_idx = 0; binding_idx < dsl->GetBindingCount(); binding_idx++) {
            const VkDescriptorSetLayoutBinding *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx);
            // Bindings with a descriptorCount of 0 are "reserved" and should be skipped
            if (binding->descriptorCount > 0) {
                if (binding->descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
                    // count one block per binding. descriptorCount is number of bytes
                    sum_by_type[binding->descriptorType]++;
                } else {
                    sum_by_type[binding->descriptorType] += binding->descriptorCount;
                }
            }
        }
    }
    return sum_by_type;
}
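
// Illustrative usage sketch (hypothetical counts): for two set layouts that each
// declare a VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER binding with descriptorCount == 4,
// GetDescriptorSum() returns sum_by_type[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER] == 8,
// the value PreCallValidateCreatePipelineLayout() checks against
// maxDescriptorSetUniformBuffers below.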

bool CoreChecks::PreCallValidateCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
                                                     const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;

    // Validate layout count against device physical limit
    if (pCreateInfo->setLayoutCount > device_data->phys_dev_props.limits.maxBoundDescriptorSets) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        "VUID-VkPipelineLayoutCreateInfo-setLayoutCount-00286",
                        "vkCreatePipelineLayout(): setLayoutCount (%d) exceeds physical device maxBoundDescriptorSets limit (%d).",
                        pCreateInfo->setLayoutCount, device_data->phys_dev_props.limits.maxBoundDescriptorSets);
    }

    // Validate push constant ranges
    uint32_t i, j;
    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
        skip |= ValidatePushConstantRange(device_data, pCreateInfo->pPushConstantRanges[i].offset,
                                          pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()", i);
        if (0 == pCreateInfo->pPushConstantRanges[i].stageFlags) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            "VUID-VkPushConstantRange-stageFlags-requiredbitmask",
                            "vkCreatePipelineLayout() call has no stageFlags set.");
        }
    }

    // As of 1.0.28, there is a VU that states that a stage flag cannot appear more than once in the list of push constant ranges.
    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
        for (j = i + 1; j < pCreateInfo->pushConstantRangeCount; ++j) {
            if (0 != (pCreateInfo->pPushConstantRanges[i].stageFlags & pCreateInfo->pPushConstantRanges[j].stageFlags)) {
                skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                "VUID-VkPipelineLayoutCreateInfo-pPushConstantRanges-00292",
                                "vkCreatePipelineLayout() Duplicate stage flags found in ranges %d and %d.", i, j);
            }
        }
    }
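
    // Illustrative sketch: two ranges that share VK_SHADER_STAGE_FRAGMENT_BIT in
    // stageFlags overlap for the loop above and are flagged under
    // VUID-VkPipelineLayoutCreateInfo-pPushConstantRanges-00292, even when their
    // byte ranges are disjoint:
    //
    //   VkPushConstantRange ranges[2] = {
    //       {VK_SHADER_STAGE_FRAGMENT_BIT, 0, 16},
    //       {VK_SHADER_STAGE_FRAGMENT_BIT | VK_SHADER_STAGE_VERTEX_BIT, 16, 16},  // duplicate fragment stage
    //   };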

    // Early-out
    if (skip) return skip;

    std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> set_layouts(pCreateInfo->setLayoutCount, nullptr);
    unsigned int push_descriptor_set_count = 0;
    {
        for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
            set_layouts[i] = GetDescriptorSetLayout(device_data, pCreateInfo->pSetLayouts[i]);
            if (set_layouts[i]->IsPushDescriptor()) ++push_descriptor_set_count;
        }
    }

    if (push_descriptor_set_count > 1) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00293",
                        "vkCreatePipelineLayout() Multiple push descriptor sets found.");
    }

    // Max descriptors by type, within a single pipeline stage
    std::valarray<uint32_t> max_descriptors_per_stage = GetDescriptorCountMaxPerStage(device_data, set_layouts, true);
    // Samplers
    if (max_descriptors_per_stage[DSL_TYPE_SAMPLERS] > device_data->phys_dev_props.limits.maxPerStageDescriptorSamplers) {
        skip |=
            log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                    "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00287",
                    "vkCreatePipelineLayout(): max per-stage sampler bindings count (%d) exceeds device "
                    "maxPerStageDescriptorSamplers limit (%d).",
                    max_descriptors_per_stage[DSL_TYPE_SAMPLERS], device_data->phys_dev_props.limits.maxPerStageDescriptorSamplers);
    }

    // Uniform buffers
    if (max_descriptors_per_stage[DSL_TYPE_UNIFORM_BUFFERS] >
        device_data->phys_dev_props.limits.maxPerStageDescriptorUniformBuffers) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00288",
                        "vkCreatePipelineLayout(): max per-stage uniform buffer bindings count (%d) exceeds device "
                        "maxPerStageDescriptorUniformBuffers limit (%d).",
                        max_descriptors_per_stage[DSL_TYPE_UNIFORM_BUFFERS],
                        device_data->phys_dev_props.limits.maxPerStageDescriptorUniformBuffers);
    }

    // Storage buffers
    if (max_descriptors_per_stage[DSL_TYPE_STORAGE_BUFFERS] >
        device_data->phys_dev_props.limits.maxPerStageDescriptorStorageBuffers) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00289",
                        "vkCreatePipelineLayout(): max per-stage storage buffer bindings count (%d) exceeds device "
                        "maxPerStageDescriptorStorageBuffers limit (%d).",
                        max_descriptors_per_stage[DSL_TYPE_STORAGE_BUFFERS],
                        device_data->phys_dev_props.limits.maxPerStageDescriptorStorageBuffers);
    }

    // Sampled images
    if (max_descriptors_per_stage[DSL_TYPE_SAMPLED_IMAGES] >
        device_data->phys_dev_props.limits.maxPerStageDescriptorSampledImages) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00290",
                        "vkCreatePipelineLayout(): max per-stage sampled image bindings count (%d) exceeds device "
                        "maxPerStageDescriptorSampledImages limit (%d).",
                        max_descriptors_per_stage[DSL_TYPE_SAMPLED_IMAGES],
                        device_data->phys_dev_props.limits.maxPerStageDescriptorSampledImages);
    }

    // Storage images
    if (max_descriptors_per_stage[DSL_TYPE_STORAGE_IMAGES] >
        device_data->phys_dev_props.limits.maxPerStageDescriptorStorageImages) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00291",
                        "vkCreatePipelineLayout(): max per-stage storage image bindings count (%d) exceeds device "
                        "maxPerStageDescriptorStorageImages limit (%d).",
                        max_descriptors_per_stage[DSL_TYPE_STORAGE_IMAGES],
                        device_data->phys_dev_props.limits.maxPerStageDescriptorStorageImages);
    }

    // Input attachments
    if (max_descriptors_per_stage[DSL_TYPE_INPUT_ATTACHMENTS] >
        device_data->phys_dev_props.limits.maxPerStageDescriptorInputAttachments) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01676",
                        "vkCreatePipelineLayout(): max per-stage input attachment bindings count (%d) exceeds device "
                        "maxPerStageDescriptorInputAttachments limit (%d).",
                        max_descriptors_per_stage[DSL_TYPE_INPUT_ATTACHMENTS],
                        device_data->phys_dev_props.limits.maxPerStageDescriptorInputAttachments);
    }

    // Inline uniform blocks
    if (max_descriptors_per_stage[DSL_TYPE_INLINE_UNIFORM_BLOCK] >
        device_data->phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorInlineUniformBlocks) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        "VUID-VkPipelineLayoutCreateInfo-descriptorType-02214",
                        "vkCreatePipelineLayout(): max per-stage inline uniform block bindings count (%d) exceeds device "
                        "maxPerStageDescriptorInlineUniformBlocks limit (%d).",
                        max_descriptors_per_stage[DSL_TYPE_INLINE_UNIFORM_BLOCK],
                        device_data->phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorInlineUniformBlocks);
    }

    // Total descriptors by type
    //
    std::map<uint32_t, uint32_t> sum_all_stages = GetDescriptorSum(device_data, set_layouts, true);
    // Samplers
    uint32_t sum = sum_all_stages[VK_DESCRIPTOR_TYPE_SAMPLER] + sum_all_stages[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER];
    if (sum > device_data->phys_dev_props.limits.maxDescriptorSetSamplers) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01677",
                        "vkCreatePipelineLayout(): sum of sampler bindings among all stages (%d) exceeds device "
                        "maxDescriptorSetSamplers limit (%d).",
                        sum, device_data->phys_dev_props.limits.maxDescriptorSetSamplers);
    }

    // Uniform buffers
    if (sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER] > device_data->phys_dev_props.limits.maxDescriptorSetUniformBuffers) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01678",
                        "vkCreatePipelineLayout(): sum of uniform buffer bindings among all stages (%d) exceeds device "
                        "maxDescriptorSetUniformBuffers limit (%d).",
                        sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER],
                        device_data->phys_dev_props.limits.maxDescriptorSetUniformBuffers);
    }

    // Dynamic uniform buffers
    if (sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC] >
        device_data->phys_dev_props.limits.maxDescriptorSetUniformBuffersDynamic) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01679",
                        "vkCreatePipelineLayout(): sum of dynamic uniform buffer bindings among all stages (%d) exceeds device "
                        "maxDescriptorSetUniformBuffersDynamic limit (%d).",
                        sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC],
                        device_data->phys_dev_props.limits.maxDescriptorSetUniformBuffersDynamic);
    }

    // Storage buffers
    if (sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER] > device_data->phys_dev_props.limits.maxDescriptorSetStorageBuffers) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01680",
                        "vkCreatePipelineLayout(): sum of storage buffer bindings among all stages (%d) exceeds device "
                        "maxDescriptorSetStorageBuffers limit (%d).",
                        sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER],
                        device_data->phys_dev_props.limits.maxDescriptorSetStorageBuffers);
    }

    // Dynamic storage buffers
    if (sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC] >
        device_data->phys_dev_props.limits.maxDescriptorSetStorageBuffersDynamic) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01681",
                        "vkCreatePipelineLayout(): sum of dynamic storage buffer bindings among all stages (%d) exceeds device "
                        "maxDescriptorSetStorageBuffersDynamic limit (%d).",
                        sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC],
                        device_data->phys_dev_props.limits.maxDescriptorSetStorageBuffersDynamic);
    }

    //  Sampled images
    sum = sum_all_stages[VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE] + sum_all_stages[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER] +
          sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER];
    if (sum > device_data->phys_dev_props.limits.maxDescriptorSetSampledImages) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01682",
                        "vkCreatePipelineLayout(): sum of sampled image bindings among all stages (%d) exceeds device "
                        "maxDescriptorSetSampledImages limit (%d).",
                        sum, device_data->phys_dev_props.limits.maxDescriptorSetSampledImages);
    }

    //  Storage images
    sum = sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_IMAGE] + sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER];
    if (sum > device_data->phys_dev_props.limits.maxDescriptorSetStorageImages) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01683",
                        "vkCreatePipelineLayout(): sum of storage image bindings among all stages (%d) exceeds device "
                        "maxDescriptorSetStorageImages limit (%d).",
                        sum, device_data->phys_dev_props.limits.maxDescriptorSetStorageImages);
    }

    // Input attachments
    if (sum_all_stages[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT] > device_data->phys_dev_props.limits.maxDescriptorSetInputAttachments) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01684",
                        "vkCreatePipelineLayout(): sum of input attachment bindings among all stages (%d) exceeds device "
                        "maxDescriptorSetInputAttachments limit (%d).",
                        sum_all_stages[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT],
                        device_data->phys_dev_props.limits.maxDescriptorSetInputAttachments);
    }

    // Inline uniform blocks
    if (sum_all_stages[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT] >
        device_data->phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetInlineUniformBlocks) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        "VUID-VkPipelineLayoutCreateInfo-descriptorType-02216",
                        "vkCreatePipelineLayout(): sum of inline uniform block bindings among all stages (%d) exceeds device "
                        "maxDescriptorSetInlineUniformBlocks limit (%d).",
                        sum_all_stages[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT],
                        device_data->phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetInlineUniformBlocks);
    }

    if (device_data->device_extensions.vk_ext_descriptor_indexing) {
        // XXX TODO: replace with correct VU messages

        // Max descriptors by type, within a single pipeline stage
        std::valarray<uint32_t> max_descriptors_per_stage_update_after_bind =
            GetDescriptorCountMaxPerStage(device_data, set_layouts, false);
        // Samplers
        if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLERS] >
            device_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindSamplers) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            "VUID-VkPipelineLayoutCreateInfo-descriptorType-03022",
                            "vkCreatePipelineLayout(): max per-stage sampler bindings count (%d) exceeds device "
                            "maxPerStageDescriptorUpdateAfterBindSamplers limit (%d).",
                            max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLERS],
                            device_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindSamplers);
        }

        // Uniform buffers
        if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_UNIFORM_BUFFERS] >
            device_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindUniformBuffers) {
            skip |= log_msg(
                device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                "VUID-VkPipelineLayoutCreateInfo-descriptorType-03023",
                "vkCreatePipelineLayout(): max per-stage uniform buffer bindings count (%d) exceeds device "
                "maxPerStageDescriptorUpdateAfterBindUniformBuffers limit (%d).",
                max_descriptors_per_stage_update_after_bind[DSL_TYPE_UNIFORM_BUFFERS],
                device_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindUniformBuffers);
        }

        // Storage buffers
        if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_BUFFERS] >
            device_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindStorageBuffers) {
            skip |= log_msg(
                device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                "VUID-VkPipelineLayoutCreateInfo-descriptorType-03024",
                "vkCreatePipelineLayout(): max per-stage storage buffer bindings count (%d) exceeds device "
                "maxPerStageDescriptorUpdateAfterBindStorageBuffers limit (%d).",
                max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_BUFFERS],
                device_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindStorageBuffers);
        }

        // Sampled images
        if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLED_IMAGES] >
            device_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindSampledImages) {
            skip |= log_msg(
                device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                "VUID-VkPipelineLayoutCreateInfo-descriptorType-03025",
                "vkCreatePipelineLayout(): max per-stage sampled image bindings count (%d) exceeds device "
                "maxPerStageDescriptorUpdateAfterBindSampledImages limit (%d).",
                max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLED_IMAGES],
                device_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindSampledImages);
        }

        // Storage images
        if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_IMAGES] >
            device_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindStorageImages) {
            skip |= log_msg(
                device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                "VUID-VkPipelineLayoutCreateInfo-descriptorType-03026",
                "vkCreatePipelineLayout(): max per-stage storage image bindings count (%d) exceeds device "
                "maxPerStageDescriptorUpdateAfterBindStorageImages limit (%d).",
                max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_IMAGES],
                device_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindStorageImages);
        }

        // Input attachments
        if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_INPUT_ATTACHMENTS] >
            device_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindInputAttachments) {
            skip |= log_msg(
                device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                "VUID-VkPipelineLayoutCreateInfo-descriptorType-03027",
                "vkCreatePipelineLayout(): max per-stage input attachment bindings count (%d) exceeds device "
                "maxPerStageDescriptorUpdateAfterBindInputAttachments limit (%d).",
                max_descriptors_per_stage_update_after_bind[DSL_TYPE_INPUT_ATTACHMENTS],
                device_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindInputAttachments);
        }

        // Inline uniform blocks
        if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_INLINE_UNIFORM_BLOCK] >
            device_data->phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks) {
            skip |= log_msg(
                device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                "VUID-VkPipelineLayoutCreateInfo-descriptorType-02215",
                "vkCreatePipelineLayout(): max per-stage inline uniform block bindings count (%d) exceeds device "
                "maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks limit (%d).",
                max_descriptors_per_stage_update_after_bind[DSL_TYPE_INLINE_UNIFORM_BLOCK],
                device_data->phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks);
        }

        // Total descriptors by type, summed across all pipeline stages
        //
        std::map<uint32_t, uint32_t> sum_all_stages_update_after_bind = GetDescriptorSum(device_data, set_layouts, false);
        // Samplers
        sum = sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_SAMPLER] +
              sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER];
        if (sum > device_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindSamplers) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03036",
                            "vkCreatePipelineLayout(): sum of sampler bindings among all stages (%d) exceeds device "
                            "maxDescriptorSetUpdateAfterBindSamplers limit (%d).",
                            sum, device_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindSamplers);
        }
5875 
5876         // Uniform buffers
5877         if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER] >
5878             device_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindUniformBuffers) {
5879             skip |=
5880                 log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5881                         "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03037",
5882                         "vkCreatePipelineLayout(): sum of uniform buffer bindings among all stages (%d) exceeds device "
5883                         "maxDescriptorSetUpdateAfterBindUniformBuffers limit (%d).",
5884                         sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER],
5885                         device_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindUniformBuffers);
5886         }
5887 
5888         // Dynamic uniform buffers
5889         if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC] >
5890             device_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindUniformBuffersDynamic) {
5891             skip |= log_msg(
5892                 device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5893                 "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03038",
5894                 "vkCreatePipelineLayout(): sum of dynamic uniform buffer bindings among all stages (%d) exceeds device "
5895                 "maxDescriptorSetUpdateAfterBindUniformBuffersDynamic limit (%d).",
5896                 sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC],
5897                 device_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindUniformBuffersDynamic);
5898         }
5899 
5900         // Storage buffers
5901         if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER] >
5902             device_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageBuffers) {
5903             skip |=
5904                 log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5905                         "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03039",
5906                         "vkCreatePipelineLayout(): sum of storage buffer bindings among all stages (%d) exceeds device "
5907                         "maxDescriptorSetUpdateAfterBindStorageBuffers limit (%d).",
5908                         sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER],
5909                         device_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageBuffers);
5910         }
5911 
5912         // Dynamic storage buffers
5913         if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC] >
5914             device_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageBuffersDynamic) {
5915             skip |= log_msg(
5916                 device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5917                 "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03040",
5918                 "vkCreatePipelineLayout(): sum of dynamic storage buffer bindings among all stages (%d) exceeds device "
5919                 "maxDescriptorSetUpdateAfterBindStorageBuffersDynamic limit (%d).",
5920                 sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC],
5921                 device_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageBuffersDynamic);
5922         }
5923 
5924         //  Sampled images
5925         sum = sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE] +
5926               sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER] +
5927               sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER];
5928         if (sum > device_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindSampledImages) {
5929             skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5930                             "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03041",
5931                             "vkCreatePipelineLayout(): sum of sampled image bindings among all stages (%d) exceeds device "
5932                             "maxDescriptorSetUpdateAfterBindSampledImages limit (%d).",
5933                             sum,
5934                             device_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindSampledImages);
5935         }
5936 
5937         //  Storage images
5938         sum = sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_IMAGE] +
5939               sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER];
5940         if (sum > device_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageImages) {
5941             skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5942                             "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03042",
5943                             "vkCreatePipelineLayout(): sum of storage image bindings among all stages (%d) exceeds device "
5944                             "maxDescriptorSetUpdateAfterBindStorageImages limit (%d).",
5945                             sum,
5946                             device_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageImages);
5947         }
5948 
5949         // Input attachments
5950         if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT] >
5951             device_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindInputAttachments) {
5952             skip |=
5953                 log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5954                         "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03043",
5955                         "vkCreatePipelineLayout(): sum of input attachment bindings among all stages (%d) exceeds device "
5956                         "maxDescriptorSetUpdateAfterBindInputAttachments limit (%d).",
5957                         sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT],
5958                         device_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindInputAttachments);
5959         }
5960 
5961         // Inline uniform blocks
5962         if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT] >
5963             device_data->phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetUpdateAfterBindInlineUniformBlocks) {
5964             skip |= log_msg(
5965                 device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5966                 "VUID-VkPipelineLayoutCreateInfo-descriptorType-02217",
5967                 "vkCreatePipelineLayout(): sum of inline uniform block bindings among all stages (%d) exceeds device "
5968                 "maxDescriptorSetUpdateAfterBindInlineUniformBlocks limit (%d).",
5969                 sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT],
5970                 device_data->phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetUpdateAfterBindInlineUniformBlocks);
5971         }
5972     }
5973     return skip;
5974 }
5975 
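// Illustrative sketch (disabled from compilation, not part of the layer): an application-side
// pre-check that mirrors the summed update-after-bind sampler validation above. Note that
// VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER counts against both the sampler and the
// sampled-image limits. The function name and parameters are hypothetical.
#if 0
static bool AppExceedsUpdateAfterBindSamplerLimit(const VkPhysicalDeviceDescriptorIndexingPropertiesEXT &props,
                                                  uint32_t sampler_count, uint32_t combined_image_sampler_count) {
    // Mirrors the check for VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03036:
    // sum = SAMPLER + COMBINED_IMAGE_SAMPLER vs. maxDescriptorSetUpdateAfterBindSamplers
    const uint32_t sum = sampler_count + combined_image_sampler_count;
    return sum > props.maxDescriptorSetUpdateAfterBindSamplers;
}
#endif
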
// For repeatable sorting, not very useful for "memory in range" search
struct PushConstantRangeCompare {
    bool operator()(const VkPushConstantRange *lhs, const VkPushConstantRange *rhs) const {
        if (lhs->offset == rhs->offset) {
            if (lhs->size == rhs->size) {
                // The comparison is arbitrary, but avoids false aliasing by comparing all fields.
                return lhs->stageFlags < rhs->stageFlags;
            }
            // If the offsets are the same then sorting by the end of range is useful for validation
            return lhs->size < rhs->size;
        }
        return lhs->offset < rhs->offset;
    }
};

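// Illustrative sketch (disabled from compilation): how PushConstantRangeCompare orders
// ranges -- primarily by offset, then by size, then by stageFlags. The literal values
// below are arbitrary examples.
#if 0
static void PushConstantRangeCompareExample() {
    VkPushConstantRange a = {VK_SHADER_STAGE_VERTEX_BIT, /*offset*/ 0, /*size*/ 16};
    VkPushConstantRange b = {VK_SHADER_STAGE_FRAGMENT_BIT, /*offset*/ 0, /*size*/ 32};
    VkPushConstantRange c = {VK_SHADER_STAGE_VERTEX_BIT, /*offset*/ 16, /*size*/ 16};
    PushConstantRangeCompare cmp;
    assert(cmp(&a, &b));  // equal offsets, so a.size (16) < b.size (32) decides
    assert(cmp(&b, &c));  // b.offset (0) < c.offset (16); size never consulted
}
#endif
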
static PushConstantRangesDict push_constant_ranges_dict;

PushConstantRangesId GetCanonicalId(const VkPipelineLayoutCreateInfo *info) {
    if (!info->pPushConstantRanges) {
        // Hand back the empty entry (creating as needed)...
        return push_constant_ranges_dict.look_up(PushConstantRanges());
    }

    // Sort the input ranges to ensure equivalent ranges map to the same id
    std::set<const VkPushConstantRange *, PushConstantRangeCompare> sorted;
    for (uint32_t i = 0; i < info->pushConstantRangeCount; i++) {
        sorted.insert(info->pPushConstantRanges + i);
    }

    // Reserve (rather than size) the vector: emplace_back appends, so sizing it up front
    // would leave value-initialized entries ahead of the real ranges.
    PushConstantRanges ranges;
    ranges.reserve(sorted.size());
    for (const auto range : sorted) {
        ranges.emplace_back(*range);
    }
    return push_constant_ranges_dict.look_up(std::move(ranges));
}

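// Illustrative sketch (disabled from compilation): two create infos that list the same
// push-constant ranges in different orders map to the same canonical id, which is what
// makes the "compatible for set" comparisons below cheap. Assumes canonical ids compare
// equal exactly when the canonical contents match.
#if 0
static void GetCanonicalIdExample() {
    VkPushConstantRange ranges_ab[2] = {{VK_SHADER_STAGE_VERTEX_BIT, 0, 16}, {VK_SHADER_STAGE_FRAGMENT_BIT, 16, 16}};
    VkPushConstantRange ranges_ba[2] = {{VK_SHADER_STAGE_FRAGMENT_BIT, 16, 16}, {VK_SHADER_STAGE_VERTEX_BIT, 0, 16}};
    VkPipelineLayoutCreateInfo ci_ab = {VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO};
    ci_ab.pushConstantRangeCount = 2;
    ci_ab.pPushConstantRanges = ranges_ab;
    VkPipelineLayoutCreateInfo ci_ba = ci_ab;
    ci_ba.pPushConstantRanges = ranges_ba;
    // Same set of ranges, different declaration order -> same canonical id
    assert(GetCanonicalId(&ci_ab) == GetCanonicalId(&ci_ba));
}
#endif
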
// Dictionary of canonical form of a pipeline layout's list of descriptor set layouts
static PipelineLayoutSetLayoutsDict pipeline_layout_set_layouts_dict;

// Dictionary of canonical form of the "compatible for set" records
static PipelineLayoutCompatDict pipeline_layout_compat_dict;

static PipelineLayoutCompatId GetCanonicalId(const uint32_t set_index, const PushConstantRangesId pcr_id,
                                             const PipelineLayoutSetLayoutsId set_layouts_id) {
    return pipeline_layout_compat_dict.look_up(PipelineLayoutCompatDef(set_index, pcr_id, set_layouts_id));
}

void CoreChecks::PreCallRecordCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
                                                   const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout,
                                                   void *cpl_state_data) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    create_pipeline_layout_api_state *cpl_state = reinterpret_cast<create_pipeline_layout_api_state *>(cpl_state_data);
    if (GetEnables()->gpu_validation) {
        GpuPreCallCreatePipelineLayout(device_data, pCreateInfo, pAllocator, pPipelineLayout, &cpl_state->new_layouts,
                                       &cpl_state->modified_create_info);
    }
}

void CoreChecks::PostCallRecordCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
                                                    const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout,
                                                    VkResult result) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    // Clean up GPU validation
    if (GetEnables()->gpu_validation) {
        GpuPostCallCreatePipelineLayout(device_data, result);
    }
    if (VK_SUCCESS != result) return;

    PIPELINE_LAYOUT_NODE &plNode = device_data->pipelineLayoutMap[*pPipelineLayout];
    plNode.layout = *pPipelineLayout;
    plNode.set_layouts.resize(pCreateInfo->setLayoutCount);
    PipelineLayoutSetLayoutsDef set_layouts(pCreateInfo->setLayoutCount);
    for (uint32_t i = 0; i < pCreateInfo->setLayoutCount; ++i) {
        plNode.set_layouts[i] = GetDescriptorSetLayout(device_data, pCreateInfo->pSetLayouts[i]);
        set_layouts[i] = plNode.set_layouts[i]->GetLayoutId();
    }

    // Get canonical form IDs for the "compatible for set" contents
    plNode.push_constant_ranges = GetCanonicalId(pCreateInfo);
    auto set_layouts_id = pipeline_layout_set_layouts_dict.look_up(set_layouts);
    plNode.compat_for_set.reserve(pCreateInfo->setLayoutCount);

    // Create table of "compatible for set N" canonical forms for trivial accept validation
    for (uint32_t i = 0; i < pCreateInfo->setLayoutCount; ++i) {
        plNode.compat_for_set.emplace_back(GetCanonicalId(i, plNode.push_constant_ranges, set_layouts_id));
    }
}

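// Illustrative sketch (disabled from compilation): two pipeline layouts whose descriptor set
// layouts agree for sets 0..N (and whose push-constant ranges match) produce equal
// compat_for_set entries for those sets, so descriptor set compatibility at bind/draw time
// reduces to an id comparison. "layout_a"/"layout_b" are hypothetical PIPELINE_LAYOUT_NODEs.
#if 0
static bool CompatibleForSet(const PIPELINE_LAYOUT_NODE &layout_a, const PIPELINE_LAYOUT_NODE &layout_b, uint32_t set) {
    // Trivial accept: compare the canonical "compatible for set" ids built above
    return (set < layout_a.compat_for_set.size()) && (set < layout_b.compat_for_set.size()) &&
           (layout_a.compat_for_set[set] == layout_b.compat_for_set[set]);
}
#endif
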
void CoreChecks::PostCallRecordCreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo,
                                                    const VkAllocationCallbacks *pAllocator, VkDescriptorPool *pDescriptorPool,
                                                    VkResult result) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (VK_SUCCESS != result) return;
    DESCRIPTOR_POOL_STATE *pNewNode = new DESCRIPTOR_POOL_STATE(*pDescriptorPool, pCreateInfo);
    assert(pNewNode);
    dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode;
}

bool CoreChecks::PreCallValidateResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
                                                    VkDescriptorPoolResetFlags flags) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    // Make sure sets being destroyed are not currently in-use
    if (device_data->instance_data->disabled.idle_descriptor_set) return false;
    bool skip = false;
    DESCRIPTOR_POOL_STATE *pPool = GetDescriptorPoolState(descriptorPool);
    if (pPool != nullptr) {
        for (auto ds : pPool->sets) {
            if (ds && ds->in_use.load()) {
                skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, HandleToUint64(descriptorPool),
                                "VUID-vkResetDescriptorPool-descriptorPool-00313",
                                "It is invalid to call vkResetDescriptorPool() with descriptor sets in use by a command buffer.");
                if (skip) break;
            }
        }
    }
    return skip;
}

void CoreChecks::PostCallRecordResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
                                                   VkDescriptorPoolResetFlags flags, VkResult result) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (VK_SUCCESS != result) return;
    DESCRIPTOR_POOL_STATE *pPool = GetDescriptorPoolState(descriptorPool);
    // TODO: validate flags
    // For every set off of this pool, clear it, remove from setMap, and free cvdescriptorset::DescriptorSet
    for (auto ds : pPool->sets) {
        FreeDescriptorSet(device_data, ds);
    }
    pPool->sets.clear();
    // Reset available count for each type and available sets for this pool
    for (auto it = pPool->availableDescriptorTypeCount.begin(); it != pPool->availableDescriptorTypeCount.end(); ++it) {
        pPool->availableDescriptorTypeCount[it->first] = pPool->maxDescriptorTypeCount[it->first];
    }
    pPool->availableSets = pPool->maxSets;
}

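// Illustrative sketch (disabled from compilation): after vkResetDescriptorPool() every set
// allocated from the pool becomes invalid and the pool's full capacity is available again,
// which is exactly the bookkeeping the record function above restores.
#if 0
static void ResetPoolExample(VkDevice device, VkDescriptorPool pool) {
    // Precondition: no set allocated from 'pool' may still be in use by a command buffer.
    vkResetDescriptorPool(device, pool, 0 /*flags are reserved and must be 0*/);
    // The pool can now satisfy allocations up to its original maxSets and per-type counts.
}
#endif
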
// Ensure the pool contains enough descriptors and descriptor sets to satisfy
// an allocation request. Fills ads_state with the total number of descriptors of each type required,
// as well as DescriptorSetLayout ptrs used for later update.
bool CoreChecks::PreCallValidateAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
                                                       VkDescriptorSet *pDescriptorSets, void *ads_state_data) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    // Always update common data
    cvdescriptorset::AllocateDescriptorSetsData *ads_state =
        reinterpret_cast<cvdescriptorset::AllocateDescriptorSetsData *>(ads_state_data);
    UpdateAllocateDescriptorSetsData(device_data, pAllocateInfo, ads_state);
    if (device_data->instance_data->disabled.allocate_descriptor_sets) return false;
    // All state checks for AllocateDescriptorSets are done in a single function
    return ValidateAllocateDescriptorSets(device_data, pAllocateInfo, ads_state);
}

// Allocation state was good and call down chain was made so update state based on allocating descriptor sets
void CoreChecks::PostCallRecordAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
                                                      VkDescriptorSet *pDescriptorSets, VkResult result, void *ads_state_data) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (VK_SUCCESS != result) return;
    // All the updates are contained in a single cvdescriptorset function
    cvdescriptorset::AllocateDescriptorSetsData *ads_state =
        reinterpret_cast<cvdescriptorset::AllocateDescriptorSetsData *>(ads_state_data);
    PerformAllocateDescriptorSets(pAllocateInfo, pDescriptorSets, ads_state, &device_data->descriptorPoolMap, &device_data->setMap,
                                  device_data);
}

bool CoreChecks::PreCallValidateFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count,
                                                   const VkDescriptorSet *pDescriptorSets) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    // First make sure sets being destroyed are not currently in-use
    for (uint32_t i = 0; i < count; ++i) {
        if (pDescriptorSets[i] != VK_NULL_HANDLE) {
            skip |= ValidateIdleDescriptorSet(device_data, pDescriptorSets[i], "vkFreeDescriptorSets");
        }
    }
    DESCRIPTOR_POOL_STATE *pool_state = GetDescriptorPoolState(descriptorPool);
    if (pool_state && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pool_state->createInfo.flags)) {
        // Can't free from a pool created without VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                        HandleToUint64(descriptorPool), "VUID-vkFreeDescriptorSets-descriptorPool-00312",
                        "It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
                        "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT.");
    }
    return skip;
}

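// Illustrative sketch (disabled from compilation): vkFreeDescriptorSets() is only legal on
// pools created with VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, which is the
// condition checked above. The pool sizes below are arbitrary examples.
#if 0
static VkResult CreateFreeablePool(VkDevice device, VkDescriptorPool *out_pool) {
    VkDescriptorPoolSize size = {VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 8};
    VkDescriptorPoolCreateInfo ci = {VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO};
    ci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;  // permits vkFreeDescriptorSets()
    ci.maxSets = 8;
    ci.poolSizeCount = 1;
    ci.pPoolSizes = &size;
    return vkCreateDescriptorPool(device, &ci, nullptr, out_pool);
}
#endif
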
void CoreChecks::PreCallRecordFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count,
                                                 const VkDescriptorSet *pDescriptorSets) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    DESCRIPTOR_POOL_STATE *pool_state = GetDescriptorPoolState(descriptorPool);
    // Update available descriptor sets in pool
    pool_state->availableSets += count;

    // For each freed descriptor add its resources back into the pool as available and remove from pool and setMap
    for (uint32_t i = 0; i < count; ++i) {
        if (pDescriptorSets[i] != VK_NULL_HANDLE) {
            auto descriptor_set = device_data->setMap[pDescriptorSets[i]];
            uint32_t type_index = 0, descriptor_count = 0;
            for (uint32_t j = 0; j < descriptor_set->GetBindingCount(); ++j) {
                type_index = static_cast<uint32_t>(descriptor_set->GetTypeFromIndex(j));
                descriptor_count = descriptor_set->GetDescriptorCountFromIndex(j);
                pool_state->availableDescriptorTypeCount[type_index] += descriptor_count;
            }
            FreeDescriptorSet(device_data, descriptor_set);
            pool_state->sets.erase(descriptor_set);
        }
    }
}

bool CoreChecks::PreCallValidateUpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount,
                                                     const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
                                                     const VkCopyDescriptorSet *pDescriptorCopies) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (device_data->disabled.update_descriptor_sets) return false;
    // NOTE: UpdateDescriptorSets is somewhat unique in that it operates on a number of DescriptorSets,
    // so we can't do a single map look-up up-front; the look-ups are done individually in the functions below.

    // Make call(s) that validate state, but don't perform state updates in this function.
    // Note, here DescriptorSets is unique in that we don't yet have an instance. Use a helper function in the
    // namespace which will parse params and make calls into specific class instances.
    return ValidateUpdateDescriptorSets(device_data->report_data, device_data, descriptorWriteCount, pDescriptorWrites,
                                        descriptorCopyCount, pDescriptorCopies, "vkUpdateDescriptorSets()");
}

void CoreChecks::PreCallRecordUpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount,
                                                   const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
                                                   const VkCopyDescriptorSet *pDescriptorCopies) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    cvdescriptorset::PerformUpdateDescriptorSets(device_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
                                                 pDescriptorCopies);
}

void CoreChecks::PostCallRecordAllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo,
                                                      VkCommandBuffer *pCommandBuffer, VkResult result) {
    if (VK_SUCCESS != result) return;
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    auto pPool = GetCommandPoolNode(pCreateInfo->commandPool);
    if (pPool) {
        for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) {
            // Add command buffer to its commandPool map
            pPool->commandBuffers.insert(pCommandBuffer[i]);
            GLOBAL_CB_NODE *pCB = new GLOBAL_CB_NODE;
            // Add command buffer to map
            device_data->commandBufferMap[pCommandBuffer[i]] = pCB;
            ResetCommandBufferState(device_data, pCommandBuffer[i]);
            pCB->createInfo = *pCreateInfo;
            pCB->device = device;
        }
    }
}

// Add bindings between the given cmd buffer & framebuffer and the framebuffer's children
void CoreChecks::AddFramebufferBinding(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, FRAMEBUFFER_STATE *fb_state) {
    AddCommandBufferBinding(&fb_state->cb_bindings, {HandleToUint64(fb_state->framebuffer), kVulkanObjectTypeFramebuffer},
                            cb_state);

    const uint32_t attachmentCount = fb_state->createInfo.attachmentCount;
    for (uint32_t attachment = 0; attachment < attachmentCount; ++attachment) {
        auto view_state = GetAttachmentImageViewState(fb_state, attachment);
        if (view_state) {
            AddCommandBufferBindingImageView(dev_data, cb_state, view_state);
        }
    }
}

bool CoreChecks::PreCallValidateBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    if (!cb_state) return false;
    bool skip = false;
    if (cb_state->in_use.load()) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkBeginCommandBuffer-commandBuffer-00049",
                        "Calling vkBeginCommandBuffer() on active command buffer %s before it has completed. You must check "
                        "the command buffer's fence before this call.",
                        device_data->report_data->FormatHandle(commandBuffer).c_str());
    }
    if (cb_state->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
        // Secondary Command Buffer
        const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
        if (!pInfo) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(commandBuffer), "VUID-vkBeginCommandBuffer-commandBuffer-00051",
                            "vkBeginCommandBuffer(): Secondary Command Buffer (%s) must have inheritance info.",
                            device_data->report_data->FormatHandle(commandBuffer).c_str());
        } else {
            if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
                assert(pInfo->renderPass);
                auto framebuffer = GetFramebufferState(pInfo->framebuffer);
                if (framebuffer) {
                    if (framebuffer->createInfo.renderPass != pInfo->renderPass) {
                        // renderPass that framebuffer was created with must be compatible with local renderPass
                        skip |=
                            ValidateRenderPassCompatibility(device_data, "framebuffer", framebuffer->rp_state.get(),
                                                            "command buffer", GetRenderPassState(pInfo->renderPass),
                                                            "vkBeginCommandBuffer()", "VUID-VkCommandBufferBeginInfo-flags-00055");
                    }
                }
            }
            if ((pInfo->occlusionQueryEnable == VK_FALSE || device_data->enabled_features.core.occlusionQueryPrecise == VK_FALSE) &&
                (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
                skip |=
                    log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(commandBuffer), "VUID-vkBeginCommandBuffer-commandBuffer-00052",
                            "vkBeginCommandBuffer(): Secondary Command Buffer (%s) must not have VK_QUERY_CONTROL_PRECISE_BIT if "
                            "occlusionQueryEnable is disabled or the device does not support precise occlusion queries.",
                            device_data->report_data->FormatHandle(commandBuffer).c_str());
            }
        }
        if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) {
            auto renderPass = GetRenderPassState(pInfo->renderPass);
            if (renderPass) {
                if (pInfo->subpass >= renderPass->createInfo.subpassCount) {
                    skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer),
                                    "VUID-VkCommandBufferBeginInfo-flags-00054",
                                    "vkBeginCommandBuffer(): Secondary Command Buffers (%s) must have a subpass index (%d) that is "
                                    "less than the number of subpasses (%d).",
                                    device_data->report_data->FormatHandle(commandBuffer).c_str(), pInfo->subpass,
                                    renderPass->createInfo.subpassCount);
                }
            }
        }
    }
    if (CB_RECORDING == cb_state->state) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkBeginCommandBuffer-commandBuffer-00049",
                        "vkBeginCommandBuffer(): Cannot call Begin on command buffer (%s) in the RECORDING state. Must first call "
                        "vkEndCommandBuffer().",
                        device_data->report_data->FormatHandle(commandBuffer).c_str());
    } else if (CB_RECORDED == cb_state->state || CB_INVALID_COMPLETE == cb_state->state) {
        VkCommandPool cmdPool = cb_state->createInfo.commandPool;
        auto pPool = GetCommandPoolNode(cmdPool);
        if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
            skip |=
                log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkBeginCommandBuffer-commandBuffer-00050",
                        "Call to vkBeginCommandBuffer() on command buffer (%s) attempts to implicitly reset cmdBuffer created from "
                        "command pool (%s) that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
                        device_data->report_data->FormatHandle(commandBuffer).c_str(),
                        device_data->report_data->FormatHandle(cmdPool).c_str());
        }
    }
    return skip;
}

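// Illustrative sketch (disabled from compilation): beginning a secondary command buffer with
// VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT requires inheritance info whose render pass
// is compatible with the framebuffer's, which is what the validation above enforces. The
// handles passed in are hypothetical.
#if 0
static VkResult BeginSecondaryForRenderPass(VkCommandBuffer secondary_cb, VkRenderPass render_pass,
                                            VkFramebuffer framebuffer, uint32_t subpass) {
    VkCommandBufferInheritanceInfo inheritance = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO};
    inheritance.renderPass = render_pass;   // must be compatible with the framebuffer's render pass
    inheritance.subpass = subpass;          // must be < subpassCount of render_pass
    inheritance.framebuffer = framebuffer;  // may be VK_NULL_HANDLE if not known at begin time
    VkCommandBufferBeginInfo begin_info = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO};
    begin_info.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
    begin_info.pInheritanceInfo = &inheritance;
    return vkBeginCommandBuffer(secondary_cb, &begin_info);
}
#endif
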
void CoreChecks::PreCallRecordBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    if (!cb_state) return;
    // This implicitly resets the Cmd Buffer so make sure any fence is done and then clear memory references
    ClearCmdBufAndMemReferences(device_data, cb_state);
    if (cb_state->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
        // Secondary Command Buffer
        const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
        if (pInfo) {
            if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
                assert(pInfo->renderPass);
                auto framebuffer = GetFramebufferState(pInfo->framebuffer);
                if (framebuffer) {
                    // Connect this framebuffer and its children to this cmdBuffer
                    AddFramebufferBinding(device_data, cb_state, framebuffer);
                }
            }
        }
    }
    if (CB_RECORDED == cb_state->state || CB_INVALID_COMPLETE == cb_state->state) {
        ResetCommandBufferState(device_data, commandBuffer);
    }
    // Set updated state here in case implicit reset occurs above
    cb_state->state = CB_RECORDING;
    cb_state->beginInfo = *pBeginInfo;
    if (cb_state->beginInfo.pInheritanceInfo) {
        cb_state->inheritanceInfo = *(cb_state->beginInfo.pInheritanceInfo);
        cb_state->beginInfo.pInheritanceInfo = &cb_state->inheritanceInfo;
        // If this is a secondary command buffer that is inheriting, update the items it should inherit.
        if ((cb_state->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) &&
            (cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
            cb_state->activeRenderPass = GetRenderPassState(cb_state->beginInfo.pInheritanceInfo->renderPass);
            cb_state->activeSubpass = cb_state->beginInfo.pInheritanceInfo->subpass;
            cb_state->activeFramebuffer = cb_state->beginInfo.pInheritanceInfo->framebuffer;
            cb_state->framebuffers.insert(cb_state->beginInfo.pInheritanceInfo->framebuffer);
        }
    }
}

bool CoreChecks::PreCallValidateEndCommandBuffer(VkCommandBuffer commandBuffer) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    if (!cb_state) return false;
    bool skip = false;
    if ((VK_COMMAND_BUFFER_LEVEL_PRIMARY == cb_state->createInfo.level) ||
        !(cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
        // This needs spec clarification to update valid usage, see comments in PR:
        // https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/165
        skip |= InsideRenderPass(device_data, cb_state, "vkEndCommandBuffer()", "VUID-vkEndCommandBuffer-commandBuffer-00060");
    }
    skip |= ValidateCmd(device_data, cb_state, CMD_ENDCOMMANDBUFFER, "vkEndCommandBuffer()");
    for (auto query : cb_state->activeQueries) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkEndCommandBuffer-commandBuffer-00061",
                        "Ending command buffer with in progress query: queryPool %s, index %d.",
                        device_data->report_data->FormatHandle(query.pool).c_str(), query.index);
    }
    return skip;
}

void CoreChecks::PostCallRecordEndCommandBuffer(VkCommandBuffer commandBuffer, VkResult result) {
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    if (!cb_state) return;
    // Cached validation is specific to a specific recording of a specific command buffer.
    for (auto descriptor_set : cb_state->validated_descriptor_sets) {
        descriptor_set->ClearCachedValidation(cb_state);
    }
    cb_state->validated_descriptor_sets.clear();
    if (VK_SUCCESS == result) {
        cb_state->state = CB_RECORDED;
    }
}

bool CoreChecks::PreCallValidateResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skip = false;
    GLOBAL_CB_NODE *pCB = GetCBNode(commandBuffer);
    if (!pCB) return false;
    VkCommandPool cmdPool = pCB->createInfo.commandPool;
    auto pPool = GetCommandPoolNode(cmdPool);

    if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkResetCommandBuffer-commandBuffer-00046",
                        "Attempt to reset command buffer (%s) created from command pool (%s) that does NOT have the "
                        "VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
                        device_data->report_data->FormatHandle(commandBuffer).c_str(),
                        device_data->report_data->FormatHandle(cmdPool).c_str());
    }
    skip |= CheckCommandBufferInFlight(device_data, pCB, "reset", "VUID-vkResetCommandBuffer-commandBuffer-00045");

    return skip;
}

void CoreChecks::PostCallRecordResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags, VkResult result) {
    if (VK_SUCCESS == result) {
        layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
        ResetCommandBufferState(device_data, commandBuffer);
    }
}

bool CoreChecks::PreCallValidateCmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
                                                VkPipeline pipeline) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    assert(cb_state);

    bool skip = ValidateCmdQueueFlags(device_data, cb_state, "vkCmdBindPipeline()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                      "VUID-vkCmdBindPipeline-commandBuffer-cmdpool");
    skip |= ValidateCmd(device_data, cb_state, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
    // TODO: "VUID-vkCmdBindPipeline-pipelineBindPoint-00777" "VUID-vkCmdBindPipeline-pipelineBindPoint-00779" -- using
    // ValidatePipelineBindPoint
    return skip;
}

void CoreChecks::PreCallRecordCmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
                                              VkPipeline pipeline) {
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    assert(cb_state);

    auto pipe_state = GetPipelineState(pipeline);
    if (VK_PIPELINE_BIND_POINT_GRAPHICS == pipelineBindPoint) {
        cb_state->status &= ~cb_state->static_status;
        cb_state->static_status = MakeStaticStateMask(pipe_state->graphicsPipelineCI.ptr()->pDynamicState);
        cb_state->status |= cb_state->static_status;
    }
    cb_state->lastBound[pipelineBindPoint].pipeline_state = pipe_state;
    SetPipelineState(pipe_state);
    AddCommandBufferBinding(&pipe_state->cb_bindings, {HandleToUint64(pipeline), kVulkanObjectTypePipeline}, cb_state);
}

bool CoreChecks::PreCallValidateCmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount,
                                               const VkViewport *pViewports) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    assert(cb_state);
    bool skip = ValidateCmdQueueFlags(device_data, cb_state, "vkCmdSetViewport()", VK_QUEUE_GRAPHICS_BIT,
                                      "VUID-vkCmdSetViewport-commandBuffer-cmdpool");
    skip |= ValidateCmd(device_data, cb_state, CMD_SETVIEWPORT, "vkCmdSetViewport()");
    if (cb_state->static_status & CBSTATUS_VIEWPORT_SET) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkCmdSetViewport-None-01221",
                        "vkCmdSetViewport(): pipeline was created without VK_DYNAMIC_STATE_VIEWPORT flag.");
    }
    return skip;
}

void CoreChecks::PreCallRecordCmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount,
                                             const VkViewport *pViewports) {
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    // Mark viewports [firstViewport, firstViewport + viewportCount) as set
    cb_state->viewportMask |= ((1u << viewportCount) - 1u) << firstViewport;
    cb_state->status |= CBSTATUS_VIEWPORT_SET;
}

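// Worked example of the mask update above: firstViewport = 1, viewportCount = 2 gives
// ((1u << 2) - 1u) << 1 == 0b110, i.e. viewports 1 and 2 are marked set.
static_assert((((1u << 2) - 1u) << 1) == 0x6, "viewport mask example: viewports 1 and 2");
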
bool CoreChecks::PreCallValidateCmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount,
                                              const VkRect2D *pScissors) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    assert(cb_state);
    bool skip = ValidateCmdQueueFlags(device_data, cb_state, "vkCmdSetScissor()", VK_QUEUE_GRAPHICS_BIT,
                                      "VUID-vkCmdSetScissor-commandBuffer-cmdpool");
    skip |= ValidateCmd(device_data, cb_state, CMD_SETSCISSOR, "vkCmdSetScissor()");
    if (cb_state->static_status & CBSTATUS_SCISSOR_SET) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkCmdSetScissor-None-00590",
                        "vkCmdSetScissor(): pipeline was created without VK_DYNAMIC_STATE_SCISSOR flag.");
    }
    return skip;
}

void CoreChecks::PreCallRecordCmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount,
                                            const VkRect2D *pScissors) {
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    cb_state->scissorMask |= ((1u << scissorCount) - 1u) << firstScissor;
    cb_state->status |= CBSTATUS_SCISSOR_SET;
}

bool CoreChecks::PreCallValidateCmdSetExclusiveScissorNV(VkCommandBuffer commandBuffer, uint32_t firstExclusiveScissor,
                                                         uint32_t exclusiveScissorCount, const VkRect2D *pExclusiveScissors) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    assert(cb_state);
    bool skip = ValidateCmdQueueFlags(device_data, cb_state, "vkCmdSetExclusiveScissorNV()", VK_QUEUE_GRAPHICS_BIT,
                                      "VUID-vkCmdSetExclusiveScissorNV-commandBuffer-cmdpool");
    skip |= ValidateCmd(device_data, cb_state, CMD_SETEXCLUSIVESCISSOR, "vkCmdSetExclusiveScissorNV()");
    if (cb_state->static_status & CBSTATUS_EXCLUSIVE_SCISSOR_SET) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkCmdSetExclusiveScissorNV-None-02032",
                        "vkCmdSetExclusiveScissorNV(): pipeline was created without VK_DYNAMIC_STATE_EXCLUSIVE_SCISSOR_NV flag.");
    }

    if (!GetEnabledFeatures()->exclusive_scissor.exclusiveScissor) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkCmdSetExclusiveScissorNV-None-02031",
                        "vkCmdSetExclusiveScissorNV: The exclusiveScissor feature is disabled.");
    }

    return skip;
}

void CoreChecks::PreCallRecordCmdSetExclusiveScissorNV(VkCommandBuffer commandBuffer, uint32_t firstExclusiveScissor,
                                                       uint32_t exclusiveScissorCount, const VkRect2D *pExclusiveScissors) {
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    // TODO: We don't have VUIDs for validating that all exclusive scissors have been set.
    // cb_state->exclusiveScissorMask |= ((1u << exclusiveScissorCount) - 1u) << firstExclusiveScissor;
    cb_state->status |= CBSTATUS_EXCLUSIVE_SCISSOR_SET;
}

bool CoreChecks::PreCallValidateCmdBindShadingRateImageNV(VkCommandBuffer commandBuffer, VkImageView imageView,
                                                          VkImageLayout imageLayout) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    assert(cb_state);
    bool skip = ValidateCmdQueueFlags(device_data, cb_state, "vkCmdBindShadingRateImageNV()", VK_QUEUE_GRAPHICS_BIT,
                                      "VUID-vkCmdBindShadingRateImageNV-commandBuffer-cmdpool");

    skip |= ValidateCmd(device_data, cb_state, CMD_BINDSHADINGRATEIMAGE, "vkCmdBindShadingRateImageNV()");

    if (!GetEnabledFeatures()->shading_rate_image.shadingRateImage) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkCmdBindShadingRateImageNV-None-02058",
                        "vkCmdBindShadingRateImageNV: The shadingRateImage feature is disabled.");
    }

    if (imageView != VK_NULL_HANDLE) {
        // view_state may be null for an unknown handle; don't dereference it before the checks below
        auto view_state = GetImageViewState(imageView);

        if (!view_state || (view_state->create_info.viewType != VK_IMAGE_VIEW_TYPE_2D &&
                            view_state->create_info.viewType != VK_IMAGE_VIEW_TYPE_2D_ARRAY)) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
                            HandleToUint64(imageView), "VUID-vkCmdBindShadingRateImageNV-imageView-02059",
                            "vkCmdBindShadingRateImageNV: If imageView is not VK_NULL_HANDLE, it must be a valid "
                            "VkImageView handle of type VK_IMAGE_VIEW_TYPE_2D or VK_IMAGE_VIEW_TYPE_2D_ARRAY.");
        }

        if (view_state && view_state->create_info.format != VK_FORMAT_R8_UINT) {
            skip |= log_msg(
                device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
                HandleToUint64(imageView), "VUID-vkCmdBindShadingRateImageNV-imageView-02060",
                "vkCmdBindShadingRateImageNV: If imageView is not VK_NULL_HANDLE, it must have a format of VK_FORMAT_R8_UINT.");
        }

        const VkImageCreateInfo *ici = view_state ? &GetImageState(view_state->create_info.image)->createInfo : nullptr;
        if (ici && !(ici->usage & VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV)) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
                            HandleToUint64(imageView), "VUID-vkCmdBindShadingRateImageNV-imageView-02061",
                            "vkCmdBindShadingRateImageNV: If imageView is not VK_NULL_HANDLE, the image must have been "
                            "created with VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV set.");
        }

        if (view_state) {
            auto image_state = GetImageState(view_state->create_info.image);
            bool hit_error = false;

            // XXX TODO: While the VUID says "each subresource", only the base mip level is
            // actually used. Since we don't have an existing convenience function to iterate
            // over all mip levels, just don't bother with non-base levels.
            VkImageSubresourceRange &range = view_state->create_info.subresourceRange;
            VkImageSubresourceLayers subresource = {range.aspectMask, range.baseMipLevel, range.baseArrayLayer, range.layerCount};

            if (image_state) {
                skip |= VerifyImageLayout(device_data, cb_state, image_state, subresource, imageLayout,
                                          VK_IMAGE_LAYOUT_SHADING_RATE_OPTIMAL_NV, "vkCmdBindShadingRateImageNV()",
                                          "VUID-vkCmdBindShadingRateImageNV-imageLayout-02063",
                                          "VUID-vkCmdBindShadingRateImageNV-imageView-02062", &hit_error);
            }
        }
    }

    return skip;
}

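// Illustrative sketch (disabled from compilation): an image view that satisfies the
// vkCmdBindShadingRateImageNV() checks above -- 2D view type, VK_FORMAT_R8_UINT, and an
// image created with VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV. The image handle is hypothetical.
#if 0
static VkResult CreateShadingRateView(VkDevice device, VkImage shading_rate_image, VkImageView *out_view) {
    VkImageViewCreateInfo ci = {VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO};
    ci.image = shading_rate_image;        // created with VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV
    ci.viewType = VK_IMAGE_VIEW_TYPE_2D;  // 2D or 2D_ARRAY is required
    ci.format = VK_FORMAT_R8_UINT;        // the only format the checks above accept
    ci.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
    return vkCreateImageView(device, &ci, nullptr, out_view);
}
#endif
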
void CoreChecks::PreCallRecordCmdBindShadingRateImageNV(VkCommandBuffer commandBuffer, VkImageView imageView,
                                                        VkImageLayout imageLayout) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);

    if (imageView != VK_NULL_HANDLE) {
        auto view_state = GetImageViewState(imageView);
        AddCommandBufferBindingImageView(device_data, cb_state, view_state);
    }
}

bool CoreChecks::PreCallValidateCmdSetViewportShadingRatePaletteNV(VkCommandBuffer commandBuffer, uint32_t firstViewport,
                                                                   uint32_t viewportCount,
                                                                   const VkShadingRatePaletteNV *pShadingRatePalettes) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    assert(cb_state);
    bool skip = ValidateCmdQueueFlags(device_data, cb_state, "vkCmdSetViewportShadingRatePaletteNV()", VK_QUEUE_GRAPHICS_BIT,
                                      "VUID-vkCmdSetViewportShadingRatePaletteNV-commandBuffer-cmdpool");

    skip |= ValidateCmd(device_data, cb_state, CMD_SETVIEWPORTSHADINGRATEPALETTE, "vkCmdSetViewportShadingRatePaletteNV()");

    if (!GetEnabledFeatures()->shading_rate_image.shadingRateImage) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkCmdSetViewportShadingRatePaletteNV-None-02064",
                        "vkCmdSetViewportShadingRatePaletteNV: The shadingRateImage feature is disabled.");
    }

    if (cb_state->static_status & CBSTATUS_SHADING_RATE_PALETTE_SET) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkCmdSetViewportShadingRatePaletteNV-None-02065",
                        "vkCmdSetViewportShadingRatePaletteNV(): pipeline was created without "
                        "VK_DYNAMIC_STATE_VIEWPORT_SHADING_RATE_PALETTE_NV flag.");
    }

    for (uint32_t i = 0; i < viewportCount; ++i) {
        auto *palette = &pShadingRatePalettes[i];
        if (palette->shadingRatePaletteEntryCount == 0 ||
            palette->shadingRatePaletteEntryCount >
                device_data->phys_dev_ext_props.shading_rate_image_props.shadingRatePaletteSize) {
            skip |= log_msg(
                device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                HandleToUint64(commandBuffer), "VUID-VkShadingRatePaletteNV-shadingRatePaletteEntryCount-02071",
                "vkCmdSetViewportShadingRatePaletteNV: shadingRatePaletteEntryCount must be between 1 and shadingRatePaletteSize.");
        }
    }

    return skip;
}

PreCallRecordCmdSetViewportShadingRatePaletteNV(VkCommandBuffer commandBuffer,uint32_t firstViewport,uint32_t viewportCount,const VkShadingRatePaletteNV * pShadingRatePalettes)6648 void CoreChecks::PreCallRecordCmdSetViewportShadingRatePaletteNV(VkCommandBuffer commandBuffer, uint32_t firstViewport,
6649                                                                  uint32_t viewportCount,
6650                                                                  const VkShadingRatePaletteNV *pShadingRatePalettes) {
6651     GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
6652     // TODO: We don't have VUIDs for validating that all shading rate palettes have been set.
6653     // cb_state->shadingRatePaletteMask |= ((1u << viewportCount) - 1u) << firstViewport;
6654     cb_state->status |= CBSTATUS_SHADING_RATE_PALETTE_SET;
6655 }
6656 
PreCallValidateCmdSetLineWidth(VkCommandBuffer commandBuffer,float lineWidth)6657 bool CoreChecks::PreCallValidateCmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
6658     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6659     GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
6660     assert(cb_state);
6661     bool skip = ValidateCmdQueueFlags(device_data, cb_state, "vkCmdSetLineWidth()", VK_QUEUE_GRAPHICS_BIT,
6662                                       "VUID-vkCmdSetLineWidth-commandBuffer-cmdpool");
6663     skip |= ValidateCmd(device_data, cb_state, CMD_SETLINEWIDTH, "vkCmdSetLineWidth()");
6664 
6665     if (cb_state->static_status & CBSTATUS_LINE_WIDTH_SET) {
6666         skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6667                         HandleToUint64(commandBuffer), "VUID-vkCmdSetLineWidth-None-00787",
6668                         "vkCmdSetLineWidth called but pipeline was created without VK_DYNAMIC_STATE_LINE_WIDTH flag.");
6669     }
6670     return skip;
6671 }
6672 
PreCallRecordCmdSetLineWidth(VkCommandBuffer commandBuffer,float lineWidth)6673 void CoreChecks::PreCallRecordCmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
6674     GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
6675     cb_state->status |= CBSTATUS_LINE_WIDTH_SET;
6676 }
6677 
PreCallValidateCmdSetDepthBias(VkCommandBuffer commandBuffer,float depthBiasConstantFactor,float depthBiasClamp,float depthBiasSlopeFactor)6678 bool CoreChecks::PreCallValidateCmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp,
6679                                                 float depthBiasSlopeFactor) {
6680     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6681     GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
6682     assert(cb_state);
6683     bool skip = ValidateCmdQueueFlags(device_data, cb_state, "vkCmdSetDepthBias()", VK_QUEUE_GRAPHICS_BIT,
6684                                       "VUID-vkCmdSetDepthBias-commandBuffer-cmdpool");
6685     skip |= ValidateCmd(device_data, cb_state, CMD_SETDEPTHBIAS, "vkCmdSetDepthBias()");
6686     if (cb_state->static_status & CBSTATUS_DEPTH_BIAS_SET) {
6687         skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6688                         HandleToUint64(commandBuffer), "VUID-vkCmdSetDepthBias-None-00789",
6689                         "vkCmdSetDepthBias(): pipeline was created without VK_DYNAMIC_STATE_DEPTH_BIAS flag..");
6690     }
6691     if ((depthBiasClamp != 0.0) && (!device_data->enabled_features.core.depthBiasClamp)) {
6692         skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6693                         HandleToUint64(commandBuffer), "VUID-vkCmdSetDepthBias-depthBiasClamp-00790",
6694                         "vkCmdSetDepthBias(): the depthBiasClamp device feature is disabled: the depthBiasClamp parameter must "
6695                         "be set to 0.0.");
6696     }
6697     return skip;
6698 }
6699 
PreCallRecordCmdSetDepthBias(VkCommandBuffer commandBuffer,float depthBiasConstantFactor,float depthBiasClamp,float depthBiasSlopeFactor)6700 void CoreChecks::PreCallRecordCmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp,
6701                                               float depthBiasSlopeFactor) {
6702     GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
6703     cb_state->status |= CBSTATUS_DEPTH_BIAS_SET;
6704 }
6705 
PreCallValidateCmdSetBlendConstants(VkCommandBuffer commandBuffer,const float blendConstants[4])6706 bool CoreChecks::PreCallValidateCmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
6707     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6708     GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
6709     assert(cb_state);
6710     bool skip = ValidateCmdQueueFlags(device_data, cb_state, "vkCmdSetBlendConstants()", VK_QUEUE_GRAPHICS_BIT,
6711                                       "VUID-vkCmdSetBlendConstants-commandBuffer-cmdpool");
6712     skip |= ValidateCmd(device_data, cb_state, CMD_SETBLENDCONSTANTS, "vkCmdSetBlendConstants()");
6713     if (cb_state->static_status & CBSTATUS_BLEND_CONSTANTS_SET) {
6714         skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6715                         HandleToUint64(commandBuffer), "VUID-vkCmdSetBlendConstants-None-00612",
6716                         "vkCmdSetBlendConstants(): pipeline was created without VK_DYNAMIC_STATE_BLEND_CONSTANTS flag..");
6717     }
6718     return skip;
6719 }
6720 
PreCallRecordCmdSetBlendConstants(VkCommandBuffer commandBuffer,const float blendConstants[4])6721 void CoreChecks::PreCallRecordCmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
6722     GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
6723     cb_state->status |= CBSTATUS_BLEND_CONSTANTS_SET;
6724 }
6725 
PreCallValidateCmdSetDepthBounds(VkCommandBuffer commandBuffer,float minDepthBounds,float maxDepthBounds)6726 bool CoreChecks::PreCallValidateCmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
6727     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6728     GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
6729     assert(cb_state);
6730     bool skip = ValidateCmdQueueFlags(device_data, cb_state, "vkCmdSetDepthBounds()", VK_QUEUE_GRAPHICS_BIT,
6731                                       "VUID-vkCmdSetDepthBounds-commandBuffer-cmdpool");
6732     skip |= ValidateCmd(device_data, cb_state, CMD_SETDEPTHBOUNDS, "vkCmdSetDepthBounds()");
6733     if (cb_state->static_status & CBSTATUS_DEPTH_BOUNDS_SET) {
6734         skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6735                         HandleToUint64(commandBuffer), "VUID-vkCmdSetDepthBounds-None-00599",
6736                         "vkCmdSetDepthBounds(): pipeline was created without VK_DYNAMIC_STATE_DEPTH_BOUNDS flag..");
6737     }
6738     return skip;
6739 }
6740 
PreCallRecordCmdSetDepthBounds(VkCommandBuffer commandBuffer,float minDepthBounds,float maxDepthBounds)6741 void CoreChecks::PreCallRecordCmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
6742     GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
6743     cb_state->status |= CBSTATUS_DEPTH_BOUNDS_SET;
6744 }
6745 
PreCallValidateCmdSetStencilCompareMask(VkCommandBuffer commandBuffer,VkStencilFaceFlags faceMask,uint32_t compareMask)6746 bool CoreChecks::PreCallValidateCmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
6747                                                          uint32_t compareMask) {
6748     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6749     GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
6750     assert(cb_state);
6751     bool skip = ValidateCmdQueueFlags(device_data, cb_state, "vkCmdSetStencilCompareMask()", VK_QUEUE_GRAPHICS_BIT,
6752                                       "VUID-vkCmdSetStencilCompareMask-commandBuffer-cmdpool");
6753     skip |= ValidateCmd(device_data, cb_state, CMD_SETSTENCILCOMPAREMASK, "vkCmdSetStencilCompareMask()");
6754     if (cb_state->static_status & CBSTATUS_STENCIL_READ_MASK_SET) {
6755         skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6756                         HandleToUint64(commandBuffer), "VUID-vkCmdSetStencilCompareMask-None-00602",
6757                         "vkCmdSetStencilCompareMask(): pipeline was created without VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK flag..");
6758     }
6759     return skip;
6760 }
6761 
PreCallRecordCmdSetStencilCompareMask(VkCommandBuffer commandBuffer,VkStencilFaceFlags faceMask,uint32_t compareMask)6762 void CoreChecks::PreCallRecordCmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
6763                                                        uint32_t compareMask) {
6764     GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
6765     cb_state->status |= CBSTATUS_STENCIL_READ_MASK_SET;
6766 }
6767 
PreCallValidateCmdSetStencilWriteMask(VkCommandBuffer commandBuffer,VkStencilFaceFlags faceMask,uint32_t writeMask)6768 bool CoreChecks::PreCallValidateCmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
6769                                                        uint32_t writeMask) {
6770     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6771     GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
6772     assert(cb_state);
6773     bool skip = ValidateCmdQueueFlags(device_data, cb_state, "vkCmdSetStencilWriteMask()", VK_QUEUE_GRAPHICS_BIT,
6774                                       "VUID-vkCmdSetStencilWriteMask-commandBuffer-cmdpool");
6775     skip |= ValidateCmd(device_data, cb_state, CMD_SETSTENCILWRITEMASK, "vkCmdSetStencilWriteMask()");
6776     if (cb_state->static_status & CBSTATUS_STENCIL_WRITE_MASK_SET) {
6777         skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6778                         HandleToUint64(commandBuffer), "VUID-vkCmdSetStencilWriteMask-None-00603",
6779                         "vkCmdSetStencilWriteMask(): pipeline was created without VK_DYNAMIC_STATE_STENCIL_WRITE_MASK flag..");
6780     }
6781     return skip;
6782 }
6783 
PreCallRecordCmdSetStencilWriteMask(VkCommandBuffer commandBuffer,VkStencilFaceFlags faceMask,uint32_t writeMask)6784 void CoreChecks::PreCallRecordCmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
6785                                                      uint32_t writeMask) {
6786     GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
6787     cb_state->status |= CBSTATUS_STENCIL_WRITE_MASK_SET;
6788 }
6789 
PreCallValidateCmdSetStencilReference(VkCommandBuffer commandBuffer,VkStencilFaceFlags faceMask,uint32_t reference)6790 bool CoreChecks::PreCallValidateCmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
6791                                                        uint32_t reference) {
6792     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6793     GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
6794     assert(cb_state);
6795     bool skip = ValidateCmdQueueFlags(device_data, cb_state, "vkCmdSetStencilReference()", VK_QUEUE_GRAPHICS_BIT,
6796                                       "VUID-vkCmdSetStencilReference-commandBuffer-cmdpool");
6797     skip |= ValidateCmd(device_data, cb_state, CMD_SETSTENCILREFERENCE, "vkCmdSetStencilReference()");
6798     if (cb_state->static_status & CBSTATUS_STENCIL_REFERENCE_SET) {
6799         skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6800                         HandleToUint64(commandBuffer), "VUID-vkCmdSetStencilReference-None-00604",
6801                         "vkCmdSetStencilReference(): pipeline was created without VK_DYNAMIC_STATE_STENCIL_REFERENCE flag..");
6802     }
6803     return skip;
6804 }
6805 
PreCallRecordCmdSetStencilReference(VkCommandBuffer commandBuffer,VkStencilFaceFlags faceMask,uint32_t reference)6806 void CoreChecks::PreCallRecordCmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
6807                                                      uint32_t reference) {
6808     GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
6809     cb_state->status |= CBSTATUS_STENCIL_REFERENCE_SET;
6810 }
6811 
6812 // Update pipeline_layout bind points applying the "Pipeline Layout Compatibility" rules
UpdateLastBoundDescriptorSets(layer_data * device_data,GLOBAL_CB_NODE * cb_state,VkPipelineBindPoint pipeline_bind_point,const PIPELINE_LAYOUT_NODE * pipeline_layout,uint32_t first_set,uint32_t set_count,const std::vector<cvdescriptorset::DescriptorSet * > descriptor_sets,uint32_t dynamic_offset_count,const uint32_t * p_dynamic_offsets)6813 static void UpdateLastBoundDescriptorSets(layer_data *device_data, GLOBAL_CB_NODE *cb_state,
6814                                           VkPipelineBindPoint pipeline_bind_point, const PIPELINE_LAYOUT_NODE *pipeline_layout,
6815                                           uint32_t first_set, uint32_t set_count,
6816                                           const std::vector<cvdescriptorset::DescriptorSet *> descriptor_sets,
6817                                           uint32_t dynamic_offset_count, const uint32_t *p_dynamic_offsets) {
6818     // Defensive
6819     assert(set_count);
6820     if (0 == set_count) return;
6821     assert(pipeline_layout);
6822     if (!pipeline_layout) return;
6823 
6824     uint32_t required_size = first_set + set_count;
6825     const uint32_t last_binding_index = required_size - 1;
6826     assert(last_binding_index < pipeline_layout->compat_for_set.size());
6827 
6828     // Some useful shorthand
6829     auto &last_bound = cb_state->lastBound[pipeline_bind_point];
6830 
6831     auto &bound_sets = last_bound.boundDescriptorSets;
6832     auto &dynamic_offsets = last_bound.dynamicOffsets;
6833     auto &bound_compat_ids = last_bound.compat_id_for_set;
6834     auto &pipe_compat_ids = pipeline_layout->compat_for_set;
6835 
6836     const uint32_t current_size = static_cast<uint32_t>(bound_sets.size());
6837     assert(current_size == dynamic_offsets.size());
6838     assert(current_size == bound_compat_ids.size());
6839 
6840     // We need this three times in this function, but nowhere else
6841     auto push_descriptor_cleanup = [&last_bound](const cvdescriptorset::DescriptorSet *ds) -> bool {
6842         if (ds && ds->IsPushDescriptor()) {
6843             assert(ds == last_bound.push_descriptor_set.get());
6844             last_bound.push_descriptor_set = nullptr;
6845             return true;
6846         }
6847         return false;
6848     };
6849 
6850     // Clean up the "disturbed" before and after the range to be set
6851     if (required_size < current_size) {
6852         if (bound_compat_ids[last_binding_index] != pipe_compat_ids[last_binding_index]) {
6853             // We're disturbing those after last, we'll shrink below, but first need to check for and cleanup the push_descriptor
6854             for (auto set_idx = required_size; set_idx < current_size; ++set_idx) {
6855                 if (push_descriptor_cleanup(bound_sets[set_idx])) break;
6856             }
6857         } else {
6858             // We're not disturbing past last, so leave the upper binding data alone.
6859             required_size = current_size;
6860         }
6861     }
6862 
6863     // We resize if we need more set entries or if those past "last" are disturbed
6864     if (required_size != current_size) {
6865         // TODO: put these size tied things in a struct (touches many lines)
6866         bound_sets.resize(required_size);
6867         dynamic_offsets.resize(required_size);
6868         bound_compat_ids.resize(required_size);
6869     }
6870 
6871     // For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
6872     for (uint32_t set_idx = 0; set_idx < first_set; ++set_idx) {
6873         if (bound_compat_ids[set_idx] != pipe_compat_ids[set_idx]) {
6874             push_descriptor_cleanup(bound_sets[set_idx]);
6875             bound_sets[set_idx] = nullptr;
6876             dynamic_offsets[set_idx].clear();
6877             bound_compat_ids[set_idx] = pipe_compat_ids[set_idx];
6878         }
6879     }
6880 
6881     // Now update the bound sets with the input sets
6882     const uint32_t *input_dynamic_offsets = p_dynamic_offsets;  // "read" pointer for dynamic offset data
6883     for (uint32_t input_idx = 0; input_idx < set_count; input_idx++) {
6884         auto set_idx = input_idx + first_set;  // set_idx is index within layout, input_idx is index within input descriptor sets
6885         cvdescriptorset::DescriptorSet *descriptor_set = descriptor_sets[input_idx];
6886 
6887         // Record binding (or push)
6888         if (descriptor_set != last_bound.push_descriptor_set.get()) {
6889             // Only cleanup the push descriptors if they aren't the currently used set.
6890             push_descriptor_cleanup(bound_sets[set_idx]);
6891         }
6892         bound_sets[set_idx] = descriptor_set;
6893         bound_compat_ids[set_idx] = pipe_compat_ids[set_idx];  // compat ids are canonical *per* set index
6894 
6895         if (descriptor_set) {
6896             auto set_dynamic_descriptor_count = descriptor_set->GetDynamicDescriptorCount();
6897             // TODO: Add logic for tracking push_descriptor offsets (here or in caller)
6898             if (set_dynamic_descriptor_count && input_dynamic_offsets) {
6899                 const uint32_t *end_offset = input_dynamic_offsets + set_dynamic_descriptor_count;
6900                 dynamic_offsets[set_idx] = std::vector<uint32_t>(input_dynamic_offsets, end_offset);
6901                 input_dynamic_offsets = end_offset;
6902                 assert(input_dynamic_offsets <= (p_dynamic_offsets + dynamic_offset_count));
6903             } else {
6904                 dynamic_offsets[set_idx].clear();
6905             }
6906             if (!descriptor_set->IsPushDescriptor()) {
6907                 // Can't cache validation of push_descriptors
6908                 cb_state->validated_descriptor_sets.insert(descriptor_set);
6909             }
6910         }
6911     }
6912 }
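
// Illustration of the "disturbed set" bookkeeping above (hypothetical compat ids):
//
//     // before: bound_compat_ids == {A, B, C, D}  (sets 0..3 bound)
//     // rebind set 1 with a layout whose pipe_compat_ids == {A, X, ...}
//     // after:  set 0 survives (A still matches), set 1 takes compat id X,
//     //         and sets 2..3 are shrunk away/invalidated, because pipeline
//     //         layout compatibility for set N requires sets 0..N to match.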

// Update the bound state for the bind point, including the effects of incompatible pipeline layouts
void CoreChecks::PreCallRecordCmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
                                                    VkPipelineLayout layout, uint32_t firstSet, uint32_t setCount,
                                                    const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
                                                    const uint32_t *pDynamicOffsets) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    auto pipeline_layout = GetPipelineLayout(device_data, layout);
    std::vector<cvdescriptorset::DescriptorSet *> descriptor_sets;
    descriptor_sets.reserve(setCount);

    // Construct a list of the descriptors
    bool found_non_null = false;
    for (uint32_t i = 0; i < setCount; i++) {
        cvdescriptorset::DescriptorSet *descriptor_set = GetSetNode(pDescriptorSets[i]);
        descriptor_sets.emplace_back(descriptor_set);
        found_non_null |= descriptor_set != nullptr;
    }
    if (found_non_null) {  // which implies setCount > 0
        UpdateLastBoundDescriptorSets(device_data, cb_state, pipelineBindPoint, pipeline_layout, firstSet, setCount,
                                      descriptor_sets, dynamicOffsetCount, pDynamicOffsets);
        cb_state->lastBound[pipelineBindPoint].pipeline_layout = layout;
    }
}

static bool ValidateDynamicOffsetAlignment(const debug_report_data *report_data, const VkDescriptorSetLayoutBinding *binding,
                                           VkDescriptorType test_type, VkDeviceSize alignment, const uint32_t *pDynamicOffsets,
                                           const char *err_msg, const char *limit_name, uint32_t *offset_idx) {
    bool skip = false;
    if (binding->descriptorType == test_type) {
        const auto end_idx = *offset_idx + binding->descriptorCount;
        for (uint32_t current_idx = *offset_idx; current_idx < end_idx; current_idx++) {
            if (SafeModulo(pDynamicOffsets[current_idx], alignment) != 0) {
                skip |= log_msg(
                    report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, err_msg,
                    "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u but must be a multiple of device limit %s 0x%" PRIxLEAST64
                    ".",
                    current_idx, pDynamicOffsets[current_idx], limit_name, alignment);
            }
        }
        *offset_idx = end_idx;
    }
    return skip;
}
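
// Worked example (hypothetical device limit): with minUniformBufferOffsetAlignment == 256,
// a dynamic offset of 384 is rejected, since SafeModulo(384, 256) == 128 != 0, while
// offsets 0, 256, 512, ... pass.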

bool CoreChecks::PreCallValidateCmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
                                                      VkPipelineLayout layout, uint32_t firstSet, uint32_t setCount,
                                                      const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
                                                      const uint32_t *pDynamicOffsets) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    assert(cb_state);
    bool skip = false;
    skip |= ValidateCmdQueueFlags(device_data, cb_state, "vkCmdBindDescriptorSets()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                  "VUID-vkCmdBindDescriptorSets-commandBuffer-cmdpool");
    skip |= ValidateCmd(device_data, cb_state, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
    // Track total count of dynamic descriptor types to make sure we have an offset for each one
    uint32_t total_dynamic_descriptors = 0;
    string error_string = "";
    uint32_t last_set_index = firstSet + setCount - 1;

    if (last_set_index >= cb_state->lastBound[pipelineBindPoint].boundDescriptorSets.size()) {
        cb_state->lastBound[pipelineBindPoint].boundDescriptorSets.resize(last_set_index + 1);
        cb_state->lastBound[pipelineBindPoint].dynamicOffsets.resize(last_set_index + 1);
        cb_state->lastBound[pipelineBindPoint].compat_id_for_set.resize(last_set_index + 1);
    }
    auto pipeline_layout = GetPipelineLayout(device_data, layout);
    for (uint32_t set_idx = 0; set_idx < setCount; set_idx++) {
        cvdescriptorset::DescriptorSet *descriptor_set = GetSetNode(pDescriptorSets[set_idx]);
        if (descriptor_set) {
            // Verify that the set being bound is compatible with the overlapping setLayout of pipelineLayout
            if (!VerifySetLayoutCompatibility(descriptor_set, pipeline_layout, set_idx + firstSet, error_string)) {
                skip |= log_msg(
                    device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                    HandleToUint64(pDescriptorSets[set_idx]), "VUID-vkCmdBindDescriptorSets-pDescriptorSets-00358",
                    "descriptorSet #%u being bound is not compatible with overlapping descriptorSetLayout at index %u of "
                    "pipelineLayout %s due to: %s.",
                    set_idx, set_idx + firstSet, device_data->report_data->FormatHandle(layout).c_str(), error_string.c_str());
            }

            auto set_dynamic_descriptor_count = descriptor_set->GetDynamicDescriptorCount();
            if (set_dynamic_descriptor_count) {
                // First make sure we won't overstep bounds of pDynamicOffsets array
                if ((total_dynamic_descriptors + set_dynamic_descriptor_count) > dynamicOffsetCount) {
                    // Test/report this here, such that we don't run past the end of pDynamicOffsets in the else clause
                    skip |= log_msg(
                        device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                        HandleToUint64(pDescriptorSets[set_idx]), "VUID-vkCmdBindDescriptorSets-dynamicOffsetCount-00359",
                        "descriptorSet #%u (%s) requires %u dynamicOffsets, but only %u dynamicOffsets are left in "
                        "pDynamicOffsets array. There must be one dynamic offset for each dynamic descriptor being bound.",
                        set_idx, device_data->report_data->FormatHandle(pDescriptorSets[set_idx]).c_str(),
                        descriptor_set->GetDynamicDescriptorCount(), (dynamicOffsetCount - total_dynamic_descriptors));
                    // Set the number found to the maximum to prevent duplicate messages, or subsequent descriptor sets from
                    // testing against the "short tail" we're skipping below.
                    total_dynamic_descriptors = dynamicOffsetCount;
                } else {  // Validate dynamic offsets and Dynamic Offset Minimums
                    uint32_t cur_dyn_offset = total_dynamic_descriptors;
                    const auto dsl = descriptor_set->GetLayout();
                    const auto binding_count = dsl->GetBindingCount();
                    const auto &limits = device_data->phys_dev_props.limits;
                    for (uint32_t binding_idx = 0; binding_idx < binding_count; binding_idx++) {
                        const auto *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx);
                        skip |= ValidateDynamicOffsetAlignment(device_data->report_data, binding,
                                                               VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC,
                                                               limits.minUniformBufferOffsetAlignment, pDynamicOffsets,
                                                               "VUID-vkCmdBindDescriptorSets-pDynamicOffsets-01971",
                                                               "minUniformBufferOffsetAlignment", &cur_dyn_offset);
                        skip |= ValidateDynamicOffsetAlignment(device_data->report_data, binding,
                                                               VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC,
                                                               limits.minStorageBufferOffsetAlignment, pDynamicOffsets,
                                                               "VUID-vkCmdBindDescriptorSets-pDynamicOffsets-01972",
                                                               "minStorageBufferOffsetAlignment", &cur_dyn_offset);
                    }
                    // Keep a running total of dynamic descriptor count to verify at the end
                    total_dynamic_descriptors += set_dynamic_descriptor_count;
                }
            }
        } else {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                            HandleToUint64(pDescriptorSets[set_idx]), kVUID_Core_DrawState_InvalidSet,
                            "Attempt to bind descriptor set %s that doesn't exist!",
                            device_data->report_data->FormatHandle(pDescriptorSets[set_idx]).c_str());
        }
    }
    // dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
    if (total_dynamic_descriptors != dynamicOffsetCount) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdBindDescriptorSets-dynamicOffsetCount-00359",
                        "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount is %u. It should "
                        "exactly match the number of dynamic descriptors.",
                        setCount, total_dynamic_descriptors, dynamicOffsetCount);
    }
    return skip;
}
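
// A minimal application-side sketch (illustrative; `cmd`, `layout`, and `sets` are
// assumed handles): binding two sets that each contain one dynamic uniform buffer
// requires exactly two dynamic offsets, consumed in set order, then binding order:
//
//     const uint32_t dynamic_offsets[2] = {0, 256};
//     vkCmdBindDescriptorSets(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, layout,
//                             0 /*firstSet*/, 2 /*setCount*/, sets,
//                             2 /*dynamicOffsetCount*/, dynamic_offsets);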

// Validates that the supplied bind point is supported for the command buffer (viz. the command pool)
// Takes a map of error codes as some of the VUIDs (e.g. for vkCmdBindPipeline) are written per bind point
// TODO: add vkCmdBindPipeline bind_point validation using this call.
bool CoreChecks::ValidatePipelineBindPoint(layer_data *device_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
                                           const char *func_name, const std::map<VkPipelineBindPoint, std::string> &bind_errors) {
    bool skip = false;
    auto pool = GetCommandPoolNode(cb_state->createInfo.commandPool);
    if (pool) {  // The loss of a pool in a recording cmd is reported in DestroyCommandPool
        static const std::map<VkPipelineBindPoint, VkQueueFlags> flag_mask = {
            std::make_pair(VK_PIPELINE_BIND_POINT_GRAPHICS, static_cast<VkQueueFlags>(VK_QUEUE_GRAPHICS_BIT)),
            std::make_pair(VK_PIPELINE_BIND_POINT_COMPUTE, static_cast<VkQueueFlags>(VK_QUEUE_COMPUTE_BIT)),
            std::make_pair(VK_PIPELINE_BIND_POINT_RAY_TRACING_NV,
                           static_cast<VkQueueFlags>(VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT)),
        };
        const auto &qfp = GetPhysicalDeviceState()->queue_family_properties[pool->queueFamilyIndex];
        if (0 == (qfp.queueFlags & flag_mask.at(bind_point))) {
            const std::string &error = bind_errors.at(bind_point);
            auto cb_u64 = HandleToUint64(cb_state->commandBuffer);
            auto cp_u64 = HandleToUint64(cb_state->createInfo.commandPool);
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            cb_u64, error,
                            "%s: CommandBuffer %s was allocated from VkCommandPool %s that does not support bindpoint %s.",
                            func_name, device_data->report_data->FormatHandle(cb_u64).c_str(),
                            device_data->report_data->FormatHandle(cp_u64).c_str(), string_VkPipelineBindPoint(bind_point));
        }
    }
    return skip;
}
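
// Example of the failure mode checked above (hypothetical setup): for a command
// buffer allocated from a pool whose queue family reports only VK_QUEUE_COMPUTE_BIT,
// `qfp.queueFlags & flag_mask.at(VK_PIPELINE_BIND_POINT_GRAPHICS)` evaluates to 0,
// so a graphics bind point is reported with the caller-supplied VUID.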

bool CoreChecks::PreCallValidateCmdPushDescriptorSetKHR(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
                                                        VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount,
                                                        const VkWriteDescriptorSet *pDescriptorWrites) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    assert(cb_state);
    const char *func_name = "vkCmdPushDescriptorSetKHR()";
    bool skip = false;
    skip |= ValidateCmd(device_data, cb_state, CMD_PUSHDESCRIPTORSETKHR, func_name);
    skip |= ValidateCmdQueueFlags(device_data, cb_state, func_name, (VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT),
                                  "VUID-vkCmdPushDescriptorSetKHR-commandBuffer-cmdpool");

    static const std::map<VkPipelineBindPoint, std::string> bind_errors = {
        std::make_pair(VK_PIPELINE_BIND_POINT_GRAPHICS, "VUID-vkCmdPushDescriptorSetKHR-pipelineBindPoint-00363"),
        std::make_pair(VK_PIPELINE_BIND_POINT_COMPUTE, "VUID-vkCmdPushDescriptorSetKHR-pipelineBindPoint-00363"),
        std::make_pair(VK_PIPELINE_BIND_POINT_RAY_TRACING_NV, "VUID-vkCmdPushDescriptorSetKHR-pipelineBindPoint-00363")};

    skip |= ValidatePipelineBindPoint(device_data, cb_state, pipelineBindPoint, func_name, bind_errors);
    auto layout_data = GetPipelineLayout(device_data, layout);

    // Validate that the set index points to a push descriptor set and is in range
    if (layout_data) {
        const auto &set_layouts = layout_data->set_layouts;
        const auto layout_u64 = HandleToUint64(layout);
        if (set < set_layouts.size()) {
            const auto dsl = set_layouts[set];
            if (dsl) {
                if (!dsl->IsPushDescriptor()) {
                    skip |= log_msg(
                        device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT,
                        layout_u64, "VUID-vkCmdPushDescriptorSetKHR-set-00365",
                        "%s: Set index %" PRIu32 " does not match push descriptor set layout index for VkPipelineLayout %s.",
                        func_name, set, device_data->report_data->FormatHandle(layout_u64).c_str());
                } else {
                    // Create an empty proxy in order to use the existing descriptor set update validation
                    // TODO: move the validation (like this) that doesn't need descriptor set state to the DSL object so we
                    // don't have to do this.
                    cvdescriptorset::DescriptorSet proxy_ds(VK_NULL_HANDLE, VK_NULL_HANDLE, dsl, 0, device_data);
                    skip |= proxy_ds.ValidatePushDescriptorsUpdate(device_data->report_data, descriptorWriteCount,
                                                                   pDescriptorWrites, func_name);
                }
            }
        } else {
            skip |=
                log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT,
                        layout_u64, "VUID-vkCmdPushDescriptorSetKHR-set-00364",
                        "%s: Set index %" PRIu32 " is outside of range for VkPipelineLayout %s (set < %" PRIu32 ").", func_name,
                        set, device_data->report_data->FormatHandle(layout_u64).c_str(), static_cast<uint32_t>(set_layouts.size()));
        }
    }

    return skip;
}

void CoreChecks::RecordCmdPushDescriptorSetState(layer_data *device_data, GLOBAL_CB_NODE *cb_state,
                                                 VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t set,
                                                 uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites) {
    const auto &pipeline_layout = GetPipelineLayout(device_data, layout);
    // Short circuit invalid updates
    if (!pipeline_layout || (set >= pipeline_layout->set_layouts.size()) || !pipeline_layout->set_layouts[set] ||
        !pipeline_layout->set_layouts[set]->IsPushDescriptor())
        return;

    // We need a descriptor set to update the bindings with, compatible with the passed layout
    const auto dsl = pipeline_layout->set_layouts[set];
    auto &last_bound = cb_state->lastBound[pipelineBindPoint];
    auto &push_descriptor_set = last_bound.push_descriptor_set;
    // If we are disturbing the current push_descriptor_set, clear it
    if (!push_descriptor_set || !CompatForSet(set, last_bound.compat_id_for_set, pipeline_layout->compat_for_set)) {
        push_descriptor_set.reset(new cvdescriptorset::DescriptorSet(0, 0, dsl, 0, device_data));
    }

    std::vector<cvdescriptorset::DescriptorSet *> descriptor_sets = {push_descriptor_set.get()};
    UpdateLastBoundDescriptorSets(device_data, cb_state, pipelineBindPoint, pipeline_layout, set, 1, descriptor_sets, 0, nullptr);
    last_bound.pipeline_layout = layout;

    // Now that we have either the new or extant push_descriptor set ... do the write updates against it
    push_descriptor_set->PerformPushDescriptorsUpdate(descriptorWriteCount, pDescriptorWrites);
}

void CoreChecks::PreCallRecordCmdPushDescriptorSetKHR(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
                                                      VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount,
                                                      const VkWriteDescriptorSet *pDescriptorWrites) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    RecordCmdPushDescriptorSetState(device_data, cb_state, pipelineBindPoint, layout, set, descriptorWriteCount, pDescriptorWrites);
}

static VkDeviceSize GetIndexAlignment(VkIndexType indexType) {
    switch (indexType) {
        case VK_INDEX_TYPE_UINT16:
            return 2;
        case VK_INDEX_TYPE_UINT32:
            return 4;
        default:
            // Not a real index type. Express no alignment requirement here; we expect the upper layer
            // to have already picked up on the enum being nonsense.
            return 1;
    }
}

bool CoreChecks::PreCallValidateCmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                                   VkIndexType indexType) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    auto buffer_state = GetBufferState(buffer);
    auto cb_node = GetCBNode(commandBuffer);
    assert(buffer_state);
    assert(cb_node);

    bool skip = ValidateBufferUsageFlags(device_data, buffer_state, VK_BUFFER_USAGE_INDEX_BUFFER_BIT, true,
                                         "VUID-vkCmdBindIndexBuffer-buffer-00433", "vkCmdBindIndexBuffer()",
                                         "VK_BUFFER_USAGE_INDEX_BUFFER_BIT");
    skip |= ValidateCmdQueueFlags(device_data, cb_node, "vkCmdBindIndexBuffer()", VK_QUEUE_GRAPHICS_BIT,
                                  "VUID-vkCmdBindIndexBuffer-commandBuffer-cmdpool");
    skip |= ValidateCmd(device_data, cb_node, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
    skip |= ValidateMemoryIsBoundToBuffer(device_data, buffer_state, "vkCmdBindIndexBuffer()",
                                          "VUID-vkCmdBindIndexBuffer-buffer-00434");
    auto offset_align = GetIndexAlignment(indexType);
    if (offset % offset_align) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkCmdBindIndexBuffer-offset-00432",
                        "vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") does not fall on alignment (%s) boundary.", offset,
                        string_VkIndexType(indexType));
    }

    return skip;
}
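
// Worked example: with indexType == VK_INDEX_TYPE_UINT32, GetIndexAlignment()
// returns 4, so an offset of 6 fails (6 % 4 == 2) and triggers
// VUID-vkCmdBindIndexBuffer-offset-00432; offsets 0, 4, 8, ... pass.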

void CoreChecks::PreCallRecordCmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                                 VkIndexType indexType) {
    auto buffer_state = GetBufferState(buffer);
    auto cb_node = GetCBNode(commandBuffer);

    cb_node->status |= CBSTATUS_INDEX_BUFFER_BOUND;
    cb_node->index_buffer_binding.buffer = buffer;
    cb_node->index_buffer_binding.size = buffer_state->createInfo.size;
    cb_node->index_buffer_binding.offset = offset;
    cb_node->index_buffer_binding.index_type = indexType;
}

static inline void UpdateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->draw_data.push_back(pCB->current_draw_data); }

bool CoreChecks::PreCallValidateCmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount,
                                                     const VkBuffer *pBuffers, const VkDeviceSize *pOffsets) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    auto cb_state = GetCBNode(commandBuffer);
    assert(cb_state);

    bool skip = ValidateCmdQueueFlags(device_data, cb_state, "vkCmdBindVertexBuffers()", VK_QUEUE_GRAPHICS_BIT,
                                      "VUID-vkCmdBindVertexBuffers-commandBuffer-cmdpool");
    skip |= ValidateCmd(device_data, cb_state, CMD_BINDVERTEXBUFFERS, "vkCmdBindVertexBuffers()");
    for (uint32_t i = 0; i < bindingCount; ++i) {
        auto buffer_state = GetBufferState(pBuffers[i]);
        assert(buffer_state);
        skip |= ValidateBufferUsageFlags(device_data, buffer_state, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, true,
                                         "VUID-vkCmdBindVertexBuffers-pBuffers-00627", "vkCmdBindVertexBuffers()",
                                         "VK_BUFFER_USAGE_VERTEX_BUFFER_BIT");
        skip |= ValidateMemoryIsBoundToBuffer(device_data, buffer_state, "vkCmdBindVertexBuffers()",
                                              "VUID-vkCmdBindVertexBuffers-pBuffers-00628");
        if (pOffsets[i] >= buffer_state->createInfo.size) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                            HandleToUint64(buffer_state->buffer), "VUID-vkCmdBindVertexBuffers-pOffsets-00626",
                            "vkCmdBindVertexBuffers() offset (0x%" PRIxLEAST64 ") is beyond the end of the buffer.", pOffsets[i]);
        }
    }
    return skip;
}
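
// Example of the offset check above (hypothetical buffer): for a vertex buffer
// created with size == 1024, pOffsets[i] == 1024 is rejected because the offset
// must be strictly less than the buffer size; an offset of 1023 would pass this check.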

void CoreChecks::PreCallRecordCmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount,
                                                   const VkBuffer *pBuffers, const VkDeviceSize *pOffsets) {
    auto cb_state = GetCBNode(commandBuffer);

    uint32_t end = firstBinding + bindingCount;
    if (cb_state->current_draw_data.vertex_buffer_bindings.size() < end) {
        cb_state->current_draw_data.vertex_buffer_bindings.resize(end);
    }

    for (uint32_t i = 0; i < bindingCount; ++i) {
        auto &vertex_buffer_binding = cb_state->current_draw_data.vertex_buffer_bindings[i + firstBinding];
        vertex_buffer_binding.buffer = pBuffers[i];
        vertex_buffer_binding.offset = pOffsets[i];
    }
}

// Generic function to handle validation for all CmdDraw* type functions
bool CoreChecks::ValidateCmdDrawType(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed, VkPipelineBindPoint bind_point,
                                     CMD_TYPE cmd_type, const char *caller, VkQueueFlags queue_flags, const char *queue_flag_code,
                                     const char *renderpass_msg_code, const char *pipebound_msg_code,
                                     const char *dynamic_state_msg_code) {
    bool skip = false;
    GLOBAL_CB_NODE *cb_state = GetCBNode(cmd_buffer);
    if (cb_state) {
        skip |= ValidateCmdQueueFlags(dev_data, cb_state, caller, queue_flags, queue_flag_code);
        skip |= ValidateCmd(dev_data, cb_state, cmd_type, caller);
        skip |= ValidateCmdBufDrawState(dev_data, cb_state, cmd_type, indexed, bind_point, caller, pipebound_msg_code,
                                        dynamic_state_msg_code);
        skip |= (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point) ? OutsideRenderPass(dev_data, cb_state, caller, renderpass_msg_code)
                                                                : InsideRenderPass(dev_data, cb_state, caller, renderpass_msg_code);
    }
    return skip;
}

// Generic function to handle state update for all CmdDraw* and CmdDispatch* type functions
void CoreChecks::UpdateStateCmdDrawDispatchType(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
    UpdateDrawState(dev_data, cb_state, bind_point);
}

// Generic function to handle state update for all CmdDraw* type functions
void CoreChecks::UpdateStateCmdDrawType(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
    UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point);
    UpdateResourceTrackingOnDraw(cb_state);
    cb_state->hasDrawCmd = true;
}

bool CoreChecks::PreCallValidateCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
                                        uint32_t firstVertex, uint32_t firstInstance) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    return ValidateCmdDrawType(device_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_GRAPHICS, CMD_DRAW, "vkCmdDraw()",
                               VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdDraw-commandBuffer-cmdpool", "VUID-vkCmdDraw-renderpass",
                               "VUID-vkCmdDraw-None-00442", "VUID-vkCmdDraw-None-00443");
}

void CoreChecks::PreCallRecordCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
                                      uint32_t firstVertex, uint32_t firstInstance) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GpuAllocateValidationResources(device_data, commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS);
}

void CoreChecks::PostCallRecordCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
                                       uint32_t firstVertex, uint32_t firstInstance) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    UpdateStateCmdDrawType(device_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
}

bool CoreChecks::PreCallValidateCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount,
                                               uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skip = ValidateCmdDrawType(device_data, commandBuffer, true, VK_PIPELINE_BIND_POINT_GRAPHICS, CMD_DRAWINDEXED,
                                    "vkCmdDrawIndexed()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdDrawIndexed-commandBuffer-cmdpool",
                                    "VUID-vkCmdDrawIndexed-renderpass", "VUID-vkCmdDrawIndexed-None-00461",
                                    "VUID-vkCmdDrawIndexed-None-00462");
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    if (!skip && (cb_state->status & CBSTATUS_INDEX_BUFFER_BOUND)) {
        unsigned int index_size = 0;
        const auto &index_buffer_binding = cb_state->index_buffer_binding;
        if (index_buffer_binding.index_type == VK_INDEX_TYPE_UINT16) {
            index_size = 2;
        } else if (index_buffer_binding.index_type == VK_INDEX_TYPE_UINT32) {
            index_size = 4;
        }
        VkDeviceSize end_offset = (index_size * ((VkDeviceSize)firstIndex + indexCount)) + index_buffer_binding.offset;
        if (end_offset > index_buffer_binding.size) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                            HandleToUint64(index_buffer_binding.buffer), "VUID-vkCmdDrawIndexed-indexSize-00463",
                            "vkCmdDrawIndexed() index size (%u) * (firstIndex (%u) + indexCount (%u)) "
                            "+ binding offset (%" PRIuLEAST64 ") = an ending offset of %" PRIuLEAST64
                            " bytes, which is greater than the index buffer size (%" PRIuLEAST64 ").",
                            index_size, firstIndex, indexCount, index_buffer_binding.offset, end_offset, index_buffer_binding.size);
        }
    }
    return skip;
}
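
// Worked example of the bounds check above (made-up numbers): with
// VK_INDEX_TYPE_UINT32 (index_size == 4), firstIndex == 10, indexCount == 100,
// and a binding offset of 256, the draw reads up to
// end_offset = 4 * (10 + 100) + 256 = 696 bytes, so an index buffer binding
// smaller than 696 bytes triggers VUID-vkCmdDrawIndexed-indexSize-00463.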

void CoreChecks::PreCallRecordCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount,
                                             uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GpuAllocateValidationResources(device_data, commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS);
}

void CoreChecks::PostCallRecordCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount,
                                              uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    UpdateStateCmdDrawType(device_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
}

bool CoreChecks::PreCallValidateCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count,
                                                uint32_t stride) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skip = ValidateCmdDrawType(device_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_GRAPHICS, CMD_DRAWINDIRECT,
                                    "vkCmdDrawIndirect()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdDrawIndirect-commandBuffer-cmdpool",
                                    "VUID-vkCmdDrawIndirect-renderpass", "VUID-vkCmdDrawIndirect-None-00485",
                                    "VUID-vkCmdDrawIndirect-None-00486");
    BUFFER_STATE *buffer_state = GetBufferState(buffer);
    skip |= ValidateMemoryIsBoundToBuffer(device_data, buffer_state, "vkCmdDrawIndirect()", "VUID-vkCmdDrawIndirect-buffer-00474");
    // TODO: If the drawIndirectFirstInstance feature is not enabled, all the firstInstance members of the
    // VkDrawIndirectCommand structures accessed by this command must be 0, which will require access to the contents of 'buffer'.
    return skip;
}

void CoreChecks::PreCallRecordCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count,
                                              uint32_t stride) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GpuAllocateValidationResources(device_data, commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS);
}

void CoreChecks::PostCallRecordCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count,
                                               uint32_t stride) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    BUFFER_STATE *buffer_state = GetBufferState(buffer);
    UpdateStateCmdDrawType(device_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
    AddCommandBufferBindingBuffer(device_data, cb_state, buffer_state);
}

bool CoreChecks::PreCallValidateCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                                       uint32_t count, uint32_t stride) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skip = ValidateCmdDrawType(
        device_data, commandBuffer, true, VK_PIPELINE_BIND_POINT_GRAPHICS, CMD_DRAWINDEXEDINDIRECT, "vkCmdDrawIndexedIndirect()",
        VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdDrawIndexedIndirect-commandBuffer-cmdpool", "VUID-vkCmdDrawIndexedIndirect-renderpass",
        "VUID-vkCmdDrawIndexedIndirect-None-00537", "VUID-vkCmdDrawIndexedIndirect-None-00538");
    BUFFER_STATE *buffer_state = GetBufferState(buffer);
    skip |= ValidateMemoryIsBoundToBuffer(device_data, buffer_state, "vkCmdDrawIndexedIndirect()",
                                          "VUID-vkCmdDrawIndexedIndirect-buffer-00526");
    // TODO: If the drawIndirectFirstInstance feature is not enabled, all the firstInstance members of the
    // VkDrawIndexedIndirectCommand structures accessed by this command must be 0, which will require access to the contents of
    // 'buffer'.
    return skip;
}

void CoreChecks::PreCallRecordCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                                     uint32_t count, uint32_t stride) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GpuAllocateValidationResources(device_data, commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS);
}

void CoreChecks::PostCallRecordCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                                      uint32_t count, uint32_t stride) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    BUFFER_STATE *buffer_state = GetBufferState(buffer);
    UpdateStateCmdDrawType(device_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
    AddCommandBufferBindingBuffer(device_data, cb_state, buffer_state);
}

bool CoreChecks::PreCallValidateCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    return ValidateCmdDrawType(device_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_COMPUTE, CMD_DISPATCH, "vkCmdDispatch()",
                               VK_QUEUE_COMPUTE_BIT, "VUID-vkCmdDispatch-commandBuffer-cmdpool", "VUID-vkCmdDispatch-renderpass",
                               "VUID-vkCmdDispatch-None-00391", kVUIDUndefined);
}

void CoreChecks::PreCallRecordCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GpuAllocateValidationResources(device_data, commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE);
}

void CoreChecks::PostCallRecordCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    UpdateStateCmdDrawDispatchType(device_data, cb_state, VK_PIPELINE_BIND_POINT_COMPUTE);
}

bool CoreChecks::PreCallValidateCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skip =
        ValidateCmdDrawType(device_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_COMPUTE, CMD_DISPATCHINDIRECT,
                            "vkCmdDispatchIndirect()", VK_QUEUE_COMPUTE_BIT, "VUID-vkCmdDispatchIndirect-commandBuffer-cmdpool",
                            "VUID-vkCmdDispatchIndirect-renderpass", "VUID-vkCmdDispatchIndirect-None-00404", kVUIDUndefined);
    BUFFER_STATE *buffer_state = GetBufferState(buffer);
    skip |= ValidateMemoryIsBoundToBuffer(device_data, buffer_state, "vkCmdDispatchIndirect()",
                                          "VUID-vkCmdDispatchIndirect-buffer-00401");
    return skip;
}

void CoreChecks::PreCallRecordCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GpuAllocateValidationResources(device_data, commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE);
}

void CoreChecks::PostCallRecordCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    UpdateStateCmdDrawDispatchType(device_data, cb_state, VK_PIPELINE_BIND_POINT_COMPUTE);
    BUFFER_STATE *buffer_state = GetBufferState(buffer);
    AddCommandBufferBindingBuffer(device_data, cb_state, buffer_state);
}

// Validate that an image's sampleCount matches the requirement for a specific API call
bool CoreChecks::ValidateImageSampleCount(layer_data *dev_data, IMAGE_STATE *image_state, VkSampleCountFlagBits sample_count,
                                          const char *location, const std::string &msgCode) {
    bool skip = false;
    if (image_state->createInfo.samples != sample_count) {
        skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                       HandleToUint64(image_state->image), msgCode,
                       "%s for image %s was created with a sample count of %s but must be %s.", location,
                       dev_data->report_data->FormatHandle(image_state->image).c_str(),
                       string_VkSampleCountFlagBits(image_state->createInfo.samples), string_VkSampleCountFlagBits(sample_count));
    }
    return skip;
}

bool CoreChecks::PreCallValidateCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
                                                VkDeviceSize dataSize, const void *pData) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    auto cb_state = GetCBNode(commandBuffer);
    assert(cb_state);
    auto dst_buffer_state = GetBufferState(dstBuffer);
    assert(dst_buffer_state);

    bool skip = false;
    skip |= ValidateMemoryIsBoundToBuffer(device_data, dst_buffer_state, "vkCmdUpdateBuffer()",
                                          "VUID-vkCmdUpdateBuffer-dstBuffer-00035");
    // Validate that DST buffer has correct usage flags set
    skip |= ValidateBufferUsageFlags(device_data, dst_buffer_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                     "VUID-vkCmdUpdateBuffer-dstBuffer-00034", "vkCmdUpdateBuffer()",
                                     "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
    skip |= ValidateCmdQueueFlags(device_data, cb_state, "vkCmdUpdateBuffer()",
                                  VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                  "VUID-vkCmdUpdateBuffer-commandBuffer-cmdpool");
    skip |= ValidateCmd(device_data, cb_state, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
    skip |= InsideRenderPass(device_data, cb_state, "vkCmdUpdateBuffer()", "VUID-vkCmdUpdateBuffer-renderpass");
    return skip;
}

void CoreChecks::PostCallRecordCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
                                               VkDeviceSize dataSize, const void *pData) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    auto cb_state = GetCBNode(commandBuffer);
    auto dst_buffer_state = GetBufferState(dstBuffer);

    // Update bindings between buffer and cmd buffer
    AddCommandBufferBindingBuffer(device_data, cb_state, dst_buffer_state);
}

bool CoreChecks::SetEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = GetCBNode(commandBuffer);
    if (pCB) {
        pCB->eventToStageMap[event] = stageMask;
    }
    auto queue_data = dev_data->queueMap.find(queue);
    if (queue_data != dev_data->queueMap.end()) {
        queue_data->second.eventToStageMap[event] = stageMask;
    }
    return false;
}

bool CoreChecks::PreCallValidateCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    assert(cb_state);
    bool skip = ValidateCmdQueueFlags(device_data, cb_state, "vkCmdSetEvent()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                      "VUID-vkCmdSetEvent-commandBuffer-cmdpool");
    skip |= ValidateCmd(device_data, cb_state, CMD_SETEVENT, "vkCmdSetEvent()");
    skip |= InsideRenderPass(device_data, cb_state, "vkCmdSetEvent()", "VUID-vkCmdSetEvent-renderpass");
    skip |= ValidateStageMaskGsTsEnables(device_data, stageMask, "vkCmdSetEvent()", "VUID-vkCmdSetEvent-stageMask-01150",
                                         "VUID-vkCmdSetEvent-stageMask-01151", "VUID-vkCmdSetEvent-stageMask-02107",
                                         "VUID-vkCmdSetEvent-stageMask-02108");
    return skip;
}

void CoreChecks::PreCallRecordCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    auto event_state = GetEventNode(event);
    if (event_state) {
        AddCommandBufferBinding(&event_state->cb_bindings, {HandleToUint64(event), kVulkanObjectTypeEvent}, cb_state);
        event_state->cb_bindings.insert(cb_state);
    }
    cb_state->events.push_back(event);
    if (!cb_state->waitedEvents.count(event)) {
        cb_state->writeEventsBeforeWait.push_back(event);
    }
    cb_state->eventUpdates.emplace_back([=](VkQueue q) { return SetEventStageMask(q, commandBuffer, event, stageMask); });
}
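
// Note on the deferred pattern above (an interpretation, not additional validation): SetEventStageMask takes a
// VkQueue because the closure queued in eventUpdates cannot run at record time; the per-queue eventToStageMap
// is only known once the command buffer is submitted, at which point each stored callback is invoked with the
// submitting queue.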

bool CoreChecks::PreCallValidateCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    assert(cb_state);

    bool skip = ValidateCmdQueueFlags(device_data, cb_state, "vkCmdResetEvent()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                      "VUID-vkCmdResetEvent-commandBuffer-cmdpool");
    skip |= ValidateCmd(device_data, cb_state, CMD_RESETEVENT, "vkCmdResetEvent()");
    skip |= InsideRenderPass(device_data, cb_state, "vkCmdResetEvent()", "VUID-vkCmdResetEvent-renderpass");
    skip |= ValidateStageMaskGsTsEnables(device_data, stageMask, "vkCmdResetEvent()", "VUID-vkCmdResetEvent-stageMask-01154",
                                         "VUID-vkCmdResetEvent-stageMask-01155", "VUID-vkCmdResetEvent-stageMask-02109",
                                         "VUID-vkCmdResetEvent-stageMask-02110");
    return skip;
}

void CoreChecks::PreCallRecordCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    auto event_state = GetEventNode(event);
    if (event_state) {
        AddCommandBufferBinding(&event_state->cb_bindings, {HandleToUint64(event), kVulkanObjectTypeEvent}, cb_state);
        event_state->cb_bindings.insert(cb_state);
    }
    cb_state->events.push_back(event);
    if (!cb_state->waitedEvents.count(event)) {
        cb_state->writeEventsBeforeWait.push_back(event);
    }
    // TODO : Add check for "VUID-vkResetEvent-event-01148"
    cb_state->eventUpdates.emplace_back(
        [=](VkQueue q) { return SetEventStageMask(q, commandBuffer, event, VkPipelineStageFlags(0)); });
}

// Return input pipeline stage flags, expanded for individual bits if VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT is set
static VkPipelineStageFlags ExpandPipelineStageFlags(const DeviceExtensions &extensions, VkPipelineStageFlags inflags) {
    if (~inflags & VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT) return inflags;

    return (inflags & ~VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT) |
           (VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT |
            (extensions.vk_nv_mesh_shader ? (VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV | VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV) : 0) |
            VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
            VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
            VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
            VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT |
            VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT |
            (extensions.vk_ext_conditional_rendering ? VK_PIPELINE_STAGE_CONDITIONAL_RENDERING_BIT_EXT : 0) |
            (extensions.vk_ext_transform_feedback ? VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT : 0) |
            (extensions.vk_nv_shading_rate_image ? VK_PIPELINE_STAGE_SHADING_RATE_IMAGE_BIT_NV : 0) |
            (extensions.vk_ext_fragment_density_map ? VK_PIPELINE_STAGE_FRAGMENT_DENSITY_PROCESS_BIT_EXT : 0));
}
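
// Expansion sketch (illustrative, assuming none of the optional extensions are enabled):
//     ExpandPipelineStageFlags(ext, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT)
// clears ALL_GRAPHICS and ORs in each individual graphics stage from TOP_OF_PIPE through BOTTOM_OF_PIPE,
// leaving unrelated bits such as TRANSFER untouched; a mask without ALL_GRAPHICS set is returned unchanged.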

static bool HasNonFramebufferStagePipelineStageFlags(VkPipelineStageFlags inflags) {
    return (inflags & ~(VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
                        VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT)) != 0;
}

static int GetGraphicsPipelineStageLogicalOrdinal(VkPipelineStageFlagBits flag) {
    // Note that the list (and lookup) ignore whether a stage is invalid for the currently enabled extensions;
    // that should be checked elsewhere, and handling it here would greatly complicate this intentionally simple
    // implementation.
    // clang-format off
    const VkPipelineStageFlagBits ordered_array[] = {
        VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
        VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT,
        VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
        VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
        VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT,
        VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT,
        VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT,
        VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT,

        // Including the task/mesh shaders here is not technically correct, as they are in a
        // separate logical pipeline - but it works for the case this is currently used, and
        // fixing it would require significant rework and end up with the code being far more
        // verbose for no practical gain.
        // However, worth paying attention to this if using this function in a new way.
        VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV,
        VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV,

        VK_PIPELINE_STAGE_SHADING_RATE_IMAGE_BIT_NV,
        VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
        VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
        VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
        VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
        VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT
    };
    // clang-format on

    const int ordered_array_length = sizeof(ordered_array) / sizeof(VkPipelineStageFlagBits);

    for (int i = 0; i < ordered_array_length; ++i) {
        if (ordered_array[i] == flag) {
            return i;
        }
    }

    return -1;
}

// The following two functions technically have O(N^2) complexity, but it's for a value of N that's largely
// stable and also rather tiny - this could definitely be rejigged to work more efficiently, but the impact
// on runtime is currently negligible, so it wouldn't gain very much.
// If we add a lot more graphics pipeline stages, this set of functions should be rewritten to accommodate.
static VkPipelineStageFlagBits GetLogicallyEarliestGraphicsPipelineStage(VkPipelineStageFlags inflags) {
    VkPipelineStageFlagBits earliest_bit = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
    int earliest_bit_order = GetGraphicsPipelineStageLogicalOrdinal(earliest_bit);

    // Walk every bit of the 32-bit mask; sizeof() alone counts bytes, not bits
    for (std::size_t i = 0; i < sizeof(VkPipelineStageFlags) * 8; ++i) {
        VkPipelineStageFlagBits current_flag = (VkPipelineStageFlagBits)((inflags & 0x1u) << i);
        if (current_flag) {
            int new_order = GetGraphicsPipelineStageLogicalOrdinal(current_flag);
            if (new_order != -1 && new_order < earliest_bit_order) {
                earliest_bit_order = new_order;
                earliest_bit = current_flag;
            }
        }
        inflags = inflags >> 1;
    }
    return earliest_bit;
}

static VkPipelineStageFlagBits GetLogicallyLatestGraphicsPipelineStage(VkPipelineStageFlags inflags) {
    VkPipelineStageFlagBits latest_bit = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
    int latest_bit_order = GetGraphicsPipelineStageLogicalOrdinal(latest_bit);

    // Walk every bit of the 32-bit mask; sizeof() alone counts bytes, not bits
    for (std::size_t i = 0; i < sizeof(VkPipelineStageFlags) * 8; ++i) {
        VkPipelineStageFlagBits current_flag = (VkPipelineStageFlagBits)((inflags & 0x1u) << i);
        if (current_flag) {
            int new_order = GetGraphicsPipelineStageLogicalOrdinal(current_flag);
            if (new_order != -1 && new_order > latest_bit_order) {
                latest_bit_order = new_order;
                latest_bit = current_flag;
            }
        }
        inflags = inflags >> 1;
    }
    return latest_bit;
}
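
// Example (illustrative): for inflags = VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
// GetLogicallyEarliestGraphicsPipelineStage() returns VERTEX_SHADER and GetLogicallyLatestGraphicsPipelineStage()
// returns FRAGMENT_SHADER, per the ordering table above; bits without an ordinal (e.g. VK_PIPELINE_STAGE_TRANSFER_BIT,
// for which the lookup returns -1) are simply ignored.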

// Verify image barrier image state and that the image is consistent with FB image
bool CoreChecks::ValidateImageBarrierImage(layer_data *device_data, const char *funcName, GLOBAL_CB_NODE const *cb_state,
                                           VkFramebuffer framebuffer, uint32_t active_subpass,
                                           const safe_VkSubpassDescription2KHR &sub_desc, uint64_t rp_handle, uint32_t img_index,
                                           const VkImageMemoryBarrier &img_barrier) {
    bool skip = false;
    const auto &fb_state = GetFramebufferState(framebuffer);
    assert(fb_state);
    const auto img_bar_image = img_barrier.image;
    bool image_match = false;
    bool sub_image_found = false;  // Whether a corresponding attachment reference was found in the subpass
    VkImageLayout sub_image_layout = VK_IMAGE_LAYOUT_UNDEFINED;
    uint32_t attach_index = 0;
    // Verify that a framebuffer image matches barrier image
    const auto attachmentCount = fb_state->createInfo.attachmentCount;
    for (uint32_t attachment = 0; attachment < attachmentCount; ++attachment) {
        auto view_state = GetAttachmentImageViewState(fb_state, attachment);
        if (view_state && (img_bar_image == view_state->create_info.image)) {
            image_match = true;
            attach_index = attachment;
            break;
        }
    }
    if (image_match) {  // Make sure subpass is referring to matching attachment
        if (sub_desc.pDepthStencilAttachment && sub_desc.pDepthStencilAttachment->attachment == attach_index) {
            sub_image_layout = sub_desc.pDepthStencilAttachment->layout;
            sub_image_found = true;
        }
        if (!sub_image_found && GetDeviceExtensions()->vk_khr_depth_stencil_resolve) {
            const auto *resolve = lvl_find_in_chain<VkSubpassDescriptionDepthStencilResolveKHR>(sub_desc.pNext);
            if (resolve && resolve->pDepthStencilResolveAttachment &&
                resolve->pDepthStencilResolveAttachment->attachment == attach_index) {
                sub_image_layout = resolve->pDepthStencilResolveAttachment->layout;
                sub_image_found = true;
            }
        }
        if (!sub_image_found) {
            for (uint32_t j = 0; j < sub_desc.colorAttachmentCount; ++j) {
                if (sub_desc.pColorAttachments && sub_desc.pColorAttachments[j].attachment == attach_index) {
                    sub_image_layout = sub_desc.pColorAttachments[j].layout;
                    sub_image_found = true;
                    break;
                } else if (sub_desc.pResolveAttachments && sub_desc.pResolveAttachments[j].attachment == attach_index) {
                    sub_image_layout = sub_desc.pResolveAttachments[j].layout;
                    sub_image_found = true;
                    break;
                }
            }
        }
        if (!sub_image_found) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                            rp_handle, "VUID-vkCmdPipelineBarrier-image-02635",
                            "%s: Barrier pImageMemoryBarriers[%u].image (%s) is not referenced by the VkSubpassDescription for "
                            "active subpass (%u) of current renderPass (%s).",
                            funcName, img_index, device_data->report_data->FormatHandle(img_bar_image).c_str(), active_subpass,
                            device_data->report_data->FormatHandle(rp_handle).c_str());
        }
    } else {  // !image_match
        auto const fb_handle = HandleToUint64(fb_state->framebuffer);
        skip |=
            log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT, fb_handle,
                    "VUID-vkCmdPipelineBarrier-image-02635",
                    "%s: Barrier pImageMemoryBarriers[%u].image (%s) does not match an image from the current framebuffer (%s).",
                    funcName, img_index, device_data->report_data->FormatHandle(img_bar_image).c_str(),
                    device_data->report_data->FormatHandle(fb_handle).c_str());
    }
    if (img_barrier.oldLayout != img_barrier.newLayout) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-oldLayout-01181",
                        "%s: As the Image Barrier for image %s is being executed within a render pass instance, oldLayout must "
                        "equal newLayout, yet they are %s and %s.",
                        funcName, device_data->report_data->FormatHandle(img_barrier.image).c_str(),
                        string_VkImageLayout(img_barrier.oldLayout), string_VkImageLayout(img_barrier.newLayout));
    } else {
        if (sub_image_found && sub_image_layout != img_barrier.oldLayout) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                            rp_handle, "VUID-vkCmdPipelineBarrier-oldLayout-02636",
                            "%s: Barrier pImageMemoryBarriers[%u].image (%s) is referenced by the VkSubpassDescription for active "
                            "subpass (%u) of current renderPass (%s) as having layout %s, but image barrier has layout %s.",
                            funcName, img_index, device_data->report_data->FormatHandle(img_bar_image).c_str(), active_subpass,
                            device_data->report_data->FormatHandle(rp_handle).c_str(), string_VkImageLayout(sub_image_layout),
                            string_VkImageLayout(img_barrier.oldLayout));
        }
    }
    return skip;
}

// Validate image barriers within a renderPass
bool CoreChecks::ValidateRenderPassImageBarriers(layer_data *device_data, const char *funcName, GLOBAL_CB_NODE *cb_state,
                                                 uint32_t active_subpass, const safe_VkSubpassDescription2KHR &sub_desc,
                                                 uint64_t rp_handle, const safe_VkSubpassDependency2KHR *dependencies,
                                                 const std::vector<uint32_t> &self_dependencies, uint32_t image_mem_barrier_count,
                                                 const VkImageMemoryBarrier *image_barriers) {
    bool skip = false;
    for (uint32_t i = 0; i < image_mem_barrier_count; ++i) {
        const auto &img_barrier = image_barriers[i];
        const auto &img_src_access_mask = img_barrier.srcAccessMask;
        const auto &img_dst_access_mask = img_barrier.dstAccessMask;
        bool access_mask_match = false;
        for (const auto self_dep_index : self_dependencies) {
            const auto &sub_dep = dependencies[self_dep_index];
            access_mask_match = (img_src_access_mask == (sub_dep.srcAccessMask & img_src_access_mask)) &&
                                (img_dst_access_mask == (sub_dep.dstAccessMask & img_dst_access_mask));
            if (access_mask_match) break;
        }
        if (!access_mask_match) {
            std::stringstream self_dep_ss;
            stream_join(self_dep_ss, ", ", self_dependencies);
            skip |= log_msg(
                device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle,
                "VUID-vkCmdPipelineBarrier-pDependencies-02285",
                "%s: Barrier pImageMemoryBarriers[%u].srcAccessMask(0x%X) is not a subset of VkSubpassDependency "
                "srcAccessMask of subpass %u of renderPass %s. Candidate VkSubpassDependency entries are pDependencies [%s].",
                funcName, i, img_src_access_mask, active_subpass, device_data->report_data->FormatHandle(rp_handle).c_str(),
                self_dep_ss.str().c_str());
            skip |= log_msg(
                device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle,
                "VUID-vkCmdPipelineBarrier-pDependencies-02285",
                "%s: Barrier pImageMemoryBarriers[%u].dstAccessMask(0x%X) is not a subset of VkSubpassDependency "
                "dstAccessMask of subpass %u of renderPass %s. Candidate VkSubpassDependency entries are pDependencies [%s].",
                funcName, i, img_dst_access_mask, active_subpass, device_data->report_data->FormatHandle(rp_handle).c_str(),
                self_dep_ss.str().c_str());
        }
        if (VK_QUEUE_FAMILY_IGNORED != img_barrier.srcQueueFamilyIndex ||
            VK_QUEUE_FAMILY_IGNORED != img_barrier.dstQueueFamilyIndex) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                            rp_handle, "VUID-vkCmdPipelineBarrier-srcQueueFamilyIndex-01182",
                            "%s: Barrier pImageMemoryBarriers[%u].srcQueueFamilyIndex is %u and "
                            "pImageMemoryBarriers[%u].dstQueueFamilyIndex is %u but both must be VK_QUEUE_FAMILY_IGNORED.",
                            funcName, i, img_barrier.srcQueueFamilyIndex, i, img_barrier.dstQueueFamilyIndex);
        }
        // Secondary CBs can have null framebuffer so queue up validation in that case 'til FB is known
        if (VK_NULL_HANDLE == cb_state->activeFramebuffer) {
            assert(VK_COMMAND_BUFFER_LEVEL_SECONDARY == cb_state->createInfo.level);
            // Secondary CB case without a framebuffer specified: delay validation until execute time
            cb_state->cmd_execute_commands_functions.emplace_back([=](GLOBAL_CB_NODE *primary_cb, VkFramebuffer fb) {
                return ValidateImageBarrierImage(device_data, funcName, cb_state, fb, active_subpass, sub_desc, rp_handle, i,
                                                 img_barrier);
            });
        } else {
            skip |= ValidateImageBarrierImage(device_data, funcName, cb_state, cb_state->activeFramebuffer, active_subpass,
                                              sub_desc, rp_handle, i, img_barrier);
        }
    }
    return skip;
}
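
// Deferred-validation note (an interpretation of the lambda above): the stored callback is expected to run when
// the secondary command buffer is executed via vkCmdExecuteCommands, at which point the primary's framebuffer is
// finally known and passed in as 'fb'; the unused 'primary_cb' parameter is part of that callback signature.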

// Validate VUs for Pipeline Barriers that are within a renderPass
// Pre: cb_state->activeRenderPass must be a pointer to valid renderPass state
bool CoreChecks::ValidateRenderPassPipelineBarriers(layer_data *device_data, const char *funcName, GLOBAL_CB_NODE *cb_state,
                                                    VkPipelineStageFlags src_stage_mask, VkPipelineStageFlags dst_stage_mask,
                                                    VkDependencyFlags dependency_flags, uint32_t mem_barrier_count,
                                                    const VkMemoryBarrier *mem_barriers, uint32_t buffer_mem_barrier_count,
                                                    const VkBufferMemoryBarrier *buffer_mem_barriers,
                                                    uint32_t image_mem_barrier_count, const VkImageMemoryBarrier *image_barriers) {
    bool skip = false;
    const auto rp_state = cb_state->activeRenderPass;
    const auto active_subpass = cb_state->activeSubpass;
    auto rp_handle = HandleToUint64(rp_state->renderPass);
    const auto &self_dependencies = rp_state->self_dependencies[active_subpass];
    const auto &dependencies = rp_state->createInfo.pDependencies;
    if (self_dependencies.size() == 0) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                        rp_handle, "VUID-vkCmdPipelineBarrier-pDependencies-02285",
                        "%s: Barriers cannot be set during subpass %u of renderPass %s with no self-dependency specified.",
                        funcName, active_subpass, device_data->report_data->FormatHandle(rp_handle).c_str());
    } else {
        // Grab ref to current subpassDescription up-front for use below
        const auto &sub_desc = rp_state->createInfo.pSubpasses[active_subpass];
        // Look for matching mask in any self-dependency
        bool stage_mask_match = false;
        for (const auto self_dep_index : self_dependencies) {
            const auto &sub_dep = dependencies[self_dep_index];
            const auto &sub_src_stage_mask = ExpandPipelineStageFlags(device_data->device_extensions, sub_dep.srcStageMask);
            const auto &sub_dst_stage_mask = ExpandPipelineStageFlags(device_data->device_extensions, sub_dep.dstStageMask);
            stage_mask_match = ((sub_src_stage_mask == VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) ||
                                (src_stage_mask == (sub_src_stage_mask & src_stage_mask))) &&
                               ((sub_dst_stage_mask == VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) ||
                                (dst_stage_mask == (sub_dst_stage_mask & dst_stage_mask)));
            if (stage_mask_match) break;
        }
        if (!stage_mask_match) {
            std::stringstream self_dep_ss;
            stream_join(self_dep_ss, ", ", self_dependencies);
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                            rp_handle, "VUID-vkCmdPipelineBarrier-pDependencies-02285",
                            "%s: Barrier srcStageMask(0x%X) is not a subset of VkSubpassDependency srcStageMask of any "
                            "self-dependency of subpass %u of renderPass %s for which dstStageMask is also a subset. "
                            "Candidate VkSubpassDependency entries are pDependencies [%s].",
                            funcName, src_stage_mask, active_subpass, device_data->report_data->FormatHandle(rp_handle).c_str(),
                            self_dep_ss.str().c_str());
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                            rp_handle, "VUID-vkCmdPipelineBarrier-pDependencies-02285",
                            "%s: Barrier dstStageMask(0x%X) is not a subset of VkSubpassDependency dstStageMask of any "
                            "self-dependency of subpass %u of renderPass %s for which srcStageMask is also a subset. "
                            "Candidate VkSubpassDependency entries are pDependencies [%s].",
                            funcName, dst_stage_mask, active_subpass, device_data->report_data->FormatHandle(rp_handle).c_str(),
                            self_dep_ss.str().c_str());
        }

        if (0 != buffer_mem_barrier_count) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                            rp_handle, "VUID-vkCmdPipelineBarrier-bufferMemoryBarrierCount-01178",
                            "%s: bufferMemoryBarrierCount is non-zero (%u) for subpass %u of renderPass %s.", funcName,
                            buffer_mem_barrier_count, active_subpass, device_data->report_data->FormatHandle(rp_handle).c_str());
        }
        for (uint32_t i = 0; i < mem_barrier_count; ++i) {
            const auto &mb_src_access_mask = mem_barriers[i].srcAccessMask;
            const auto &mb_dst_access_mask = mem_barriers[i].dstAccessMask;
            bool access_mask_match = false;
            for (const auto self_dep_index : self_dependencies) {
                const auto &sub_dep = dependencies[self_dep_index];
                access_mask_match = (mb_src_access_mask == (sub_dep.srcAccessMask & mb_src_access_mask)) &&
                                    (mb_dst_access_mask == (sub_dep.dstAccessMask & mb_dst_access_mask));
                if (access_mask_match) break;
            }

            if (!access_mask_match) {
                std::stringstream self_dep_ss;
                stream_join(self_dep_ss, ", ", self_dependencies);
                skip |= log_msg(
                    device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle,
                    "VUID-vkCmdPipelineBarrier-pDependencies-02285",
                    "%s: Barrier pMemoryBarriers[%u].srcAccessMask(0x%X) is not a subset of VkSubpassDependency srcAccessMask "
                    "for any self-dependency of subpass %u of renderPass %s for which dstAccessMask is also a subset. "
                    "Candidate VkSubpassDependency entries are pDependencies [%s].",
                    funcName, i, mb_src_access_mask, active_subpass, device_data->report_data->FormatHandle(rp_handle).c_str(),
                    self_dep_ss.str().c_str());
                skip |= log_msg(
                    device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle,
                    "VUID-vkCmdPipelineBarrier-pDependencies-02285",
                    "%s: Barrier pMemoryBarriers[%u].dstAccessMask(0x%X) is not a subset of VkSubpassDependency dstAccessMask "
                    "for any self-dependency of subpass %u of renderPass %s for which srcAccessMask is also a subset. "
                    "Candidate VkSubpassDependency entries are pDependencies [%s].",
                    funcName, i, mb_dst_access_mask, active_subpass, device_data->report_data->FormatHandle(rp_handle).c_str(),
                    self_dep_ss.str().c_str());
            }
        }

        skip |= ValidateRenderPassImageBarriers(device_data, funcName, cb_state, active_subpass, sub_desc, rp_handle, dependencies,
                                                self_dependencies, image_mem_barrier_count, image_barriers);

        bool flag_match = false;
        for (const auto self_dep_index : self_dependencies) {
            const auto &sub_dep = dependencies[self_dep_index];
            flag_match = sub_dep.dependencyFlags == dependency_flags;
            if (flag_match) break;
        }
        if (!flag_match) {
            std::stringstream self_dep_ss;
            stream_join(self_dep_ss, ", ", self_dependencies);
            skip |= log_msg(
                device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle,
                "VUID-vkCmdPipelineBarrier-pDependencies-02285",
                "%s: dependencyFlags param (0x%X) does not equal VkSubpassDependency dependencyFlags value for any "
                "self-dependency of subpass %u of renderPass %s. Candidate VkSubpassDependency entries are pDependencies [%s].",
                funcName, dependency_flags, cb_state->activeSubpass, device_data->report_data->FormatHandle(rp_handle).c_str(),
                self_dep_ss.str().c_str());
        }
    }
    return skip;
}
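
// Usage sketch (hypothetical application-side code, for orientation only): an in-render-pass barrier is legal only
// when the render pass declared a matching self-dependency, e.g.
//     VkSubpassDependency self_dep = {};
//     self_dep.srcSubpass = 0;  // srcSubpass == dstSubpass makes this a self-dependency
//     self_dep.dstSubpass = 0;
//     self_dep.srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
//     self_dep.dstStageMask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
//     self_dep.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
//     self_dep.dstAccessMask = VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
//     self_dep.dependencyFlags = VK_DEPENDENCY_BY_REGION_BIT;
// A vkCmdPipelineBarrier recorded inside subpass 0 must then use stage and access masks that are subsets of the
// masks above, plus identical dependencyFlags, or the checks in this function will fire.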

// Array mapping each accessMask bit to the stageMask bit(s) that support it
//  accessMask active bit position (0-27) maps to index
const static VkPipelineStageFlags AccessMaskToPipeStage[28] = {
    // VK_ACCESS_INDIRECT_COMMAND_READ_BIT = 0
    VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT,
    // VK_ACCESS_INDEX_READ_BIT = 1
    VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
    // VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT = 2
    VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
    // VK_ACCESS_UNIFORM_READ_BIT = 3
    VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
        VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
        VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV |
        VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV | VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV,
    // VK_ACCESS_INPUT_ATTACHMENT_READ_BIT = 4
    VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
    // VK_ACCESS_SHADER_READ_BIT = 5
    VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
        VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
        VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV |
        VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV | VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV,
    // VK_ACCESS_SHADER_WRITE_BIT = 6
    VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
        VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
        VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV |
        VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV | VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV,
    // VK_ACCESS_COLOR_ATTACHMENT_READ_BIT = 7
    VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
    // VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT = 8
    VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
    // VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT = 9
    VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
    // VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT = 10
    VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
    // VK_ACCESS_TRANSFER_READ_BIT = 11
    VK_PIPELINE_STAGE_TRANSFER_BIT,
    // VK_ACCESS_TRANSFER_WRITE_BIT = 12
    VK_PIPELINE_STAGE_TRANSFER_BIT,
    // VK_ACCESS_HOST_READ_BIT = 13
    VK_PIPELINE_STAGE_HOST_BIT,
    // VK_ACCESS_HOST_WRITE_BIT = 14
    VK_PIPELINE_STAGE_HOST_BIT,
    // VK_ACCESS_MEMORY_READ_BIT = 15
    VK_ACCESS_FLAG_BITS_MAX_ENUM,  // Always match
    // VK_ACCESS_MEMORY_WRITE_BIT = 16
    VK_ACCESS_FLAG_BITS_MAX_ENUM,  // Always match
    // VK_ACCESS_COMMAND_PROCESS_READ_BIT_NVX = 17
    VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX,
    // VK_ACCESS_COMMAND_PROCESS_WRITE_BIT_NVX = 18
    VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX,
    // VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT = 19
    VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
    // VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT = 20
    VK_PIPELINE_STAGE_CONDITIONAL_RENDERING_BIT_EXT,
    // VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_NV = 21
    VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV | VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_NV,
    // VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_NV = 22
    VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_NV,
    // VK_ACCESS_SHADING_RATE_IMAGE_READ_BIT_NV = 23
    VK_PIPELINE_STAGE_SHADING_RATE_IMAGE_BIT_NV,
    // VK_ACCESS_FRAGMENT_DENSITY_MAP_READ_BIT_EXT = 24
    VK_PIPELINE_STAGE_FRAGMENT_DENSITY_PROCESS_BIT_EXT,
    // VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT = 25
    VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT,
    // VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT = 26
    VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT,
    // VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT = 27
    VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT,
};

// Verify that all bits of access_mask are supported by the src_stage_mask
static bool ValidateAccessMaskPipelineStage(const DeviceExtensions &extensions, VkAccessFlags access_mask,
                                            VkPipelineStageFlags stage_mask) {
    // Early out if all commands set, or access_mask is 0
    if ((stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) || (0 == access_mask)) return true;

    stage_mask = ExpandPipelineStageFlags(extensions, stage_mask);
    int index = 0;
    // For each of the set bits in access_mask, make sure that supporting stage mask bit(s) are set
    while (access_mask) {
        index = (u_ffs(access_mask) - 1);
        assert(index >= 0);
        // Explicit "== 0" compare avoids an implicit bool-conversion warning from MSVC
        if ((AccessMaskToPipeStage[index] & stage_mask) == 0) return false;  // early out
        access_mask &= ~(1u << index);                                       // Mask off bit that's been checked
    }
    return true;
}
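
// Example (illustrative): VK_ACCESS_UNIFORM_READ_BIT is bit 3, so
//     ValidateAccessMaskPipelineStage(ext, VK_ACCESS_UNIFORM_READ_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT)
// returns true (AccessMaskToPipeStage[3] includes the vertex shader stage), while pairing the same access
// mask with only VK_PIPELINE_STAGE_TRANSFER_BIT returns false, since no transfer stage supports uniform reads.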
8031 
8032 namespace barrier_queue_families {
8033 enum VuIndex {
8034     kSrcOrDstMustBeIgnore,
8035     kSpecialOrIgnoreOnly,
8036     kSrcIgnoreRequiresDstIgnore,
8037     kDstValidOrSpecialIfNotIgnore,
8038     kSrcValidOrSpecialIfNotIgnore,
8039     kSrcAndDestMustBeIgnore,
8040     kBothIgnoreOrBothValid,
8041     kSubmitQueueMustMatchSrcOrDst
8042 };
8043 static const char *vu_summary[] = {"Source or destination queue family must be ignored.",
8044                                    "Source or destination queue family must be special or ignored.",
8045                                    "Destination queue family must be ignored if source queue family is.",
8046                                    "Destination queue family must be valid, ignored, or special.",
8047                                    "Source queue family must be valid, ignored, or special.",
8048                                    "Source and destination queue family must both be ignored.",
8049                                    "Source and destination queue family must both be ignore or both valid.",
8050                                    "Source or destination queue family must match submit queue family, if not ignored."};
8051 
8052 static const std::string image_error_codes[] = {
8053     "VUID-VkImageMemoryBarrier-image-01381",  //   kSrcOrDstMustBeIgnore
8054     "VUID-VkImageMemoryBarrier-image-01766",  //   kSpecialOrIgnoreOnly
8055     "VUID-VkImageMemoryBarrier-image-01201",  //   kSrcIgnoreRequiresDstIgnore
8056     "VUID-VkImageMemoryBarrier-image-01768",  //   kDstValidOrSpecialIfNotIgnore
8057     "VUID-VkImageMemoryBarrier-image-01767",  //   kSrcValidOrSpecialIfNotIgnore
8058     "VUID-VkImageMemoryBarrier-image-01199",  //   kSrcAndDestMustBeIgnore
8059     "VUID-VkImageMemoryBarrier-image-01200",  //   kBothIgnoreOrBothValid
8060     "VUID-VkImageMemoryBarrier-image-01205",  //   kSubmitQueueMustMatchSrcOrDst
8061 };
8062 
8063 static const std::string buffer_error_codes[] = {
8064     "VUID-VkBufferMemoryBarrier-buffer-01191",  //  kSrcOrDstMustBeIgnore
8065     "VUID-VkBufferMemoryBarrier-buffer-01763",  //  kSpecialOrIgnoreOnly
8066     "VUID-VkBufferMemoryBarrier-buffer-01193",  //  kSrcIgnoreRequiresDstIgnore
8067     "VUID-VkBufferMemoryBarrier-buffer-01765",  //  kDstValidOrSpecialIfNotIgnore
8068     "VUID-VkBufferMemoryBarrier-buffer-01764",  //  kSrcValidOrSpecialIfNotIgnore
8069     "VUID-VkBufferMemoryBarrier-buffer-01190",  //  kSrcAndDestMustBeIgnore
8070     "VUID-VkBufferMemoryBarrier-buffer-01192",  //  kBothIgnoreOrBothValid
8071     "VUID-VkBufferMemoryBarrier-buffer-01196",  //  kSubmitQueueMustMatchSrcOrDst
8072 };
8073 
8074 class ValidatorState {
8075    public:
ValidatorState(const layer_data * device_data,const char * func_name,const GLOBAL_CB_NODE * cb_state,const uint64_t barrier_handle64,const VkSharingMode sharing_mode,const VulkanObjectType object_type,const std::string * val_codes)8076     ValidatorState(const layer_data *device_data, const char *func_name, const GLOBAL_CB_NODE *cb_state,
8077                    const uint64_t barrier_handle64, const VkSharingMode sharing_mode, const VulkanObjectType object_type,
8078                    const std::string *val_codes)
8079         : report_data_(device_data->report_data),
8080           func_name_(func_name),
8081           cb_handle64_(HandleToUint64(cb_state->commandBuffer)),
8082           barrier_handle64_(barrier_handle64),
8083           sharing_mode_(sharing_mode),
8084           object_type_(object_type),
8085           val_codes_(val_codes),
8086           limit_(static_cast<uint32_t>(device_data->physical_device_state->queue_family_properties.size())),
8087           mem_ext_(device_data->device_extensions.vk_khr_external_memory) {}
8088 
    // Create a validator state from an image state... reducing the image-specific details to the generic version.
    ValidatorState(const layer_data *device_data, const char *func_name, const GLOBAL_CB_NODE *cb_state,
                   const VkImageMemoryBarrier *barrier, const IMAGE_STATE *state)
        : ValidatorState(device_data, func_name, cb_state, HandleToUint64(barrier->image), state->createInfo.sharingMode,
                         kVulkanObjectTypeImage, image_error_codes) {}

    // Create a validator state from a buffer state... reducing the buffer-specific details to the generic version.
    ValidatorState(const layer_data *device_data, const char *func_name, const GLOBAL_CB_NODE *cb_state,
                   const VkBufferMemoryBarrier *barrier, const BUFFER_STATE *state)
        : ValidatorState(device_data, func_name, cb_state, HandleToUint64(barrier->buffer), state->createInfo.sharingMode,
                         kVulkanObjectTypeBuffer, buffer_error_codes) {}

    // Log the message using boilerplate from the object state, and VU-specific information from the VuIndex argument.
    // One- and two-family versions; in the single-family version, param_name holds the name of the passed parameter.
    bool LogMsg(VuIndex vu_index, uint32_t family, const char *param_name) const {
        const std::string &val_code = val_codes_[vu_index];
        const char *annotation = GetFamilyAnnotation(family);
        return log_msg(report_data_, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, cb_handle64_,
                       val_code, "%s: Barrier using %s %s created with sharingMode %s, has %s %u%s. %s", func_name_,
                       GetTypeString(), report_data_->FormatHandle(barrier_handle64_).c_str(), GetModeString(), param_name, family,
                       annotation, vu_summary[vu_index]);
    }

    bool LogMsg(VuIndex vu_index, uint32_t src_family, uint32_t dst_family) const {
        const std::string &val_code = val_codes_[vu_index];
        const char *src_annotation = GetFamilyAnnotation(src_family);
        const char *dst_annotation = GetFamilyAnnotation(dst_family);
        return log_msg(
            report_data_, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, cb_handle64_, val_code,
            "%s: Barrier using %s %s created with sharingMode %s, has srcQueueFamilyIndex %u%s and dstQueueFamilyIndex %u%s. %s",
            func_name_, GetTypeString(), report_data_->FormatHandle(barrier_handle64_).c_str(), GetModeString(), src_family,
            src_annotation, dst_family, dst_annotation, vu_summary[vu_index]);
    }

    // This abstract VU can only be tested at submit time, thus we need a callback from the closure containing the needed
    // data. Note that the mem_barrier is copied to the closure, as the lambda's lifespan exceeds the guarantees of validity
    // for application input.
    static bool ValidateAtQueueSubmit(const VkQueue queue, const layer_data *device_data, uint32_t src_family, uint32_t dst_family,
                                      const ValidatorState &val) {
        auto queue_data_it = device_data->queueMap.find(queue);
        if (queue_data_it == device_data->queueMap.end()) return false;

        uint32_t queue_family = queue_data_it->second.queueFamilyIndex;
        if ((src_family != queue_family) && (dst_family != queue_family)) {
            const std::string &val_code = val.val_codes_[kSubmitQueueMustMatchSrcOrDst];
            const char *src_annotation = val.GetFamilyAnnotation(src_family);
            const char *dst_annotation = val.GetFamilyAnnotation(dst_family);
            return log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
                           HandleToUint64(queue), val_code,
                           "%s: Barrier submitted to queue with family index %u, using %s %s created with sharingMode %s, has "
                           "srcQueueFamilyIndex %u%s and dstQueueFamilyIndex %u%s. %s",
                           "vkQueueSubmit", queue_family, val.GetTypeString(),
                           device_data->report_data->FormatHandle(val.barrier_handle64_).c_str(), val.GetModeString(), src_family,
                           src_annotation, dst_family, dst_annotation, vu_summary[kSubmitQueueMustMatchSrcOrDst]);
        }
        return false;
    }
    // Logical helpers for semantic clarity
    inline bool KhrExternalMem() const { return mem_ext_; }
    inline bool IsValid(uint32_t queue_family) const { return (queue_family < limit_); }
    inline bool IsValidOrSpecial(uint32_t queue_family) const {
        return IsValid(queue_family) || (mem_ext_ && IsSpecial(queue_family));
    }
    inline bool IsIgnored(uint32_t queue_family) const { return queue_family == VK_QUEUE_FAMILY_IGNORED; }

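    // Illustrative note (editor's sketch, assuming IsSpecial() matches the EXTERNAL/FOREIGN sentinel values as
    // elsewhere in this file): for a device with 2 queue families and VK_KHR_external_memory enabled,
    //     IsValid(0)                                     -> true   (0 < limit_)
    //     IsValid(7)                                     -> false  (out of range)
    //     IsValidOrSpecial(VK_QUEUE_FAMILY_EXTERNAL_KHR) -> true, but only because mem_ext_ is true
    //     IsIgnored(VK_QUEUE_FAMILY_IGNORED)             -> true
    // Without the extension, the "special" sentinels are treated as plain invalid indices.
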
    // Helpers for LogMsg (and log_msg)
    const char *GetModeString() const { return string_VkSharingMode(sharing_mode_); }

    // Descriptive text for the various types of queue family index
    const char *GetFamilyAnnotation(uint32_t family) const {
        const char *external = " (VK_QUEUE_FAMILY_EXTERNAL_KHR)";
        const char *foreign = " (VK_QUEUE_FAMILY_FOREIGN_EXT)";
        const char *ignored = " (VK_QUEUE_FAMILY_IGNORED)";
        const char *valid = " (VALID)";
        const char *invalid = " (INVALID)";
        switch (family) {
            case VK_QUEUE_FAMILY_EXTERNAL_KHR:
                return external;
            case VK_QUEUE_FAMILY_FOREIGN_EXT:
                return foreign;
            case VK_QUEUE_FAMILY_IGNORED:
                return ignored;
            default:
                if (IsValid(family)) {
                    return valid;
                }
                return invalid;
        }
    }
    const char *GetTypeString() const { return object_string[object_type_]; }
    VkSharingMode GetSharingMode() const { return sharing_mode_; }

   protected:
    const debug_report_data *const report_data_;
    const char *const func_name_;
    const uint64_t cb_handle64_;
    const uint64_t barrier_handle64_;
    const VkSharingMode sharing_mode_;
    const VulkanObjectType object_type_;
    const std::string *val_codes_;
    const uint32_t limit_;
    const bool mem_ext_;
};

bool Validate(const layer_data *device_data, const char *func_name, GLOBAL_CB_NODE *cb_state, const ValidatorState &val,
              const uint32_t src_queue_family, const uint32_t dst_queue_family) {
    bool skip = false;

    const bool mode_concurrent = val.GetSharingMode() == VK_SHARING_MODE_CONCURRENT;
    const bool src_ignored = val.IsIgnored(src_queue_family);
    const bool dst_ignored = val.IsIgnored(dst_queue_family);
    if (val.KhrExternalMem()) {
        if (mode_concurrent) {
            if (!(src_ignored || dst_ignored)) {
                skip |= val.LogMsg(kSrcOrDstMustBeIgnore, src_queue_family, dst_queue_family);
            }
            if ((src_ignored && !(dst_ignored || IsSpecial(dst_queue_family))) ||
                (dst_ignored && !(src_ignored || IsSpecial(src_queue_family)))) {
                skip |= val.LogMsg(kSpecialOrIgnoreOnly, src_queue_family, dst_queue_family);
            }
        } else {
            // VK_SHARING_MODE_EXCLUSIVE
            if (src_ignored && !dst_ignored) {
                skip |= val.LogMsg(kSrcIgnoreRequiresDstIgnore, src_queue_family, dst_queue_family);
            }
            if (!dst_ignored && !val.IsValidOrSpecial(dst_queue_family)) {
                skip |= val.LogMsg(kDstValidOrSpecialIfNotIgnore, dst_queue_family, "dstQueueFamilyIndex");
            }
            if (!src_ignored && !val.IsValidOrSpecial(src_queue_family)) {
                skip |= val.LogMsg(kSrcValidOrSpecialIfNotIgnore, src_queue_family, "srcQueueFamilyIndex");
            }
        }
    } else {
        // No memory extension
        if (mode_concurrent) {
            if (!src_ignored || !dst_ignored) {
                skip |= val.LogMsg(kSrcAndDestMustBeIgnore, src_queue_family, dst_queue_family);
            }
        } else {
            // VK_SHARING_MODE_EXCLUSIVE
            if (!((src_ignored && dst_ignored) || (val.IsValid(src_queue_family) && val.IsValid(dst_queue_family)))) {
                skip |= val.LogMsg(kBothIgnoreOrBothValid, src_queue_family, dst_queue_family);
            }
        }
    }
    if (!mode_concurrent && !src_ignored && !dst_ignored) {
        // Only enqueue the submit time check if it is needed. If more submit time checks are added, change the criteria.
        // TODO: create a better named list, or rename the submit time lists to something that matches the broader usage...
        // Note: if we want to create a semantic that separates state lookup, validation, and state update, this should go
        // to a local queue of update_state_actions or something.
        cb_state->eventUpdates.emplace_back([device_data, src_queue_family, dst_queue_family, val](VkQueue queue) {
            return ValidatorState::ValidateAtQueueSubmit(queue, device_data, src_queue_family, dst_queue_family, val);
        });
    }
    return skip;
}
}  // namespace barrier_queue_families

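// Illustrative example (editor's sketch, not part of the layer): for a buffer created with
// VK_SHARING_MODE_EXCLUSIVE, a queue family ownership transfer from family 0 to family 1 is expressed as:
//
//     VkBufferMemoryBarrier barrier = {};
//     barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
//     barrier.srcQueueFamilyIndex = 0;  // release on a queue from family 0...
//     barrier.dstQueueFamilyIndex = 1;  // ...acquire on a queue from family 1
//     barrier.buffer = buffer;          // hypothetical handle
//     barrier.offset = 0;
//     barrier.size = VK_WHOLE_SIZE;
//
// Because neither index is VK_QUEUE_FAMILY_IGNORED and the mode is exclusive, Validate() above enqueues the
// deferred kSubmitQueueMustMatchSrcOrDst check, which fires at vkQueueSubmit time if the submitting queue's
// family is neither 0 nor 1.
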
// Type specific wrapper for image barriers
bool ValidateBarrierQueueFamilies(const layer_data *device_data, const char *func_name, GLOBAL_CB_NODE *cb_state,
                                  const VkImageMemoryBarrier *barrier, const IMAGE_STATE *state_data) {
    // State data is required
    if (!state_data) {
        return false;
    }

    // Create the validator state from the image state
    barrier_queue_families::ValidatorState val(device_data, func_name, cb_state, barrier, state_data);
    const uint32_t src_queue_family = barrier->srcQueueFamilyIndex;
    const uint32_t dst_queue_family = barrier->dstQueueFamilyIndex;
    return barrier_queue_families::Validate(device_data, func_name, cb_state, val, src_queue_family, dst_queue_family);
}

// Type specific wrapper for buffer barriers
bool ValidateBarrierQueueFamilies(const layer_data *device_data, const char *func_name, GLOBAL_CB_NODE *cb_state,
                                  const VkBufferMemoryBarrier *barrier, const BUFFER_STATE *state_data) {
    // State data is required
    if (!state_data) {
        return false;
    }

    // Create the validator state from the buffer state
    barrier_queue_families::ValidatorState val(device_data, func_name, cb_state, barrier, state_data);
    const uint32_t src_queue_family = barrier->srcQueueFamilyIndex;
    const uint32_t dst_queue_family = barrier->dstQueueFamilyIndex;
    return barrier_queue_families::Validate(device_data, func_name, cb_state, val, src_queue_family, dst_queue_family);
}

bool CoreChecks::ValidateBarriers(layer_data *device_data, const char *funcName, GLOBAL_CB_NODE *cb_state,
                                  VkPipelineStageFlags src_stage_mask, VkPipelineStageFlags dst_stage_mask,
                                  uint32_t memBarrierCount, const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
                                  const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
                                  const VkImageMemoryBarrier *pImageMemBarriers) {
    bool skip = false;
    for (uint32_t i = 0; i < memBarrierCount; ++i) {
        const auto &mem_barrier = pMemBarriers[i];
        if (!ValidateAccessMaskPipelineStage(device_data->device_extensions, mem_barrier.srcAccessMask, src_stage_mask)) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-pMemoryBarriers-01184",
                            "%s: pMemBarriers[%d].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X).", funcName, i,
                            mem_barrier.srcAccessMask, src_stage_mask);
        }
        if (!ValidateAccessMaskPipelineStage(device_data->device_extensions, mem_barrier.dstAccessMask, dst_stage_mask)) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-pMemoryBarriers-01185",
                            "%s: pMemBarriers[%d].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X).", funcName, i,
                            mem_barrier.dstAccessMask, dst_stage_mask);
        }
    }
    for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
        auto mem_barrier = &pImageMemBarriers[i];
        if (!ValidateAccessMaskPipelineStage(device_data->device_extensions, mem_barrier->srcAccessMask, src_stage_mask)) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-pMemoryBarriers-01184",
                            "%s: pImageMemBarriers[%d].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X).", funcName, i,
                            mem_barrier->srcAccessMask, src_stage_mask);
        }
        if (!ValidateAccessMaskPipelineStage(device_data->device_extensions, mem_barrier->dstAccessMask, dst_stage_mask)) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-pMemoryBarriers-01185",
                            "%s: pImageMemBarriers[%d].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X).", funcName, i,
                            mem_barrier->dstAccessMask, dst_stage_mask);
        }

        auto image_data = GetImageState(mem_barrier->image);
        skip |= ValidateBarrierQueueFamilies(device_data, funcName, cb_state, mem_barrier, image_data);

        if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_state->commandBuffer), "VUID-VkImageMemoryBarrier-newLayout-01198",
                            "%s: Image Layout cannot be transitioned to UNDEFINED or PREINITIALIZED.", funcName);
        }

        if (image_data) {
            // There is no VUID for this, but there is blanket text:
            //     "Non-sparse resources must be bound completely and contiguously to a single VkDeviceMemory object before
            //     recording commands in a command buffer."
            // TODO: Update this when VUID is defined
            skip |= ValidateMemoryIsBoundToImage(device_data, image_data, funcName, kVUIDUndefined);

            auto aspect_mask = mem_barrier->subresourceRange.aspectMask;
            skip |= ValidateImageAspectMask(device_data, image_data->image, image_data->createInfo.format, aspect_mask, funcName);

            std::string param_name = "pImageMemoryBarriers[" + std::to_string(i) + "].subresourceRange";
            skip |= ValidateImageBarrierSubresourceRange(device_data, image_data, mem_barrier->subresourceRange, funcName,
                                                         param_name.c_str());
        }
    }

    for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
        auto mem_barrier = &pBufferMemBarriers[i];
        if (!mem_barrier) continue;

        if (!ValidateAccessMaskPipelineStage(device_data->device_extensions, mem_barrier->srcAccessMask, src_stage_mask)) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-pMemoryBarriers-01184",
                            "%s: pBufferMemBarriers[%d].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X).", funcName, i,
                            mem_barrier->srcAccessMask, src_stage_mask);
        }
        if (!ValidateAccessMaskPipelineStage(device_data->device_extensions, mem_barrier->dstAccessMask, dst_stage_mask)) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-pMemoryBarriers-01185",
                            "%s: pBufferMemBarriers[%d].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X).", funcName, i,
                            mem_barrier->dstAccessMask, dst_stage_mask);
        }
        // Validate buffer barrier queue family indices
        auto buffer_state = GetBufferState(mem_barrier->buffer);
        skip |= ValidateBarrierQueueFamilies(device_data, funcName, cb_state, mem_barrier, buffer_state);

        if (buffer_state) {
            // There is no VUID for this, but there is blanket text:
            //     "Non-sparse resources must be bound completely and contiguously to a single VkDeviceMemory object before
            //     recording commands in a command buffer"
            // TODO: Update this when VUID is defined
            skip |= ValidateMemoryIsBoundToBuffer(device_data, buffer_state, funcName, kVUIDUndefined);

            auto buffer_size = buffer_state->createInfo.size;
            if (mem_barrier->offset >= buffer_size) {
                skip |=
                    log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_state->commandBuffer), "VUID-VkBufferMemoryBarrier-offset-01187",
                            "%s: Buffer Barrier %s has offset 0x%" PRIx64 " which is not less than total size 0x%" PRIx64 ".",
                            funcName, device_data->report_data->FormatHandle(mem_barrier->buffer).c_str(),
                            HandleToUint64(mem_barrier->offset), HandleToUint64(buffer_size));
            } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) {
                skip |=
                    log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_state->commandBuffer), "VUID-VkBufferMemoryBarrier-size-01189",
                            "%s: Buffer Barrier %s has offset 0x%" PRIx64 " and size 0x%" PRIx64
                            " whose sum is greater than total size 0x%" PRIx64 ".",
                            funcName, device_data->report_data->FormatHandle(mem_barrier->buffer).c_str(),
                            HandleToUint64(mem_barrier->offset), HandleToUint64(mem_barrier->size), HandleToUint64(buffer_size));
            }
        }
    }

    skip |= ValidateBarriersQFOTransferUniqueness(device_data, funcName, cb_state, bufferBarrierCount, pBufferMemBarriers,
                                                  imageMemBarrierCount, pImageMemBarriers);

    return skip;
}

bool CoreChecks::ValidateEventStageMask(VkQueue queue, GLOBAL_CB_NODE *pCB, uint32_t eventCount, size_t firstEventIndex,
                                        VkPipelineStageFlags sourceStageMask) {
    bool skip = false;
    VkPipelineStageFlags stageMask = 0;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
    for (uint32_t i = 0; i < eventCount; ++i) {
        auto event = pCB->events[firstEventIndex + i];
        auto queue_data = dev_data->queueMap.find(queue);
        if (queue_data == dev_data->queueMap.end()) return false;
        auto event_data = queue_data->second.eventToStageMap.find(event);
        if (event_data != queue_data->second.eventToStageMap.end()) {
            stageMask |= event_data->second;
        } else {
            auto global_event_data = GetEventNode(event);
            if (!global_event_data) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
                                HandleToUint64(event), kVUID_Core_DrawState_InvalidEvent,
                                "Event %s cannot be waited on if it has never been set.",
                                dev_data->report_data->FormatHandle(event).c_str());
            } else {
                stageMask |= global_event_data->stageMask;
            }
        }
    }
    // TODO: Need to validate that host_bit is only set if set event is called
    // but set event can be called at any time.
    if (sourceStageMask != stageMask && sourceStageMask != (stageMask | VK_PIPELINE_STAGE_HOST_BIT)) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(pCB->commandBuffer), "VUID-vkCmdWaitEvents-srcStageMask-parameter",
                        "Submitting cmdbuffer with call to vkCmdWaitEvents using srcStageMask 0x%X which must be the bitwise OR of "
                        "the stageMask parameters used in calls to vkCmdSetEvent and VK_PIPELINE_STAGE_HOST_BIT if used with "
                        "vkSetEvent but instead is 0x%X.",
                        sourceStageMask, stageMask);
    }
    return skip;
}

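// Illustrative example (editor's sketch): if an application records
//     vkCmdSetEvent(cb, eventA, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT);
//     vkCmdSetEvent(cb, eventB, VK_PIPELINE_STAGE_TRANSFER_BIT);
// then a subsequent vkCmdWaitEvents on {eventA, eventB} must pass
//     srcStageMask = VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT
// (optionally OR'd with VK_PIPELINE_STAGE_HOST_BIT when vkSetEvent is used from the host);
// any other mask trips the check in ValidateEventStageMask above.
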
// Note that we only check bits that HAVE required queueflags -- don't care entries are skipped
static std::unordered_map<VkPipelineStageFlags, VkQueueFlags> supported_pipeline_stages_table = {
    {VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT},
    {VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT},
    {VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_QUEUE_COMPUTE_BIT},
    {VK_PIPELINE_STAGE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT},
    {VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, VK_QUEUE_GRAPHICS_BIT}};

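// Illustrative example (editor's sketch): on a transfer-only queue family (queueFlags == VK_QUEUE_TRANSFER_BIT),
// a stage mask containing VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT fails the table lookup because
// (VK_QUEUE_COMPUTE_BIT & VK_QUEUE_TRANSFER_BIT) == 0, while VK_PIPELINE_STAGE_TRANSFER_BIT passes.
// Stages absent from the table (e.g. TOP_OF_PIPE / BOTTOM_OF_PIPE) are "don't care" and are never flagged.
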
static const VkPipelineStageFlags stage_flag_bit_array[] = {VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX,
                                                            VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT,
                                                            VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
                                                            VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
                                                            VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT,
                                                            VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT,
                                                            VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT,
                                                            VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                                                            VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
                                                            VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
                                                            VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                                                            VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
                                                            VK_PIPELINE_STAGE_TRANSFER_BIT,
                                                            VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT};

bool CoreChecks::CheckStageMaskQueueCompatibility(layer_data *dev_data, VkCommandBuffer command_buffer,
                                                  VkPipelineStageFlags stage_mask, VkQueueFlags queue_flags, const char *function,
                                                  const char *src_or_dest, const char *error_code) {
    bool skip = false;
    // Lookup each bit in the stagemask and check for overlap between its table bits and queue_flags
    for (const auto &item : stage_flag_bit_array) {
        if (stage_mask & item) {
            if ((supported_pipeline_stages_table[item] & queue_flags) == 0) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(command_buffer), error_code,
                                "%s(): %s flag %s is not compatible with the queue family properties of this command buffer.",
                                function, src_or_dest, string_VkPipelineStageFlagBits(static_cast<VkPipelineStageFlagBits>(item)));
            }
        }
    }
    return skip;
}

// Check if all barriers are of a given operation type.
template <typename Barrier, typename OpCheck>
bool AllTransferOp(const COMMAND_POOL_NODE *pool, OpCheck &op_check, uint32_t count, const Barrier *barriers) {
    if (!pool) return false;

    for (uint32_t b = 0; b < count; b++) {
        if (!op_check(pool, barriers + b)) return false;
    }
    return true;
}

// Look at the barriers to see if they are all release or all acquire; the result impacts queue properties validation
BarrierOperationsType CoreChecks::ComputeBarrierOperationsType(layer_data *device_data, GLOBAL_CB_NODE *cb_state,
                                                               uint32_t buffer_barrier_count,
                                                               const VkBufferMemoryBarrier *buffer_barriers,
                                                               uint32_t image_barrier_count,
                                                               const VkImageMemoryBarrier *image_barriers) {
    auto pool = GetCommandPoolNode(cb_state->createInfo.commandPool);
    BarrierOperationsType op_type = kGeneral;

    // Look at the barrier details only if they exist
    // Note: AllTransferOp returns true for count == 0
    if ((buffer_barrier_count + image_barrier_count) != 0) {
        if (AllTransferOp(pool, TempIsReleaseOp<VkBufferMemoryBarrier>, buffer_barrier_count, buffer_barriers) &&
            AllTransferOp(pool, TempIsReleaseOp<VkImageMemoryBarrier>, image_barrier_count, image_barriers)) {
            op_type = kAllRelease;
        } else if (AllTransferOp(pool, IsAcquireOp<VkBufferMemoryBarrier>, buffer_barrier_count, buffer_barriers) &&
                   AllTransferOp(pool, IsAcquireOp<VkImageMemoryBarrier>, image_barrier_count, image_barriers)) {
            op_type = kAllAcquire;
        }
    }

    return op_type;
}

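// Illustrative example (editor's sketch, assuming TempIsReleaseOp/IsAcquireOp compare the barrier's queue family
// indices against the command pool's family, as elsewhere in this file): a command buffer from a pool on family 0
// that records only barriers with srcQueueFamilyIndex == 0 and dstQueueFamilyIndex == 1 is classified kAllRelease;
// one recording only barriers with srcQueueFamilyIndex == 1 and dstQueueFamilyIndex == 0 is kAllAcquire; any mix
// (or no ownership transfers at all) stays kGeneral.
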
bool CoreChecks::ValidateStageMasksAgainstQueueCapabilities(layer_data *dev_data, GLOBAL_CB_NODE const *cb_state,
                                                            VkPipelineStageFlags source_stage_mask,
                                                            VkPipelineStageFlags dest_stage_mask,
                                                            BarrierOperationsType barrier_op_type, const char *function,
                                                            const char *error_code) {
    bool skip = false;
    uint32_t queue_family_index = dev_data->commandPoolMap[cb_state->createInfo.commandPool].queueFamilyIndex;
    auto physical_device_state = GetPhysicalDeviceState();

    // Any pipeline stage included in srcStageMask or dstStageMask must be supported by the capabilities of the queue family
    // specified by the queueFamilyIndex member of the VkCommandPoolCreateInfo structure that was used to create the VkCommandPool
    // that commandBuffer was allocated from, as specified in the table of supported pipeline stages.

    if (queue_family_index < physical_device_state->queue_family_properties.size()) {
        VkQueueFlags specified_queue_flags = physical_device_state->queue_family_properties[queue_family_index].queueFlags;

        // Only check the source stage mask if any barriers aren't "acquire ownership"
        if ((barrier_op_type != kAllAcquire) && (source_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) {
            skip |= CheckStageMaskQueueCompatibility(dev_data, cb_state->commandBuffer, source_stage_mask, specified_queue_flags,
                                                     function, "srcStageMask", error_code);
        }
        // Only check the dest stage mask if any barriers aren't "release ownership"
        if ((barrier_op_type != kAllRelease) && (dest_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) {
            skip |= CheckStageMaskQueueCompatibility(dev_data, cb_state->commandBuffer, dest_stage_mask, specified_queue_flags,
                                                     function, "dstStageMask", error_code);
        }
    }
    return skip;
}

bool CoreChecks::PreCallValidateCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
                                              VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
                                              uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                                              uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                                              uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    assert(cb_state);

    auto barrier_op_type = ComputeBarrierOperationsType(device_data, cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers,
                                                        imageMemoryBarrierCount, pImageMemoryBarriers);
    bool skip = ValidateStageMasksAgainstQueueCapabilities(device_data, cb_state, sourceStageMask, dstStageMask, barrier_op_type,
                                                           "vkCmdWaitEvents", "VUID-vkCmdWaitEvents-srcStageMask-01164");
    skip |= ValidateStageMaskGsTsEnables(device_data, sourceStageMask, "vkCmdWaitEvents()",
                                         "VUID-vkCmdWaitEvents-srcStageMask-01159", "VUID-vkCmdWaitEvents-srcStageMask-01161",
                                         "VUID-vkCmdWaitEvents-srcStageMask-02111", "VUID-vkCmdWaitEvents-srcStageMask-02112");
    skip |= ValidateStageMaskGsTsEnables(device_data, dstStageMask, "vkCmdWaitEvents()", "VUID-vkCmdWaitEvents-dstStageMask-01160",
                                         "VUID-vkCmdWaitEvents-dstStageMask-01162", "VUID-vkCmdWaitEvents-dstStageMask-02113",
                                         "VUID-vkCmdWaitEvents-dstStageMask-02114");
    skip |= ValidateCmdQueueFlags(device_data, cb_state, "vkCmdWaitEvents()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                  "VUID-vkCmdWaitEvents-commandBuffer-cmdpool");
    skip |= ValidateCmd(device_data, cb_state, CMD_WAITEVENTS, "vkCmdWaitEvents()");
    skip |= ValidateBarriersToImages(device_data, cb_state, imageMemoryBarrierCount, pImageMemoryBarriers, "vkCmdWaitEvents()");
    skip |= ValidateBarriers(device_data, "vkCmdWaitEvents()", cb_state, sourceStageMask, dstStageMask, memoryBarrierCount,
                             pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
                             pImageMemoryBarriers);
    return skip;
}

void CoreChecks::PreCallRecordCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
                                            VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
                                            uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                                            uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                                            uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    auto first_event_index = cb_state->events.size();
    for (uint32_t i = 0; i < eventCount; ++i) {
        auto event_state = GetEventNode(pEvents[i]);
        if (event_state) {
            AddCommandBufferBinding(&event_state->cb_bindings, {HandleToUint64(pEvents[i]), kVulkanObjectTypeEvent}, cb_state);
            event_state->cb_bindings.insert(cb_state);
        }
        cb_state->waitedEvents.insert(pEvents[i]);
        cb_state->events.push_back(pEvents[i]);
    }
    cb_state->eventUpdates.emplace_back(
        [=](VkQueue q) { return ValidateEventStageMask(q, cb_state, eventCount, first_event_index, sourceStageMask); });
    TransitionImageLayouts(device_data, cb_state, imageMemoryBarrierCount, pImageMemoryBarriers);
    if (GetEnables()->gpu_validation) {
        GpuPreCallValidateCmdWaitEvents(device_data, sourceStageMask);
    }
}

void CoreChecks::PostCallRecordCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
                                             VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
                                             uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                                             uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                                             uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    RecordBarriersQFOTransfers(device_data, cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
                               pImageMemoryBarriers);
}

bool CoreChecks::PreCallValidateCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
                                                   VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
                                                   uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                                                   uint32_t bufferMemoryBarrierCount,
                                                   const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                                                   uint32_t imageMemoryBarrierCount,
                                                   const VkImageMemoryBarrier *pImageMemoryBarriers) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    assert(cb_state);

    bool skip = false;
    auto barrier_op_type = ComputeBarrierOperationsType(device_data, cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers,
                                                        imageMemoryBarrierCount, pImageMemoryBarriers);
    skip |= ValidateStageMasksAgainstQueueCapabilities(device_data, cb_state, srcStageMask, dstStageMask, barrier_op_type,
                                                       "vkCmdPipelineBarrier", "VUID-vkCmdPipelineBarrier-srcStageMask-01183");
    skip |= ValidateCmdQueueFlags(device_data, cb_state, "vkCmdPipelineBarrier()",
                                  VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                  "VUID-vkCmdPipelineBarrier-commandBuffer-cmdpool");
    skip |= ValidateCmd(device_data, cb_state, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
    skip |= ValidateStageMaskGsTsEnables(
        device_data, srcStageMask, "vkCmdPipelineBarrier()", "VUID-vkCmdPipelineBarrier-srcStageMask-01168",
        "VUID-vkCmdPipelineBarrier-srcStageMask-01170", "VUID-vkCmdPipelineBarrier-srcStageMask-02115",
        "VUID-vkCmdPipelineBarrier-srcStageMask-02116");
    skip |= ValidateStageMaskGsTsEnables(
        device_data, dstStageMask, "vkCmdPipelineBarrier()", "VUID-vkCmdPipelineBarrier-dstStageMask-01169",
        "VUID-vkCmdPipelineBarrier-dstStageMask-01171", "VUID-vkCmdPipelineBarrier-dstStageMask-02117",
        "VUID-vkCmdPipelineBarrier-dstStageMask-02118");
    if (cb_state->activeRenderPass) {
        skip |= ValidateRenderPassPipelineBarriers(device_data, "vkCmdPipelineBarrier()", cb_state, srcStageMask, dstStageMask,
                                                   dependencyFlags, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                                                   pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
        if (skip) return true;  // Early return to avoid redundant errors from below calls
    }
    skip |=
        ValidateBarriersToImages(device_data, cb_state, imageMemoryBarrierCount, pImageMemoryBarriers, "vkCmdPipelineBarrier()");
    skip |= ValidateBarriers(device_data, "vkCmdPipelineBarrier()", cb_state, srcStageMask, dstStageMask, memoryBarrierCount,
                             pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
                             pImageMemoryBarriers);
    return skip;
}

void CoreChecks::PreCallRecordCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
                                                 VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
                                                 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                                                 uint32_t bufferMemoryBarrierCount,
                                                 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                                                 uint32_t imageMemoryBarrierCount,
                                                 const VkImageMemoryBarrier *pImageMemoryBarriers) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);

    RecordBarriersQFOTransfers(device_data, cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
                               pImageMemoryBarriers);
    TransitionImageLayouts(device_data, cb_state, imageMemoryBarrierCount, pImageMemoryBarriers);
}

bool CoreChecks::SetQueryState(VkQueue queue, VkCommandBuffer commandBuffer, QueryObject object, bool value) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = GetCBNode(commandBuffer);
    if (pCB) {
        pCB->queryToStateMap[object] = value;
    }
    auto queue_data = dev_data->queueMap.find(queue);
    if (queue_data != dev_data->queueMap.end()) {
        queue_data->second.queryToStateMap[object] = value;
    }
    return false;
}

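// Note (editor's sketch): SetQueryState always returns false because entries on queryUpdates double as submit-time
// callbacks, where a "true" return signals a validation failure; pure state updates never fail. The record-time
// lambdas below capture the QueryObject by value, e.g.:
//     cb_state->queryUpdates.emplace_back([=](VkQueue q) { return SetQueryState(q, commandBuffer, query, true); });
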
bool CoreChecks::PreCallValidateCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    assert(cb_state);
    bool skip = ValidateCmdQueueFlags(device_data, cb_state, "vkCmdBeginQuery()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                      "VUID-vkCmdBeginQuery-commandBuffer-cmdpool");
    auto queryType = GetQueryPoolNode(queryPool)->createInfo.queryType;

    if (flags & VK_QUERY_CONTROL_PRECISE_BIT) {
        if (!device_data->enabled_features.core.occlusionQueryPrecise) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdBeginQuery-queryType-00800",
                            "VK_QUERY_CONTROL_PRECISE_BIT provided to vkCmdBeginQuery, but precise occlusion queries not enabled "
                            "on the device.");
        }

        if (queryType != VK_QUERY_TYPE_OCCLUSION) {
            skip |= log_msg(
                device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdBeginQuery-queryType-00800",
                "VK_QUERY_CONTROL_PRECISE_BIT provided to vkCmdBeginQuery, but pool query type is not VK_QUERY_TYPE_OCCLUSION");
        }
    }

    skip |= ValidateCmd(device_data, cb_state, CMD_BEGINQUERY, "vkCmdBeginQuery()");
    return skip;
}

void CoreChecks::PostCallRecordCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    QueryObject query = {queryPool, slot};
    cb_state->activeQueries.insert(query);
    cb_state->startedQueries.insert(query);
    AddCommandBufferBinding(&GetQueryPoolNode(queryPool)->cb_bindings, {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool},
                            cb_state);
}

bool CoreChecks::PreCallValidateCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    QueryObject query = {queryPool, slot};
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    assert(cb_state);
    bool skip = false;
    if (!cb_state->activeQueries.count(query)) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkCmdEndQuery-None-01923",
                        "Ending a query before it was started: queryPool %s, index %d.",
                        device_data->report_data->FormatHandle(queryPool).c_str(), slot);
    }
    skip |= ValidateCmdQueueFlags(device_data, cb_state, "vkCmdEndQuery()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                  "VUID-vkCmdEndQuery-commandBuffer-cmdpool");
    skip |= ValidateCmd(device_data, cb_state, CMD_ENDQUERY, "vkCmdEndQuery()");
    return skip;
}

void CoreChecks::PostCallRecordCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
    QueryObject query = {queryPool, slot};
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    cb_state->activeQueries.erase(query);
    cb_state->queryUpdates.emplace_back([=](VkQueue q) { return SetQueryState(q, commandBuffer, query, true); });
    AddCommandBufferBinding(&GetQueryPoolNode(queryPool)->cb_bindings, {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool},
                            cb_state);
}

bool CoreChecks::PreCallValidateCmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
                                                  uint32_t queryCount) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);

    bool skip = InsideRenderPass(device_data, cb_state, "vkCmdResetQueryPool()", "VUID-vkCmdResetQueryPool-renderpass");
    skip |= ValidateCmd(device_data, cb_state, CMD_RESETQUERYPOOL, "vkCmdResetQueryPool()");
    skip |= ValidateCmdQueueFlags(device_data, cb_state, "vkCmdResetQueryPool()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                  "VUID-vkCmdResetQueryPool-commandBuffer-cmdpool");
    return skip;
}

void CoreChecks::PostCallRecordCmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
                                                 uint32_t queryCount) {
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);

    for (uint32_t i = 0; i < queryCount; i++) {
        QueryObject query = {queryPool, firstQuery + i};
        cb_state->waitedEventsBeforeQueryReset[query] = cb_state->waitedEvents;
        cb_state->queryUpdates.emplace_back([=](VkQueue q) { return SetQueryState(q, commandBuffer, query, false); });
    }
    AddCommandBufferBinding(&GetQueryPoolNode(queryPool)->cb_bindings, {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool},
                            cb_state);
}

bool CoreChecks::IsQueryInvalid(layer_data *dev_data, QUEUE_STATE *queue_data, VkQueryPool queryPool, uint32_t queryIndex) {
    QueryObject query = {queryPool, queryIndex};
    auto query_data = queue_data->queryToStateMap.find(query);
    if (query_data != queue_data->queryToStateMap.end()) {
        if (!query_data->second) return true;
    } else {
        auto it = dev_data->queryToStateMap.find(query);
        if (it == dev_data->queryToStateMap.end() || !it->second) return true;
    }

    return false;
}

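// Note (editor's sketch): IsQueryInvalid consults two levels of state: the per-queue map, which reflects query
// results produced by work already enqueued on this queue (including the pending deferred SetQueryState callbacks),
// falling back to the device-wide map of previously retired state. A query is "invalid" for a copy if the map
// consulted does not record it as available (true).
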
bool CoreChecks::ValidateQuery(VkQueue queue, GLOBAL_CB_NODE *pCB, VkQueryPool queryPool, uint32_t firstQuery,
                               uint32_t queryCount) {
    bool skip = false;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
    auto queue_data = GetQueueState(queue);
    if (!queue_data) return false;
    for (uint32_t i = 0; i < queryCount; i++) {
        if (IsQueryInvalid(dev_data, queue_data, queryPool, firstQuery + i)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(pCB->commandBuffer), kVUID_Core_DrawState_InvalidQuery,
                            "Requesting a copy from query to buffer with invalid query: queryPool %s, index %d",
                            dev_data->report_data->FormatHandle(queryPool).c_str(), firstQuery + i);
        }
    }
    return skip;
}

bool CoreChecks::PreCallValidateCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
                                                        uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset,
                                                        VkDeviceSize stride, VkQueryResultFlags flags) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    auto cb_state = GetCBNode(commandBuffer);
    auto dst_buff_state = GetBufferState(dstBuffer);
    assert(cb_state);
    assert(dst_buff_state);
    bool skip = ValidateMemoryIsBoundToBuffer(device_data, dst_buff_state, "vkCmdCopyQueryPoolResults()",
                                              "VUID-vkCmdCopyQueryPoolResults-dstBuffer-00826");
    // Validate that DST buffer has correct usage flags set
    skip |= ValidateBufferUsageFlags(device_data, dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                     "VUID-vkCmdCopyQueryPoolResults-dstBuffer-00825", "vkCmdCopyQueryPoolResults()",
                                     "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
    skip |=
        ValidateCmdQueueFlags(device_data, cb_state, "vkCmdCopyQueryPoolResults()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                              "VUID-vkCmdCopyQueryPoolResults-commandBuffer-cmdpool");
    skip |= ValidateCmd(device_data, cb_state, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
    skip |= InsideRenderPass(device_data, cb_state, "vkCmdCopyQueryPoolResults()", "VUID-vkCmdCopyQueryPoolResults-renderpass");
    return skip;
}

void CoreChecks::PostCallRecordCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
                                                       uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset,
                                                       VkDeviceSize stride, VkQueryResultFlags flags) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    auto cb_state = GetCBNode(commandBuffer);
    auto dst_buff_state = GetBufferState(dstBuffer);
    AddCommandBufferBindingBuffer(device_data, cb_state, dst_buff_state);
    cb_state->queryUpdates.emplace_back([=](VkQueue q) { return ValidateQuery(q, cb_state, queryPool, firstQuery, queryCount); });
    AddCommandBufferBinding(&GetQueryPoolNode(queryPool)->cb_bindings, {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool},
                            cb_state);
}

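// The queryUpdates lambda recorded above is this layer's deferred-validation pattern: a check
// that only makes sense at submit time (were the copied queries ever issued and completed on
// this queue?) is captured now and replayed later. A minimal sketch of the consuming side,
// assuming a submit-time walk over the command buffer (illustrative, not the exact call site):
//
//     bool skip = false;
//     for (auto &query_update_fn : cb_state->queryUpdates) {
//         skip |= query_update_fn(queue);  // e.g. invokes ValidateQuery(queue, ...) above
//     }
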
bool CoreChecks::PreCallValidateCmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout,
                                                 VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size,
                                                 const void *pValues) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skip = false;
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    assert(cb_state);
    skip |= ValidateCmdQueueFlags(device_data, cb_state, "vkCmdPushConstants()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                  "VUID-vkCmdPushConstants-commandBuffer-cmdpool");
    skip |= ValidateCmd(device_data, cb_state, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
    skip |= ValidatePushConstantRange(device_data, offset, size, "vkCmdPushConstants()");
    if (0 == stageFlags) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkCmdPushConstants-stageFlags-requiredbitmask",
                        "vkCmdPushConstants() call has no stageFlags set.");
    }

    // Check that each pipeline_layout VkPushConstantRange overlapping offset/size has stageFlags set for every stage in the
    // command's stageFlags argument, *and* that the command's stageFlags argument has bits set for the stageFlags in each
    // overlapping range.
    if (!skip) {
        const auto &ranges = *GetPipelineLayout(device_data, layout)->push_constant_ranges;
        VkShaderStageFlags found_stages = 0;
        for (const auto &range : ranges) {
            if ((offset >= range.offset) && (offset + size <= range.offset + range.size)) {
                VkShaderStageFlags matching_stages = range.stageFlags & stageFlags;
                if (matching_stages != range.stageFlags) {
                    // VUID-vkCmdPushConstants-offset-01796
                    skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer),
                                    "VUID-vkCmdPushConstants-offset-01796",
                                    "vkCmdPushConstants(): stageFlags (0x%" PRIx32 "), offset (%" PRIu32 "), and size (%" PRIu32
                                    ") must contain all stages in overlapping VkPushConstantRange stageFlags (0x%" PRIx32
                                    "), offset (%" PRIu32 "), and size (%" PRIu32 ") in pipeline layout %s.",
                                    (uint32_t)stageFlags, offset, size, (uint32_t)range.stageFlags, range.offset, range.size,
                                    device_data->report_data->FormatHandle(layout).c_str());
                }

                // Accumulate all stages we've found
                found_stages = matching_stages | found_stages;
            }
        }
        if (found_stages != stageFlags) {
            // VUID-vkCmdPushConstants-offset-01795
            uint32_t missing_stages = ~found_stages & stageFlags;
            skip |=
                log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkCmdPushConstants-offset-01795",
                        "vkCmdPushConstants(): stageFlags = 0x%" PRIx32
                        ", but the VkPushConstantRange(s) in pipeline layout %s overlapping offset = %" PRIu32
                        " and size = %" PRIu32 " do not contain stageFlags 0x%" PRIx32 ".",
                        (uint32_t)stageFlags, device_data->report_data->FormatHandle(layout).c_str(), offset, size, missing_stages);
        }
    }
    return skip;
}

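// Worked example of the two checks above (illustrative values, not from any caller): given a
// pipeline layout created with a single range
//
//     VkPushConstantRange range = {VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT, 0, 64};
//
// the call
//
//     vkCmdPushConstants(cb, layout, VK_SHADER_STAGE_VERTEX_BIT, 0, 64, data);
//
// fails the first check ("VUID-vkCmdPushConstants-offset-01796"): the overlapped range also
// requires the fragment stage. Meanwhile
//
//     vkCmdPushConstants(cb, layout, VK_SHADER_STAGE_COMPUTE_BIT, 0, 64, data);
//
// fails the second check ("VUID-vkCmdPushConstants-offset-01795"): no overlapping range
// supplies the COMPUTE stage, so it ends up in missing_stages.
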
bool CoreChecks::PreCallValidateCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
                                                  VkQueryPool queryPool, uint32_t slot) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    assert(cb_state);
    bool skip = ValidateCmdQueueFlags(device_data, cb_state, "vkCmdWriteTimestamp()",
                                      VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
                                      "VUID-vkCmdWriteTimestamp-commandBuffer-cmdpool");
    skip |= ValidateCmd(device_data, cb_state, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
    return skip;
}

void CoreChecks::PostCallRecordCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
                                                 VkQueryPool queryPool, uint32_t slot) {
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    QueryObject query = {queryPool, slot};
    cb_state->queryUpdates.emplace_back([=](VkQueue q) { return SetQueryState(q, commandBuffer, query, true); });
}

bool CoreChecks::MatchUsage(layer_data *dev_data, uint32_t count, const VkAttachmentReference2KHR *attachments,
                            const VkFramebufferCreateInfo *fbci, VkImageUsageFlagBits usage_flag, const char *error_code) {
    bool skip = false;

    for (uint32_t attach = 0; attach < count; attach++) {
        if (attachments[attach].attachment != VK_ATTACHMENT_UNUSED) {
            // Attachment counts are verified elsewhere, but prevent an invalid access
            if (attachments[attach].attachment < fbci->attachmentCount) {
                const VkImageView *image_view = &fbci->pAttachments[attachments[attach].attachment];
                auto view_state = GetImageViewState(*image_view);
                if (view_state) {
                    // Guard the image lookup so an unknown/destroyed image is not dereferenced
                    auto image_state = GetImageState(view_state->create_info.image);
                    if (image_state != nullptr) {
                        const VkImageCreateInfo *ici = &image_state->createInfo;
                        if ((ici->usage & usage_flag) == 0) {
                            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, error_code,
                                            "vkCreateFramebuffer: Framebuffer Attachment (%d) conflicts with the image's "
                                            "IMAGE_USAGE flags (%s).",
                                            attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag));
                        }
                    }
                }
            }
        }
    }
    return skip;
}

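// Example of the conflict MatchUsage reports (illustrative, not from any test): an image created
// with only VK_IMAGE_USAGE_SAMPLED_BIT cannot back a framebuffer attachment that a subpass uses
// as a color attachment, because VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT is missing from ici->usage:
//
//     VkImageCreateInfo ici = {};              // sketch; other required fields omitted
//     ici.usage = VK_IMAGE_USAGE_SAMPLED_BIT;  // no COLOR_ATTACHMENT usage
//     // ... create the image and a view, list the view in VkFramebufferCreateInfo::pAttachments
//     // for a render pass whose subpass references it via pColorAttachments
//     // -> triggers "VUID-VkFramebufferCreateInfo-pAttachments-00877" below
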
// Validate VkFramebufferCreateInfo which includes:
// 1. attachmentCount equals renderPass attachmentCount
// 2. corresponding framebuffer and renderpass attachments have matching formats
// 3. corresponding framebuffer and renderpass attachments have matching sample counts
// 4. fb attachments only have a single mip level
// 5. fb attachment dimensions are each at least as large as the fb
// 6. fb attachments use the identity swizzle
// 7. fb attachments used by renderPass for color/input/ds have correct usage bit set
// 8. fb dimensions are within physical device limits
bool CoreChecks::ValidateFramebufferCreateInfo(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
    bool skip = false;

    auto rp_state = GetRenderPassState(pCreateInfo->renderPass);
    if (rp_state) {
        const VkRenderPassCreateInfo2KHR *rpci = rp_state->createInfo.ptr();
        if (rpci->attachmentCount != pCreateInfo->attachmentCount) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                            HandleToUint64(pCreateInfo->renderPass), "VUID-VkFramebufferCreateInfo-attachmentCount-00876",
                            "vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount of %u does not match attachmentCount "
                            "of %u of renderPass (%s) being used to create Framebuffer.",
                            pCreateInfo->attachmentCount, rpci->attachmentCount,
                            dev_data->report_data->FormatHandle(pCreateInfo->renderPass).c_str());
        } else {
            // attachmentCounts match, so make sure corresponding attachment details line up
            const VkImageView *image_views = pCreateInfo->pAttachments;
            for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
                auto view_state = GetImageViewState(image_views[i]);
                if (!view_state) continue;  // guard: unknown/destroyed view handles are reported elsewhere
                auto &ivci = view_state->create_info;
                if (ivci.format != rpci->pAttachments[i].format) {
                    skip |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                                HandleToUint64(pCreateInfo->renderPass), "VUID-VkFramebufferCreateInfo-pAttachments-00880",
                                "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has format of %s that does not "
                                "match the format of %s used by the corresponding attachment for renderPass (%s).",
                                i, string_VkFormat(ivci.format), string_VkFormat(rpci->pAttachments[i].format),
                                dev_data->report_data->FormatHandle(pCreateInfo->renderPass).c_str());
                }
                auto image_state = GetImageState(ivci.image);
                if (!image_state) continue;  // guard as above
                const VkImageCreateInfo *ici = &image_state->createInfo;
                if (ici->samples != rpci->pAttachments[i].samples) {
                    skip |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                        HandleToUint64(pCreateInfo->renderPass), "VUID-VkFramebufferCreateInfo-pAttachments-00881",
                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has %s samples that do not match the %s "
                        "samples used by the corresponding attachment for renderPass (%s).",
                        i, string_VkSampleCountFlagBits(ici->samples), string_VkSampleCountFlagBits(rpci->pAttachments[i].samples),
                        dev_data->report_data->FormatHandle(pCreateInfo->renderPass).c_str());
                }
                // Verify that view only has a single mip level
                if (ivci.subresourceRange.levelCount != 1) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
                                    0, "VUID-VkFramebufferCreateInfo-pAttachments-00883",
                                    "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has mip levelCount of %u but "
                                    "only a single mip level (levelCount == 1) is allowed when creating a Framebuffer.",
                                    i, ivci.subresourceRange.levelCount);
                }
                const uint32_t mip_level = ivci.subresourceRange.baseMipLevel;
                uint32_t mip_width = max(1u, ici->extent.width >> mip_level);
                uint32_t mip_height = max(1u, ici->extent.height >> mip_level);
                if ((ivci.subresourceRange.layerCount < pCreateInfo->layers) || (mip_width < pCreateInfo->width) ||
                    (mip_height < pCreateInfo->height)) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
                                    0, "VUID-VkFramebufferCreateInfo-pAttachments-00882",
                                    "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has dimensions "
                                    "smaller than the corresponding framebuffer dimensions. Here are the respective dimensions for "
                                    "attachment #%u, framebuffer:\n"
                                    "width: %u, %u\n"
                                    "height: %u, %u\n"
                                    "layerCount: %u, %u\n",
                                    i, ivci.subresourceRange.baseMipLevel, i, mip_width, pCreateInfo->width, mip_height,
                                    pCreateInfo->height, ivci.subresourceRange.layerCount, pCreateInfo->layers);
                }
                if (((ivci.components.r != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.r != VK_COMPONENT_SWIZZLE_R)) ||
                    ((ivci.components.g != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.g != VK_COMPONENT_SWIZZLE_G)) ||
                    ((ivci.components.b != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.b != VK_COMPONENT_SWIZZLE_B)) ||
                    ((ivci.components.a != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.a != VK_COMPONENT_SWIZZLE_A))) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
                                    0, "VUID-VkFramebufferCreateInfo-pAttachments-00884",
                                    "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has a non-identity swizzle. "
                                    "All framebuffer attachments must have been created with the identity swizzle. Here are the "
                                    "actual swizzle values:\n"
                                    "r swizzle = %s\n"
                                    "g swizzle = %s\n"
                                    "b swizzle = %s\n"
                                    "a swizzle = %s\n",
                                    i, string_VkComponentSwizzle(ivci.components.r), string_VkComponentSwizzle(ivci.components.g),
                                    string_VkComponentSwizzle(ivci.components.b), string_VkComponentSwizzle(ivci.components.a));
                }
            }
        }
        // Verify correct attachment usage flags
        for (uint32_t subpass = 0; subpass < rpci->subpassCount; subpass++) {
            // Verify input attachments:
            skip |=
                MatchUsage(dev_data, rpci->pSubpasses[subpass].inputAttachmentCount, rpci->pSubpasses[subpass].pInputAttachments,
                           pCreateInfo, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-pAttachments-00879");
            // Verify color attachments:
            skip |=
                MatchUsage(dev_data, rpci->pSubpasses[subpass].colorAttachmentCount, rpci->pSubpasses[subpass].pColorAttachments,
                           pCreateInfo, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-pAttachments-00877");
            // Verify depth/stencil attachments:
            if (rpci->pSubpasses[subpass].pDepthStencilAttachment != nullptr) {
                skip |= MatchUsage(dev_data, 1, rpci->pSubpasses[subpass].pDepthStencilAttachment, pCreateInfo,
                                   VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-pAttachments-02633");
            }
        }
    }
    // Verify FB dimensions are within physical device limits
    if (pCreateInfo->width > dev_data->phys_dev_props.limits.maxFramebufferWidth) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        "VUID-VkFramebufferCreateInfo-width-00886",
                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width exceeds physical device limits. Requested "
                        "width: %u, device max: %u\n",
                        pCreateInfo->width, dev_data->phys_dev_props.limits.maxFramebufferWidth);
    }
    if (pCreateInfo->height > dev_data->phys_dev_props.limits.maxFramebufferHeight) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        "VUID-VkFramebufferCreateInfo-height-00888",
                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height exceeds physical device limits. Requested "
                        "height: %u, device max: %u\n",
                        pCreateInfo->height, dev_data->phys_dev_props.limits.maxFramebufferHeight);
    }
    if (pCreateInfo->layers > dev_data->phys_dev_props.limits.maxFramebufferLayers) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        "VUID-VkFramebufferCreateInfo-layers-00890",
                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers exceeds physical device limits. Requested "
                        "layers: %u, device max: %u\n",
                        pCreateInfo->layers, dev_data->phys_dev_props.limits.maxFramebufferLayers);
    }
    // Verify FB dimensions are greater than zero
    if (pCreateInfo->width == 0) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        "VUID-VkFramebufferCreateInfo-width-00885",
                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width must be greater than zero.");
    }
    if (pCreateInfo->height == 0) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        "VUID-VkFramebufferCreateInfo-height-00887",
                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height must be greater than zero.");
    }
    if (pCreateInfo->layers == 0) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        "VUID-VkFramebufferCreateInfo-layers-00889",
                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers must be greater than zero.");
    }
    return skip;
}

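// A minimal create-info that satisfies the checks above (illustrative sketch; render_pass and
// color_view are assumed to come from a compatible single-attachment render pass and a
// single-mip, identity-swizzled image view at least width x height in size):
//
//     VkFramebufferCreateInfo fbci = {};
//     fbci.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
//     fbci.renderPass = render_pass;     // attachmentCount must match this pass
//     fbci.attachmentCount = 1;
//     fbci.pAttachments = &color_view;   // format/samples must match the pass attachment
//     fbci.width = 1024;                 // all three dimensions non-zero and within limits
//     fbci.height = 768;
//     fbci.layers = 1;
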
bool CoreChecks::PreCallValidateCreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
                                                  const VkAllocationCallbacks *pAllocator, VkFramebuffer *pFramebuffer) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    // TODO : Verify that the renderPass the FB is created with is compatible with the FB
    bool skip = false;
    skip |= ValidateFramebufferCreateInfo(device_data, pCreateInfo);
    return skip;
}

void CoreChecks::PostCallRecordCreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
                                                 const VkAllocationCallbacks *pAllocator, VkFramebuffer *pFramebuffer,
                                                 VkResult result) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (VK_SUCCESS != result) return;
    // Shadow create info and store in map
    std::unique_ptr<FRAMEBUFFER_STATE> fb_state(
        new FRAMEBUFFER_STATE(*pFramebuffer, pCreateInfo, GetRenderPassStateSharedPtr(pCreateInfo->renderPass)));

    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
        VkImageView view = pCreateInfo->pAttachments[i];
        auto view_state = GetImageViewState(view);
        if (!view_state) {
            continue;
        }
#ifdef FRAMEBUFFER_ATTACHMENT_STATE_CACHE
        MT_FB_ATTACHMENT_INFO fb_info;
        fb_info.view_state = view_state;
        fb_info.image = view_state->create_info.image;
        fb_state->attachments.push_back(fb_info);
#endif
    }
    device_data->frameBufferMap[*pFramebuffer] = std::move(fb_state);
}

static bool FindDependency(const uint32_t index, const uint32_t dependent, const std::vector<DAGNode> &subpass_to_node,
                           std::unordered_set<uint32_t> &processed_nodes) {
    // If we have already checked this node we have not found a dependency path so return false.
    if (processed_nodes.count(index)) return false;
    processed_nodes.insert(index);
    const DAGNode &node = subpass_to_node[index];
    // Look for a dependency path. If one exists return true, else recurse on the previous nodes.
    if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
        for (auto elem : node.prev) {
            if (FindDependency(elem, dependent, subpass_to_node, processed_nodes)) return true;
        }
    } else {
        return true;
    }
    return false;
}

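// Sketch of the walk FindDependency performs (illustrative): with edges built as in
// RecordRenderPassDAG below, a 0 -> 1 -> 2 dependency chain yields
// subpass_to_node[1].prev == {0} and subpass_to_node[2].prev == {1}. FindDependency(2, 0, ...)
// does not find 0 directly in node 2's prev list, recurses into node 1, finds 0 there, and
// returns true -- so a transitive chain of VkSubpassDependency records satisfies the
// "a dependency must exist" checks, and processed_nodes keeps the recursion from revisiting
// nodes in dense graphs.
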
bool CoreChecks::CheckDependencyExists(const layer_data *dev_data, const uint32_t subpass,
                                       const std::vector<uint32_t> &dependent_subpasses,
                                       const std::vector<DAGNode> &subpass_to_node, bool &skip) {
    bool result = true;
    // Loop through all subpasses that share the same attachment and make sure a dependency exists
    for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
        if (static_cast<uint32_t>(subpass) == dependent_subpasses[k]) continue;
        const DAGNode &node = subpass_to_node[subpass];
        // Check for a specified dependency between the two nodes. If one exists we are done.
        auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]);
        auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]);
        if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
            // If no direct dependency exists, an implicit (transitive) one still might. If not, throw an error.
            std::unordered_set<uint32_t> processed_nodes;
            if (!(FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) ||
                  FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes))) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                kVUID_Core_DrawState_InvalidRenderpass,
                                "A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
                                dependent_subpasses[k]);
                result = false;
            }
        }
    }
    return result;
}

bool CoreChecks::CheckPreserved(const layer_data *dev_data, const VkRenderPassCreateInfo2KHR *pCreateInfo, const int index,
                                const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, bool &skip) {
    const DAGNode &node = subpass_to_node[index];
    // If this node writes to the attachment, return true, as following nodes need to preserve the attachment.
    const VkSubpassDescription2KHR &subpass = pCreateInfo->pSubpasses[index];
    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
        if (attachment == subpass.pColorAttachments[j].attachment) return true;
    }
    for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
        if (attachment == subpass.pInputAttachments[j].attachment) return true;
    }
    if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
        if (attachment == subpass.pDepthStencilAttachment->attachment) return true;
    }
    bool result = false;
    // Loop through previous nodes and see if any of them write to the attachment.
    for (auto elem : node.prev) {
        result |= CheckPreserved(dev_data, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip);
    }
    // If the attachment was written to by a previous node, then this node needs to preserve it.
    if (result && depth > 0) {
        bool has_preserved = false;
        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
            if (subpass.pPreserveAttachments[j] == attachment) {
                has_preserved = true;
                break;
            }
        }
        if (!has_preserved) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            kVUID_Core_DrawState_InvalidRenderpass,
                            "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
        }
    }
    return result;
}

template <class T>
bool IsRangeOverlapping(T offset1, T size1, T offset2, T size2) {
    return (((offset1 + size1) > offset2) && ((offset1 + size1) < (offset2 + size2))) ||
           ((offset1 > offset2) && (offset1 < (offset2 + size2)));
}

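// Boundary notes for IsRangeOverlapping (worked examples, illustrative): ranges behave as
// half-open [offset, offset + size), so {0, 4} and {4, 4} do not overlap while {0, 4} and
// {2, 4} do. One quirk worth knowing: two *identical* ranges (same offset, same size) also
// report "no overlap" here, because both strict comparisons fail. ValidateDependencies below
// catches the same-handle alias via its separate viewi == viewj identity check, but two
// distinct views or bindings with byte-identical ranges would not be flagged by this predicate
// alone.
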
bool IsRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
    return (IsRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
            IsRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
}

bool CoreChecks::ValidateDependencies(layer_data *dev_data, FRAMEBUFFER_STATE const *framebuffer,
                                      RENDER_PASS_STATE const *renderPass) {
    bool skip = false;
    auto const pFramebufferInfo = framebuffer->createInfo.ptr();
    auto const pCreateInfo = renderPass->createInfo.ptr();
    auto const &subpass_to_node = renderPass->subpassToNode;
    std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount);
    std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount);
    std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount);
    // Find overlapping attachments
    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
        for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
            VkImageView viewi = pFramebufferInfo->pAttachments[i];
            VkImageView viewj = pFramebufferInfo->pAttachments[j];
            if (viewi == viewj) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
                continue;
            }
            auto view_state_i = GetImageViewState(viewi);
            auto view_state_j = GetImageViewState(viewj);
            if (!view_state_i || !view_state_j) {
                continue;
            }
            auto view_ci_i = view_state_i->create_info;
            auto view_ci_j = view_state_j->create_info;
            if (view_ci_i.image == view_ci_j.image && IsRegionOverlapping(view_ci_i.subresourceRange, view_ci_j.subresourceRange)) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
                continue;
            }
            auto image_data_i = GetImageState(view_ci_i.image);
            auto image_data_j = GetImageState(view_ci_j.image);
            if (!image_data_i || !image_data_j) {
                continue;
            }
            if (image_data_i->binding.mem == image_data_j->binding.mem &&
                IsRangeOverlapping(image_data_i->binding.offset, image_data_i->binding.size, image_data_j->binding.offset,
                                   image_data_j->binding.size)) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
            }
        }
    }
    // For each attachment, find the subpasses that use it.
    unordered_set<uint32_t> attachmentIndices;
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription2KHR &subpass = pCreateInfo->pSubpasses[i];
        attachmentIndices.clear();
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            uint32_t attachment = subpass.pInputAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED) continue;
            input_attachment_to_subpass[attachment].push_back(i);
            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
                input_attachment_to_subpass[overlapping_attachment].push_back(i);
            }
        }
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            uint32_t attachment = subpass.pColorAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED) continue;
            output_attachment_to_subpass[attachment].push_back(i);
            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
                output_attachment_to_subpass[overlapping_attachment].push_back(i);
            }
            attachmentIndices.insert(attachment);
        }
        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
            output_attachment_to_subpass[attachment].push_back(i);
            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
                output_attachment_to_subpass[overlapping_attachment].push_back(i);
            }

            if (attachmentIndices.count(attachment)) {
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            kVUID_Core_DrawState_InvalidRenderpass,
                            "Cannot use same attachment (%u) as both color and depth output in same subpass (%u).", attachment, i);
            }
        }
    }
    // If a dependency is needed, make sure one exists
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription2KHR &subpass = pCreateInfo->pSubpasses[i];
        // If the attachment is an input then all subpasses that output must have a dependency relationship
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            uint32_t attachment = subpass.pInputAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED) continue;
            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip);
        }
        // If the attachment is an output then all subpasses that use the attachment must have a dependency relationship
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            uint32_t attachment = subpass.pColorAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED) continue;
            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip);
            CheckDependencyExists(dev_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip);
        }
        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip);
            CheckDependencyExists(dev_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip);
        }
    }
    // Loop through implicit dependencies: if this pass reads, make sure the attachment is preserved for all passes after it
    // was written.
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription2KHR &subpass = pCreateInfo->pSubpasses[i];
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            CheckPreserved(dev_data, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip);
        }
    }
    return skip;
}

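// Example of the situation ValidateDependencies flags (illustrative): subpass 0 writes
// attachment 0 as a color attachment and subpass 1 reads the same attachment as an input
// attachment, but the render pass declares no dependency between them. A dependency like the
// following (sketch; masks chosen for the common color-write -> fragment-read case) would
// satisfy the check:
//
//     VkSubpassDependency2KHR dep = {};
//     dep.sType = VK_STRUCTURE_TYPE_SUBPASS_DEPENDENCY_2_KHR;
//     dep.srcSubpass = 0;
//     dep.dstSubpass = 1;
//     dep.srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
//     dep.dstStageMask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
//     dep.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
//     dep.dstAccessMask = VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
//     dep.dependencyFlags = VK_DEPENDENCY_BY_REGION_BIT;
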
static void RecordRenderPassDAG(const layer_data *dev_data, RenderPassCreateVersion rp_version,
                                const VkRenderPassCreateInfo2KHR *pCreateInfo, RENDER_PASS_STATE *render_pass) {
    auto &subpass_to_node = render_pass->subpassToNode;
    subpass_to_node.resize(pCreateInfo->subpassCount);
    auto &self_dependencies = render_pass->self_dependencies;
    self_dependencies.resize(pCreateInfo->subpassCount);

    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        subpass_to_node[i].pass = i;
        self_dependencies[i].clear();
    }
    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
        const VkSubpassDependency2KHR &dependency = pCreateInfo->pDependencies[i];
        if ((dependency.srcSubpass != VK_SUBPASS_EXTERNAL) && (dependency.dstSubpass != VK_SUBPASS_EXTERNAL)) {
            if (dependency.srcSubpass == dependency.dstSubpass) {
                self_dependencies[dependency.srcSubpass].push_back(i);
            } else {
                subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass);
                subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass);
            }
        }
    }
}

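// How a dependency becomes DAG edges (illustrative): a record with srcSubpass = 0 and
// dstSubpass = 2 produces subpass_to_node[2].prev == {0} and subpass_to_node[0].next == {2};
// a record with srcSubpass == dstSubpass == 1 is instead stored by its dependency index in
// self_dependencies[1], and external dependencies (VK_SUBPASS_EXTERNAL on either end)
// contribute no edges at all.
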
static bool ValidateRenderPassDAG(const layer_data *dev_data, RenderPassCreateVersion rp_version,
                                  const VkRenderPassCreateInfo2KHR *pCreateInfo, RENDER_PASS_STATE *render_pass) {
    // Shorthand...
    auto &subpass_to_node = render_pass->subpassToNode;
    subpass_to_node.resize(pCreateInfo->subpassCount);
    auto &self_dependencies = render_pass->self_dependencies;
    self_dependencies.resize(pCreateInfo->subpassCount);

    bool skip = false;
    const char *vuid;
    const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);

    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        subpass_to_node[i].pass = i;
        self_dependencies[i].clear();
    }
    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
        const VkSubpassDependency2KHR &dependency = pCreateInfo->pDependencies[i];
        VkPipelineStageFlags exclude_graphics_pipeline_stages =
            ~(VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT |
              ExpandPipelineStageFlags(dev_data->device_extensions, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT));
        VkPipelineStageFlagBits latest_src_stage = GetLogicallyLatestGraphicsPipelineStage(dependency.srcStageMask);
        VkPipelineStageFlagBits earliest_dst_stage = GetLogicallyEarliestGraphicsPipelineStage(dependency.dstStageMask);

        // This VU is actually generalised to *any* pipeline - not just graphics - but only graphics render passes are
        // currently supported by the spec - so only that pipeline is checked here.
        // If that is ever relaxed, this check should be extended to cover those pipelines.
        if (dependency.srcSubpass == dependency.dstSubpass && (dependency.srcStageMask & exclude_graphics_pipeline_stages) != 0u &&
            (dependency.dstStageMask & exclude_graphics_pipeline_stages) != 0u) {
            vuid = use_rp2 ? "VUID-VkSubpassDependency2KHR-srcSubpass-02244" : "VUID-VkSubpassDependency-srcSubpass-01989";
            skip |= log_msg(
                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
                "Dependency %u is a self-dependency, but specifies stage masks that contain stages not in the GRAPHICS pipeline.",
                i);
        } else if (dependency.srcSubpass != VK_SUBPASS_EXTERNAL && (dependency.srcStageMask & VK_PIPELINE_STAGE_HOST_BIT)) {
            vuid = use_rp2 ? "VUID-VkSubpassDependency2KHR-srcSubpass-03078" : "VUID-VkSubpassDependency-srcSubpass-00858";
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
                            "Dependency %u specifies a dependency from subpass %u, but includes HOST_BIT in the source stage mask.",
                            i, dependency.srcSubpass);
        } else if (dependency.dstSubpass != VK_SUBPASS_EXTERNAL && (dependency.dstStageMask & VK_PIPELINE_STAGE_HOST_BIT)) {
            vuid = use_rp2 ? "VUID-VkSubpassDependency2KHR-dstSubpass-03079" : "VUID-VkSubpassDependency-dstSubpass-00859";
            skip |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
                        "Dependency %u specifies a dependency to subpass %u, but includes HOST_BIT in the destination stage mask.",
                        i, dependency.dstSubpass);
        }
        // These next two VUs are actually generalised to *any* pipeline - not just graphics - but only graphics render passes are
        // currently supported by the spec - so only that pipeline is checked here.
        // If that is ever relaxed, these next two checks should be extended to cover those pipelines.
        else if (dependency.srcSubpass != VK_SUBPASS_EXTERNAL &&
                 pCreateInfo->pSubpasses[dependency.srcSubpass].pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS &&
                 (dependency.srcStageMask & exclude_graphics_pipeline_stages) != 0u) {
            vuid =
                use_rp2 ? "VUID-VkRenderPassCreateInfo2KHR-pDependencies-03054" : "VUID-VkRenderPassCreateInfo-pDependencies-00837";
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
                            "Dependency %u specifies a source stage mask that contains stages not in the GRAPHICS pipeline as used "
                            "by the source subpass %u.",
                            i, dependency.srcSubpass);
        } else if (dependency.dstSubpass != VK_SUBPASS_EXTERNAL &&
                   pCreateInfo->pSubpasses[dependency.dstSubpass].pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS &&
                   (dependency.dstStageMask & exclude_graphics_pipeline_stages) != 0u) {
            vuid =
                use_rp2 ? "VUID-VkRenderPassCreateInfo2KHR-pDependencies-03055" : "VUID-VkRenderPassCreateInfo-pDependencies-00838";
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
                            "Dependency %u specifies a destination stage mask that contains stages not in the GRAPHICS pipeline as "
                            "used by the destination subpass %u.",
                            i, dependency.dstSubpass);
        }
        // The first subpass here serves as a good proxy for "is multiview enabled" - since all view masks need to be non-zero if
        // any are, which enables multiview.
        else if (use_rp2 && (dependency.dependencyFlags & VK_DEPENDENCY_VIEW_LOCAL_BIT) &&
                 (pCreateInfo->pSubpasses[0].viewMask == 0)) {
            skip |= log_msg(
                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                "VUID-VkRenderPassCreateInfo2KHR-viewMask-03059",
                "Dependency %u specifies the VK_DEPENDENCY_VIEW_LOCAL_BIT, but multiview is not enabled for this render pass.", i);
        } else if (use_rp2 && !(dependency.dependencyFlags & VK_DEPENDENCY_VIEW_LOCAL_BIT) && dependency.viewOffset != 0) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            "VUID-VkSubpassDependency2KHR-dependencyFlags-03092",
                            "Dependency %u does not specify the VK_DEPENDENCY_VIEW_LOCAL_BIT, but specifies a view offset of %d.",
                            i, dependency.viewOffset);
        } else if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL || dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
            if (dependency.srcSubpass == dependency.dstSubpass) {
                vuid = use_rp2 ? "VUID-VkSubpassDependency2KHR-srcSubpass-03085" : "VUID-VkSubpassDependency-srcSubpass-00865";
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                vuid, "The src and dst subpasses in dependency %u are both external.", i);
            } else if (dependency.dependencyFlags & VK_DEPENDENCY_VIEW_LOCAL_BIT) {
                if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL) {
                    vuid = "VUID-VkSubpassDependency-dependencyFlags-02520";
                } else {  // dependency.dstSubpass == VK_SUBPASS_EXTERNAL
                    vuid = "VUID-VkSubpassDependency-dependencyFlags-02521";
                }
                if (use_rp2) {
                    // Create render pass 2 distinguishes between source and destination external dependencies.
                    if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL) {
                        vuid = "VUID-VkSubpassDependency2KHR-dependencyFlags-03090";
                    } else {
                        vuid = "VUID-VkSubpassDependency2KHR-dependencyFlags-03091";
                    }
                }
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
                            "Dependency %u specifies an external dependency but also specifies VK_DEPENDENCY_VIEW_LOCAL_BIT.", i);
            }
        } else if (dependency.srcSubpass > dependency.dstSubpass) {
            vuid = use_rp2 ? "VUID-VkSubpassDependency2KHR-srcSubpass-03084" : "VUID-VkSubpassDependency-srcSubpass-00864";
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
                            "Dependency %u specifies a dependency from a later subpass (%u) to an earlier subpass (%u), which is "
                            "disallowed to prevent cyclic dependencies.",
                            i, dependency.srcSubpass, dependency.dstSubpass);
        } else if (dependency.srcSubpass == dependency.dstSubpass) {
            if (dependency.viewOffset != 0) {
                vuid = use_rp2 ? kVUID_Core_DrawState_InvalidRenderpass : "VUID-VkRenderPassCreateInfo-pNext-01930";
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                vuid, "Dependency %u specifies a self-dependency but has a non-zero view offset of %d.", i,
                                dependency.viewOffset);
            } else if (!(dependency.dependencyFlags & VK_DEPENDENCY_VIEW_LOCAL_BIT) &&
                       pCreateInfo->pSubpasses[dependency.srcSubpass].viewMask > 1) {
                vuid =
                    use_rp2 ? "VUID-VkRenderPassCreateInfo2KHR-pDependencies-03060" : "VUID-VkSubpassDependency-srcSubpass-00872";
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
                            "Dependency %u specifies a self-dependency for subpass %u with a non-zero view mask, but does not "
                            "specify VK_DEPENDENCY_VIEW_LOCAL_BIT.",
                            i, dependency.srcSubpass);
            } else if ((HasNonFramebufferStagePipelineStageFlags(dependency.srcStageMask) ||
                        HasNonFramebufferStagePipelineStageFlags(dependency.dstStageMask)) &&
                       (GetGraphicsPipelineStageLogicalOrdinal(latest_src_stage) >
                        GetGraphicsPipelineStageLogicalOrdinal(earliest_dst_stage))) {
                vuid = use_rp2 ? "VUID-VkSubpassDependency2KHR-srcSubpass-03087" : "VUID-VkSubpassDependency-srcSubpass-00867";
                skip |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
                    "Dependency %u specifies a self-dependency from a logically-later stage (%s) to a logically-earlier stage "
                    "(%s).",
                    i, string_VkPipelineStageFlagBits(latest_src_stage), string_VkPipelineStageFlagBits(earliest_dst_stage));
            } else {
                self_dependencies[dependency.srcSubpass].push_back(i);
            }
        } else {
            subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass);
            subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass);
        }
    }
    return skip;
}

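// A self-dependency that passes every branch above (illustrative): the classic feedback-loop
// case where a subpass's color-attachment writes are made available to its own input-attachment
// reads. Both stage masks contain only framebuffer-space stages, so the logical-ordering
// restriction does not apply, and the dependency is recorded in self_dependencies:
//
//     VkSubpassDependency self_dep = {};
//     self_dep.srcSubpass = 0;
//     self_dep.dstSubpass = 0;  // same subpass
//     self_dep.srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
//     self_dep.dstStageMask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
//     self_dep.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
//     self_dep.dstAccessMask = VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
//     self_dep.dependencyFlags = VK_DEPENDENCY_BY_REGION_BIT;
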
bool CoreChecks::ValidateAttachmentIndex(const layer_data *dev_data, RenderPassCreateVersion rp_version, uint32_t attachment,
                                         uint32_t attachment_count, const char *type) {
    bool skip = false;
    const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
    const char *const function_name = use_rp2 ? "vkCreateRenderPass2KHR()" : "vkCreateRenderPass()";

    if (attachment >= attachment_count && attachment != VK_ATTACHMENT_UNUSED) {
        const char *vuid =
            use_rp2 ? "VUID-VkRenderPassCreateInfo2KHR-attachment-03051" : "VUID-VkRenderPassCreateInfo-attachment-00834";
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
                        "%s: %s attachment %u must be less than the total number of attachments %u.", function_name, type,
                        attachment, attachment_count);
    }
    return skip;
}

enum AttachmentType {
    ATTACHMENT_COLOR = 1,
    ATTACHMENT_DEPTH = 2,
    ATTACHMENT_INPUT = 4,
    ATTACHMENT_PRESERVE = 8,
    ATTACHMENT_RESOLVE = 16,
};

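// These enumerants are power-of-two bit flags: AddAttachmentUse below ORs them together as a
// subpass's uses of one attachment accumulate. For example (illustrative), an attachment used
// as both input and color carries uses == (ATTACHMENT_INPUT | ATTACHMENT_COLOR) == 5, for which
// StringAttachmentType falls through to its default case and reports "(multiple)".
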
char const *StringAttachmentType(uint8_t type) {
    switch (type) {
        case ATTACHMENT_COLOR:
            return "color";
        case ATTACHMENT_DEPTH:
            return "depth";
        case ATTACHMENT_INPUT:
            return "input";
        case ATTACHMENT_PRESERVE:
            return "preserve";
        case ATTACHMENT_RESOLVE:
            return "resolve";
        default:
            return "(multiple)";
    }
}

bool CoreChecks::AddAttachmentUse(const layer_data *dev_data, RenderPassCreateVersion rp_version, uint32_t subpass,
                                  std::vector<uint8_t> &attachment_uses, std::vector<VkImageLayout> &attachment_layouts,
                                  uint32_t attachment, uint8_t new_use, VkImageLayout new_layout) {
    if (attachment >= attachment_uses.size()) return false; /* out of range, but already reported */

    bool skip = false;
    auto &uses = attachment_uses[attachment];
    const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
    const char *vuid;
    const char *const function_name = use_rp2 ? "vkCreateRenderPass2KHR()" : "vkCreateRenderPass()";

    if (uses & new_use) {
        if (attachment_layouts[attachment] != new_layout) {
            vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-layout-02528" : "VUID-VkSubpassDescription-layout-02519";
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
                            "%s: subpass %u already uses attachment %u with a different image layout (%s vs %s).", function_name,
                            subpass, attachment, string_VkImageLayout(attachment_layouts[attachment]),
                            string_VkImageLayout(new_layout));
        }
    } else if (uses & ~ATTACHMENT_INPUT || (uses && (new_use == ATTACHMENT_RESOLVE || new_use == ATTACHMENT_PRESERVE))) {
        /* Note: input attachments are assumed to be done first. */
        vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-pPreserveAttachments-03074"
                       : "VUID-VkSubpassDescription-pPreserveAttachments-00854";
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
                        "%s: subpass %u uses attachment %u as both %s and %s attachment.", function_name, subpass, attachment,
                        StringAttachmentType(uses), StringAttachmentType(new_use));
    } else {
        attachment_layouts[attachment] = new_layout;
        uses |= new_use;
    }

    return skip;
}

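// Example of a combination AddAttachmentUse rejects (illustrative): a subpass that lists
// attachment 0 both in pColorAttachments and in pPreserveAttachments. The color use records
// uses == ATTACHMENT_COLOR first; the preserve use then lands in the "uses & ~ATTACHMENT_INPUT"
// branch and triggers "VUID-VkSubpassDescription-pPreserveAttachments-00854" ("... uses
// attachment 0 as both color and preserve attachment."). Input + color on the same attachment,
// by contrast, is a legal feedback arrangement and only accumulates use bits.
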
bool CoreChecks::ValidateRenderpassAttachmentUsage(const layer_data *dev_data, RenderPassCreateVersion rp_version,
                                                   const VkRenderPassCreateInfo2KHR *pCreateInfo) {
    bool skip = false;
    const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
    const char *vuid;
    const char *const function_name = use_rp2 ? "vkCreateRenderPass2KHR()" : "vkCreateRenderPass()";

    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription2KHR &subpass = pCreateInfo->pSubpasses[i];
        std::vector<uint8_t> attachment_uses(pCreateInfo->attachmentCount);
        std::vector<VkImageLayout> attachment_layouts(pCreateInfo->attachmentCount);

        if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS) {
            vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-pipelineBindPoint-03062"
                           : "VUID-VkSubpassDescription-pipelineBindPoint-00844";
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
                            "%s: Pipeline bind point for subpass %d must be VK_PIPELINE_BIND_POINT_GRAPHICS.", function_name, i);
        }

        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            auto const &attachment_ref = subpass.pInputAttachments[j];
            if (attachment_ref.attachment != VK_ATTACHMENT_UNUSED) {
                skip |=
                    ValidateAttachmentIndex(dev_data, rp_version, attachment_ref.attachment, pCreateInfo->attachmentCount, "Input");

                if (attachment_ref.aspectMask & VK_IMAGE_ASPECT_METADATA_BIT) {
                    vuid =
                        use_rp2 ? kVUID_Core_DrawState_InvalidRenderpass : "VUID-VkInputAttachmentAspectReference-aspectMask-01964";
                    skip |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
                        "%s: Aspect mask for input attachment reference %d in subpass %d includes VK_IMAGE_ASPECT_METADATA_BIT.",
                        function_name, j, i);
                }

                if (attachment_ref.attachment < pCreateInfo->attachmentCount) {
                    skip |= AddAttachmentUse(dev_data, rp_version, i, attachment_uses, attachment_layouts,
                                             attachment_ref.attachment, ATTACHMENT_INPUT, attachment_ref.layout);

                    vuid = use_rp2 ? kVUID_Core_DrawState_InvalidRenderpass : "VUID-VkRenderPassCreateInfo-pNext-01963";
                    skip |= ValidateImageAspectMask(dev_data, VK_NULL_HANDLE,
                                                    pCreateInfo->pAttachments[attachment_ref.attachment].format,
                                                    attachment_ref.aspectMask, function_name, vuid);
                }
            }
9609             if (rp_version == RENDER_PASS_VERSION_2) {
9610                 // These are validated automatically as part of parameter validation for create renderpass 1
9611                 // as they are in a struct that only applies to input attachments - not so for v2.
9612 
9613                 // Check for 0
9614                 if (attachment_ref.aspectMask == 0) {
9615                     skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
9616                                     0, "VUID-VkSubpassDescription2KHR-aspectMask-03176",
9617                                     "%s:  Input attachment (%d) aspect mask must not be 0.", function_name, j);
9618                 } else {
9619                     const VkImageAspectFlags valid_bits =
9620                         (VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT |
9621                          VK_IMAGE_ASPECT_METADATA_BIT | VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT |
9622                          VK_IMAGE_ASPECT_PLANE_2_BIT);
9623 
9624                     // Check for valid aspect mask bits
9625                     if (attachment_ref.aspectMask & ~valid_bits) {
9626                         skip |=
9627                             log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
9628                                     0, "VUID-VkSubpassDescription2KHR-aspectMask-03175",
9629                                     "%s:  Input attachment (%d) aspect mask (0x%" PRIx32 ")is invalid.", function_name, j,
9630                                     attachment_ref.aspectMask);
9631                     }
9632                 }
9633             }
9634         }
9635 
9636         for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
9637             uint32_t attachment = subpass.pPreserveAttachments[j];
9638             if (attachment == VK_ATTACHMENT_UNUSED) {
9639                 vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-attachment-03073" : "VUID-VkSubpassDescription-attachment-00853";
9640                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
9641                                 vuid, "%s:  Preserve attachment (%d) must not be VK_ATTACHMENT_UNUSED.", function_name, j);
9642             } else {
9643                 skip |= ValidateAttachmentIndex(dev_data, rp_version, attachment, pCreateInfo->attachmentCount, "Preserve");
9644                 if (attachment < pCreateInfo->attachmentCount) {
9645                     skip |= AddAttachmentUse(dev_data, rp_version, i, attachment_uses, attachment_layouts, attachment,
9646                                              ATTACHMENT_PRESERVE, VkImageLayout(0) /* preserve doesn't have any layout */);
9647                 }
9648             }
9649         }
9650 
9651         bool subpass_performs_resolve = false;
9652 
9653         for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9654             if (subpass.pResolveAttachments) {
9655                 auto const &attachment_ref = subpass.pResolveAttachments[j];
9656                 if (attachment_ref.attachment != VK_ATTACHMENT_UNUSED) {
9657                     skip |= ValidateAttachmentIndex(dev_data, rp_version, attachment_ref.attachment, pCreateInfo->attachmentCount,
9658                                                     "Resolve");
9659 
9660                     if (attachment_ref.attachment < pCreateInfo->attachmentCount) {
9661                         skip |= AddAttachmentUse(dev_data, rp_version, i, attachment_uses, attachment_layouts,
9662                                                  attachment_ref.attachment, ATTACHMENT_RESOLVE, attachment_ref.layout);
9663 
9664                         subpass_performs_resolve = true;
9665 
9666                         if (pCreateInfo->pAttachments[attachment_ref.attachment].samples != VK_SAMPLE_COUNT_1_BIT) {
9667                             vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-pResolveAttachments-03067"
9668                                            : "VUID-VkSubpassDescription-pResolveAttachments-00849";
9669                             skip |=
9670                                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9671                                         VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
9672                                         "%s:  Subpass %u requests multisample resolve into attachment %u, which must "
9673                                         "have VK_SAMPLE_COUNT_1_BIT but has %s.",
9674                                         function_name, i, attachment_ref.attachment,
9675                                         string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment_ref.attachment].samples));
9676                         }
9677                     }
9678                 }
9679             }
9680         }
9681 
9682         if (subpass.pDepthStencilAttachment) {
9683             if (subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9684                 skip |= ValidateAttachmentIndex(dev_data, rp_version, subpass.pDepthStencilAttachment->attachment,
9685                                                 pCreateInfo->attachmentCount, "Depth");
9686                 if (subpass.pDepthStencilAttachment->attachment < pCreateInfo->attachmentCount) {
9687                     skip |= AddAttachmentUse(dev_data, rp_version, i, attachment_uses, attachment_layouts,
9688                                              subpass.pDepthStencilAttachment->attachment, ATTACHMENT_DEPTH,
9689                                              subpass.pDepthStencilAttachment->layout);
9690                 }
9691             }
9692         }
9693 
9694         uint32_t last_sample_count_attachment = VK_ATTACHMENT_UNUSED;
9695         for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9696             auto const &attachment_ref = subpass.pColorAttachments[j];
9697             skip |= ValidateAttachmentIndex(dev_data, rp_version, attachment_ref.attachment, pCreateInfo->attachmentCount, "Color");
9698             if (attachment_ref.attachment != VK_ATTACHMENT_UNUSED && attachment_ref.attachment < pCreateInfo->attachmentCount) {
9699                 skip |= AddAttachmentUse(dev_data, rp_version, i, attachment_uses, attachment_layouts, attachment_ref.attachment,
9700                                          ATTACHMENT_COLOR, attachment_ref.layout);
9701 
9702                 VkSampleCountFlagBits current_sample_count = pCreateInfo->pAttachments[attachment_ref.attachment].samples;
9703                 if (last_sample_count_attachment != VK_ATTACHMENT_UNUSED) {
9704                     VkSampleCountFlagBits last_sample_count =
9705                         pCreateInfo->pAttachments[subpass.pColorAttachments[last_sample_count_attachment].attachment].samples;
9706                     if (current_sample_count != last_sample_count) {
9707                         vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-pColorAttachments-03069"
9708                                        : "VUID-VkSubpassDescription-pColorAttachments-01417";
9709                         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9710                                         VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
9711                                         "%s:  Subpass %u attempts to render to color attachments with inconsistent sample counts."
9712                                         "Color attachment ref %u has sample count %s, whereas previous color attachment ref %u has "
9713                                         "sample count %s.",
9714                                         function_name, i, j, string_VkSampleCountFlagBits(current_sample_count),
9715                                         last_sample_count_attachment, string_VkSampleCountFlagBits(last_sample_count));
9716                     }
9717                 }
9718                 last_sample_count_attachment = j;
9719 
9720                 if (subpass_performs_resolve && current_sample_count == VK_SAMPLE_COUNT_1_BIT) {
9721                     vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-pResolveAttachments-03066"
9722                                    : "VUID-VkSubpassDescription-pResolveAttachments-00848";
9723                     skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
9724                                     0, vuid,
9725                                     "%s:  Subpass %u requests multisample resolve from attachment %u which has "
9726                                     "VK_SAMPLE_COUNT_1_BIT.",
9727                                     function_name, i, attachment_ref.attachment);
9728                 }
9729 
9730                 if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED &&
9731                     subpass.pDepthStencilAttachment->attachment < pCreateInfo->attachmentCount) {
9732                     const auto depth_stencil_sample_count =
9733                         pCreateInfo->pAttachments[subpass.pDepthStencilAttachment->attachment].samples;
9734 
9735                     if (dev_data->device_extensions.vk_amd_mixed_attachment_samples) {
9736                         if (pCreateInfo->pAttachments[attachment_ref.attachment].samples > depth_stencil_sample_count) {
9737                             vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-pColorAttachments-03070"
9738                                            : "VUID-VkSubpassDescription-pColorAttachments-01506";
9739                             skip |=
9740                                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9741                                         VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
9742                                         "%s:  Subpass %u pColorAttachments[%u] has %s which is larger than "
9743                                         "depth/stencil attachment %s.",
9744                                         function_name, i, j,
9745                                         string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment_ref.attachment].samples),
9746                                         string_VkSampleCountFlagBits(depth_stencil_sample_count));
9747                             break;
9748                         }
9749                     }
9750 
9751                     if (!dev_data->device_extensions.vk_amd_mixed_attachment_samples &&
9752                         !dev_data->device_extensions.vk_nv_framebuffer_mixed_samples &&
9753                         current_sample_count != depth_stencil_sample_count) {
9754                         vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-pDepthStencilAttachment-03071"
9755                                        : "VUID-VkSubpassDescription-pDepthStencilAttachment-01418";
9756                         skip |= log_msg(
9757                             dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
9758                             "%s:  Subpass %u attempts to render to use a depth/stencil attachment with sample count that differs "
9759                             "from color attachment %u."
9760                             "The depth attachment ref has sample count %s, whereas color attachment ref %u has sample count %s.",
9761                             function_name, i, j, string_VkSampleCountFlagBits(depth_stencil_sample_count), j,
9762                             string_VkSampleCountFlagBits(current_sample_count));
9763                         break;
9764                     }
9765                 }
9766             }
9767 
9768             if (subpass_performs_resolve && subpass.pResolveAttachments[j].attachment != VK_ATTACHMENT_UNUSED &&
9769                 subpass.pResolveAttachments[j].attachment < pCreateInfo->attachmentCount) {
9770                 if (attachment_ref.attachment == VK_ATTACHMENT_UNUSED) {
9771                     vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-pResolveAttachments-03065"
9772                                    : "VUID-VkSubpassDescription-pResolveAttachments-00847";
9773                     skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
9774                                     0, vuid,
9775                                     "%s:  Subpass %u requests multisample resolve from attachment %u which has "
9776                                     "attachment=VK_ATTACHMENT_UNUSED.",
9777                                     function_name, i, attachment_ref.attachment);
9778                 } else {
9779                     const auto &color_desc = pCreateInfo->pAttachments[attachment_ref.attachment];
9780                     const auto &resolve_desc = pCreateInfo->pAttachments[subpass.pResolveAttachments[j].attachment];
9781                     if (color_desc.format != resolve_desc.format) {
9782                         vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-pResolveAttachments-03068"
9783                                        : "VUID-VkSubpassDescription-pResolveAttachments-00850";
9784                         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9785                                         VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
9786                                         "%s:  Subpass %u pColorAttachments[%u] resolves to an attachment with a "
9787                                         "different format. color format: %u, resolve format: %u.",
9788                                         function_name, i, j, color_desc.format, resolve_desc.format);
9789                     }
9790                 }
9791             }
9792         }
9793     }
9794     return skip;
9795 }
9796 
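// Record whether the first reference to an attachment in this render pass reads it (input attachment) or writes it
// (color/resolve/depth-stencil attachment); only the first use encountered is recorded.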
static void MarkAttachmentFirstUse(RENDER_PASS_STATE *render_pass, uint32_t index, bool is_read) {
    if (index == VK_ATTACHMENT_UNUSED) return;

    if (!render_pass->attachment_first_read.count(index)) render_pass->attachment_first_read[index] = is_read;
}

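// Shared create-time validation for vkCreateRenderPass and vkCreateRenderPass2KHR: checks attachment usage, the
// subpass dependency DAG, multiview view/correlation masks, and stage/access mask compatibility of dependencies.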
bool CoreChecks::ValidateCreateRenderPass(layer_data *dev_data, VkDevice device, RenderPassCreateVersion rp_version,
                                          const VkRenderPassCreateInfo2KHR *pCreateInfo, RENDER_PASS_STATE *render_pass) {
    bool skip = false;
    const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
    const char *vuid;
    const char *const function_name = use_rp2 ? "vkCreateRenderPass2KHR()" : "vkCreateRenderPass()";

    // TODO: As part of wrapping up the mem_tracker/core_validation merge the following routine should be consolidated with
    //       ValidateLayouts.
    skip |= ValidateRenderpassAttachmentUsage(dev_data, rp_version, pCreateInfo);

    render_pass->renderPass = VK_NULL_HANDLE;
    skip |= ValidateRenderPassDAG(dev_data, rp_version, pCreateInfo, render_pass);

    // Validate multiview correlation and view masks
    bool viewMaskZero = false;
    bool viewMaskNonZero = false;

    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription2KHR &subpass = pCreateInfo->pSubpasses[i];
        if (subpass.viewMask != 0) {
            viewMaskNonZero = true;
        } else {
            viewMaskZero = true;
        }

        if ((subpass.flags & VK_SUBPASS_DESCRIPTION_PER_VIEW_POSITION_X_ONLY_BIT_NVX) != 0 &&
            (subpass.flags & VK_SUBPASS_DESCRIPTION_PER_VIEW_ATTRIBUTES_BIT_NVX) == 0) {
            vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-flags-03076" : "VUID-VkSubpassDescription-flags-00856";
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
                            "%s: The flags parameter of subpass description %u includes "
                            "VK_SUBPASS_DESCRIPTION_PER_VIEW_POSITION_X_ONLY_BIT_NVX but does not also include "
                            "VK_SUBPASS_DESCRIPTION_PER_VIEW_ATTRIBUTES_BIT_NVX.",
                            function_name, i);
        }
    }

    if (rp_version == RENDER_PASS_VERSION_2) {
        if (viewMaskNonZero && viewMaskZero) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            "VUID-VkRenderPassCreateInfo2KHR-viewMask-03058",
                            "%s: Some view masks are non-zero whilst others are zero.", function_name);
        }

        if (viewMaskZero && pCreateInfo->correlatedViewMaskCount != 0) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            "VUID-VkRenderPassCreateInfo2KHR-viewMask-03057",
                            "%s: Multiview is not enabled but correlation masks are still provided.", function_name);
        }
    }

    uint32_t aggregated_cvms = 0;
    for (uint32_t i = 0; i < pCreateInfo->correlatedViewMaskCount; ++i) {
        if (aggregated_cvms & pCreateInfo->pCorrelatedViewMasks[i]) {
            vuid = use_rp2 ? "VUID-VkRenderPassCreateInfo2KHR-pCorrelatedViewMasks-03056"
                           : "VUID-VkRenderPassMultiviewCreateInfo-pCorrelationMasks-00841";
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
                            "%s: pCorrelatedViewMasks[%u] contains a previously appearing view bit.", function_name, i);
        }
        aggregated_cvms |= pCreateInfo->pCorrelatedViewMasks[i];
    }

    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
        auto const &dependency = pCreateInfo->pDependencies[i];
        if (rp_version == RENDER_PASS_VERSION_2) {
            skip |= ValidateStageMaskGsTsEnables(
                dev_data, dependency.srcStageMask, function_name, "VUID-VkSubpassDependency2KHR-srcStageMask-03080",
                "VUID-VkSubpassDependency2KHR-srcStageMask-03082", "VUID-VkSubpassDependency2KHR-srcStageMask-02103",
                "VUID-VkSubpassDependency2KHR-srcStageMask-02104");
            skip |= ValidateStageMaskGsTsEnables(
                dev_data, dependency.dstStageMask, function_name, "VUID-VkSubpassDependency2KHR-dstStageMask-03081",
                "VUID-VkSubpassDependency2KHR-dstStageMask-03083", "VUID-VkSubpassDependency2KHR-dstStageMask-02105",
                "VUID-VkSubpassDependency2KHR-dstStageMask-02106");
        } else {
            skip |= ValidateStageMaskGsTsEnables(
                dev_data, dependency.srcStageMask, function_name, "VUID-VkSubpassDependency-srcStageMask-00860",
                "VUID-VkSubpassDependency-srcStageMask-00862", "VUID-VkSubpassDependency-srcStageMask-02099",
                "VUID-VkSubpassDependency-srcStageMask-02100");
            skip |= ValidateStageMaskGsTsEnables(
                dev_data, dependency.dstStageMask, function_name, "VUID-VkSubpassDependency-dstStageMask-00861",
                "VUID-VkSubpassDependency-dstStageMask-00863", "VUID-VkSubpassDependency-dstStageMask-02101",
                "VUID-VkSubpassDependency-dstStageMask-02102");
        }

        if (!ValidateAccessMaskPipelineStage(dev_data->device_extensions, dependency.srcAccessMask, dependency.srcStageMask)) {
            vuid = use_rp2 ? "VUID-VkSubpassDependency2KHR-srcAccessMask-03088" : "VUID-VkSubpassDependency-srcAccessMask-00868";
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
                            "%s: pDependencies[%u].srcAccessMask (0x%" PRIx32 ") is not supported by srcStageMask (0x%" PRIx32 ").",
                            function_name, i, dependency.srcAccessMask, dependency.srcStageMask);
        }

        if (!ValidateAccessMaskPipelineStage(dev_data->device_extensions, dependency.dstAccessMask, dependency.dstStageMask)) {
            vuid = use_rp2 ? "VUID-VkSubpassDependency2KHR-dstAccessMask-03089" : "VUID-VkSubpassDependency-dstAccessMask-00869";
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
                            "%s: pDependencies[%u].dstAccessMask (0x%" PRIx32 ") is not supported by dstStageMask (0x%" PRIx32 ").",
                            function_name, i, dependency.dstAccessMask, dependency.dstStageMask);
        }
    }
    if (!skip) {
        skip |= ValidateLayouts(dev_data, rp_version, device, pCreateInfo);
    }
    return skip;
}

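// For render pass 1, the VkRenderPassMultiviewCreateInfo and VkRenderPassInputAttachmentAspectCreateInfo pNext
// structures carry subpass/attachment indices that parameter validation cannot bounds-check, so they are checked
// here before the shared create-time validation runs.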
bool CoreChecks::PreCallValidateCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
                                                 const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    bool skip = false;
    // Handle extension structs from KHR_multiview and KHR_maintenance2 that can only be validated for RP1 (indices out of bounds)
    const VkRenderPassMultiviewCreateInfo *pMultiviewInfo = lvl_find_in_chain<VkRenderPassMultiviewCreateInfo>(pCreateInfo->pNext);
    if (pMultiviewInfo) {
        if (pMultiviewInfo->subpassCount && pMultiviewInfo->subpassCount != pCreateInfo->subpassCount) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            "VUID-VkRenderPassCreateInfo-pNext-01928",
                            "Subpass count is %u but multiview info has a subpass count of %u.", pCreateInfo->subpassCount,
                            pMultiviewInfo->subpassCount);
        } else if (pMultiviewInfo->dependencyCount && pMultiviewInfo->dependencyCount != pCreateInfo->dependencyCount) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            "VUID-VkRenderPassCreateInfo-pNext-01929",
                            "Dependency count is %u but multiview info has a dependency count of %u.", pCreateInfo->dependencyCount,
                            pMultiviewInfo->dependencyCount);
        }
    }
    const VkRenderPassInputAttachmentAspectCreateInfo *pInputAttachmentAspectInfo =
        lvl_find_in_chain<VkRenderPassInputAttachmentAspectCreateInfo>(pCreateInfo->pNext);
    if (pInputAttachmentAspectInfo) {
        for (uint32_t i = 0; i < pInputAttachmentAspectInfo->aspectReferenceCount; ++i) {
            uint32_t subpass = pInputAttachmentAspectInfo->pAspectReferences[i].subpass;
            uint32_t attachment = pInputAttachmentAspectInfo->pAspectReferences[i].inputAttachmentIndex;
            if (subpass >= pCreateInfo->subpassCount) {
                skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                "VUID-VkRenderPassCreateInfo-pNext-01926",
                                "Subpass index %u specified by input attachment aspect info %u is greater than the subpass "
                                "count of %u for this render pass.",
                                subpass, i, pCreateInfo->subpassCount);
            } else if (pCreateInfo->pSubpasses && attachment >= pCreateInfo->pSubpasses[subpass].inputAttachmentCount) {
                skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                "VUID-VkRenderPassCreateInfo-pNext-01927",
                                "Input attachment index %u specified by input attachment aspect info %u is greater than the "
                                "input attachment count of %u for this subpass.",
                                attachment, i, pCreateInfo->pSubpasses[subpass].inputAttachmentCount);
            }
        }
    }

    if (!skip) {
        auto render_pass = std::unique_ptr<RENDER_PASS_STATE>(new RENDER_PASS_STATE(pCreateInfo));
        skip |=
            ValidateCreateRenderPass(device_data, device, RENDER_PASS_VERSION_1, render_pass->createInfo.ptr(), render_pass.get());
    }

    return skip;
}

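// Record state for a successfully created render pass: store the handle, rebuild the subpass dependency DAG, and
// mark each attachment's first use before moving the state object into the device's render pass map.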
void RecordCreateRenderPassState(layer_data *device_data, RenderPassCreateVersion rp_version,
                                 std::shared_ptr<RENDER_PASS_STATE> &render_pass, VkRenderPass *pRenderPass) {
    render_pass->renderPass = *pRenderPass;
    auto create_info = render_pass->createInfo.ptr();

    RecordRenderPassDAG(device_data, rp_version, create_info, render_pass.get());

    for (uint32_t i = 0; i < create_info->subpassCount; ++i) {
        const VkSubpassDescription2KHR &subpass = create_info->pSubpasses[i];
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            MarkAttachmentFirstUse(render_pass.get(), subpass.pColorAttachments[j].attachment, false);

            // resolve attachments are considered to be written
            if (subpass.pResolveAttachments) {
                MarkAttachmentFirstUse(render_pass.get(), subpass.pResolveAttachments[j].attachment, false);
            }
        }
        if (subpass.pDepthStencilAttachment) {
            MarkAttachmentFirstUse(render_pass.get(), subpass.pDepthStencilAttachment->attachment, false);
        }
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            MarkAttachmentFirstUse(render_pass.get(), subpass.pInputAttachments[j].attachment, true);
        }
    }

    // render_pass is passed by non-const reference; std::move is still required so that the map entry is
    // move-assigned rather than copy-assigned (avoiding a shared_ptr refcount increment/decrement).
    device_data->renderPassMap[*pRenderPass] = std::move(render_pass);
}

// Style note:
// Taking the shared_ptr by non-const reference (rather than a pointer to shared_ptr) exceeds the recommended usage of
// references in the Google style guide, but it intentionally lets RecordCreateRenderPassState consume the caller's
// pointer via move assignment, avoiding the atomic increment/decrement of shared_ptr copy construction or assignment.
void CoreChecks::PostCallRecordCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
                                                const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass,
                                                VkResult result) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (VK_SUCCESS != result) return;
    auto render_pass_state = std::make_shared<RENDER_PASS_STATE>(pCreateInfo);
    RecordCreateRenderPassState(device_data, RENDER_PASS_VERSION_1, render_pass_state, pRenderPass);
}

void CoreChecks::PostCallRecordCreateRenderPass2KHR(VkDevice device, const VkRenderPassCreateInfo2KHR *pCreateInfo,
                                                    const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass,
                                                    VkResult result) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (VK_SUCCESS != result) return;
    auto render_pass_state = std::make_shared<RENDER_PASS_STATE>(pCreateInfo);
    RecordCreateRenderPassState(device_data, RENDER_PASS_VERSION_2, render_pass_state, pRenderPass);
}

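// Check the VK_KHR_depth_stencil_resolve rules for any VkSubpassDescriptionDepthStencilResolveKHR chained to a
// subpass: the resolve attachment must pair with a valid multisampled depth/stencil attachment, resolve into a
// single-sampled attachment whose depth/stencil components match in size and numerical type, and use resolve modes
// the device reports as supported.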
static bool ValidateDepthStencilResolve(const debug_report_data *report_data,
                                        const VkPhysicalDeviceDepthStencilResolvePropertiesKHR &depth_stencil_resolve_props,
                                        const VkRenderPassCreateInfo2KHR *pCreateInfo) {
    bool skip = false;

    // If the pNext list of VkSubpassDescription2KHR includes a VkSubpassDescriptionDepthStencilResolveKHR structure,
    // then that structure describes depth/stencil resolve operations for the subpass.
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; i++) {
        const VkSubpassDescription2KHR &subpass = pCreateInfo->pSubpasses[i];
        const auto *resolve = lvl_find_in_chain<VkSubpassDescriptionDepthStencilResolveKHR>(subpass.pNext);

        if (resolve == nullptr) {
            continue;
        }

        // With no resolve attachment reference the structure requests no resolve operation; the checks below all
        // dereference pDepthStencilResolveAttachment, so skip them.
        if (resolve->pDepthStencilResolveAttachment == nullptr ||
            resolve->pDepthStencilResolveAttachment->attachment == VK_ATTACHMENT_UNUSED) {
            continue;
        }

        const bool has_depth_stencil_attachment =
            (subpass.pDepthStencilAttachment != nullptr && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED);

        if (!has_depth_stencil_attachment) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            "VUID-VkSubpassDescriptionDepthStencilResolveKHR-pDepthStencilResolveAttachment-03177",
                            "vkCreateRenderPass2KHR(): Subpass %u includes a VkSubpassDescriptionDepthStencilResolveKHR "
                            "structure with resolve attachment %u, but pDepthStencilAttachment is NULL or has "
                            "attachment=VK_ATTACHMENT_UNUSED.",
                            i, resolve->pDepthStencilResolveAttachment->attachment);
        }
        if (resolve->depthResolveMode == VK_RESOLVE_MODE_NONE_KHR && resolve->stencilResolveMode == VK_RESOLVE_MODE_NONE_KHR) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            "VUID-VkSubpassDescriptionDepthStencilResolveKHR-pDepthStencilResolveAttachment-03178",
                            "vkCreateRenderPass2KHR(): Subpass %u includes a VkSubpassDescriptionDepthStencilResolveKHR "
                            "structure with resolve attachment %u, but both depth and stencil resolve modes are "
                            "VK_RESOLVE_MODE_NONE_KHR.",
                            i, resolve->pDepthStencilResolveAttachment->attachment);
        }

        if (has_depth_stencil_attachment &&
            pCreateInfo->pAttachments[subpass.pDepthStencilAttachment->attachment].samples == VK_SAMPLE_COUNT_1_BIT) {
            skip |= log_msg(
                report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                "VUID-VkSubpassDescriptionDepthStencilResolveKHR-pDepthStencilResolveAttachment-03179",
                "vkCreateRenderPass2KHR(): Subpass %u includes a VkSubpassDescriptionDepthStencilResolveKHR "
                "structure with resolve attachment %u. However pDepthStencilAttachment has sample count=VK_SAMPLE_COUNT_1_BIT.",
                i, resolve->pDepthStencilResolveAttachment->attachment);
        }

        if (pCreateInfo->pAttachments[resolve->pDepthStencilResolveAttachment->attachment].samples != VK_SAMPLE_COUNT_1_BIT) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            "VUID-VkSubpassDescriptionDepthStencilResolveKHR-pDepthStencilResolveAttachment-03180",
                            "vkCreateRenderPass2KHR(): Subpass %u includes a VkSubpassDescriptionDepthStencilResolveKHR "
                            "structure with resolve attachment %u which has sample count=VK_SAMPLE_COUNT_1_BIT.",
                            i, resolve->pDepthStencilResolveAttachment->attachment);
        }

        VkFormat pDepthStencilResolveAttachmentFormat =
            pCreateInfo->pAttachments[resolve->pDepthStencilResolveAttachment->attachment].format;

        if (has_depth_stencil_attachment) {
            VkFormat pDepthStencilAttachmentFormat =
                pCreateInfo->pAttachments[subpass.pDepthStencilAttachment->attachment].format;

            if ((FormatDepthSize(pDepthStencilAttachmentFormat) != FormatDepthSize(pDepthStencilResolveAttachmentFormat)) ||
                (FormatDepthNumericalType(pDepthStencilAttachmentFormat) !=
                 FormatDepthNumericalType(pDepthStencilResolveAttachmentFormat))) {
                skip |= log_msg(
                    report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                    "VUID-VkSubpassDescriptionDepthStencilResolveKHR-pDepthStencilResolveAttachment-03181",
                    "vkCreateRenderPass2KHR(): Subpass %u includes a VkSubpassDescriptionDepthStencilResolveKHR "
                    "structure with resolve attachment %u which has a depth component (size %u). The depth component "
                    "of pDepthStencilAttachment must have the same number of bits (currently %u) and the same numerical type.",
                    i, resolve->pDepthStencilResolveAttachment->attachment,
                    FormatDepthSize(pDepthStencilResolveAttachmentFormat), FormatDepthSize(pDepthStencilAttachmentFormat));
            }

            if ((FormatStencilSize(pDepthStencilAttachmentFormat) != FormatStencilSize(pDepthStencilResolveAttachmentFormat)) ||
                (FormatStencilNumericalType(pDepthStencilAttachmentFormat) !=
                 FormatStencilNumericalType(pDepthStencilResolveAttachmentFormat))) {
                skip |= log_msg(
                    report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                    "VUID-VkSubpassDescriptionDepthStencilResolveKHR-pDepthStencilResolveAttachment-03182",
                    "vkCreateRenderPass2KHR(): Subpass %u includes a VkSubpassDescriptionDepthStencilResolveKHR "
                    "structure with resolve attachment %u which has a stencil component (size %u). The stencil component "
                    "of pDepthStencilAttachment must have the same number of bits (currently %u) and the same numerical type.",
                    i, resolve->pDepthStencilResolveAttachment->attachment,
                    FormatStencilSize(pDepthStencilResolveAttachmentFormat), FormatStencilSize(pDepthStencilAttachmentFormat));
            }
        }

        if (!(resolve->depthResolveMode == VK_RESOLVE_MODE_NONE_KHR ||
              resolve->depthResolveMode & depth_stencil_resolve_props.supportedDepthResolveModes)) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            "VUID-VkSubpassDescriptionDepthStencilResolveKHR-depthResolveMode-03183",
                            "vkCreateRenderPass2KHR(): Subpass %u includes a VkSubpassDescriptionDepthStencilResolveKHR "
                            "structure with invalid depthResolveMode=%u.",
                            i, resolve->depthResolveMode);
        }

        if (!(resolve->stencilResolveMode == VK_RESOLVE_MODE_NONE_KHR ||
              resolve->stencilResolveMode & depth_stencil_resolve_props.supportedStencilResolveModes)) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            "VUID-VkSubpassDescriptionDepthStencilResolveKHR-stencilResolveMode-03184",
                            "vkCreateRenderPass2KHR(): Subpass %u includes a VkSubpassDescriptionDepthStencilResolveKHR "
                            "structure with invalid stencilResolveMode=%u.",
                            i, resolve->stencilResolveMode);
        }

        if (FormatIsDepthAndStencil(pDepthStencilResolveAttachmentFormat) &&
            depth_stencil_resolve_props.independentResolve == VK_FALSE &&
            depth_stencil_resolve_props.independentResolveNone == VK_FALSE &&
            !(resolve->depthResolveMode == resolve->stencilResolveMode)) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            "VUID-VkSubpassDescriptionDepthStencilResolveKHR-pDepthStencilResolveAttachment-03185",
                            "vkCreateRenderPass2KHR(): Subpass %u includes a VkSubpassDescriptionDepthStencilResolveKHR "
                            "structure. The values of depthResolveMode (%u) and stencilResolveMode (%u) must be identical.",
                            i, resolve->depthResolveMode, resolve->stencilResolveMode);
        }

        if (FormatIsDepthAndStencil(pDepthStencilResolveAttachmentFormat) &&
            depth_stencil_resolve_props.independentResolve == VK_FALSE &&
            depth_stencil_resolve_props.independentResolveNone == VK_TRUE &&
            !(resolve->depthResolveMode == resolve->stencilResolveMode || resolve->depthResolveMode == VK_RESOLVE_MODE_NONE_KHR ||
              resolve->stencilResolveMode == VK_RESOLVE_MODE_NONE_KHR)) {
            skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            "VUID-VkSubpassDescriptionDepthStencilResolveKHR-pDepthStencilResolveAttachment-03186",
                            "vkCreateRenderPass2KHR(): Subpass %u includes a VkSubpassDescriptionDepthStencilResolveKHR "
                            "structure. The values of depthResolveMode (%u) and stencilResolveMode (%u) must be identical, or "
                            "one of them must be %u.",
                            i, resolve->depthResolveMode, resolve->stencilResolveMode, VK_RESOLVE_MODE_NONE_KHR);
        }
    }

    return skip;
}

bool CoreChecks::PreCallValidateCreateRenderPass2KHR(VkDevice device, const VkRenderPassCreateInfo2KHR *pCreateInfo,
                                                     const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;

    if (GetDeviceExtensions()->vk_khr_depth_stencil_resolve) {
        skip |= ValidateDepthStencilResolve(device_data->report_data, device_data->phys_dev_ext_props.depth_stencil_resolve_props,
                                            pCreateInfo);
    }

    auto render_pass = std::make_shared<RENDER_PASS_STATE>(pCreateInfo);
    skip |= ValidateCreateRenderPass(device_data, device, RENDER_PASS_VERSION_2, render_pass->createInfo.ptr(), render_pass.get());

    return skip;
}

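// Several vkCmd* entry points below are legal only in primary command buffers; callers pass the VUID to report when
// the command buffer was allocated at the secondary level.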
bool CoreChecks::ValidatePrimaryCommandBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, char const *cmd_name,
                                              const char *error_code) {
    bool skip = false;
    if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(pCB->commandBuffer), error_code, "Cannot execute command %s on a secondary command buffer.",
                        cmd_name);
    }
    return skip;
}

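// For example (illustration only, not compiled): beginning a render pass on a 640x480 framebuffer with
//     VkRenderPassBeginInfo begin_info = {};
//     begin_info.renderArea.offset = {0, 0};
//     begin_info.renderArea.extent = {641, 480};  // one pixel wider than the framebuffer
// would be flagged by the renderArea bounds check below.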
bool CoreChecks::VerifyRenderAreaBounds(const layer_data *dev_data, const VkRenderPassBeginInfo *pRenderPassBegin) {
    bool skip = false;
    const safe_VkFramebufferCreateInfo *pFramebufferInfo = &GetFramebufferState(pRenderPassBegin->framebuffer)->createInfo;
    if (pRenderPassBegin->renderArea.offset.x < 0 ||
        (pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > pFramebufferInfo->width ||
        pRenderPassBegin->renderArea.offset.y < 0 ||
        (pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > pFramebufferInfo->height) {
        skip |= log_msg(
            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
            kVUID_Core_DrawState_InvalidRenderArea,
            "Cannot execute a render pass with renderArea not within the bounds of the framebuffer. RenderArea: x %d, y %d, width "
            "%d, height %d. Framebuffer: width %d, height %d.",
            pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width,
            pRenderPassBegin->renderArea.extent.height, pFramebufferInfo->width, pFramebufferInfo->height);
    }
    return skip;
}

// If this is a stencil format, make sure the stencil[Load|Store]Op flag is checked, while if it is a depth/color attachment the
// [load|store]Op flag must be checked
// TODO: The memory valid flag in DEVICE_MEM_INFO should probably be split to track the validity of stencil memory separately.
template <typename T>
static bool FormatSpecificLoadAndStoreOpSettings(VkFormat format, T color_depth_op, T stencil_op, T op) {
    if (color_depth_op != op && stencil_op != op) {
        return false;
    }
    bool check_color_depth_load_op = !FormatIsStencilOnly(format);
    bool check_stencil_load_op = FormatIsDepthAndStencil(format) || !check_color_depth_load_op;

    return ((check_color_depth_load_op && (color_depth_op == op)) || (check_stencil_load_op && (stencil_op == op)));
}

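// Validation common to vkCmdBeginRenderPass and vkCmdBeginRenderPass2KHR: sample-location indices, pClearValues
// coverage of every VK_ATTACHMENT_LOAD_OP_CLEAR attachment, renderArea bounds, framebuffer/render-pass
// compatibility, and command-buffer level, pool, and state requirements.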
bool CoreChecks::ValidateCmdBeginRenderPass(layer_data *device_data, VkCommandBuffer commandBuffer,
                                            RenderPassCreateVersion rp_version, const VkRenderPassBeginInfo *pRenderPassBegin) {
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    assert(cb_state);
    auto render_pass_state = pRenderPassBegin ? GetRenderPassState(pRenderPassBegin->renderPass) : nullptr;
    auto framebuffer = pRenderPassBegin ? GetFramebufferState(pRenderPassBegin->framebuffer) : nullptr;

    bool skip = false;
    const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
    const char *vuid;
    const char *const function_name = use_rp2 ? "vkCmdBeginRenderPass2KHR()" : "vkCmdBeginRenderPass()";

    if (render_pass_state) {
        uint32_t clear_op_size = 0;  // Make sure pClearValues is at least as large as last LOAD_OP_CLEAR

        // Handle extension struct from EXT_sample_locations
        const VkRenderPassSampleLocationsBeginInfoEXT *pSampleLocationsBeginInfo =
            lvl_find_in_chain<VkRenderPassSampleLocationsBeginInfoEXT>(pRenderPassBegin->pNext);
        if (pSampleLocationsBeginInfo) {
            for (uint32_t i = 0; i < pSampleLocationsBeginInfo->attachmentInitialSampleLocationsCount; ++i) {
                if (pSampleLocationsBeginInfo->pAttachmentInitialSampleLocations[i].attachmentIndex >=
                    render_pass_state->createInfo.attachmentCount) {
                    skip |=
                        log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                "VUID-VkAttachmentSampleLocationsEXT-attachmentIndex-01531",
                                "Attachment index %u specified by attachment sample locations %u is greater than the "
                                "attachment count of %u for the render pass being begun.",
                                pSampleLocationsBeginInfo->pAttachmentInitialSampleLocations[i].attachmentIndex, i,
                                render_pass_state->createInfo.attachmentCount);
                }
            }

            for (uint32_t i = 0; i < pSampleLocationsBeginInfo->postSubpassSampleLocationsCount; ++i) {
                if (pSampleLocationsBeginInfo->pPostSubpassSampleLocations[i].subpassIndex >=
                    render_pass_state->createInfo.subpassCount) {
                    skip |=
                        log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                "VUID-VkSubpassSampleLocationsEXT-subpassIndex-01532",
                                "Subpass index %u specified by subpass sample locations %u is greater than the subpass count "
                                "of %u for the render pass being begun.",
                                pSampleLocationsBeginInfo->pPostSubpassSampleLocations[i].subpassIndex, i,
                                render_pass_state->createInfo.subpassCount);
                }
            }
        }

        for (uint32_t i = 0; i < render_pass_state->createInfo.attachmentCount; ++i) {
            auto pAttachment = &render_pass_state->createInfo.pAttachments[i];
            if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp, pAttachment->stencilLoadOp,
                                                     VK_ATTACHMENT_LOAD_OP_CLEAR)) {
                clear_op_size = static_cast<uint32_t>(i) + 1;
            }
        }

        if (clear_op_size > pRenderPassBegin->clearValueCount) {
            skip |= log_msg(
                device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                HandleToUint64(render_pass_state->renderPass), "VUID-VkRenderPassBeginInfo-clearValueCount-00902",
                "In %s the VkRenderPassBeginInfo struct has a clearValueCount of %u, but there must be at least %u entries in "
                "the pClearValues array to account for the highest-indexed attachment in renderPass %s that uses "
                "VK_ATTACHMENT_LOAD_OP_CLEAR. Note that the pClearValues array is indexed by attachment number, so even if some "
                "of the first %u entries (indices 0 through %u) correspond to attachments that aren't cleared, they will be "
                "ignored.",
                function_name, pRenderPassBegin->clearValueCount, clear_op_size,
                device_data->report_data->FormatHandle(render_pass_state->renderPass).c_str(), clear_op_size, clear_op_size - 1);
        }
        skip |= VerifyRenderAreaBounds(device_data, pRenderPassBegin);
        skip |= VerifyFramebufferAndRenderPassLayouts(device_data, rp_version, cb_state, pRenderPassBegin, framebuffer);
        if (framebuffer->rp_state->renderPass != render_pass_state->renderPass) {
            skip |= ValidateRenderPassCompatibility(device_data, "render pass", render_pass_state, "framebuffer",
                                                    framebuffer->rp_state.get(), function_name,
                                                    "VUID-VkRenderPassBeginInfo-renderPass-00904");
        }

        vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2KHR-renderpass" : "VUID-vkCmdBeginRenderPass-renderpass";
        skip |= InsideRenderPass(device_data, cb_state, function_name, vuid);
        skip |= ValidateDependencies(device_data, framebuffer, render_pass_state);

        vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2KHR-bufferlevel" : "VUID-vkCmdBeginRenderPass-bufferlevel";
        skip |= ValidatePrimaryCommandBuffer(device_data, cb_state, function_name, vuid);

        vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2KHR-commandBuffer-cmdpool" : "VUID-vkCmdBeginRenderPass-commandBuffer-cmdpool";
        skip |= ValidateCmdQueueFlags(device_data, cb_state, function_name, VK_QUEUE_GRAPHICS_BIT, vuid);

        const CMD_TYPE cmd_type = use_rp2 ? CMD_BEGINRENDERPASS2KHR : CMD_BEGINRENDERPASS;
        skip |= ValidateCmd(device_data, cb_state, cmd_type, function_name);
    }
    return skip;
}

bool CoreChecks::PreCallValidateCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
                                                   VkSubpassContents contents) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skip = ValidateCmdBeginRenderPass(device_data, commandBuffer, RENDER_PASS_VERSION_1, pRenderPassBegin);
    return skip;
}

bool CoreChecks::PreCallValidateCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
                                                       const VkSubpassBeginInfoKHR *pSubpassBeginInfo) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skip = ValidateCmdBeginRenderPass(device_data, commandBuffer, RENDER_PASS_VERSION_2, pRenderPassBegin);
    return skip;
}

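// Update command-buffer state for the start of a render pass: track the active render pass, framebuffer, and
// subpass, bind the framebuffer and render pass to this command buffer, and transition attachment layouts for the
// first subpass.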
void CoreChecks::RecordCmdBeginRenderPassState(layer_data *device_data, VkCommandBuffer commandBuffer,
                                               const VkRenderPassBeginInfo *pRenderPassBegin, const VkSubpassContents contents) {
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    auto render_pass_state = pRenderPassBegin ? GetRenderPassState(pRenderPassBegin->renderPass) : nullptr;
    auto framebuffer = pRenderPassBegin ? GetFramebufferState(pRenderPassBegin->framebuffer) : nullptr;

    if (render_pass_state) {
        cb_state->activeFramebuffer = pRenderPassBegin->framebuffer;
        cb_state->activeRenderPass = render_pass_state;
        // This is a shallow copy as that is all that is needed for now
        cb_state->activeRenderPassBeginInfo = *pRenderPassBegin;
        cb_state->activeSubpass = 0;
        cb_state->activeSubpassContents = contents;
        cb_state->framebuffers.insert(pRenderPassBegin->framebuffer);
        // Connect this framebuffer and its children to this cmdBuffer
        AddFramebufferBinding(device_data, cb_state, framebuffer);
        // Connect this RP to cmdBuffer
        AddCommandBufferBinding(&render_pass_state->cb_bindings,
                                {HandleToUint64(render_pass_state->renderPass), kVulkanObjectTypeRenderPass}, cb_state);
        // transition attachments to the correct layouts for beginning of renderPass and first subpass
        TransitionBeginRenderPassLayouts(device_data, cb_state, render_pass_state, framebuffer);
    }
}

void CoreChecks::PreCallRecordCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
                                                 VkSubpassContents contents) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    RecordCmdBeginRenderPassState(device_data, commandBuffer, pRenderPassBegin, contents);
}

void CoreChecks::PreCallRecordCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
                                                     const VkSubpassBeginInfoKHR *pSubpassBeginInfo) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    RecordCmdBeginRenderPassState(device_data, commandBuffer, pRenderPassBegin, pSubpassBeginInfo->contents);
}

ValidateCmdNextSubpass(layer_data * device_data,RenderPassCreateVersion rp_version,VkCommandBuffer commandBuffer)10336 bool CoreChecks::ValidateCmdNextSubpass(layer_data *device_data, RenderPassCreateVersion rp_version,
10337                                         VkCommandBuffer commandBuffer) {
10338     GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
10339     assert(cb_state);
10340     bool skip = false;
10341     const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
10342     const char *vuid;
10343     const char *const function_name = use_rp2 ? "vkCmdNextSubpass2KHR()" : "vkCmdNextSubpass()";
10344 
10345     vuid = use_rp2 ? "VUID-vkCmdNextSubpass2KHR-bufferlevel" : "VUID-vkCmdNextSubpass-bufferlevel";
10346     skip |= ValidatePrimaryCommandBuffer(device_data, cb_state, function_name, vuid);
10347 
10348     vuid = use_rp2 ? "VUID-vkCmdNextSubpass2KHR-commandBuffer-cmdpool" : "VUID-vkCmdNextSubpass-commandBuffer-cmdpool";
10349     skip |= ValidateCmdQueueFlags(device_data, cb_state, function_name, VK_QUEUE_GRAPHICS_BIT, vuid);
10350     const CMD_TYPE cmd_type = use_rp2 ? CMD_NEXTSUBPASS2KHR : CMD_NEXTSUBPASS;
10351     skip |= ValidateCmd(device_data, cb_state, cmd_type, function_name);
10352 
10353     vuid = use_rp2 ? "VUID-vkCmdNextSubpass2KHR-renderpass" : "VUID-vkCmdNextSubpass-renderpass";
10354     skip |= OutsideRenderPass(device_data, cb_state, function_name, vuid);
10355 
10356     auto subpassCount = cb_state->activeRenderPass->createInfo.subpassCount;
10357     if (cb_state->activeSubpass == subpassCount - 1) {
10358         vuid = use_rp2 ? "VUID-vkCmdNextSubpass2KHR-None-03102" : "VUID-vkCmdNextSubpass-None-00909";
10359         skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10360                         HandleToUint64(commandBuffer), vuid, "%s: Attempted to advance beyond final subpass.", function_name);
10361     }
10362     return skip;
10363 }
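
// Illustrative sketch (not part of the layer, names hypothetical): the final-subpass
// check above fires for a recording like the following when the render pass was
// created with a single subpass, leaving no subpass to advance to:
//
//     vkCmdBeginRenderPass(cmd, &begin_info, VK_SUBPASS_CONTENTS_INLINE);
//     vkCmdNextSubpass(cmd, VK_SUBPASS_CONTENTS_INLINE);  // VUID-vkCmdNextSubpass-None-00909
//     vkCmdEndRenderPass(cmd);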

bool CoreChecks::PreCallValidateCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    return ValidateCmdNextSubpass(device_data, RENDER_PASS_VERSION_1, commandBuffer);
}

bool CoreChecks::PreCallValidateCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfoKHR *pSubpassBeginInfo,
                                                   const VkSubpassEndInfoKHR *pSubpassEndInfo) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    return ValidateCmdNextSubpass(device_data, RENDER_PASS_VERSION_2, commandBuffer);
}

void CoreChecks::RecordCmdNextSubpass(layer_data *device_data, VkCommandBuffer commandBuffer, VkSubpassContents contents) {
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    cb_state->activeSubpass++;
    cb_state->activeSubpassContents = contents;
    TransitionSubpassLayouts(device_data, cb_state, cb_state->activeRenderPass, cb_state->activeSubpass,
                             GetFramebufferState(cb_state->activeRenderPassBeginInfo.framebuffer));
}

void CoreChecks::PostCallRecordCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    RecordCmdNextSubpass(device_data, commandBuffer, contents);
}

void CoreChecks::PostCallRecordCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfoKHR *pSubpassBeginInfo,
                                                  const VkSubpassEndInfoKHR *pSubpassEndInfo) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    RecordCmdNextSubpass(device_data, commandBuffer, pSubpassBeginInfo->contents);
}

bool CoreChecks::ValidateCmdEndRenderPass(layer_data *device_data, RenderPassCreateVersion rp_version,
                                          VkCommandBuffer commandBuffer) {
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    assert(cb_state);
    bool skip = false;
    const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
    const char *vuid;
    const char *const function_name = use_rp2 ? "vkCmdEndRenderPass2KHR()" : "vkCmdEndRenderPass()";

    RENDER_PASS_STATE *rp_state = cb_state->activeRenderPass;
    if (rp_state) {
        if (cb_state->activeSubpass != rp_state->createInfo.subpassCount - 1) {
            vuid = use_rp2 ? "VUID-vkCmdEndRenderPass2KHR-None-03103" : "VUID-vkCmdEndRenderPass-None-00910";
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(commandBuffer), vuid, "%s: Called before reaching final subpass.", function_name);
        }
    }

    vuid = use_rp2 ? "VUID-vkCmdEndRenderPass2KHR-renderpass" : "VUID-vkCmdEndRenderPass-renderpass";
    skip |= OutsideRenderPass(device_data, cb_state, function_name, vuid);

    vuid = use_rp2 ? "VUID-vkCmdEndRenderPass2KHR-bufferlevel" : "VUID-vkCmdEndRenderPass-bufferlevel";
    skip |= ValidatePrimaryCommandBuffer(device_data, cb_state, function_name, vuid);

    vuid = use_rp2 ? "VUID-vkCmdEndRenderPass2KHR-commandBuffer-cmdpool" : "VUID-vkCmdEndRenderPass-commandBuffer-cmdpool";
    skip |= ValidateCmdQueueFlags(device_data, cb_state, function_name, VK_QUEUE_GRAPHICS_BIT, vuid);

    const CMD_TYPE cmd_type = use_rp2 ? CMD_ENDRENDERPASS2KHR : CMD_ENDRENDERPASS;
    skip |= ValidateCmd(device_data, cb_state, cmd_type, function_name);
    return skip;
}
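
// Illustrative sketch (not part of the layer, names hypothetical): a recording that
// satisfies both the NextSubpass and EndRenderPass checks above, assuming a render
// pass created with subpassCount == 2:
//
//     vkCmdBeginRenderPass(cmd, &begin_info, VK_SUBPASS_CONTENTS_INLINE);
//     // ... subpass 0 work ...
//     vkCmdNextSubpass(cmd, VK_SUBPASS_CONTENTS_INLINE);
//     // ... subpass 1 (final) work ...
//     vkCmdEndRenderPass(cmd);  // activeSubpass == subpassCount - 1, so no error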

bool CoreChecks::PreCallValidateCmdEndRenderPass(VkCommandBuffer commandBuffer) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skip = ValidateCmdEndRenderPass(device_data, RENDER_PASS_VERSION_1, commandBuffer);
    return skip;
}

bool CoreChecks::PreCallValidateCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer, const VkSubpassEndInfoKHR *pSubpassEndInfo) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skip = ValidateCmdEndRenderPass(device_data, RENDER_PASS_VERSION_2, commandBuffer);
    return skip;
}

void CoreChecks::RecordCmdEndRenderPassState(layer_data *device_data, VkCommandBuffer commandBuffer) {
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    FRAMEBUFFER_STATE *framebuffer = GetFramebufferState(cb_state->activeFramebuffer);
    TransitionFinalSubpassLayouts(device_data, cb_state, &cb_state->activeRenderPassBeginInfo, framebuffer);
    cb_state->activeRenderPass = nullptr;
    cb_state->activeSubpass = 0;
    cb_state->activeFramebuffer = VK_NULL_HANDLE;
}

void CoreChecks::PostCallRecordCmdEndRenderPass(VkCommandBuffer commandBuffer) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    RecordCmdEndRenderPassState(device_data, commandBuffer);
}

void CoreChecks::PostCallRecordCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer, const VkSubpassEndInfoKHR *pSubpassEndInfo) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    RecordCmdEndRenderPassState(device_data, commandBuffer);
}

bool CoreChecks::ValidateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
                                     VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB, const char *caller) {
    bool skip = false;
    if (!pSubCB->beginInfo.pInheritanceInfo) {
        return skip;
    }
    VkFramebuffer primary_fb = pCB->activeFramebuffer;
    VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
    if (secondary_fb != VK_NULL_HANDLE) {
        if (primary_fb != secondary_fb) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(primaryBuffer), "VUID-vkCmdExecuteCommands-pCommandBuffers-00099",
                            "vkCmdExecuteCommands() called with invalid secondary command buffer %s, which has a framebuffer %s"
                            " that is not the same as the primary command buffer's current active framebuffer %s.",
                            dev_data->report_data->FormatHandle(secondaryBuffer).c_str(),
                            dev_data->report_data->FormatHandle(secondary_fb).c_str(),
                            dev_data->report_data->FormatHandle(primary_fb).c_str());
        }
        auto fb = GetFramebufferState(secondary_fb);
        if (!fb) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(primaryBuffer), kVUID_Core_DrawState_InvalidSecondaryCommandBuffer,
                            "vkCmdExecuteCommands() called with invalid secondary command buffer %s, which has invalid "
                            "framebuffer %s.",
                            dev_data->report_data->FormatHandle(secondaryBuffer).c_str(),
                            dev_data->report_data->FormatHandle(secondary_fb).c_str());
            return skip;
        }
    }
    return skip;
}
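
// Illustrative sketch (not part of the layer, names hypothetical): a secondary
// command buffer passes the check above when its inherited framebuffer is either
// VK_NULL_HANDLE or the framebuffer the primary will have active:
//
//     VkCommandBufferInheritanceInfo inheritance = {};
//     inheritance.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
//     inheritance.renderPass = render_pass;   // must be compatible with the primary's active pass
//     inheritance.framebuffer = framebuffer;  // VK_NULL_HANDLE is also legal here
//
//     VkCommandBufferBeginInfo begin_info = {};
//     begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
//     begin_info.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
//     begin_info.pInheritanceInfo = &inheritance;
//     vkBeginCommandBuffer(secondary_cmd, &begin_info);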

bool CoreChecks::ValidateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) {
    bool skip = false;
    unordered_set<int> activeTypes;
    for (auto queryObject : pCB->activeQueries) {
        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
        if (queryPoolData != dev_data->queryPoolMap.end()) {
            if (queryPoolData->second.createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
                pSubCB->beginInfo.pInheritanceInfo) {
                VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
                if ((cmdBufStatistics & queryPoolData->second.createInfo.pipelineStatistics) != cmdBufStatistics) {
                    skip |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(pCB->commandBuffer), "VUID-vkCmdExecuteCommands-commandBuffer-00104",
                        "vkCmdExecuteCommands() called with invalid command buffer %s, which has invalid active query pool %s."
                        " Pipeline statistics are being queried, so every statistics bit inherited by the secondary command"
                        " buffer must also be set on the query pool.",
                        dev_data->report_data->FormatHandle(pCB->commandBuffer).c_str(),
                        dev_data->report_data->FormatHandle(queryPoolData->first).c_str());
                }
            }
            activeTypes.insert(queryPoolData->second.createInfo.queryType);
        }
    }
    for (auto queryObject : pSubCB->startedQueries) {
        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
        if (queryPoolData != dev_data->queryPoolMap.end() && activeTypes.count(queryPoolData->second.createInfo.queryType)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(pCB->commandBuffer), kVUID_Core_DrawState_InvalidSecondaryCommandBuffer,
                            "vkCmdExecuteCommands() called with invalid command buffer %s, which has an active query pool %s"
                            " of type %d, but a query of that type has been started on secondary command buffer %s.",
                            dev_data->report_data->FormatHandle(pCB->commandBuffer).c_str(),
                            dev_data->report_data->FormatHandle(queryPoolData->first).c_str(),
                            queryPoolData->second.createInfo.queryType,
                            dev_data->report_data->FormatHandle(pSubCB->commandBuffer).c_str());
        }
    }

    auto primary_pool = GetCommandPoolNode(pCB->createInfo.commandPool);
    auto secondary_pool = GetCommandPoolNode(pSubCB->createInfo.commandPool);
    if (primary_pool && secondary_pool && (primary_pool->queueFamilyIndex != secondary_pool->queueFamilyIndex)) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(pSubCB->commandBuffer), kVUID_Core_DrawState_InvalidQueueFamily,
                        "vkCmdExecuteCommands(): Primary command buffer %s created in queue family %d has secondary command buffer "
                        "%s created in queue family %d.",
                        dev_data->report_data->FormatHandle(pCB->commandBuffer).c_str(), primary_pool->queueFamilyIndex,
                        dev_data->report_data->FormatHandle(pSubCB->commandBuffer).c_str(), secondary_pool->queueFamilyIndex);
    }

    return skip;
}
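
// Illustrative note (not part of the layer): per the subset check above, when a
// VK_QUERY_TYPE_PIPELINE_STATISTICS query is active on the primary, the bits the
// secondary inherits must all be set on the pool, e.g. (names hypothetical):
//
//     inheritance.pipelineStatistics =
//         VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_VERTICES_BIT |
//         VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT;  // both set on the pool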

bool CoreChecks::PreCallValidateCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount,
                                                   const VkCommandBuffer *pCommandBuffers) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    assert(cb_state);
    bool skip = false;
    GLOBAL_CB_NODE *sub_cb_state = nullptr;
    std::unordered_set<GLOBAL_CB_NODE *> linked_command_buffers = cb_state->linkedCommandBuffers;

    for (uint32_t i = 0; i < commandBuffersCount; i++) {
        sub_cb_state = GetCBNode(pCommandBuffers[i]);
        assert(sub_cb_state);
        if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == sub_cb_state->createInfo.level) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(pCommandBuffers[i]), "VUID-vkCmdExecuteCommands-pCommandBuffers-00088",
                            "vkCmdExecuteCommands() called with primary command buffer %s in element %u of the pCommandBuffers "
                            "array. All command buffers in the pCommandBuffers array must be secondary.",
                            device_data->report_data->FormatHandle(pCommandBuffers[i]).c_str(), i);
        } else if (cb_state->activeRenderPass) {  // Secondary CB within a render pass must have *CONTINUE_BIT set
            if (sub_cb_state->beginInfo.pInheritanceInfo != nullptr) {
                auto secondary_rp_state = GetRenderPassState(sub_cb_state->beginInfo.pInheritanceInfo->renderPass);
                if (!(sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
                    skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCommandBuffers[i]),
                                    "VUID-vkCmdExecuteCommands-pCommandBuffers-00096",
                                    "vkCmdExecuteCommands(): Secondary command buffer (%s) executed within render pass (%s) must "
                                    "have had vkBeginCommandBuffer() called with "
                                    "VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set.",
                                    device_data->report_data->FormatHandle(pCommandBuffers[i]).c_str(),
                                    device_data->report_data->FormatHandle(cb_state->activeRenderPass->renderPass).c_str());
                } else {
                    // With the continue bit set, make sure the secondary's render pass is compatible with the primary's
                    if (cb_state->activeRenderPass->renderPass != secondary_rp_state->renderPass) {
                        skip |= ValidateRenderPassCompatibility(
                            device_data, "primary command buffer", cb_state->activeRenderPass, "secondary command buffer",
                            secondary_rp_state, "vkCmdExecuteCommands()", "VUID-vkCmdExecuteCommands-pInheritanceInfo-00098");
                    }
                    // If the framebuffer for the secondary CB is not NULL, then it must match the active FB from the primary CB
                    skip |= ValidateFramebuffer(device_data, commandBuffer, cb_state, pCommandBuffers[i], sub_cb_state,
                                                "vkCmdExecuteCommands()");
                    if (!sub_cb_state->cmd_execute_commands_functions.empty()) {
                        // Inherit the primary's activeFramebuffer while running the validate functions
                        for (auto &function : sub_cb_state->cmd_execute_commands_functions) {
                            skip |= function(cb_state, cb_state->activeFramebuffer);
                        }
                    }
                }
            }
        }
        // TODO(mlentine): Move more logic into this method
        skip |= ValidateSecondaryCommandBufferState(device_data, cb_state, sub_cb_state);
        skip |= ValidateCommandBufferState(device_data, sub_cb_state, "vkCmdExecuteCommands()", 0,
                                           "VUID-vkCmdExecuteCommands-pCommandBuffers-00089");
        if (!(sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
            if (sub_cb_state->in_use.load() || linked_command_buffers.count(sub_cb_state)) {
                skip |= log_msg(
                    device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdExecuteCommands-pCommandBuffers-00090",
                    "Attempt to simultaneously execute command buffer %s without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set!",
                    device_data->report_data->FormatHandle(cb_state->commandBuffer).c_str());
            }
            if (cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
                // Warn that the non-simultaneous secondary cmd buffer renders the primary non-simultaneous as well
                skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCommandBuffers[i]),
                                kVUID_Core_DrawState_InvalidCommandBufferSimultaneousUse,
                                "vkCmdExecuteCommands(): Secondary command buffer (%s) does not have "
                                "VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary "
                                "command buffer (%s) to be treated as if it does not have "
                                "VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set, even though it does.",
                                device_data->report_data->FormatHandle(pCommandBuffers[i]).c_str(),
                                device_data->report_data->FormatHandle(cb_state->commandBuffer).c_str());
            }
        }
        if (!cb_state->activeQueries.empty() && !device_data->enabled_features.core.inheritedQueries) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(pCommandBuffers[i]), "VUID-vkCmdExecuteCommands-commandBuffer-00101",
                            "vkCmdExecuteCommands(): Secondary command buffer (%s) cannot be submitted with a query in flight "
                            "because inherited queries are not supported on this device.",
                            device_data->report_data->FormatHandle(pCommandBuffers[i]).c_str());
        }
        // Propagate layout transitions to the primary cmd buffer
        // Novel valid usage: "UNASSIGNED-vkCmdExecuteCommands-commandBuffer-00001"
        // The initial layout usage of secondary command buffer resources must match the parent command buffer
        for (const auto &ilm_entry : sub_cb_state->imageLayoutMap) {
            auto cb_entry = cb_state->imageLayoutMap.find(ilm_entry.first);
            if (cb_entry != cb_state->imageLayoutMap.end()) {
                // For exact ImageSubresourcePair matches, validate against the parent entry
                if ((VK_IMAGE_LAYOUT_UNDEFINED != ilm_entry.second.initialLayout) &&
                    (cb_entry->second.layout != ilm_entry.second.initialLayout)) {
                    const VkImageSubresource &subresource = ilm_entry.first.subresource;
                    log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(pCommandBuffers[i]), "UNASSIGNED-vkCmdExecuteCommands-commandBuffer-00001",
                            "%s: Executed secondary command buffer using image %s (subresource: aspectMask 0x%X array layer %u, "
                            "mip level %u) which expects layout %s--instead, image %s's current layout is %s.",
                            "vkCmdExecuteCommands()", device_data->report_data->FormatHandle(ilm_entry.first.image).c_str(),
                            subresource.aspectMask, subresource.arrayLayer, subresource.mipLevel,
                            string_VkImageLayout(ilm_entry.second.initialLayout),
                            device_data->report_data->FormatHandle(ilm_entry.first.image).c_str(),
                            string_VkImageLayout(cb_entry->second.layout));
                }
            } else {
                // Look for partial matches (in aspectMask), and update or create parent map entry in SetLayout
                assert(ilm_entry.first.hasSubresource);
                IMAGE_CMD_BUF_LAYOUT_NODE node;
                if (FindCmdBufLayout(device_data, cb_state, ilm_entry.first.image, ilm_entry.first.subresource, node)) {
                    if ((VK_IMAGE_LAYOUT_UNDEFINED != ilm_entry.second.initialLayout) &&
                        (node.layout != ilm_entry.second.initialLayout)) {
                        const VkImageSubresource &subresource = ilm_entry.first.subresource;
                        log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCommandBuffers[i]),
                                "UNASSIGNED-vkCmdExecuteCommands-commandBuffer-00001",
                                "%s: Executed secondary command buffer using image %s (subresource: aspectMask 0x%X array layer "
                                "%u, mip level %u) which expects layout %s--instead, image %s's current layout is %s.",
                                "vkCmdExecuteCommands()", device_data->report_data->FormatHandle(ilm_entry.first.image).c_str(),
                                subresource.aspectMask, subresource.arrayLayer, subresource.mipLevel,
                                string_VkImageLayout(ilm_entry.second.initialLayout),
                                device_data->report_data->FormatHandle(ilm_entry.first.image).c_str(),
                                string_VkImageLayout(node.layout));
                    }
                }
            }
        }
        linked_command_buffers.insert(sub_cb_state);
    }
    skip |= ValidatePrimaryCommandBuffer(device_data, cb_state, "vkCmdExecuteCommands()", "VUID-vkCmdExecuteCommands-bufferlevel");
    skip |= ValidateCmdQueueFlags(device_data, cb_state, "vkCmdExecuteCommands()",
                                  VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                  "VUID-vkCmdExecuteCommands-commandBuffer-cmdpool");
    skip |= ValidateCmd(device_data, cb_state, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
    return skip;
}
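
// Illustrative sketch (not part of the layer, names hypothetical): a minimal call
// that passes the checks above, assuming the secondary was begun with
// VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT against a compatible render pass:
//
//     vkCmdBeginRenderPass(primary_cmd, &begin_info, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS);
//     vkCmdExecuteCommands(primary_cmd, 1, &secondary_cmd);
//     vkCmdEndRenderPass(primary_cmd);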

void CoreChecks::PreCallRecordCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount,
                                                 const VkCommandBuffer *pCommandBuffers) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);

    GLOBAL_CB_NODE *sub_cb_state = nullptr;
    for (uint32_t i = 0; i < commandBuffersCount; i++) {
        sub_cb_state = GetCBNode(pCommandBuffers[i]);
        assert(sub_cb_state);
        if (!(sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
            if (cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
                // TODO: Because this is a state change, clearing the VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT needs to be
                // moved from the validation step to the recording step
                cb_state->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
            }
        }
        // Propagate layout transitions to the primary cmd buffer
        // Novel valid usage: "UNASSIGNED-vkCmdExecuteCommands-commandBuffer-00001"
        // The initial layout usage of secondary command buffer resources must match the parent command buffer
        for (const auto &ilm_entry : sub_cb_state->imageLayoutMap) {
            auto cb_entry = cb_state->imageLayoutMap.find(ilm_entry.first);
            if (cb_entry != cb_state->imageLayoutMap.end()) {
                // For exact ImageSubresourcePair matches, update the parent entry
                cb_entry->second.layout = ilm_entry.second.layout;
            } else {
                // Look for partial matches (in aspectMask), and update or create parent map entry in SetLayout
                assert(ilm_entry.first.hasSubresource);
                IMAGE_CMD_BUF_LAYOUT_NODE node;
                if (!FindCmdBufLayout(device_data, cb_state, ilm_entry.first.image, ilm_entry.first.subresource, node)) {
                    node.initialLayout = ilm_entry.second.initialLayout;
                }
                node.layout = ilm_entry.second.layout;
                SetLayout(device_data, cb_state, ilm_entry.first, node);
            }
        }
        sub_cb_state->primaryCommandBuffer = cb_state->commandBuffer;
        cb_state->linkedCommandBuffers.insert(sub_cb_state);
        sub_cb_state->linkedCommandBuffers.insert(cb_state);
        for (auto &function : sub_cb_state->queryUpdates) {
            cb_state->queryUpdates.push_back(function);
        }
        for (auto &function : sub_cb_state->queue_submit_functions) {
            cb_state->queue_submit_functions.push_back(function);
        }
    }
}

bool CoreChecks::PreCallValidateMapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size,
                                          VkFlags flags, void **ppData) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    DEVICE_MEM_INFO *mem_info = GetMemObjInfo(mem);
    if (mem_info) {
        auto end_offset = (VK_WHOLE_SIZE == size) ? mem_info->alloc_info.allocationSize - 1 : offset + size - 1;
        skip |= ValidateMapImageLayouts(device_data, device, mem_info, offset, end_offset);
        if ((device_data->phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags &
             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
            // Accumulate with |= so the image-layout result above is not discarded
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            HandleToUint64(mem), "VUID-vkMapMemory-memory-00682",
                            "Mapping memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj %s.",
                            device_data->report_data->FormatHandle(mem).c_str());
        }
    }
    skip |= ValidateMapMemRange(device_data, mem, offset, size);
    return skip;
}
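
// Illustrative sketch (not part of the layer, names hypothetical): a mapping that
// satisfies the checks above, assuming the allocation's memory type was chosen with
// VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT:
//
//     void *data = nullptr;
//     VkResult err = vkMapMemory(device, memory, /*offset*/ 0, VK_WHOLE_SIZE, /*flags*/ 0, &data);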

void CoreChecks::PostCallRecordMapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags,
                                         void **ppData, VkResult result) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (VK_SUCCESS != result) return;
    // TODO : What's the point of this range? See comment on creating new "bound_range" above, which may replace this
    StoreMemRanges(device_data, mem, offset, size);
    InitializeAndTrackMemory(device_data, mem, offset, size, ppData);
}

bool CoreChecks::PreCallValidateUnmapMemory(VkDevice device, VkDeviceMemory mem) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    auto mem_info = GetMemObjInfo(mem);
    if (mem_info && !mem_info->mem_range.size) {
        // Valid usage: memory must currently be mapped
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        HandleToUint64(mem), "VUID-vkUnmapMemory-memory-00689",
                        "Unmapping memory that is not currently mapped: mem obj %s.",
                        device_data->report_data->FormatHandle(mem).c_str());
    }
    return skip;
}

void CoreChecks::PreCallRecordUnmapMemory(VkDevice device, VkDeviceMemory mem) {
    auto mem_info = GetMemObjInfo(mem);
    if (!mem_info) return;  // Guard against an unknown memory handle
    mem_info->mem_range.size = 0;
    if (mem_info->shadow_copy) {
        free(mem_info->shadow_copy_base);
        mem_info->shadow_copy_base = nullptr;
        mem_info->shadow_copy = nullptr;
    }
}

bool CoreChecks::ValidateMemoryIsMapped(layer_data *dev_data, const char *funcName, uint32_t memRangeCount,
                                        const VkMappedMemoryRange *pMemRanges) {
    bool skip = false;
    for (uint32_t i = 0; i < memRangeCount; ++i) {
        auto mem_info = GetMemObjInfo(pMemRanges[i].memory);
        if (mem_info) {
            if (pMemRanges[i].size == VK_WHOLE_SIZE) {
                if (mem_info->mem_range.offset > pMemRanges[i].offset) {
                    skip |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        HandleToUint64(pMemRanges[i].memory), "VUID-VkMappedMemoryRange-size-00686",
                        "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER
                        ") is less than the mapped memory range's offset (" PRINTF_SIZE_T_SPECIFIER ").",
                        funcName, static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(mem_info->mem_range.offset));
                }
            } else {
                const uint64_t data_end = (mem_info->mem_range.size == VK_WHOLE_SIZE)
                                              ? mem_info->alloc_info.allocationSize
                                              : (mem_info->mem_range.offset + mem_info->mem_range.size);
                if ((mem_info->mem_range.offset > pMemRanges[i].offset) ||
                    (data_end < (pMemRanges[i].offset + pMemRanges[i].size))) {
                    skip |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                HandleToUint64(pMemRanges[i].memory), "VUID-VkMappedMemoryRange-size-00685",
                                "%s: Flush/Invalidate end offset and start offset (" PRINTF_SIZE_T_SPECIFIER
                                ", " PRINTF_SIZE_T_SPECIFIER ") exceed the mapped memory range's upper bound ("
                                PRINTF_SIZE_T_SPECIFIER ").",
                                funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size),
                                static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(data_end));
                }
            }
        }
    }
    return skip;
}
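
// Illustrative sketch (not part of the layer, names hypothetical): a flush range
// that stays inside the currently mapped range sketched above:
//
//     VkMappedMemoryRange range = {};
//     range.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
//     range.memory = memory;
//     range.offset = 0;            // must be >= the mapped offset
//     range.size = VK_WHOLE_SIZE;  // or an explicit size ending within the mapped range
//     vkFlushMappedMemoryRanges(device, 1, &range);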

bool CoreChecks::ValidateAndCopyNoncoherentMemoryToDriver(layer_data *dev_data, uint32_t mem_range_count,
                                                          const VkMappedMemoryRange *mem_ranges) {
    bool skip = false;
    for (uint32_t i = 0; i < mem_range_count; ++i) {
        auto mem_info = GetMemObjInfo(mem_ranges[i].memory);
        if (mem_info) {
            if (mem_info->shadow_copy) {
                VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
                                        ? mem_info->mem_range.size
                                        : (mem_info->alloc_info.allocationSize - mem_info->mem_range.offset);
                char *data = static_cast<char *>(mem_info->shadow_copy);
                for (uint64_t j = 0; j < mem_info->shadow_pad_size; ++j) {
                    if (data[j] != NoncoherentMemoryFillValue) {
                        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, HandleToUint64(mem_ranges[i].memory),
                                        kVUID_Core_MemTrack_InvalidMap, "Memory underflow was detected on mem obj %s.",
                                        dev_data->report_data->FormatHandle(mem_ranges[i].memory).c_str());
                    }
                }
                for (uint64_t j = (size + mem_info->shadow_pad_size); j < (2 * mem_info->shadow_pad_size + size); ++j) {
                    if (data[j] != NoncoherentMemoryFillValue) {
                        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, HandleToUint64(mem_ranges[i].memory),
                                        kVUID_Core_MemTrack_InvalidMap, "Memory overflow was detected on mem obj %s.",
                                        dev_data->report_data->FormatHandle(mem_ranges[i].memory).c_str());
                    }
                }
                memcpy(mem_info->p_driver_data, static_cast<void *>(data + mem_info->shadow_pad_size), (size_t)(size));
            }
        }
    }
    return skip;
}
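
// Note on the shadow-copy layout assumed above and in CopyNoncoherentMemoryFromDriver
// below: shadow_copy points at [pad | user data | pad], where each pad region is
// shadow_pad_size bytes pre-filled with NoncoherentMemoryFillValue. A pad byte that no
// longer holds the fill value means the app wrote outside its mapped range (underflow
// before the data, overflow after it); p_driver_data is the real driver mapping.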

void CoreChecks::CopyNoncoherentMemoryFromDriver(layer_data *dev_data, uint32_t mem_range_count,
                                                 const VkMappedMemoryRange *mem_ranges) {
    for (uint32_t i = 0; i < mem_range_count; ++i) {
        auto mem_info = GetMemObjInfo(mem_ranges[i].memory);
        if (mem_info && mem_info->shadow_copy) {
            VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
                                    ? mem_info->mem_range.size
                                    : (mem_info->alloc_info.allocationSize - mem_ranges[i].offset);
            char *data = static_cast<char *>(mem_info->shadow_copy);
            memcpy(data + mem_info->shadow_pad_size, mem_info->p_driver_data, (size_t)(size));
        }
    }
}

bool CoreChecks::ValidateMappedMemoryRangeDeviceLimits(layer_data *dev_data, const char *func_name, uint32_t mem_range_count,
                                                       const VkMappedMemoryRange *mem_ranges) {
    bool skip = false;
    for (uint32_t i = 0; i < mem_range_count; ++i) {
        uint64_t atom_size = dev_data->phys_dev_props.limits.nonCoherentAtomSize;
        if (SafeModulo(mem_ranges[i].offset, atom_size) != 0) {
            // Report against the range actually being checked (mem_ranges[i]), not the first element
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            HandleToUint64(mem_ranges[i].memory), "VUID-VkMappedMemoryRange-offset-00687",
                            "%s: Offset in pMemRanges[%d] is 0x%" PRIxLEAST64
                            ", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 ").",
                            func_name, i, mem_ranges[i].offset, atom_size);
        }
        auto mem_info = GetMemObjInfo(mem_ranges[i].memory);
        // Guard against an unknown memory handle before dereferencing mem_info
        if (mem_info && (mem_ranges[i].size != VK_WHOLE_SIZE) &&
            (mem_ranges[i].size + mem_ranges[i].offset != mem_info->alloc_info.allocationSize) &&
            (SafeModulo(mem_ranges[i].size, atom_size) != 0)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            HandleToUint64(mem_ranges[i].memory), "VUID-VkMappedMemoryRange-size-01390",
                            "%s: Size in pMemRanges[%d] is 0x%" PRIxLEAST64
                            ", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 ").",
                            func_name, i, mem_ranges[i].size, atom_size);
        }
    }
    return skip;
}
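
// Illustrative sketch (not part of the layer, names hypothetical): rounding an
// arbitrary byte range so the two multiple-of-atom-size checks above pass, assuming
// nonCoherentAtomSize is a power of two:
//
//     VkDeviceSize atom = limits.nonCoherentAtomSize;
//     range.offset = byte_offset & ~(atom - 1);                               // round down
//     VkDeviceSize end = (byte_offset + byte_size + atom - 1) & ~(atom - 1);  // round up
//     range.size = end - range.offset;  // may still need clamping to allocationSize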

bool CoreChecks::PreCallValidateFlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
                                                        const VkMappedMemoryRange *pMemRanges) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    skip |= ValidateMappedMemoryRangeDeviceLimits(device_data, "vkFlushMappedMemoryRanges", memRangeCount, pMemRanges);
    skip |= ValidateAndCopyNoncoherentMemoryToDriver(device_data, memRangeCount, pMemRanges);
    skip |= ValidateMemoryIsMapped(device_data, "vkFlushMappedMemoryRanges", memRangeCount, pMemRanges);
    return skip;
}

bool CoreChecks::PreCallValidateInvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
                                                             const VkMappedMemoryRange *pMemRanges) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    skip |= ValidateMappedMemoryRangeDeviceLimits(device_data, "vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges);
    skip |= ValidateMemoryIsMapped(device_data, "vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges);
    return skip;
}

void CoreChecks::PostCallRecordInvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
                                                            const VkMappedMemoryRange *pMemRanges, VkResult result) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (VK_SUCCESS == result) {
        // Update our shadow copy with modified driver data
        CopyNoncoherentMemoryFromDriver(device_data, memRangeCount, pMemRanges);
    }
}

bool CoreChecks::PreCallValidateGetDeviceMemoryCommitment(VkDevice device, VkDeviceMemory mem, VkDeviceSize *pCommittedMem) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    auto mem_info = GetMemObjInfo(mem);

    if (mem_info) {
        if ((dev_data->phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags &
             VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) == 0) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            HandleToUint64(mem), "VUID-vkGetDeviceMemoryCommitment-memory-00690",
                            "Querying commitment for memory without VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT set: mem obj %s.",
                            dev_data->report_data->FormatHandle(mem).c_str());
        }
    }
    return skip;
}
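
// Illustrative sketch (not part of the layer, names hypothetical): commitment queries
// are meaningful for lazily allocated memory, such as a transient attachment's backing
// allocation:
//
//     VkDeviceSize committed = 0;
//     vkGetDeviceMemoryCommitment(device, lazily_allocated_memory, &committed);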

bool CoreChecks::ValidateBindImageMemory(layer_data *device_data, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset,
                                         const char *api_name) {
    bool skip = false;
    IMAGE_STATE *image_state = GetImageState(image);
    if (image_state) {
        // Track objects tied to memory
        uint64_t image_handle = HandleToUint64(image);
        skip = ValidateSetMemBinding(device_data, mem, image_handle, kVulkanObjectTypeImage, api_name);
        if (!image_state->memory_requirements_checked) {
            // There's no explicit requirement in the spec to call vkGetImageMemoryRequirements() prior to calling
            // BindImageMemory, but it's implied in that the memory being bound must conform to the VkMemoryRequirements
            // from vkGetImageMemoryRequirements()
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                            image_handle, kVUID_Core_DrawState_InvalidImage,
                            "%s: Binding memory to image %s but vkGetImageMemoryRequirements() has not been called on that image.",
                            api_name, device_data->report_data->FormatHandle(image_handle).c_str());
            // Make the call for them so we can verify the state
            device_data->device_dispatch_table.GetImageMemoryRequirements(device_data->device, image, &image_state->requirements);
        }

        // Validate bound memory range information
        auto mem_info = GetMemObjInfo(mem);
        if (mem_info) {
            skip |= ValidateInsertImageMemoryRange(device_data, image, mem_info, memoryOffset, image_state->requirements,
                                                   image_state->createInfo.tiling == VK_IMAGE_TILING_LINEAR, api_name);
            skip |= ValidateMemoryTypes(device_data, mem_info, image_state->requirements.memoryTypeBits, api_name,
                                        "VUID-vkBindImageMemory-memory-01047");
        }

        // Validate memory requirements alignment
        if (SafeModulo(memoryOffset, image_state->requirements.alignment) != 0) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                            image_handle, "VUID-vkBindImageMemory-memoryOffset-01048",
                            "%s: memoryOffset is 0x%" PRIxLEAST64
                            " but must be an integer multiple of the VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
                            ", returned from a call to vkGetImageMemoryRequirements with image.",
                            api_name, memoryOffset, image_state->requirements.alignment);
        }

        if (mem_info) {
            // Validate memory requirements size
            if (image_state->requirements.size > mem_info->alloc_info.allocationSize - memoryOffset) {
                skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                image_handle, "VUID-vkBindImageMemory-size-01049",
                                "%s: memory size minus memoryOffset is 0x%" PRIxLEAST64
                                " but must be at least as large as VkMemoryRequirements::size value 0x%" PRIxLEAST64
                                ", returned from a call to vkGetImageMemoryRequirements with image.",
                                api_name, mem_info->alloc_info.allocationSize - memoryOffset, image_state->requirements.size);
            }

            // Validate dedicated allocation
            if (mem_info->is_dedicated && ((mem_info->dedicated_image != image) || (memoryOffset != 0))) {
                // TODO: Add vkBindImageMemory2KHR error message when added to spec.
                auto validation_error = kVUIDUndefined;
                if (strcmp(api_name, "vkBindImageMemory()") == 0) {
                    validation_error = "VUID-vkBindImageMemory-memory-01509";
                }
                // Report with the image object type, since image_handle is an image handle
                skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                image_handle, validation_error,
                                "%s: for dedicated memory allocation %s, VkMemoryDedicatedAllocateInfoKHR::image %s must be equal "
                                "to image %s and memoryOffset 0x%" PRIxLEAST64 " must be zero.",
                                api_name, device_data->report_data->FormatHandle(mem).c_str(),
                                device_data->report_data->FormatHandle(mem_info->dedicated_image).c_str(),
                                device_data->report_data->FormatHandle(image_handle).c_str(), memoryOffset);
            }
        }
    }
    return skip;
}
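
// Illustrative sketch (not part of the layer, names hypothetical): the query-then-bind
// order the warning above encourages, with an offset that respects the returned
// alignment (which the spec requires to be a power of two):
//
//     VkMemoryRequirements reqs;
//     vkGetImageMemoryRequirements(device, image, &reqs);
//     VkDeviceSize offset = (suballoc_offset + reqs.alignment - 1) & ~(reqs.alignment - 1);
//     vkBindImageMemory(device, image, memory, offset);  // memory type chosen from reqs.memoryTypeBits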
10995 
PreCallValidateBindImageMemory(VkDevice device,VkImage image,VkDeviceMemory mem,VkDeviceSize memoryOffset)10996 bool CoreChecks::PreCallValidateBindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
10997     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10998     return ValidateBindImageMemory(device_data, image, mem, memoryOffset, "vkBindImageMemory()");
10999 }
11000 
UpdateBindImageMemoryState(layer_data * device_data,VkImage image,VkDeviceMemory mem,VkDeviceSize memoryOffset)11001 void CoreChecks::UpdateBindImageMemoryState(layer_data *device_data, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
11002     IMAGE_STATE *image_state = GetImageState(image);
11003     if (image_state) {
11004         // Track bound memory range information
11005         auto mem_info = GetMemObjInfo(mem);
11006         if (mem_info) {
11007             InsertImageMemoryRange(device_data, image, mem_info, memoryOffset, image_state->requirements,
11008                                    image_state->createInfo.tiling == VK_IMAGE_TILING_LINEAR);
11009         }
11010 
11011         // Track objects tied to memory
11012         uint64_t image_handle = HandleToUint64(image);
11013         SetMemBinding(device_data, mem, image_state, memoryOffset, image_handle, kVulkanObjectTypeImage);
11014     }
11015 }
11016 
PostCallRecordBindImageMemory(VkDevice device,VkImage image,VkDeviceMemory mem,VkDeviceSize memoryOffset,VkResult result)11017 void CoreChecks::PostCallRecordBindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset,
11018                                                VkResult result) {
11019     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
11020     if (VK_SUCCESS != result) return;
11021     UpdateBindImageMemoryState(device_data, image, mem, memoryOffset);
11022 }
11023 
PreCallValidateBindImageMemory2(VkDevice device,uint32_t bindInfoCount,const VkBindImageMemoryInfoKHR * pBindInfos)11024 bool CoreChecks::PreCallValidateBindImageMemory2(VkDevice device, uint32_t bindInfoCount,
11025                                                  const VkBindImageMemoryInfoKHR *pBindInfos) {
11026     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
11027     bool skip = false;
11028     char api_name[128];
11029     for (uint32_t i = 0; i < bindInfoCount; i++) {
11030         sprintf(api_name, "vkBindImageMemory2() pBindInfos[%u]", i);
11031         skip |=
11032             ValidateBindImageMemory(device_data, pBindInfos[i].image, pBindInfos[i].memory, pBindInfos[i].memoryOffset, api_name);
11033     }
11034     return skip;
11035 }
11036 
PreCallValidateBindImageMemory2KHR(VkDevice device,uint32_t bindInfoCount,const VkBindImageMemoryInfoKHR * pBindInfos)11037 bool CoreChecks::PreCallValidateBindImageMemory2KHR(VkDevice device, uint32_t bindInfoCount,
11038                                                     const VkBindImageMemoryInfoKHR *pBindInfos) {
11039     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
11040     bool skip = false;
11041     char api_name[128];
11042     for (uint32_t i = 0; i < bindInfoCount; i++) {
11043         sprintf(api_name, "vkBindImageMemory2KHR() pBindInfos[%u]", i);
11044         skip |=
11045             ValidateBindImageMemory(device_data, pBindInfos[i].image, pBindInfos[i].memory, pBindInfos[i].memoryOffset, api_name);
11046     }
11047     return skip;
11048 }
11049 
PostCallRecordBindImageMemory2(VkDevice device,uint32_t bindInfoCount,const VkBindImageMemoryInfoKHR * pBindInfos,VkResult result)11050 void CoreChecks::PostCallRecordBindImageMemory2(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfoKHR *pBindInfos,
11051                                                 VkResult result) {
11052     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
11053     if (VK_SUCCESS != result) return;
11054     for (uint32_t i = 0; i < bindInfoCount; i++) {
11055         UpdateBindImageMemoryState(device_data, pBindInfos[i].image, pBindInfos[i].memory, pBindInfos[i].memoryOffset);
11056     }
11057 }
11058 
PostCallRecordBindImageMemory2KHR(VkDevice device,uint32_t bindInfoCount,const VkBindImageMemoryInfoKHR * pBindInfos,VkResult result)11059 void CoreChecks::PostCallRecordBindImageMemory2KHR(VkDevice device, uint32_t bindInfoCount,
11060                                                    const VkBindImageMemoryInfoKHR *pBindInfos, VkResult result) {
11061     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
11062     if (VK_SUCCESS != result) return;
11063     for (uint32_t i = 0; i < bindInfoCount; i++) {
11064         UpdateBindImageMemoryState(device_data, pBindInfos[i].image, pBindInfos[i].memory, pBindInfos[i].memoryOffset);
11065     }
11066 }
11067 
PreCallValidateSetEvent(VkDevice device,VkEvent event)11068 bool CoreChecks::PreCallValidateSetEvent(VkDevice device, VkEvent event) {
11069     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
11070     bool skip = false;
11071     auto event_state = GetEventNode(event);
11072     if (event_state) {
11073         if (event_state->write_in_use) {
11074             skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
11075                             HandleToUint64(event), kVUID_Core_DrawState_QueueForwardProgress,
11076                             "Cannot call vkSetEvent() on event %s that is already in use by a command buffer.",
11077                             device_data->report_data->FormatHandle(event).c_str());
11078         }
11079     }
11080     return skip;
11081 }
11082 
PreCallRecordSetEvent(VkDevice device,VkEvent event)11083 void CoreChecks::PreCallRecordSetEvent(VkDevice device, VkEvent event) {
11084     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
11085     auto event_state = GetEventNode(event);
11086     if (event_state) {
11087         event_state->needsSignaled = false;
11088         event_state->stageMask = VK_PIPELINE_STAGE_HOST_BIT;
11089     }
11090     // A host signal of an event is immediately visible to all queues, so update the stageMask for any queue that has seen this event
11091     // TODO : For correctness this needs a separate fix to verify that the app doesn't make incorrect assumptions about the
11092     // ordering of this command in relation to vkCmd[Set|Reset]Events (see GH297)
11093     for (auto queue_data : device_data->queueMap) {
11094         auto event_entry = queue_data.second.eventToStageMap.find(event);
11095         if (event_entry != queue_data.second.eventToStageMap.end()) {
11096             event_entry->second |= VK_PIPELINE_STAGE_HOST_BIT;
11097         }
11098     }
11099 }
11100 
11101 bool CoreChecks::PreCallValidateQueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo,
11102                                                 VkFence fence) {
11103     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
11104     auto pFence = GetFenceNode(fence);
11105     bool skip = ValidateFenceForSubmit(device_data, pFence);
11106     if (skip) {
11107         return true;
11108     }
11109 
11110     unordered_set<VkSemaphore> signaled_semaphores;
11111     unordered_set<VkSemaphore> unsignaled_semaphores;
11112     unordered_set<VkSemaphore> internal_semaphores;
11113     for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
11114         const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
11115 
11116         std::vector<SEMAPHORE_WAIT> semaphore_waits;
11117         std::vector<VkSemaphore> semaphore_signals;
11118         for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
11119             VkSemaphore semaphore = bindInfo.pWaitSemaphores[i];
11120             auto pSemaphore = GetSemaphoreNode(semaphore);
11121             if (pSemaphore && (pSemaphore->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) {
11122                 if (unsignaled_semaphores.count(semaphore) ||
11123                     (!(signaled_semaphores.count(semaphore)) && !(pSemaphore->signaled))) {
11124                     skip |=
11125                         log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
11126                                 HandleToUint64(semaphore), kVUID_Core_DrawState_QueueForwardProgress,
11127                                 "Queue %s is waiting on semaphore %s that has no way to be signaled.",
11128                                 device_data->report_data->FormatHandle(queue).c_str(),
11129                                 device_data->report_data->FormatHandle(semaphore).c_str());
11130                 } else {
11131                     signaled_semaphores.erase(semaphore);
11132                     unsignaled_semaphores.insert(semaphore);
11133                 }
11134             }
11135             if (pSemaphore && pSemaphore->scope == kSyncScopeExternalTemporary) {
11136                 internal_semaphores.insert(semaphore);
11137             }
11138         }
11139         for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
11140             VkSemaphore semaphore = bindInfo.pSignalSemaphores[i];
11141             auto pSemaphore = GetSemaphoreNode(semaphore);
11142             if (pSemaphore && pSemaphore->scope == kSyncScopeInternal) {
11143                 if (signaled_semaphores.count(semaphore) || (!(unsignaled_semaphores.count(semaphore)) && pSemaphore->signaled)) {
11144                     skip |=
11145                         log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
11146                                 HandleToUint64(semaphore), kVUID_Core_DrawState_QueueForwardProgress,
11147                                 "Queue %s is signaling semaphore %s that was previously signaled by queue %s but has not since "
11148                                 "been waited on by any queue.",
11149                                 device_data->report_data->FormatHandle(queue).c_str(),
11150                                 device_data->report_data->FormatHandle(semaphore).c_str(),
11151                                 device_data->report_data->FormatHandle(pSemaphore->signaler.first).c_str());
11152                 } else {
11153                     unsignaled_semaphores.erase(semaphore);
11154                     signaled_semaphores.insert(semaphore);
11155                 }
11156             }
11157         }
11158         // Store the image_state of each sparse-bound image; once the binds are processed, verify that any image requiring metadata has it bound
11159         std::unordered_set<IMAGE_STATE *> sparse_images;
11160         // If we're binding sparse image memory make sure reqs were queried and note if metadata is required and bound
11161         for (uint32_t i = 0; i < bindInfo.imageBindCount; ++i) {
11162             const auto &image_bind = bindInfo.pImageBinds[i];
11163             auto image_state = GetImageState(image_bind.image);
11164             if (!image_state)
11165                 continue;  // Param/Object validation should report image_bind.image handles being invalid, so just skip here.
11166             sparse_images.insert(image_state);
11167             if (image_state->createInfo.flags & VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT) {
11168                 if (!image_state->get_sparse_reqs_called || image_state->sparse_requirements.empty()) {
11169                     // For now, just warn if sparse image binding occurs without first querying the sparse requirements
11170                     return log_msg(device_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
11171                                    HandleToUint64(image_state->image), kVUID_Core_MemTrack_InvalidState,
11172                                    "vkQueueBindSparse(): Binding sparse memory to image %s without first calling "
11173                                    "vkGetImageSparseMemoryRequirements[2KHR]() to retrieve requirements.",
11174                                    device_data->report_data->FormatHandle(image_state->image).c_str());
11175                 }
11176             }
11177             if (!image_state->memory_requirements_checked) {
11178                 // For now, just warn if sparse image binding occurs without first querying the memory requirements
11179                 return log_msg(device_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
11180                                HandleToUint64(image_state->image), kVUID_Core_MemTrack_InvalidState,
11181                                "vkQueueBindSparse(): Binding sparse memory to image %s without first calling "
11182                                "vkGetImageMemoryRequirements() to retrieve requirements.",
11183                                device_data->report_data->FormatHandle(image_state->image).c_str());
11184             }
11185         }
11186         for (uint32_t i = 0; i < bindInfo.imageOpaqueBindCount; ++i) {
11187             const auto &image_opaque_bind = bindInfo.pImageOpaqueBinds[i];
11188             auto image_state = GetImageState(image_opaque_bind.image);
11189             if (!image_state)
11190                 continue;  // Param/Object validation should report pImageOpaqueBinds[i].image handles being invalid, so just skip here.
11191             sparse_images.insert(image_state);
11192             if (image_state->createInfo.flags & VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT) {
11193                 if (!image_state->get_sparse_reqs_called || image_state->sparse_requirements.empty()) {
11194                     // For now, just warn if opaque sparse image binding occurs without first querying the sparse requirements
11195                     return log_msg(device_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
11196                                    HandleToUint64(image_state->image), kVUID_Core_MemTrack_InvalidState,
11197                                    "vkQueueBindSparse(): Binding opaque sparse memory to image %s without first calling "
11198                                    "vkGetImageSparseMemoryRequirements[2KHR]() to retrieve requirements.",
11199                                    device_data->report_data->FormatHandle(image_state->image).c_str());
11200                 }
11201             }
11202             if (!image_state->memory_requirements_checked) {
11203                 // For now, just warn if opaque sparse image binding occurs without first querying the memory requirements
11204                 return log_msg(device_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
11205                                HandleToUint64(image_state->image), kVUID_Core_MemTrack_InvalidState,
11206                                "vkQueueBindSparse(): Binding opaque sparse memory to image %s without first calling "
11207                                "vkGetImageMemoryRequirements() to retrieve requirements.",
11208                                device_data->report_data->FormatHandle(image_state->image).c_str());
11209             }
11210             for (uint32_t j = 0; j < image_opaque_bind.bindCount; ++j) {
11211                 if (image_opaque_bind.pBinds[j].flags & VK_SPARSE_MEMORY_BIND_METADATA_BIT) {
11212                     image_state->sparse_metadata_bound = true;
11213                 }
11214             }
11215         }
11216         for (const auto &sparse_image_state : sparse_images) {
11217             if (sparse_image_state->sparse_metadata_required && !sparse_image_state->sparse_metadata_bound) {
11218                 // Warn if sparse image binding metadata required for image with sparse binding, but metadata not bound
11219                 return log_msg(device_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
11220                                HandleToUint64(sparse_image_state->image), kVUID_Core_MemTrack_InvalidState,
11221                                "vkQueueBindSparse(): Binding sparse memory to image %s which requires a metadata aspect but no "
11222                                "binding with VK_SPARSE_MEMORY_BIND_METADATA_BIT set was made.",
11223                                device_data->report_data->FormatHandle(sparse_image_state->image).c_str());
11224             }
11225         }
11226     }
11227 
11228     return skip;
11229 }
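
// A minimal sketch (illustrative only; hypothetical helper, excluded from the build) of the
// query-before-bind pattern the warnings above ask for: fetch both the general and the sparse
// memory requirements of a sparse-residency image before handing it to vkQueueBindSparse.
#if 0
static void ExampleQuerySparseImageReqs(VkDevice device, VkImage sparse_image) {
    VkMemoryRequirements mem_reqs;
    vkGetImageMemoryRequirements(device, sparse_image, &mem_reqs);  // satisfies memory_requirements_checked

    uint32_t count = 0;
    vkGetImageSparseMemoryRequirements(device, sparse_image, &count, nullptr);
    std::vector<VkSparseImageMemoryRequirements> sparse_reqs(count);
    vkGetImageSparseMemoryRequirements(device, sparse_image, &count, sparse_reqs.data());
    // An entry whose formatProperties.aspectMask contains VK_IMAGE_ASPECT_METADATA_BIT means the
    // image needs an opaque bind with VK_SPARSE_MEMORY_BIND_METADATA_BIT before use, which is what
    // the sparse_metadata_bound check above enforces.
}
#endif
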
11230 void CoreChecks::PostCallRecordQueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo,
11231                                                VkFence fence, VkResult result) {
11232     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
11233     if (result != VK_SUCCESS) return;
11234     uint64_t early_retire_seq = 0;
11235     auto pFence = GetFenceNode(fence);
11236     auto pQueue = GetQueueState(queue);
11237 
11238     if (pFence) {
11239         if (pFence->scope == kSyncScopeInternal) {
11240             SubmitFence(pQueue, pFence, std::max(1u, bindInfoCount));
11241             if (!bindInfoCount) {
11242                 // No work to do, just dropping a fence in the queue by itself.
11243                 pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), std::vector<SEMAPHORE_WAIT>(),
11244                                                  std::vector<VkSemaphore>(), std::vector<VkSemaphore>(), fence);
11245             }
11246         } else {
11247             // Retire work up until this fence early, we will not see the wait that corresponds to this signal
11248             early_retire_seq = pQueue->seq + pQueue->submissions.size();
11249             if (!device_data->external_sync_warning) {
11250                 device_data->external_sync_warning = true;
11251                 log_msg(device_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
11252                         HandleToUint64(fence), kVUID_Core_DrawState_QueueForwardProgress,
11253                         "vkQueueBindSparse(): Signaling external fence %s on queue %s will disable validation of preceding command "
11254                         "buffer lifecycle states and the in-use status of associated objects.",
11255                         device_data->report_data->FormatHandle(fence).c_str(),
11256                         device_data->report_data->FormatHandle(queue).c_str());
11257             }
11258         }
11259     }
11260 
11261     for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
11262         const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
11263         // Track objects tied to memory
11264         for (uint32_t j = 0; j < bindInfo.bufferBindCount; j++) {
11265             for (uint32_t k = 0; k < bindInfo.pBufferBinds[j].bindCount; k++) {
11266                 auto sparse_binding = bindInfo.pBufferBinds[j].pBinds[k];
11267                 SetSparseMemBinding(device_data, {sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size},
11268                                     HandleToUint64(bindInfo.pBufferBinds[j].buffer), kVulkanObjectTypeBuffer);
11269             }
11270         }
11271         for (uint32_t j = 0; j < bindInfo.imageOpaqueBindCount; j++) {
11272             for (uint32_t k = 0; k < bindInfo.pImageOpaqueBinds[j].bindCount; k++) {
11273                 auto sparse_binding = bindInfo.pImageOpaqueBinds[j].pBinds[k];
11274                 SetSparseMemBinding(device_data, {sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size},
11275                                     HandleToUint64(bindInfo.pImageOpaqueBinds[j].image), kVulkanObjectTypeImage);
11276             }
11277         }
11278         for (uint32_t j = 0; j < bindInfo.imageBindCount; j++) {
11279             for (uint32_t k = 0; k < bindInfo.pImageBinds[j].bindCount; k++) {
11280                 auto sparse_binding = bindInfo.pImageBinds[j].pBinds[k];
11281                 // TODO: This size is broken for non-opaque bindings, need to update to comprehend full sparse binding data
11282                 VkDeviceSize size = sparse_binding.extent.depth * sparse_binding.extent.height * sparse_binding.extent.width * 4;
11283                 SetSparseMemBinding(device_data, {sparse_binding.memory, sparse_binding.memoryOffset, size},
11284                                     HandleToUint64(bindInfo.pImageBinds[j].image), kVulkanObjectTypeImage);
11285             }
11286         }
11287 
11288         std::vector<SEMAPHORE_WAIT> semaphore_waits;
11289         std::vector<VkSemaphore> semaphore_signals;
11290         std::vector<VkSemaphore> semaphore_externals;
11291         for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
11292             VkSemaphore semaphore = bindInfo.pWaitSemaphores[i];
11293             auto pSemaphore = GetSemaphoreNode(semaphore);
11294             if (pSemaphore) {
11295                 if (pSemaphore->scope == kSyncScopeInternal) {
11296                     if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
11297                         semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
11298                         pSemaphore->in_use.fetch_add(1);
11299                     }
11300                     pSemaphore->signaler.first = VK_NULL_HANDLE;
11301                     pSemaphore->signaled = false;
11302                 } else {
11303                     semaphore_externals.push_back(semaphore);
11304                     pSemaphore->in_use.fetch_add(1);
11305                     if (pSemaphore->scope == kSyncScopeExternalTemporary) {
11306                         pSemaphore->scope = kSyncScopeInternal;
11307                     }
11308                 }
11309             }
11310         }
11311         for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
11312             VkSemaphore semaphore = bindInfo.pSignalSemaphores[i];
11313             auto pSemaphore = GetSemaphoreNode(semaphore);
11314             if (pSemaphore) {
11315                 if (pSemaphore->scope == kSyncScopeInternal) {
11316                     pSemaphore->signaler.first = queue;
11317                     pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
11318                     pSemaphore->signaled = true;
11319                     pSemaphore->in_use.fetch_add(1);
11320                     semaphore_signals.push_back(semaphore);
11321                 } else {
11322                     // Retire work up until this submit early, we will not see the wait that corresponds to this signal
11323                     early_retire_seq = std::max(early_retire_seq, pQueue->seq + pQueue->submissions.size() + 1);
11324                     if (!device_data->external_sync_warning) {
11325                         device_data->external_sync_warning = true;
11326                         log_msg(device_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11327                                 VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, HandleToUint64(semaphore),
11328                                 kVUID_Core_DrawState_QueueForwardProgress,
11329                                 "vkQueueBindSparse(): Signaling external semaphore %s on queue %s will disable validation of "
11330                                 "preceding command buffer lifecycle states and the in-use status of associated objects.",
11331                                 device_data->report_data->FormatHandle(semaphore).c_str(),
11332                                 device_data->report_data->FormatHandle(queue).c_str());
11333                     }
11334                 }
11335             }
11336         }
11337 
11338         pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), semaphore_waits, semaphore_signals, semaphore_externals,
11339                                          bindIdx == bindInfoCount - 1 ? fence : VK_NULL_HANDLE);
11340     }
11341 
11342     if (early_retire_seq) {
11343         RetireWorkOnQueue(device_data, pQueue, early_retire_seq);
11344     }
11345 }
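
// A hedged sketch (illustrative only; hypothetical helper, excluded from the build) of the
// bindInfoCount == 0 path handled above: vkQueueBindSparse may be called with no bind operations
// purely to drop a fence into the queue, which the tracker records as an empty submission.
#if 0
static void ExampleFenceOnlyBindSparse(VkQueue queue, VkFence fence) {
    vkQueueBindSparse(queue, 0, nullptr, fence);  // fence signals once prior work on 'queue' retires
}
#endif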
11346 
11347 void CoreChecks::PostCallRecordCreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
11348                                                const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore, VkResult result) {
11349     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
11350     if (VK_SUCCESS != result) return;
11351     SEMAPHORE_NODE *sNode = &device_data->semaphoreMap[*pSemaphore];
11352     sNode->signaler.first = VK_NULL_HANDLE;
11353     sNode->signaler.second = 0;
11354     sNode->signaled = false;
11355     sNode->scope = kSyncScopeInternal;
11356 }
11357 
11358 bool CoreChecks::ValidateImportSemaphore(layer_data *device_data, VkSemaphore semaphore, const char *caller_name) {
11359     bool skip = false;
11360     SEMAPHORE_NODE *sema_node = GetSemaphoreNode(semaphore);
11361     if (sema_node) {
11362         VK_OBJECT obj_struct = {HandleToUint64(semaphore), kVulkanObjectTypeSemaphore};
11363         skip |= ValidateObjectNotInUse(device_data, sema_node, obj_struct, caller_name, kVUIDUndefined);
11364     }
11365     return skip;
11366 }
11367 
11368 void CoreChecks::RecordImportSemaphoreState(layer_data *device_data, VkSemaphore semaphore,
11369                                             VkExternalSemaphoreHandleTypeFlagBitsKHR handle_type, VkSemaphoreImportFlagsKHR flags) {
11370     SEMAPHORE_NODE *sema_node = GetSemaphoreNode(semaphore);
11371     if (sema_node && sema_node->scope != kSyncScopeExternalPermanent) {
11372         if ((handle_type == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR || flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR) &&
11373             sema_node->scope == kSyncScopeInternal) {
11374             sema_node->scope = kSyncScopeExternalTemporary;
11375         } else {
11376             sema_node->scope = kSyncScopeExternalPermanent;
11377         }
11378     }
11379 }
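
// A minimal sketch (illustrative only; hypothetical helper, excluded from the build) of the two
// import flavors distinguished above: a sync-fd handle type or VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR
// yields a temporary (copy-transference) import; anything else makes the scope permanent.
#if 0
static void ExampleImportSemaphoreSyncFd(VkDevice device, VkSemaphore semaphore, int fd) {
    VkImportSemaphoreFdInfoKHR info = {};
    info.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR;
    info.semaphore = semaphore;
    info.flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR;
    info.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR;
    info.fd = fd;
    vkImportSemaphoreFdKHR(device, &info);  // scope becomes kSyncScopeExternalTemporary
}
#endif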
11380 
11381 #ifdef VK_USE_PLATFORM_WIN32_KHR
11382 bool CoreChecks::PreCallValidateImportSemaphoreWin32HandleKHR(
11383     VkDevice device, const VkImportSemaphoreWin32HandleInfoKHR *pImportSemaphoreWin32HandleInfo) {
11384     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
11385     return ValidateImportSemaphore(device_data, pImportSemaphoreWin32HandleInfo->semaphore, "vkImportSemaphoreWin32HandleKHR");
11386 }
11387 
11388 void CoreChecks::PostCallRecordImportSemaphoreWin32HandleKHR(
11389     VkDevice device, const VkImportSemaphoreWin32HandleInfoKHR *pImportSemaphoreWin32HandleInfo, VkResult result) {
11390     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
11391     if (VK_SUCCESS != result) return;
11392     RecordImportSemaphoreState(device_data, pImportSemaphoreWin32HandleInfo->semaphore, pImportSemaphoreWin32HandleInfo->handleType,
11393                                pImportSemaphoreWin32HandleInfo->flags);
11394 }
11395 #endif  // VK_USE_PLATFORM_WIN32_KHR
11396 
11397 bool CoreChecks::PreCallValidateImportSemaphoreFdKHR(VkDevice device, const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo) {
11398     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
11399     return ValidateImportSemaphore(device_data, pImportSemaphoreFdInfo->semaphore, "vkImportSemaphoreFdKHR");
11400 }
11401 
11402 void CoreChecks::PostCallRecordImportSemaphoreFdKHR(VkDevice device, const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo,
11403                                                     VkResult result) {
11404     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
11405     if (VK_SUCCESS != result) return;
11406     RecordImportSemaphoreState(device_data, pImportSemaphoreFdInfo->semaphore, pImportSemaphoreFdInfo->handleType,
11407                                pImportSemaphoreFdInfo->flags);
11408 }
11409 
11410 void CoreChecks::RecordGetExternalSemaphoreState(layer_data *device_data, VkSemaphore semaphore,
11411                                                  VkExternalSemaphoreHandleTypeFlagBitsKHR handle_type) {
11412     SEMAPHORE_NODE *semaphore_state = GetSemaphoreNode(semaphore);
11413     if (semaphore_state && handle_type != VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR) {
11414         // Cannot track semaphore state once it is exported, except for Sync FD handle types which have copy transference
11415         semaphore_state->scope = kSyncScopeExternalPermanent;
11416     }
11417 }
11418 
11419 #ifdef VK_USE_PLATFORM_WIN32_KHR
11420 void CoreChecks::PostCallRecordGetSemaphoreWin32HandleKHR(VkDevice device,
11421                                                           const VkSemaphoreGetWin32HandleInfoKHR *pGetWin32HandleInfo,
11422                                                           HANDLE *pHandle, VkResult result) {
11423     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
11424     if (VK_SUCCESS != result) return;
11425     RecordGetExternalSemaphoreState(device_data, pGetWin32HandleInfo->semaphore, pGetWin32HandleInfo->handleType);
11426 }
11427 #endif
11428 
11429 void CoreChecks::PostCallRecordGetSemaphoreFdKHR(VkDevice device, const VkSemaphoreGetFdInfoKHR *pGetFdInfo, int *pFd,
11430                                                  VkResult result) {
11431     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
11432     if (VK_SUCCESS != result) return;
11433     RecordGetExternalSemaphoreState(device_data, pGetFdInfo->semaphore, pGetFdInfo->handleType);
11434 }
11435 
11436 bool CoreChecks::ValidateImportFence(layer_data *device_data, VkFence fence, const char *caller_name) {
11437     FENCE_NODE *fence_node = GetFenceNode(fence);
11438     bool skip = false;
11439     if (fence_node && fence_node->scope == kSyncScopeInternal && fence_node->state == FENCE_INFLIGHT) {
11440         skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
11441                         HandleToUint64(fence), kVUIDUndefined, "Cannot call %s on fence %s that is currently in use.", caller_name,
11442                         device_data->report_data->FormatHandle(fence).c_str());
11443     }
11444     return skip;
11445 }
11446 
11447 void CoreChecks::RecordImportFenceState(layer_data *device_data, VkFence fence, VkExternalFenceHandleTypeFlagBitsKHR handle_type,
11448                                         VkFenceImportFlagsKHR flags) {
11449     FENCE_NODE *fence_node = GetFenceNode(fence);
11450     if (fence_node && fence_node->scope != kSyncScopeExternalPermanent) {
11451         if ((handle_type == VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR || flags & VK_FENCE_IMPORT_TEMPORARY_BIT_KHR) &&
11452             fence_node->scope == kSyncScopeInternal) {
11453             fence_node->scope = kSyncScopeExternalTemporary;
11454         } else {
11455             fence_node->scope = kSyncScopeExternalPermanent;
11456         }
11457     }
11458 }
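
// The fence path mirrors the semaphore path above. A hedged sketch (illustrative only; hypothetical
// helper, excluded from the build) of a permanent import: an opaque-fd handle type without the
// TEMPORARY flag moves the fence to kSyncScopeExternalPermanent.
#if 0
static void ExampleImportFenceOpaqueFd(VkDevice device, VkFence fence, int fd) {
    VkImportFenceFdInfoKHR info = {};
    info.sType = VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR;
    info.fence = fence;
    info.flags = 0;  // no VK_FENCE_IMPORT_TEMPORARY_BIT_KHR, reference transference => permanent
    info.handleType = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
    info.fd = fd;
    vkImportFenceFdKHR(device, &info);
}
#endif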
11459 
11460 #ifdef VK_USE_PLATFORM_WIN32_KHR
11461 bool CoreChecks::PreCallValidateImportFenceWin32HandleKHR(VkDevice device,
11462                                                           const VkImportFenceWin32HandleInfoKHR *pImportFenceWin32HandleInfo) {
11463     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
11464     return ValidateImportFence(device_data, pImportFenceWin32HandleInfo->fence, "vkImportFenceWin32HandleKHR");
11465 }
11466 void CoreChecks::PostCallRecordImportFenceWin32HandleKHR(VkDevice device,
11467                                                          const VkImportFenceWin32HandleInfoKHR *pImportFenceWin32HandleInfo,
11468                                                          VkResult result) {
11469     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
11470     if (VK_SUCCESS != result) return;
11471     RecordImportFenceState(device_data, pImportFenceWin32HandleInfo->fence, pImportFenceWin32HandleInfo->handleType,
11472                            pImportFenceWin32HandleInfo->flags);
11473 }
11474 #endif  // VK_USE_PLATFORM_WIN32_KHR
11475 
11476 bool CoreChecks::PreCallValidateImportFenceFdKHR(VkDevice device, const VkImportFenceFdInfoKHR *pImportFenceFdInfo) {
11477     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
11478     return ValidateImportFence(device_data, pImportFenceFdInfo->fence, "vkImportFenceFdKHR");
11479 }
11480 void CoreChecks::PostCallRecordImportFenceFdKHR(VkDevice device, const VkImportFenceFdInfoKHR *pImportFenceFdInfo,
11481                                                 VkResult result) {
11482     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
11483     if (VK_SUCCESS != result) return;
11484     RecordImportFenceState(device_data, pImportFenceFdInfo->fence, pImportFenceFdInfo->handleType, pImportFenceFdInfo->flags);
11485 }
11486 
11487 void CoreChecks::RecordGetExternalFenceState(layer_data *device_data, VkFence fence,
11488                                              VkExternalFenceHandleTypeFlagBitsKHR handle_type) {
11489     FENCE_NODE *fence_state = GetFenceNode(fence);
11490     if (fence_state) {
11491         if (handle_type != VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR) {
11492             // Export with reference transference becomes external
11493             fence_state->scope = kSyncScopeExternalPermanent;
11494         } else if (fence_state->scope == kSyncScopeInternal) {
11495             // Export with copy transference has a side effect of resetting the fence
11496             fence_state->state = FENCE_UNSIGNALED;
11497         }
11498     }
11499 }
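
// A hedged sketch (illustrative only; hypothetical helper, excluded from the build) of the
// copy-transference branch above: exporting a fence payload as a sync fd keeps tracking internal
// but resets the fence to FENCE_UNSIGNALED, unlike reference-transference handle types, which end
// tracking entirely.
#if 0
static int ExampleExportFenceSyncFd(VkDevice device, VkFence fence) {
    VkFenceGetFdInfoKHR info = {};
    info.sType = VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR;
    info.fence = fence;
    info.handleType = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR;
    int fd = -1;
    vkGetFenceFdKHR(device, &info, &fd);
    return fd;  // the tracker now treats 'fence' as unsignaled
}
#endif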
11500 
11501 #ifdef VK_USE_PLATFORM_WIN32_KHR
11502 void CoreChecks::PostCallRecordGetFenceWin32HandleKHR(VkDevice device, const VkFenceGetWin32HandleInfoKHR *pGetWin32HandleInfo,
11503                                                       HANDLE *pHandle, VkResult result) {
11504     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
11505     if (VK_SUCCESS != result) return;
11506     RecordGetExternalFenceState(device_data, pGetWin32HandleInfo->fence, pGetWin32HandleInfo->handleType);
11507 }
11508 #endif
11509 
11510 void CoreChecks::PostCallRecordGetFenceFdKHR(VkDevice device, const VkFenceGetFdInfoKHR *pGetFdInfo, int *pFd, VkResult result) {
11511     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
11512     if (VK_SUCCESS != result) return;
11513     RecordGetExternalFenceState(device_data, pGetFdInfo->fence, pGetFdInfo->handleType);
11514 }
11515 
11516 void CoreChecks::PostCallRecordCreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo,
11517                                            const VkAllocationCallbacks *pAllocator, VkEvent *pEvent, VkResult result) {
11518     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
11519     if (VK_SUCCESS != result) return;
11520     device_data->eventMap[*pEvent].needsSignaled = false;
11521     device_data->eventMap[*pEvent].write_in_use = 0;
11522     device_data->eventMap[*pEvent].stageMask = VkPipelineStageFlags(0);
11523 }
11524 
11525 bool CoreChecks::ValidateCreateSwapchain(layer_data *device_data, const char *func_name,
11526                                          VkSwapchainCreateInfoKHR const *pCreateInfo, SURFACE_STATE *surface_state,
11527                                          SWAPCHAIN_NODE *old_swapchain_state) {
11528     VkDevice device = device_data->device;
11529 
11530     // All physical devices and queue families are required to be able to present to any native window on Android; require the
11531     // application to have established support on any other platform.
11532     if (!device_data->instance_extensions.vk_khr_android_surface) {
11533         auto support_predicate = [device_data](decltype(surface_state->gpu_queue_support)::value_type qs) -> bool {
11534             // TODO: should restrict search only to queue families of VkDeviceQueueCreateInfos, not whole phys. device
11535             return (qs.first.gpu == device_data->physical_device) && qs.second;
11536         };
11537         const auto &support = surface_state->gpu_queue_support;
11538         bool is_supported = std::any_of(support.begin(), support.end(), support_predicate);
11539 
11540         if (!is_supported) {
11541             if (log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11542                         HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-surface-01270",
11543                         "%s: pCreateInfo->surface is not known at this time to be supported for presentation by this device. "
11544                         "vkGetPhysicalDeviceSurfaceSupportKHR() must be called beforehand, and it must return VK_TRUE for this "
11545                         "surface and at least one queue family of this device.",
11546                         func_name))
11547                 return true;
11548         }
11549     }
11550 
11551     if (old_swapchain_state) {
11552         if (old_swapchain_state->createInfo.surface != pCreateInfo->surface) {
11553             if (log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
11554                         HandleToUint64(pCreateInfo->oldSwapchain), "VUID-VkSwapchainCreateInfoKHR-oldSwapchain-01933",
11555                         "%s: pCreateInfo->oldSwapchain's surface is not pCreateInfo->surface", func_name))
11556                 return true;
11557         }
11558         if (old_swapchain_state->retired) {
11559             if (log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
11560                         HandleToUint64(pCreateInfo->oldSwapchain), "VUID-VkSwapchainCreateInfoKHR-oldSwapchain-01933",
11561                         "%s: pCreateInfo->oldSwapchain is retired", func_name))
11562                 return true;
11563         }
11564     }
11565 
11566     if ((pCreateInfo->imageExtent.width == 0) || (pCreateInfo->imageExtent.height == 0)) {
11567         if (log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11568                     HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-imageExtent-01689",
11569                     "%s: pCreateInfo->imageExtent = (%d, %d); both width and height must be nonzero.", func_name, pCreateInfo->imageExtent.width,
11570                     pCreateInfo->imageExtent.height))
11571             return true;
11572     }
11573 
11574     auto physical_device_state = GetPhysicalDeviceState();
11575     if (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState == UNCALLED) {
11576         if (log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
11577                     HandleToUint64(device_data->physical_device), kVUID_Core_DrawState_SwapchainCreateBeforeQuery,
11578                     "%s: surface capabilities not retrieved for this physical device", func_name))
11579             return true;
11580     } else {  // have valid capabilities
11581         auto &capabilities = physical_device_state->surfaceCapabilities;
11582         // Validate pCreateInfo->minImageCount against VkSurfaceCapabilitiesKHR::{min|max}ImageCount:
11583         if (pCreateInfo->minImageCount < capabilities.minImageCount) {
11584             if (log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11585                         HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-minImageCount-01271",
11586                         "%s called with minImageCount = %d, which is outside the bounds returned by "
11587                         "vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d).",
11588                         func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount))
11589                 return true;
11590         }
11591 
11592         if ((capabilities.maxImageCount > 0) && (pCreateInfo->minImageCount > capabilities.maxImageCount)) {
11593             if (log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11594                         HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-minImageCount-01272",
11595                         "%s called with minImageCount = %d, which is outside the bounds returned by "
11596                         "vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d).",
11597                         func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount))
11598                 return true;
11599         }
11600 
11601         // Validate pCreateInfo->imageExtent against VkSurfaceCapabilitiesKHR::{current|min|max}ImageExtent:
11602         if ((pCreateInfo->imageExtent.width < capabilities.minImageExtent.width) ||
11603             (pCreateInfo->imageExtent.width > capabilities.maxImageExtent.width) ||
11604             (pCreateInfo->imageExtent.height < capabilities.minImageExtent.height) ||
11605             (pCreateInfo->imageExtent.height > capabilities.maxImageExtent.height)) {
11606             if (log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11607                         HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-imageExtent-01274",
11608                         "%s called with imageExtent = (%d,%d), which is outside the bounds returned by "
11609                         "vkGetPhysicalDeviceSurfaceCapabilitiesKHR(): currentExtent = (%d,%d), minImageExtent = (%d,%d), "
11610                         "maxImageExtent = (%d,%d).",
11611                         func_name, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height,
11612                         capabilities.currentExtent.width, capabilities.currentExtent.height, capabilities.minImageExtent.width,
11613                         capabilities.minImageExtent.height, capabilities.maxImageExtent.width, capabilities.maxImageExtent.height))
11614                 return true;
11615         }
11616         // pCreateInfo->preTransform should have exactly one bit set, and that bit must also be set in
11617         // VkSurfaceCapabilitiesKHR::supportedTransforms; (x & (x - 1)) below is nonzero exactly when more than one bit of x is set.
11618         if (!pCreateInfo->preTransform || (pCreateInfo->preTransform & (pCreateInfo->preTransform - 1)) ||
11619             !(pCreateInfo->preTransform & capabilities.supportedTransforms)) {
11620             // This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message.  Build
11621             // it up a little at a time, and then log it:
11622             std::string errorString = "";
11623             char str[1024];
11624             // Here's the first part of the message:
11625             sprintf(str, "%s called with a non-supported pCreateInfo->preTransform (i.e. %s).  Supported values are:\n", func_name,
11626                     string_VkSurfaceTransformFlagBitsKHR(pCreateInfo->preTransform));
11627             errorString += str;
11628             for (int i = 0; i < 32; i++) {
11629                 // Build up the rest of the message:
11630                 if ((1u << i) & capabilities.supportedTransforms) {
11631                     const char *newStr = string_VkSurfaceTransformFlagBitsKHR((VkSurfaceTransformFlagBitsKHR)(1u << i));
11632                     sprintf(str, "    %s\n", newStr);
11633                     errorString += str;
11634                 }
11635             }
11636             // Log the message that we've built up:
11637             if (log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11638                         HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-preTransform-01279", "%s.", errorString.c_str()))
11639                 return true;
11640         }
11641 
11642         // pCreateInfo->compositeAlpha should have exactly one bit set, and that bit must also be set in
11643         // VkSurfaceCapabilitiesKHR::supportedCompositeAlpha
11644         if (!pCreateInfo->compositeAlpha || (pCreateInfo->compositeAlpha & (pCreateInfo->compositeAlpha - 1)) ||
11645             !((pCreateInfo->compositeAlpha) & capabilities.supportedCompositeAlpha)) {
11646             // This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message.  Build
11647             // it up a little at a time, and then log it:
11648             std::string errorString = "";
11649             char str[1024];
11650             // Here's the first part of the message:
11651             sprintf(str, "%s called with a non-supported pCreateInfo->compositeAlpha (i.e. %s).  Supported values are:\n",
11652                     func_name, string_VkCompositeAlphaFlagBitsKHR(pCreateInfo->compositeAlpha));
11653             errorString += str;
11654             for (int i = 0; i < 32; i++) {
11655                 // Build up the rest of the message:
11656                 if ((1u << i) & capabilities.supportedCompositeAlpha) {
11657                     const char *newStr = string_VkCompositeAlphaFlagBitsKHR((VkCompositeAlphaFlagBitsKHR)(1u << i));
11658                     sprintf(str, "    %s\n", newStr);
11659                     errorString += str;
11660                 }
11661             }
11662             // Log the message that we've built up:
11663             if (log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11664                         HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-compositeAlpha-01280", "%s.", errorString.c_str()))
11665                 return true;
11666         }
11667         // Validate pCreateInfo->imageArrayLayers against VkSurfaceCapabilitiesKHR::maxImageArrayLayers:
11668         if (pCreateInfo->imageArrayLayers > capabilities.maxImageArrayLayers) {
11669             if (log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11670                         HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-imageArrayLayers-01275",
11671                         "%s called with a non-supported imageArrayLayers (i.e. %d).  Maximum value is %d.", func_name,
11672                         pCreateInfo->imageArrayLayers, capabilities.maxImageArrayLayers))
11673                 return true;
11674         }
11675         // Validate pCreateInfo->imageUsage against VkSurfaceCapabilitiesKHR::supportedUsageFlags:
11676         if (pCreateInfo->imageUsage != (pCreateInfo->imageUsage & capabilities.supportedUsageFlags)) {
11677             if (log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11678                         HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-imageUsage-01276",
11679                         "%s called with a non-supported pCreateInfo->imageUsage (i.e. 0x%08x).  Supported flag bits are 0x%08x.",
11680                         func_name, pCreateInfo->imageUsage, capabilities.supportedUsageFlags))
11681                 return true;
11682         }
11683     }
11684 
11685     // Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfaceFormatsKHR():
11686     if (physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState != QUERY_DETAILS) {
11687         if (log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11688                     HandleToUint64(device), kVUID_Core_DrawState_SwapchainCreateBeforeQuery,
11689                     "%s called before calling vkGetPhysicalDeviceSurfaceFormatsKHR().", func_name))
11690             return true;
11691     } else {
11692         // Validate pCreateInfo->imageFormat against VkSurfaceFormatKHR::format:
11693         bool foundFormat = false;
11694         bool foundColorSpace = false;
11695         bool foundMatch = false;
11696         for (auto const &format : physical_device_state->surface_formats) {
11697             if (pCreateInfo->imageFormat == format.format) {
11698                 // Validate pCreateInfo->imageColorSpace against VkSurfaceFormatKHR::colorSpace:
11699                 foundFormat = true;
11700                 if (pCreateInfo->imageColorSpace == format.colorSpace) {
11701                     foundMatch = true;
11702                     break;
11703                 }
11704             } else {
11705                 if (pCreateInfo->imageColorSpace == format.colorSpace) {
11706                     foundColorSpace = true;
11707                 }
11708             }
11709         }
11710         if (!foundMatch) {
11711             if (!foundFormat) {
11712                 if (log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11713                             HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-imageFormat-01273",
11714                             "%s called with a non-supported pCreateInfo->imageFormat (i.e. %d).", func_name,
11715                             pCreateInfo->imageFormat))
11716                     return true;
11717             }
11718             if (!foundColorSpace) {
11719                 if (log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11720                             HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-imageFormat-01273",
11721                             "%s called with a non-supported pCreateInfo->imageColorSpace (i.e. %d).", func_name,
11722                             pCreateInfo->imageColorSpace))
11723                     return true;
11724             }
11725         }
11726     }
11727 
11728     // Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfacePresentModesKHR():
11729     if (physical_device_state->vkGetPhysicalDeviceSurfacePresentModesKHRState != QUERY_DETAILS) {
11730         // FIFO is required to always be supported
11731         if (pCreateInfo->presentMode != VK_PRESENT_MODE_FIFO_KHR) {
11732             if (log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11733                         HandleToUint64(device), kVUID_Core_DrawState_SwapchainCreateBeforeQuery,
11734                         "%s called before calling vkGetPhysicalDeviceSurfacePresentModesKHR().", func_name))
11735                 return true;
11736         }
11737     } else {
11738         // Validate pCreateInfo->presentMode against vkGetPhysicalDeviceSurfacePresentModesKHR():
11739         bool foundMatch = std::find(physical_device_state->present_modes.begin(), physical_device_state->present_modes.end(),
11740                                     pCreateInfo->presentMode) != physical_device_state->present_modes.end();
11741         if (!foundMatch) {
11742             if (log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11743                         HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-presentMode-01281",
11744                         "%s called with a non-supported presentMode (i.e. %s).", func_name,
11745                         string_VkPresentModeKHR(pCreateInfo->presentMode)))
11746                 return true;
11747         }
11748     }
11749     // Validate state for shared presentable case
11750     if (VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == pCreateInfo->presentMode ||
11751         VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR == pCreateInfo->presentMode) {
11752         if (!device_data->device_extensions.vk_khr_shared_presentable_image) {
11753             if (log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11754                         HandleToUint64(device), kVUID_Core_DrawState_ExtensionNotEnabled,
11755                         "%s called with presentMode %s which requires the VK_KHR_shared_presentable_image extension, which has not "
11756                         "been enabled.",
11757                         func_name, string_VkPresentModeKHR(pCreateInfo->presentMode)))
11758                 return true;
11759         } else if (pCreateInfo->minImageCount != 1) {
11760             if (log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11761                         HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-minImageCount-01383",
11762                         "%s called with presentMode %s, but minImageCount value is %d. For shared presentable image, minImageCount "
11763                         "must be 1.",
11764                         func_name, string_VkPresentModeKHR(pCreateInfo->presentMode), pCreateInfo->minImageCount))
11765                 return true;
11766         }
11767     }
11768 
11769     if (pCreateInfo->flags & VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR) {
11770         if (!device_data->device_extensions.vk_khr_swapchain_mutable_format) {
11771             if (log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11772                         HandleToUint64(device), kVUID_Core_DrawState_ExtensionNotEnabled,
11773                         "%s: pCreateInfo->flags contains VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR which requires the "
11774                         "VK_KHR_swapchain_mutable_format extension, which has not been enabled.",
11775                         func_name))
11776                 return true;
11777         } else {
11778             const auto *image_format_list = lvl_find_in_chain<VkImageFormatListCreateInfoKHR>(pCreateInfo->pNext);
11779             if (image_format_list == nullptr) {
11780                 if (log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11781                             HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-flags-03168",
11782                             "%s: pCreateInfo->flags contains VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR but the pNext chain of "
11783                             "pCreateInfo does not contain an instance of VkImageFormatListCreateInfoKHR.",
11784                             func_name))
11785                     return true;
11786             } else if (image_format_list->viewFormatCount == 0) {
11787                 if (log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11788                             HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-flags-03168",
11789                             "%s: pCreateInfo->flags contains VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR but the viewFormatCount "
11790                             "member of VkImageFormatListCreateInfoKHR in the pNext chain is zero.",
11791                             func_name))
11792                     return true;
11793             } else {
11794                 bool found_base_format = false;
11795                 for (uint32_t i = 0; i < image_format_list->viewFormatCount; ++i) {
11796                     if (image_format_list->pViewFormats[i] == pCreateInfo->imageFormat) {
11797                         found_base_format = true;
11798                         break;
11799                     }
11800                 }
11801                 if (!found_base_format) {
11802                     if (log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11803                                 HandleToUint64(device), "VUID-VkSwapchainCreateInfoKHR-flags-03168",
11804                                 "%s: pCreateInfo->flags contains VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR but none of the "
11805                                 "elements of the pViewFormats member of VkImageFormatListCreateInfoKHR match "
11806                                 "pCreateInfo->imageFormat.",
11807                                 func_name))
11808                         return true;
11809                 }
11810             }
11811         }
11812     }
11813 
11814     if ((pCreateInfo->imageSharingMode == VK_SHARING_MODE_CONCURRENT) && pCreateInfo->pQueueFamilyIndices) {
11815         bool skip = ValidateQueueFamilies(device_data, pCreateInfo->queueFamilyIndexCount, pCreateInfo->pQueueFamilyIndices,
11816                                           func_name, "pCreateInfo->pQueueFamilyIndices",
11817                                           "VUID-VkSwapchainCreateInfoKHR-imageSharingMode-01428",
11818                                           "VUID-VkSwapchainCreateInfoKHR-imageSharingMode-01428", false);
11819         if (skip) return true;
11820     }
11821 
11822     return false;
11823 }
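
// A minimal application-side sketch (illustrative only; hypothetical helper, excluded from the
// build) of the query-first flow ValidateCreateSwapchain expects: surface support, capabilities,
// formats, and present modes are all retrieved before vkCreateSwapchainKHR, and every pCreateInfo
// field is drawn from the queried values.
#if 0
static void ExampleQueryThenCreateSwapchain(VkPhysicalDevice gpu, VkDevice device, VkSurfaceKHR surface,
                                            uint32_t queue_family) {
    VkBool32 supported = VK_FALSE;
    vkGetPhysicalDeviceSurfaceSupportKHR(gpu, queue_family, surface, &supported);  // required by 01270 above

    VkSurfaceCapabilitiesKHR caps;
    vkGetPhysicalDeviceSurfaceCapabilitiesKHR(gpu, surface, &caps);

    uint32_t format_count = 0;
    vkGetPhysicalDeviceSurfaceFormatsKHR(gpu, surface, &format_count, nullptr);
    std::vector<VkSurfaceFormatKHR> formats(format_count);
    vkGetPhysicalDeviceSurfaceFormatsKHR(gpu, surface, &format_count, formats.data());

    uint32_t mode_count = 0;
    vkGetPhysicalDeviceSurfacePresentModesKHR(gpu, surface, &mode_count, nullptr);

    VkSwapchainCreateInfoKHR ci = {};
    ci.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
    ci.surface = surface;
    ci.minImageCount = caps.minImageCount;                  // within [minImageCount, maxImageCount]
    ci.imageFormat = formats[0].format;                     // a reported format/colorSpace pair
    ci.imageColorSpace = formats[0].colorSpace;
    ci.imageExtent = caps.currentExtent;                    // within [minImageExtent, maxImageExtent]
    ci.imageArrayLayers = 1;                                // <= maxImageArrayLayers
    ci.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;    // subset of supportedUsageFlags
    ci.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
    ci.preTransform = caps.currentTransform;                // single bit from supportedTransforms
    ci.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;  // single bit from supportedCompositeAlpha
    ci.presentMode = VK_PRESENT_MODE_FIFO_KHR;              // FIFO is always supported
    ci.clipped = VK_TRUE;
    VkSwapchainKHR swapchain = VK_NULL_HANDLE;
    vkCreateSwapchainKHR(device, &ci, nullptr, &swapchain);
}
#endif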
11824 
11825 bool CoreChecks::PreCallValidateCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
11826                                                    const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain) {
11827     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
11828     auto surface_state = GetSurfaceState(pCreateInfo->surface);
11829     auto old_swapchain_state = GetSwapchainNode(pCreateInfo->oldSwapchain);
11830     return ValidateCreateSwapchain(device_data, "vkCreateSwapchainKHR()", pCreateInfo, surface_state, old_swapchain_state);
11831 }
11832 
11833 static void RecordCreateSwapchainState(layer_data *device_data, VkResult result, const VkSwapchainCreateInfoKHR *pCreateInfo,
11834                                        VkSwapchainKHR *pSwapchain, SURFACE_STATE *surface_state,
11835                                        SWAPCHAIN_NODE *old_swapchain_state) {
11836     if (VK_SUCCESS == result) {
11837         auto swapchain_state = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(pCreateInfo, *pSwapchain));
11838         if (VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == pCreateInfo->presentMode ||
11839             VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR == pCreateInfo->presentMode) {
11840             swapchain_state->shared_presentable = true;
11841         }
11842         surface_state->swapchain = swapchain_state.get();
11843         device_data->swapchainMap[*pSwapchain] = std::move(swapchain_state);
11844     } else {
11845         surface_state->swapchain = nullptr;
11846     }
11847     // Spec requires that even if CreateSwapchainKHR fails, oldSwapchain is retired
11848     if (old_swapchain_state) {
11849         old_swapchain_state->retired = true;
11850     }
11851     return;
11852 }
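// Illustrative sketch only: the retirement rule recorded above corresponds to the usual
// swapchain-recreate pattern on the application side (device, ci, and old_swapchain assumed):
//
//     ci.oldSwapchain = old_swapchain;
//     VkSwapchainKHR new_swapchain = VK_NULL_HANDLE;
//     VkResult res = vkCreateSwapchainKHR(device, &ci, nullptr, &new_swapchain);
//     // old_swapchain is retired whether or not creation succeeded; the application must
//     // still destroy it once its images are no longer in use.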
11853 
11854 void CoreChecks::PostCallRecordCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
11855                                                   const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain,
11856                                                   VkResult result) {
11857     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
11858     auto surface_state = GetSurfaceState(pCreateInfo->surface);
11859     auto old_swapchain_state = GetSwapchainNode(pCreateInfo->oldSwapchain);
11860     RecordCreateSwapchainState(device_data, result, pCreateInfo, pSwapchain, surface_state, old_swapchain_state);
11861 }
11862 
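// Tear down everything tracked for the swapchain: per-image layout and subresource entries,
// memory-binding aliases, pending QFO image release barriers, and the proxy IMAGE_STATE
// objects, then the surface back-pointer and the swapchain map entry itself.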
11863 void CoreChecks::PreCallRecordDestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain,
11864                                                   const VkAllocationCallbacks *pAllocator) {
11865     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
11866     if (!swapchain) return;
11867     auto swapchain_data = GetSwapchainNode(swapchain);
11868     if (swapchain_data) {
11869         if (swapchain_data->images.size() > 0) {
11870             for (auto swapchain_image : swapchain_data->images) {
11871                 auto image_sub = device_data->imageSubresourceMap.find(swapchain_image);
11872                 if (image_sub != device_data->imageSubresourceMap.end()) {
11873                     for (auto imgsubpair : image_sub->second) {
11874                         auto image_item = device_data->imageLayoutMap.find(imgsubpair);
11875                         if (image_item != device_data->imageLayoutMap.end()) {
11876                             device_data->imageLayoutMap.erase(image_item);
11877                         }
11878                     }
11879                     device_data->imageSubresourceMap.erase(image_sub);
11880                 }
11881                 ClearMemoryObjectBindings(HandleToUint64(swapchain_image), kVulkanObjectTypeSwapchainKHR);
11882                 EraseQFOImageRelaseBarriers(device_data, swapchain_image);
11883                 device_data->imageMap.erase(swapchain_image);
11884             }
11885         }
11886 
11887         auto surface_state = GetSurfaceState(swapchain_data->createInfo.surface);
11888         if (surface_state) {
11889             if (surface_state->swapchain == swapchain_data) surface_state->swapchain = nullptr;
11890         }
11891 
11892         device_data->swapchainMap.erase(swapchain);
11893     }
11894 }
11895 
11896 bool CoreChecks::PreCallValidateGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount,
11897                                                       VkImage *pSwapchainImages) {
11898     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
11899     auto swapchain_state = GetSwapchainNode(swapchain);
11900     bool skip = false;
11901     if (swapchain_state && pSwapchainImages) {
11902         // Compare the preliminary value of *pSwapchainImageCount with the value this time:
11903         if (swapchain_state->vkGetSwapchainImagesKHRState == UNCALLED) {
11904             skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11905                             HandleToUint64(device), kVUID_Core_Swapchain_PriorCount,
11906                             "vkGetSwapchainImagesKHR() called with non-NULL pSwapchainImages; but no prior positive value has "
11907                             "been seen for pSwapchainImageCount.");
11908         } else if (*pSwapchainImageCount > swapchain_state->get_swapchain_image_count) {
11909             skip |=
11910                 log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11911                         HandleToUint64(device), kVUID_Core_Swapchain_InvalidCount,
11912                         "vkGetSwapchainImagesKHR() called with non-NULL pSwapchainImageCount, and with *pSwapchainImageCount set to a "
11913                         "value (%u) that is greater than the value (%u) that was returned when pSwapchainImages was NULL.",
11914                         *pSwapchainImageCount, swapchain_state->get_swapchain_image_count);
11915         }
11916     }
11917     return skip;
11918 }
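// Illustrative sketch only: the two-call idiom the checks above expect from the application
// (device and swapchain handles assumed):
//
//     uint32_t count = 0;
//     vkGetSwapchainImagesKHR(device, swapchain, &count, nullptr);         // first call: query the count
//     std::vector<VkImage> images(count);
//     vkGetSwapchainImagesKHR(device, swapchain, &count, images.data());  // second call: fetch the handles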
11919 
11920 void CoreChecks::PostCallRecordGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount,
11921                                                      VkImage *pSwapchainImages, VkResult result) {
11922     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
11923 
11924     if ((result != VK_SUCCESS) && (result != VK_INCOMPLETE)) return;
11925     auto swapchain_state = GetSwapchainNode(swapchain);
11926 
11927     if (*pSwapchainImageCount > swapchain_state->images.size()) swapchain_state->images.resize(*pSwapchainImageCount);
11928 
11929     if (pSwapchainImages) {
11930         if (swapchain_state->vkGetSwapchainImagesKHRState < QUERY_DETAILS) {
11931             swapchain_state->vkGetSwapchainImagesKHRState = QUERY_DETAILS;
11932         }
11933         for (uint32_t i = 0; i < *pSwapchainImageCount; ++i) {
11934             if (swapchain_state->images[i] != VK_NULL_HANDLE) continue;  // Already retrieved this.
11935 
11936             IMAGE_LAYOUT_NODE image_layout_node;
11937             image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED;
11938             image_layout_node.format = swapchain_state->createInfo.imageFormat;
11939             // Add imageMap entries for each swapchain image
11940             VkImageCreateInfo image_ci = {};
            image_ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
11941             image_ci.flags = 0;
11942             image_ci.imageType = VK_IMAGE_TYPE_2D;
11943             image_ci.format = swapchain_state->createInfo.imageFormat;
11944             image_ci.extent.width = swapchain_state->createInfo.imageExtent.width;
11945             image_ci.extent.height = swapchain_state->createInfo.imageExtent.height;
11946             image_ci.extent.depth = 1;
11947             image_ci.mipLevels = 1;
11948             image_ci.arrayLayers = swapchain_state->createInfo.imageArrayLayers;
11949             image_ci.samples = VK_SAMPLE_COUNT_1_BIT;
11950             image_ci.tiling = VK_IMAGE_TILING_OPTIMAL;
11951             image_ci.usage = swapchain_state->createInfo.imageUsage;
11952             image_ci.sharingMode = swapchain_state->createInfo.imageSharingMode;
11953             device_data->imageMap[pSwapchainImages[i]] = unique_ptr<IMAGE_STATE>(new IMAGE_STATE(pSwapchainImages[i], &image_ci));
11954             auto &image_state = device_data->imageMap[pSwapchainImages[i]];
11955             image_state->valid = false;
11956             image_state->binding.mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY;
11957             swapchain_state->images[i] = pSwapchainImages[i];
11958             ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()};
11959             device_data->imageSubresourceMap[pSwapchainImages[i]].push_back(subpair);
11960             device_data->imageLayoutMap[subpair] = image_layout_node;
11961         }
11962     }
11963 
11964     if (*pSwapchainImageCount) {
11965         if (swapchain_state->vkGetSwapchainImagesKHRState < QUERY_COUNT) {
11966             swapchain_state->vkGetSwapchainImagesKHRState = QUERY_COUNT;
11967         }
11968         swapchain_state->get_swapchain_image_count = *pSwapchainImageCount;
11969     }
11970 }
11971 
11972 bool CoreChecks::PreCallValidateQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
11973     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
11974     bool skip = false;
11975     auto queue_state = GetQueueState(queue);
11976 
11977     for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
11978         auto pSemaphore = GetSemaphoreNode(pPresentInfo->pWaitSemaphores[i]);
11979         if (pSemaphore && !pSemaphore->signaled) {
11980             skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
11981                             0, kVUID_Core_DrawState_QueueForwardProgress,
11982                             "Queue %s is waiting on semaphore %s that has no way to be signaled.",
11983                             device_data->report_data->FormatHandle(queue).c_str(),
11984                             device_data->report_data->FormatHandle(pPresentInfo->pWaitSemaphores[i]).c_str());
11985         }
11986     }
11987 
11988     for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
11989         auto swapchain_data = GetSwapchainNode(pPresentInfo->pSwapchains[i]);
11990         if (swapchain_data) {
11991             if (pPresentInfo->pImageIndices[i] >= swapchain_data->images.size()) {
11992                 skip |=
11993                     log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
11994                             HandleToUint64(pPresentInfo->pSwapchains[i]), kVUID_Core_DrawState_SwapchainInvalidImage,
11995                             "vkQueuePresentKHR: Swapchain image index too large (%u). There are only %u images in this swapchain.",
11996                             pPresentInfo->pImageIndices[i], (uint32_t)swapchain_data->images.size());
11997             } else {
11998                 auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
11999                 auto image_state = GetImageState(image);
12000 
12001                 if (image_state->shared_presentable) {
12002                     image_state->layout_locked = true;
12003                 }
12004 
12005                 if (!image_state->acquired) {
12006                     skip |= log_msg(
12007                         device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
12008                         HandleToUint64(pPresentInfo->pSwapchains[i]), kVUID_Core_DrawState_SwapchainImageNotAcquired,
12009                         "vkQueuePresentKHR: Swapchain image index %u has not been acquired.", pPresentInfo->pImageIndices[i]);
12010                 }
12011 
12012                 vector<VkImageLayout> layouts;
12013                 if (FindLayouts(device_data, image, layouts)) {
12014                     for (auto layout : layouts) {
12015                         if ((layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) &&
12016                             (!device_data->device_extensions.vk_khr_shared_presentable_image ||
12017                              (layout != VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR))) {
12018                             skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
12019                                             VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT, HandleToUint64(queue),
12020                                             "VUID-VkPresentInfoKHR-pImageIndices-01296",
12021                                             "Images passed to present must be in layout VK_IMAGE_LAYOUT_PRESENT_SRC_KHR or "
12022                                             "VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR but is in %s.",
12023                                             string_VkImageLayout(layout));
12024                         }
12025                     }
12026                 }
12027             }
12028 
12029             // All physical devices and queue families are required to be able to present to any native window on Android; require
12030             // the application to have established support on any other platform.
12031             if (!device_data->instance_extensions.vk_khr_android_surface) {
12032                 auto surface_state = GetSurfaceState(swapchain_data->createInfo.surface);
12033                 auto support_it =
12034                     surface_state->gpu_queue_support.find({device_data->physical_device, queue_state->queueFamilyIndex});
12035 
12036                 if (support_it == surface_state->gpu_queue_support.end()) {
12037                     skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
12038                                     VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, HandleToUint64(pPresentInfo->pSwapchains[i]),
12039                                     kVUID_Core_DrawState_SwapchainUnsupportedQueue,
12040                                     "vkQueuePresentKHR: Presenting image without calling vkGetPhysicalDeviceSurfaceSupportKHR");
12041                 } else if (!support_it->second) {
12042                     skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
12043                                     VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, HandleToUint64(pPresentInfo->pSwapchains[i]),
12044                                     "VUID-vkQueuePresentKHR-pSwapchains-01292",
12045                                     "vkQueuePresentKHR: Presenting image on queue that cannot present to this surface.");
12046                 }
12047             }
12048         }
12049     }
12050     if (pPresentInfo && pPresentInfo->pNext) {
12051         // Verify ext struct
12052         const auto *present_regions = lvl_find_in_chain<VkPresentRegionsKHR>(pPresentInfo->pNext);
12053         if (present_regions) {
12054             for (uint32_t i = 0; i < present_regions->swapchainCount; ++i) {
12055                 auto swapchain_data = GetSwapchainNode(pPresentInfo->pSwapchains[i]);
12056                 assert(swapchain_data);
12057                 VkPresentRegionKHR region = present_regions->pRegions[i];
12058                 for (uint32_t j = 0; j < region.rectangleCount; ++j) {
12059                     VkRectLayerKHR rect = region.pRectangles[j];
12060                     if ((rect.offset.x + rect.extent.width) > swapchain_data->createInfo.imageExtent.width) {
12061                         skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
12062                                         VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, HandleToUint64(pPresentInfo->pSwapchains[i]),
12063                                         "VUID-VkRectLayerKHR-offset-01261",
12064                                         "vkQueuePresentKHR(): For VkPresentRegionKHR down the pNext chain, "
12065                                         "pRegions[%i].pRectangles[%i], the sum of offset.x (%i) and extent.width (%i) is greater "
12066                                         "than the corresponding swapchain's imageExtent.width (%i).",
12067                                         i, j, rect.offset.x, rect.extent.width, swapchain_data->createInfo.imageExtent.width);
12068                     }
12069                     if ((rect.offset.y + rect.extent.height) > swapchain_data->createInfo.imageExtent.height) {
12070                         skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
12071                                         VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, HandleToUint64(pPresentInfo->pSwapchains[i]),
12072                                         "VUID-VkRectLayerKHR-offset-01261",
12073                                         "vkQueuePresentKHR(): For VkPresentRegionKHR down the pNext chain, "
12074                                         "pRegions[%i].pRectangles[%i], the sum of offset.y (%i) and extent.height (%i) is greater "
12075                                         "than the corresponding swapchain's imageExtent.height (%i).",
12076                                         i, j, rect.offset.y, rect.extent.height, swapchain_data->createInfo.imageExtent.height);
12077                     }
12078                     if (rect.layer > swapchain_data->createInfo.imageArrayLayers) {
12079                         skip |= log_msg(
12080                             device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
12081                             HandleToUint64(pPresentInfo->pSwapchains[i]), "VUID-VkRectLayerKHR-layer-01262",
12082                             "vkQueuePresentKHR(): For VkPresentRegionKHR down the pNext chain, pRegions[%i].pRectangles[%i], the layer "
12083                             "(%i) is greater than the corresponding swapchain's imageArrayLayers (%i).",
12084                             i, j, rect.layer, swapchain_data->createInfo.imageArrayLayers);
12085                     }
12086                 }
12087             }
12088         }
12089 
12090         const auto *present_times_info = lvl_find_in_chain<VkPresentTimesInfoGOOGLE>(pPresentInfo->pNext);
12091         if (present_times_info) {
12092             if (pPresentInfo->swapchainCount != present_times_info->swapchainCount) {
12093                 skip |=
12094                     log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
12095                             HandleToUint64(pPresentInfo->pSwapchains[0]), "VUID-VkPresentTimesInfoGOOGLE-swapchainCount-01247",
12096                             "vkQueuePresentKHR(): VkPresentTimesInfoGOOGLE.swapchainCount is %i but pPresentInfo->swapchainCount "
12097                             "is %i. For VkPresentTimesInfoGOOGLE down the pNext chain of VkPresentInfoKHR, "
12098                             "VkPresentTimesInfoGOOGLE.swapchainCount must equal VkPresentInfoKHR.swapchainCount.",
12099                             present_times_info->swapchainCount, pPresentInfo->swapchainCount);
12100             }
12101         }
12102     }
12103 
12104     return skip;
12105 }
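// Illustrative sketch only: a minimal present that passes the checks above. It assumes the
// image index came from vkAcquireNextImageKHR, the image was transitioned to
// VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, and presentation support for this queue family was
// established with vkGetPhysicalDeviceSurfaceSupportKHR:
//
//     VkPresentInfoKHR present = {};
//     present.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
//     present.waitSemaphoreCount = 1;
//     present.pWaitSemaphores = &render_done_semaphore;  // assumed: signaled by the last submit
//     present.swapchainCount = 1;
//     present.pSwapchains = &swapchain;
//     present.pImageIndices = &image_index;
//     vkQueuePresentKHR(present_queue, &present);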
12106 
12107 void CoreChecks::PostCallRecordQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo, VkResult result) {
12108     // Semaphore waits occur before error generation, if the call reached the ICD. (Confirm?)
12109     for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
12110         auto pSemaphore = GetSemaphoreNode(pPresentInfo->pWaitSemaphores[i]);
12111         if (pSemaphore) {
12112             pSemaphore->signaler.first = VK_NULL_HANDLE;
12113             pSemaphore->signaled = false;
12114         }
12115     }
12116 
12117     for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
12118         // Note: this is imperfect, in that we can get confused about what did or didn't succeed-- but if the app does that, it's
12119         // confused itself just as much.
12120         auto local_result = pPresentInfo->pResults ? pPresentInfo->pResults[i] : result;
12121         if (local_result != VK_SUCCESS && local_result != VK_SUBOPTIMAL_KHR) continue;  // this present didn't actually happen.
12122         // Mark the image as having been released to the WSI
12123         auto swapchain_data = GetSwapchainNode(pPresentInfo->pSwapchains[i]);
12124         if (swapchain_data && (swapchain_data->images.size() > pPresentInfo->pImageIndices[i])) {
12125             auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
12126             auto image_state = GetImageState(image);
12127             if (image_state) {
12128                 image_state->acquired = false;
12129             }
12130         }
12131     }
12132     // Note: even though presentation is directed to a queue, there is no direct ordering between QP and subsequent work, so QP (and
12133     // its semaphore waits) /never/ participate in any completion proof.
12134 }
12135 
12136 bool CoreChecks::PreCallValidateCreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount,
12137                                                           const VkSwapchainCreateInfoKHR *pCreateInfos,
12138                                                           const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchains) {
12139     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
12140     bool skip = false;
12141     if (pCreateInfos) {
12142         for (uint32_t i = 0; i < swapchainCount; i++) {
12143             auto surface_state = GetSurfaceState(pCreateInfos[i].surface);
12144             auto old_swapchain_state = GetSwapchainNode(pCreateInfos[i].oldSwapchain);
12145             std::stringstream func_name;
12146             func_name << "vkCreateSharedSwapchainsKHR[" << i << "]()";
12147             skip |=
12148                 ValidateCreateSwapchain(device_data, func_name.str().c_str(), &pCreateInfos[i], surface_state, old_swapchain_state);
12149         }
12150     }
12151     return skip;
12152 }
12153 
12154 void CoreChecks::PostCallRecordCreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount,
12155                                                          const VkSwapchainCreateInfoKHR *pCreateInfos,
12156                                                          const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchains,
12157                                                          VkResult result) {
12158     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
12159     if (pCreateInfos) {
12160         for (uint32_t i = 0; i < swapchainCount; i++) {
12161             auto surface_state = GetSurfaceState(pCreateInfos[i].surface);
12162             auto old_swapchain_state = GetSwapchainNode(pCreateInfos[i].oldSwapchain);
12163             RecordCreateSwapchainState(device_data, result, &pCreateInfos[i], &pSwapchains[i], surface_state, old_swapchain_state);
12164         }
12165     }
12166 }
12167 
12168 bool CoreChecks::ValidateAcquireNextImage(layer_data *device_data, VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
12169                                           VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex, const char *func_name) {
12170     bool skip = false;
12171     if (fence == VK_NULL_HANDLE && semaphore == VK_NULL_HANDLE) {
12172         skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
12173                         HandleToUint64(device), "VUID-vkAcquireNextImageKHR-semaphore-01780",
12174                         "%s: Semaphore and fence cannot both be VK_NULL_HANDLE. There would be no way to "
12175                         "determine the completion of this operation.",
12176                         func_name);
12177     }
12178 
12179     auto pSemaphore = GetSemaphoreNode(semaphore);
12180     if (pSemaphore && pSemaphore->scope == kSyncScopeInternal && pSemaphore->signaled) {
12181         skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
12182                         HandleToUint64(semaphore), "VUID-vkAcquireNextImageKHR-semaphore-01286",
12183                         "%s: Semaphore must not be currently signaled or in a wait state.", func_name);
12184     }
12185 
12186     auto pFence = GetFenceNode(fence);
12187     if (pFence) {
12188         skip |= ValidateFenceForSubmit(device_data, pFence);
12189     }
12190 
12191     auto swapchain_data = GetSwapchainNode(swapchain);
12192     if (swapchain_data && swapchain_data->retired) {
12193         skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
12194                         HandleToUint64(swapchain), "VUID-vkAcquireNextImageKHR-swapchain-01285",
12195                         "%s: This swapchain has been retired. The application can still present any images it "
12196                         "has acquired, but cannot acquire any more.",
12197                         func_name);
12198     }
12199 
12200     auto physical_device_state = GetPhysicalDeviceState();
12201     if (swapchain_data && (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState != UNCALLED)) {
12202         uint64_t acquired_images = std::count_if(swapchain_data->images.begin(), swapchain_data->images.end(),
12203                                                  [=](VkImage image) { return GetImageState(image)->acquired; });
12204         if (acquired_images > swapchain_data->images.size() - physical_device_state->surfaceCapabilities.minImageCount) {
12205             skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
12206                             HandleToUint64(swapchain), kVUID_Core_DrawState_SwapchainTooManyImages,
12207                             "%s: Application has already acquired the maximum number of images (0x%" PRIxLEAST64 ")", func_name,
12208                             acquired_images);
12209         }
12210     }
12211 
12212     if (swapchain_data && swapchain_data->images.size() == 0) {
12213         skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
12214                         HandleToUint64(swapchain), kVUID_Core_DrawState_SwapchainImagesNotFound,
12215                         "%s: No images found to acquire from. Application probably did not call "
12216                         "vkGetSwapchainImagesKHR after swapchain creation.",
12217                         func_name);
12218     }
12219     return skip;
12220 }
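// Worked example of the bound above: with images.size() == 3 and a cached minImageCount of 2,
// the limit is 3 - 2 = 1 already-acquired image. acquired_images is counted before the current
// call completes, so a third consecutive acquire with no intervening present sees
// acquired_images == 2, exceeds the limit, and reports kVUID_Core_DrawState_SwapchainTooManyImages.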
12221 
12222 bool CoreChecks::PreCallValidateAcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
12223                                                     VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
12224     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
12225     return ValidateAcquireNextImage(device_data, device, swapchain, timeout, semaphore, fence, pImageIndex,
12226                                     "vkAcquireNextImageKHR");
12227 }
12228 
12229 bool CoreChecks::PreCallValidateAcquireNextImage2KHR(VkDevice device, const VkAcquireNextImageInfoKHR *pAcquireInfo,
12230                                                      uint32_t *pImageIndex) {
12231     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
12232     return ValidateAcquireNextImage(device_data, device, pAcquireInfo->swapchain, pAcquireInfo->timeout, pAcquireInfo->semaphore,
12233                                     pAcquireInfo->fence, pImageIndex, "vkAcquireNextImage2KHR");
12234 }
12235 
12236 void CoreChecks::RecordAcquireNextImageState(layer_data *device_data, VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
12237                                              VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
12238     auto pFence = GetFenceNode(fence);
12239     if (pFence && pFence->scope == kSyncScopeInternal) {
12240         // Treat as inflight since it is valid to wait on this fence, even in cases where it is technically a temporary
12241         // import
12242         pFence->state = FENCE_INFLIGHT;
12243         pFence->signaler.first = VK_NULL_HANDLE;  // ANI isn't on a queue, so this can't participate in a completion proof.
12244     }
12245 
12246     auto pSemaphore = GetSemaphoreNode(semaphore);
12247     if (pSemaphore && pSemaphore->scope == kSyncScopeInternal) {
12248         // Treat as signaled since it is valid to wait on this semaphore, even in cases where it is technically a
12249         // temporary import
12250         pSemaphore->signaled = true;
12251         pSemaphore->signaler.first = VK_NULL_HANDLE;
12252     }
12253 
12254     // Mark the image as acquired.
12255     auto swapchain_data = GetSwapchainNode(swapchain);
12256     if (swapchain_data && (swapchain_data->images.size() > *pImageIndex)) {
12257         auto image = swapchain_data->images[*pImageIndex];
12258         auto image_state = GetImageState(image);
12259         if (image_state) {
12260             image_state->acquired = true;
12261             image_state->shared_presentable = swapchain_data->shared_presentable;
12262         }
12263     }
12264 }
12265 
12266 void CoreChecks::PostCallRecordAcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
12267                                                    VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex, VkResult result) {
12268     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
12269     if ((VK_SUCCESS != result) && (VK_SUBOPTIMAL_KHR != result)) return;
12270     RecordAcquireNextImageState(device_data, device, swapchain, timeout, semaphore, fence, pImageIndex);
12271 }
12272 
12273 void CoreChecks::PostCallRecordAcquireNextImage2KHR(VkDevice device, const VkAcquireNextImageInfoKHR *pAcquireInfo,
12274                                                     uint32_t *pImageIndex, VkResult result) {
12275     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
12276     if ((VK_SUCCESS != result) && (VK_SUBOPTIMAL_KHR != result)) return;
12277     RecordAcquireNextImageState(device_data, device, pAcquireInfo->swapchain, pAcquireInfo->timeout, pAcquireInfo->semaphore,
12278                                 pAcquireInfo->fence, pImageIndex);
12279 }
12280 
12281 void CoreChecks::PostCallRecordEnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount,
12282                                                         VkPhysicalDevice *pPhysicalDevices, VkResult result) {
12283     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
12284     if ((NULL != pPhysicalDevices) && (result == VK_SUCCESS || result == VK_INCOMPLETE)) {
12285         for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) {
12286             auto &phys_device_state = instance_data->physical_device_map[pPhysicalDevices[i]];
12287             phys_device_state.phys_device = pPhysicalDevices[i];
12288             // Init actual features for each physical device
12289             instance_data->instance_dispatch_table.GetPhysicalDeviceFeatures(pPhysicalDevices[i],
12290                                                                              &phys_device_state.features2.features);
12291         }
12292     }
12293 }
12294 
12295 // Common function to handle validation for GetPhysicalDeviceQueueFamilyProperties & 2KHR version
12296 static bool ValidateCommonGetPhysicalDeviceQueueFamilyProperties(instance_layer_data *instance_data,
12297                                                                  PHYSICAL_DEVICE_STATE *pd_state,
12298                                                                  uint32_t requested_queue_family_property_count, bool qfp_null,
12299                                                                  const char *caller_name) {
12300     bool skip = false;
12301     if (!qfp_null) {
12302         // Verify that for each physical device, this command is called first with NULL pQueueFamilyProperties in order to get count
12303         if (UNCALLED == pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
12304             skip |= log_msg(
12305                 instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
12306                 HandleToUint64(pd_state->phys_device), kVUID_Core_DevLimit_MissingQueryCount,
12307                 "%s is called with non-NULL pQueueFamilyProperties before obtaining pQueueFamilyPropertyCount. It is recommended "
12308                 "to first call %s with NULL pQueueFamilyProperties in order to obtain the maximal pQueueFamilyPropertyCount.",
12309                 caller_name, caller_name);
12310             // Then verify that the count passed in on the second call matches what was previously returned
12311         } else if (pd_state->queue_family_count != requested_queue_family_property_count) {
12312             skip |= log_msg(
12313                 instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
12314                 HandleToUint64(pd_state->phys_device), kVUID_Core_DevLimit_CountMismatch,
12315                 "%s is called with non-NULL pQueueFamilyProperties and pQueueFamilyPropertyCount value %" PRIu32
12316                 ", but the largest previously returned pQueueFamilyPropertyCount for this physicalDevice is %" PRIu32
12317                 ". It is recommended to instead receive all the properties by calling %s with pQueueFamilyPropertyCount that was "
12318                 "previously obtained by calling %s with NULL pQueueFamilyProperties.",
12319                 caller_name, requested_queue_family_property_count, pd_state->queue_family_count, caller_name, caller_name);
12320         }
12321         pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_DETAILS;
12322     }
12323 
12324     return skip;
12325 }
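// Illustrative sketch only: the query pattern the warnings above steer applications toward
// (physical_device handle assumed):
//
//     uint32_t qf_count = 0;
//     vkGetPhysicalDeviceQueueFamilyProperties(physical_device, &qf_count, nullptr);
//     std::vector<VkQueueFamilyProperties> props(qf_count);
//     vkGetPhysicalDeviceQueueFamilyProperties(physical_device, &qf_count, props.data());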
12326 
12327 bool CoreChecks::PreCallValidateGetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice,
12328                                                                        uint32_t *pQueueFamilyPropertyCount,
12329                                                                        VkQueueFamilyProperties *pQueueFamilyProperties) {
12330     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
12331     auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
12332     assert(physical_device_state);
12333     return ValidateCommonGetPhysicalDeviceQueueFamilyProperties(instance_data, physical_device_state, *pQueueFamilyPropertyCount,
12334                                                                 (nullptr == pQueueFamilyProperties),
12335                                                                 "vkGetPhysicalDeviceQueueFamilyProperties()");
12336 }
12337 
12338 bool CoreChecks::PreCallValidateGetPhysicalDeviceQueueFamilyProperties2(VkPhysicalDevice physicalDevice,
12339                                                                         uint32_t *pQueueFamilyPropertyCount,
12340                                                                         VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
12341     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
12342     auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
12343     assert(physical_device_state);
12344     return ValidateCommonGetPhysicalDeviceQueueFamilyProperties(instance_data, physical_device_state, *pQueueFamilyPropertyCount,
12345                                                                 (nullptr == pQueueFamilyProperties),
12346                                                                 "vkGetPhysicalDeviceQueueFamilyProperties2()");
12347 }
12348 
12349 bool CoreChecks::PreCallValidateGetPhysicalDeviceQueueFamilyProperties2KHR(VkPhysicalDevice physicalDevice,
12350                                                                            uint32_t *pQueueFamilyPropertyCount,
12351                                                                            VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
12352     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
12353     auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
12354     assert(physical_device_state);
12355     return ValidateCommonGetPhysicalDeviceQueueFamilyProperties(instance_data, physical_device_state, *pQueueFamilyPropertyCount,
12356                                                                 (nullptr == pQueueFamilyProperties),
12357                                                                 "vkGetPhysicalDeviceQueueFamilyProperties2KHR()");
12358 }
12359 
12360 // Common function to update state for GetPhysicalDeviceQueueFamilyProperties & 2KHR version
12361 static void StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(PHYSICAL_DEVICE_STATE *pd_state, uint32_t count,
12362                                                                     VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
12363     if (!pQueueFamilyProperties) {
12364         if (UNCALLED == pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState)
12365             pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_COUNT;
12366         pd_state->queue_family_count = count;
12367     } else {  // Save queue family properties
12368         pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_DETAILS;
12369         pd_state->queue_family_count = std::max(pd_state->queue_family_count, count);
12370 
12371         pd_state->queue_family_properties.resize(std::max(static_cast<uint32_t>(pd_state->queue_family_properties.size()), count));
12372         for (uint32_t i = 0; i < count; ++i) {
12373             pd_state->queue_family_properties[i] = pQueueFamilyProperties[i].queueFamilyProperties;
12374         }
12375     }
12376 }
12377 
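// The Vulkan 1.0 entry point funnels into the common 2KHR state update above by wrapping each
// VkQueueFamilyProperties element in a VkQueueFamilyProperties2KHR shell.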
12378 void CoreChecks::PostCallRecordGetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice,
12379                                                                       uint32_t *pQueueFamilyPropertyCount,
12380                                                                       VkQueueFamilyProperties *pQueueFamilyProperties) {
12381     auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
12382     assert(physical_device_state);
12383     VkQueueFamilyProperties2KHR *pqfp = nullptr;
12384     std::vector<VkQueueFamilyProperties2KHR> qfp;
12385     qfp.resize(*pQueueFamilyPropertyCount);
12386     if (pQueueFamilyProperties) {
12387         for (uint32_t i = 0; i < *pQueueFamilyPropertyCount; ++i) {
12388             qfp[i].sType = VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2_KHR;
12389             qfp[i].pNext = nullptr;
12390             qfp[i].queueFamilyProperties = pQueueFamilyProperties[i];
12391         }
12392         pqfp = qfp.data();
12393     }
12394     StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(physical_device_state, *pQueueFamilyPropertyCount, pqfp);
12395 }
12396 
12397 void CoreChecks::PostCallRecordGetPhysicalDeviceQueueFamilyProperties2(VkPhysicalDevice physicalDevice,
12398                                                                        uint32_t *pQueueFamilyPropertyCount,
12399                                                                        VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
12400     auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
12401     assert(physical_device_state);
12402     StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(physical_device_state, *pQueueFamilyPropertyCount,
12403                                                             pQueueFamilyProperties);
12404 }
12405 
12406 void CoreChecks::PostCallRecordGetPhysicalDeviceQueueFamilyProperties2KHR(VkPhysicalDevice physicalDevice,
12407                                                                           uint32_t *pQueueFamilyPropertyCount,
12408                                                                           VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
12409     auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
12410     assert(physical_device_state);
12411     StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(physical_device_state, *pQueueFamilyPropertyCount,
12412                                                             pQueueFamilyProperties);
12413 }
12414 
12415 bool CoreChecks::PreCallValidateDestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface,
12416                                                   const VkAllocationCallbacks *pAllocator) {
12417     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
12418     auto surface_state = GetSurfaceState(surface);
12419     bool skip = false;
12420     if ((surface_state) && (surface_state->swapchain)) {
12421         skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
12422                         HandleToUint64(instance), "VUID-vkDestroySurfaceKHR-surface-01266",
12423                         "vkDestroySurfaceKHR() called before its associated VkSwapchainKHR was destroyed.");
12424     }
12425     return skip;
12426 }
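// Illustrative sketch only: the destruction order the check above enforces
// (instance, device, and handles assumed):
//
//     vkDestroySwapchainKHR(device, swapchain, nullptr);  // destroy the surface's swapchain first
//     vkDestroySurfaceKHR(instance, surface, nullptr);    // only then destroy the surface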
12427 
12428 void CoreChecks::PreCallRecordValidateDestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface,
12429                                                         const VkAllocationCallbacks *pAllocator) {
12430     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
12431     instance_data->surface_map.erase(surface);
12432 }
12433 
12434 static void RecordVulkanSurface(instance_layer_data *instance_data, VkSurfaceKHR *pSurface) {
12435     instance_data->surface_map[*pSurface] = SURFACE_STATE(*pSurface);
12436 }
12437 
12438 void CoreChecks::PostCallRecordCreateDisplayPlaneSurfaceKHR(VkInstance instance, const VkDisplaySurfaceCreateInfoKHR *pCreateInfo,
12439                                                             const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface,
12440                                                             VkResult result) {
12441     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
12442     if (VK_SUCCESS != result) return;
12443     RecordVulkanSurface(instance_data, pSurface);
12444 }
12445 
12446 #ifdef VK_USE_PLATFORM_ANDROID_KHR
12447 void CoreChecks::PostCallRecordCreateAndroidSurfaceKHR(VkInstance instance, const VkAndroidSurfaceCreateInfoKHR *pCreateInfo,
12448                                                        const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface,
12449                                                        VkResult result) {
12450     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
12451     if (VK_SUCCESS != result) return;
12452     RecordVulkanSurface(instance_data, pSurface);
12453 }
12454 #endif  // VK_USE_PLATFORM_ANDROID_KHR
12455 
12456 #ifdef VK_USE_PLATFORM_IOS_MVK
12457 void CoreChecks::PostCallRecordCreateIOSSurfaceMVK(VkInstance instance, const VkIOSSurfaceCreateInfoMVK *pCreateInfo,
12458                                                    const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface,
12459                                                    VkResult result) {
12460     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
12461     if (VK_SUCCESS != result) return;
12462     RecordVulkanSurface(instance_data, pSurface);
12463 }
12464 #endif  // VK_USE_PLATFORM_IOS_MVK
12465 
12466 #ifdef VK_USE_PLATFORM_MACOS_MVK
12467 void CoreChecks::PostCallRecordCreateMacOSSurfaceMVK(VkInstance instance, const VkMacOSSurfaceCreateInfoMVK *pCreateInfo,
12468                                                      const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface,
12469                                                      VkResult result) {
12470     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
12471     if (VK_SUCCESS != result) return;
12472     RecordVulkanSurface(instance_data, pSurface);
12473 }
12474 #endif  // VK_USE_PLATFORM_MACOS_MVK
12475 
12476 #ifdef VK_USE_PLATFORM_WAYLAND_KHR
12477 void CoreChecks::PostCallRecordCreateWaylandSurfaceKHR(VkInstance instance, const VkWaylandSurfaceCreateInfoKHR *pCreateInfo,
12478                                                        const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface,
12479                                                        VkResult result) {
12480     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
12481     if (VK_SUCCESS != result) return;
12482     RecordVulkanSurface(instance_data, pSurface);
12483 }
12484 
12485 bool CoreChecks::PreCallValidateGetPhysicalDeviceWaylandPresentationSupportKHR(VkPhysicalDevice physicalDevice,
12486                                                                                uint32_t queueFamilyIndex,
12487                                                                                struct wl_display *display) {
12488     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
12489     const auto pd_state = GetPhysicalDeviceState(physicalDevice);
12490     return ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex,
12491                                              "VUID-vkGetPhysicalDeviceWaylandPresentationSupportKHR-queueFamilyIndex-01306",
12492                                              "vkGetPhysicalDeviceWaylandPresentationSupportKHR", "queueFamilyIndex");
12493 }
12494 #endif  // VK_USE_PLATFORM_WAYLAND_KHR
12495 
12496 #ifdef VK_USE_PLATFORM_WIN32_KHR
12497 void CoreChecks::PostCallRecordCreateWin32SurfaceKHR(VkInstance instance, const VkWin32SurfaceCreateInfoKHR *pCreateInfo,
12498                                                      const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface,
12499                                                      VkResult result) {
12500     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
12501     if (VK_SUCCESS != result) return;
12502     RecordVulkanSurface(instance_data, pSurface);
12503 }
12504 
12505 bool CoreChecks::PreCallValidateGetPhysicalDeviceWin32PresentationSupportKHR(VkPhysicalDevice physicalDevice,
12506                                                                              uint32_t queueFamilyIndex) {
12507     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
12508     const auto pd_state = GetPhysicalDeviceState(physicalDevice);
12509     return ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex,
12510                                              "VUID-vkGetPhysicalDeviceWin32PresentationSupportKHR-queueFamilyIndex-01309",
12511                                              "vkGetPhysicalDeviceWin32PresentationSupportKHR", "queueFamilyIndex");
12512 }
12513 #endif  // VK_USE_PLATFORM_WIN32_KHR
12514 
12515 #ifdef VK_USE_PLATFORM_XCB_KHR
12516 void CoreChecks::PostCallRecordCreateXcbSurfaceKHR(VkInstance instance, const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
12517                                                    const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface,
12518                                                    VkResult result) {
12519     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
12520     if (VK_SUCCESS != result) return;
12521     RecordVulkanSurface(instance_data, pSurface);
12522 }
12523 
12524 bool CoreChecks::PreCallValidateGetPhysicalDeviceXcbPresentationSupportKHR(VkPhysicalDevice physicalDevice,
12525                                                                            uint32_t queueFamilyIndex, xcb_connection_t *connection,
12526                                                                            xcb_visualid_t visual_id) {
12527     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
12528     const auto pd_state = GetPhysicalDeviceState(physicalDevice);
12529     return ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex,
12530                                              "VUID-vkGetPhysicalDeviceXcbPresentationSupportKHR-queueFamilyIndex-01312",
12531                                              "vkGetPhysicalDeviceXcbPresentationSupportKHR", "queueFamilyIndex");
12532 }
12533 #endif  // VK_USE_PLATFORM_XCB_KHR
12534 
12535 #ifdef VK_USE_PLATFORM_XLIB_KHR
12536 void CoreChecks::PostCallRecordCreateXlibSurfaceKHR(VkInstance instance, const VkXlibSurfaceCreateInfoKHR *pCreateInfo,
12537                                                     const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface,
12538                                                     VkResult result) {
12539     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
12540     if (VK_SUCCESS != result) return;
12541     RecordVulkanSurface(instance_data, pSurface);
12542 }
12543 
12544 bool CoreChecks::PreCallValidateGetPhysicalDeviceXlibPresentationSupportKHR(VkPhysicalDevice physicalDevice,
12545                                                                             uint32_t queueFamilyIndex, Display *dpy,
12546                                                                             VisualID visualID) {
12547     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
12548     const auto pd_state = GetPhysicalDeviceState(physicalDevice);
12549     return ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex,
12550                                              "VUID-vkGetPhysicalDeviceXlibPresentationSupportKHR-queueFamilyIndex-01315",
12551                                              "vkGetPhysicalDeviceXlibPresentationSupportKHR", "queueFamilyIndex");
12552 }
12553 #endif  // VK_USE_PLATFORM_XLIB_KHR
12554 
12555 void CoreChecks::PostCallRecordGetPhysicalDeviceSurfaceCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
12556                                                                        VkSurfaceCapabilitiesKHR *pSurfaceCapabilities,
12557                                                                        VkResult result) {
12558     if (VK_SUCCESS != result) return;
12559     auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
12560     physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS;
12561     physical_device_state->surfaceCapabilities = *pSurfaceCapabilities;
12562 }
12563 
12564 void CoreChecks::PostCallRecordGetPhysicalDeviceSurfaceCapabilities2KHR(VkPhysicalDevice physicalDevice,
12565                                                                         const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
12566                                                                         VkSurfaceCapabilities2KHR *pSurfaceCapabilities,
12567                                                                         VkResult result) {
12568     if (VK_SUCCESS != result) return;
12569     auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
12570     physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS;
12571     physical_device_state->surfaceCapabilities = pSurfaceCapabilities->surfaceCapabilities;
12572 }
12573 
void CoreChecks::PostCallRecordGetPhysicalDeviceSurfaceCapabilities2EXT(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
                                                                        VkSurfaceCapabilities2EXT *pSurfaceCapabilities,
                                                                        VkResult result) {
    if (VK_SUCCESS != result) return;  // Match the other capability queries: don't record state on failure
    auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
    physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS;
    physical_device_state->surfaceCapabilities.minImageCount = pSurfaceCapabilities->minImageCount;
    physical_device_state->surfaceCapabilities.maxImageCount = pSurfaceCapabilities->maxImageCount;
    physical_device_state->surfaceCapabilities.currentExtent = pSurfaceCapabilities->currentExtent;
    physical_device_state->surfaceCapabilities.minImageExtent = pSurfaceCapabilities->minImageExtent;
    physical_device_state->surfaceCapabilities.maxImageExtent = pSurfaceCapabilities->maxImageExtent;
    physical_device_state->surfaceCapabilities.maxImageArrayLayers = pSurfaceCapabilities->maxImageArrayLayers;
    physical_device_state->surfaceCapabilities.supportedTransforms = pSurfaceCapabilities->supportedTransforms;
    physical_device_state->surfaceCapabilities.currentTransform = pSurfaceCapabilities->currentTransform;
    physical_device_state->surfaceCapabilities.supportedCompositeAlpha = pSurfaceCapabilities->supportedCompositeAlpha;
    physical_device_state->surfaceCapabilities.supportedUsageFlags = pSurfaceCapabilities->supportedUsageFlags;
}

bool CoreChecks::PreCallValidateGetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex,
                                                                   VkSurfaceKHR surface, VkBool32 *pSupported) {
    auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    const auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
    return ValidatePhysicalDeviceQueueFamily(instance_data, physical_device_state, queueFamilyIndex,
                                             "VUID-vkGetPhysicalDeviceSurfaceSupportKHR-queueFamilyIndex-01269",
                                             "vkGetPhysicalDeviceSurfaceSupportKHR", "queueFamilyIndex");
}

void CoreChecks::PostCallRecordGetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex,
                                                                  VkSurfaceKHR surface, VkBool32 *pSupported, VkResult result) {
    if (VK_SUCCESS != result) return;
    auto surface_state = GetSurfaceState(surface);
    surface_state->gpu_queue_support[{physicalDevice, queueFamilyIndex}] = (*pSupported == VK_TRUE);
}

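// The QUERY_COUNT / QUERY_DETAILS state machine in the recorders below tracks the standard Vulkan two-call
// enumeration idiom. A minimal application-side sketch of the pattern being recorded (illustrative only, not
// layer code; assumes 'gpu' and 'surface' are valid handles):
//
//     uint32_t count = 0;
//     vkGetPhysicalDeviceSurfacePresentModesKHR(gpu, surface, &count, nullptr);       // recorded as QUERY_COUNT
//     std::vector<VkPresentModeKHR> modes(count);
//     vkGetPhysicalDeviceSurfacePresentModesKHR(gpu, surface, &count, modes.data());  // recorded as QUERY_DETAILS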
void CoreChecks::PostCallRecordGetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
                                                                       uint32_t *pPresentModeCount, VkPresentModeKHR *pPresentModes,
                                                                       VkResult result) {
    if ((VK_SUCCESS != result) && (VK_INCOMPLETE != result)) return;

    // TODO: This isn't quite right -- available modes may differ by surface AND physical device.
    auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
    auto &call_state = physical_device_state->vkGetPhysicalDeviceSurfacePresentModesKHRState;

    if (*pPresentModeCount) {
        if (call_state < QUERY_COUNT) call_state = QUERY_COUNT;
        if (*pPresentModeCount > physical_device_state->present_modes.size())
            physical_device_state->present_modes.resize(*pPresentModeCount);
    }
    if (pPresentModes) {
        if (call_state < QUERY_DETAILS) call_state = QUERY_DETAILS;
        for (uint32_t i = 0; i < *pPresentModeCount; i++) {
            physical_device_state->present_modes[i] = pPresentModes[i];
        }
    }
}

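// The format queries are validated against the same two-call idiom: the count query must come first, and the
// count passed back in must match what the driver reported. A conforming sequence, sketched for illustration:
//
//     uint32_t format_count = 0;
//     vkGetPhysicalDeviceSurfaceFormatsKHR(gpu, surface, &format_count, nullptr);
//     std::vector<VkSurfaceFormatKHR> formats(format_count);
//     vkGetPhysicalDeviceSurfaceFormatsKHR(gpu, surface, &format_count, formats.data());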
bool CoreChecks::PreCallValidateGetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
                                                                   uint32_t *pSurfaceFormatCount,
                                                                   VkSurfaceFormatKHR *pSurfaceFormats) {
    auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    if (!pSurfaceFormats) return false;
    auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
    auto &call_state = physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState;
    bool skip = false;
    switch (call_state) {
        case UNCALLED:
            // Since we haven't recorded a preliminary value of *pSurfaceFormatCount, that likely means that the application
            // didn't previously call this function with a NULL value of pSurfaceFormats:
            skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physicalDevice),
                            kVUID_Core_DevLimit_MustQueryCount,
                            "vkGetPhysicalDeviceSurfaceFormatsKHR() called with non-NULL pSurfaceFormats, but the format count "
                            "has not been queried first with a NULL pSurfaceFormats.");
            break;
        default:
            auto prev_format_count = (uint32_t)physical_device_state->surface_formats.size();
            if (prev_format_count != *pSurfaceFormatCount) {
                skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physicalDevice),
                                kVUID_Core_DevLimit_CountMismatch,
                                "vkGetPhysicalDeviceSurfaceFormatsKHR() called with non-NULL pSurfaceFormats, but with "
                                "*pSurfaceFormatCount (%u) different from the count (%u) that was returned when "
                                "pSurfaceFormats was NULL.",
                                *pSurfaceFormatCount, prev_format_count);
            }
            break;
    }
    return skip;
}

void CoreChecks::PostCallRecordGetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
                                                                  uint32_t *pSurfaceFormatCount,
                                                                  VkSurfaceFormatKHR *pSurfaceFormats, VkResult result) {
    if ((VK_SUCCESS != result) && (VK_INCOMPLETE != result)) return;

    auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
    auto &call_state = physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState;

    if (*pSurfaceFormatCount) {
        if (call_state < QUERY_COUNT) call_state = QUERY_COUNT;
        if (*pSurfaceFormatCount > physical_device_state->surface_formats.size())
            physical_device_state->surface_formats.resize(*pSurfaceFormatCount);
    }
    if (pSurfaceFormats) {
        if (call_state < QUERY_DETAILS) call_state = QUERY_DETAILS;
        for (uint32_t i = 0; i < *pSurfaceFormatCount; i++) {
            physical_device_state->surface_formats[i] = pSurfaceFormats[i];
        }
    }
}

void CoreChecks::PostCallRecordGetPhysicalDeviceSurfaceFormats2KHR(VkPhysicalDevice physicalDevice,
                                                                   const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
                                                                   uint32_t *pSurfaceFormatCount,
                                                                   VkSurfaceFormat2KHR *pSurfaceFormats, VkResult result) {
    if ((VK_SUCCESS != result) && (VK_INCOMPLETE != result)) return;

    auto physicalDeviceState = GetPhysicalDeviceState(physicalDevice);
    if (*pSurfaceFormatCount) {
        if (physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState < QUERY_COUNT) {
            physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState = QUERY_COUNT;
        }
        if (*pSurfaceFormatCount > physicalDeviceState->surface_formats.size())
            physicalDeviceState->surface_formats.resize(*pSurfaceFormatCount);
    }
    if (pSurfaceFormats) {
        if (physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState < QUERY_DETAILS) {
            physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState = QUERY_DETAILS;
        }
        for (uint32_t i = 0; i < *pSurfaceFormatCount; i++) {
            physicalDeviceState->surface_formats[i] = pSurfaceFormats[i].surfaceFormat;
        }
    }
}

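// The debug-utils label hooks below forward to the report-data helpers, which track the active label regions
// so they can be attached to validation messages. Typical application usage, sketched for illustration only:
//
//     VkDebugUtilsLabelEXT label = {};
//     label.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
//     label.pLabelName = "Main render pass";
//     vkQueueBeginDebugUtilsLabelEXT(queue, &label);
//     // ... submit work ...
//     vkQueueEndDebugUtilsLabelEXT(queue);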
void CoreChecks::PreCallRecordQueueBeginDebugUtilsLabelEXT(VkQueue queue, const VkDebugUtilsLabelEXT *pLabelInfo) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
    BeginQueueDebugUtilsLabel(device_data->report_data, queue, pLabelInfo);
}

void CoreChecks::PostCallRecordQueueEndDebugUtilsLabelEXT(VkQueue queue) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
    EndQueueDebugUtilsLabel(device_data->report_data, queue);
}

void CoreChecks::PreCallRecordQueueInsertDebugUtilsLabelEXT(VkQueue queue, const VkDebugUtilsLabelEXT *pLabelInfo) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
    InsertQueueDebugUtilsLabel(device_data->report_data, queue, pLabelInfo);
}

void CoreChecks::PreCallRecordCmdBeginDebugUtilsLabelEXT(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT *pLabelInfo) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    BeginCmdDebugUtilsLabel(device_data->report_data, commandBuffer, pLabelInfo);
}

void CoreChecks::PostCallRecordCmdEndDebugUtilsLabelEXT(VkCommandBuffer commandBuffer) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    EndCmdDebugUtilsLabel(device_data->report_data, commandBuffer);
}

void CoreChecks::PreCallRecordCmdInsertDebugUtilsLabelEXT(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT *pLabelInfo) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    InsertCmdDebugUtilsLabel(device_data->report_data, commandBuffer, pLabelInfo);
}

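// Messenger creation below only registers the callback with the layer's report data; the loader owns the
// handle itself. An illustrative application-side creation sketch ('DebugCallback' is a hypothetical function
// matching PFN_vkDebugUtilsMessengerCallbackEXT):
//
//     VkDebugUtilsMessengerCreateInfoEXT ci = {};
//     ci.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT;
//     ci.messageSeverity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT;
//     ci.messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT;
//     ci.pfnUserCallback = DebugCallback;
//     VkDebugUtilsMessengerEXT messenger;
//     vkCreateDebugUtilsMessengerEXT(instance, &ci, nullptr, &messenger);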
void CoreChecks::PostCallRecordCreateDebugUtilsMessengerEXT(VkInstance instance,
                                                            const VkDebugUtilsMessengerCreateInfoEXT *pCreateInfo,
                                                            const VkAllocationCallbacks *pAllocator,
                                                            VkDebugUtilsMessengerEXT *pMessenger, VkResult result) {
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
    if (VK_SUCCESS != result) return;
    layer_create_messenger_callback(instance_data->report_data, false, pCreateInfo, pAllocator, pMessenger);
}

void CoreChecks::PostCallRecordDestroyDebugUtilsMessengerEXT(VkInstance instance, VkDebugUtilsMessengerEXT messenger,
                                                             const VkAllocationCallbacks *pAllocator) {
    if (!messenger) return;
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
    layer_destroy_messenger_callback(instance_data->report_data, messenger, pAllocator);
}

void CoreChecks::PostCallRecordDestroyDebugReportCallbackEXT(VkInstance instance, VkDebugReportCallbackEXT msgCallback,
                                                             const VkAllocationCallbacks *pAllocator) {
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
    layer_destroy_report_callback(instance_data->report_data, msgCallback, pAllocator);
}

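// Device-group enumeration follows the same two-call idiom; the helper below additionally primes per-device
// feature state for every group member. Illustrative call sequence (not layer code):
//
//     uint32_t group_count = 0;
//     vkEnumeratePhysicalDeviceGroups(instance, &group_count, nullptr);
//     std::vector<VkPhysicalDeviceGroupProperties> groups(group_count);
//     for (auto &g : groups) g.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES;
//     vkEnumeratePhysicalDeviceGroups(instance, &group_count, groups.data());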
static void PostRecordEnumeratePhysicalDeviceGroupsState(instance_layer_data *instance_data, uint32_t *pPhysicalDeviceGroupCount,
                                                         VkPhysicalDeviceGroupPropertiesKHR *pPhysicalDeviceGroupProperties) {
    if (NULL != pPhysicalDeviceGroupProperties) {
        for (uint32_t i = 0; i < *pPhysicalDeviceGroupCount; i++) {
            for (uint32_t j = 0; j < pPhysicalDeviceGroupProperties[i].physicalDeviceCount; j++) {
                VkPhysicalDevice cur_phys_dev = pPhysicalDeviceGroupProperties[i].physicalDevices[j];
                auto &phys_device_state = instance_data->physical_device_map[cur_phys_dev];
                phys_device_state.phys_device = cur_phys_dev;
                // Init actual features for each physical device
                instance_data->instance_dispatch_table.GetPhysicalDeviceFeatures(cur_phys_dev,
                                                                                 &phys_device_state.features2.features);
            }
        }
    }
}

void CoreChecks::PostCallRecordEnumeratePhysicalDeviceGroups(VkInstance instance, uint32_t *pPhysicalDeviceGroupCount,
                                                             VkPhysicalDeviceGroupPropertiesKHR *pPhysicalDeviceGroupProperties,
                                                             VkResult result) {
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
    if ((VK_SUCCESS != result) && (VK_INCOMPLETE != result)) return;
    PostRecordEnumeratePhysicalDeviceGroupsState(instance_data, pPhysicalDeviceGroupCount, pPhysicalDeviceGroupProperties);
}

void CoreChecks::PostCallRecordEnumeratePhysicalDeviceGroupsKHR(VkInstance instance, uint32_t *pPhysicalDeviceGroupCount,
                                                                VkPhysicalDeviceGroupPropertiesKHR *pPhysicalDeviceGroupProperties,
                                                                VkResult result) {
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
    if ((VK_SUCCESS != result) && (VK_INCOMPLETE != result)) return;
    PostRecordEnumeratePhysicalDeviceGroupsState(instance_data, pPhysicalDeviceGroupCount, pPhysicalDeviceGroupProperties);
}

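// For reference, a push-descriptor create info that satisfies the checks below (VUIDs 00351-00353), sketched
// for illustration; assumes 'pipeline_layout' was created with a push descriptor set layout at set 0, and
// 'entry' is a VkDescriptorUpdateTemplateEntryKHR (see the entry sketch further below):
//
//     VkDescriptorUpdateTemplateCreateInfoKHR ci = {};
//     ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR;
//     ci.descriptorUpdateEntryCount = 1;
//     ci.pDescriptorUpdateEntries = &entry;
//     ci.templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR;
//     ci.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
//     ci.pipelineLayout = pipeline_layout;
//     ci.set = 0;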
bool CoreChecks::ValidateDescriptorUpdateTemplate(const char *func_name, layer_data *device_data,
                                                  const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo) {
    bool skip = false;
    const auto layout = GetDescriptorSetLayout(device_data, pCreateInfo->descriptorSetLayout);
    if (VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET == pCreateInfo->templateType && !layout) {
        auto ds_uint = HandleToUint64(pCreateInfo->descriptorSetLayout);
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, ds_uint,
                        "VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00350",
                        "%s: Invalid pCreateInfo->descriptorSetLayout (%s)", func_name,
                        device_data->report_data->FormatHandle(ds_uint).c_str());
    } else if (VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR == pCreateInfo->templateType) {
        auto bind_point = pCreateInfo->pipelineBindPoint;
        bool valid_bp = (bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS) || (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE);
        if (!valid_bp) {
            skip |=
                log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        "VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00351",
                        "%s: Invalid pCreateInfo->pipelineBindPoint (%" PRIu32 ").", func_name, static_cast<uint32_t>(bind_point));
        }
        const auto pipeline_layout = GetPipelineLayout(device_data, pCreateInfo->pipelineLayout);
        if (!pipeline_layout) {
            uint64_t pl_uint = HandleToUint64(pCreateInfo->pipelineLayout);
            skip |= log_msg(
                device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, pl_uint,
                "VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00352", "%s: Invalid pCreateInfo->pipelineLayout (%s)",
                func_name, device_data->report_data->FormatHandle(pl_uint).c_str());
        } else {
            const uint32_t pd_set = pCreateInfo->set;
            if ((pd_set >= pipeline_layout->set_layouts.size()) || !pipeline_layout->set_layouts[pd_set] ||
                !pipeline_layout->set_layouts[pd_set]->IsPushDescriptor()) {
                uint64_t pl_uint = HandleToUint64(pCreateInfo->pipelineLayout);
                skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, pl_uint,
                                "VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00353",
                                "%s: pCreateInfo->set (%" PRIu32
                                ") does not refer to the push descriptor set layout for pCreateInfo->pipelineLayout (%s).",
                                func_name, pd_set, device_data->report_data->FormatHandle(pl_uint).c_str());
            }
        }
    }
    return skip;
}

bool CoreChecks::PreCallValidateCreateDescriptorUpdateTemplate(VkDevice device,
                                                               const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
                                                               const VkAllocationCallbacks *pAllocator,
                                                               VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    bool skip = ValidateDescriptorUpdateTemplate("vkCreateDescriptorUpdateTemplate()", device_data, pCreateInfo);
    return skip;
}

bool CoreChecks::PreCallValidateCreateDescriptorUpdateTemplateKHR(VkDevice device,
                                                                  const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
                                                                  const VkAllocationCallbacks *pAllocator,
                                                                  VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    bool skip = ValidateDescriptorUpdateTemplate("vkCreateDescriptorUpdateTemplateKHR()", device_data, pCreateInfo);
    return skip;
}

void CoreChecks::PreCallRecordDestroyDescriptorUpdateTemplate(VkDevice device,
                                                              VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
                                                              const VkAllocationCallbacks *pAllocator) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (!descriptorUpdateTemplate) return;
    device_data->desc_template_map.erase(descriptorUpdateTemplate);
}

void CoreChecks::PreCallRecordDestroyDescriptorUpdateTemplateKHR(VkDevice device,
                                                                 VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
                                                                 const VkAllocationCallbacks *pAllocator) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (!descriptorUpdateTemplate) return;
    device_data->desc_template_map.erase(descriptorUpdateTemplate);
}

void RecordCreateDescriptorUpdateTemplateState(layer_data *device_data, const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
                                               VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) {
    safe_VkDescriptorUpdateTemplateCreateInfo *local_create_info = new safe_VkDescriptorUpdateTemplateCreateInfo(pCreateInfo);
    std::unique_ptr<TEMPLATE_STATE> template_state(new TEMPLATE_STATE(*pDescriptorUpdateTemplate, local_create_info));
    device_data->desc_template_map[*pDescriptorUpdateTemplate] = std::move(template_state);
}

void CoreChecks::PostCallRecordCreateDescriptorUpdateTemplate(VkDevice device,
                                                              const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
                                                              const VkAllocationCallbacks *pAllocator,
                                                              VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate,
                                                              VkResult result) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (VK_SUCCESS != result) return;
    RecordCreateDescriptorUpdateTemplateState(device_data, pCreateInfo, pDescriptorUpdateTemplate);
}

void CoreChecks::PostCallRecordCreateDescriptorUpdateTemplateKHR(VkDevice device,
                                                                 const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
                                                                 const VkAllocationCallbacks *pAllocator,
                                                                 VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate,
                                                                 VkResult result) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (VK_SUCCESS != result) return;
    RecordCreateDescriptorUpdateTemplateState(device_data, pCreateInfo, pDescriptorUpdateTemplate);
}

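// Template updates read raw bytes from pData at the offset/stride described by each
// VkDescriptorUpdateTemplateEntry. A minimal illustrative entry for a single uniform buffer at binding 0
// (assumes the application packs a VkDescriptorBufferInfo at offset 0 of pData):
//
//     VkDescriptorUpdateTemplateEntryKHR entry = {};
//     entry.dstBinding = 0;
//     entry.descriptorCount = 1;
//     entry.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
//     entry.offset = 0;
//     entry.stride = sizeof(VkDescriptorBufferInfo);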
bool CoreChecks::ValidateUpdateDescriptorSetWithTemplate(layer_data *device_data, VkDescriptorSet descriptorSet,
                                                         VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
                                                         const void *pData) {
    bool skip = false;
    auto const template_map_entry = device_data->desc_template_map.find(descriptorUpdateTemplate);
    if ((template_map_entry == device_data->desc_template_map.end()) || (template_map_entry->second.get() == nullptr)) {
        // Object tracker will report errors for invalid descriptorUpdateTemplate values, avoiding a crash in release builds
        // but retaining the assert as template support is new enough to want to investigate these in debug builds.
        assert(0);
    } else {
        const TEMPLATE_STATE *template_state = template_map_entry->second.get();
        // TODO: Validate template push descriptor updates
        if (template_state->create_info.templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET) {
            skip = ValidateUpdateDescriptorSetsWithTemplateKHR(device_data, descriptorSet, template_state, pData);
        }
    }
    return skip;
}

bool CoreChecks::PreCallValidateUpdateDescriptorSetWithTemplate(VkDevice device, VkDescriptorSet descriptorSet,
                                                                VkDescriptorUpdateTemplate descriptorUpdateTemplate,
                                                                const void *pData) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    return ValidateUpdateDescriptorSetWithTemplate(device_data, descriptorSet, descriptorUpdateTemplate, pData);
}

bool CoreChecks::PreCallValidateUpdateDescriptorSetWithTemplateKHR(VkDevice device, VkDescriptorSet descriptorSet,
                                                                   VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
                                                                   const void *pData) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    return ValidateUpdateDescriptorSetWithTemplate(device_data, descriptorSet, descriptorUpdateTemplate, pData);
}

void CoreChecks::RecordUpdateDescriptorSetWithTemplateState(layer_data *device_data, VkDescriptorSet descriptorSet,
                                                            VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
                                                            const void *pData) {
    auto const template_map_entry = device_data->desc_template_map.find(descriptorUpdateTemplate);
    if ((template_map_entry == device_data->desc_template_map.end()) || (template_map_entry->second.get() == nullptr)) {
        assert(0);
    } else {
        const TEMPLATE_STATE *template_state = template_map_entry->second.get();
        // TODO: Record template push descriptor updates
        if (template_state->create_info.templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET) {
            PerformUpdateDescriptorSetsWithTemplateKHR(device_data, descriptorSet, template_state, pData);
        }
    }
}

void CoreChecks::PreCallRecordUpdateDescriptorSetWithTemplate(VkDevice device, VkDescriptorSet descriptorSet,
                                                              VkDescriptorUpdateTemplate descriptorUpdateTemplate,
                                                              const void *pData) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    RecordUpdateDescriptorSetWithTemplateState(device_data, descriptorSet, descriptorUpdateTemplate, pData);
}

void CoreChecks::PreCallRecordUpdateDescriptorSetWithTemplateKHR(VkDevice device, VkDescriptorSet descriptorSet,
                                                                 VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
                                                                 const void *pData) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    RecordUpdateDescriptorSetWithTemplateState(device_data, descriptorSet, descriptorUpdateTemplate, pData);
}

static std::shared_ptr<cvdescriptorset::DescriptorSetLayout const> GetDslFromPipelineLayout(PIPELINE_LAYOUT_NODE const *layout_data,
                                                                                            uint32_t set) {
    std::shared_ptr<cvdescriptorset::DescriptorSetLayout const> dsl = nullptr;
    if (layout_data && (set < layout_data->set_layouts.size())) {
        dsl = layout_data->set_layouts[set];
    }
    return dsl;
}

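// Illustrative-only form of the call validated below; the template and the bound layout must agree on bind
// point, set index, and push descriptor set layout:
//
//     vkCmdPushDescriptorSetWithTemplateKHR(command_buffer, update_template, pipeline_layout, /*set*/ 0, pData);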
bool CoreChecks::PreCallValidateCmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer,
                                                                    VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
                                                                    VkPipelineLayout layout, uint32_t set, const void *pData) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    assert(cb_state);
    const char *const func_name = "vkCmdPushDescriptorSetWithTemplateKHR()";
    bool skip = false;
    skip |= ValidateCmd(device_data, cb_state, CMD_PUSHDESCRIPTORSETWITHTEMPLATEKHR, func_name);

    auto layout_data = GetPipelineLayout(device_data, layout);
    auto dsl = GetDslFromPipelineLayout(layout_data, set);
    const auto layout_u64 = HandleToUint64(layout);

    // Validate the set index points to a push descriptor set and is in range
    if (dsl) {
        if (!dsl->IsPushDescriptor()) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, layout_u64, "VUID-vkCmdPushDescriptorSetKHR-set-00365",
                            "%s: Set index %" PRIu32 " does not match push descriptor set layout index for VkPipelineLayout %s.",
                            func_name, set, device_data->report_data->FormatHandle(layout_u64).c_str());
        }
    } else if (layout_data && (set >= layout_data->set_layouts.size())) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT,
                        layout_u64, "VUID-vkCmdPushDescriptorSetKHR-set-00364",
                        "%s: Set index %" PRIu32 " is outside of range for VkPipelineLayout %s (set < %" PRIu32 ").", func_name,
                        set, device_data->report_data->FormatHandle(layout_u64).c_str(),
                        static_cast<uint32_t>(layout_data->set_layouts.size()));
    }

    const auto template_state = GetDescriptorTemplateState(device_data, descriptorUpdateTemplate);
    if (template_state) {
        const auto &template_ci = template_state->create_info;
        static const std::map<VkPipelineBindPoint, std::string> bind_errors = {
            std::make_pair(VK_PIPELINE_BIND_POINT_GRAPHICS, "VUID-vkCmdPushDescriptorSetWithTemplateKHR-commandBuffer-00366"),
            std::make_pair(VK_PIPELINE_BIND_POINT_COMPUTE, "VUID-vkCmdPushDescriptorSetWithTemplateKHR-commandBuffer-00366"),
            std::make_pair(VK_PIPELINE_BIND_POINT_RAY_TRACING_NV,
                           "VUID-vkCmdPushDescriptorSetWithTemplateKHR-commandBuffer-00366")};
        skip |= ValidatePipelineBindPoint(device_data, cb_state, template_ci.pipelineBindPoint, func_name, bind_errors);

        if (template_ci.templateType != VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_state->commandBuffer), kVUID_Core_PushDescriptorUpdate_TemplateType,
                            "%s: descriptorUpdateTemplate %s was not created with flag "
                            "VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR.",
                            func_name, device_data->report_data->FormatHandle(descriptorUpdateTemplate).c_str());
        }
        if (template_ci.set != set) {
            skip |= log_msg(
                device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                HandleToUint64(cb_state->commandBuffer), kVUID_Core_PushDescriptorUpdate_Template_SetMismatched,
                "%s: descriptorUpdateTemplate %s created with set %" PRIu32 " does not match command parameter set %" PRIu32 ".",
                func_name, device_data->report_data->FormatHandle(descriptorUpdateTemplate).c_str(), template_ci.set, set);
        }
        if (!CompatForSet(set, layout_data, GetPipelineLayout(device_data, template_ci.pipelineLayout))) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_state->commandBuffer), kVUID_Core_PushDescriptorUpdate_Template_LayoutMismatched,
                            "%s: descriptorUpdateTemplate %s created with pipelineLayout %s is incompatible with command parameter "
                            "layout %s for set %" PRIu32 ".",
                            func_name, device_data->report_data->FormatHandle(descriptorUpdateTemplate).c_str(),
                            device_data->report_data->FormatHandle(template_ci.pipelineLayout).c_str(),
                            device_data->report_data->FormatHandle(layout).c_str(), set);
        }
    }

    if (dsl && template_state) {
        // Create an empty proxy in order to use the existing descriptor set update validation
        cvdescriptorset::DescriptorSet proxy_ds(VK_NULL_HANDLE, VK_NULL_HANDLE, dsl, 0, device_data);
        // Decode the template into a set of write updates
        cvdescriptorset::DecodedTemplateUpdate decoded_template(device_data, VK_NULL_HANDLE, template_state, pData,
                                                                dsl->GetDescriptorSetLayout());
        // Validate the decoded update against the proxy_ds
        skip |= proxy_ds.ValidatePushDescriptorsUpdate(device_data->report_data,
                                                       static_cast<uint32_t>(decoded_template.desc_writes.size()),
                                                       decoded_template.desc_writes.data(), func_name);
    }

    return skip;
}

void CoreChecks::PreCallRecordCmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer,
                                                                  VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
                                                                  VkPipelineLayout layout, uint32_t set, const void *pData) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);

    const auto template_state = GetDescriptorTemplateState(device_data, descriptorUpdateTemplate);
    if (template_state) {
        auto layout_data = GetPipelineLayout(device_data, layout);
        auto dsl = GetDslFromPipelineLayout(layout_data, set);
        const auto &template_ci = template_state->create_info;
        if (dsl && !dsl->IsDestroyed()) {
            // Decode the template into a set of write updates
            cvdescriptorset::DecodedTemplateUpdate decoded_template(device_data, VK_NULL_HANDLE, template_state, pData,
                                                                    dsl->GetDescriptorSetLayout());
            RecordCmdPushDescriptorSetState(device_data, cb_state, template_ci.pipelineBindPoint, layout, set,
                                            static_cast<uint32_t>(decoded_template.desc_writes.size()),
                                            decoded_template.desc_writes.data());
        }
    }
}

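// Display-plane queries use the two-call idiom as well; only the returned count is retained here, for the
// planeIndex range check below. Illustrative query sequence (not layer code):
//
//     uint32_t plane_count = 0;
//     vkGetPhysicalDeviceDisplayPlanePropertiesKHR(gpu, &plane_count, nullptr);
//     std::vector<VkDisplayPlanePropertiesKHR> planes(plane_count);
//     vkGetPhysicalDeviceDisplayPlanePropertiesKHR(gpu, &plane_count, planes.data());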
void CoreChecks::RecordGetPhysicalDeviceDisplayPlanePropertiesState(instance_layer_data *instance_data,
                                                                    VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount,
                                                                    void *pProperties) {
    auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
    if (*pPropertyCount) {
        if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState < QUERY_COUNT) {
            physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState = QUERY_COUNT;
        }
        physical_device_state->display_plane_property_count = *pPropertyCount;
    }
    if (pProperties) {
        if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState < QUERY_DETAILS) {
            physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState = QUERY_DETAILS;
        }
    }
}

void CoreChecks::PostCallRecordGetPhysicalDeviceDisplayPlanePropertiesKHR(VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount,
                                                                          VkDisplayPlanePropertiesKHR *pProperties,
                                                                          VkResult result) {
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    if ((VK_SUCCESS != result) && (VK_INCOMPLETE != result)) return;
    RecordGetPhysicalDeviceDisplayPlanePropertiesState(instance_data, physicalDevice, pPropertyCount, pProperties);
}

void CoreChecks::PostCallRecordGetPhysicalDeviceDisplayPlaneProperties2KHR(VkPhysicalDevice physicalDevice,
                                                                           uint32_t *pPropertyCount,
                                                                           VkDisplayPlaneProperties2KHR *pProperties,
                                                                           VkResult result) {
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    if ((VK_SUCCESS != result) && (VK_INCOMPLETE != result)) return;
    RecordGetPhysicalDeviceDisplayPlanePropertiesState(instance_data, physicalDevice, pPropertyCount, pProperties);
}

bool CoreChecks::ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(instance_layer_data *instance_data,
                                                                         VkPhysicalDevice physicalDevice, uint32_t planeIndex,
                                                                         const char *api_name) {
    bool skip = false;
    auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
    if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState == UNCALLED) {
        skip |=
            log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                    HandleToUint64(physicalDevice), kVUID_Core_Swapchain_GetSupportedDisplaysWithoutQuery,
                    "Potential problem with calling %s() without first querying vkGetPhysicalDeviceDisplayPlanePropertiesKHR "
                    "or vkGetPhysicalDeviceDisplayPlaneProperties2KHR.",
                    api_name);
    } else {
        if (planeIndex >= physical_device_state->display_plane_property_count) {
            skip |= log_msg(
                instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                HandleToUint64(physicalDevice), "VUID-vkGetDisplayPlaneSupportedDisplaysKHR-planeIndex-01249",
                "%s(): planeIndex must be in the range [0, %d], per the plane count returned by "
                "vkGetPhysicalDeviceDisplayPlanePropertiesKHR or vkGetPhysicalDeviceDisplayPlaneProperties2KHR. Do you have the "
                "plane index hardcoded?",
                api_name, physical_device_state->display_plane_property_count - 1);
        }
    }
    return skip;
}

bool CoreChecks::PreCallValidateGetDisplayPlaneSupportedDisplaysKHR(VkPhysicalDevice physicalDevice, uint32_t planeIndex,
                                                                    uint32_t *pDisplayCount, VkDisplayKHR *pDisplays) {
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    bool skip = false;
    skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(instance_data, physicalDevice, planeIndex,
                                                                    "vkGetDisplayPlaneSupportedDisplaysKHR");
    return skip;
}

bool CoreChecks::PreCallValidateGetDisplayPlaneCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode,
                                                               uint32_t planeIndex, VkDisplayPlaneCapabilitiesKHR *pCapabilities) {
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    bool skip = false;
    skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(instance_data, physicalDevice, planeIndex,
                                                                    "vkGetDisplayPlaneCapabilitiesKHR");
    return skip;
}

bool CoreChecks::PreCallValidateGetDisplayPlaneCapabilities2KHR(VkPhysicalDevice physicalDevice,
                                                                const VkDisplayPlaneInfo2KHR *pDisplayPlaneInfo,
                                                                VkDisplayPlaneCapabilities2KHR *pCapabilities) {
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    bool skip = false;
    skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(instance_data, physicalDevice, pDisplayPlaneInfo->planeIndex,
                                                                    "vkGetDisplayPlaneCapabilities2KHR");
    return skip;
}

bool CoreChecks::PreCallValidateCmdDebugMarkerBeginEXT(VkCommandBuffer commandBuffer,
                                                       const VkDebugMarkerMarkerInfoEXT *pMarkerInfo) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    assert(cb_state);
    return ValidateCmd(device_data, cb_state, CMD_DEBUGMARKERBEGINEXT, "vkCmdDebugMarkerBeginEXT()");
}

bool CoreChecks::PreCallValidateCmdDebugMarkerEndEXT(VkCommandBuffer commandBuffer) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    assert(cb_state);
    return ValidateCmd(device_data, cb_state, CMD_DEBUGMARKERENDEXT, "vkCmdDebugMarkerEndEXT()");
}

bool CoreChecks::PreCallValidateCmdSetDiscardRectangleEXT(VkCommandBuffer commandBuffer, uint32_t firstDiscardRectangle,
                                                          uint32_t discardRectangleCount, const VkRect2D *pDiscardRectangles) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    // Minimal validation for command buffer state
    return ValidateCmd(device_data, cb_state, CMD_SETDISCARDRECTANGLEEXT, "vkCmdSetDiscardRectangleEXT()");
}

bool CoreChecks::PreCallValidateCmdSetSampleLocationsEXT(VkCommandBuffer commandBuffer,
                                                         const VkSampleLocationsInfoEXT *pSampleLocationsInfo) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    // Minimal validation for command buffer state
    return ValidateCmd(device_data, cb_state, CMD_SETSAMPLELOCATIONSEXT, "vkCmdSetSampleLocationsEXT()");
}

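// The checks below enforce the alignment and stride rules for the KHR draw-indirect-count commands. An
// illustrative call that satisfies them (both offsets multiples of 4, stride a multiple of 4 and at least
// sizeof(VkDrawIndirectCommand)):
//
//     vkCmdDrawIndirectCountKHR(command_buffer, indirect_buffer, /*offset*/ 0, count_buffer,
//                               /*countBufferOffset*/ 0, /*maxDrawCount*/ 16,
//                               /*stride*/ sizeof(VkDrawIndirectCommand));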
PreCallValidateCmdDrawIndirectCountKHR(VkCommandBuffer commandBuffer,VkBuffer buffer,VkDeviceSize offset,VkBuffer countBuffer,VkDeviceSize countBufferOffset,uint32_t maxDrawCount,uint32_t stride)13189 bool CoreChecks::PreCallValidateCmdDrawIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
13190                                                         VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
13191                                                         uint32_t stride) {
13192     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
13193     bool skip = false;
13194     if (offset & 3) {
13195         skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
13196                         HandleToUint64(commandBuffer), "VUID-vkCmdDrawIndirectCountKHR-offset-03108",
13197                         "vkCmdDrawIndirectCountKHR() parameter, VkDeviceSize offset (0x%" PRIxLEAST64 "), is not a multiple of 4.",
13198                         offset);
13199     }
13200 
13201     if (countBufferOffset & 3) {
13202         skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
13203                         HandleToUint64(commandBuffer), "VUID-vkCmdDrawIndirectCountKHR-countBufferOffset-03109",
13204                         "vkCmdDrawIndirectCountKHR() parameter, VkDeviceSize countBufferOffset (0x%" PRIxLEAST64
13205                         "), is not a multiple of 4.",
13206                         countBufferOffset);
13207     }
13208 
13209     if ((stride & 3) || stride < sizeof(VkDrawIndirectCommand)) {
13210         skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
13211                         HandleToUint64(commandBuffer), "VUID-vkCmdDrawIndirectCountKHR-stride-03110",
13212                         "vkCmdDrawIndirectCountKHR() parameter, uint32_t stride (0x%" PRIxLEAST32
13213                         "), is not a multiple of 4 or smaller than sizeof (VkDrawIndirectCommand).",
13214                         stride);
13215     }
13216 
13217     skip |= ValidateCmdDrawType(device_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_GRAPHICS, CMD_DRAWINDIRECTCOUNTKHR,
13218                                 "vkCmdDrawIndirectCountKHR()", VK_QUEUE_GRAPHICS_BIT,
13219                                 "VUID-vkCmdDrawIndirectCountKHR-commandBuffer-cmdpool", "VUID-vkCmdDrawIndirectCountKHR-renderpass",
13220                                 "VUID-vkCmdDrawIndirectCountKHR-None-03119", "VUID-vkCmdDrawIndirectCountKHR-None-03120");
13221     BUFFER_STATE *buffer_state = GetBufferState(buffer);
13222     BUFFER_STATE *count_buffer_state = GetBufferState(countBuffer);
13223     skip |= ValidateMemoryIsBoundToBuffer(device_data, buffer_state, "vkCmdDrawIndirectCountKHR()",
13224                                           "VUID-vkCmdDrawIndirectCountKHR-buffer-03104");
13225     skip |= ValidateMemoryIsBoundToBuffer(device_data, count_buffer_state, "vkCmdDrawIndirectCountKHR()",
13226                                           "VUID-vkCmdDrawIndirectCountKHR-countBuffer-03106");
13227 
13228     return skip;
13229 }
13230 
void CoreChecks::PreCallRecordCmdDrawIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                                      VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
                                                      uint32_t stride) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    BUFFER_STATE *buffer_state = GetBufferState(buffer);
    BUFFER_STATE *count_buffer_state = GetBufferState(countBuffer);
    UpdateStateCmdDrawType(device_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
    // Guard against null state from invalid handles, matching the other record paths below
    if (buffer_state) {
        AddCommandBufferBindingBuffer(device_data, cb_state, buffer_state);
    }
    if (count_buffer_state) {
        AddCommandBufferBindingBuffer(device_data, cb_state, count_buffer_state);
    }
}

bool CoreChecks::PreCallValidateCmdDrawIndexedIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                                               VkBuffer countBuffer, VkDeviceSize countBufferOffset,
                                                               uint32_t maxDrawCount, uint32_t stride) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skip = false;
    if (offset & 3) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkCmdDrawIndexedIndirectCountKHR-offset-03140",
                        "vkCmdDrawIndexedIndirectCountKHR() parameter, VkDeviceSize offset (0x%" PRIxLEAST64
                        "), is not a multiple of 4.",
                        offset);
    }

    if (countBufferOffset & 3) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkCmdDrawIndexedIndirectCountKHR-countBufferOffset-03141",
                        "vkCmdDrawIndexedIndirectCountKHR() parameter, VkDeviceSize countBufferOffset (0x%" PRIxLEAST64
                        "), is not a multiple of 4.",
                        countBufferOffset);
    }

    if ((stride & 3) || stride < sizeof(VkDrawIndexedIndirectCommand)) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), "VUID-vkCmdDrawIndexedIndirectCountKHR-stride-03142",
                        "vkCmdDrawIndexedIndirectCountKHR() parameter, uint32_t stride (0x%" PRIxLEAST32
                        "), is not a multiple of 4 or is less than sizeof(VkDrawIndexedIndirectCommand).",
                        stride);
    }

    skip |= ValidateCmdDrawType(
        device_data, commandBuffer, true, VK_PIPELINE_BIND_POINT_GRAPHICS, CMD_DRAWINDEXEDINDIRECTCOUNTKHR,
        "vkCmdDrawIndexedIndirectCountKHR()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdDrawIndexedIndirectCountKHR-commandBuffer-cmdpool",
        "VUID-vkCmdDrawIndexedIndirectCountKHR-renderpass", "VUID-vkCmdDrawIndexedIndirectCountKHR-None-03151",
        "VUID-vkCmdDrawIndexedIndirectCountKHR-None-03152");
    BUFFER_STATE *buffer_state = GetBufferState(buffer);
    BUFFER_STATE *count_buffer_state = GetBufferState(countBuffer);
    skip |= ValidateMemoryIsBoundToBuffer(device_data, buffer_state, "vkCmdDrawIndexedIndirectCountKHR()",
                                          "VUID-vkCmdDrawIndexedIndirectCountKHR-buffer-03136");
    skip |= ValidateMemoryIsBoundToBuffer(device_data, count_buffer_state, "vkCmdDrawIndexedIndirectCountKHR()",
                                          "VUID-vkCmdDrawIndexedIndirectCountKHR-countBuffer-03138");
    return skip;
}

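// Example (illustrative only; handles are placeholders as above): for the indexed
// variant, the tightly packed stride of sizeof(VkDrawIndexedIndirectCommand) (20 bytes)
// is both 4-byte aligned and large enough, so it passes the stride check; a stride of
// 12 would fail it.
//
//     vkCmdDrawIndexedIndirectCountKHR(command_buffer, indirect_buffer, /*offset=*/0,
//                                      count_buffer, /*countBufferOffset=*/0,
//                                      /*maxDrawCount=*/16,
//                                      sizeof(VkDrawIndexedIndirectCommand));
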
void CoreChecks::PreCallRecordCmdDrawIndexedIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                                             VkBuffer countBuffer, VkDeviceSize countBufferOffset,
                                                             uint32_t maxDrawCount, uint32_t stride) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    BUFFER_STATE *buffer_state = GetBufferState(buffer);
    BUFFER_STATE *count_buffer_state = GetBufferState(countBuffer);
    UpdateStateCmdDrawType(device_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
    // Guard against null state from invalid handles, matching the other record paths below
    if (buffer_state) {
        AddCommandBufferBindingBuffer(device_data, cb_state, buffer_state);
    }
    if (count_buffer_state) {
        AddCommandBufferBindingBuffer(device_data, cb_state, count_buffer_state);
    }
}

bool CoreChecks::PreCallValidateCmdDrawMeshTasksNV(VkCommandBuffer commandBuffer, uint32_t taskCount, uint32_t firstTask) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skip = ValidateCmdDrawType(device_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_GRAPHICS, CMD_DRAWMESHTASKSNV,
                                    "vkCmdDrawMeshTasksNV()", VK_QUEUE_GRAPHICS_BIT,
                                    "VUID-vkCmdDrawMeshTasksNV-commandBuffer-cmdpool", "VUID-vkCmdDrawMeshTasksNV-renderpass",
                                    "VUID-vkCmdDrawMeshTasksNV-None-02125", "VUID-vkCmdDrawMeshTasksNV-None-02126");
    return skip;
}

void CoreChecks::PreCallRecordCmdDrawMeshTasksNV(VkCommandBuffer commandBuffer, uint32_t taskCount, uint32_t firstTask) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    UpdateStateCmdDrawType(device_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
}

bool CoreChecks::PreCallValidateCmdDrawMeshTasksIndirectNV(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                                           uint32_t drawCount, uint32_t stride) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skip = ValidateCmdDrawType(device_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_GRAPHICS, CMD_DRAWMESHTASKSINDIRECTNV,
                                    "vkCmdDrawMeshTasksIndirectNV()", VK_QUEUE_GRAPHICS_BIT,
                                    "VUID-vkCmdDrawMeshTasksIndirectNV-commandBuffer-cmdpool",
                                    "VUID-vkCmdDrawMeshTasksIndirectNV-renderpass", "VUID-vkCmdDrawMeshTasksIndirectNV-None-02154",
                                    "VUID-vkCmdDrawMeshTasksIndirectNV-None-02155");
    BUFFER_STATE *buffer_state = GetBufferState(buffer);
    skip |= ValidateMemoryIsBoundToBuffer(device_data, buffer_state, "vkCmdDrawMeshTasksIndirectNV()",
                                          "VUID-vkCmdDrawMeshTasksIndirectNV-buffer-02143");

    return skip;
}

void CoreChecks::PreCallRecordCmdDrawMeshTasksIndirectNV(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                                         uint32_t drawCount, uint32_t stride) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    UpdateStateCmdDrawType(device_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
    BUFFER_STATE *buffer_state = GetBufferState(buffer);
    if (buffer_state) {
        AddCommandBufferBindingBuffer(device_data, cb_state, buffer_state);
    }
}

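// Example (illustrative only): each record in the indirect buffer is a
// VkDrawMeshTasksIndirectCommandNV, i.e. two uint32_t values (taskCount, firstTask),
// so a buffer holding four tightly packed draws needs at least
// 4 * sizeof(VkDrawMeshTasksIndirectCommandNV) = 32 bytes of memory bound to it to
// satisfy the memory-binding check above.
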
bool CoreChecks::PreCallValidateCmdDrawMeshTasksIndirectCountNV(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                                                VkBuffer countBuffer, VkDeviceSize countBufferOffset,
                                                                uint32_t maxDrawCount, uint32_t stride) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skip = ValidateCmdDrawType(
        device_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_GRAPHICS, CMD_DRAWMESHTASKSINDIRECTCOUNTNV,
        "vkCmdDrawMeshTasksIndirectCountNV()", VK_QUEUE_GRAPHICS_BIT,
        "VUID-vkCmdDrawMeshTasksIndirectCountNV-commandBuffer-cmdpool", "VUID-vkCmdDrawMeshTasksIndirectCountNV-renderpass",
        "VUID-vkCmdDrawMeshTasksIndirectCountNV-None-02189", "VUID-vkCmdDrawMeshTasksIndirectCountNV-None-02190");
    BUFFER_STATE *buffer_state = GetBufferState(buffer);
    BUFFER_STATE *count_buffer_state = GetBufferState(countBuffer);
    skip |= ValidateMemoryIsBoundToBuffer(device_data, buffer_state, "vkCmdDrawMeshTasksIndirectCountNV()",
                                          "VUID-vkCmdDrawMeshTasksIndirectCountNV-buffer-02176");
    skip |= ValidateMemoryIsBoundToBuffer(device_data, count_buffer_state, "vkCmdDrawMeshTasksIndirectCountNV()",
                                          "VUID-vkCmdDrawMeshTasksIndirectCountNV-countBuffer-02178");

    return skip;
}

void CoreChecks::PreCallRecordCmdDrawMeshTasksIndirectCountNV(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                                              VkBuffer countBuffer, VkDeviceSize countBufferOffset,
                                                              uint32_t maxDrawCount, uint32_t stride) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = GetCBNode(commandBuffer);
    BUFFER_STATE *buffer_state = GetBufferState(buffer);
    BUFFER_STATE *count_buffer_state = GetBufferState(countBuffer);
    UpdateStateCmdDrawType(device_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
    if (buffer_state) {
        AddCommandBufferBindingBuffer(device_data, cb_state, buffer_state);
    }
    if (count_buffer_state) {
        AddCommandBufferBindingBuffer(device_data, cb_state, count_buffer_state);
    }
}

bool CoreChecks::ValidateCreateSamplerYcbcrConversion(const layer_data *device_data, const char *func_name,
                                                      const VkSamplerYcbcrConversionCreateInfo *create_info) {
    bool skip = false;
    if (GetDeviceExtensions()->vk_android_external_memory_android_hardware_buffer) {
        skip |= ValidateCreateSamplerYcbcrConversionANDROID(device_data, create_info);
    } else {  // Not an Android hardware buffer
        if (VK_FORMAT_UNDEFINED == create_info->format) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT, 0,
                            "VUID-VkSamplerYcbcrConversionCreateInfo-format-01649",
                            "%s: CreateInfo format is VK_FORMAT_UNDEFINED.", func_name);
        }
    }
    return skip;
}

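// Example (illustrative only, non-Android path; device is a placeholder handle): a
// minimal create info that passes the format check above. The multi-planar format and
// the conversion settings are arbitrary choices for the sketch.
//
//     VkSamplerYcbcrConversionCreateInfo info = {};
//     info.sType = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO;
//     info.format = VK_FORMAT_G8_B8R8_2PLANE_420_UNORM;  // must not be VK_FORMAT_UNDEFINED
//     info.ycbcrModel = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709;
//     info.ycbcrRange = VK_SAMPLER_YCBCR_RANGE_ITU_NARROW;
//     info.xChromaOffset = VK_CHROMA_LOCATION_COSITED_EVEN;
//     info.yChromaOffset = VK_CHROMA_LOCATION_COSITED_EVEN;
//     info.chromaFilter = VK_FILTER_LINEAR;
//     VkSamplerYcbcrConversion conversion;
//     vkCreateSamplerYcbcrConversion(device, &info, nullptr, &conversion);
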
bool CoreChecks::PreCallValidateCreateSamplerYcbcrConversion(VkDevice device, const VkSamplerYcbcrConversionCreateInfo *pCreateInfo,
                                                             const VkAllocationCallbacks *pAllocator,
                                                             VkSamplerYcbcrConversion *pYcbcrConversion) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    return ValidateCreateSamplerYcbcrConversion(device_data, "vkCreateSamplerYcbcrConversion()", pCreateInfo);
}

bool CoreChecks::PreCallValidateCreateSamplerYcbcrConversionKHR(VkDevice device,
                                                                const VkSamplerYcbcrConversionCreateInfo *pCreateInfo,
                                                                const VkAllocationCallbacks *pAllocator,
                                                                VkSamplerYcbcrConversion *pYcbcrConversion) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    return ValidateCreateSamplerYcbcrConversion(device_data, "vkCreateSamplerYcbcrConversionKHR()", pCreateInfo);
}

void CoreChecks::RecordCreateSamplerYcbcrConversionState(layer_data *device_data,
                                                         const VkSamplerYcbcrConversionCreateInfo *create_info,
                                                         VkSamplerYcbcrConversion ycbcr_conversion) {
    if (GetDeviceExtensions()->vk_android_external_memory_android_hardware_buffer) {
        RecordCreateSamplerYcbcrConversionANDROID(device_data, create_info, ycbcr_conversion);
    }
}

void CoreChecks::PostCallRecordCreateSamplerYcbcrConversion(VkDevice device, const VkSamplerYcbcrConversionCreateInfo *pCreateInfo,
                                                            const VkAllocationCallbacks *pAllocator,
                                                            VkSamplerYcbcrConversion *pYcbcrConversion, VkResult result) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (VK_SUCCESS != result) return;
    RecordCreateSamplerYcbcrConversionState(device_data, pCreateInfo, *pYcbcrConversion);
}

void CoreChecks::PostCallRecordCreateSamplerYcbcrConversionKHR(VkDevice device,
                                                               const VkSamplerYcbcrConversionCreateInfo *pCreateInfo,
                                                               const VkAllocationCallbacks *pAllocator,
                                                               VkSamplerYcbcrConversion *pYcbcrConversion, VkResult result) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (VK_SUCCESS != result) return;
    RecordCreateSamplerYcbcrConversionState(device_data, pCreateInfo, *pYcbcrConversion);
}

void CoreChecks::PostCallRecordDestroySamplerYcbcrConversion(VkDevice device, VkSamplerYcbcrConversion ycbcrConversion,
                                                             const VkAllocationCallbacks *pAllocator) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (!ycbcrConversion) return;
    if (GetDeviceExtensions()->vk_android_external_memory_android_hardware_buffer) {
        RecordDestroySamplerYcbcrConversionANDROID(device_data, ycbcrConversion);
    }
}

void CoreChecks::PostCallRecordDestroySamplerYcbcrConversionKHR(VkDevice device, VkSamplerYcbcrConversion ycbcrConversion,
                                                                const VkAllocationCallbacks *pAllocator) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (!ycbcrConversion) return;
    if (GetDeviceExtensions()->vk_android_external_memory_android_hardware_buffer) {
        RecordDestroySamplerYcbcrConversionANDROID(device_data, ycbcrConversion);
    }
}

bool CoreChecks::PreCallValidateGetBufferDeviceAddressEXT(VkDevice device, const VkBufferDeviceAddressInfoEXT *pInfo) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;

    if (!GetEnabledFeatures()->buffer_address.bufferDeviceAddress) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                        HandleToUint64(pInfo->buffer), "VUID-vkGetBufferDeviceAddressEXT-None-02598",
                        "The bufferDeviceAddress feature must be enabled.");
    }

    if (device_data->physical_device_count > 1 && !GetEnabledFeatures()->buffer_address.bufferDeviceAddressMultiDevice) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                        HandleToUint64(pInfo->buffer), "VUID-vkGetBufferDeviceAddressEXT-device-02599",
                        "If the device was created with multiple physical devices, then the "
                        "bufferDeviceAddressMultiDevice feature must be enabled.");
    }

    auto buffer_state = GetBufferState(pInfo->buffer);
    if (buffer_state) {
        if (!(buffer_state->createInfo.flags & VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_EXT)) {
            skip |= ValidateMemoryIsBoundToBuffer(device_data, buffer_state, "vkGetBufferDeviceAddressEXT()",
                                                  "VUID-VkBufferDeviceAddressInfoEXT-buffer-02600");
        }

        skip |= ValidateBufferUsageFlags(device_data, buffer_state, VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT, true,
                                         "VUID-VkBufferDeviceAddressInfoEXT-buffer-02601", "vkGetBufferDeviceAddressEXT()",
                                         "VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT");
    }

    return skip;
}

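// Example (illustrative only, assuming the bufferDeviceAddress feature was enabled at
// device creation; device and buffer are placeholder handles): creating the buffer with
// the device-address usage bit and binding memory to it satisfies the checks above.
//
//     VkBufferCreateInfo buffer_ci = {};
//     buffer_ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
//     buffer_ci.size = 4096;
//     buffer_ci.usage = VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT;
//     // ... create the buffer and bind memory to it ...
//     VkBufferDeviceAddressInfoEXT address_info = {};
//     address_info.sType = VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO_EXT;
//     address_info.buffer = buffer;
//     VkDeviceAddress address = vkGetBufferDeviceAddressEXT(device, &address_info);
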
void CoreChecks::PreCallRecordGetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
                                                          VkPhysicalDeviceProperties *pPhysicalDeviceProperties) {
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    if (GetEnables()->gpu_validation && GetEnables()->gpu_validation_reserve_binding_slot) {
        if (pPhysicalDeviceProperties->limits.maxBoundDescriptorSets > 1) {
            pPhysicalDeviceProperties->limits.maxBoundDescriptorSets -= 1;
        } else {
            log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                    HandleToUint64(physicalDevice), "UNASSIGNED-GPU-Assisted Validation Setup Error.",
                    "Unable to reserve descriptor binding slot on a device with only one slot.");
        }
    }
}

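// In effect, when GPU-assisted validation is enabled with slot reservation, an
// application querying device limits sees maxBoundDescriptorSets reduced by one (e.g.
// a driver-reported 8 becomes 7), leaving the top set index free for the validation
// layer's own instrumentation descriptor set.
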
VkResult CoreChecks::CoreLayerCreateValidationCacheEXT(VkDevice device, const VkValidationCacheCreateInfoEXT *pCreateInfo,
                                                       const VkAllocationCallbacks *pAllocator,
                                                       VkValidationCacheEXT *pValidationCache) {
    *pValidationCache = ValidationCache::Create(pCreateInfo);
    return *pValidationCache ? VK_SUCCESS : VK_ERROR_INITIALIZATION_FAILED;
}

void CoreChecks::CoreLayerDestroyValidationCacheEXT(VkDevice device, VkValidationCacheEXT validationCache,
                                                    const VkAllocationCallbacks *pAllocator) {
    delete (ValidationCache *)validationCache;
}

VkResult CoreChecks::CoreLayerGetValidationCacheDataEXT(VkDevice device, VkValidationCacheEXT validationCache, size_t *pDataSize,
                                                        void *pData) {
    size_t inSize = *pDataSize;
    ((ValidationCache *)validationCache)->Write(pDataSize, pData);
    return (pData && *pDataSize != inSize) ? VK_INCOMPLETE : VK_SUCCESS;
}

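// Example (illustrative only; device and cache are placeholder handles): the usual
// two-call pattern for this entry point. Query the size with a null pData, then fetch
// the blob; VK_INCOMPLETE is returned when the supplied size was too small to hold
// the full cache.
//
//     size_t size = 0;
//     vkGetValidationCacheDataEXT(device, cache, &size, nullptr);
//     std::vector<uint8_t> data(size);
//     vkGetValidationCacheDataEXT(device, cache, &size, data.data());
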
VkResult CoreChecks::CoreLayerMergeValidationCachesEXT(VkDevice device, VkValidationCacheEXT dstCache, uint32_t srcCacheCount,
                                                       const VkValidationCacheEXT *pSrcCaches) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    auto dst = (ValidationCache *)dstCache;
    auto src = (ValidationCache const *const *)pSrcCaches;
    VkResult result = VK_SUCCESS;
    for (uint32_t i = 0; i < srcCacheCount; i++) {
        if (src[i] == dst) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT,
                            0, "VUID-vkMergeValidationCachesEXT-dstCache-01536",
                            "vkMergeValidationCachesEXT: dstCache (0x%" PRIx64 ") must not appear in pSrcCaches array.",
                            HandleToUint64(dstCache));
            result = VK_ERROR_VALIDATION_FAILED_EXT;
        }
        if (!skip) {
            dst->Merge(src[i]);
        }
    }

    return result;
}
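
// Example (illustrative only; device, dst_cache, cache_a, and cache_b are placeholder
// handles): merging two caches into a third. Listing dst_cache itself in pSrcCaches
// would trigger "VUID-vkMergeValidationCachesEXT-dstCache-01536" above and halt the
// remaining merges.
//
//     VkValidationCacheEXT sources[2] = {cache_a, cache_b};
//     vkMergeValidationCachesEXT(device, dst_cache, 2, sources);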