1 /*
2  *
3  * Copyright (c) 2014-2023 The Khronos Group Inc.
4  * Copyright (c) 2014-2023 Valve Corporation
5  * Copyright (c) 2014-2023 LunarG, Inc.
6  * Copyright (C) 2015 Google Inc.
7  * Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
8  * Copyright (c) 2023-2023 RasterGrid Kft.
9  *
10  * Licensed under the Apache License, Version 2.0 (the "License");
11  * you may not use this file except in compliance with the License.
12  * You may obtain a copy of the License at
13  *
14  *     http://www.apache.org/licenses/LICENSE-2.0
15  *
16  * Unless required by applicable law or agreed to in writing, software
17  * distributed under the License is distributed on an "AS IS" BASIS,
18  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19  * See the License for the specific language governing permissions and
20  * limitations under the License.
21 
22  *
23  * Author: Jon Ashburn <jon@lunarg.com>
24  * Author: Courtney Goeltzenleuchter <courtney@LunarG.com>
25  * Author: Mark Young <marky@lunarg.com>
26  * Author: Lenny Komow <lenny@lunarg.com>
27  * Author: Charles Giessen <charles@lunarg.com>
28  *
29  */
30 
31 #include "loader.h"
32 
33 #include <errno.h>
34 #include <inttypes.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <stdarg.h>
38 #include <stdbool.h>
39 #include <string.h>
40 #include <stddef.h>
41 
42 #if defined(__APPLE__)
43 #include <CoreFoundation/CoreFoundation.h>
44 #include <sys/param.h>
45 #endif
46 
47 #include <sys/types.h>
48 #if defined(_WIN32)
49 #include "dirent_on_windows.h"
50 #elif COMMON_UNIX_PLATFORMS
51 #include <dirent.h>
52 #else
53 #warning dirent.h not available on this platform
54 #endif  // _WIN32
55 
56 #include "allocation.h"
57 #include "stack_allocation.h"
58 #include "cJSON.h"
59 #include "debug_utils.h"
60 #include "loader_environment.h"
61 #include "loader_json.h"
62 #include "log.h"
63 #include "unknown_function_handling.h"
64 #include "vk_loader_platform.h"
65 #include "wsi.h"
66 
67 #if defined(WIN32)
68 #include "loader_windows.h"
69 #endif
70 #if defined(LOADER_ENABLE_LINUX_SORT)
71 // This header is currently only used when sorting Linux devices, so don't include it otherwise.
72 #include "loader_linux.h"
73 #endif  // LOADER_ENABLE_LINUX_SORT
74 
75 // Generated file containing all the extension data
76 #include "vk_loader_extensions.c"
77 
78 #if defined(__OHOS__)
79 #define IGRAPHICS_CONFG_DIR "/system/etc/vulkan/igraphics"
80 #endif
81 
82 struct loader_struct loader = {0};
83 
84 struct activated_layer_info {
85     char *name;
86     char *manifest;
87     char *library;
88     bool is_implicit;
89     enum loader_layer_enabled_by_what enabled_by_what;
90     char *disable_env;
91     char *enable_name_env;
92     char *enable_value_env;
93 };
94 
95 // thread safety lock for accessing global data structures such as "loader"
96 // all entrypoints on the instance chain need to be locked except GPA
97 // additionally CreateDevice and DestroyDevice need to be locked
98 loader_platform_thread_mutex loader_lock;
99 loader_platform_thread_mutex loader_preload_icd_lock;
100 loader_platform_thread_mutex loader_global_instance_list_lock;
101 
102 // A list of ICDs that gets initialized when the loader does its global initialization. This list should never be used by anything
103 // other than EnumerateInstanceExtensionProperties(), vkDestroyInstance, and loader_release(). This list does not change
104 // functionality, but the fact that the libraries have already been loaded causes any call that needs to load ICD libraries to speed up
105 // significantly. This can have a huge impact when making repeated calls to vkEnumerateInstanceExtensionProperties and
106 // vkCreateInstance.
107 struct loader_icd_tramp_list preloaded_icds;
108 
109 // controls whether loader_platform_close_library() closes the libraries or not - controlled by an environment
110 // variable - this is just the definition of the variable, usage is in vk_loader_platform.h
111 bool loader_disable_dynamic_library_unloading;
112 
113 LOADER_PLATFORM_THREAD_ONCE_DECLARATION(once_init);
114 
115 // Creates loader_api_version struct that contains the major and minor fields, setting patch to 0
116 loader_api_version loader_make_version(uint32_t version) {
117     loader_api_version out_version;
118     out_version.major = VK_API_VERSION_MAJOR(version);
119     out_version.minor = VK_API_VERSION_MINOR(version);
120     out_version.patch = 0;
121     return out_version;
122 }
123 
124 // Creates loader_api_version struct containing the major, minor, and patch fields
125 loader_api_version loader_make_full_version(uint32_t version) {
126     loader_api_version out_version;
127     out_version.major = VK_API_VERSION_MAJOR(version);
128     out_version.minor = VK_API_VERSION_MINOR(version);
129     out_version.patch = VK_API_VERSION_PATCH(version);
130     return out_version;
131 }
132 
133 loader_api_version loader_combine_version(uint32_t major, uint32_t minor, uint32_t patch) {
134     loader_api_version out_version;
135     out_version.major = (uint16_t)major;
136     out_version.minor = (uint16_t)minor;
137     out_version.patch = (uint16_t)patch;
138     return out_version;
139 }
140 
141 // Helper for determining if a version meets a required minimum version
142 bool loader_check_version_meets_required(loader_api_version required, loader_api_version version) {
143     // major version is satisfied
144     return (version.major > required.major) ||
145            // major version is equal, minor version is greater than the minimum minor
146            (version.major == required.major && version.minor > required.minor) ||
147            // major and minor version are equal, patch version is greater than or equal to the minimum patch
148            (version.major == required.major && version.minor == required.minor && version.patch >= required.patch);
149 }
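
// Illustrative sketch (editorial addition, not part of the original source): the helpers above are
// typically combined to gate behavior on a minimum API version, e.g.
//
//     loader_api_version required = loader_combine_version(1, 1, 0);
//     loader_api_version actual = loader_make_full_version(VK_API_VERSION_1_2);
//     if (loader_check_version_meets_required(required, actual)) {
//         /* 1.2.0 satisfies a 1.1.0 minimum */
//     }
//
// Note that loader_make_version() deliberately drops the patch component, so comparisons built from it
// only consider major.minor.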
150 
151 const char *get_enabled_by_what_str(enum loader_layer_enabled_by_what enabled_by_what) {
152     switch (enabled_by_what) {
153         default:
154             assert(false && "Shouldn't reach this");
155             return "Unknown";
156         case (ENABLED_BY_WHAT_UNSET):
157             assert(false && "Shouldn't reach this");
158             return "Unknown";
159         case (ENABLED_BY_WHAT_LOADER_SETTINGS_FILE):
160             return "Loader Settings File (Vulkan Configurator)";
161         case (ENABLED_BY_WHAT_IMPLICIT_LAYER):
162             return "Implicit Layer";
163         case (ENABLED_BY_WHAT_VK_INSTANCE_LAYERS):
164             return "Environment Variable VK_INSTANCE_LAYERS";
165         case (ENABLED_BY_WHAT_VK_LOADER_LAYERS_ENABLE):
166             return "Environment Variable VK_LOADER_LAYERS_ENABLE";
167         case (ENABLED_BY_WHAT_IN_APPLICATION_API):
168             return "By the Application";
169         case (ENABLED_BY_WHAT_META_LAYER):
170             return "Meta Layer (Vulkan Configurator)";
171     }
172 }
173 
174 // Wrapper around opendir so that dirent_on_windows gets the instance it needs,
175 // while the Linux opendir & readdir do not
176 DIR *loader_opendir(const struct loader_instance *instance, const char *name) {
177 #if defined(_WIN32)
178     return opendir(instance ? &instance->alloc_callbacks : NULL, name);
179 #elif COMMON_UNIX_PLATFORMS
180     (void)instance;
181     return opendir(name);
182 #else
183 #warning dirent.h - opendir not available on this platform
184 #endif  // _WIN32
185 }
186 int loader_closedir(const struct loader_instance *instance, DIR *dir) {
187 #if defined(_WIN32)
188     return closedir(instance ? &instance->alloc_callbacks : NULL, dir);
189 #elif COMMON_UNIX_PLATFORMS
190     (void)instance;
191     return closedir(dir);
192 #else
193 #warning dirent.h - closedir not available on this platform
194 #endif  // _WIN32
195 }
196 
197 bool is_json(const char *path, size_t len) {
198     if (len < 5) {
199         return false;
200     }
201     return !strncmp(path, ".json", 5);
202 }
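
// Editorial note: as written, is_json() compares the first five characters of `path` against ".json",
// so callers are presumably expected to pass a pointer at the trailing ".json" suffix of a filename
// (consistent with the len < 5 guard). This is an assumption read from the code above, not verified here.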
203 
204 // Handle errors from library loading
205 void loader_handle_load_library_error(const struct loader_instance *inst, const char *filename,
206                                       enum loader_layer_library_status *lib_status) {
207     const char *error_message = loader_platform_open_library_error(filename);
208     // If the error is due to an incompatible architecture (e.g. 32 bit vs 64 bit), report it with INFO level
209     // Discussed in GitHub issues 262 & 644
210     // "wrong ELF class" is a Linux error, " with error 193" is a Windows error
211     VkFlags err_flag = VULKAN_LOADER_ERROR_BIT;
212     if (strstr(error_message, "wrong ELF class:") != NULL || strstr(error_message, " with error 193") != NULL) {
213         err_flag = VULKAN_LOADER_INFO_BIT;
214         if (NULL != lib_status) {
215             *lib_status = LOADER_LAYER_LIB_ERROR_WRONG_BIT_TYPE;
216         }
217     }
218     // Check if the error is due to lack of memory
219     // "with error 8" is the windows error code for OOM cases, aka ERROR_NOT_ENOUGH_MEMORY
220     // Linux doesn't have such a nice error message - only if there are reported issues should this be called
221     else if (strstr(error_message, " with error 8") != NULL) {
222         if (NULL != lib_status) {
223             *lib_status = LOADER_LAYER_LIB_ERROR_OUT_OF_MEMORY;
224         }
225     } else if (NULL != lib_status) {
226         *lib_status = LOADER_LAYER_LIB_ERROR_FAILED_TO_LOAD;
227     }
228     loader_log(inst, err_flag, 0, "%s", error_message);
229 }
230 
231 VKAPI_ATTR VkResult VKAPI_CALL vkSetInstanceDispatch(VkInstance instance, void *object) {
232     struct loader_instance *inst = loader_get_instance(instance);
233     if (!inst) {
234         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "vkSetInstanceDispatch: Can not retrieve Instance dispatch table.");
235         return VK_ERROR_INITIALIZATION_FAILED;
236     }
237     loader_set_dispatch(object, inst->disp);
238     return VK_SUCCESS;
239 }
240 
241 VKAPI_ATTR VkResult VKAPI_CALL vkSetDeviceDispatch(VkDevice device, void *object) {
242     struct loader_device *dev;
243     struct loader_icd_term *icd_term = loader_get_icd_and_device(device, &dev);
244 
245     if (NULL == icd_term || NULL == dev) {
246         return VK_ERROR_INITIALIZATION_FAILED;
247     }
248     loader_set_dispatch(object, &dev->loader_dispatch);
249     return VK_SUCCESS;
250 }
251 
252 void loader_free_layer_properties(const struct loader_instance *inst, struct loader_layer_properties *layer_properties) {
253     loader_instance_heap_free(inst, layer_properties->manifest_file_name);
254     loader_instance_heap_free(inst, layer_properties->lib_name);
255     loader_instance_heap_free(inst, layer_properties->functions.str_gipa);
256     loader_instance_heap_free(inst, layer_properties->functions.str_gdpa);
257     loader_instance_heap_free(inst, layer_properties->functions.str_negotiate_interface);
258     loader_destroy_generic_list(inst, (struct loader_generic_list *)&layer_properties->instance_extension_list);
259     if (layer_properties->device_extension_list.capacity > 0 && NULL != layer_properties->device_extension_list.list) {
260         for (uint32_t i = 0; i < layer_properties->device_extension_list.count; i++) {
261             free_string_list(inst, &layer_properties->device_extension_list.list[i].entrypoints);
262         }
263     }
264     loader_destroy_generic_list(inst, (struct loader_generic_list *)&layer_properties->device_extension_list);
265     loader_instance_heap_free(inst, layer_properties->disable_env_var.name);
266     loader_instance_heap_free(inst, layer_properties->disable_env_var.value);
267     loader_instance_heap_free(inst, layer_properties->enable_env_var.name);
268     loader_instance_heap_free(inst, layer_properties->enable_env_var.value);
269     free_string_list(inst, &layer_properties->component_layer_names);
270     loader_instance_heap_free(inst, layer_properties->pre_instance_functions.enumerate_instance_extension_properties);
271     loader_instance_heap_free(inst, layer_properties->pre_instance_functions.enumerate_instance_layer_properties);
272     loader_instance_heap_free(inst, layer_properties->pre_instance_functions.enumerate_instance_version);
273     free_string_list(inst, &layer_properties->override_paths);
274     free_string_list(inst, &layer_properties->blacklist_layer_names);
275     free_string_list(inst, &layer_properties->app_key_paths);
276 
277     // Make sure to clear out the removed layer, in case new layers are added in the previous location
278     memset(layer_properties, 0, sizeof(struct loader_layer_properties));
279 }
280 
281 VkResult loader_init_library_list(struct loader_layer_list *instance_layers, loader_platform_dl_handle **libs) {
282     if (instance_layers->count > 0) {
283         *libs = loader_calloc(NULL, sizeof(loader_platform_dl_handle) * instance_layers->count, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
284         if (*libs == NULL) {
285             return VK_ERROR_OUT_OF_HOST_MEMORY;
286         }
287     }
288     return VK_SUCCESS;
289 }
290 
291 VkResult loader_copy_to_new_str(const struct loader_instance *inst, const char *source_str, char **dest_str) {
292     assert(source_str && dest_str);
293     size_t str_len = strlen(source_str) + 1;
294     *dest_str = loader_instance_heap_calloc(inst, str_len, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
295     if (NULL == *dest_str) return VK_ERROR_OUT_OF_HOST_MEMORY;
296     loader_strncpy(*dest_str, str_len, source_str, str_len);
297     (*dest_str)[str_len - 1] = 0;
298     return VK_SUCCESS;
299 }
300 
301 VkResult create_string_list(const struct loader_instance *inst, uint32_t allocated_count, struct loader_string_list *string_list) {
302     assert(string_list);
303     string_list->list = loader_instance_heap_calloc(inst, sizeof(char *) * allocated_count, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
304     if (NULL == string_list->list) {
305         return VK_ERROR_OUT_OF_HOST_MEMORY;
306     }
307     string_list->allocated_count = allocated_count;
308     string_list->count = 0;
309     return VK_SUCCESS;
310 }
311 
312 VkResult append_str_to_string_list(const struct loader_instance *inst, struct loader_string_list *string_list, char *str) {
313     assert(string_list && str);
314     if (string_list->allocated_count == 0) {
315         string_list->allocated_count = 32;
316         string_list->list =
317             loader_instance_heap_calloc(inst, sizeof(char *) * string_list->allocated_count, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
318         if (NULL == string_list->list) {
319             loader_instance_heap_free(inst, str);  // Must clean up in case of failure
320             return VK_ERROR_OUT_OF_HOST_MEMORY;
321         }
322     } else if (string_list->count + 1 > string_list->allocated_count) {
323         uint32_t new_allocated_count = string_list->allocated_count * 2;
324         string_list->list = loader_instance_heap_realloc(inst, string_list->list, sizeof(char *) * string_list->allocated_count,
325                                                          sizeof(char *) * new_allocated_count, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
326         if (NULL == string_list->list) {
327             loader_instance_heap_free(inst, str);  // Must clean up in case of failure
328             return VK_ERROR_OUT_OF_HOST_MEMORY;
329         }
330         string_list->allocated_count *= 2;
331     }
332     string_list->list[string_list->count++] = str;
333     return VK_SUCCESS;
334 }
335 
336 VkResult copy_str_to_string_list(const struct loader_instance *inst, struct loader_string_list *string_list, const char *str,
337                                  size_t str_len) {
338     assert(string_list && str);
339     char *new_str = loader_instance_heap_calloc(inst, sizeof(char *) * str_len + 1, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
340     if (NULL == new_str) {
341         return VK_ERROR_OUT_OF_HOST_MEMORY;
342     }
343     loader_strncpy(new_str, sizeof(char *) * str_len + 1, str, str_len);
344     new_str[str_len] = '\0';
345     return append_str_to_string_list(inst, string_list, new_str);
346 }
347 
348 void free_string_list(const struct loader_instance *inst, struct loader_string_list *string_list) {
349     assert(string_list);
350     if (string_list->list) {
351         for (uint32_t i = 0; i < string_list->count; i++) {
352             loader_instance_heap_free(inst, string_list->list[i]);
353             string_list->list[i] = NULL;
354         }
355         loader_instance_heap_free(inst, string_list->list);
356     }
357     memset(string_list, 0, sizeof(struct loader_string_list));
358 }
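
// Illustrative sketch (editorial addition): typical lifecycle of a loader_string_list using the helpers
// above. The local names and the path literal below are hypothetical, not loader API.
//
//     struct loader_string_list paths = {0};
//     VkResult res = copy_str_to_string_list(inst, &paths, "/usr/share/vulkan", strlen("/usr/share/vulkan"));
//     if (VK_SUCCESS == res) {
//         // the list starts at 32 slots and doubles as needed; appended strings are owned by the list
//     }
//     free_string_list(inst, &paths);  // frees every element and the array, then zeroes the struct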
359 
360 // Given a string of the three part form "maj.min.pat", convert it to a Vulkan version number.
361 // It can also understand the four part form "variant.major.minor.patch" if provided.
362 uint32_t loader_parse_version_string(char *vers_str) {
363     uint32_t variant = 0, major = 0, minor = 0, patch = 0;
364     char *vers_tok;
365     char *context = NULL;
366     if (!vers_str) {
367         return 0;
368     }
369 
370     vers_tok = thread_safe_strtok(vers_str, ".\"\n\r", &context);
371     if (NULL != vers_tok) {
372         major = (uint16_t)atoi(vers_tok);
373         vers_tok = thread_safe_strtok(NULL, ".\"\n\r", &context);
374         if (NULL != vers_tok) {
375             minor = (uint16_t)atoi(vers_tok);
376             vers_tok = thread_safe_strtok(NULL, ".\"\n\r", &context);
377             if (NULL != vers_tok) {
378                 patch = (uint16_t)atoi(vers_tok);
379                 vers_tok = thread_safe_strtok(NULL, ".\"\n\r", &context);
380                 // check that we are using a 4 part version string
381                 if (NULL != vers_tok) {
382                     // if we are, move the values over into the correct place
383                     variant = major;
384                     major = minor;
385                     minor = patch;
386                     patch = (uint16_t)atoi(vers_tok);
387                 }
388             }
389         }
390     }
391 
392     return VK_MAKE_API_VERSION(variant, major, minor, patch);
393 }
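
// Worked example (editorial addition): loader_parse_version_string("1.3.204") yields
// VK_MAKE_API_VERSION(0, 1, 3, 204), while a four part string such as "1.2.3.4" is treated as
// variant.major.minor.patch and yields VK_MAKE_API_VERSION(1, 2, 3, 4). A NULL input returns 0.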
394 
395 bool compare_vk_extension_properties(const VkExtensionProperties *op1, const VkExtensionProperties *op2) {
396     return strcmp(op1->extensionName, op2->extensionName) == 0 ? true : false;
397 }
398 
399 // Search the given ext_array for an extension matching the given vk_ext_prop
400 bool has_vk_extension_property_array(const VkExtensionProperties *vk_ext_prop, const uint32_t count,
401                                      const VkExtensionProperties *ext_array) {
402     for (uint32_t i = 0; i < count; i++) {
403         if (compare_vk_extension_properties(vk_ext_prop, &ext_array[i])) return true;
404     }
405     return false;
406 }
407 
408 // Search the given ext_list for an extension matching the given vk_ext_prop
409 bool has_vk_extension_property(const VkExtensionProperties *vk_ext_prop, const struct loader_extension_list *ext_list) {
410     for (uint32_t i = 0; i < ext_list->count; i++) {
411         if (compare_vk_extension_properties(&ext_list->list[i], vk_ext_prop)) return true;
412     }
413     return false;
414 }
415 
416 // Search the given ext_list for a device extension matching the given ext_prop
417 bool has_vk_dev_ext_property(const VkExtensionProperties *ext_prop, const struct loader_device_extension_list *ext_list) {
418     for (uint32_t i = 0; i < ext_list->count; i++) {
419         if (compare_vk_extension_properties(&ext_list->list[i].props, ext_prop)) return true;
420     }
421     return false;
422 }
423 
424 VkResult loader_append_layer_property(const struct loader_instance *inst, struct loader_layer_list *layer_list,
425                                       struct loader_layer_properties *layer_property) {
426     VkResult res = VK_SUCCESS;
427     if (layer_list->capacity == 0) {
428         res = loader_init_generic_list(inst, (struct loader_generic_list *)layer_list, sizeof(struct loader_layer_properties));
429         if (VK_SUCCESS != res) {
430             goto out;
431         }
432     }
433 
434     // Ensure enough room to add an entry
435     if ((layer_list->count + 1) * sizeof(struct loader_layer_properties) > layer_list->capacity) {
436         void *new_ptr = loader_instance_heap_realloc(inst, layer_list->list, layer_list->capacity, layer_list->capacity * 2,
437                                                      VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
438         if (NULL == new_ptr) {
439             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_append_layer_property: realloc failed for layer list");
440             res = VK_ERROR_OUT_OF_HOST_MEMORY;
441             goto out;
442         }
443         layer_list->list = new_ptr;
444         layer_list->capacity *= 2;
445     }
446     memcpy(&layer_list->list[layer_list->count], layer_property, sizeof(struct loader_layer_properties));
447     layer_list->count++;
448     memset(layer_property, 0, sizeof(struct loader_layer_properties));
449 out:
450     if (res != VK_SUCCESS) {
451         loader_free_layer_properties(inst, layer_property);
452     }
453     return res;
454 }
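
// Editorial note: on success loader_append_layer_property() takes ownership of *layer_property by
// copying it into the list and then zeroing the source struct; on failure it frees the property's
// allocations instead. Either way the caller should not reuse the passed-in contents afterwards.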
455 
456 // Search the given layer list for a layer property matching the given layer name
457 struct loader_layer_properties *loader_find_layer_property(const char *name, const struct loader_layer_list *layer_list) {
458     for (uint32_t i = 0; i < layer_list->count; i++) {
459         const VkLayerProperties *item = &layer_list->list[i].info;
460         if (strcmp(name, item->layerName) == 0) return &layer_list->list[i];
461     }
462     return NULL;
463 }
464 
465 struct loader_layer_properties *loader_find_pointer_layer_property(const char *name,
466                                                                    const struct loader_pointer_layer_list *layer_list) {
467     for (uint32_t i = 0; i < layer_list->count; i++) {
468         const VkLayerProperties *item = &layer_list->list[i]->info;
469         if (strcmp(name, item->layerName) == 0) return layer_list->list[i];
470     }
471     return NULL;
472 }
473 
474 // Search the given layer list for a layer matching the given layer name
475 bool loader_find_layer_name_in_list(const char *name, const struct loader_pointer_layer_list *layer_list) {
476     if (NULL == layer_list) {
477         return false;
478     }
479     if (NULL != loader_find_pointer_layer_property(name, layer_list)) {
480         return true;
481     }
482     return false;
483 }
484 
485 // Search the given meta-layer's component list for a layer matching the given layer name
486 bool loader_find_layer_name_in_meta_layer(const struct loader_instance *inst, const char *layer_name,
487                                           struct loader_layer_list *layer_list, struct loader_layer_properties *meta_layer_props) {
488     for (uint32_t comp_layer = 0; comp_layer < meta_layer_props->component_layer_names.count; comp_layer++) {
489         if (!strcmp(meta_layer_props->component_layer_names.list[comp_layer], layer_name)) {
490             return true;
491         }
492         struct loader_layer_properties *comp_layer_props =
493             loader_find_layer_property(meta_layer_props->component_layer_names.list[comp_layer], layer_list);
494         if (comp_layer_props->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER) {
495             return loader_find_layer_name_in_meta_layer(inst, layer_name, layer_list, comp_layer_props);
496         }
497     }
498     return false;
499 }
500 
501 // Search the override layer's blacklist for a layer matching the given layer name
502 bool loader_find_layer_name_in_blacklist(const char *layer_name, struct loader_layer_properties *meta_layer_props) {
503     for (uint32_t black_layer = 0; black_layer < meta_layer_props->blacklist_layer_names.count; ++black_layer) {
504         if (!strcmp(meta_layer_props->blacklist_layer_names.list[black_layer], layer_name)) {
505             return true;
506         }
507     }
508     return false;
509 }
510 
511 // Remove all layer properties entries from the list
512 TEST_FUNCTION_EXPORT void loader_delete_layer_list_and_properties(const struct loader_instance *inst,
513                                                                   struct loader_layer_list *layer_list) {
514     uint32_t i;
515     if (!layer_list) return;
516 
517     for (i = 0; i < layer_list->count; i++) {
518         if (layer_list->list[i].lib_handle) {
519             loader_platform_close_library(layer_list->list[i].lib_handle);
520             loader_log(inst, VULKAN_LOADER_DEBUG_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Unloading layer library %s",
521                        layer_list->list[i].lib_name);
522             layer_list->list[i].lib_handle = NULL;
523         }
524         loader_free_layer_properties(inst, &(layer_list->list[i]));
525     }
526     layer_list->count = 0;
527 
528     if (layer_list->capacity > 0) {
529         layer_list->capacity = 0;
530         loader_instance_heap_free(inst, layer_list->list);
531     }
532     memset(layer_list, 0, sizeof(struct loader_layer_list));
533 }
534 
535 void loader_remove_layer_in_list(const struct loader_instance *inst, struct loader_layer_list *layer_list,
536                                  uint32_t layer_to_remove) {
537     if (layer_list == NULL || layer_to_remove >= layer_list->count) {
538         return;
539     }
540     loader_free_layer_properties(inst, &(layer_list->list[layer_to_remove]));
541 
542     // Remove the requested layer from the layer list.  Use memmove since we are
543     // overlapping the source and destination addresses.
544     if (layer_to_remove + 1 <= layer_list->count) {
545         memmove(&layer_list->list[layer_to_remove], &layer_list->list[layer_to_remove + 1],
546                 sizeof(struct loader_layer_properties) * (layer_list->count - 1 - layer_to_remove));
547     }
548     // Decrement the count (because we now have one less).  Callers iterating over the list should
549     // decrement their loop index so this index gets re-checked.
550     layer_list->count--;
551 }
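
// Illustrative sketch (editorial addition): because loader_remove_layer_in_list() shifts the remaining
// entries down, callers that remove while iterating re-check the same index, e.g.
//
//     for (int32_t j = 0; j < (int32_t)(layer_list->count); j++) {
//         if (should_remove(&layer_list->list[j])) {  // should_remove() is a hypothetical predicate
//             loader_remove_layer_in_list(inst, layer_list, j);
//             j--;  // the next entry now lives at index j
//         }
//     }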
552 
553 // Remove all layers in the layer list that are blacklisted by the override layer.
554 // NOTE: This should only be called if an override layer is found and not expired.
555 void loader_remove_layers_in_blacklist(const struct loader_instance *inst, struct loader_layer_list *layer_list) {
556     struct loader_layer_properties *override_prop = loader_find_layer_property(VK_OVERRIDE_LAYER_NAME, layer_list);
557     if (NULL == override_prop) {
558         return;
559     }
560 
561     for (int32_t j = 0; j < (int32_t)(layer_list->count); j++) {
562         struct loader_layer_properties cur_layer_prop = layer_list->list[j];
563         const char *cur_layer_name = &cur_layer_prop.info.layerName[0];
564 
565         // Skip the override layer itself.
566         if (!strcmp(VK_OVERRIDE_LAYER_NAME, cur_layer_name)) {
567             continue;
568         }
569 
570         // If found in the override layer's blacklist, remove it
571         if (loader_find_layer_name_in_blacklist(cur_layer_name, override_prop)) {
572             loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0,
573                        "loader_remove_layers_in_blacklist: Override layer is active and layer %s is in the blacklist inside of it. "
574                        "Removing that layer from current layer list.",
575                        cur_layer_name);
576             loader_remove_layer_in_list(inst, layer_list, j);
577             j--;
578 
579             // Re-do the query for the override layer
580             override_prop = loader_find_layer_property(VK_OVERRIDE_LAYER_NAME, layer_list);
581         }
582     }
583 }
584 
585 // Remove all layers in the layer list that are not found inside any implicit meta-layers.
586 void loader_remove_layers_not_in_implicit_meta_layers(const struct loader_instance *inst, struct loader_layer_list *layer_list) {
587     int32_t i;
588     int32_t j;
589     int32_t layer_count = (int32_t)(layer_list->count);
590 
591     for (i = 0; i < layer_count; i++) {
592         layer_list->list[i].keep = false;
593     }
594 
595     for (i = 0; i < layer_count; i++) {
596         struct loader_layer_properties *cur_layer_prop = &layer_list->list[i];
597 
598         if (0 == (cur_layer_prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER)) {
599             cur_layer_prop->keep = true;
600             continue;
601         }
602         for (j = 0; j < layer_count; j++) {
603             struct loader_layer_properties *layer_to_check = &layer_list->list[j];
604 
605             if (i == j) {
606                 continue;
607             }
608 
609             if (layer_to_check->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER) {
610                 // For all layers found in this meta layer, we want to keep them as well.
611                 if (loader_find_layer_name_in_meta_layer(inst, cur_layer_prop->info.layerName, layer_list, layer_to_check)) {
612                     cur_layer_prop->keep = true;
613                 }
614             }
615         }
616     }
617 
618     // Remove any layers we don't want to keep (Don't use layer_count here as we need it to be
619     // dynamically updated if we delete a layer property in the list).
620     for (i = 0; i < (int32_t)(layer_list->count); i++) {
621         struct loader_layer_properties *cur_layer_prop = &layer_list->list[i];
622         if (!cur_layer_prop->keep) {
623             loader_log(
624                 inst, VULKAN_LOADER_DEBUG_BIT, 0,
625                 "loader_remove_layers_not_in_implicit_meta_layers : Implicit meta-layers are active, and layer %s is not list "
626                 "inside of any.  So removing layer from current layer list.",
627                 cur_layer_prop->info.layerName);
628             loader_remove_layer_in_list(inst, layer_list, i);
629             i--;
630         }
631     }
632 }
633 
634 VkResult loader_add_instance_extensions(const struct loader_instance *inst,
635                                         const PFN_vkEnumerateInstanceExtensionProperties fp_get_props, const char *lib_name,
636                                         struct loader_extension_list *ext_list) {
637     uint32_t i, count = 0;
638     VkExtensionProperties *ext_props;
639     VkResult res = VK_SUCCESS;
640 
641     if (!fp_get_props) {
642         // No EnumerateInstanceExtensionProperties defined
643         goto out;
644     }
645 
646     // Make sure we never call ourselves by accident; this should never happen outside of error paths
647     if (fp_get_props == vkEnumerateInstanceExtensionProperties) {
648         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
649                    "loader_add_instance_extensions: %s's vkEnumerateInstanceExtensionProperties points to the loader, this would "
650                    "lead to infinite recursion.",
651                    lib_name);
652         goto out;
653     }
654 
655     res = fp_get_props(NULL, &count, NULL);
656     if (res != VK_SUCCESS) {
657         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
658                    "loader_add_instance_extensions: Error getting Instance extension count from %s", lib_name);
659         goto out;
660     }
661 
662     if (count == 0) {
663         // No ExtensionProperties to report
664         goto out;
665     }
666 
667     ext_props = loader_stack_alloc(count * sizeof(VkExtensionProperties));
668     if (NULL == ext_props) {
669         res = VK_ERROR_OUT_OF_HOST_MEMORY;
670         goto out;
671     }
672 
673     res = fp_get_props(NULL, &count, ext_props);
674     if (res != VK_SUCCESS) {
675         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_add_instance_extensions: Error getting Instance extensions from %s",
676                    lib_name);
677         goto out;
678     }
679 
680     for (i = 0; i < count; i++) {
681         bool ext_unsupported = wsi_unsupported_instance_extension(&ext_props[i]);
682         if (!ext_unsupported) {
683             res = loader_add_to_ext_list(inst, ext_list, 1, &ext_props[i]);
684             if (res != VK_SUCCESS) {
685                 goto out;
686             }
687         }
688     }
689 
690 out:
691     return res;
692 }
693 
694 VkResult loader_add_device_extensions(const struct loader_instance *inst,
695                                       PFN_vkEnumerateDeviceExtensionProperties fpEnumerateDeviceExtensionProperties,
696                                       VkPhysicalDevice physical_device, const char *lib_name,
697                                       struct loader_extension_list *ext_list) {
698     uint32_t i = 0, count = 0;
699     VkResult res = VK_SUCCESS;
700     VkExtensionProperties *ext_props = NULL;
701 
702     res = fpEnumerateDeviceExtensionProperties(physical_device, NULL, &count, NULL);
703     if (res != VK_SUCCESS) {
704         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
705                    "loader_add_device_extensions: Error getting physical device extension info count from library %s", lib_name);
706         return res;
707     }
708     if (count > 0) {
709         ext_props = loader_stack_alloc(count * sizeof(VkExtensionProperties));
710         if (!ext_props) {
711             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
712                        "loader_add_device_extensions: Failed to allocate space for device extension properties from library %s.",
713                        lib_name);
714             return VK_ERROR_OUT_OF_HOST_MEMORY;
715         }
716         res = fpEnumerateDeviceExtensionProperties(physical_device, NULL, &count, ext_props);
717         if (res != VK_SUCCESS) {
718             return res;
719         }
720         for (i = 0; i < count; i++) {
721             res = loader_add_to_ext_list(inst, ext_list, 1, &ext_props[i]);
722             if (res != VK_SUCCESS) {
723                 return res;
724             }
725         }
726     }
727 
728     return VK_SUCCESS;
729 }
730 
731 VkResult loader_init_generic_list(const struct loader_instance *inst, struct loader_generic_list *list_info, size_t element_size) {
732     size_t capacity = 32 * element_size;
733     list_info->count = 0;
734     list_info->capacity = 0;
735     list_info->list = loader_instance_heap_calloc(inst, capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
736     if (list_info->list == NULL) {
737         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_init_generic_list: Failed to allocate space for generic list");
738         return VK_ERROR_OUT_OF_HOST_MEMORY;
739     }
740     list_info->capacity = capacity;
741     return VK_SUCCESS;
742 }
743 
744 VkResult loader_resize_generic_list(const struct loader_instance *inst, struct loader_generic_list *list_info) {
745     list_info->list = loader_instance_heap_realloc(inst, list_info->list, list_info->capacity, list_info->capacity * 2,
746                                                    VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
747     if (list_info->list == NULL) {
748         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_resize_generic_list: Failed to allocate space for generic list");
749         return VK_ERROR_OUT_OF_HOST_MEMORY;
750     }
751     list_info->capacity = list_info->capacity * 2;
752     return VK_SUCCESS;
753 }
754 
755 void loader_destroy_generic_list(const struct loader_instance *inst, struct loader_generic_list *list) {
756     loader_instance_heap_free(inst, list->list);
757     memset(list, 0, sizeof(struct loader_generic_list));
758 }
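
// Editorial note: loader_generic_list tracks capacity in bytes, not element counts. The init helper
// above reserves room for 32 elements up front and loader_resize_generic_list() doubles the byte
// capacity, so callers compare "count * element_size" against capacity before appending
// (as loader_add_to_ext_list() does further below).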
759 
760 VkResult loader_get_next_available_entry(const struct loader_instance *inst, struct loader_used_object_list *list_info,
761                                          uint32_t *free_index, const VkAllocationCallbacks *pAllocator) {
762     if (NULL == list_info->list) {
763         VkResult res =
764             loader_init_generic_list(inst, (struct loader_generic_list *)list_info, sizeof(struct loader_used_object_status));
765         if (VK_SUCCESS != res) {
766             return res;
767         }
768     }
769     for (uint32_t i = 0; i < list_info->capacity / sizeof(struct loader_used_object_status); i++) {
770         if (list_info->list[i].status == VK_FALSE) {
771             list_info->list[i].status = VK_TRUE;
772             if (pAllocator) {
773                 list_info->list[i].allocation_callbacks = *pAllocator;
774             } else {
775                 memset(&list_info->list[i].allocation_callbacks, 0, sizeof(VkAllocationCallbacks));
776             }
777             *free_index = i;
778             return VK_SUCCESS;
779         }
780     }
781     // No free space, must resize
782 
783     size_t old_capacity = list_info->capacity;
784     VkResult res = loader_resize_generic_list(inst, (struct loader_generic_list *)list_info);
785     if (VK_SUCCESS != res) {
786         return res;
787     }
788     uint32_t new_index = (uint32_t)(old_capacity / sizeof(struct loader_used_object_status));
789     // Zero out the newly allocated back half of list.
790     memset(&list_info->list[new_index], 0, old_capacity);
791     list_info->list[new_index].status = VK_TRUE;
792     if (pAllocator) {
793         list_info->list[new_index].allocation_callbacks = *pAllocator;
794     } else {
795         memset(&list_info->list[new_index].allocation_callbacks, 0, sizeof(VkAllocationCallbacks));
796     }
797     *free_index = new_index;
798     return VK_SUCCESS;
799 }
800 
801 void loader_release_object_from_list(struct loader_used_object_list *list_info, uint32_t index_to_free) {
802     if (list_info->list && list_info->capacity > index_to_free * sizeof(struct loader_used_object_status)) {
803         list_info->list[index_to_free].status = VK_FALSE;
804         memset(&list_info->list[index_to_free].allocation_callbacks, 0, sizeof(VkAllocationCallbacks));
805     }
806 }
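
// Illustrative sketch (editorial addition): the used-object list above acts as a simple slot allocator.
// A hypothetical caller would acquire and later release a slot like this:
//
//     uint32_t index = 0;
//     VkResult res = loader_get_next_available_entry(inst, &list, &index, pAllocator);
//     if (VK_SUCCESS == res) {
//         // ... use the slot; its allocation callbacks were captured if pAllocator was non-NULL ...
//         loader_release_object_from_list(&list, index);  // marks the slot free again
//     }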
807 
808 // Append non-duplicate extension properties defined in props to the given ext_list.
809 // Return - VK_SUCCESS on success
810 VkResult loader_add_to_ext_list(const struct loader_instance *inst, struct loader_extension_list *ext_list,
811                                 uint32_t prop_list_count, const VkExtensionProperties *props) {
812     if (ext_list->list == NULL || ext_list->capacity == 0) {
813         VkResult res = loader_init_generic_list(inst, (struct loader_generic_list *)ext_list, sizeof(VkExtensionProperties));
814         if (VK_SUCCESS != res) {
815             return res;
816         }
817     }
818 
819     for (uint32_t i = 0; i < prop_list_count; i++) {
820         const VkExtensionProperties *cur_ext = &props[i];
821 
822         // look for duplicates
823         if (has_vk_extension_property(cur_ext, ext_list)) {
824             continue;
825         }
826 
827         // add to list at end
828         // check for enough capacity
829         if (ext_list->count * sizeof(VkExtensionProperties) >= ext_list->capacity) {
830             void *new_ptr = loader_instance_heap_realloc(inst, ext_list->list, ext_list->capacity, ext_list->capacity * 2,
831                                                          VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
832             if (new_ptr == NULL) {
833                 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
834                            "loader_add_to_ext_list: Failed to reallocate space for extension list");
835                 return VK_ERROR_OUT_OF_HOST_MEMORY;
836             }
837             ext_list->list = new_ptr;
838 
839             // double capacity
840             ext_list->capacity *= 2;
841         }
842 
843         memcpy(&ext_list->list[ext_list->count], cur_ext, sizeof(VkExtensionProperties));
844         ext_list->count++;
845     }
846     return VK_SUCCESS;
847 }
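
// Illustrative sketch (editorial addition): duplicates are filtered by extension name, so appending the
// same property twice only grows the list once. The values below are hypothetical.
//
//     VkExtensionProperties surface_ext = {.extensionName = VK_KHR_SURFACE_EXTENSION_NAME, .specVersion = 25};
//     loader_add_to_ext_list(inst, &ext_list, 1, &surface_ext);
//     loader_add_to_ext_list(inst, &ext_list, 1, &surface_ext);  // no-op: already present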
848 
849 // Append one extension property defined in props with entrypoints defined in entries to the given
850 // ext_list. Do not append if a duplicate.
851 // If this is a duplicate, this function frees the passed in entries - that is, it takes ownership of that list (if it is not
852 // NULL).  Return - VK_SUCCESS on success
853 VkResult loader_add_to_dev_ext_list(const struct loader_instance *inst, struct loader_device_extension_list *ext_list,
854                                     const VkExtensionProperties *props, struct loader_string_list *entrys) {
855     VkResult res = VK_SUCCESS;
856     bool should_free_entrys = true;
857     if (ext_list->list == NULL || ext_list->capacity == 0) {
858         res = loader_init_generic_list(inst, (struct loader_generic_list *)ext_list, sizeof(struct loader_dev_ext_props));
859         if (VK_SUCCESS != res) {
860             goto out;
861         }
862     }
863 
864     // look for duplicates
865     if (has_vk_dev_ext_property(props, ext_list)) {
866         goto out;
867     }
868 
869     uint32_t idx = ext_list->count;
870     // add to list at end
871     // check for enough capacity
872     if (idx * sizeof(struct loader_dev_ext_props) >= ext_list->capacity) {
873         void *new_ptr = loader_instance_heap_realloc(inst, ext_list->list, ext_list->capacity, ext_list->capacity * 2,
874                                                      VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
875 
876         if (NULL == new_ptr) {
877             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
878                        "loader_add_to_dev_ext_list: Failed to reallocate space for device extension list");
879             res = VK_ERROR_OUT_OF_HOST_MEMORY;
880             goto out;
881         }
882         ext_list->list = new_ptr;
883 
884         // double capacity
885         ext_list->capacity *= 2;
886     }
887 
888     memcpy(&ext_list->list[idx].props, props, sizeof(*props));
889     if (entrys) {
890         ext_list->list[idx].entrypoints = *entrys;
891         should_free_entrys = false;
892     }
893     ext_list->count++;
894 out:
895     if (NULL != entrys && should_free_entrys) {
896         free_string_list(inst, entrys);
897     }
898     return res;
899 }
900 
901 // Create storage for pointers to loader_layer_properties
902 bool loader_init_pointer_layer_list(const struct loader_instance *inst, struct loader_pointer_layer_list *list) {
903     list->capacity = 32 * sizeof(void *);
904     list->list = loader_instance_heap_calloc(inst, list->capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
905     if (list->list == NULL) {
906         return false;
907     }
908     list->count = 0;
909     return true;
910 }
911 
912 // Search the given array of layer names for an entry matching the given VkLayerProperties
913 bool loader_names_array_has_layer_property(const VkLayerProperties *vk_layer_prop, uint32_t layer_info_count,
914                                            struct activated_layer_info *layer_info) {
915     for (uint32_t i = 0; i < layer_info_count; i++) {
916         if (strcmp(vk_layer_prop->layerName, layer_info[i].name) == 0) {
917             return true;
918         }
919     }
920     return false;
921 }
922 
923 void loader_destroy_pointer_layer_list(const struct loader_instance *inst, struct loader_pointer_layer_list *layer_list) {
924     loader_instance_heap_free(inst, layer_list->list);
925     memset(layer_list, 0, sizeof(struct loader_pointer_layer_list));
926 }
927 
928 // Append the given layer property pointer to the given pointer layer list
929 VkResult loader_add_layer_properties_to_list(const struct loader_instance *inst, struct loader_pointer_layer_list *list,
930                                              struct loader_layer_properties *props) {
931     if (list->list == NULL || list->capacity == 0) {
932         if (!loader_init_pointer_layer_list(inst, list)) {
933             return VK_ERROR_OUT_OF_HOST_MEMORY;
934         }
935     }
936 
937     // Check for enough capacity
938     if (((list->count + 1) * sizeof(struct loader_layer_properties)) >= list->capacity) {
939         size_t new_capacity = list->capacity * 2;
940         void *new_ptr =
941             loader_instance_heap_realloc(inst, list->list, list->capacity, new_capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
942         if (NULL == new_ptr) {
943             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
944                        "loader_add_layer_properties_to_list: Realloc failed for when attempting to add new layer");
945             return VK_ERROR_OUT_OF_HOST_MEMORY;
946         }
947         list->list = new_ptr;
948         list->capacity = new_capacity;
949     }
950     list->list[list->count++] = props;
951 
952     return VK_SUCCESS;
953 }
954 
955 // Determine if the provided explicit layer should be available by querying the appropriate environment variables.
956 bool loader_layer_is_available(const struct loader_instance *inst, const struct loader_envvar_all_filters *filters,
957                                const struct loader_layer_properties *prop) {
958     bool available = true;
959     bool is_implicit = (0 == (prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER));
960     bool disabled_by_type =
961         (is_implicit) ? (filters->disable_filter.disable_all_implicit) : (filters->disable_filter.disable_all_explicit);
962     if ((filters->disable_filter.disable_all || disabled_by_type ||
963          check_name_matches_filter_environment_var(prop->info.layerName, &filters->disable_filter.additional_filters)) &&
964         !check_name_matches_filter_environment_var(prop->info.layerName, &filters->allow_filter)) {
965         available = false;
966     }
967     if (check_name_matches_filter_environment_var(prop->info.layerName, &filters->enable_filter)) {
968         available = true;
969     } else if (!available) {
970         loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
971                    "Layer \"%s\" forced disabled because name matches filter of env var \'%s\'.", prop->info.layerName,
972                    VK_LAYERS_DISABLE_ENV_VAR);
973     }
974 
975     return available;
976 }
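
// Editorial summary (not from the original source): in the function above the filter environment
// variables interact as follows - a name matching the disable filter (or a blanket disable-all /
// disable-by-type) hides the layer unless it also matches the allow filter, and a match on the enable
// filter always makes it available again.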
977 
978 // Search the given source_list for each of the given layer names.  Add any matching layers to the
979 // output layer lists.
980 VkResult loader_add_layer_names_to_list(const struct loader_instance *inst, const struct loader_envvar_all_filters *filters,
981                                         struct loader_pointer_layer_list *output_list,
982                                         struct loader_pointer_layer_list *expanded_output_list, uint32_t name_count,
983                                         const char *const *names, const struct loader_layer_list *source_list) {
984     VkResult err = VK_SUCCESS;
985 
986     for (uint32_t i = 0; i < name_count; i++) {
987         const char *source_name = names[i];
988 
989         struct loader_layer_properties *layer_prop = loader_find_layer_property(source_name, source_list);
990         if (NULL == layer_prop) {
991             loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_LAYER_BIT, 0,
992                        "loader_add_layer_names_to_list: Unable to find layer \"%s\"", source_name);
993             err = VK_ERROR_LAYER_NOT_PRESENT;
994             continue;
995         }
996 
997         // Make sure the layer isn't already in the output_list, skip adding it if it is.
998         if (loader_find_layer_name_in_list(source_name, output_list)) {
999             continue;
1000         }
1001 
1002         if (!loader_layer_is_available(inst, filters, layer_prop)) {
1003             continue;
1004         }
1005 
1006         // If not a meta-layer, simply add it.
1007         if (0 == (layer_prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER)) {
1008             layer_prop->enabled_by_what = ENABLED_BY_WHAT_IN_APPLICATION_API;
1009             err = loader_add_layer_properties_to_list(inst, output_list, layer_prop);
1010             if (err == VK_ERROR_OUT_OF_HOST_MEMORY) return err;
1011             err = loader_add_layer_properties_to_list(inst, expanded_output_list, layer_prop);
1012             if (err == VK_ERROR_OUT_OF_HOST_MEMORY) return err;
1013         } else {
1014             err = loader_add_meta_layer(inst, filters, layer_prop, output_list, expanded_output_list, source_list, NULL);
1015             if (err == VK_ERROR_OUT_OF_HOST_MEMORY) return err;
1016         }
1017     }
1018 
1019     return err;
1020 }
1021 
1022 // Determine if the provided implicit layer should be enabled by querying the appropriate environment variables.
1023 // For an implicit layer, at least a disable environment variable is required.
1024 bool loader_implicit_layer_is_enabled(const struct loader_instance *inst, const struct loader_envvar_all_filters *filters,
1025                                       const struct loader_layer_properties *prop) {
1026     bool enable = false;
1027     bool forced_disabled = false;
1028     bool forced_enabled = false;
1029 
1030     if ((filters->disable_filter.disable_all || filters->disable_filter.disable_all_implicit ||
1031          check_name_matches_filter_environment_var(prop->info.layerName, &filters->disable_filter.additional_filters)) &&
1032         !check_name_matches_filter_environment_var(prop->info.layerName, &filters->allow_filter)) {
1033         forced_disabled = true;
1034     }
1035     if (check_name_matches_filter_environment_var(prop->info.layerName, &filters->enable_filter)) {
1036         forced_enabled = true;
1037     }
1038 
1039     // If no enable_environment variable is specified, this implicit layer is always enabled by default.
1040     if (NULL == prop->enable_env_var.name) {
1041         enable = true;
1042     } else {
1043         char *env_value = loader_getenv(prop->enable_env_var.name, inst);
1044         if (env_value && !strcmp(prop->enable_env_var.value, env_value)) {
1045             enable = true;
1046         }
1047 
1048         // Otherwise, only enable this layer if the enable environment variable is defined
1049         loader_free_getenv(env_value, inst);
1050     }
1051 
1052     if (forced_enabled) {
1053         // Only report a message that we've forced on a layer if it wouldn't have been enabled
1054         // normally.
1055         if (!enable) {
1056             enable = true;
1057             loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
1058                        "Implicit layer \"%s\" forced enabled due to env var \'%s\'.", prop->info.layerName,
1059                        VK_LAYERS_ENABLE_ENV_VAR);
1060         }
1061     } else if (enable && forced_disabled) {
1062         enable = false;
1063         // Report a message that we've forced off a layer if it would have been enabled normally.
1064         loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
1065                    "Implicit layer \"%s\" forced disabled because name matches filter of env var \'%s\'.", prop->info.layerName,
1066                    VK_LAYERS_DISABLE_ENV_VAR);
1067         return enable;
1068     }
1069 
1070     // The disable_environment has priority over everything else.  If it is defined, the layer is always
1071     // disabled.
1072     if (NULL != prop->disable_env_var.name) {
1073         char *env_value = loader_getenv(prop->disable_env_var.name, inst);
1074         if (NULL != env_value) {
1075             enable = false;
1076         }
1077         loader_free_getenv(env_value, inst);
1078     } else if ((prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER) == 0) {
1079         loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
1080                    "Implicit layer \"%s\" missing disable_environment variable!", prop->info.layerName);
1081     }
1082 
1083     // Enable this layer if it is included in the override layer
1084     if (inst != NULL && inst->override_layer_present) {
1085         struct loader_layer_properties *override = NULL;
1086         for (uint32_t i = 0; i < inst->instance_layer_list.count; ++i) {
1087             if (strcmp(inst->instance_layer_list.list[i].info.layerName, VK_OVERRIDE_LAYER_NAME) == 0) {
1088                 override = &inst->instance_layer_list.list[i];
1089                 break;
1090             }
1091         }
1092         if (override != NULL) {
1093             for (uint32_t i = 0; i < override->component_layer_names.count; ++i) {
1094                 if (strcmp(override->component_layer_names.list[i], prop->info.layerName) == 0) {
1095                     enable = true;
1096                     break;
1097                 }
1098             }
1099         }
1100     }
1101 
1102     return enable;
1103 }
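// For illustration, a minimal sketch of the manifest fragment that drives the checks above, with
// hypothetical layer and variable names (not from this repository):
//
//     "layer": {
//         "name": "VK_LAYER_EXAMPLE_implicit",
//         "enable_environment":  { "ENABLE_EXAMPLE_LAYER": "1" },
//         "disable_environment": { "DISABLE_EXAMPLE_LAYER": "1" }
//     }
//
// Such a layer stays off unless ENABLE_EXAMPLE_LAYER=1 is set (or the layer is forced on through the
// enable filter environment variable referenced by VK_LAYERS_ENABLE_ENV_VAR), and defining
// DISABLE_EXAMPLE_LAYER always turns it back off.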
1104 
1105 // Check the individual implicit layer for the enable/disable environment variable settings.  Only add it after
1106 // every check has passed indicating it should be used, including making sure a layer of the same name hasn't already been
1107 // added.
1108 VkResult loader_add_implicit_layer(const struct loader_instance *inst, struct loader_layer_properties *prop,
1109                                    const struct loader_envvar_all_filters *filters, struct loader_pointer_layer_list *target_list,
1110                                    struct loader_pointer_layer_list *expanded_target_list,
1111                                    const struct loader_layer_list *source_list) {
1112     VkResult result = VK_SUCCESS;
1113     if (loader_implicit_layer_is_enabled(inst, filters, prop)) {
1114         if (0 == (prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER)) {
1115             // Make sure the layer isn't already in the output_list, skip adding it if it is.
1116             if (loader_find_layer_name_in_list(&prop->info.layerName[0], target_list)) {
1117                 return result;
1118             }
1119             prop->enabled_by_what = ENABLED_BY_WHAT_IMPLICIT_LAYER;
1120             result = loader_add_layer_properties_to_list(inst, target_list, prop);
1121             if (result == VK_ERROR_OUT_OF_HOST_MEMORY) return result;
1122             if (NULL != expanded_target_list) {
1123                 result = loader_add_layer_properties_to_list(inst, expanded_target_list, prop);
1124             }
1125         } else {
1126             result = loader_add_meta_layer(inst, filters, prop, target_list, expanded_target_list, source_list, NULL);
1127         }
1128     }
1129     return result;
1130 }
1131 
1132 // Add the component layers of a meta-layer to the active list of layers
1133 VkResult loader_add_meta_layer(const struct loader_instance *inst, const struct loader_envvar_all_filters *filters,
1134                                struct loader_layer_properties *prop, struct loader_pointer_layer_list *target_list,
1135                                struct loader_pointer_layer_list *expanded_target_list, const struct loader_layer_list *source_list,
1136                                bool *out_found_all_component_layers) {
1137     VkResult result = VK_SUCCESS;
1138     bool found_all_component_layers = true;
1139 
1140     // We need to add all the individual component layers
1141     loader_api_version meta_layer_api_version = loader_make_version(prop->info.specVersion);
1142     for (uint32_t comp_layer = 0; comp_layer < prop->component_layer_names.count; comp_layer++) {
1143         struct loader_layer_properties *search_prop =
1144             loader_find_layer_property(prop->component_layer_names.list[comp_layer], source_list);
1145         if (search_prop != NULL) {
1146             loader_api_version search_prop_version = loader_make_version(search_prop->info.specVersion);
1147             if (!loader_check_version_meets_required(meta_layer_api_version, search_prop_version)) {
1148                 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
1149                            "Meta-layer \"%s\" API version %u.%u, component layer \"%s\" version %u.%u, may have "
1150                            "incompatibilities (Policy #LLP_LAYER_8)!",
1151                            prop->info.layerName, meta_layer_api_version.major, meta_layer_api_version.minor,
1152                            search_prop->info.layerName, search_prop_version.major, search_prop_version.minor);
1153             }
1154 
1155             if (!loader_layer_is_available(inst, filters, search_prop)) {
1156                 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
1157                            "Meta Layer \"%s\" component layer \"%s\" disabled.", prop->info.layerName, search_prop->info.layerName);
1158                 continue;
1159             }
1160 
1161             // If the component layer is itself an implicit layer, we need to do the implicit layer enable
1162             // checks
1163             if (0 == (search_prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER)) {
1164                 search_prop->enabled_by_what = ENABLED_BY_WHAT_META_LAYER;
1165                 result = loader_add_implicit_layer(inst, search_prop, filters, target_list, expanded_target_list, source_list);
1166                 if (result == VK_ERROR_OUT_OF_HOST_MEMORY) return result;
1167             } else {
1168                 if (0 != (search_prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER)) {
1169                     bool found_layers_in_component_meta_layer = true;
1170                     search_prop->enabled_by_what = ENABLED_BY_WHAT_META_LAYER;
1171                     result = loader_add_meta_layer(inst, filters, search_prop, target_list, expanded_target_list, source_list,
1172                                                    &found_layers_in_component_meta_layer);
1173                     if (result == VK_ERROR_OUT_OF_HOST_MEMORY) return result;
1174                     if (!found_layers_in_component_meta_layer) found_all_component_layers = false;
1175                 } else if (!loader_find_layer_name_in_list(&search_prop->info.layerName[0], target_list)) {
1176                     // Make sure the layer isn't already in the output_list, skip adding it if it is.
1177                     search_prop->enabled_by_what = ENABLED_BY_WHAT_META_LAYER;
1178                     result = loader_add_layer_properties_to_list(inst, target_list, search_prop);
1179                     if (result == VK_ERROR_OUT_OF_HOST_MEMORY) return result;
1180                     if (NULL != expanded_target_list) {
1181                         result = loader_add_layer_properties_to_list(inst, expanded_target_list, search_prop);
1182                         if (result == VK_ERROR_OUT_OF_HOST_MEMORY) return result;
1183                     }
1184                 }
1185             }
1186         } else {
1187             loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
1188                        "Failed to find component layer \"%s\" of meta-layer \"%s\" to activate (Policy #LLP_LAYER_7)",
1189                        prop->component_layer_names.list[comp_layer], prop->info.layerName);
1190             found_all_component_layers = false;
1191         }
1192     }
1193 
1194     // Add this layer to the overall target list (not the expanded one)
1195     if (found_all_component_layers) {
1196         prop->enabled_by_what = ENABLED_BY_WHAT_META_LAYER;
1197         result = loader_add_layer_properties_to_list(inst, target_list, prop);
1198         if (result == VK_ERROR_OUT_OF_HOST_MEMORY) return result;
1199         // Write the result to out_found_all_component_layers in case this function is being recursed
1200         if (out_found_all_component_layers) *out_found_all_component_layers = found_all_component_layers;
1201     }
1202 
1203     return result;
1204 }
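// For illustration, a minimal sketch of a meta-layer manifest fragment that the expansion above
// consumes, with hypothetical layer names:
//
//     "layer": {
//         "name": "VK_LAYER_EXAMPLE_meta",
//         "component_layers": [ "VK_LAYER_EXAMPLE_first", "VK_LAYER_EXAMPLE_second" ]
//     }
//
// Every component name must resolve to a layer present in source_list; if any is missing, the
// meta-layer itself is not added (Policy #LLP_LAYER_7, logged above).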
1205 
1206 VkExtensionProperties *get_extension_property(const char *name, const struct loader_extension_list *list) {
1207     for (uint32_t i = 0; i < list->count; i++) {
1208         if (strcmp(name, list->list[i].extensionName) == 0) return &list->list[i];
1209     }
1210     return NULL;
1211 }
1212 
1213 VkExtensionProperties *get_dev_extension_property(const char *name, const struct loader_device_extension_list *list) {
1214     for (uint32_t i = 0; i < list->count; i++) {
1215         if (strcmp(name, list->list[i].props.extensionName) == 0) return &list->list[i].props;
1216     }
1217     return NULL;
1218 }
1219 
1220 // For Instance extensions implemented within the loader (e.g. VK_EXT_debug_report),
1221 // the extension must provide two entry points for the loader to use:
1222 // - "trampoline" entry point - this is the address returned by GetProcAddr
1223 //                              and will always do what's necessary to support a
1224 //                              global call.
1225 // - "terminator" function    - this function will be put at the end of the
1226 //                              instance chain and will contain the necessary logic
1227 //                              to call / process the extension for the appropriate
1228 //                              ICDs that are available.
1229 // There is no generic mechanism for including these functions, the references
1230 // must be placed into the appropriate loader entry points.
1231 // GetInstanceProcAddr: call extension GetInstanceProcAddr to check for GetProcAddr
1232 // requests
1233 // loader_coalesce_extensions(void) - add extension records to the list of global
1234 //                                    extensions available to the app.
1235 // instance_disp                    - add function pointer for terminator function
1236 //                                    to this array.
1237 // The extension itself should be in a separate file that will be linked directly
1238 // with the loader.
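// For illustration, a minimal sketch of the trampoline/terminator pair described above, using a
// hypothetical extension command vkExampleEXT (the names and dispatch slots are assumptions, not part
// of this file):
//
//     // Trampoline: the address handed back by vkGetInstanceProcAddr; enters the instance chain.
//     VKAPI_ATTR void VKAPI_CALL vkExampleEXT(VkInstance instance) {
//         struct loader_instance *inst = loader_get_instance(instance);
//         inst->disp->layer_inst_disp.ExampleEXT(instance);
//     }
//
//     // Terminator: placed at the end of the instance chain; fans the call out to each ICD that
//     // actually implements the extension.
//     VKAPI_ATTR void VKAPI_CALL terminator_ExampleEXT(VkInstance instance) {
//         struct loader_instance *inst = loader_get_instance(instance);
//         for (struct loader_icd_term *icd_term = inst->icd_terms; icd_term; icd_term = icd_term->next) {
//             if (NULL != icd_term->dispatch.ExampleEXT) icd_term->dispatch.ExampleEXT(icd_term->instance);
//         }
//     }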
1239 VkResult loader_get_icd_loader_instance_extensions(const struct loader_instance *inst, struct loader_icd_tramp_list *icd_tramp_list,
1240                                                    struct loader_extension_list *inst_exts) {
1241     struct loader_extension_list icd_exts;
1242     VkResult res = VK_SUCCESS;
1243     char *env_value;
1244     bool filter_extensions = true;
1245 
1246     // Check if a user wants to disable the instance extension filtering behavior
1247     env_value = loader_getenv("VK_LOADER_DISABLE_INST_EXT_FILTER", inst);
1248     if (NULL != env_value && atoi(env_value) != 0) {
1249         filter_extensions = false;
1250     }
1251     loader_free_getenv(env_value, inst);
1252 
1253     // traverse scanned icd list adding non-duplicate extensions to the list
1254     for (uint32_t i = 0; i < icd_tramp_list->count; i++) {
1255         res = loader_init_generic_list(inst, (struct loader_generic_list *)&icd_exts, sizeof(VkExtensionProperties));
1256         if (VK_SUCCESS != res) {
1257             goto out;
1258         }
1259         res = loader_add_instance_extensions(inst, icd_tramp_list->scanned_list[i].EnumerateInstanceExtensionProperties,
1260                                              icd_tramp_list->scanned_list[i].lib_name, &icd_exts);
1261         if (VK_SUCCESS == res) {
1262             if (filter_extensions) {
1263                 // Remove any extensions not recognized by the loader
1264                 for (int32_t j = 0; j < (int32_t)icd_exts.count; j++) {
1265                     // See if the extension is in the list of supported extensions
1266                     bool found = false;
1267                     for (uint32_t k = 0; LOADER_INSTANCE_EXTENSIONS[k] != NULL; k++) {
1268                         if (strcmp(icd_exts.list[j].extensionName, LOADER_INSTANCE_EXTENSIONS[k]) == 0) {
1269                             found = true;
1270                             break;
1271                         }
1272                     }
1273 
1274                     // If it isn't in the list, remove it
1275                     if (!found) {
1276                         for (uint32_t k = j + 1; k < icd_exts.count; k++) {
1277                             icd_exts.list[k - 1] = icd_exts.list[k];
1278                         }
1279                         --icd_exts.count;
1280                         --j;
1281                     }
1282                 }
1283             }
1284 
1285             res = loader_add_to_ext_list(inst, inst_exts, icd_exts.count, icd_exts.list);
1286         }
1287         loader_destroy_generic_list(inst, (struct loader_generic_list *)&icd_exts);
1288         if (VK_SUCCESS != res) {
1289             goto out;
1290         }
1291     }
1292 
1293     // Traverse loader's extensions, adding non-duplicate extensions to the list
1294     res = add_debug_extensions_to_ext_list(inst, inst_exts);
1295     if (res == VK_ERROR_OUT_OF_HOST_MEMORY) {
1296         goto out;
1297     }
1298     const VkExtensionProperties portability_enumeration_extension_info[] = {
1299         {VK_KHR_PORTABILITY_ENUMERATION_EXTENSION_NAME, VK_KHR_PORTABILITY_ENUMERATION_SPEC_VERSION}};
1300 
1301     // Add VK_KHR_portability_enumeration
1302     res = loader_add_to_ext_list(inst, inst_exts, sizeof(portability_enumeration_extension_info) / sizeof(VkExtensionProperties),
1303                                  portability_enumeration_extension_info);
1304     if (res == VK_ERROR_OUT_OF_HOST_MEMORY) {
1305         goto out;
1306     }
1307 
1308     const VkExtensionProperties direct_driver_loading_extension_info[] = {
1309         {VK_LUNARG_DIRECT_DRIVER_LOADING_EXTENSION_NAME, VK_LUNARG_DIRECT_DRIVER_LOADING_SPEC_VERSION}};
1310 
1311     // Add VK_LUNARG_direct_driver_loading
1312     res = loader_add_to_ext_list(inst, inst_exts, sizeof(direct_driver_loading_extension_info) / sizeof(VkExtensionProperties),
1313                                  direct_driver_loading_extension_info);
1314     if (res == VK_ERROR_OUT_OF_HOST_MEMORY) {
1315         goto out;
1316     }
1317 
1318 out:
1319     return res;
1320 }
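// The extension filtering above can be switched off for debugging by setting the environment variable
// read at the top of this function, e.g. VK_LOADER_DISABLE_INST_EXT_FILTER=1 (any value that atoi()
// parses as non-zero works); unrecognized driver extensions are then reported to the application.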
1321 
1322 struct loader_icd_term *loader_get_icd_and_device(const void *device, struct loader_device **found_dev) {
1323     VkLayerDispatchTable *dispatch_table_device = loader_get_dispatch(device);
1324     if (NULL == dispatch_table_device) {
1325         *found_dev = NULL;
1326         return NULL;
1327     }
1328     loader_platform_thread_lock_mutex(&loader_global_instance_list_lock);
1329     *found_dev = NULL;
1330 
1331     for (struct loader_instance *inst = loader.instances; inst; inst = inst->next) {
1332         for (struct loader_icd_term *icd_term = inst->icd_terms; icd_term; icd_term = icd_term->next) {
1333             for (struct loader_device *dev = icd_term->logical_device_list; dev; dev = dev->next) {
1334                 // Value comparison of device prevents object wrapping by layers
1335                 if (loader_get_dispatch(dev->icd_device) == dispatch_table_device ||
1336                     (dev->chain_device != VK_NULL_HANDLE && loader_get_dispatch(dev->chain_device) == dispatch_table_device)) {
1337                     *found_dev = dev;
1338                     loader_platform_thread_unlock_mutex(&loader_global_instance_list_lock);
1339                     return icd_term;
1340                 }
1341             }
1342         }
1343     }
1344     loader_platform_thread_unlock_mutex(&loader_global_instance_list_lock);
1345     return NULL;
1346 }
1347 
1348 void loader_destroy_logical_device(struct loader_device *dev, const VkAllocationCallbacks *pAllocator) {
1349     if (pAllocator) {
1350         dev->alloc_callbacks = *pAllocator;
1351     }
1352     loader_device_heap_free(dev, dev);
1353 }
1354 
1355 struct loader_device *loader_create_logical_device(const struct loader_instance *inst, const VkAllocationCallbacks *pAllocator) {
1356     struct loader_device *new_dev;
1357     new_dev = loader_calloc(pAllocator, sizeof(struct loader_device), VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1358 
1359     if (!new_dev) {
1360         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_create_logical_device: Failed to alloc struct loader_device");
1361         return NULL;
1362     }
1363 
1364     new_dev->loader_dispatch.core_dispatch.magic = DEVICE_DISP_TABLE_MAGIC_NUMBER;
1365 
1366     if (pAllocator) {
1367         new_dev->alloc_callbacks = *pAllocator;
1368     }
1369 
1370     return new_dev;
1371 }
1372 
1373 void loader_add_logical_device(struct loader_icd_term *icd_term, struct loader_device *dev) {
1374     dev->next = icd_term->logical_device_list;
1375     icd_term->logical_device_list = dev;
1376 }
1377 
1378 void loader_remove_logical_device(struct loader_icd_term *icd_term, struct loader_device *found_dev,
1379                                   const VkAllocationCallbacks *pAllocator) {
1380     struct loader_device *dev, *prev_dev;
1381 
1382     if (!icd_term || !found_dev) return;
1383 
1384     prev_dev = NULL;
1385     dev = icd_term->logical_device_list;
1386     while (dev && dev != found_dev) {
1387         prev_dev = dev;
1388         dev = dev->next;
1389     }
1390 
1391     if (prev_dev)
1392         prev_dev->next = found_dev->next;
1393     else
1394         icd_term->logical_device_list = found_dev->next;
1395     loader_destroy_logical_device(found_dev, pAllocator);
1396 }
1397 
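// Returns the callbacks only if every required callback pointer is populated; otherwise returns NULL
// so callers fall back to default allocation.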
1398 const VkAllocationCallbacks *ignore_null_callback(const VkAllocationCallbacks *callbacks) {
1399     return NULL != callbacks->pfnAllocation && NULL != callbacks->pfnFree && NULL != callbacks->pfnReallocation &&
1400                    NULL != callbacks->pfnInternalAllocation && NULL != callbacks->pfnInternalFree
1401                ? callbacks
1402                : NULL;
1403 }
1404 
1405 // Try to close any open objects on the loader_icd_term - this must be done before destroying the instance
1406 void loader_icd_close_objects(struct loader_instance *ptr_inst, struct loader_icd_term *icd_term) {
1407     for (uint32_t i = 0; i < icd_term->surface_list.capacity / sizeof(VkSurfaceKHR); i++) {
1408         if (ptr_inst->surfaces_list.capacity > i * sizeof(struct loader_used_object_status) &&
1409             ptr_inst->surfaces_list.list[i].status == VK_TRUE && NULL != icd_term->surface_list.list &&
1410             icd_term->surface_list.list[i] && NULL != icd_term->dispatch.DestroySurfaceKHR) {
1411             icd_term->dispatch.DestroySurfaceKHR(icd_term->instance, icd_term->surface_list.list[i],
1412                                                  ignore_null_callback(&(ptr_inst->surfaces_list.list[i].allocation_callbacks)));
1413             icd_term->surface_list.list[i] = (VkSurfaceKHR)(uintptr_t)NULL;
1414         }
1415     }
1416     for (uint32_t i = 0; i < icd_term->debug_utils_messenger_list.capacity / sizeof(VkDebugUtilsMessengerEXT); i++) {
1417         if (ptr_inst->debug_utils_messengers_list.capacity > i * sizeof(struct loader_used_object_status) &&
1418             ptr_inst->debug_utils_messengers_list.list[i].status == VK_TRUE && NULL != icd_term->debug_utils_messenger_list.list &&
1419             icd_term->debug_utils_messenger_list.list[i] && NULL != icd_term->dispatch.DestroyDebugUtilsMessengerEXT) {
1420             icd_term->dispatch.DestroyDebugUtilsMessengerEXT(
1421                 icd_term->instance, icd_term->debug_utils_messenger_list.list[i],
1422                 ignore_null_callback(&(ptr_inst->debug_utils_messengers_list.list[i].allocation_callbacks)));
1423             icd_term->debug_utils_messenger_list.list[i] = (VkDebugUtilsMessengerEXT)(uintptr_t)NULL;
1424         }
1425     }
1426     for (uint32_t i = 0; i < icd_term->debug_report_callback_list.capacity / sizeof(VkDebugReportCallbackEXT); i++) {
1427         if (ptr_inst->debug_report_callbacks_list.capacity > i * sizeof(struct loader_used_object_status) &&
1428             ptr_inst->debug_report_callbacks_list.list[i].status == VK_TRUE && NULL != icd_term->debug_report_callback_list.list &&
1429             icd_term->debug_report_callback_list.list[i] && NULL != icd_term->dispatch.DestroyDebugReportCallbackEXT) {
1430             icd_term->dispatch.DestroyDebugReportCallbackEXT(
1431                 icd_term->instance, icd_term->debug_report_callback_list.list[i],
1432                 ignore_null_callback(&(ptr_inst->debug_report_callbacks_list.list[i].allocation_callbacks)));
1433             icd_term->debug_report_callback_list.list[i] = (VkDebugReportCallbackEXT)(uintptr_t)NULL;
1434         }
1435     }
1436 }
1437 // Free resources allocated inside the loader_icd_term
1438 void loader_icd_destroy(struct loader_instance *ptr_inst, struct loader_icd_term *icd_term,
1439                         const VkAllocationCallbacks *pAllocator) {
1440     ptr_inst->icd_terms_count--;
1441     for (struct loader_device *dev = icd_term->logical_device_list; dev;) {
1442         struct loader_device *next_dev = dev->next;
1443         loader_destroy_logical_device(dev, pAllocator);
1444         dev = next_dev;
1445     }
1446 
1447     loader_destroy_generic_list(ptr_inst, (struct loader_generic_list *)&icd_term->surface_list);
1448     loader_destroy_generic_list(ptr_inst, (struct loader_generic_list *)&icd_term->debug_utils_messenger_list);
1449     loader_destroy_generic_list(ptr_inst, (struct loader_generic_list *)&icd_term->debug_report_callback_list);
1450 
1451     loader_instance_heap_free(ptr_inst, icd_term);
1452 }
1453 
1454 struct loader_icd_term *loader_icd_add(struct loader_instance *ptr_inst, const struct loader_scanned_icd *scanned_icd) {
1455     struct loader_icd_term *icd_term;
1456 
1457     icd_term = loader_instance_heap_calloc(ptr_inst, sizeof(struct loader_icd_term), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
1458     if (!icd_term) {
1459         return NULL;
1460     }
1461 
1462     icd_term->scanned_icd = scanned_icd;
1463     icd_term->this_instance = ptr_inst;
1464 
1465     // Prepend to the list
1466     icd_term->next = ptr_inst->icd_terms;
1467     ptr_inst->icd_terms = icd_term;
1468     ptr_inst->icd_terms_count++;
1469 
1470     return icd_term;
1471 }
1472 // Closes the library handle in the scanned ICD, frees the lib_name string, and zeros out all data
1473 void loader_unload_scanned_icd(struct loader_instance *inst, struct loader_scanned_icd *scanned_icd) {
1474     if (NULL == scanned_icd) {
1475         return;
1476     }
1477     if (scanned_icd->handle) {
1478         loader_platform_close_library(scanned_icd->handle);
1479         scanned_icd->handle = NULL;
1480     }
1481     loader_instance_heap_free(inst, scanned_icd->lib_name);
1482     memset(scanned_icd, 0, sizeof(struct loader_scanned_icd));
1483 }
1484 
1485 // Determine the ICD interface version to use.
1486 //     @param fp_negotiate_icd_version The ICD's vk_icdNegotiateLoaderICDInterfaceVersion function, or NULL
1487 //     @param pVersion Output parameter indicating which version to use, or 0 if
1488 //            the negotiation API is not supported by the ICD
1489 //     @return  true if the selected interface version is supported by the loader,
1490 //            false if the version is not supported
1491 bool loader_get_icd_interface_version(PFN_vkNegotiateLoaderICDInterfaceVersion fp_negotiate_icd_version, uint32_t *pVersion) {
1492     if (fp_negotiate_icd_version == NULL) {
1493         // ICD does not support the negotiation API, so it supports version 0 or 1;
1494         // calling code must determine which of the two it is
1495         *pVersion = 0;
1496     } else {
1497         // ICD supports the negotiation API, so call it with the loader's
1498         // latest version supported
1499         *pVersion = CURRENT_LOADER_ICD_INTERFACE_VERSION;
1500         VkResult result = fp_negotiate_icd_version(pVersion);
1501 
1502         if (result == VK_ERROR_INCOMPATIBLE_DRIVER) {
1503             // ICD no longer supports the loader's latest interface version so
1504             // fail loading the ICD
1505             return false;
1506         }
1507     }
1508 
1509 #if MIN_SUPPORTED_LOADER_ICD_INTERFACE_VERSION > 0
1510     if (*pVersion < MIN_SUPPORTED_LOADER_ICD_INTERFACE_VERSION) {
1511         // The ICD's interface version is older than the minimum the loader supports,
1512         // so fail loading the ICD
1513         return false;
1514     }
1515 #endif
1516     return true;
1517 }
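// For context, a minimal sketch of the driver-side counterpart of the negotiation above, for a
// hypothetical driver that implements interface version 5 (illustrative, not a real driver):
//
//     VKAPI_ATTR VkResult VKAPI_CALL vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion) {
//         // *pSupportedVersion arrives holding the loader's latest supported version.
//         if (*pSupportedVersion > 5) {
//             *pSupportedVersion = 5;  // clamp to what the driver implements
//         }
//         // A driver would return VK_ERROR_INCOMPATIBLE_DRIVER if it could not support
//         // the resulting version at all.
//         return VK_SUCCESS;
//     }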
1518 
1519 void loader_clear_scanned_icd_list(const struct loader_instance *inst, struct loader_icd_tramp_list *icd_tramp_list) {
1520     if (0 != icd_tramp_list->capacity && icd_tramp_list->scanned_list) {
1521         for (uint32_t i = 0; i < icd_tramp_list->count; i++) {
1522             if (icd_tramp_list->scanned_list[i].handle) {
1523                 loader_platform_close_library(icd_tramp_list->scanned_list[i].handle);
1524                 icd_tramp_list->scanned_list[i].handle = NULL;
1525             }
1526             loader_instance_heap_free(inst, icd_tramp_list->scanned_list[i].lib_name);
1527         }
1528         loader_instance_heap_free(inst, icd_tramp_list->scanned_list);
1529     }
1530     memset(icd_tramp_list, 0, sizeof(struct loader_icd_tramp_list));
1531 }
1532 
1533 VkResult loader_init_scanned_icd_list(const struct loader_instance *inst, struct loader_icd_tramp_list *icd_tramp_list) {
1534     VkResult res = VK_SUCCESS;
1535     loader_clear_scanned_icd_list(inst, icd_tramp_list);
1536     icd_tramp_list->capacity = 8 * sizeof(struct loader_scanned_icd);
1537     icd_tramp_list->scanned_list = loader_instance_heap_alloc(inst, icd_tramp_list->capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
1538     if (NULL == icd_tramp_list->scanned_list) {
1539         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
1540                    "loader_init_scanned_icd_list: Failed to allocate memory for the scanned ICD list");
1541         res = VK_ERROR_OUT_OF_HOST_MEMORY;
1542     }
1543     return res;
1544 }
1545 
1546 VkResult loader_add_direct_driver(const struct loader_instance *inst, uint32_t index,
1547                                   const VkDirectDriverLoadingInfoLUNARG *pDriver, struct loader_icd_tramp_list *icd_tramp_list) {
1548     // Assume pDriver is valid, since there is no real way to check it. Calling code should make sure the pointer to the array
1549     // of VkDirectDriverLoadingInfoLUNARG structures is non-null.
1550     if (NULL == pDriver->pfnGetInstanceProcAddr) {
1551         loader_log(
1552             inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
1553             "loader_add_direct_driver: VkDirectDriverLoadingInfoLUNARG structure at index %d contains a NULL pointer for the "
1554             "pfnGetInstanceProcAddr member, skipping.",
1555             index);
1556         return VK_ERROR_INITIALIZATION_FAILED;
1557     }
1558 
1559     PFN_vkGetInstanceProcAddr fp_get_proc_addr = pDriver->pfnGetInstanceProcAddr;
1560     PFN_vkCreateInstance fp_create_inst = NULL;
1561     PFN_vkEnumerateInstanceExtensionProperties fp_get_inst_ext_props = NULL;
1562     PFN_GetPhysicalDeviceProcAddr fp_get_phys_dev_proc_addr = NULL;
1563     PFN_vkNegotiateLoaderICDInterfaceVersion fp_negotiate_icd_version = NULL;
1564 #if defined(VK_USE_PLATFORM_WIN32_KHR)
1565     PFN_vk_icdEnumerateAdapterPhysicalDevices fp_enum_dxgi_adapter_phys_devs = NULL;
1566 #endif
1567     struct loader_scanned_icd *new_scanned_icd;
1568     uint32_t interface_version = 0;
1569 
1570     // Try to get the negotiate ICD interface version function
1571     fp_negotiate_icd_version = (PFN_vk_icdNegotiateLoaderICDInterfaceVersion)pDriver->pfnGetInstanceProcAddr(
1572         NULL, "vk_icdNegotiateLoaderICDInterfaceVersion");
1573 
1574     if (NULL == fp_negotiate_icd_version) {
1575         loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
1576                    "loader_add_direct_driver: Could not get 'vk_icdNegotiateLoaderICDInterfaceVersion' from "
1577                    "VkDirectDriverLoadingInfoLUNARG structure at "
1578                    "index %d, skipping.",
1579                    index);
1580         return VK_ERROR_INITIALIZATION_FAILED;
1581     }
1582 
1583     if (!loader_get_icd_interface_version(fp_negotiate_icd_version, &interface_version)) {
1584         loader_log(
1585             inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
1586             "loader_add_direct_driver: VkDirectDriverLoadingInfoLUNARG structure at index %d supports interface version %d, "
1587             "which is incompatible with the Loader Driver Interface version that supports the VK_LUNARG_direct_driver_loading "
1588             "extension, skipping.",
1589             index, interface_version);
1590         return VK_ERROR_INITIALIZATION_FAILED;
1591     }
1592 
1593     if (interface_version < 7) {
1594         loader_log(
1595             inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
1596             "loader_add_direct_driver: VkDirectDriverLoadingInfoLUNARG structure at index %d reports interface version %d, "
1597             "but version 7 or newer is required to support the VK_LUNARG_direct_driver_loading "
1598             "extension, skipping.",
1599             index, interface_version);
1600         return VK_ERROR_INITIALIZATION_FAILED;
1601     }
1602 
1603     fp_create_inst = (PFN_vkCreateInstance)pDriver->pfnGetInstanceProcAddr(NULL, "vkCreateInstance");
1604     if (NULL == fp_create_inst) {
1605         loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
1606                    "loader_add_direct_driver: Could not get 'vkCreateInstance' from VkDirectDriverLoadingInfoLUNARG structure at "
1607                    "index %d, skipping.",
1608                    index);
1609         return VK_ERROR_INITIALIZATION_FAILED;
1610     }
1611     fp_get_inst_ext_props =
1612         (PFN_vkEnumerateInstanceExtensionProperties)pDriver->pfnGetInstanceProcAddr(NULL, "vkEnumerateInstanceExtensionProperties");
1613     if (NULL == fp_get_inst_ext_props) {
1614         loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
1615                    "loader_add_direct_driver: Could not get 'vkEnumerateInstanceExtensionProperties' from "
1616                    "VkDirectDriverLoadingInfoLUNARG structure at index %d, skipping.",
1617                    index);
1618         return VK_ERROR_INITIALIZATION_FAILED;
1619     }
1620 
1621     fp_get_phys_dev_proc_addr =
1622         (PFN_vk_icdGetPhysicalDeviceProcAddr)pDriver->pfnGetInstanceProcAddr(NULL, "vk_icdGetPhysicalDeviceProcAddr");
1623 #if defined(VK_USE_PLATFORM_WIN32_KHR)
1624     // Query "vk_icdEnumerateAdapterPhysicalDevices" through the provided pfnGetInstanceProcAddr; direct drivers have no
1625     // library handle, so there is no dynamic-linker fallback here
1626     fp_enum_dxgi_adapter_phys_devs =
1627         (PFN_vk_icdEnumerateAdapterPhysicalDevices)pDriver->pfnGetInstanceProcAddr(NULL, "vk_icdEnumerateAdapterPhysicalDevices");
1628 #endif
1629 
1630     // check for enough capacity
1631     if ((icd_tramp_list->count * sizeof(struct loader_scanned_icd)) >= icd_tramp_list->capacity) {
1632         void *new_ptr = loader_instance_heap_realloc(inst, icd_tramp_list->scanned_list, icd_tramp_list->capacity,
1633                                                      icd_tramp_list->capacity * 2, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
1634         if (NULL == new_ptr) {
1635             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
1636                        "loader_add_direct_driver: Realloc failed on icd library list for ICD index %u", index);
1637             return VK_ERROR_OUT_OF_HOST_MEMORY;
1638         }
1639         icd_tramp_list->scanned_list = new_ptr;
1640 
1641         // double capacity
1642         icd_tramp_list->capacity *= 2;
1643     }
1644 
1645     // Default to Vulkan 1.1: a driver must support at least 1.1 to implement interface version 7
1646     uint32_t api_version = VK_API_VERSION_1_1;
1647     PFN_vkEnumerateInstanceVersion icd_enumerate_instance_version =
1648         (PFN_vkEnumerateInstanceVersion)pDriver->pfnGetInstanceProcAddr(NULL, "vkEnumerateInstanceVersion");
1649 
1650     if (icd_enumerate_instance_version) {
1651         VkResult res = icd_enumerate_instance_version(&api_version);
1652         if (res != VK_SUCCESS) {
1653             return res;
1654         }
1655     }
1656 
1657     new_scanned_icd = &(icd_tramp_list->scanned_list[icd_tramp_list->count]);
1658     new_scanned_icd->handle = NULL;
1659     new_scanned_icd->api_version = api_version;
1660     new_scanned_icd->GetInstanceProcAddr = fp_get_proc_addr;
1661     new_scanned_icd->GetPhysicalDeviceProcAddr = fp_get_phys_dev_proc_addr;
1662     new_scanned_icd->EnumerateInstanceExtensionProperties = fp_get_inst_ext_props;
1663     new_scanned_icd->CreateInstance = fp_create_inst;
1664 #if defined(VK_USE_PLATFORM_WIN32_KHR)
1665     new_scanned_icd->EnumerateAdapterPhysicalDevices = fp_enum_dxgi_adapter_phys_devs;
1666 #endif
1667     new_scanned_icd->interface_version = interface_version;
1668 
1669     new_scanned_icd->lib_name = NULL;
1670     icd_tramp_list->count++;
1671 
1672     loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
1673                "loader_add_direct_driver: Adding driver found at index %d of "
1674                "the VkDirectDriverLoadingListLUNARG::pDrivers array. pfnGetInstanceProcAddr was set to %p",
1675                index, pDriver->pfnGetInstanceProcAddr);
1676 
1677     return VK_SUCCESS;
1678 }
1679 
1680 // Search through VkInstanceCreateInfo's pNext chain for any drivers from the direct driver loading extension and load them.
1681 VkResult loader_scan_for_direct_drivers(const struct loader_instance *inst, const VkInstanceCreateInfo *pCreateInfo,
1682                                         struct loader_icd_tramp_list *icd_tramp_list, bool *direct_driver_loading_exclusive_mode) {
1683     if (NULL == pCreateInfo) {
1684         // Don't do this logic unless we are being called from vkCreateInstance, when pCreateInfo will be non-null
1685         return VK_SUCCESS;
1686     }
1687     bool direct_driver_loading_enabled = false;
1688     // Check if VK_LUNARG_direct_driver_loading is enabled and if we are using it exclusively
1689     // Skip this step if inst is NULL, aka when this function is being called before instance creation
1690     if (inst != NULL && pCreateInfo->ppEnabledExtensionNames && pCreateInfo->enabledExtensionCount > 0) {
1691         // Look through the enabled extension list, make sure VK_LUNARG_direct_driver_loading is present
1692         for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
1693             if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_LUNARG_DIRECT_DRIVER_LOADING_EXTENSION_NAME) == 0) {
1694                 direct_driver_loading_enabled = true;
1695                 break;
1696             }
1697         }
1698     }
1699     const VkDirectDriverLoadingListLUNARG *ddl_list = NULL;
1700     // Find the VkDirectDriverLoadingListLUNARG struct in the pNext chain of VkInstanceCreateInfo
1701     const void *pNext = pCreateInfo->pNext;
1702     while (pNext) {
1703         VkBaseInStructure out_structure = {0};
1704         memcpy(&out_structure, pNext, sizeof(VkBaseInStructure));
1705         if (out_structure.sType == VK_STRUCTURE_TYPE_DIRECT_DRIVER_LOADING_LIST_LUNARG) {
1706             ddl_list = (VkDirectDriverLoadingListLUNARG *)pNext;
1707             break;
1708         }
1709         pNext = out_structure.pNext;
1710     }
1711     if (NULL == ddl_list) {
1712         if (direct_driver_loading_enabled) {
1713             loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
1714                        "loader_scan_for_direct_drivers: The VK_LUNARG_direct_driver_loading extension was enabled but the "
1715                        "pNext chain of "
1716                        "VkInstanceCreateInfo did not contain the "
1717                        "VkDirectDriverLoadingListLUNARG structure.");
1718         }
1719         // Always want to exit early if there was no VkDirectDriverLoadingListLUNARG in the pNext chain
1720         return VK_SUCCESS;
1721     }
1722 
1723     if (!direct_driver_loading_enabled) {
1724         loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
1725                    "loader_scan_for_direct_drivers: The pNext chain of VkInstanceCreateInfo contained the "
1726                    "VkDirectDriverLoadingListLUNARG structure, but the VK_LUNARG_direct_driver_loading extension was "
1727                    "not enabled.");
1728         return VK_SUCCESS;
1729     }
1730     // If we are using exclusive mode, skip looking for any more drivers from system or environment variables
1731     if (ddl_list->mode == VK_DIRECT_DRIVER_LOADING_MODE_EXCLUSIVE_LUNARG) {
1732         *direct_driver_loading_exclusive_mode = true;
1733         loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
1734                    "loader_scan_for_direct_drivers: The VK_LUNARG_direct_driver_loading extension is active and specified "
1735                    "VK_DIRECT_DRIVER_LOADING_MODE_EXCLUSIVE_LUNARG, skipping system and environment "
1736                    "variable driver search mechanisms.");
1737     }
1738     if (NULL == ddl_list->pDrivers) {
1739         loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
1740                    "loader_scan_for_direct_drivers: The VkDirectDriverLoadingListLUNARG structure in the pNext chain of "
1741                    "VkInstanceCreateInfo has a NULL pDrivers member.");
1742         return VK_SUCCESS;
1743     }
1744     if (ddl_list->driverCount == 0) {
1745         loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
1746                    "loader_scan_for_direct_drivers: The VkDirectDriverLoadingListLUNARG structure in the pNext chain of "
1747                    "VkInstanceCreateInfo has a non-null pDrivers member but a driverCount member with a value "
1748                    "of zero.");
1749         return VK_SUCCESS;
1750     }
1751     // Go through all VkDirectDriverLoadingInfoLUNARG entries and add each driver
1752     // Because icd_tramp's are prepended, this will result in the drivers appearing at the end
1753     for (uint32_t i = 0; i < ddl_list->driverCount; i++) {
1754         VkResult res = loader_add_direct_driver(inst, i, &ddl_list->pDrivers[i], icd_tramp_list);
1755         if (res == VK_ERROR_OUT_OF_HOST_MEMORY) {
1756             return res;
1757         }
1758     }
1759 
1760     return VK_SUCCESS;
1761 }
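// For illustration, a minimal sketch of the application-side setup that the scan above consumes,
// assuming a hypothetical driver entry point my_driver_gipa:
//
//     VkDirectDriverLoadingInfoLUNARG driver_info = {
//         .sType = VK_STRUCTURE_TYPE_DIRECT_DRIVER_LOADING_INFO_LUNARG,
//         .pfnGetInstanceProcAddr = my_driver_gipa,
//     };
//     VkDirectDriverLoadingListLUNARG driver_list = {
//         .sType = VK_STRUCTURE_TYPE_DIRECT_DRIVER_LOADING_LIST_LUNARG,
//         .mode = VK_DIRECT_DRIVER_LOADING_MODE_EXCLUSIVE_LUNARG,
//         .driverCount = 1,
//         .pDrivers = &driver_info,
//     };
//     const char *extensions[] = {VK_LUNARG_DIRECT_DRIVER_LOADING_EXTENSION_NAME};
//     VkInstanceCreateInfo create_info = {
//         .sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
//         .pNext = &driver_list,
//         .enabledExtensionCount = 1,
//         .ppEnabledExtensionNames = extensions,
//     };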
1762 
1763 VkResult loader_scanned_icd_add(const struct loader_instance *inst, struct loader_icd_tramp_list *icd_tramp_list,
1764                                 const char *filename, uint32_t api_version, enum loader_layer_library_status *lib_status) {
1765     loader_platform_dl_handle handle = NULL;
1766     PFN_vkCreateInstance fp_create_inst = NULL;
1767     PFN_vkEnumerateInstanceExtensionProperties fp_get_inst_ext_props = NULL;
1768     PFN_vkGetInstanceProcAddr fp_get_proc_addr = NULL;
1769     PFN_GetPhysicalDeviceProcAddr fp_get_phys_dev_proc_addr = NULL;
1770     PFN_vkNegotiateLoaderICDInterfaceVersion fp_negotiate_icd_version = NULL;
1771 #if defined(VK_USE_PLATFORM_WIN32_KHR)
1772     PFN_vk_icdEnumerateAdapterPhysicalDevices fp_enum_dxgi_adapter_phys_devs = NULL;
1773 #endif
1774     struct loader_scanned_icd *new_scanned_icd = NULL;
1775     uint32_t interface_vers;
1776     VkResult res = VK_SUCCESS;
1777 
1778     // This shouldn't happen, but the check is necessary because dlopen returns a handle to the main program when
1779     // filename is NULL
1780     if (filename == NULL) {
1781         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_scanned_icd_add: A NULL filename was used, skipping this ICD");
1782         res = VK_ERROR_INCOMPATIBLE_DRIVER;
1783         goto out;
1784     }
1785 
1786 // TODO implement smarter opening/closing of libraries. For now this
1787 // function leaves libraries open and loader_clear_scanned_icd_list closes them
1788 #if defined(__Fuchsia__)
1789     handle = loader_platform_open_driver(filename);
1790 #else
1791     handle = loader_platform_open_library(filename);
1792 #endif
1793     if (NULL == handle) {
1794         loader_handle_load_library_error(inst, filename, lib_status);
1795         if (lib_status && *lib_status == LOADER_LAYER_LIB_ERROR_OUT_OF_MEMORY) {
1796             res = VK_ERROR_OUT_OF_HOST_MEMORY;
1797         } else {
1798             res = VK_ERROR_INCOMPATIBLE_DRIVER;
1799         }
1800         goto out;
1801     }
1802 
1803     // Try to load the driver's exported vk_icdNegotiateLoaderICDInterfaceVersion
1804     fp_negotiate_icd_version = loader_platform_get_proc_address(handle, "vk_icdNegotiateLoaderICDInterfaceVersion");
1805 
1806     // If it isn't exported, we are dealing with either a v0, v1, or a v7 and up driver
1807     if (NULL == fp_negotiate_icd_version) {
1808         // Try to load the driver's exported vk_icdGetInstanceProcAddr - if this is a v7 or up driver, we can use it to get
1809         // the driver's vk_icdNegotiateLoaderICDInterfaceVersion function
1810         fp_get_proc_addr = loader_platform_get_proc_address(handle, "vk_icdGetInstanceProcAddr");
1811 
1812         // If we successfully loaded vk_icdGetInstanceProcAddr, try to get vk_icdNegotiateLoaderICDInterfaceVersion
1813         if (fp_get_proc_addr) {
1814             fp_negotiate_icd_version =
1815                 (PFN_vk_icdNegotiateLoaderICDInterfaceVersion)fp_get_proc_addr(NULL, "vk_icdNegotiateLoaderICDInterfaceVersion");
1816         }
1817     }
1818 
1819     // Try to negotiate the Loader and Driver Interface Versions
1820     // loader_get_icd_interface_version will check if fp_negotiate_icd_version is NULL, so we don't have to.
1821     // If it *is* NULL, that means this driver uses interface version 0 or 1
1822     if (!loader_get_icd_interface_version(fp_negotiate_icd_version, &interface_vers)) {
1823         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
1824                    "loader_scanned_icd_add: ICD %s doesn't support interface version compatible with loader, skip this ICD.",
1825                    filename);
1826         goto out;
1827     }
1828 
1829     // If we didn't already query vk_icdGetInstanceProcAddr, try now
1830     if (NULL == fp_get_proc_addr) {
1831         fp_get_proc_addr = loader_platform_get_proc_address(handle, "vk_icdGetInstanceProcAddr");
1832     }
1833 
1834     // If vk_icdGetInstanceProcAddr is NULL, this ICD is using version 0 and so we should respond accordingly.
1835     if (NULL == fp_get_proc_addr) {
1836         // Exporting vk_icdNegotiateLoaderICDInterfaceVersion but not vk_icdGetInstanceProcAddr violates Version 2's
1837         // requirements, as for Version 2 to be supported Version 1 must also be supported
1838         if (interface_vers != 0) {
1839             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
1840                        "loader_scanned_icd_add: ICD %s reports an interface version of %d but doesn't export "
1841                        "vk_icdGetInstanceProcAddr, skip this ICD.",
1842                        filename, interface_vers);
1843             goto out;
1844         }
1845         // Use deprecated interface from version 0
1846         fp_get_proc_addr = loader_platform_get_proc_address(handle, "vkGetInstanceProcAddr");
1847         if (NULL == fp_get_proc_addr) {
1848             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
1849                        "loader_scanned_icd_add: Attempt to retrieve either \'vkGetInstanceProcAddr\' or "
1850                        "\'vk_icdGetInstanceProcAddr\' from ICD %s failed.",
1851                        filename);
1852             goto out;
1853         } else {
1854             loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
1855                        "loader_scanned_icd_add: Using deprecated ICD interface of \'vkGetInstanceProcAddr\' instead of "
1856                        "\'vk_icdGetInstanceProcAddr\' for ICD %s",
1857                        filename);
1858         }
1859         fp_create_inst = loader_platform_get_proc_address(handle, "vkCreateInstance");
1860         if (NULL == fp_create_inst) {
1861             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
1862                        "loader_scanned_icd_add:  Failed querying \'vkCreateInstance\' via dlsym/LoadLibrary for ICD %s", filename);
1863             goto out;
1864         }
1865         fp_get_inst_ext_props = loader_platform_get_proc_address(handle, "vkEnumerateInstanceExtensionProperties");
1866         if (NULL == fp_get_inst_ext_props) {
1867             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
1868                        "loader_scanned_icd_add: Could not get \'vkEnumerateInstanceExtensionProperties\' via dlsym/LoadLibrary "
1869                        "for ICD %s",
1870                        filename);
1871             goto out;
1872         }
1873     } else {
1874         // vk_icdGetInstanceProcAddr was successfully found, we can assume the version is at least one
1875         // If vk_icdNegotiateLoaderICDInterfaceVersion was also found, interface_vers must be 2 or greater, so this check is
1876         // fine
1877         if (interface_vers == 0) {
1878             interface_vers = 1;
1879         }
1880 
1881         fp_create_inst = (PFN_vkCreateInstance)fp_get_proc_addr(NULL, "vkCreateInstance");
1882         if (NULL == fp_create_inst) {
1883             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
1884                        "loader_scanned_icd_add: Could not get \'vkCreateInstance\' via \'vk_icdGetInstanceProcAddr\' for ICD %s",
1885                        filename);
1886             goto out;
1887         }
1888         fp_get_inst_ext_props =
1889             (PFN_vkEnumerateInstanceExtensionProperties)fp_get_proc_addr(NULL, "vkEnumerateInstanceExtensionProperties");
1890         if (NULL == fp_get_inst_ext_props) {
1891             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
1892                        "loader_scanned_icd_add: Could not get \'vkEnumerateInstanceExtensionProperties\' via "
1893                        "\'vk_icdGetInstanceProcAddr\' for ICD %s",
1894                        filename);
1895             goto out;
1896         }
1897         // Query "vk_icdGetPhysicalDeviceProcAddr" with vk_icdGetInstanceProcAddr if the library reports interface version 7 or
1898         // greater, otherwise fallback to loading it from the platform dynamic linker
1899         if (interface_vers >= 7) {
1900             fp_get_phys_dev_proc_addr =
1901                 (PFN_vk_icdGetPhysicalDeviceProcAddr)fp_get_proc_addr(NULL, "vk_icdGetPhysicalDeviceProcAddr");
1902         }
1903         if (NULL == fp_get_phys_dev_proc_addr && interface_vers >= 3) {
1904             fp_get_phys_dev_proc_addr = loader_platform_get_proc_address(handle, "vk_icdGetPhysicalDeviceProcAddr");
1905         }
1906 #if defined(VK_USE_PLATFORM_WIN32_KHR)
1907         // Query "vk_icdEnumerateAdapterPhysicalDevices" with vk_icdGetInstanceProcAddr if the library reports interface version
1908         // 7 or greater, otherwise fallback to loading it from the platform dynamic linker
1909         if (interface_vers >= 7) {
1910             fp_enum_dxgi_adapter_phys_devs =
1911                 (PFN_vk_icdEnumerateAdapterPhysicalDevices)fp_get_proc_addr(NULL, "vk_icdEnumerateAdapterPhysicalDevices");
1912         }
1913         if (NULL == fp_enum_dxgi_adapter_phys_devs && interface_vers >= 6) {
1914             fp_enum_dxgi_adapter_phys_devs = loader_platform_get_proc_address(handle, "vk_icdEnumerateAdapterPhysicalDevices");
1915         }
1916 #endif
1917     }
1918 
1919     // check for enough capacity
1920     if ((icd_tramp_list->count * sizeof(struct loader_scanned_icd)) >= icd_tramp_list->capacity) {
1921         void *new_ptr = loader_instance_heap_realloc(inst, icd_tramp_list->scanned_list, icd_tramp_list->capacity,
1922                                                      icd_tramp_list->capacity * 2, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
1923         if (NULL == new_ptr) {
1924             res = VK_ERROR_OUT_OF_HOST_MEMORY;
1925             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_scanned_icd_add: Realloc failed on icd library list for ICD %s",
1926                        filename);
1927             goto out;
1928         }
1929         icd_tramp_list->scanned_list = new_ptr;
1930 
1931         // double capacity
1932         icd_tramp_list->capacity *= 2;
1933     }
1934 
1935     loader_api_version api_version_struct = loader_make_version(api_version);
1936     if (interface_vers <= 4 && loader_check_version_meets_required(LOADER_VERSION_1_1_0, api_version_struct)) {
1937         loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
1938                    "loader_scanned_icd_add: Driver %s supports Vulkan %u.%u, but only supports loader interface version %u."
1939                    " Interface version 5 or newer required to support this version of Vulkan (Policy #LDP_DRIVER_7)",
1940                    filename, api_version_struct.major, api_version_struct.minor, interface_vers);
1941     }
1942 
1943     new_scanned_icd = &(icd_tramp_list->scanned_list[icd_tramp_list->count]);
1944     new_scanned_icd->handle = handle;
1945     new_scanned_icd->api_version = api_version;
1946     new_scanned_icd->GetInstanceProcAddr = fp_get_proc_addr;
1947     new_scanned_icd->GetPhysicalDeviceProcAddr = fp_get_phys_dev_proc_addr;
1948     new_scanned_icd->EnumerateInstanceExtensionProperties = fp_get_inst_ext_props;
1949     new_scanned_icd->CreateInstance = fp_create_inst;
1950 #if defined(VK_USE_PLATFORM_WIN32_KHR)
1951     new_scanned_icd->EnumerateAdapterPhysicalDevices = fp_enum_dxgi_adapter_phys_devs;
1952 #endif
1953     new_scanned_icd->interface_version = interface_vers;
1954 
1955     res = loader_copy_to_new_str(inst, filename, &new_scanned_icd->lib_name);
1956     if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
1957         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_scanned_icd_add: Out of memory can't add ICD %s", filename);
1958         goto out;
1959     }
1960     icd_tramp_list->count++;
1961 
1962 out:
1963 
1964     return res;
1965 }
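// Rough summary of the export probing performed above (the loader/driver interface documentation is
// authoritative): only vkGetInstanceProcAddr exported -> version 0 (deprecated); vk_icdGetInstanceProcAddr
// exported -> at least version 1; vk_icdNegotiateLoaderICDInterfaceVersion available -> negotiated
// version (2 or higher); vk_icdGetPhysicalDeviceProcAddr is looked up for version 3 or greater,
// vk_icdEnumerateAdapterPhysicalDevices (Windows) for version 6 or greater, and from version 7 both
// may be obtained through vk_icdGetInstanceProcAddr instead of direct exports.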
1966 
1967 #if defined(_WIN32)
1968 BOOL __stdcall loader_initialize(PINIT_ONCE InitOnce, PVOID Parameter, PVOID *Context) {
1969     (void)InitOnce;
1970     (void)Parameter;
1971     (void)Context;
1972 #else
1973 void loader_initialize(void) {
1974 #endif
1975     loader_platform_thread_create_mutex(&loader_lock);
1976     loader_platform_thread_create_mutex(&loader_preload_icd_lock);
1977     loader_platform_thread_create_mutex(&loader_global_instance_list_lock);
1978     init_global_loader_settings();
1979 
1980     // initialize logging
1981     loader_init_global_debug_level();
1982 #if defined(_WIN32)
1983     windows_initialization();
1984 #endif
1985 
1986     loader_api_version version = loader_make_full_version(VK_HEADER_VERSION_COMPLETE);
1987     loader_log(NULL, VULKAN_LOADER_INFO_BIT, 0, "Vulkan Loader Version %d.%d.%d", version.major, version.minor, version.patch);
1988 
1989 #if defined(GIT_BRANCH_NAME) && defined(GIT_TAG_INFO)
1990     loader_log(NULL, VULKAN_LOADER_INFO_BIT, 0, "[Vulkan Loader Git - Tag: " GIT_BRANCH_NAME ", Branch/Commit: " GIT_TAG_INFO "]");
1991 #endif
1992 
1993     char *loader_disable_dynamic_library_unloading_env_var = loader_getenv("VK_LOADER_DISABLE_DYNAMIC_LIBRARY_UNLOADING", NULL);
1994     if (loader_disable_dynamic_library_unloading_env_var &&
1995         0 == strncmp(loader_disable_dynamic_library_unloading_env_var, "1", 2)) {
1996         loader_disable_dynamic_library_unloading = true;
1997         loader_log(NULL, VULKAN_LOADER_WARN_BIT, 0, "Vulkan Loader: library unloading is disabled");
1998     } else {
1999         loader_disable_dynamic_library_unloading = false;
2000     }
2001     loader_free_getenv(loader_disable_dynamic_library_unloading_env_var, NULL);
2002 #if defined(LOADER_USE_UNSAFE_FILE_SEARCH)
2003     loader_log(NULL, VULKAN_LOADER_WARN_BIT, 0, "Vulkan Loader: unsafe searching is enabled");
2004 #endif
2005 #if defined(_WIN32)
2006     return TRUE;
2007 #endif
2008 }
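// Example of the toggle handled above: running an application with
// VK_LOADER_DISABLE_DYNAMIC_LIBRARY_UNLOADING=1 in the environment keeps layer and driver libraries
// loaded for the lifetime of the process; only the exact value "1" is honored.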
2009 
2010 void loader_release(void) {
2011     // Guarantee release of the preloaded ICD libraries. This may have already been called in vkDestroyInstance.
2012     loader_unload_preloaded_icds();
2013 
2014     // release mutexes
2015     teardown_global_loader_settings();
2016     loader_platform_thread_delete_mutex(&loader_lock);
2017     loader_platform_thread_delete_mutex(&loader_preload_icd_lock);
2018     loader_platform_thread_delete_mutex(&loader_global_instance_list_lock);
2019 }
2020 
2021 // Preload the ICD libraries that are likely to be needed so we don't repeatedly load/unload them later
2022 void loader_preload_icds(void) {
2023     loader_platform_thread_lock_mutex(&loader_preload_icd_lock);
2024 
2025     // Already preloaded, skip loading again.
2026     if (preloaded_icds.scanned_list != NULL) {
2027         loader_platform_thread_unlock_mutex(&loader_preload_icd_lock);
2028         return;
2029     }
2030 
2031     VkResult result = loader_icd_scan(NULL, &preloaded_icds, NULL, NULL);
2032     if (result != VK_SUCCESS) {
2033         loader_clear_scanned_icd_list(NULL, &preloaded_icds);
2034     }
2035     loader_platform_thread_unlock_mutex(&loader_preload_icd_lock);
2036 }
2037 
2038 // Release the ICD libraries that were preloaded
2039 void loader_unload_preloaded_icds(void) {
2040     loader_platform_thread_lock_mutex(&loader_preload_icd_lock);
2041     loader_clear_scanned_icd_list(NULL, &preloaded_icds);
2042     loader_platform_thread_unlock_mutex(&loader_preload_icd_lock);
2043 }
2044 
2045 #if !defined(_WIN32)
2046 __attribute__((constructor)) void loader_init_library(void) { loader_initialize(); }
2047 
2048 __attribute__((destructor)) void loader_free_library(void) { loader_release(); }
2049 #endif
2050 
2051 // Get the next file or directory name from a PATH_SEPARATOR-delimited list of paths
2052 // (such as a search path or registry key path).
2053 //
2054 // \returns
2055 // A pointer to the first character of the next path, or a pointer to the terminating NUL if this was the last entry (NULL only if path is NULL).
2056 // Note: the input string is modified in place (each separator is overwritten with '\0').  PASS IN A COPY!
2057 char *loader_get_next_path(char *path) {
2058     uint32_t len;
2059     char *next;
2060 
2061     if (path == NULL) return NULL;
2062     next = strchr(path, PATH_SEPARATOR);
2063     if (next == NULL) {
2064         len = (uint32_t)strlen(path);
2065         next = path + len;
2066     } else {
2067         *next = '\0';
2068         next++;
2069     }
2070 
2071     return next;
2072 }
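// Illustrative sketch (hypothetical directory list, not part of the loader): walking a
// PATH_SEPARATOR-delimited list with loader_get_next_path(). Because the helper overwrites
// each separator with '\0', the caller must hand it a writable copy of the list.
static void example_walk_path_list(void) {
    char path_list[] = "/etc/vulkan/icd.d:/usr/share/vulkan/icd.d";  // ':' is PATH_SEPARATOR on Unix-like platforms
    char *cur_path = path_list;
    while (NULL != cur_path && *cur_path != '\0') {
        char *next_path = loader_get_next_path(cur_path);  // cur_path is now a single NUL-terminated entry
        // ... use cur_path as one directory here ...
        cur_path = next_path;
    }
}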
2073 
2074 /* Combines a JSON manifest's library_path with the directory of the manifest to produce the full path of the library.
2075  * The output is stored in out_fullpath as a newly allocated string - it is the caller's responsibility to free it.
2076  * The output is the directory portion of manifest_file_path concatenated with library_path.
2077  * If library_path is an absolute path, or a bare file name containing no directory symbol, it is returned unmodified.
2078  *
2079  * This function takes ownership of library_path - the caller does not need to free it.
2080  */
2081 VkResult combine_manifest_directory_and_library_path(const struct loader_instance *inst, char *library_path,
2082                                                      const char *manifest_file_path, char **out_fullpath) {
2083     assert(library_path && manifest_file_path && out_fullpath);
2084     if (loader_platform_is_path_absolute(library_path)) {
2085         *out_fullpath = library_path;
2086         return VK_SUCCESS;
2087     }
2088     VkResult res = VK_SUCCESS;
2089 
2090     size_t library_path_len = strlen(library_path);
2091     size_t manifest_file_path_str_len = strlen(manifest_file_path);
2092     bool library_path_contains_directory_symbol = false;
2093     for (size_t i = 0; i < library_path_len; i++) {
2094         if (library_path[i] == DIRECTORY_SYMBOL) {
2095             library_path_contains_directory_symbol = true;
2096             break;
2097         }
2098     }
2099     // Means that the library_path is neither absolute nor relative - thus we should not modify it at all
2100     if (!library_path_contains_directory_symbol) {
2101         *out_fullpath = library_path;
2102         return VK_SUCCESS;
2103     }
2104     // must include both a directory symbol and the null terminator
2105     size_t new_str_len = library_path_len + manifest_file_path_str_len + 1 + 1;
2106 
2107     *out_fullpath = loader_instance_heap_calloc(inst, new_str_len, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
2108     if (NULL == *out_fullpath) {
2109         res = VK_ERROR_OUT_OF_HOST_MEMORY;
2110         goto out;
2111     }
2112     size_t cur_loc_in_out_fullpath = 0;
2113     // look for the last occurrence of DIRECTORY_SYMBOL in manifest_file_path
2114     size_t last_directory_symbol = 0;
2115     bool found_directory_symbol = false;
2116     for (size_t i = 0; i < manifest_file_path_str_len; i++) {
2117         if (manifest_file_path[i] == DIRECTORY_SYMBOL) {
2118             last_directory_symbol = i + 1;  // we want to include the symbol
2119             found_directory_symbol = true;
2120             // don't break because we want to find the last occurrence
2121         }
2122     }
2123     // Add manifest_file_path up to the last directory symbol
2124     if (found_directory_symbol) {
2125         loader_strncpy(*out_fullpath, new_str_len, manifest_file_path, last_directory_symbol);
2126         cur_loc_in_out_fullpath += last_directory_symbol;
2127     }
2128     loader_strncpy(&(*out_fullpath)[cur_loc_in_out_fullpath], new_str_len - cur_loc_in_out_fullpath, library_path,
2129                    library_path_len);
2130     cur_loc_in_out_fullpath += library_path_len + 1;
2131     (*out_fullpath)[cur_loc_in_out_fullpath] = '\0';
2132 
2133 out:
2134     loader_instance_heap_free(inst, library_path);
2135 
2136     return res;
2137 }
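// Illustrative sketch (hypothetical manifest and library names, not part of the loader):
// resolving a relative library_path against the directory of its manifest with
// combine_manifest_directory_and_library_path(). The helper takes ownership of the
// library_path allocation, so it is created with loader_copy_to_new_str() here (assumed to
// allocate from the instance heap); the caller is responsible for freeing *out_fullpath.
static VkResult example_resolve_library_path(const struct loader_instance *inst, char **out_fullpath) {
    char *library_path = NULL;
    VkResult res = loader_copy_to_new_str(inst, "../lib/libVkLayer_example.so", &library_path);
    if (VK_SUCCESS != res) {
        return res;
    }
    // Expected output: "/usr/share/vulkan/explicit_layer.d/../lib/libVkLayer_example.so"
    return combine_manifest_directory_and_library_path(inst, library_path,
                                                       "/usr/share/vulkan/explicit_layer.d/example_layer.json", out_fullpath);
}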
2138 
2139 // Given a filename (file) and a list of paths (in_dirs), try to find an existing
2140 // file in those paths.  If the filename is already a path, the given paths are not searched.
2141 //
2142 // @return - A string in out_fullpath containing either the full path that was found or the original file name.
2143 void loader_get_fullpath(const char *file, const char *in_dirs, size_t out_size, char *out_fullpath) {
2144     if (!loader_platform_is_path(file) && *in_dirs) {
2145         size_t dirs_copy_len = strlen(in_dirs) + 1;
2146         char *dirs_copy = loader_stack_alloc(dirs_copy_len);
2147         loader_strncpy(dirs_copy, dirs_copy_len, in_dirs, dirs_copy_len);
2148 
2149         // find if file exists after prepending paths in given list
2150         // for (dir = dirs_copy; *dir && (next_dir = loader_get_next_path(dir)); dir = next_dir) {
2151         char *dir = dirs_copy;
2152         char *next_dir = loader_get_next_path(dir);
2153         while (*dir && next_dir) {
2154             int path_concat_ret = snprintf(out_fullpath, out_size, "%s%c%s", dir, DIRECTORY_SYMBOL, file);
2155             if (path_concat_ret < 0) {
2156                 break;  // snprintf failed; fall back to copying the bare file name below
2157             }
2158             if (loader_platform_file_exists(out_fullpath)) {
2159                 return;
2160             }
2161             dir = next_dir;
2162             next_dir = loader_get_next_path(dir);
2163         }
2164     }
2165 
2166     (void)snprintf(out_fullpath, out_size, "%s", file);
2167 }
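// Illustrative sketch (hypothetical file and directories, not part of the loader): locating a
// manifest by name with loader_get_fullpath(). If none of the listed directories contain the
// file, out_fullpath simply receives the bare file name.
static void example_locate_manifest(void) {
    char full_path[2048];
    loader_get_fullpath("example_icd.json", "/etc/vulkan/icd.d:/usr/share/vulkan/icd.d", sizeof(full_path), full_path);
    // full_path now holds e.g. "/etc/vulkan/icd.d/example_icd.json" if that file exists.
}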
2168 
2169 // Verify that all component layers in a meta-layer are valid.
2170 // This function is potentially recursive, so we pass in an array of "already checked" flags (one per entry in
2171 // instance_layers), preventing unbounded recursion when meta-layers list each other as component layers.
2172 bool verify_meta_layer_component_layers(const struct loader_instance *inst, size_t prop_index,
2173                                         struct loader_layer_list *instance_layers, bool *already_checked_meta_layers) {
2174     struct loader_layer_properties *prop = &instance_layers->list[prop_index];
2175     loader_api_version meta_layer_version = loader_make_version(prop->info.specVersion);
2176 
2177     if (NULL == already_checked_meta_layers) {
2178         already_checked_meta_layers = loader_stack_alloc(sizeof(bool) * instance_layers->count);
2179         if (already_checked_meta_layers == NULL) {
2180             return false;
2181         }
2182         memset(already_checked_meta_layers, 0, sizeof(bool) * instance_layers->count);
2183     }
2184 
2185     // Mark this meta-layer as 'already checked' so it is not recursed into again.
2186     already_checked_meta_layers[prop_index] = true;
2187 
2188     for (uint32_t comp_layer = 0; comp_layer < prop->component_layer_names.count; comp_layer++) {
2189         struct loader_layer_properties *comp_prop =
2190             loader_find_layer_property(prop->component_layer_names.list[comp_layer], instance_layers);
2191         if (comp_prop == NULL) {
2192             loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2193                        "verify_meta_layer_component_layers: Meta-layer %s can't find component layer %s at index %d."
2194                        "  Skipping this layer.",
2195                        prop->info.layerName, prop->component_layer_names.list[comp_layer], comp_layer);
2196 
2197             return false;
2198         }
2199 
2200         // Check the version of each component layer; it must be at least the meta-layer's MAJOR.MINOR version
2201         loader_api_version comp_prop_version = loader_make_version(comp_prop->info.specVersion);
2202         if (!loader_check_version_meets_required(meta_layer_version, comp_prop_version)) {
2203             loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2204                        "verify_meta_layer_component_layers: Meta-layer uses API version %d.%d, but component "
2205                        "layer %d has API version %d.%d that is lower.  Skipping this layer.",
2206                        meta_layer_version.major, meta_layer_version.minor, comp_layer, comp_prop_version.major,
2207                        comp_prop_version.minor);
2208 
2209             return false;
2210         }
2211 
2212         // Make sure the layer isn't using its own name
2213         if (!strcmp(prop->info.layerName, prop->component_layer_names.list[comp_layer])) {
2214             loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2215                        "verify_meta_layer_component_layers: Meta-layer %s lists itself in its component layer "
2216                        "list at index %d.  Skipping this layer.",
2217                        prop->info.layerName, comp_layer);
2218 
2219             return false;
2220         }
2221         if (comp_prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER) {
2222             size_t comp_prop_index = INT32_MAX;
2223             // Make sure we haven't verified this meta layer before
2224             for (uint32_t i = 0; i < instance_layers->count; i++) {
2225                 if (strcmp(comp_prop->info.layerName, instance_layers->list[i].info.layerName) == 0) {
2226                     comp_prop_index = i;
2227                 }
2228             }
2229             if (comp_prop_index != INT32_MAX && already_checked_meta_layers[comp_prop_index]) {
2230                 loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2231                            "verify_meta_layer_component_layers: Recursive dependency between Meta-layer %s and Meta-layer %s.  "
2232                            "Skipping this layer.",
2233                            instance_layers->list[prop_index].info.layerName, comp_prop->info.layerName);
2234                 return false;
2235             }
2236 
2237             loader_log(inst, VULKAN_LOADER_INFO_BIT, 0,
2238                        "verify_meta_layer_component_layers: Adding meta-layer %s which also contains meta-layer %s",
2239                        prop->info.layerName, comp_prop->info.layerName);
2240 
2241             // If the component layer is itself a meta-layer, verify it as well.
2242             if (!verify_meta_layer_component_layers(inst, comp_prop_index, instance_layers, already_checked_meta_layers)) {
2243                 loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2244                            "Meta-layer %s component layer %s cannot find all of its component layers."
2245                            "  Skipping this layer.",
2246                            prop->info.layerName, prop->component_layer_names.list[comp_layer]);
2247                 return false;
2248             }
2249         }
2250     }
2251     // Didn't exit early so that means it passed all checks
2252     loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0,
2253                "Meta-layer \"%s\": all %d component layers appear to be valid.", prop->info.layerName,
2254                prop->component_layer_names.count);
2255 
2256     // If layer logging is on, list the internals included in the meta-layer
2257     for (uint32_t comp_layer = 0; comp_layer < prop->component_layer_names.count; comp_layer++) {
2258         loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "  [%d] %s", comp_layer, prop->component_layer_names.list[comp_layer]);
2259     }
2260     return true;
2261 }
2262 
2263 // Add any instance and device extensions from component layers to this layer
2264 // list, so that anyone querying extensions will only need to look at the meta-layer
2265 VkResult update_meta_layer_extensions_from_component_layers(const struct loader_instance *inst, struct loader_layer_properties *prop,
2266                                                         struct loader_layer_list *instance_layers) {
2267     VkResult res = VK_SUCCESS;
2268     for (uint32_t comp_layer = 0; comp_layer < prop->component_layer_names.count; comp_layer++) {
2269         struct loader_layer_properties *comp_prop =
2270             loader_find_layer_property(prop->component_layer_names.list[comp_layer], instance_layers);
2271 
2272         if (NULL != comp_prop->instance_extension_list.list) {
2273             for (uint32_t ext = 0; ext < comp_prop->instance_extension_list.count; ext++) {
2274                 loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0, "Meta-layer %s component layer %s adding instance extension %s",
2275                            prop->info.layerName, prop->component_layer_names.list[comp_layer],
2276                            comp_prop->instance_extension_list.list[ext].extensionName);
2277 
2278                 if (!has_vk_extension_property(&comp_prop->instance_extension_list.list[ext], &prop->instance_extension_list)) {
2279                     res = loader_add_to_ext_list(inst, &prop->instance_extension_list, 1,
2280                                                  &comp_prop->instance_extension_list.list[ext]);
2281                     if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
2282                         return res;
2283                     }
2284                 }
2285             }
2286         }
2287         if (NULL != comp_prop->device_extension_list.list) {
2288             for (uint32_t ext = 0; ext < comp_prop->device_extension_list.count; ext++) {
2289                 loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0, "Meta-layer %s component layer %s adding device extension %s",
2290                            prop->info.layerName, prop->component_layer_names.list[comp_layer],
2291                            comp_prop->device_extension_list.list[ext].props.extensionName);
2292 
2293                 if (!has_vk_dev_ext_property(&comp_prop->device_extension_list.list[ext].props, &prop->device_extension_list)) {
2294                     res = loader_add_to_dev_ext_list(inst, &prop->device_extension_list,
2295                                                &comp_prop->device_extension_list.list[ext].props, NULL);
2296                     if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
2297                         return res;
2298                     }
2299                 }
2300             }
2301         }
2302     }
2303     return res;
2304 }
2305 
2306 // Verify that all meta-layers in a layer list are valid.
2307 VkResult verify_all_meta_layers(struct loader_instance *inst, const struct loader_envvar_all_filters *filters,
2308                                 struct loader_layer_list *instance_layers, bool *override_layer_present) {
2309     VkResult res = VK_SUCCESS;
2310     *override_layer_present = false;
2311     for (int32_t i = 0; i < (int32_t)instance_layers->count; i++) {
2312         struct loader_layer_properties *prop = &instance_layers->list[i];
2313 
2314         // If this is a meta-layer, make sure it is valid
2315         if (prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER) {
2316             if (verify_meta_layer_component_layers(inst, i, instance_layers, NULL)) {
2317                 // The meta-layer is valid, so update its extension list to include the extensions from its component layers.
2318                 res = update_meta_layer_extensions_from_component_layers(inst, prop, instance_layers);
2319                 if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
2320                     return res;
2321                 }
2322                 if (prop->is_override && loader_implicit_layer_is_enabled(inst, filters, prop)) {
2323                     *override_layer_present = true;
2324                 }
2325             } else {
2326                 loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0,
2327                            "Removing meta-layer %s from instance layer list since it appears invalid.", prop->info.layerName);
2328 
2329                 loader_remove_layer_in_list(inst, instance_layers, i);
2330                 i--;
2331             }
2332         }
2333     }
2334     return res;
2335 }
2336 
2337 // If the path of the current executable matches an app_key_path of one of the override layers, remove all other override layers.
2338 // Otherwise, if no matching app_key was found, remove all but the global override layer, which has no app_key_path.
2339 void remove_all_non_valid_override_layers(struct loader_instance *inst, struct loader_layer_list *instance_layers) {
2340     if (instance_layers == NULL) {
2341         return;
2342     }
2343 
2344     char cur_path[1024];
2345     char *ret = loader_platform_executable_path(cur_path, 1024);
2346     if (NULL == ret) {
2347         return;
2348     }
2349     // Find out if there is an override layer with the same app_key_path as the path to the current executable.
2350     // If more than one is found, remove the duplicates and use the first layer found.
2351     // Remove any layers which aren't global and do not have an app_key_path matching the path to the current executable.
2352     bool found_active_override_layer = false;
2353     int global_layer_index = -1;
2354     for (uint32_t i = 0; i < instance_layers->count; i++) {
2355         struct loader_layer_properties *props = &instance_layers->list[i];
2356         if (strcmp(props->info.layerName, VK_OVERRIDE_LAYER_NAME) == 0) {
2357             if (props->app_key_paths.count > 0) {  // not the global layer
2358                 for (uint32_t j = 0; j < props->app_key_paths.count; j++) {
2359                     if (strcmp(props->app_key_paths.list[j], cur_path) == 0) {
2360                         if (!found_active_override_layer) {
2361                             found_active_override_layer = true;
2362                         } else {
2363                             loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
2364                                        "remove_all_non_valid_override_layers: Multiple override layers with the same path in "
2365                                        "app_keys "
2366                                        "were found. Using the first layer found");
2367 
2368                             // Remove duplicate active override layers that have the same app_key_path
2369                             loader_remove_layer_in_list(inst, instance_layers, i);
2370                             i--;
2371                         }
2372                     }
2373                 }
2374                 if (!found_active_override_layer) {
2375                     loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0,
2376                                "--Override layer found but not used because app \'%s\' is not in \'app_keys\' list!", cur_path);
2377 
2378                     // Remove non-global override layers that don't have an app_key that matches cur_path
2379                     loader_remove_layer_in_list(inst, instance_layers, i);
2380                     i--;
2381                 }
2382             } else {
2383                 if (global_layer_index == -1) {
2384                     global_layer_index = i;
2385                 } else {
2386                     loader_log(
2387                         inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
2388                         "remove_all_non_valid_override_layers: Multiple global override layers found. Using the first global "
2389                         "layer found");
2390                     loader_remove_layer_in_list(inst, instance_layers, i);
2391                     i--;
2392                 }
2393             }
2394         }
2395     }
2396     // Remove the global layer if a layer with the same app_key_path as the path to the current executable was found
2397     if (found_active_override_layer && global_layer_index >= 0) {
2398         loader_remove_layer_in_list(inst, instance_layers, global_layer_index);
2399     }
2400     // Should be at most 1 override layer in the list now.
2401     if (found_active_override_layer) {
2402         loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Using the override layer for app key %s", cur_path);
2403     } else if (global_layer_index >= 0) {
2404         loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Using the global override layer");
2405     }
2406 }
2407 
2408 /* The following are required in the "layer" object:
2409  * "name"
2410  * "type"
2411  * (for non-meta layers) "library_path"
2412  * (for meta layers) "component_layers"
2413  * "api_version"
2414  * "implementation_version"
2415  * "description"
2416  * (for implicit layers) "disable_environment"
2417  */
2418 
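// Illustrative sketch (hypothetical layer, not part of the loader): the minimal shape of the
// "layer" object that loader_read_layer_json() below accepts for an explicit, non-meta layer.
// Implicit layers additionally require "disable_environment", and meta-layers replace
// "library_path" with "component_layers".
static const char example_explicit_layer_manifest[] =
    "{\n"
    "    \"file_format_version\": \"1.2.0\",\n"
    "    \"layer\": {\n"
    "        \"name\": \"VK_LAYER_EXAMPLE_sketch\",\n"
    "        \"type\": \"INSTANCE\",\n"
    "        \"library_path\": \"./libVkLayer_example_sketch.so\",\n"
    "        \"api_version\": \"1.3.0\",\n"
    "        \"implementation_version\": \"1\",\n"
    "        \"description\": \"Hypothetical layer used only to illustrate the required fields\"\n"
    "    }\n"
    "}\n";
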
2419 VkResult loader_read_layer_json(const struct loader_instance *inst, struct loader_layer_list *layer_instance_list,
2420                                 cJSON *layer_node, loader_api_version version, bool is_implicit, char *filename) {
2421     assert(layer_instance_list);
2422     char *library_path = NULL;
2423     VkResult result = VK_SUCCESS;
2424     struct loader_layer_properties props = {0};
2425 
2426     result = loader_copy_to_new_str(inst, filename, &props.manifest_file_name);
2427     if (result == VK_ERROR_OUT_OF_HOST_MEMORY) {
2428         goto out;
2429     }
2430 
2431     // Parse name
2432 
2433     result = loader_parse_json_string_to_existing_str(layer_node, "name", VK_MAX_EXTENSION_NAME_SIZE, props.info.layerName);
2434     if (VK_ERROR_INITIALIZATION_FAILED == result) {
2435         loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2436                    "Layer located at %s didn't find required layer value \"name\" in manifest JSON file, skipping this layer",
2437                    filename);
2438         goto out;
2439     }
2440 
2441     // Check if this layer's name matches the override layer name, set is_override to true if so.
2442     if (!strcmp(props.info.layerName, VK_OVERRIDE_LAYER_NAME)) {
2443         props.is_override = true;
2444     }
2445 
2446     if (0 != strncmp(props.info.layerName, "VK_LAYER_", 9)) {
2447         loader_log(inst, VULKAN_LOADER_WARN_BIT, 0, "Layer name %s does not conform to naming standard (Policy #LLP_LAYER_3)",
2448                    props.info.layerName);
2449     }
2450 
2451     // Parse type
2452     char *type = loader_cJSON_GetStringValue(loader_cJSON_GetObjectItem(layer_node, "type"));
2453     if (NULL == type) {
2454         loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2455                    "Layer located at %s didn't find required layer value \"type\" in manifest JSON file, skipping this layer",
2456                    filename);
             result = VK_ERROR_INITIALIZATION_FAILED;
2457         goto out;
2458     }
2459 
2460     // Check the layer type and reject deprecated device layers
2461     if (!strcmp(type, "DEVICE")) {
2462         loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Device layers are deprecated. Skipping layer %s",
2463                    props.info.layerName);
2464         result = VK_ERROR_INITIALIZATION_FAILED;
2465         goto out;
2466     }
2467 
2468     // Allow either GLOBAL or INSTANCE type interchangeably to handle layers that must work with older loaders
2469     if (!strcmp(type, "INSTANCE") || !strcmp(type, "GLOBAL")) {
2470         props.type_flags = VK_LAYER_TYPE_FLAG_INSTANCE_LAYER;
2471         if (!is_implicit) {
2472             props.type_flags |= VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER;
2473         }
2474     } else {
2475         result = VK_ERROR_INITIALIZATION_FAILED;
2476         goto out;
2477     }
2478 
2479     // Parse api_version
2480     char *api_version = loader_cJSON_GetStringValue(loader_cJSON_GetObjectItem(layer_node, "api_version"));
2481     if (NULL == api_version) {
2482         loader_log(
2483             inst, VULKAN_LOADER_WARN_BIT, 0,
2484             "Layer located at %s didn't find required layer value \"api_version\" in manifest JSON file, skipping this layer",
2485             filename);
             result = VK_ERROR_INITIALIZATION_FAILED;
2486         goto out;
2487     }
2488 
2489     props.info.specVersion = loader_parse_version_string(api_version);
2490 
2491     // Make sure the layer's manifest doesn't contain a non-zero variant value
2492     if (VK_API_VERSION_VARIANT(props.info.specVersion) != 0) {
2493         loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0,
2494                    "Layer \"%s\" has an \'api_version\' field which contains a non-zero variant value of %d. "
2495                    " Skipping Layer.",
2496                    props.info.layerName, VK_API_VERSION_VARIANT(props.info.specVersion));
2497         result = VK_ERROR_INITIALIZATION_FAILED;
2498         goto out;
2499     }
2500 
2501     // Parse implementation_version
2502     char *implementation_version = loader_cJSON_GetStringValue(loader_cJSON_GetObjectItem(layer_node, "implementation_version"));
2503     if (NULL == implementation_version) {
2504         loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2505                    "Layer located at %s didn't find required layer value \"implementation_version\" in manifest JSON file, "
2506                    "skipping this layer",
2507                    filename);
             result = VK_ERROR_INITIALIZATION_FAILED;
2508         goto out;
2509     }
2510     props.info.implementationVersion = atoi(implementation_version);
2511 
2512     // Parse description
2513 
2514     result =
2515         loader_parse_json_string_to_existing_str(layer_node, "description", VK_MAX_EXTENSION_NAME_SIZE, props.info.description);
2516     if (VK_ERROR_INITIALIZATION_FAILED == result) {
2517         loader_log(
2518             inst, VULKAN_LOADER_WARN_BIT, 0,
2519             "Layer located at %s didn't find required layer value \"description\" in manifest JSON file, skipping this layer",
2520             filename);
2521         goto out;
2522     }
2523 
2524     // Parse library_path
2525 
2526     // library_path is only required when component_layers is not defined
2527     result = loader_parse_json_string(layer_node, "library_path", &library_path);
2528     if (result == VK_ERROR_OUT_OF_HOST_MEMORY) {
2529         loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2530                    "Skipping layer \"%s\" due to problem accessing the library_path value in the manifest JSON file",
2531                    props.info.layerName);
2532         result = VK_ERROR_OUT_OF_HOST_MEMORY;
2533         goto out;
2534     }
2535     if (NULL != library_path) {
2536         if (NULL != loader_cJSON_GetObjectItem(layer_node, "component_layers")) {
2537             loader_log(
2538                 inst, VULKAN_LOADER_WARN_BIT, 0,
2539                 "Layer \"%s\" contains meta-layer-specific component_layers, but also defines a library_path.  The two are not "
2540                 "compatible, so skipping this layer",
2541                 props.info.layerName);
2542             result = VK_ERROR_INITIALIZATION_FAILED;
2543             loader_instance_heap_free(inst, library_path);
2544             goto out;
2545         }
2546 
2547         // This function takes ownership of library_path - so we don't need to clean it up
2548         result = combine_manifest_directory_and_library_path(inst, library_path, filename, &props.lib_name);
2549         if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2550     }
2551 
2552     // Parse component_layers
2553 
2554     if (NULL == library_path) {
2555         if (!loader_check_version_meets_required(LOADER_VERSION_1_1_0, version)) {
2556             loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2557                        "Layer \"%s\" contains meta-layer-specific component_layers, but using older JSON file version.",
2558                        props.info.layerName);
2559         }
2560 
2561         result = loader_parse_json_array_of_strings(inst, layer_node, "component_layers", &(props.component_layer_names));
2562         if (VK_ERROR_OUT_OF_HOST_MEMORY == result) {
2563             goto out;
2564         }
2565         if (VK_ERROR_INITIALIZATION_FAILED == result) {
2566             loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2567                        "Layer \"%s\" is missing both library_path and component_layers fields.  One or the other MUST be defined.  "
2568                        "Skipping this layer",
2569                        props.info.layerName);
2570             goto out;
2571         }
2572         // This is now, officially, a meta-layer
2573         props.type_flags |= VK_LAYER_TYPE_FLAG_META_LAYER;
2574         loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Encountered meta-layer \"%s\"",
2575                    props.info.layerName);
2576     }
2577 
2578     // Parse blacklisted_layers
2579 
2580     if (props.is_override) {
2581         result = loader_parse_json_array_of_strings(inst, layer_node, "blacklisted_layers", &(props.blacklist_layer_names));
2582         if (VK_ERROR_OUT_OF_HOST_MEMORY == result) {
2583             goto out;
2584         }
2585     }
2586 
2587     // Parse override_paths
2588 
2589     result = loader_parse_json_array_of_strings(inst, layer_node, "override_paths", &(props.override_paths));
2590     if (VK_ERROR_OUT_OF_HOST_MEMORY == result) {
2591         goto out;
2592     }
2593     if (NULL != props.override_paths.list && !loader_check_version_meets_required(loader_combine_version(1, 1, 0), version)) {
2594         loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2595                    "Layer \"%s\" contains meta-layer-specific override paths, but using older JSON file version.",
2596                    props.info.layerName);
2597     }
2598 
2599     // Parse disable_environment
2600 
2601     if (is_implicit) {
2602         cJSON *disable_environment = loader_cJSON_GetObjectItem(layer_node, "disable_environment");
2603         if (disable_environment == NULL) {
2604             loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2605                        "Layer \"%s\" doesn't contain required layer object disable_environment in the manifest JSON file, skipping "
2606                        "this layer",
2607                        props.info.layerName);
2608             result = VK_ERROR_INITIALIZATION_FAILED;
2609             goto out;
2610         }
2611 
2612         if (!disable_environment->child || disable_environment->child->type != cJSON_String ||
2613             !disable_environment->child->string || !disable_environment->child->valuestring) {
2614             loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2615                        "Layer \"%s\" doesn't contain required child value in object disable_environment in the manifest JSON file, "
2616                        "skipping this layer (Policy #LLP_LAYER_9)",
2617                        props.info.layerName);
2618             result = VK_ERROR_INITIALIZATION_FAILED;
2619             goto out;
2620         }
2621         result = loader_copy_to_new_str(inst, disable_environment->child->string, &(props.disable_env_var.name));
2622         if (VK_SUCCESS != result) goto out;
2623         result = loader_copy_to_new_str(inst, disable_environment->child->valuestring, &(props.disable_env_var.value));
2624         if (VK_SUCCESS != result) goto out;
2625     }
2626 
2627     // Now get all optional items and objects and put in list:
2628     // functions
2629     // instance_extensions
2630     // device_extensions
2631     // enable_environment (implicit layers only)
2632     // library_arch
2633 
2634     // Layer interface functions
2635     //    vkGetInstanceProcAddr
2636     //    vkGetDeviceProcAddr
2637     //    vkNegotiateLoaderLayerInterfaceVersion (starting with JSON file 1.1.0)
2638     cJSON *functions = loader_cJSON_GetObjectItem(layer_node, "functions");
2639     if (functions != NULL) {
2640         if (loader_check_version_meets_required(loader_combine_version(1, 1, 0), version)) {
2641             result = loader_parse_json_string(functions, "vkNegotiateLoaderLayerInterfaceVersion",
2642                                               &props.functions.str_negotiate_interface);
2643             if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2644         }
2645         result = loader_parse_json_string(functions, "vkGetInstanceProcAddr", &props.functions.str_gipa);
2646         if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2647 
2648         if (NULL == props.functions.str_negotiate_interface && props.functions.str_gipa &&
2649             loader_check_version_meets_required(loader_combine_version(1, 1, 0), version)) {
2650             loader_log(inst, VULKAN_LOADER_INFO_BIT, 0,
2651                        "Layer \"%s\" using deprecated \'vkGetInstanceProcAddr\' tag which was deprecated starting with JSON "
2652                        "file version 1.1.0. The new vkNegotiateLoaderLayerInterfaceVersion function is preferred, though for "
2653                        "compatibility reasons it may be desirable to continue using the deprecated tag.",
2654                        props.info.layerName);
2655         }
2656 
2657         result = loader_parse_json_string(functions, "vkGetDeviceProcAddr", &props.functions.str_gdpa);
2658         if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2659 
2660         if (NULL == props.functions.str_negotiate_interface && props.functions.str_gdpa &&
2661             loader_check_version_meets_required(loader_combine_version(1, 1, 0), version)) {
2662             loader_log(inst, VULKAN_LOADER_INFO_BIT, 0,
2663                        "Layer \"%s\" using deprecated \'vkGetDeviceProcAddr\' tag which was deprecated starting with JSON "
2664                        "file version 1.1.0. The new vkNegotiateLoaderLayerInterfaceVersion function is preferred, though for "
2665                        "compatibility reasons it may be desirable to continue using the deprecated tag.",
2666                        props.info.layerName);
2667         }
2668     }
2669 
2670     // instance_extensions
2671     //   array of {
2672     //     name
2673     //     spec_version
2674     //   }
2675 
2676     cJSON *instance_extensions = loader_cJSON_GetObjectItem(layer_node, "instance_extensions");
2677     if (instance_extensions != NULL && instance_extensions->type == cJSON_Array) {
2678         cJSON *ext_item = NULL;
2679         cJSON_ArrayForEach(ext_item, instance_extensions) {
2680             if (ext_item->type != cJSON_Object) {
2681                 continue;
2682             }
2683 
2684             VkExtensionProperties ext_prop = {0};
2685             result = loader_parse_json_string_to_existing_str(ext_item, "name", VK_MAX_EXTENSION_NAME_SIZE, ext_prop.extensionName);
2686             if (result == VK_ERROR_INITIALIZATION_FAILED) {
2687                 continue;
2688             }
2689             char *spec_version = NULL;
2690             result = loader_parse_json_string(ext_item, "spec_version", &spec_version);
2691             if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2692             if (NULL != spec_version) {
2693                 ext_prop.specVersion = atoi(spec_version);
2694             }
2695             loader_instance_heap_free(inst, spec_version);
2696             bool ext_unsupported = wsi_unsupported_instance_extension(&ext_prop);
2697             if (!ext_unsupported) {
2698                 loader_add_to_ext_list(inst, &props.instance_extension_list, 1, &ext_prop);
2699             }
2700         }
2701     }
2702 
2703     // device_extensions
2704     //   array of {
2705     //     name
2706     //     spec_version
2707     //     entrypoints
2708     //   }
2709     cJSON *device_extensions = loader_cJSON_GetObjectItem(layer_node, "device_extensions");
2710     if (device_extensions != NULL && device_extensions->type == cJSON_Array) {
2711         cJSON *ext_item = NULL;
2712         cJSON_ArrayForEach(ext_item, device_extensions) {
2713             if (ext_item->type != cJSON_Object) {
2714                 continue;
2715             }
2716 
2717             VkExtensionProperties ext_prop = {0};
2718             result = loader_parse_json_string_to_existing_str(ext_item, "name", VK_MAX_EXTENSION_NAME_SIZE, ext_prop.extensionName);
2719             if (result == VK_ERROR_INITIALIZATION_FAILED) {
2720                 continue;
2721             }
2722 
2723             char *spec_version = NULL;
2724             result = loader_parse_json_string(ext_item, "spec_version", &spec_version);
2725             if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2726             if (NULL != spec_version) {
2727                 ext_prop.specVersion = atoi(spec_version);
2728             }
2729             loader_instance_heap_free(inst, spec_version);
2730 
2731             cJSON *entrypoints = loader_cJSON_GetObjectItem(ext_item, "entrypoints");
2732             if (entrypoints == NULL) {
2733                 result = loader_add_to_dev_ext_list(inst, &props.device_extension_list, &ext_prop, NULL);
2734                 if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2735                 continue;
2736             }
2737 
2738             struct loader_string_list entrys = {0};
2739             result = loader_parse_json_array_of_strings(inst, ext_item, "entrypoints", &entrys);
2740             if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2741             result = loader_add_to_dev_ext_list(inst, &props.device_extension_list, &ext_prop, &entrys);
2742             if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2743         }
2744     }
2745     if (is_implicit) {
2746         cJSON *enable_environment = loader_cJSON_GetObjectItem(layer_node, "enable_environment");
2747 
2748         // enable_environment is optional
2749         if (enable_environment && enable_environment->child && enable_environment->child->type == cJSON_String &&
2750             enable_environment->child->string && enable_environment->child->valuestring) {
2751             result = loader_copy_to_new_str(inst, enable_environment->child->string, &(props.enable_env_var.name));
2752             if (VK_SUCCESS != result) goto out;
2753             result = loader_copy_to_new_str(inst, enable_environment->child->valuestring, &(props.enable_env_var.value));
2754             if (VK_SUCCESS != result) goto out;
2755         }
2756     }
2757 
2758     // Read in the pre-instance stuff
2759     cJSON *pre_instance = loader_cJSON_GetObjectItem(layer_node, "pre_instance_functions");
2760     if (NULL != pre_instance) {
2761         // pre_instance_functions are supported starting with manifest version 1.1.2
2762         if (!loader_check_version_meets_required(loader_combine_version(1, 1, 2), version)) {
2763             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
2764                        "Found pre_instance_functions section in layer from \"%s\". This section is only valid in manifest version "
2765                        "1.1.2 or later. The section will be ignored",
2766                        filename);
2767         } else if (!is_implicit) {
2768             loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2769                        "Found pre_instance_functions section in explicit layer from \"%s\". This section is only valid in implicit "
2770                        "layers. The section will be ignored",
2771                        filename);
2772         } else {
2773             result = loader_parse_json_string(pre_instance, "vkEnumerateInstanceExtensionProperties",
2774                                               &props.pre_instance_functions.enumerate_instance_extension_properties);
2775             if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2776 
2777             result = loader_parse_json_string(pre_instance, "vkEnumerateInstanceLayerProperties",
2778                                               &props.pre_instance_functions.enumerate_instance_layer_properties);
2779             if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2780 
2781             result = loader_parse_json_string(pre_instance, "vkEnumerateInstanceVersion",
2782                                               &props.pre_instance_functions.enumerate_instance_version);
2783             if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2784         }
2785     }
2786 
2787     if (loader_cJSON_GetObjectItem(layer_node, "app_keys")) {
2788         if (!props.is_override) {
2789             loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
2790                        "Layer %s contains app_keys, but app_keys can only be provided by the override meta-layer. "
2791                        "These will be ignored.",
2792                        props.info.layerName);
2793         }
2794 
2795         result = loader_parse_json_array_of_strings(inst, layer_node, "app_keys", &props.app_key_paths);
2796         if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2797     }
2798 
2799     char *library_arch = loader_cJSON_GetStringValue(loader_cJSON_GetObjectItem(layer_node, "library_arch"));
2800     if (NULL != library_arch) {
2801         if ((strncmp(library_arch, "32", 2) == 0 && sizeof(void *) != 4) ||
2802             (strncmp(library_arch, "64", 2) == 0 && sizeof(void *) != 8)) {
2803             loader_log(inst, VULKAN_LOADER_INFO_BIT, 0,
2804                        "The library architecture in layer %s doesn't match the current running architecture, skipping this layer",
2805                        filename);
2806             result = VK_ERROR_INITIALIZATION_FAILED;
2807             goto out;
2808         }
2809     }
2810 
2811     result = VK_SUCCESS;
2812 
2813 out:
2814     // Try to append the layer property
2815     if (VK_SUCCESS == result) {
2816         result = loader_append_layer_property(inst, layer_instance_list, &props);
2817     }
2818     // If appending fails - free all the memory allocated in it
2819     if (VK_SUCCESS != result) {
2820         loader_free_layer_properties(inst, &props);
2821     }
2822     return result;
2823 }
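// Illustrative sketch (hypothetical contents, not part of the loader): an implicit meta-layer
// manifest in the style of the override layer, exercising the optional fields parsed above
// (component_layers, blacklisted_layers, override_paths, disable_environment and app_keys).
static const char example_override_meta_layer_manifest[] =
    "{\n"
    "    \"file_format_version\": \"1.2.0\",\n"
    "    \"layer\": {\n"
    "        \"name\": \"VK_LAYER_LUNARG_override\",\n"
    "        \"type\": \"GLOBAL\",\n"
    "        \"api_version\": \"1.3.0\",\n"
    "        \"implementation_version\": \"1\",\n"
    "        \"description\": \"Hypothetical override meta-layer\",\n"
    "        \"component_layers\": [\"VK_LAYER_EXAMPLE_sketch\"],\n"
    "        \"blacklisted_layers\": [\"VK_LAYER_EXAMPLE_blocked\"],\n"
    "        \"override_paths\": [\"/home/user/example_layers\"],\n"
    "        \"disable_environment\": {\"DISABLE_EXAMPLE_OVERRIDE_LAYER\": \"1\"},\n"
    "        \"app_keys\": [\"/usr/bin/example_app\"]\n"
    "    }\n"
    "}\n";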
2824 
2825 bool is_valid_layer_json_version(const loader_api_version *layer_json) {
2826     // Supported versions are: 1.0.0, 1.0.1, 1.1.0 - 1.1.2, and 1.2.0 - 1.2.1.
2827     if ((layer_json->major == 1 && layer_json->minor == 2 && layer_json->patch < 2) ||
2828         (layer_json->major == 1 && layer_json->minor == 1 && layer_json->patch < 3) ||
2829         (layer_json->major == 1 && layer_json->minor == 0 && layer_json->patch < 2)) {
2830         return true;
2831     }
2832     return false;
2833 }
2834 
2835 // Given a cJSON struct (json) of the top level JSON object from a layer manifest
2836 // file, add an entry to the layer_list. The layer_properties of the new list
2837 // entry are filled out from the input cJSON object.
2838 //
2839 // \returns
2840 // A VkResult indicating whether an entry was successfully parsed and added.
2841 // On success, layer_list has a new, fully initialized entry.
2842 // If the json input object does not have all the required fields, no entry
2843 // is added to the list.
2844 VkResult loader_add_layer_properties(const struct loader_instance *inst, struct loader_layer_list *layer_instance_list, cJSON *json,
2845                                      bool is_implicit, char *filename) {
2846     // The following fields in a layer manifest file are required:
2847     //   - "file_format_version"
2848     //   - If more than one "layer" object are used, then the "layers" array is
2849     //     required
2850     VkResult result = VK_ERROR_INITIALIZATION_FAILED;
2851     // Make sure the top level json value is an object
2852     if (!json || json->type != cJSON_Object) {
2853         goto out;
2854     }
2855     char *file_vers = loader_cJSON_GetStringValue(loader_cJSON_GetObjectItem(json, "file_format_version"));
2856     if (NULL == file_vers) {
2857         loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
2858                    "loader_add_layer_properties: Manifest %s missing required field file_format_version", filename);
2859         goto out;
2860     }
2861 
2862     loader_log(inst, VULKAN_LOADER_INFO_BIT, 0, "Found manifest file %s (file version %s)", filename, file_vers);
2863     // Get the major/minor/patch as integers for easier comparison
2864     loader_api_version json_version = loader_make_full_version(loader_parse_version_string(file_vers));
2865 
2866     if (!is_valid_layer_json_version(&json_version)) {
2867         loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0,
2868                    "loader_add_layer_properties: %s has unknown layer manifest file version %d.%d.%d.  May cause errors.", filename,
2869                    json_version.major, json_version.minor, json_version.patch);
2870     }
2871 
2872     // If "layers" is present, read in the array of layer objects
2873     cJSON *layers_node = loader_cJSON_GetObjectItem(json, "layers");
2874     if (layers_node != NULL) {
2875         // The "layers" array is supported starting with manifest version 1.0.1
2876         if (!loader_check_version_meets_required(loader_combine_version(1, 0, 1), json_version)) {
2877             loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
2878                        "loader_add_layer_properties: \'layers\' tag not supported until file version 1.0.1, but %s is reporting "
2879                        "version %s",
2880                        filename, file_vers);
2881         }
2882         cJSON *layer_node = NULL;
2883         cJSON_ArrayForEach(layer_node, layers_node) {
2884             if (layer_node->type != cJSON_Object) {
2885                 loader_log(
2886                     inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
2887                     "loader_add_layer_properties: Array element in \"layers\" field in manifest JSON file %s is not an object.  "
2888                     "Skipping this file",
2889                     filename);
2890                 goto out;
2891             }
2892             result = loader_read_layer_json(inst, layer_instance_list, layer_node, json_version, is_implicit, filename);
2893         }
2894     } else {
2895         // Otherwise, try to read in individual layers
2896         cJSON *layer_node = loader_cJSON_GetObjectItem(json, "layer");
2897         if (layer_node == NULL) {
2898             // Don't warn if this happens to be an ICD manifest
2899             if (loader_cJSON_GetObjectItem(json, "ICD") == NULL) {
2900                 loader_log(
2901                     inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
2902                     "loader_add_layer_properties: Can not find 'layer' object in manifest JSON file %s.  Skipping this file.",
2903                     filename);
2904             }
2905             goto out;
2906         }
2907         // Loop through all "layer" objects in the file to get a count of them
2908         // first.
2909         uint16_t layer_count = 0;
2910         cJSON *tempNode = layer_node;
2911         do {
2912             tempNode = tempNode->next;
2913             layer_count++;
2914         } while (tempNode != NULL);
2915 
2916         // Throw a warning if we encounter multiple "layer" objects in file
2917         // versions newer than 1.0.0.  Having multiple objects with the same
2918         // name at the same level is actually a JSON standard violation.
2919         if (layer_count > 1 && loader_check_version_meets_required(loader_combine_version(1, 0, 1), json_version)) {
2920             loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_LAYER_BIT, 0,
2921                        "loader_add_layer_properties: Multiple 'layer' nodes are deprecated starting in file version \"1.0.1\".  "
2922                        "Please use 'layers' : [] array instead in %s.",
2923                        filename);
2924         } else {
2925             do {
2926                 result = loader_read_layer_json(inst, layer_instance_list, layer_node, json_version, is_implicit, filename);
2927                 layer_node = layer_node->next;
2928             } while (layer_node != NULL);
2929         }
2930     }
2931 
2932 out:
2933 
2934     return result;
2935 }
2936 
2937 size_t determine_data_file_path_size(const char *cur_path, size_t relative_path_size) {
2938     size_t path_size = 0;
2939 
2940     if (NULL != cur_path) {
2941         // For each folder in cur_path (detected by finding additional
2942         // path separators in the string) we need to add the relative path on
2943         // the end.  Plus, leave an additional two slots on the end for an
2944         // additional directory slash and path separator if needed
2945         path_size += strlen(cur_path) + relative_path_size + 2;
2946         for (const char *x = cur_path; *x; ++x) {
2947             if (*x == PATH_SEPARATOR) {
2948                 path_size += relative_path_size + 2;
2949             }
2950         }
2951     }
2952 
2953     return path_size;
2954 }
2955 
2956 void copy_data_file_info(const char *cur_path, const char *relative_path, size_t relative_path_size, char **output_path) {
2957     if (NULL != cur_path) {
2958         uint32_t start = 0;
2959         uint32_t stop = 0;
2960         char *cur_write = *output_path;
2961 
2962         while (cur_path[start] != '\0') {
2963             while (cur_path[start] == PATH_SEPARATOR) {
2964                 start++;
2965             }
2966             stop = start;
2967             while (cur_path[stop] != PATH_SEPARATOR && cur_path[stop] != '\0') {
2968                 stop++;
2969             }
2970             const size_t s = stop - start;
2971             if (s) {
2972                 memcpy(cur_write, &cur_path[start], s);
2973                 cur_write += s;
2974 
2975                 // If this is a specific JSON file, just add it and don't add any
2976                 // relative path or directory symbol to it.
2977                 if (!is_json(cur_write - 5, s)) {
2978                     // Add the relative directory if present.
2979                     if (relative_path_size > 0) {
2980                         // If last symbol written was not a directory symbol, add it.
2981                         if (*(cur_write - 1) != DIRECTORY_SYMBOL) {
2982                             *cur_write++ = DIRECTORY_SYMBOL;
2983                         }
2984                         memcpy(cur_write, relative_path, relative_path_size);
2985                         cur_write += relative_path_size;
2986                     }
2987                 }
2988 
2989                 *cur_write++ = PATH_SEPARATOR;
2990                 start = stop;
2991             }
2992         }
2993         *output_path = cur_write;
2994     }
2995 }
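// Illustrative sketch (hypothetical inputs, not part of the loader): how the two helpers above
// cooperate. For cur_path = "/etc/xdg:/etc" and relative_path = "vulkan/icd.d",
// determine_data_file_path_size() reserves room for every entry plus the relative path, a
// directory symbol and a path separator, and copy_data_file_info() then writes
// "/etc/xdg/vulkan/icd.d:/etc/vulkan/icd.d:" into the buffer. Entries that already name a
// ".json" file are copied through without the relative path being appended.
static size_t example_expand_search_path(char *out_buffer /* sized via determine_data_file_path_size() */) {
    const char *cur_path = "/etc/xdg:/etc";
    const char *relative_path = "vulkan/icd.d";
    char *write_ptr = out_buffer;
    copy_data_file_info(cur_path, relative_path, strlen(relative_path), &write_ptr);
    *write_ptr = '\0';  // the helper advances the write pointer but does not terminate the string
    return (size_t)(write_ptr - out_buffer);
}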
2996 
2997 // If the file found is a manifest file name, add it to the out_files manifest list.
2998 VkResult add_if_manifest_file(const struct loader_instance *inst, const char *file_name, struct loader_string_list *out_files) {
2999     VkResult vk_result = VK_SUCCESS;
3000 
3001     assert(NULL != file_name && "add_if_manifest_file: Received NULL pointer for file_name");
3002     assert(NULL != out_files && "add_if_manifest_file: Received NULL pointer for out_files");
3003 
3004     // Look for files ending with ".json" suffix
3005     size_t name_len = strlen(file_name);
3006     const char *name_suffix = file_name + name_len - 5;
3007     if (!is_json(name_suffix, name_len)) {
3008         // Use VK_INCOMPLETE to indicate an invalid name, but keep going.
3009         vk_result = VK_INCOMPLETE;
3010         goto out;
3011     }
3012 
3013     vk_result = copy_str_to_string_list(inst, out_files, file_name, name_len);
3014 
3015 out:
3016 
3017     return vk_result;
3018 }
3019 
3020 // Add any files found in the search_path.  If any path in the search path points to a specific JSON, attempt to
3021 // only open that one JSON.  Otherwise, if the path is a folder, search the folder for JSON files.
3022 VkResult add_data_files(const struct loader_instance *inst, char *search_path, struct loader_string_list *out_files,
3023                         bool use_first_found_manifest) {
3024     VkResult vk_result = VK_SUCCESS;
3025     char full_path[2048];
3026 #if !defined(_WIN32)
3027     char temp_path[2048];
3028 #endif
3029 
3030     // Now, parse the paths
3031     char *next_file = search_path;
3032     while (NULL != next_file && *next_file != '\0') {
3033         char *name = NULL;
3034         char *cur_file = next_file;
3035         next_file = loader_get_next_path(cur_file);
3036 
3037         // If this is a JSON file, try to open it directly.
3038         size_t len = strlen(cur_file);
3039         if (is_json(cur_file + len - 5, len)) {
3040 #if defined(_WIN32)
3041             name = cur_file;
3042 #elif COMMON_UNIX_PLATFORMS
3043             // Only Linux has relative paths, make a copy of location so it isn't modified
3044             size_t str_len;
3045             if (NULL != next_file) {
3046                 str_len = next_file - cur_file + 1;
3047             } else {
3048                 str_len = strlen(cur_file) + 1;
3049             }
3050             if (str_len > sizeof(temp_path)) {
3051                 loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0, "add_data_files: Path to %s too long", cur_file);
3052                 continue;
3053             }
3054             strncpy(temp_path, cur_file, str_len);
3055             name = temp_path;
3056 #else
3057 #warning add_data_files must define relative path copy for this platform
3058 #endif
3059             loader_get_fullpath(cur_file, name, sizeof(full_path), full_path);
3060             name = full_path;
3061 
3062             VkResult local_res;
3063             local_res = add_if_manifest_file(inst, name, out_files);
3064 
3065             // Incomplete means this was not a valid data file.
3066             if (local_res == VK_INCOMPLETE) {
3067                 continue;
3068             } else if (local_res != VK_SUCCESS) {
3069                 vk_result = local_res;
3070                 break;
3071             }
3072         } else {  // Otherwise, treat it as a directory
3073             DIR *dir_stream = loader_opendir(inst, cur_file);
3074             if (NULL == dir_stream) {
3075                 continue;
3076             }
3077             while (1) {
3078                 errno = 0;
3079                 struct dirent *dir_entry = readdir(dir_stream);
3080 #if !defined(WIN32)  // Windows doesn't use readdir, don't check errors on functions which aren't called
3081                 if (errno != 0) {
3082                     loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "readdir failed with %d: %s", errno, strerror(errno));
3083                     break;
3084                 }
3085 #endif
3086                 if (NULL == dir_entry) {
3087                     break;
3088                 }
3089 
3090                 name = &(dir_entry->d_name[0]);
3091                 loader_get_fullpath(name, cur_file, sizeof(full_path), full_path);
3092                 name = full_path;
3093 
3094                 VkResult local_res;
3095                 local_res = add_if_manifest_file(inst, name, out_files);
3096 
3097                 // Incomplete means this was not a valid data file.
3098                 if (local_res == VK_INCOMPLETE) {
3099                     continue;
3100                 } else if (local_res != VK_SUCCESS) {
3101                     vk_result = local_res;
3102                     break;
3103                 }
3104             }
3105             loader_closedir(inst, dir_stream);
3106             if (vk_result != VK_SUCCESS) {
3107                 goto out;
3108             }
3109         }
3110         if (use_first_found_manifest && out_files->count > 0) {
3111             break;
3112         }
3113     }
3114 
3115 out:
3116 
3117     return vk_result;
3118 }
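// Illustrative sketch (hypothetical search path, not part of the loader): gathering manifest
// files with add_data_files(). The search path is consumed destructively by
// loader_get_next_path(), so a writable array is passed in.
static VkResult example_gather_manifests(const struct loader_instance *inst, struct loader_string_list *out_files) {
    char search_path[] = "/etc/vulkan/icd.d:/usr/share/vulkan/icd.d";
    return add_data_files(inst, search_path, out_files, false /* collect every manifest, not just the first */);
}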
3119 
3120 // Look for data files in the provided paths, but first check the environment override to determine if we should use that
3121 // instead.
3122 VkResult read_data_files_in_search_paths(const struct loader_instance *inst, enum loader_data_files_type manifest_type,
3123                                          const char *path_override, bool *override_active, struct loader_string_list *out_files) {
3124     VkResult vk_result = VK_SUCCESS;
3125     char *override_env = NULL;
3126     const char *override_path = NULL;
3127     char *additional_env = NULL;
3128     size_t search_path_size = 0;
3129     char *search_path = NULL;
3130     char *cur_path_ptr = NULL;
3131     bool use_first_found_manifest = false;
3132 #if COMMON_UNIX_PLATFORMS
3133     const char *relative_location = NULL;  // Only used on unix platforms
3134     size_t rel_size = 0;                   // Unused on Windows, so not declared there to avoid compiler warnings
3135 #endif
3136 
3137 #if defined(_WIN32)
3138     char *package_path = NULL;
3139 #elif COMMON_UNIX_PLATFORMS
3140     // Determine how much space is needed to generate the full search path
3141     // for the current manifest files.
3142     char *xdg_config_home = loader_secure_getenv("XDG_CONFIG_HOME", inst);
3143     char *xdg_config_dirs = loader_secure_getenv("XDG_CONFIG_DIRS", inst);
3144 
3145 #if !defined(__Fuchsia__) && !defined(__QNX__) && !defined(__OHOS__)
3146     if (NULL == xdg_config_dirs || '\0' == xdg_config_dirs[0]) {
3147         xdg_config_dirs = FALLBACK_CONFIG_DIRS;
3148     }
3149 #endif
3150 
3151     char *xdg_data_home = loader_secure_getenv("XDG_DATA_HOME", inst);
3152     char *xdg_data_dirs = loader_secure_getenv("XDG_DATA_DIRS", inst);
3153 
3154 #if !defined(__Fuchsia__) && !defined(__QNX__) && !defined(__OHOS__)
3155     if (NULL == xdg_data_dirs || '\0' == xdg_data_dirs[0]) {
3156         xdg_data_dirs = FALLBACK_DATA_DIRS;
3157     }
3158 #endif
3159 
3160     char *home = NULL;
3161     char *default_data_home = NULL;
3162     char *default_config_home = NULL;
3163     char *home_data_dir = NULL;
3164     char *home_config_dir = NULL;
3165 
3166     // Only fall back to HOME when XDG_CONFIG_HOME / XDG_DATA_HOME are not set on the system
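    // For example (illustrative): with HOME=/home/user and neither XDG variable set, the defaults computed
    // below are /home/user/.config and /home/user/.local/share.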
3167     home = loader_secure_getenv("HOME", inst);
3168     if (home != NULL) {
3169         if (NULL == xdg_config_home || '\0' == xdg_config_home[0]) {
3170             const char config_suffix[] = "/.config";
3171             size_t default_config_home_len = strlen(home) + sizeof(config_suffix) + 1;
3172             default_config_home = loader_instance_heap_calloc(inst, default_config_home_len, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
3173             if (default_config_home == NULL) {
3174                 vk_result = VK_ERROR_OUT_OF_HOST_MEMORY;
3175                 goto out;
3176             }
3177             strncpy(default_config_home, home, default_config_home_len);
3178             strncat(default_config_home, config_suffix, default_config_home_len);
3179         }
3180         if (NULL == xdg_data_home || '\0' == xdg_data_home[0]) {
3181             const char data_suffix[] = "/.local/share";
3182             size_t default_data_home_len = strlen(home) + sizeof(data_suffix) + 1;
3183             default_data_home = loader_instance_heap_calloc(inst, default_data_home_len, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
3184             if (default_data_home == NULL) {
3185                 vk_result = VK_ERROR_OUT_OF_HOST_MEMORY;
3186                 goto out;
3187             }
3188             strncpy(default_data_home, home, default_data_home_len);
3189             strncat(default_data_home, data_suffix, default_data_home_len);
3190         }
3191     }
3192 
3193     if (NULL != default_config_home) {
3194         home_config_dir = default_config_home;
3195     } else {
3196         home_config_dir = xdg_config_home;
3197     }
3198     if (NULL != default_data_home) {
3199         home_data_dir = default_data_home;
3200     } else {
3201         home_data_dir = xdg_data_home;
3202     }
3203 
3204 #if defined(__OHOS__)
3205     char *debug_layer_name = loader_secure_getenv("debug.graphic.debug_layer", inst);
3206     char *debug_hap_name = loader_secure_getenv("debug.graphic.debug_hap", inst);
3207     char *system_debug_hap_name = loader_secure_getenv("debug.graphic.system_layer_flag", inst);
3208     char *debug_layer_json_path = NULL;
3209     bool use_system_layer = false;
3210     if (NULL != system_debug_hap_name && '\0' != system_debug_hap_name[0]) {
3211         if (strcmp(system_debug_hap_name, "1") == 0) {
3212             use_system_layer = true;
3213         }
3214     }
3215     bool currentProcessEnableDebugLayer = false;
3216     bool debug_layer_use_heap = false;
3217     if (NULL != debug_layer_name && '\0' != debug_layer_name[0] && InitBundleInfo(debug_hap_name)) {
3218         currentProcessEnableDebugLayer = true;
3219         const char default_json_path[] = "/data/storage/el2/base/haps/entry/files/";
3220         const char external_system_json_path[] = "/vendor/etc/vulkan/debuglayer/";
3221         const char json_suffix[] = ".json";
3222         size_t max_len = sizeof(default_json_path);
3223         if (max_len < sizeof(external_system_json_path)) {
3224             max_len = sizeof(external_system_json_path);
3225         }
3226         size_t debug_layer_json_path_len = max_len + strlen(debug_layer_name) + sizeof(json_suffix) + 1;
3227         debug_layer_json_path = loader_secure_getenv("debug.graphic.vklayer_json_path", inst);
3228         if (NULL == debug_layer_json_path || '\0' == debug_layer_json_path[0]) {
3229             debug_layer_json_path = loader_instance_heap_calloc(inst, debug_layer_json_path_len, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
3230             if (debug_layer_json_path == NULL) {
3231                 vk_result = VK_ERROR_OUT_OF_HOST_MEMORY;
3232                 goto out;
3233             }
3234             debug_layer_use_heap = true;
3235             if (use_system_layer && CheckAppProvisionTypeIsDebug()) {
3236                 strncpy(debug_layer_json_path, external_system_json_path, debug_layer_json_path_len);
3237             } else {
3238                 strncpy(debug_layer_json_path, default_json_path, debug_layer_json_path_len);
3239             }
3240             strncat(debug_layer_json_path, debug_layer_name, debug_layer_json_path_len);
3241             strncat(debug_layer_json_path, json_suffix, debug_layer_json_path_len);
3242         }
3243         loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0, "OHOS:: debug_layer_json_path: %s", debug_layer_json_path);
3244     }
3245 #endif
3246 
3247 #else
3248 #warning read_data_files_in_search_paths unsupported platform
3249 #endif
3250 
3251     switch (manifest_type) {
3252         case LOADER_DATA_FILE_MANIFEST_DRIVER:
3253             override_env = loader_secure_getenv(VK_DRIVER_FILES_ENV_VAR, inst);
3254             if (NULL == override_env) {
3255                 // Not there, so fall back to the old name
3256                 override_env = loader_secure_getenv(VK_ICD_FILENAMES_ENV_VAR, inst);
3257             }
3258             additional_env = loader_secure_getenv(VK_ADDITIONAL_DRIVER_FILES_ENV_VAR, inst);
3259 #if COMMON_UNIX_PLATFORMS
3260             relative_location = VK_DRIVERS_INFO_RELATIVE_DIR;
3261 #endif
3262 #if defined(_WIN32)
3263             package_path = windows_get_app_package_manifest_path(inst);
3264 #endif
3265             break;
3266         case LOADER_DATA_FILE_MANIFEST_IMPLICIT_LAYER:
3267             override_env = loader_secure_getenv(VK_IMPLICIT_LAYER_PATH_ENV_VAR, inst);
3268             additional_env = loader_secure_getenv(VK_ADDITIONAL_IMPLICIT_LAYER_PATH_ENV_VAR, inst);
3269 #if COMMON_UNIX_PLATFORMS
3270             relative_location = VK_ILAYERS_INFO_RELATIVE_DIR;
3271 #endif
3272 #if defined(_WIN32)
3273             package_path = windows_get_app_package_manifest_path(inst);
3274 #endif
3275             break;
3276         case LOADER_DATA_FILE_MANIFEST_EXPLICIT_LAYER:
3277             override_env = loader_secure_getenv(VK_EXPLICIT_LAYER_PATH_ENV_VAR, inst);
3278             additional_env = loader_secure_getenv(VK_ADDITIONAL_EXPLICIT_LAYER_PATH_ENV_VAR, inst);
3279 #if COMMON_UNIX_PLATFORMS
3280             relative_location = VK_ELAYERS_INFO_RELATIVE_DIR;
3281 #endif
3282             break;
3283         default:
3284             assert(false && "Shouldn't get here!");
3285             break;
3286     }
3287 
3288     // Log a message when VK_LAYER_PATH is set but the override layer paths take priority
3289     if (manifest_type == LOADER_DATA_FILE_MANIFEST_EXPLICIT_LAYER && NULL != override_env && NULL != path_override) {
3290         loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0,
3291                    "Ignoring VK_LAYER_PATH. The Override layer is active and has override paths set, which takes priority. "
3292                    "VK_LAYER_PATH is set to %s",
3293                    override_env);
3294     }
3295 
3296     if (path_override != NULL) {
3297         override_path = path_override;
3298     } else if (override_env != NULL) {
3299         override_path = override_env;
3300     }
3301 
3302     // Add two by default for NULL terminator and one path separator on end (just in case)
3303     search_path_size = 2;
3304 
3305     // If there's an override, use that (and the local folder if required) and nothing else
3306     if (NULL != override_path) {
3307         // Local folder and null terminator
3308         search_path_size += strlen(override_path) + 2;
3309     } else {
3310         // Add the size of any additional search paths defined in the additive environment variable
3311         if (NULL != additional_env) {
3312             search_path_size += determine_data_file_path_size(additional_env, 0) + 2;
3313 #if defined(_WIN32)
3314         }
3315         if (NULL != package_path) {
3316             search_path_size += determine_data_file_path_size(package_path, 0) + 2;
3317         }
3318         if (search_path_size == 2) {
3319             goto out;
3320         }
3321 #elif COMMON_UNIX_PLATFORMS
3322         }
3323 
3324         // Add the general search folders (with the appropriate relative folder added)
3325         rel_size = strlen(relative_location);
3326         if (rel_size > 0) {
3327 #if defined(__APPLE__)
3328             search_path_size += MAXPATHLEN;
3329 #endif
3330 
3331 #if defined (__OHOS__)
3332             search_path_size += determine_data_file_path_size(IGRAPHICS_CONFG_DIR, rel_size);
3333 #endif
3334 
3335             // Only add the home folders if defined
3336             if (NULL != home_config_dir) {
3337                 search_path_size += determine_data_file_path_size(home_config_dir, rel_size);
3338             }
3339             search_path_size += determine_data_file_path_size(xdg_config_dirs, rel_size);
3340             search_path_size += determine_data_file_path_size(SYSCONFDIR, rel_size);
3341 #if defined(EXTRASYSCONFDIR)
3342             search_path_size += determine_data_file_path_size(EXTRASYSCONFDIR, rel_size);
3343 #endif
3344             // Only add the home folders if defined
3345             if (NULL != home_data_dir) {
3346                 search_path_size += determine_data_file_path_size(home_data_dir, rel_size);
3347             }
3348             search_path_size += determine_data_file_path_size(xdg_data_dirs, rel_size);
3349 #if defined (__OHOS__)
3350             if (currentProcessEnableDebugLayer) {
3351                 search_path_size += determine_data_file_path_size(debug_layer_json_path, rel_size);
3352             }
3353             search_path_size += determine_data_file_path_size("/system/etc/vulkan/swapchain", rel_size);
3354 #endif
3355         }
3356 #else
3357 #warning read_data_files_in_search_paths unsupported platform
3358 #endif
3359     }
3360 
3361     // Allocate the required space
3362     search_path = loader_instance_heap_calloc(inst, search_path_size, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
3363     if (NULL == search_path) {
3364         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
3365                    "read_data_files_in_search_paths: Failed to allocate space for search path of length %d",
3366                    (uint32_t)search_path_size);
3367         vk_result = VK_ERROR_OUT_OF_HOST_MEMORY;
3368         goto out;
3369     }
3370 
3371     cur_path_ptr = search_path;
3372 
3373     // Add the remaining paths to the list
3374     if (NULL != override_path) {
3375         size_t override_path_len = strlen(override_path);
3376         loader_strncpy(cur_path_ptr, search_path_size, override_path, override_path_len);
3377         cur_path_ptr += override_path_len;
3378     } else {
3379         // Add any additional search paths defined in the additive environment variable
3380         if (NULL != additional_env) {
3381             copy_data_file_info(additional_env, NULL, 0, &cur_path_ptr);
3382         }
3383 
3384 #if defined(_WIN32)
3385         if (NULL != package_path) {
3386             copy_data_file_info(package_path, NULL, 0, &cur_path_ptr);
3387         }
3388 #elif COMMON_UNIX_PLATFORMS
3389         if (rel_size > 0) {
3390 #if defined(__APPLE__)
3391             // Add the bundle's Resources dir to the beginning of the search path.
3392             // Looks for manifests in the bundle first, before any system directories.
3393             // This also appears to work unmodified for iOS, it finds the app bundle on the devices
3394             // file system. (RSW)
3395             CFBundleRef main_bundle = CFBundleGetMainBundle();
3396             if (NULL != main_bundle) {
3397                 CFURLRef ref = CFBundleCopyResourcesDirectoryURL(main_bundle);
3398                 if (NULL != ref) {
3399                     if (CFURLGetFileSystemRepresentation(ref, TRUE, (UInt8 *)cur_path_ptr, search_path_size)) {
3400                         cur_path_ptr += strlen(cur_path_ptr);
3401                         *cur_path_ptr++ = DIRECTORY_SYMBOL;
3402                         memcpy(cur_path_ptr, relative_location, rel_size);
3403                         cur_path_ptr += rel_size;
3404                         *cur_path_ptr++ = PATH_SEPARATOR;
3405                         if (manifest_type == LOADER_DATA_FILE_MANIFEST_DRIVER) {
3406                             use_first_found_manifest = true;
3407                         }
3408                     }
3409                     CFRelease(ref);
3410                 }
3411             }
3412 #endif  // __APPLE__
3413 
3414 #if defined (__OHOS__)
3415             copy_data_file_info(IGRAPHICS_CONFG_DIR, relative_location, rel_size, &cur_path_ptr);
3416 #endif
3417 
3418             // Only add the home folders if not NULL
3419             if (NULL != home_config_dir) {
3420                 copy_data_file_info(home_config_dir, relative_location, rel_size, &cur_path_ptr);
3421             }
3422             copy_data_file_info(xdg_config_dirs, relative_location, rel_size, &cur_path_ptr);
3423             copy_data_file_info(SYSCONFDIR, relative_location, rel_size, &cur_path_ptr);
3424 #if defined(EXTRASYSCONFDIR)
3425             copy_data_file_info(EXTRASYSCONFDIR, relative_location, rel_size, &cur_path_ptr);
3426 #endif
3427 
3428             // Only add the home folders if not NULL
3429             if (NULL != home_data_dir) {
3430                 copy_data_file_info(home_data_dir, relative_location, rel_size, &cur_path_ptr);
3431             }
3432             copy_data_file_info(xdg_data_dirs, relative_location, rel_size, &cur_path_ptr);
3433 #if defined (__OHOS__)
3434             if (currentProcessEnableDebugLayer) {
3435                 copy_data_file_info(debug_layer_json_path, relative_location, rel_size, &cur_path_ptr);
3436             }
3437             copy_data_file_info("/system/etc/vulkan/swapchain/", relative_location, rel_size, &cur_path_ptr);
3438 #endif
3439         }
3440 
3441         // Remove the last path separator
3442         --cur_path_ptr;
3443 
3444         assert(cur_path_ptr - search_path < (ptrdiff_t)search_path_size);
3445         *cur_path_ptr = '\0';
3446 #else
3447 #warning read_data_files_in_search_paths unsupported platform
3448 #endif
3449     }
3450 
3451     // Remove duplicate paths, or it would result in duplicate extensions, duplicate devices, etc.
3452     // This uses minimal memory, but is O(N^2) on the number of paths. Expect only a few paths.
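    // For example (illustrative), "/etc/vulkan/icd.d:/usr/share/vulkan/icd.d:/etc/vulkan/icd.d" collapses to
    // "/etc/vulkan/icd.d:/usr/share/vulkan/icd.d", with ':' standing in for PATH_SEPARATOR.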
3453     char path_sep_str[2] = {PATH_SEPARATOR, '\0'};
3454     size_t search_path_updated_size = strlen(search_path);
3455     for (size_t first = 0; first < search_path_updated_size;) {
3456         // If this is an empty path, erase it
3457         if (search_path[first] == PATH_SEPARATOR) {
3458             memmove(&search_path[first], &search_path[first + 1], search_path_updated_size - first + 1);
3459             search_path_updated_size -= 1;
3460             continue;
3461         }
3462 
3463         size_t first_end = first + 1;
3464         first_end += strcspn(&search_path[first_end], path_sep_str);
3465         for (size_t second = first_end + 1; second < search_path_updated_size;) {
3466             size_t second_end = second + 1;
3467             second_end += strcspn(&search_path[second_end], path_sep_str);
3468             if (first_end - first == second_end - second &&
3469                 !strncmp(&search_path[first], &search_path[second], second_end - second)) {
3470                 // Found duplicate. Include PATH_SEPARATOR in second_end, then erase it from search_path.
3471                 if (search_path[second_end] == PATH_SEPARATOR) {
3472                     second_end++;
3473                 }
3474                 memmove(&search_path[second], &search_path[second_end], search_path_updated_size - second_end + 1);
3475                 search_path_updated_size -= second_end - second;
3476             } else {
3477                 second = second_end + 1;
3478             }
3479         }
3480         first = first_end + 1;
3481     }
3482     search_path_size = search_path_updated_size;
3483 
3484     // Print out the paths being searched if debugging is enabled
3485     uint32_t log_flags = 0;
3486     if (search_path_size > 0) {
3487         char *tmp_search_path = loader_instance_heap_alloc(inst, search_path_size + 1, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
3488         if (NULL != tmp_search_path) {
3489             loader_strncpy(tmp_search_path, search_path_size + 1, search_path, search_path_size);
3490             tmp_search_path[search_path_size] = '\0';
3491             if (manifest_type == LOADER_DATA_FILE_MANIFEST_DRIVER) {
3492                 log_flags = VULKAN_LOADER_DRIVER_BIT;
3493                 loader_log(inst, VULKAN_LOADER_DRIVER_BIT, 0, "Searching for driver manifest files");
3494             } else {
3495                 log_flags = VULKAN_LOADER_LAYER_BIT;
3496                 loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "Searching for %s layer manifest files",
3497                            manifest_type == LOADER_DATA_FILE_MANIFEST_EXPLICIT_LAYER ? "explicit" : "implicit");
3498             }
3499             loader_log(inst, log_flags, 0, "   In following locations:");
3500             char *cur_file;
3501             char *next_file = tmp_search_path;
3502             while (NULL != next_file && *next_file != '\0') {
3503                 cur_file = next_file;
3504                 next_file = loader_get_next_path(cur_file);
3505                 loader_log(inst, log_flags, 0, "      %s", cur_file);
3506             }
3507             loader_instance_heap_free(inst, tmp_search_path);
3508         }
3509     }
3510 
3511     // Now, parse the paths and add any manifest files found in them.
3512     vk_result = add_data_files(inst, search_path, out_files, use_first_found_manifest);
3513 
3514     if (log_flags != 0 && out_files->count > 0) {
3515         loader_log(inst, log_flags, 0, "   Found the following files:");
3516         for (uint32_t cur_file = 0; cur_file < out_files->count; ++cur_file) {
3517             loader_log(inst, log_flags, 0, "      %s", out_files->list[cur_file]);
3518         }
3519     } else {
3520         loader_log(inst, log_flags, 0, "   Found no files");
3521     }
3522 
3523     if (NULL != override_path) {
3524         *override_active = true;
3525     } else {
3526         *override_active = false;
3527     }
3528 
3529 out:
3530 
3531     loader_free_getenv(additional_env, inst);
3532     loader_free_getenv(override_env, inst);
3533 #if defined(_WIN32)
3534     loader_instance_heap_free(inst, package_path);
3535 #elif COMMON_UNIX_PLATFORMS
3536     loader_free_getenv(xdg_config_home, inst);
3537     loader_free_getenv(xdg_config_dirs, inst);
3538     loader_free_getenv(xdg_data_home, inst);
3539     loader_free_getenv(xdg_data_dirs, inst);
3541     loader_free_getenv(home, inst);
3542     loader_instance_heap_free(inst, default_data_home);
3543     loader_instance_heap_free(inst, default_config_home);
#if defined(__OHOS__)
    // __OHOS__ is one of the COMMON_UNIX_PLATFORMS, so the OHOS-specific allocations made above must be
    // released here inside that branch; a separate #elif for __OHOS__ would never be compiled.
    if (currentProcessEnableDebugLayer) {
        if (debug_layer_use_heap) {
            loader_instance_heap_free(inst, debug_layer_json_path);
        } else {
            loader_free_getenv(debug_layer_json_path, inst);
        }
    }
    loader_free_getenv(debug_layer_name, inst);
    loader_free_getenv(debug_hap_name, inst);
    loader_free_getenv(system_debug_hap_name, inst);
#endif  // __OHOS__
3554 #else
3555 #warning read_data_files_in_search_paths unsupported platform
3556 #endif
3557 
3558     loader_instance_heap_free(inst, search_path);
3559 
3560     return vk_result;
3561 }
3562 
3563 // Find the Vulkan library manifest files.
3564 //
3565 // This function scans the appropriate locations for a list of JSON manifest files based on the
3566 // "manifest_type".  The location is interpreted as a Registry path on Windows and as one or more
3567 // directory paths on Linux.
3568 // "home_location" is an additional directory in the user's home directory to look at. It is
3569 // expanded into the dir path $XDG_DATA_HOME/home_location or $HOME/.local/share/home_location
3570 // depending on environment variables. This "home_location" is only used on Linux.
3571 //
3572 // \returns
3573 // VkResult
3574 // A string list of manifest files to be opened in the out_files param.
3575 // The list has a pointer to a string for each manifest filename.
3576 // When done using the list in out_files, the pointers should be freed.
3577 // Location or override string lists can be either files or directories as
3578 // follows:
3579 //            | location | override
3580 // --------------------------------
3581 // Win ICD    | files    | files
3582 // Win Layer  | files    | dirs
3583 // Linux ICD  | dirs     | files
3584 // Linux Layer| dirs     | dirs
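//
// Example (illustrative): on Linux, setting VK_DRIVER_FILES to a single JSON file limits driver discovery to
// that manifest, while pointing VK_LAYER_PATH at a directory makes the loader scan that directory for
// explicit layer manifests instead of the standard locations listed above.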
3585 
3586 VkResult loader_get_data_files(const struct loader_instance *inst, enum loader_data_files_type manifest_type,
3587                                const char *path_override, struct loader_string_list *out_files) {
3588     VkResult res = VK_SUCCESS;
3589     bool override_active = false;
3590 
3591     // Free and init the out_files information so there's no false data left from uninitialized variables.
3592     free_string_list(inst, out_files);
3593 
3594     res = read_data_files_in_search_paths(inst, manifest_type, path_override, &override_active, out_files);
3595     if (VK_SUCCESS != res) {
3596         goto out;
3597     }
3598 
3599 #if defined(_WIN32)
3600     // Read the registry if the override wasn't active.
3601     if (!override_active) {
3602         bool warn_if_not_present = false;
3603         char *registry_location = NULL;
3604 
3605         switch (manifest_type) {
3606             default:
3607                 goto out;
3608             case LOADER_DATA_FILE_MANIFEST_DRIVER:
3609                 warn_if_not_present = true;
3610                 registry_location = VK_DRIVERS_INFO_REGISTRY_LOC;
3611                 break;
3612             case LOADER_DATA_FILE_MANIFEST_IMPLICIT_LAYER:
3613                 registry_location = VK_ILAYERS_INFO_REGISTRY_LOC;
3614                 break;
3615             case LOADER_DATA_FILE_MANIFEST_EXPLICIT_LAYER:
3616                 warn_if_not_present = true;
3617                 registry_location = VK_ELAYERS_INFO_REGISTRY_LOC;
3618                 break;
3619         }
3620         VkResult tmp_res =
3621             windows_read_data_files_in_registry(inst, manifest_type, warn_if_not_present, registry_location, out_files);
3622         // Only return an error if there was an error this time, and no manifest files from before.
3623         if (VK_SUCCESS != tmp_res && out_files->count == 0) {
3624             res = tmp_res;
3625             goto out;
3626         }
3627     }
3628 #endif
3629 
3630 out:
3631 
3632     if (VK_SUCCESS != res) {
3633         free_string_list(inst, out_files);
3634     }
3635 
3636     return res;
3637 }
3638 
3639 struct ICDManifestInfo {
3640     char *full_library_path;
3641     uint32_t version;
3642 };
3643 
3644 // Takes a json file, opens, reads, and parses an ICD Manifest out of it.
3645 // Should only return VK_SUCCESS, VK_ERROR_INCOMPATIBLE_DRIVER, or VK_ERROR_OUT_OF_HOST_MEMORY
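//
// For reference, a minimal manifest that this parser accepts looks roughly like the following
// (illustrative values; only the fields read below are shown):
//
//     {
//         "file_format_version": "1.0.1",
//         "ICD": {
//             "library_path": "path/to/driver_library",
//             "api_version": "1.3.0",
//             "library_arch": "64",
//             "is_portability_driver": false
//         }
//     }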
3646 VkResult loader_parse_icd_manifest(const struct loader_instance *inst, char *file_str, struct ICDManifestInfo *icd,
3647                                    bool *skipped_portability_drivers) {
3648     VkResult res = VK_SUCCESS;
3649     cJSON *icd_manifest_json = NULL;
3650 
3651     if (file_str == NULL) {
3652         goto out;
3653     }
3654 
3655     res = loader_get_json(inst, file_str, &icd_manifest_json);
3656     if (res == VK_ERROR_OUT_OF_HOST_MEMORY) {
3657         goto out;
3658     }
3659     if (res != VK_SUCCESS || NULL == icd_manifest_json) {
3660         res = VK_ERROR_INCOMPATIBLE_DRIVER;
3661         goto out;
3662     }
3663 
3664     cJSON *file_format_version_json = loader_cJSON_GetObjectItem(icd_manifest_json, "file_format_version");
3665     if (file_format_version_json == NULL) {
3666         loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3667                    "loader_parse_icd_manifest: ICD JSON %s does not have a \'file_format_version\' field. Skipping ICD JSON.",
3668                    file_str);
3669         res = VK_ERROR_INCOMPATIBLE_DRIVER;
3670         goto out;
3671     }
3672 
3673     char *file_vers_str = loader_cJSON_GetStringValue(file_format_version_json);
3674     if (NULL == file_vers_str) {
        // This can only fail if the field is not a JSON string, so treat the manifest as unusable
        loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
                   "loader_parse_icd_manifest: Failed retrieving ICD JSON %s \'file_format_version\' field. Skipping ICD JSON",
                   file_str);
        res = VK_ERROR_INCOMPATIBLE_DRIVER;
        goto out;
3680     }
3681     loader_log(inst, VULKAN_LOADER_DRIVER_BIT, 0, "Found ICD manifest file %s, version %s", file_str, file_vers_str);
3682 
3683     // Get the version of the driver manifest
3684     loader_api_version json_file_version = loader_make_full_version(loader_parse_version_string(file_vers_str));
3685 
3686     // Loader only knows versions 1.0.0 and 1.0.1, anything above it is unknown
3687     if (loader_check_version_meets_required(loader_combine_version(1, 0, 2), json_file_version)) {
3688         loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3689                    "loader_parse_icd_manifest: %s has unknown icd manifest file version %d.%d.%d. May cause errors.", file_str,
3690                    json_file_version.major, json_file_version.minor, json_file_version.patch);
3691     }
3692 
3693     cJSON *itemICD = loader_cJSON_GetObjectItem(icd_manifest_json, "ICD");
3694     if (itemICD == NULL) {
3695         // Don't warn if this happens to be a layer manifest file
3696         if (loader_cJSON_GetObjectItem(icd_manifest_json, "layer") == NULL &&
3697             loader_cJSON_GetObjectItem(icd_manifest_json, "layers") == NULL) {
3698             loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3699                        "loader_parse_icd_manifest: Can not find \'ICD\' object in ICD JSON file %s. Skipping ICD JSON", file_str);
3700         }
3701         res = VK_ERROR_INCOMPATIBLE_DRIVER;
3702         goto out;
3703     }
3704 
3705     cJSON *library_path_json = loader_cJSON_GetObjectItem(itemICD, "library_path");
3706     if (library_path_json == NULL) {
3707         loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3708                    "loader_parse_icd_manifest: Failed to find \'library_path\' object in ICD JSON file %s. Skipping ICD JSON.",
3709                    file_str);
3710         res = VK_ERROR_INCOMPATIBLE_DRIVER;
3711         goto out;
3712     }
3713     bool out_of_memory = false;
3714     char *library_path = loader_cJSON_Print(library_path_json, &out_of_memory);
3715     if (out_of_memory) {
3716         loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3717                    "loader_parse_icd_manifest: Failed retrieving ICD JSON %s \'library_path\' field. Skipping ICD JSON.", file_str);
3718         res = VK_ERROR_OUT_OF_HOST_MEMORY;
3719         goto out;
3720     } else if (!library_path || strlen(library_path) == 0) {
3721         loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3722                    "loader_parse_icd_manifest: ICD JSON %s \'library_path\' field is empty. Skipping ICD JSON.", file_str);
3723         res = VK_ERROR_INCOMPATIBLE_DRIVER;
3724         loader_instance_heap_free(inst, library_path);
3725         goto out;
3726     }
3727 
3728     // Report the library named by this manifest if debugging is enabled
3729     loader_log(inst, VULKAN_LOADER_DEBUG_BIT | VULKAN_LOADER_DRIVER_BIT, 0, "Searching for ICD drivers named %s", library_path);
3730     // This function takes ownership of library_path - so we don't need to clean it up
3731     res = combine_manifest_directory_and_library_path(inst, library_path, file_str, &icd->full_library_path);
3732     if (VK_SUCCESS != res) {
3733         goto out;
3734     }
3735 
3736     cJSON *api_version_json = loader_cJSON_GetObjectItem(itemICD, "api_version");
3737     if (api_version_json == NULL) {
3738         loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3739                    "loader_parse_icd_manifest: ICD JSON %s does not have an \'api_version\' field. Skipping ICD JSON.", file_str);
3740         res = VK_ERROR_INCOMPATIBLE_DRIVER;
3741         goto out;
3742     }
3743     char *version_str = loader_cJSON_GetStringValue(api_version_json);
3744     if (NULL == version_str) {
        // This can only fail if the field is not a JSON string, so treat the manifest as unusable
        loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
                   "loader_parse_icd_manifest: Failed retrieving ICD JSON %s \'api_version\' field. Skipping ICD JSON.", file_str);
        res = VK_ERROR_INCOMPATIBLE_DRIVER;
        goto out;
3750     }
3751     icd->version = loader_parse_version_string(version_str);
3752 
3753     if (VK_API_VERSION_VARIANT(icd->version) != 0) {
3754         loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3755                    "loader_parse_icd_manifest: Driver's ICD JSON %s \'api_version\' field contains a non-zero variant value of %d. "
3756                    " Skipping ICD JSON.",
3757                    file_str, VK_API_VERSION_VARIANT(icd->version));
3758         res = VK_ERROR_INCOMPATIBLE_DRIVER;
3759         goto out;
3760     }
3761 
3762     // Skip over ICDs which contain a true "is_portability_driver" value whenever the application doesn't enable
3763     // portability enumeration.
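    // (Applications opt in by enabling the VK_KHR_portability_enumeration extension and setting
    // VK_INSTANCE_CREATE_ENUMERATE_PORTABILITY_BIT_KHR in VkInstanceCreateInfo::flags.)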
3764     cJSON *is_portability_driver_json = loader_cJSON_GetObjectItem(itemICD, "is_portability_driver");
3765     if (loader_cJSON_IsTrue(is_portability_driver_json) && inst && !inst->portability_enumeration_enabled) {
3766         if (skipped_portability_drivers) {
3767             *skipped_portability_drivers = true;
3768         }
3769         res = VK_ERROR_INCOMPATIBLE_DRIVER;
3770         goto out;
3771     }
3772 
3773     char *library_arch_str = loader_cJSON_GetStringValue(loader_cJSON_GetObjectItem(itemICD, "library_arch"));
3774     if (library_arch_str != NULL) {
3775         // Compare the manifest's "library_arch" value against the pointer size of the running loader
3776         if ((strncmp(library_arch_str, "32", 4) == 0 && sizeof(void *) != 4) ||
3777             (strncmp(library_arch_str, "64", 4) == 0 && sizeof(void *) != 8)) {
3778             loader_log(inst, VULKAN_LOADER_INFO_BIT, 0,
3779                        "loader_parse_icd_manifest: Driver library architecture doesn't match the current running "
3780                        "architecture, skipping this driver");
3781             res = VK_ERROR_INCOMPATIBLE_DRIVER;
3782             goto out;
3783         }
3784     }
3785 out:
3786     loader_cJSON_Delete(icd_manifest_json);
3787     return res;
3788 }
3789 
3790 // Try to find the Vulkan ICD driver(s).
3791 //
3792 // This function scans the default system loader path(s) or path specified by either the
3793 // VK_DRIVER_FILES or VK_ICD_FILENAMES environment variable in order to find loadable
3794 // VK ICD manifest files.
3795 // From these manifest files it finds the ICD libraries.
3796 //
3797 // skipped_portability_drivers is used to report whether the loader found drivers which advertise
3798 // portability but the application didn't enable the bit to enumerate them.
3799 // May be NULL.
3800 //
3801 // \returns
3802 // Vulkan result
3803 // (on result == VK_SUCCESS) a list of icds that were discovered
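//
// As an illustration, discovered manifests can additionally be filtered by filename through the environment
// variables named by VK_DRIVERS_SELECT_ENV_VAR and VK_DRIVERS_DISABLE_ENV_VAR; a manifest matching the
// disable filter (and not the select filter) is skipped with a warning in the loop below.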
3804 VkResult loader_icd_scan(const struct loader_instance *inst, struct loader_icd_tramp_list *icd_tramp_list,
3805                          const VkInstanceCreateInfo *pCreateInfo, bool *skipped_portability_drivers) {
3806     VkResult res = VK_SUCCESS;
3807     struct loader_string_list manifest_files = {0};
3808     struct loader_envvar_filter select_filter = {0};
3809     struct loader_envvar_filter disable_filter = {0};
3810     struct ICDManifestInfo *icd_details = NULL;
3811 
3812     // Set up the ICD Trampoline list so elements can be written into it.
3813     res = loader_init_scanned_icd_list(inst, icd_tramp_list);
3814     if (res == VK_ERROR_OUT_OF_HOST_MEMORY) {
3815         return res;
3816     }
3817 
3818     bool direct_driver_loading_exclusive_mode = false;
3819     res = loader_scan_for_direct_drivers(inst, pCreateInfo, icd_tramp_list, &direct_driver_loading_exclusive_mode);
3820     if (res == VK_ERROR_OUT_OF_HOST_MEMORY) {
3821         goto out;
3822     }
3823     if (direct_driver_loading_exclusive_mode) {
3824         // Make sure to jump over the system & env-var driver discovery mechanisms if exclusive mode is set, even if no drivers
3825         // were successfully found through the direct driver loading mechanism
3826         goto out;
3827     }
3828 
3829     // Parse the filter environment variables to determine if we have any special behavior
3830     res = parse_generic_filter_environment_var(inst, VK_DRIVERS_SELECT_ENV_VAR, &select_filter);
3831     if (VK_SUCCESS != res) {
3832         goto out;
3833     }
3834     res = parse_generic_filter_environment_var(inst, VK_DRIVERS_DISABLE_ENV_VAR, &disable_filter);
3835     if (VK_SUCCESS != res) {
3836         goto out;
3837     }
3838 
3839     // Get a list of manifest files for ICDs
3840     res = loader_get_data_files(inst, LOADER_DATA_FILE_MANIFEST_DRIVER, NULL, &manifest_files);
3841     if (VK_SUCCESS != res) {
3842         goto out;
3843     }
3844 
3845     icd_details = loader_stack_alloc(sizeof(struct ICDManifestInfo) * manifest_files.count);
3846     if (NULL == icd_details) {
3847         res = VK_ERROR_OUT_OF_HOST_MEMORY;
3848         goto out;
3849     }
3850     memset(icd_details, 0, sizeof(struct ICDManifestInfo) * manifest_files.count);
3851 
3852     for (uint32_t i = 0; i < manifest_files.count; i++) {
3853         VkResult icd_res = VK_SUCCESS;
3854 
3855         icd_res = loader_parse_icd_manifest(inst, manifest_files.list[i], &icd_details[i], skipped_portability_drivers);
3856         if (VK_ERROR_OUT_OF_HOST_MEMORY == icd_res) {
3857             res = icd_res;
3858             goto out;
3859         } else if (VK_ERROR_INCOMPATIBLE_DRIVER == icd_res) {
3860             continue;
3861         }
3862 
3863         if (select_filter.count > 0 || disable_filter.count > 0) {
3864             // Get only the filename for comparing to the filters
3865             char *just_filename_str = strrchr(manifest_files.list[i], DIRECTORY_SYMBOL);
3866 
3867             // No directory symbol, just the filename
3868             if (NULL == just_filename_str) {
3869                 just_filename_str = manifest_files.list[i];
3870             } else {
3871                 just_filename_str++;
3872             }
3873 
3874             bool name_matches_select =
3875                 (select_filter.count > 0 && check_name_matches_filter_environment_var(just_filename_str, &select_filter));
3876             bool name_matches_disable =
3877                 (disable_filter.count > 0 && check_name_matches_filter_environment_var(just_filename_str, &disable_filter));
3878 
3879             if (name_matches_disable && !name_matches_select) {
3880                 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3881                            "Driver \"%s\" ignored because it was disabled by env var \'%s\'", just_filename_str,
3882                            VK_DRIVERS_DISABLE_ENV_VAR);
3883                 continue;
3884             }
3885             if (select_filter.count != 0 && !name_matches_select) {
3886                 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3887                            "Driver \"%s\" ignored because not selected by env var \'%s\'", just_filename_str,
3888                            VK_DRIVERS_SELECT_ENV_VAR);
3889                 continue;
3890             }
3891         }
3892 
3893         enum loader_layer_library_status lib_status;
3894         icd_res =
3895             loader_scanned_icd_add(inst, icd_tramp_list, icd_details[i].full_library_path, icd_details[i].version, &lib_status);
3896         if (VK_ERROR_OUT_OF_HOST_MEMORY == icd_res) {
3897             res = icd_res;
3898             goto out;
3899         } else if (VK_ERROR_INCOMPATIBLE_DRIVER == icd_res) {
3900             switch (lib_status) {
3901                 case LOADER_LAYER_LIB_NOT_LOADED:
3902                 case LOADER_LAYER_LIB_ERROR_FAILED_TO_LOAD:
3903                     loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3904                                "loader_icd_scan: Failed loading library associated with ICD JSON %s. Ignoring this JSON",
3905                                icd_details[i].full_library_path);
3906                     break;
3907                 case LOADER_LAYER_LIB_ERROR_WRONG_BIT_TYPE: {
3908                     loader_log(inst, VULKAN_LOADER_DRIVER_BIT, 0, "Requested ICD %s was wrong bit-type. Ignoring this JSON",
3909                                icd_details[i].full_library_path);
3910                     break;
3911                 }
3912                 case LOADER_LAYER_LIB_SUCCESS_LOADED:
3913                 case LOADER_LAYER_LIB_ERROR_OUT_OF_MEMORY:
3914                     // Shouldn't be able to reach this, but if we do, log a warning
3915                     loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3916                                "Shouldn't reach this. A valid version of requested ICD %s was loaded but something bad "
3917                                "happened afterwards.",
3918                                icd_details[i].full_library_path);
3919                     break;
3920             }
3921         }
3922     }
3923 
3924 out:
3925     if (NULL != icd_details) {
3926         // Successfully got the icd_details structure, which means we need to free the paths contained within
3927         for (uint32_t i = 0; i < manifest_files.count; i++) {
3928             loader_instance_heap_free(inst, icd_details[i].full_library_path);
3929         }
3930     }
3931     free_string_list(inst, &manifest_files);
3932     return res;
3933 }
3934 
3935 // Gets the layer data files corresponding to manifest_type & path_override, then parses the resulting json objects
3936 // into instance_layers
3937 // Manifest type must be either implicit or explicit
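//
// For orientation (illustrative only; the fields themselves are parsed in loader_add_layer_properties), a
// layer manifest is a JSON file along the lines of:
//
//     {
//         "file_format_version": "1.1.0",
//         "layer": {
//             "name": "VK_LAYER_example_name",
//             "type": "GLOBAL",
//             "library_path": "path/to/layer_library",
//             "api_version": "1.3.0",
//             "implementation_version": "1",
//             "description": "Example layer"
//         }
//     }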
3938 VkResult loader_parse_instance_layers(struct loader_instance *inst, enum loader_data_files_type manifest_type,
3939                                       const char *path_override, struct loader_layer_list *instance_layers) {
3940     assert(manifest_type == LOADER_DATA_FILE_MANIFEST_IMPLICIT_LAYER || manifest_type == LOADER_DATA_FILE_MANIFEST_EXPLICIT_LAYER);
3941     VkResult res = VK_SUCCESS;
3942     struct loader_string_list manifest_files = {0};
3943 
3944     res = loader_get_data_files(inst, manifest_type, path_override, &manifest_files);
3945     if (VK_SUCCESS != res) {
3946         goto out;
3947     }
3948 
3949     for (uint32_t i = 0; i < manifest_files.count; i++) {
3950         char *file_str = manifest_files.list[i];
3951         if (file_str == NULL) {
3952             continue;
3953         }
3954 
3955         // Parse file into JSON struct
3956         cJSON *json = NULL;
3957         VkResult local_res = loader_get_json(inst, file_str, &json);
3958         if (VK_ERROR_OUT_OF_HOST_MEMORY == local_res) {
3959             res = VK_ERROR_OUT_OF_HOST_MEMORY;
3960             goto out;
3961         } else if (VK_SUCCESS != local_res || NULL == json) {
3962             continue;
3963         }
3964 
3965         local_res = loader_add_layer_properties(inst, instance_layers, json,
3966                                                 manifest_type == LOADER_DATA_FILE_MANIFEST_IMPLICIT_LAYER, file_str);
3967         loader_cJSON_Delete(json);
3968 
3969         // If the error is anything other than out of memory we still want to try to load the other layers
3970         if (VK_ERROR_OUT_OF_HOST_MEMORY == local_res) {
3971             res = VK_ERROR_OUT_OF_HOST_MEMORY;
3972             goto out;
3973         }
3974     }
3975 out:
3976     free_string_list(inst, &manifest_files);
3977 
3978     return res;
3979 }
3980 
3981 // Given a loader_layer_properties struct that is a valid override layer, concatenate the properties override paths and put them
3982 // into the output parameter override_paths
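//
// For example (illustrative), override_paths entries of "/opt/layers_a" and "/opt/layers_b" are joined into
// the single string "/opt/layers_a:/opt/layers_b", with ':' standing in for PATH_SEPARATOR.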
3983 VkResult get_override_layer_override_paths(struct loader_instance *inst, struct loader_layer_properties *prop,
3984                                            char **override_paths) {
3985     if (prop->override_paths.count > 0) {
3986         char *cur_write_ptr = NULL;
3987         size_t override_path_size = 0;
3988         for (uint32_t j = 0; j < prop->override_paths.count; j++) {
3989             override_path_size += determine_data_file_path_size(prop->override_paths.list[j], 0);
3990         }
3991         *override_paths = loader_instance_heap_alloc(inst, override_path_size, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
3992         if (*override_paths == NULL) {
3993             return VK_ERROR_OUT_OF_HOST_MEMORY;
3994         }
3995         cur_write_ptr = &(*override_paths)[0];
3996         for (uint32_t j = 0; j < prop->override_paths.count; j++) {
3997             copy_data_file_info(prop->override_paths.list[j], NULL, 0, &cur_write_ptr);
3998         }
3999         // Remove the last path separator
4000         --cur_write_ptr;
4001         assert(cur_write_ptr - (*override_paths) < (ptrdiff_t)override_path_size);
4002         *cur_write_ptr = '\0';
4003         loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Override layer has override paths set to %s",
4004                    *override_paths);
4005     }
4006     return VK_SUCCESS;
4007 }
4008 
4009 VkResult loader_scan_for_layers(struct loader_instance *inst, struct loader_layer_list *instance_layers,
4010                                 const struct loader_envvar_all_filters *filters) {
4011     VkResult res = VK_SUCCESS;
4012     struct loader_layer_list settings_layers = {0};
4013     struct loader_layer_list regular_instance_layers = {0};
4014     bool override_layer_valid = false;
4015     char *override_paths = NULL;
4016 
4017     bool should_search_for_other_layers = true;
4018     res = get_settings_layers(inst, &settings_layers, &should_search_for_other_layers);
4019     if (VK_SUCCESS != res) {
4020         goto out;
4021     }
4022 
4023     // If we should not look for layers using other mechanisms, assign settings_layers to instance_layers and jump to the
4024     // output
4025     if (!should_search_for_other_layers) {
4026         *instance_layers = settings_layers;
4027         memset(&settings_layers, 0, sizeof(struct loader_layer_list));
4028         goto out;
4029     }
4030 
4031     res = loader_parse_instance_layers(inst, LOADER_DATA_FILE_MANIFEST_IMPLICIT_LAYER, NULL, &regular_instance_layers);
4032     if (VK_SUCCESS != res) {
4033         goto out;
4034     }
4035 
4036     // Remove any extraneous override layers.
4037     remove_all_non_valid_override_layers(inst, &regular_instance_layers);
4038 
4039     // Check to see if the override layer is present, and use its override paths.
4040     for (uint32_t i = 0; i < regular_instance_layers.count; i++) {
4041         struct loader_layer_properties *prop = &regular_instance_layers.list[i];
4042         if (prop->is_override && loader_implicit_layer_is_enabled(inst, filters, prop) && prop->override_paths.count > 0) {
4043             res = get_override_layer_override_paths(inst, prop, &override_paths);
4044             if (VK_SUCCESS != res) {
4045                 goto out;
4046             }
4047             break;
4048         }
4049     }
4050 
4051     // Get a list of manifest files for explicit layers
4052     res = loader_parse_instance_layers(inst, LOADER_DATA_FILE_MANIFEST_EXPLICIT_LAYER, override_paths, &regular_instance_layers);
4053     if (VK_SUCCESS != res) {
4054         goto out;
4055     }
4056 
4057     // Verify any meta-layers in the list are valid and all the component layers are
4058     // actually present in the available layer list
4059     res = verify_all_meta_layers(inst, filters, &regular_instance_layers, &override_layer_valid);
4060     if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
4061         goto out;  // Use the common cleanup path so the partially-built layer lists are freed
4062     }
4063 
4064     if (override_layer_valid) {
4065         loader_remove_layers_in_blacklist(inst, &regular_instance_layers);
4066         if (NULL != inst) {
4067             inst->override_layer_present = true;
4068         }
4069     }
4070 
4071     // Remove disabled layers
4072     for (uint32_t i = 0; i < regular_instance_layers.count; ++i) {
4073         if (!loader_layer_is_available(inst, filters, &regular_instance_layers.list[i])) {
4074             loader_remove_layer_in_list(inst, &regular_instance_layers, i);
4075             i--;
4076         }
4077     }
4078 
4079     res = combine_settings_layers_with_regular_layers(inst, &settings_layers, &regular_instance_layers, instance_layers);
4080 
4081 out:
4082     loader_delete_layer_list_and_properties(inst, &settings_layers);
4083     loader_delete_layer_list_and_properties(inst, &regular_instance_layers);
4084 
4085     loader_instance_heap_free(inst, override_paths);
4086     return res;
4087 }
4088 
4089 VkResult loader_scan_for_implicit_layers(struct loader_instance *inst, struct loader_layer_list *instance_layers,
4090                                          const struct loader_envvar_all_filters *layer_filters) {
4091     VkResult res = VK_SUCCESS;
4092     struct loader_layer_list settings_layers = {0};
4093     struct loader_layer_list regular_instance_layers = {0};
4094     bool override_layer_valid = false;
4095     char *override_paths = NULL;
4096     bool implicit_metalayer_present = false;
4097 
4098     bool should_search_for_other_layers = true;
4099     res = get_settings_layers(inst, &settings_layers, &should_search_for_other_layers);
4100     if (VK_SUCCESS != res) {
4101         goto out;
4102     }
4103 
4104     // Remove layers from settings file that are off, are explicit, or are implicit layers that aren't active
4105     for (uint32_t i = 0; i < settings_layers.count; ++i) {
4106         if (settings_layers.list[i].settings_control_value == LOADER_SETTINGS_LAYER_CONTROL_OFF ||
4107             settings_layers.list[i].settings_control_value == LOADER_SETTINGS_LAYER_UNORDERED_LAYER_LOCATION ||
4108             (settings_layers.list[i].type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER) == VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER ||
4109             !loader_implicit_layer_is_enabled(inst, layer_filters, &settings_layers.list[i])) {
4110             loader_remove_layer_in_list(inst, &settings_layers, i);
4111             i--;
4112         }
4113     }
4114 
4115     // If we should not look for layers using other mechanisms, assign settings_layers to instance_layers and jump to the
4116     // output
4117     if (!should_search_for_other_layers) {
4118         *instance_layers = settings_layers;
4119         memset(&settings_layers, 0, sizeof(struct loader_layer_list));
4120         goto out;
4121     }
4122 
4123     res = loader_parse_instance_layers(inst, LOADER_DATA_FILE_MANIFEST_IMPLICIT_LAYER, NULL, &regular_instance_layers);
4124     if (VK_SUCCESS != res) {
4125         goto out;
4126     }
4127 
4128     // Remove any extraneous override layers.
4129     remove_all_non_valid_override_layers(inst, &regular_instance_layers);
4130 
4131     // Check to see if either the override layer is present, or another implicit meta-layer.
4132     // Each of these may require explicit layers to be enabled at this time.
4133     for (uint32_t i = 0; i < regular_instance_layers.count; i++) {
4134         struct loader_layer_properties *prop = &regular_instance_layers.list[i];
4135         if (prop->is_override && loader_implicit_layer_is_enabled(inst, layer_filters, prop)) {
4136             override_layer_valid = true;
4137             res = get_override_layer_override_paths(inst, prop, &override_paths);
4138             if (VK_SUCCESS != res) {
4139                 goto out;
4140             }
4141         } else if (!prop->is_override && prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER) {
4142             implicit_metalayer_present = true;
4143         }
4144     }
4145 
4146     // If either the override layer or an implicit meta-layer is present, we need to add
4147     // explicit layer info as well.  Not to worry, though: all explicit layers not referenced by
4148     // those meta-layers will be removed below in loader_remove_layers_not_in_implicit_meta_layers().
4149     if (override_layer_valid || implicit_metalayer_present) {
4150         res =
4151             loader_parse_instance_layers(inst, LOADER_DATA_FILE_MANIFEST_EXPLICIT_LAYER, override_paths, &regular_instance_layers);
4152         if (VK_SUCCESS != res) {
4153             goto out;
4154         }
4155     }
4156 
4157     // Verify any meta-layers in the list are valid and all the component layers are
4158     // actually present in the available layer list
4159     res = verify_all_meta_layers(inst, layer_filters, &regular_instance_layers, &override_layer_valid);
4160     if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
4161         goto out;  // Use the common cleanup path so the partially-built layer lists are freed
4162     }
4163 
4164     if (override_layer_valid || implicit_metalayer_present) {
4165         loader_remove_layers_not_in_implicit_meta_layers(inst, &regular_instance_layers);
4166         if (override_layer_valid && inst != NULL) {
4167             inst->override_layer_present = true;
4168         }
4169     }
4170 
4171     // Remove disabled layers
4172     for (uint32_t i = 0; i < regular_instance_layers.count; ++i) {
4173         if (!loader_implicit_layer_is_enabled(inst, layer_filters, &regular_instance_layers.list[i])) {
4174             loader_remove_layer_in_list(inst, &regular_instance_layers, i);
4175             i--;
4176         }
4177     }
4178 
4179     res = combine_settings_layers_with_regular_layers(inst, &settings_layers, &regular_instance_layers, instance_layers);
4180 
4181 out:
4182     loader_delete_layer_list_and_properties(inst, &settings_layers);
4183     loader_delete_layer_list_and_properties(inst, &regular_instance_layers);
4184 
4185     loader_instance_heap_free(inst, override_paths);
4186     return res;
4187 }
4188 
4189 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL loader_gpdpa_instance_terminator(VkInstance inst, const char *pName) {
4190     // inst is not wrapped
4191     if (inst == VK_NULL_HANDLE) {
4192         return NULL;
4193     }
4194 
4195     VkLayerInstanceDispatchTable *disp_table = *(VkLayerInstanceDispatchTable **)inst;
4196 
4197     if (disp_table == NULL) return NULL;
4198 
4199     struct loader_instance *loader_inst = loader_get_instance(inst);
4200 
4201     if (loader_inst->instance_finished_creation) {
4202         disp_table = &loader_inst->terminator_dispatch;
4203     }
4204 
4205     bool found_name;
4206     void *addr = loader_lookup_instance_dispatch_table(disp_table, pName, &found_name);
4207     if (found_name) {
4208         return addr;
4209     }
4210 
4211     // Check if any drivers support the function, and if so, add it to the unknown function list
4212     addr = loader_phys_dev_ext_gpa_term(loader_get_instance(inst), pName);
4213     if (NULL != addr) return addr;
4214 
4215     // Don't call down the chain, this would be an infinite loop
4216     loader_log(NULL, VULKAN_LOADER_DEBUG_BIT, 0, "loader_gpdpa_instance_terminator() unrecognized name %s", pName);
4217     return NULL;
4218 }
4219 
4220 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL loader_gpa_instance_terminator(VkInstance inst, const char *pName) {
4221     // Global functions - Do not need a valid instance handle to query
4222     if (!strcmp(pName, "vkGetInstanceProcAddr")) {
4223         return (PFN_vkVoidFunction)loader_gpa_instance_terminator;
4224     }
4225     if (!strcmp(pName, "vk_layerGetPhysicalDeviceProcAddr")) {
4226         return (PFN_vkVoidFunction)loader_gpdpa_instance_terminator;
4227     }
4228     if (!strcmp(pName, "vkCreateInstance")) {
4229         return (PFN_vkVoidFunction)terminator_CreateInstance;
4230     }
4231     // If a layer is querying pre-instance functions using vkGetInstanceProcAddr, we need to return function pointers that match the
4232     // Vulkan API
4233     if (!strcmp(pName, "vkEnumerateInstanceLayerProperties")) {
4234         return (PFN_vkVoidFunction)terminator_EnumerateInstanceLayerProperties;
4235     }
4236     if (!strcmp(pName, "vkEnumerateInstanceExtensionProperties")) {
4237         return (PFN_vkVoidFunction)terminator_EnumerateInstanceExtensionProperties;
4238     }
4239     if (!strcmp(pName, "vkEnumerateInstanceVersion")) {
4240         return (PFN_vkVoidFunction)terminator_EnumerateInstanceVersion;
4241     }
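    // For example, loader_gpa_instance_terminator(VK_NULL_HANDLE, "vkEnumerateInstanceVersion") returns
    // terminator_EnumerateInstanceVersion without requiring a valid instance handle.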
4242 
4243     // While the spec is very clear that querying vkCreateDevice requires a valid VkInstance, the loader allowed querying
4244     // with a NULL VkInstance handle for long enough that it is impractical to fix this bug in the loader.
4245 
4246     // As such, this behavior is kept to maintain compatibility with the RTSS layer (Riva Tuner Statistics Server) but may
4247     // be depended upon by other layers out in the wild.
4248     if (!strcmp(pName, "vkCreateDevice")) {
4249         return (PFN_vkVoidFunction)terminator_CreateDevice;
4250     }
4251 
4252     // inst is not wrapped
4253     if (inst == VK_NULL_HANDLE) {
4254         return NULL;
4255     }
4256     VkLayerInstanceDispatchTable *disp_table = *(VkLayerInstanceDispatchTable **)inst;
4257 
4258     if (disp_table == NULL) return NULL;
4259 
4260     struct loader_instance *loader_inst = loader_get_instance(inst);
4261 
4262     // The VK_EXT_debug_utils functions need a special case here so the terminators can still be found from
4263     // vkGetInstanceProcAddr. This is because VK_EXT_debug_utils is an instance level extension with device level functions,
4264     // and is 'supported' by the loader.
4265     // These functions need a terminator to handle the case of a driver not supporting VK_EXT_debug_utils when there are
4266     // layers present which do not check for NULL before calling the function.
4267     if (!strcmp(pName, "vkSetDebugUtilsObjectNameEXT")) {
4268         return loader_inst->enabled_known_extensions.ext_debug_utils ? (PFN_vkVoidFunction)terminator_SetDebugUtilsObjectNameEXT
4269                                                                      : NULL;
4270     }
4271     if (!strcmp(pName, "vkSetDebugUtilsObjectTagEXT")) {
4272         return loader_inst->enabled_known_extensions.ext_debug_utils ? (PFN_vkVoidFunction)terminator_SetDebugUtilsObjectTagEXT
4273                                                                      : NULL;
4274     }
4275     if (!strcmp(pName, "vkQueueBeginDebugUtilsLabelEXT")) {
4276         return loader_inst->enabled_known_extensions.ext_debug_utils ? (PFN_vkVoidFunction)terminator_QueueBeginDebugUtilsLabelEXT
4277                                                                      : NULL;
4278     }
4279     if (!strcmp(pName, "vkQueueEndDebugUtilsLabelEXT")) {
4280         return loader_inst->enabled_known_extensions.ext_debug_utils ? (PFN_vkVoidFunction)terminator_QueueEndDebugUtilsLabelEXT
4281                                                                      : NULL;
4282     }
4283     if (!strcmp(pName, "vkQueueInsertDebugUtilsLabelEXT")) {
4284         return loader_inst->enabled_known_extensions.ext_debug_utils ? (PFN_vkVoidFunction)terminator_QueueInsertDebugUtilsLabelEXT
4285                                                                      : NULL;
4286     }
4287     if (!strcmp(pName, "vkCmdBeginDebugUtilsLabelEXT")) {
4288         return loader_inst->enabled_known_extensions.ext_debug_utils ? (PFN_vkVoidFunction)terminator_CmdBeginDebugUtilsLabelEXT
4289                                                                      : NULL;
4290     }
4291     if (!strcmp(pName, "vkCmdEndDebugUtilsLabelEXT")) {
4292         return loader_inst->enabled_known_extensions.ext_debug_utils ? (PFN_vkVoidFunction)terminator_CmdEndDebugUtilsLabelEXT
4293                                                                      : NULL;
4294     }
4295     if (!strcmp(pName, "vkCmdInsertDebugUtilsLabelEXT")) {
4296         return loader_inst->enabled_known_extensions.ext_debug_utils ? (PFN_vkVoidFunction)terminator_CmdInsertDebugUtilsLabelEXT
4297                                                                      : NULL;
4298     }
4299 
4300     if (loader_inst->instance_finished_creation) {
4301         disp_table = &loader_inst->terminator_dispatch;
4302     }
4303 
4304     bool found_name;
4305     void *addr = loader_lookup_instance_dispatch_table(disp_table, pName, &found_name);
4306     if (found_name) {
4307         return addr;
4308     }
4309 
4310     // Check if it is an unknown physical device function, to see if any drivers support it.
4311     addr = loader_phys_dev_ext_gpa_term(loader_get_instance(inst), pName);
4312     if (addr) {
4313         return addr;
4314     }
4315 
4316     // Assume it is an unknown device function, check to see if any drivers support it.
4317     addr = loader_dev_ext_gpa_term(loader_get_instance(inst), pName);
4318     if (addr) {
4319         return addr;
4320     }
4321 
4322     // Don't call down the chain, this would be an infinite loop
4323     loader_log(NULL, VULKAN_LOADER_DEBUG_BIT, 0, "loader_gpa_instance_terminator() unrecognized name %s", pName);
4324     return NULL;
4325 }
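
// Illustrative sketch (not part of the loader): why the VK_EXT_debug_utils cases above return a
// terminator even when the driver does not implement the extension. A hypothetical layer like the
// one below may fetch the next function pointer once and then call it without a NULL check; the
// loader-provided terminator keeps that call safe whenever the extension was enabled on the
// instance. All example_* names are hypothetical.
#if 0
static PFN_vkCmdBeginDebugUtilsLabelEXT example_next_CmdBeginDebugUtilsLabelEXT;

static void example_layer_init(VkInstance instance, PFN_vkGetInstanceProcAddr next_gipa) {
    // No NULL check is performed on the result before it is used below.
    example_next_CmdBeginDebugUtilsLabelEXT =
        (PFN_vkCmdBeginDebugUtilsLabelEXT)next_gipa(instance, "vkCmdBeginDebugUtilsLabelEXT");
}

static VKAPI_ATTR void VKAPI_CALL example_layer_CmdBeginDebugUtilsLabelEXT(VkCommandBuffer commandBuffer,
                                                                           const VkDebugUtilsLabelEXT *pLabelInfo) {
    // Calls straight down the chain; without the loader's terminator this would crash on drivers
    // that do not implement VK_EXT_debug_utils.
    example_next_CmdBeginDebugUtilsLabelEXT(commandBuffer, pLabelInfo);
}
#endif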
4326 
4327 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL loader_gpa_device_terminator(VkDevice device, const char *pName) {
4328     struct loader_device *dev;
4329     struct loader_icd_term *icd_term = loader_get_icd_and_device(device, &dev);
4330 
4331     // Return this function if a layer above here is asking for the vkGetDeviceProcAddr.
4332     // This is so we can properly intercept any device commands needing a terminator.
4333     if (!strcmp(pName, "vkGetDeviceProcAddr")) {
4334         return (PFN_vkVoidFunction)loader_gpa_device_terminator;
4335     }
4336 
4337     // NOTE: Device Funcs needing Trampoline/Terminator.
4338     // Overrides for device functions needing a trampoline and
4339     // a terminator because certain device entry-points still need to go
4340     // through a terminator before hitting the ICD.  This could be for
4341     // several reasons, but the main one is currently unwrapping an
4342     // object before passing the appropriate info along to the ICD.
4343     // This is why we also have to override the direct ICD call to
4344     // vkGetDeviceProcAddr to intercept those calls.
4345     // If the pName is for a 'known' function but isn't available, due to
4346     // the corresponding extension/feature not being enabled, we need to
4347     // return NULL and not call down to the driver's GetDeviceProcAddr.
4348     if (NULL != dev) {
4349         bool found_name = false;
4350         PFN_vkVoidFunction addr = get_extension_device_proc_terminator(dev, pName, &found_name);
4351         if (found_name) {
4352             return addr;
4353         }
4354     }
4355 
4356     if (icd_term == NULL) {
4357         return NULL;
4358     }
4359 
4360     return icd_term->dispatch.GetDeviceProcAddr(device, pName);
4361 }
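
// Typical dispatch paths implied by the NOTE above. For most device functions the chain built in
// loader_create_device_chain() ends directly in the driver, because this terminator returns the
// ICD's own pointer for names it does not special-case:
//
//   application -> loader trampoline -> enabled layers -> ICD function
//
// Only the entry points reported by get_extension_device_proc_terminator() insert an extra loader
// terminator before the ICD, typically so a loader-owned handle can be unwrapped first:
//
//   application -> loader trampoline -> enabled layers -> loader terminator -> ICD function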
4362 
4363 struct loader_instance *loader_get_instance(const VkInstance instance) {
4364     // look up the loader_instance in our list by comparing dispatch tables, as
4365     // there is no guarantee the instance is still a loader_instance* after any
4366     // layers which wrap the instance object.
4367     const VkLayerInstanceDispatchTable *disp;
4368     struct loader_instance *ptr_instance = (struct loader_instance *)instance;
4369     if (VK_NULL_HANDLE == instance || LOADER_MAGIC_NUMBER != ptr_instance->magic) {
4370         return NULL;
4371     } else {
4372         disp = loader_get_instance_layer_dispatch(instance);
4373         loader_platform_thread_lock_mutex(&loader_global_instance_list_lock);
4374         for (struct loader_instance *inst = loader.instances; inst; inst = inst->next) {
4375             if (&inst->disp->layer_inst_disp == disp) {
4376                 ptr_instance = inst;
4377                 break;
4378             }
4379         }
4380         loader_platform_thread_unlock_mutex(&loader_global_instance_list_lock);
4381     }
4382     return ptr_instance;
4383 }
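
// Illustrative sketch (not part of the loader) of why the lookup above compares dispatch tables
// instead of casting the handle directly. A wrapping layer may hand the application its own
// object as the VkInstance, so the pointer is no longer a struct loader_instance *. Dispatchable
// handles must keep the dispatch table pointer as their first member, so that value survives
// wrapping and can be matched against the loader's global instance list. The struct below is
// hypothetical.
#if 0
struct example_wrapped_instance {
    VkLayerInstanceDispatchTable *disp;  // must remain the first member of any dispatchable object
    VkInstance next_instance;            // the handle returned by the next layer down
    // ... arbitrary per-layer state ...
};
#endif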
4384 
4385 loader_platform_dl_handle loader_open_layer_file(const struct loader_instance *inst, struct loader_layer_properties *prop) {
4386     char* libPath = prop->lib_name;
4387     loader_log(inst, VULKAN_LOADER_DEBUG_BIT | VULKAN_LOADER_LAYER_BIT, 0, "try to open json libPath: %s", libPath);
4388     if ((prop->lib_handle = loader_platform_open_library(libPath)) == NULL) {
4389         loader_handle_load_library_error(inst, libPath, &prop->lib_status);
4390     } else {
4391         prop->lib_status = LOADER_LAYER_LIB_SUCCESS_LOADED;
4392         loader_log(inst, VULKAN_LOADER_DEBUG_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Loading layer library %s", prop->lib_name);
4393         return prop->lib_handle;
4394     }
4395 
4396 #if defined(__OHOS__)
4397     char *debug_layer_name = loader_secure_getenv("debug.graphic.debug_layer", inst);
4398     char *debug_hap_name = loader_secure_getenv("debug.graphic.debug_hap", inst);
4399     bool isDebugLayer = false;
4400     char* debugLayerLibPath = NULL;
4401 
4402     if (NULL != debug_layer_name && '\0' != debug_layer_name[0] && InitBundleInfo(debug_hap_name)) {
4403         const char lib_prefix[] = "lib";
4404         const char so_suffix[] = ".so";
4405 
4406         size_t totalLen = strlen(debug_layer_name) + strlen(lib_prefix) + strlen(so_suffix) + 1;
4407         char* layerSoName = loader_instance_heap_calloc(inst, totalLen, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
4408         if (layerSoName == NULL) {
4409             loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_LAYER_BIT, 0, "malloc layerSoName fail");
4410             goto mallocErr;
4411         }
4412         strncpy(layerSoName, lib_prefix, totalLen);
4413         strncat(layerSoName, debug_layer_name, totalLen);
4414         strncat(layerSoName, so_suffix, totalLen);
4415 
4416         if (strcmp(layerSoName, libPath) == 0) {
4417             isDebugLayer = true;
4418             debugLayerLibPath = GetDebugLayerLibPath(inst, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
4419             if(debugLayerLibPath == NULL) {
4420                 loader_instance_heap_free(inst, layerSoName);
4421                 isDebugLayer = false;
4422                 goto mallocErr;
4423             }
4424             size_t totalLength = strlen(libPath) + strlen(debugLayerLibPath) + 1;
4425             libPath = loader_instance_heap_calloc(inst, totalLength, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
4426             if (libPath == NULL) {
4427                 loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_LAYER_BIT, 0, "malloc libPath fail");
4428                 loader_instance_heap_free(inst, layerSoName);
4429                 loader_instance_heap_free(inst, debugLayerLibPath);
4430                 libPath = prop->lib_name;
4431                 isDebugLayer = false;
4432                 goto mallocErr;
4433             }
4434             strncpy(libPath, debugLayerLibPath, totalLength);
4435             strncat(libPath, prop->lib_name, totalLength);
4436         } else {
4437             loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_LAYER_BIT, 0, "layerSoName != libPath : %s != %s",
4438                 layerSoName, libPath);
4439         }
4440         loader_instance_heap_free(inst, layerSoName);
4441     }
4442 mallocErr:
4443     loader_free_getenv(debug_layer_name, inst);
4444     loader_free_getenv(debug_hap_name, inst);
4445 
4446     loader_log(inst, VULKAN_LOADER_DEBUG_BIT | VULKAN_LOADER_LAYER_BIT, 0, "try to open hap libPath: %s", libPath);
4447     if ((prop->lib_handle = loader_platform_open_library(libPath)) == NULL) {
4448         loader_handle_load_library_error(inst, libPath, &prop->lib_status);
4449     } else {
4450         prop->lib_status = LOADER_LAYER_LIB_SUCCESS_LOADED;
4451         loader_log(inst, VULKAN_LOADER_DEBUG_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Loading hap layer library %s", prop->lib_name);
4452     }
4453     if (isDebugLayer) {
4454         loader_instance_heap_free(inst, debugLayerLibPath);
4455         loader_instance_heap_free(inst, libPath);
4456     }
4457 #endif
4458 
4459     return prop->lib_handle;
4460 }
4461 
4462 // Go through source_list and find any implicit layers. If an implicit layer is
4463 // found, add it (and any layers it expands to) to target_list and expanded_target_list.
4464 VkResult loader_add_implicit_layers(const struct loader_instance *inst, const struct loader_envvar_all_filters *filters,
4465                                     struct loader_pointer_layer_list *target_list,
4466                                     struct loader_pointer_layer_list *expanded_target_list,
4467                                     const struct loader_layer_list *source_list) {
4468     for (uint32_t src_layer = 0; src_layer < source_list->count; src_layer++) {
4469         struct loader_layer_properties *prop = &source_list->list[src_layer];
4470         if (0 == (prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER)) {
4471             VkResult result = loader_add_implicit_layer(inst, prop, filters, target_list, expanded_target_list, source_list);
4472             if (result == VK_ERROR_OUT_OF_HOST_MEMORY) return result;
4473         }
4474     }
4475     return VK_SUCCESS;
4476 }
4477 
4478 void warn_if_layers_are_older_than_application(struct loader_instance *inst) {
4479     for (uint32_t i = 0; i < inst->expanded_activated_layer_list.count; i++) {
4480         // Verify that the layer API version is at least the application's requested API version; if not, emit a warning
4481         // since undefined behavior could occur.
4482         struct loader_layer_properties *prop = inst->expanded_activated_layer_list.list[i];
4483         loader_api_version prop_spec_version = loader_make_version(prop->info.specVersion);
4484         if (!loader_check_version_meets_required(inst->app_api_version, prop_spec_version)) {
4485             loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
4486                        "Layer %s uses API version %u.%u which is older than the application specified "
4487                        "API version of %u.%u. May cause issues.",
4488                        prop->info.layerName, prop_spec_version.major, prop_spec_version.minor, inst->app_api_version.major,
4489                        inst->app_api_version.minor);
4490         }
4491     }
4492 }
4493 
4494 VkResult loader_enable_instance_layers(struct loader_instance *inst, const VkInstanceCreateInfo *pCreateInfo,
4495                                        const struct loader_layer_list *instance_layers,
4496                                        const struct loader_envvar_all_filters *layer_filters) {
4497     VkResult res = VK_SUCCESS;
4498 
4499     assert(inst && "Cannot have null instance");
4500 
4501     if (!loader_init_pointer_layer_list(inst, &inst->app_activated_layer_list)) {
4502         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
4503                    "loader_enable_instance_layers: Failed to initialize application version of the layer list");
4504         res = VK_ERROR_OUT_OF_HOST_MEMORY;
4505         goto out;
4506     }
4507 
4508     if (!loader_init_pointer_layer_list(inst, &inst->expanded_activated_layer_list)) {
4509         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
4510                    "loader_enable_instance_layers: Failed to initialize expanded version of the layer list");
4511         res = VK_ERROR_OUT_OF_HOST_MEMORY;
4512         goto out;
4513     }
4514 
4515     if (inst->settings.settings_active) {
4516         res = enable_correct_layers_from_settings(inst, layer_filters, pCreateInfo->enabledLayerCount,
4517                                                   pCreateInfo->ppEnabledLayerNames, &inst->instance_layer_list,
4518                                                   &inst->app_activated_layer_list, &inst->expanded_activated_layer_list);
4519         warn_if_layers_are_older_than_application(inst);
4520 
4521         goto out;
4522     }
4523 
4524     // Add any implicit layers first
4525     res = loader_add_implicit_layers(inst, layer_filters, &inst->app_activated_layer_list, &inst->expanded_activated_layer_list,
4526                                      instance_layers);
4527     if (res != VK_SUCCESS) {
4528         goto out;
4529     }
4530 
4531     // Add any layers specified via environment variable next
4532     res = loader_add_environment_layers(inst, VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER, layer_filters, &inst->app_activated_layer_list,
4533                                         &inst->expanded_activated_layer_list, instance_layers);
4534     if (res != VK_SUCCESS) {
4535         goto out;
4536     }
4537 
4538     // Add layers specified by the application
4539     res = loader_add_layer_names_to_list(inst, layer_filters, &inst->app_activated_layer_list, &inst->expanded_activated_layer_list,
4540                                          pCreateInfo->enabledLayerCount, pCreateInfo->ppEnabledLayerNames, instance_layers);
4541 
4542     warn_if_layers_are_older_than_application(inst);
4543 out:
4544     return res;
4545 }
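
// Note on ordering: the lists built above are position sensitive. Implicit layers are added
// first, then layers enabled through the environment, then layers named by the application, and
// loader_create_instance_chain() below links Get*ProcAddr pointers by walking the expanded list
// from the end toward the front. The net effect is that entries earlier in the list end up closer
// to the application in the final call chain.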
4546 
4547 // Determine the layer interface version to use.
4548 bool loader_get_layer_interface_version(PFN_vkNegotiateLoaderLayerInterfaceVersion fp_negotiate_layer_version,
4549                                         VkNegotiateLayerInterface *interface_struct) {
4550     memset(interface_struct, 0, sizeof(VkNegotiateLayerInterface));
4551     interface_struct->sType = LAYER_NEGOTIATE_INTERFACE_STRUCT;
4552     interface_struct->loaderLayerInterfaceVersion = 1;
4553     interface_struct->pNext = NULL;
4554 
4555     if (fp_negotiate_layer_version != NULL) {
4556         // Layer supports the negotiation API, so call it with the loader's
4557         // latest version supported
4558         interface_struct->loaderLayerInterfaceVersion = CURRENT_LOADER_LAYER_INTERFACE_VERSION;
4559         VkResult result = fp_negotiate_layer_version(interface_struct);
4560 
4561         if (result != VK_SUCCESS) {
4562             // Layer no longer supports the loader's latest interface version so
4563             // fail loading the Layer
4564             return false;
4565         }
4566     }
4567 
4568     if (interface_struct->loaderLayerInterfaceVersion < MIN_SUPPORTED_LOADER_LAYER_INTERFACE_VERSION) {
4569         // Loader no longer supports the layer's latest interface version so
4570         // fail loading the layer
4571         return false;
4572     }
4573 
4574     return true;
4575 }
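
// Illustrative sketch (not part of the loader) of the layer-side counterpart to the negotiation
// above. A hypothetical layer exporting vkNegotiateLoaderLayerInterfaceVersion would clamp the
// version offered by the loader to what it supports and, for version 2 or newer, publish its
// Get*ProcAddr entry points through the same structure. The example_layer_* functions are
// hypothetical.
#if 0
VKAPI_ATTR VkResult VKAPI_CALL vkNegotiateLoaderLayerInterfaceVersion(VkNegotiateLayerInterface *pVersionStruct) {
    if (pVersionStruct == NULL || pVersionStruct->sType != LAYER_NEGOTIATE_INTERFACE_STRUCT) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }
    // Never report a newer interface version than the layer actually implements.
    if (pVersionStruct->loaderLayerInterfaceVersion > 2) {
        pVersionStruct->loaderLayerInterfaceVersion = 2;
    }
    if (pVersionStruct->loaderLayerInterfaceVersion >= 2) {
        pVersionStruct->pfnGetInstanceProcAddr = example_layer_GetInstanceProcAddr;
        pVersionStruct->pfnGetDeviceProcAddr = example_layer_GetDeviceProcAddr;
        pVersionStruct->pfnGetPhysicalDeviceProcAddr = NULL;  // optional entry point
    }
    return VK_SUCCESS;
}
#endif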
4576 
4577 // Every extension that has a loader-defined trampoline needs to be marked as enabled or disabled so that we know whether or
4578 // not to return that trampoline when vkGetDeviceProcAddr is called
4579 void setup_logical_device_enabled_layer_extensions(const struct loader_instance *inst, struct loader_device *dev,
4580                                                    const struct loader_extension_list *icd_exts,
4581                                                    const VkDeviceCreateInfo *pCreateInfo) {
4582     // no enabled extensions, early exit
4583     if (pCreateInfo->ppEnabledExtensionNames == NULL) {
4584         return;
4585     }
4586     // Can only set up debug marker here, since debug utils is an instance extension.
4587     for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; ++i) {
4588         if (pCreateInfo->ppEnabledExtensionNames[i] &&
4589             !strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_EXT_DEBUG_MARKER_EXTENSION_NAME)) {
4590             // Check if it's supported by the driver
4591             for (uint32_t j = 0; j < icd_exts->count; ++j) {
4592                 if (!strcmp(icd_exts->list[j].extensionName, VK_EXT_DEBUG_MARKER_EXTENSION_NAME)) {
4593                     dev->layer_extensions.ext_debug_marker_enabled = true;
4594                 }
4595             }
4596             // also check if any layers support it.
4597             for (uint32_t j = 0; j < inst->app_activated_layer_list.count; j++) {
4598                 struct loader_layer_properties *layer = inst->app_activated_layer_list.list[j];
4599                 for (uint32_t k = 0; k < layer->device_extension_list.count; k++) {
4600                     if (!strcmp(layer->device_extension_list.list[k].props.extensionName, VK_EXT_DEBUG_MARKER_EXTENSION_NAME)) {
4601                         dev->layer_extensions.ext_debug_marker_enabled = true;
4602                     }
4603                 }
4604             }
4605         }
4606     }
4607 }
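
// Illustrative fragment (not the loader's actual trampoline code) of how the flag set above is
// typically consumed: if neither the driver nor any enabled layer supports VK_EXT_debug_marker,
// the device proc-addr path should hand back NULL instead of a trampoline. Here 'dev' stands for
// the loader_device looked up from the VkDevice handle, and the example_trampoline_* name is
// hypothetical.
#if 0
if (!strcmp(pName, "vkDebugMarkerSetObjectNameEXT")) {
    return dev->layer_extensions.ext_debug_marker_enabled
               ? (PFN_vkVoidFunction)example_trampoline_DebugMarkerSetObjectNameEXT
               : NULL;
}
#endif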
4608 
4609 VKAPI_ATTR VkResult VKAPI_CALL loader_layer_create_device(VkInstance instance, VkPhysicalDevice physicalDevice,
4610                                                           const VkDeviceCreateInfo *pCreateInfo,
4611                                                           const VkAllocationCallbacks *pAllocator, VkDevice *pDevice,
4612                                                           PFN_vkGetInstanceProcAddr layerGIPA, PFN_vkGetDeviceProcAddr *nextGDPA) {
4613     VkResult res;
4614     VkPhysicalDevice internal_device = VK_NULL_HANDLE;
4615     struct loader_device *dev = NULL;
4616     struct loader_instance *inst = NULL;
4617 
4618     if (instance != VK_NULL_HANDLE) {
4619         inst = loader_get_instance(instance);
4620         internal_device = physicalDevice;
4621     } else {
4622         struct loader_physical_device_tramp *phys_dev = (struct loader_physical_device_tramp *)physicalDevice;
4623         internal_device = phys_dev->phys_dev;
4624         inst = (struct loader_instance *)phys_dev->this_instance;
4625     }
4626 
4627     // Get the physical device (ICD) extensions
4628     struct loader_extension_list icd_exts = {0};
4629     icd_exts.list = NULL;
4630     res = loader_init_generic_list(inst, (struct loader_generic_list *)&icd_exts, sizeof(VkExtensionProperties));
4631     if (VK_SUCCESS != res) {
4632         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "vkCreateDevice: Failed to create ICD extension list");
4633         goto out;
4634     }
4635 
4636     PFN_vkEnumerateDeviceExtensionProperties enumDeviceExtensionProperties = NULL;
4637     if (layerGIPA != NULL) {
4638         enumDeviceExtensionProperties =
4639             (PFN_vkEnumerateDeviceExtensionProperties)layerGIPA(instance, "vkEnumerateDeviceExtensionProperties");
4640     } else {
4641         enumDeviceExtensionProperties = inst->disp->layer_inst_disp.EnumerateDeviceExtensionProperties;
4642     }
4643     res = loader_add_device_extensions(inst, enumDeviceExtensionProperties, internal_device, "Unknown", &icd_exts);
4644     if (res != VK_SUCCESS) {
4645         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "vkCreateDevice: Failed to add extensions to list");
4646         goto out;
4647     }
4648 
4649     // Make sure requested extensions to be enabled are supported
4650     res = loader_validate_device_extensions(inst, &inst->expanded_activated_layer_list, &icd_exts, pCreateInfo);
4651     if (res != VK_SUCCESS) {
4652         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "vkCreateDevice: Failed to validate extensions in list");
4653         goto out;
4654     }
4655 
4656     dev = loader_create_logical_device(inst, pAllocator);
4657     if (dev == NULL) {
4658         res = VK_ERROR_OUT_OF_HOST_MEMORY;
4659         goto out;
4660     }
4661 
4662     setup_logical_device_enabled_layer_extensions(inst, dev, &icd_exts, pCreateInfo);
4663 
4664     res = loader_create_device_chain(internal_device, pCreateInfo, pAllocator, inst, dev, layerGIPA, nextGDPA);
4665     if (res != VK_SUCCESS) {
4666         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "vkCreateDevice:  Failed to create device chain.");
4667         goto out;
4668     }
4669 
4670     *pDevice = dev->chain_device;
4671 
4672     // Initialize any device extension dispatch entries from the instance list
4673     loader_init_dispatch_dev_ext(inst, dev);
4674 
4675     // Initialize WSI device extensions as part of core dispatch since loader
4676     // has dedicated trampoline code for these
4677     loader_init_device_extension_dispatch_table(&dev->loader_dispatch, inst->disp->layer_inst_disp.GetInstanceProcAddr,
4678                                                 dev->loader_dispatch.core_dispatch.GetDeviceProcAddr, inst->instance, *pDevice);
4679 
4680 out:
4681 
4682     // Failure cleanup
4683     if (VK_SUCCESS != res) {
4684         if (NULL != dev) {
4685             // Find the icd_term this device belongs to then remove it from that icd_term.
4686             // Need to iterate the linked lists and remove the device from it. Don't delete
4687             // the device here since it may not have been added to the icd_term and there
4688             // are other allocations attached to it.
4689             struct loader_icd_term *icd_term = inst->icd_terms;
4690             bool found = false;
4691             while (!found && NULL != icd_term) {
4692                 struct loader_device *cur_dev = icd_term->logical_device_list;
4693                 struct loader_device *prev_dev = NULL;
4694                 while (NULL != cur_dev) {
4695                     if (cur_dev == dev) {
4696                         if (cur_dev == icd_term->logical_device_list) {
4697                             icd_term->logical_device_list = cur_dev->next;
4698                         } else if (prev_dev) {
4699                             prev_dev->next = cur_dev->next;
4700                         }
4701 
4702                         found = true;
4703                         break;
4704                     }
4705                     prev_dev = cur_dev;
4706                     cur_dev = cur_dev->next;
4707                 }
4708                 icd_term = icd_term->next;
4709             }
4710             // Now destroy the device and the allocations associated with it.
4711             loader_destroy_logical_device(dev, pAllocator);
4712         }
4713     }
4714 
4715     if (NULL != icd_exts.list) {
4716         loader_destroy_generic_list(inst, (struct loader_generic_list *)&icd_exts);
4717     }
4718     return res;
4719 }
4720 
4721 VKAPI_ATTR void VKAPI_CALL loader_layer_destroy_device(VkDevice device, const VkAllocationCallbacks *pAllocator,
4722                                                        PFN_vkDestroyDevice destroyFunction) {
4723     struct loader_device *dev;
4724 
4725     if (device == VK_NULL_HANDLE) {
4726         return;
4727     }
4728 
4729     struct loader_icd_term *icd_term = loader_get_icd_and_device(device, &dev);
4730 
4731     destroyFunction(device, pAllocator);
4732     if (NULL != dev) {
4733         dev->chain_device = NULL;
4734         dev->icd_device = NULL;
4735         loader_remove_logical_device(icd_term, dev, pAllocator);
4736     }
4737 }
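
// Illustrative sketch (not part of the loader) of how a layer consumes the two callbacks above.
// loader_create_instance_chain() below publishes them through a VkLayerInstanceCreateInfo entry
// whose function field is VK_LOADER_LAYER_CREATE_DEVICE_CALLBACK, letting a layer create its own
// VkDevice with the device chain starting just below that layer. All example_* names and
// parameters are hypothetical.
#if 0
static VkResult example_layer_create_own_device(const VkInstanceCreateInfo *pCreateInfo, VkInstance instance,
                                                VkPhysicalDevice physical_device,
                                                const VkDeviceCreateInfo *device_create_info,
                                                PFN_vkGetInstanceProcAddr example_layer_gipa, VkDevice *out_device) {
    // Locate the loader's callback entry in the instance create info chain (saved at vkCreateInstance time).
    const VkLayerInstanceCreateInfo *chain = (const VkLayerInstanceCreateInfo *)pCreateInfo->pNext;
    while (chain != NULL && !(chain->sType == VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO &&
                              chain->function == VK_LOADER_LAYER_CREATE_DEVICE_CALLBACK)) {
        chain = (const VkLayerInstanceCreateInfo *)chain->pNext;
    }
    if (chain == NULL) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }
    // The loader fills next_gdpa with the vkGetDeviceProcAddr of whatever sits below this layer.
    PFN_vkGetDeviceProcAddr next_gdpa = NULL;
    return chain->u.layerDevice.pfnLayerCreateDevice(instance, physical_device, device_create_info, NULL, out_device,
                                                     example_layer_gipa, &next_gdpa);
}
#endif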
4738 
4739 // Given the list of layers to activate in the loader_instance
4740 // structure, this function adds a VkLayerInstanceCreateInfo
4741 // structure to the VkInstanceCreateInfo.pNext pointer.
4742 // Each activated layer will have its own VkLayerInstanceLink
4743 // structure that tells the layer what Get*ProcAddr to call to
4744 // get function pointers to the next layer down.
4745 // Once the chain info has been created, this function
4746 // executes the CreateInstance call chain. Each layer then
4747 // has an opportunity in its CreateInstance function
4748 // to set up its dispatch table when the lower layer returns
4749 // successfully.
4750 // Each layer can wrap or not wrap the returned VkInstance object
4751 // as it sees fit.
4752 // The instance chain is terminated by a loader function
4753 // that calls CreateInstance on all available ICDs and
4754 // caches those VkInstance objects for future use.
4755 VkResult loader_create_instance_chain(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
4756                                       struct loader_instance *inst, VkInstance *created_instance) {
4757     uint32_t num_activated_layers = 0;
4758     struct activated_layer_info *activated_layers = NULL;
4759     VkLayerInstanceCreateInfo chain_info;
4760     VkLayerInstanceLink *layer_instance_link_info = NULL;
4761     VkInstanceCreateInfo loader_create_info;
4762     VkResult res;
4763 
4764     PFN_vkGetInstanceProcAddr next_gipa = loader_gpa_instance_terminator;
4765     PFN_vkGetInstanceProcAddr cur_gipa = loader_gpa_instance_terminator;
4766     PFN_vkGetDeviceProcAddr cur_gdpa = loader_gpa_device_terminator;
4767     PFN_GetPhysicalDeviceProcAddr next_gpdpa = loader_gpdpa_instance_terminator;
4768     PFN_GetPhysicalDeviceProcAddr cur_gpdpa = loader_gpdpa_instance_terminator;
4769 
4770     memcpy(&loader_create_info, pCreateInfo, sizeof(VkInstanceCreateInfo));
4771 
4772     if (inst->expanded_activated_layer_list.count > 0) {
4773         chain_info.u.pLayerInfo = NULL;
4774         chain_info.pNext = pCreateInfo->pNext;
4775         chain_info.sType = VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO;
4776         chain_info.function = VK_LAYER_LINK_INFO;
4777         loader_create_info.pNext = &chain_info;
4778 
4779         layer_instance_link_info = loader_stack_alloc(sizeof(VkLayerInstanceLink) * inst->expanded_activated_layer_list.count);
4780         if (!layer_instance_link_info) {
4781             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
4782                        "loader_create_instance_chain: Failed to alloc Instance objects for layer");
4783             return VK_ERROR_OUT_OF_HOST_MEMORY;
4784         }
4785 
4786         activated_layers = loader_stack_alloc(sizeof(struct activated_layer_info) * inst->expanded_activated_layer_list.count);
4787         if (!activated_layers) {
4788             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
4789                        "loader_create_instance_chain: Failed to alloc activated layer storage array");
4790             return VK_ERROR_OUT_OF_HOST_MEMORY;
4791         }
4792 
4793         // Create instance chain of enabled layers
4794         for (int32_t i = inst->expanded_activated_layer_list.count - 1; i >= 0; i--) {
4795             struct loader_layer_properties *layer_prop = inst->expanded_activated_layer_list.list[i];
4796             loader_platform_dl_handle lib_handle;
4797 
4798             // Skip it if a layer with the same name has already been successfully activated
4799             if (loader_names_array_has_layer_property(&layer_prop->info, num_activated_layers, activated_layers)) {
4800                 continue;
4801             }
4802 
4803             lib_handle = loader_open_layer_file(inst, layer_prop);
4804             if (layer_prop->lib_status == LOADER_LAYER_LIB_ERROR_OUT_OF_MEMORY) {
4805                 return VK_ERROR_OUT_OF_HOST_MEMORY;
4806             }
4807             if (!lib_handle) {
4808                 continue;
4809             }
4810 
4811             if (NULL == layer_prop->functions.negotiate_layer_interface) {
4812                 PFN_vkNegotiateLoaderLayerInterfaceVersion negotiate_interface = NULL;
4813                 bool functions_in_interface = false;
4814                 if (!layer_prop->functions.str_negotiate_interface || strlen(layer_prop->functions.str_negotiate_interface) == 0) {
4815                     negotiate_interface = (PFN_vkNegotiateLoaderLayerInterfaceVersion)loader_platform_get_proc_address(
4816                         lib_handle, "vkNegotiateLoaderLayerInterfaceVersion");
4817                 } else {
4818                     negotiate_interface = (PFN_vkNegotiateLoaderLayerInterfaceVersion)loader_platform_get_proc_address(
4819                         lib_handle, layer_prop->functions.str_negotiate_interface);
4820                 }
4821 
4822                 // If we can negotiate an interface version, then we can also
4823                 // get everything we need from the one function call, so try
4824                 // that first, and see if we can get all the function pointers
4825                 // necessary from that one call.
4826                 if (NULL != negotiate_interface) {
4827                     layer_prop->functions.negotiate_layer_interface = negotiate_interface;
4828 
4829                     VkNegotiateLayerInterface interface_struct;
4830 
4831                     if (loader_get_layer_interface_version(negotiate_interface, &interface_struct)) {
4832                         // Go ahead and set the properties version to the
4833                         // correct value.
4834                         layer_prop->interface_version = interface_struct.loaderLayerInterfaceVersion;
4835 
4836                         // If the interface is 2 or newer, we have access to the
4837                         // new GetPhysicalDeviceProcAddr function, so grab it,
4838                         // and the other necessary functions, from the
4839                         // structure.
4840                         if (interface_struct.loaderLayerInterfaceVersion > 1) {
4841                             cur_gipa = interface_struct.pfnGetInstanceProcAddr;
4842                             cur_gdpa = interface_struct.pfnGetDeviceProcAddr;
4843                             cur_gpdpa = interface_struct.pfnGetPhysicalDeviceProcAddr;
4844                             if (cur_gipa != NULL) {
4845                                 // We've set the functions, so make sure we
4846                                 // don't do the unnecessary calls later.
4847                                 functions_in_interface = true;
4848                             }
4849                         }
4850                     }
4851                 }
4852 
4853                 if (!functions_in_interface) {
4854                     if ((cur_gipa = layer_prop->functions.get_instance_proc_addr) == NULL) {
4855                         if (layer_prop->functions.str_gipa == NULL || strlen(layer_prop->functions.str_gipa) == 0) {
4856                             cur_gipa =
4857                                 (PFN_vkGetInstanceProcAddr)loader_platform_get_proc_address(lib_handle, "vkGetInstanceProcAddr");
4858                             layer_prop->functions.get_instance_proc_addr = cur_gipa;
4859 
4860                             if (NULL == cur_gipa) {
4861                                 loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_LAYER_BIT, 0,
4862                                            "loader_create_instance_chain: Failed to find \'vkGetInstanceProcAddr\' in layer \"%s\"",
4863                                            layer_prop->lib_name);
4864                                 continue;
4865                             }
4866                         } else {
4867                             cur_gipa = (PFN_vkGetInstanceProcAddr)loader_platform_get_proc_address(lib_handle,
4868                                                                                                    layer_prop->functions.str_gipa);
4869 
4870                             if (NULL == cur_gipa) {
4871                                 loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_LAYER_BIT, 0,
4872                                            "loader_create_instance_chain: Failed to find \'%s\' in layer \"%s\"",
4873                                            layer_prop->functions.str_gipa, layer_prop->lib_name);
4874                                 continue;
4875                             }
4876                         }
4877                     }
4878                 }
4879             }
4880 
4881             layer_instance_link_info[num_activated_layers].pNext = chain_info.u.pLayerInfo;
4882             layer_instance_link_info[num_activated_layers].pfnNextGetInstanceProcAddr = next_gipa;
4883             layer_instance_link_info[num_activated_layers].pfnNextGetPhysicalDeviceProcAddr = next_gpdpa;
4884             next_gipa = cur_gipa;
4885             if (layer_prop->interface_version > 1 && cur_gpdpa != NULL) {
4886                 layer_prop->functions.get_physical_device_proc_addr = cur_gpdpa;
4887                 next_gpdpa = cur_gpdpa;
4888             }
4889             if (layer_prop->interface_version > 1 && cur_gipa != NULL) {
4890                 layer_prop->functions.get_instance_proc_addr = cur_gipa;
4891             }
4892             if (layer_prop->interface_version > 1 && cur_gdpa != NULL) {
4893                 layer_prop->functions.get_device_proc_addr = cur_gdpa;
4894             }
4895 
4896             chain_info.u.pLayerInfo = &layer_instance_link_info[num_activated_layers];
4897 
4898             activated_layers[num_activated_layers].name = layer_prop->info.layerName;
4899             activated_layers[num_activated_layers].manifest = layer_prop->manifest_file_name;
4900             activated_layers[num_activated_layers].library = layer_prop->lib_name;
4901             activated_layers[num_activated_layers].is_implicit = !(layer_prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER);
4902             activated_layers[num_activated_layers].enabled_by_what = layer_prop->enabled_by_what;
4903             if (activated_layers[num_activated_layers].is_implicit) {
4904                 activated_layers[num_activated_layers].disable_env = layer_prop->disable_env_var.name;
4905                 activated_layers[num_activated_layers].enable_name_env = layer_prop->enable_env_var.name;
4906                 activated_layers[num_activated_layers].enable_value_env = layer_prop->enable_env_var.value;
4907             }
4908 
4909             loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Insert instance layer \"%s\" (%s)",
4910                        layer_prop->info.layerName, layer_prop->lib_name);
4911 
4912             num_activated_layers++;
4913         }
4914     }
4915 
4916     // Make sure each layer requested by the application was actually loaded
4917     for (uint32_t exp = 0; exp < inst->expanded_activated_layer_list.count; ++exp) {
4918         struct loader_layer_properties *exp_layer_prop = inst->expanded_activated_layer_list.list[exp];
4919         bool found = false;
4920         for (uint32_t act = 0; act < num_activated_layers; ++act) {
4921             if (!strcmp(activated_layers[act].name, exp_layer_prop->info.layerName)) {
4922                 found = true;
4923                 break;
4924             }
4925         }
4926         // If it wasn't found, we want to at least log an error.  However, if it was enabled by the application directly,
4927         // we want to return a bad layer error.
4928         if (!found) {
4929             bool app_requested = false;
4930             for (uint32_t act = 0; act < pCreateInfo->enabledLayerCount; ++act) {
4931                 if (!strcmp(pCreateInfo->ppEnabledLayerNames[act], exp_layer_prop->info.layerName)) {
4932                     app_requested = true;
4933                     break;
4934                 }
4935             }
4936             VkFlags log_flag = VULKAN_LOADER_LAYER_BIT;
4937             char ending = '.';
4938             if (app_requested) {
4939                 log_flag |= VULKAN_LOADER_ERROR_BIT;
4940                 ending = '!';
4941             } else {
4942                 log_flag |= VULKAN_LOADER_INFO_BIT;
4943             }
4944             switch (exp_layer_prop->lib_status) {
4945                 case LOADER_LAYER_LIB_NOT_LOADED:
4946                     loader_log(inst, log_flag, 0, "Requested layer \"%s\" was not loaded%c", exp_layer_prop->info.layerName,
4947                                ending);
4948                     break;
4949                 case LOADER_LAYER_LIB_ERROR_WRONG_BIT_TYPE: {
4950                     loader_log(inst, log_flag, 0, "Requested layer \"%s\" was wrong bit-type%c", exp_layer_prop->info.layerName,
4951                                ending);
4952                     break;
4953                 }
4954                 case LOADER_LAYER_LIB_ERROR_FAILED_TO_LOAD:
4955                     loader_log(inst, log_flag, 0, "Requested layer \"%s\" failed to load%c", exp_layer_prop->info.layerName,
4956                                ending);
4957                     break;
4958                 case LOADER_LAYER_LIB_SUCCESS_LOADED:
4959                 case LOADER_LAYER_LIB_ERROR_OUT_OF_MEMORY:
4960                     // Shouldn't be able to reach this, but if we do, it is best to report a debug message
4961                     loader_log(inst, log_flag, 0,
4962                                "Shouldn't reach this. A valid version of requested layer %s was loaded but was not found in the "
4963                                "list of activated layers%c",
4964                                exp_layer_prop->info.layerName, ending);
4965                     break;
4966             }
4967             if (app_requested) {
4968                 return VK_ERROR_LAYER_NOT_PRESENT;
4969             }
4970         }
4971     }
4972 
4973     VkLoaderFeatureFlags feature_flags = 0;
4974 #if defined(_WIN32)
4975     feature_flags = windows_initialize_dxgi();
4976 #endif
4977 
4978     // The following line of code is actually invalid at least according to the Vulkan spec with header update 1.2.193 and onwards.
4979     // The update required calls to vkGetInstanceProcAddr querying "global" functions (which includes vkCreateInstance) to pass NULL
4980     // for the instance parameter. Because it wasn't required to be NULL before, there may be layers which expect the loader's
4981     // behavior of passing a non-NULL value into vkGetInstanceProcAddr.
4982     // In an abundance of caution, the incorrect code remains as is, with a big comment to indicate that it's wrong.
4983     PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)next_gipa(*created_instance, "vkCreateInstance");
4984     if (fpCreateInstance) {
4985         VkLayerInstanceCreateInfo instance_dispatch;
4986         instance_dispatch.sType = VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO;
4987         instance_dispatch.pNext = loader_create_info.pNext;
4988         instance_dispatch.function = VK_LOADER_DATA_CALLBACK;
4989         instance_dispatch.u.pfnSetInstanceLoaderData = vkSetInstanceDispatch;
4990 
4991         VkLayerInstanceCreateInfo device_callback;
4992         device_callback.sType = VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO;
4993         device_callback.pNext = &instance_dispatch;
4994         device_callback.function = VK_LOADER_LAYER_CREATE_DEVICE_CALLBACK;
4995         device_callback.u.layerDevice.pfnLayerCreateDevice = loader_layer_create_device;
4996         device_callback.u.layerDevice.pfnLayerDestroyDevice = loader_layer_destroy_device;
4997 
4998         VkLayerInstanceCreateInfo loader_features;
4999         loader_features.sType = VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO;
5000         loader_features.pNext = &device_callback;
5001         loader_features.function = VK_LOADER_FEATURES;
5002         loader_features.u.loaderFeatures = feature_flags;
5003 
5004         loader_create_info.pNext = &loader_features;
5005 
5006         // If layer debugging is enabled, let's print out the full callstack with layers in their
5007         // defined order.
5008         loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "vkCreateInstance layer callstack setup to:");
5009         loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "   <Application>");
5010         loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "     ||");
5011         loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "   <Loader>");
5012         loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "     ||");
5013         for (uint32_t cur_layer = 0; cur_layer < num_activated_layers; ++cur_layer) {
5014             uint32_t index = num_activated_layers - cur_layer - 1;
5015             loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "   %s", activated_layers[index].name);
5016             loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "           Type: %s",
5017                        activated_layers[index].is_implicit ? "Implicit" : "Explicit");
5018             loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "           Enabled By: %s",
5019                        get_enabled_by_what_str(activated_layers[index].enabled_by_what));
5020             if (activated_layers[index].is_implicit) {
5021                 loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "               Disable Env Var:  %s",
5022                            activated_layers[index].disable_env);
5023                 if (activated_layers[index].enable_name_env) {
5024                     loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0,
5025                                "               This layer was enabled because Env Var %s was set to Value %s",
5026                                activated_layers[index].enable_name_env, activated_layers[index].enable_value_env);
5027                 }
5028             }
5029             loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "           Manifest: %s", activated_layers[index].manifest);
5030             loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "           Library:  %s", activated_layers[index].library);
5031             loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "     ||");
5032         }
5033         loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "   <Drivers>");
5034 
5035         res = fpCreateInstance(&loader_create_info, pAllocator, created_instance);
5036     } else {
5037         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_create_instance_chain: Failed to find \'vkCreateInstance\'");
5038         // Couldn't find CreateInstance function!
5039         res = VK_ERROR_INITIALIZATION_FAILED;
5040     }
5041 
5042     if (res == VK_SUCCESS) {
5043         // Copy the current disp table into the terminator_dispatch table so we can use it in loader_gpa_instance_terminator()
5044         memcpy(&inst->terminator_dispatch, &inst->disp->layer_inst_disp, sizeof(VkLayerInstanceDispatchTable));
5045 
5046         loader_init_instance_core_dispatch_table(&inst->disp->layer_inst_disp, next_gipa, *created_instance);
5047         inst->instance = *created_instance;
5048 
5049         if (pCreateInfo->enabledLayerCount > 0 && pCreateInfo->ppEnabledLayerNames != NULL) {
5050             res = create_string_list(inst, pCreateInfo->enabledLayerCount, &inst->enabled_layer_names);
5051             if (res != VK_SUCCESS) {
5052                 return res;
5053             }
5054 
5055             for (uint32_t i = 0; i < pCreateInfo->enabledLayerCount; ++i) {
5056                 res = copy_str_to_string_list(inst, &inst->enabled_layer_names, pCreateInfo->ppEnabledLayerNames[i],
5057                                               strlen(pCreateInfo->ppEnabledLayerNames[i]));
5058                 if (res != VK_SUCCESS) return res;
5059             }
5060         }
5061     }
5062 
5063     return res;
5064 }
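
// Illustrative sketch (not part of the loader) of the layer-side consumption of the
// VK_LAYER_LINK_INFO structure assembled above: a layer's vkCreateInstance finds the loader's
// chain info, remembers pfnNextGetInstanceProcAddr, advances u.pLayerInfo so the next element in
// the chain sees its own link, and then calls down. The example_layer_* name is hypothetical.
#if 0
VKAPI_ATTR VkResult VKAPI_CALL example_layer_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
                                                            const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
    // Find the loader's VK_LAYER_LINK_INFO entry in the pNext chain.
    VkLayerInstanceCreateInfo *chain_info = (VkLayerInstanceCreateInfo *)pCreateInfo->pNext;
    while (chain_info != NULL && !(chain_info->sType == VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO &&
                                   chain_info->function == VK_LAYER_LINK_INFO)) {
        chain_info = (VkLayerInstanceCreateInfo *)chain_info->pNext;
    }
    if (chain_info == NULL) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    PFN_vkGetInstanceProcAddr next_gipa = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    // Advance the link so the next layer (or the loader terminator) sees its own entry.
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    PFN_vkCreateInstance next_create_instance = (PFN_vkCreateInstance)next_gipa(NULL, "vkCreateInstance");
    if (next_create_instance == NULL) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }
    VkResult res = next_create_instance(pCreateInfo, pAllocator, pInstance);
    // On success the layer would typically build its dispatch table here using next_gipa and *pInstance.
    return res;
}
#endif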
5065 
5066 void loader_activate_instance_layer_extensions(struct loader_instance *inst, VkInstance created_inst) {
5067     loader_init_instance_extension_dispatch_table(&inst->disp->layer_inst_disp, inst->disp->layer_inst_disp.GetInstanceProcAddr,
5068                                                   created_inst);
5069 }
5070 
5071 #if defined(__APPLE__)
5072 VkResult loader_create_device_chain(const VkPhysicalDevice pd, const VkDeviceCreateInfo *pCreateInfo,
5073                                     const VkAllocationCallbacks *pAllocator, const struct loader_instance *inst,
5074                                     struct loader_device *dev, PFN_vkGetInstanceProcAddr callingLayer,
5075                                     PFN_vkGetDeviceProcAddr *layerNextGDPA) __attribute__((optnone)) {
5076 #else
5077 VkResult loader_create_device_chain(const VkPhysicalDevice pd, const VkDeviceCreateInfo *pCreateInfo,
5078                                     const VkAllocationCallbacks *pAllocator, const struct loader_instance *inst,
5079                                     struct loader_device *dev, PFN_vkGetInstanceProcAddr callingLayer,
5080                                     PFN_vkGetDeviceProcAddr *layerNextGDPA) {
5081 #endif
5082     uint32_t num_activated_layers = 0;
5083     struct activated_layer_info *activated_layers = NULL;
5084     VkLayerDeviceLink *layer_device_link_info;
5085     VkLayerDeviceCreateInfo chain_info;
5086     VkDeviceCreateInfo loader_create_info;
5087     VkDeviceGroupDeviceCreateInfo *original_device_group_create_info_struct = NULL;
5088     VkResult res;
5089 
5090     PFN_vkGetDeviceProcAddr fpGDPA = NULL, nextGDPA = loader_gpa_device_terminator;
5091     PFN_vkGetInstanceProcAddr fpGIPA = NULL, nextGIPA = loader_gpa_instance_terminator;
5092 
5093     memcpy(&loader_create_info, pCreateInfo, sizeof(VkDeviceCreateInfo));
5094 
5095     if (loader_create_info.enabledLayerCount > 0 && loader_create_info.ppEnabledLayerNames != NULL) {
5096         bool invalid_device_layer_usage = false;
5097 
5098         if (loader_create_info.enabledLayerCount != inst->enabled_layer_names.count && loader_create_info.enabledLayerCount > 0) {
5099             invalid_device_layer_usage = true;
5100         } else if (loader_create_info.enabledLayerCount > 0 && loader_create_info.ppEnabledLayerNames == NULL) {
5101             invalid_device_layer_usage = true;
5102         } else if (loader_create_info.enabledLayerCount == 0 && loader_create_info.ppEnabledLayerNames != NULL) {
5103             invalid_device_layer_usage = true;
5104         } else if (inst->enabled_layer_names.list != NULL) {
5105             for (uint32_t i = 0; i < loader_create_info.enabledLayerCount; i++) {
5106                 const char *device_layer_names = loader_create_info.ppEnabledLayerNames[i];
5107 
5108                 if (strcmp(device_layer_names, inst->enabled_layer_names.list[i]) != 0) {
5109                     invalid_device_layer_usage = true;
5110                     break;
5111                 }
5112             }
5113         }
5114 
5115         if (invalid_device_layer_usage) {
5116             loader_log(
5117                 inst, VULKAN_LOADER_WARN_BIT, 0,
5118                 "loader_create_device_chain: Using deprecated and ignored 'ppEnabledLayerNames' member of 'VkDeviceCreateInfo' "
5119                 "when creating a Vulkan device.");
5120         }
5121     }
5122 
5123     // Before we continue, we need to find out if the VK_KHR_device_group extension is in use.  If it is, we then need to
5124     // look for the corresponding VkDeviceGroupDeviceCreateInfo struct in the device create info's pNext chain.  This is
5125     // because we need to replace all the incoming physical device values (which are really loader trampoline physical device
5126     // values) with the layer/ICD version.
5127     {
5128         VkBaseOutStructure *pNext = (VkBaseOutStructure *)loader_create_info.pNext;
5129         VkBaseOutStructure *pPrev = (VkBaseOutStructure *)&loader_create_info;
5130         while (NULL != pNext) {
5131             if (VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO == pNext->sType) {
5132                 VkDeviceGroupDeviceCreateInfo *cur_struct = (VkDeviceGroupDeviceCreateInfo *)pNext;
5133                 if (0 < cur_struct->physicalDeviceCount && NULL != cur_struct->pPhysicalDevices) {
5134                     VkDeviceGroupDeviceCreateInfo *temp_struct = loader_stack_alloc(sizeof(VkDeviceGroupDeviceCreateInfo));
5135                     VkPhysicalDevice *phys_dev_array = NULL;
5136                     if (NULL == temp_struct) {
5137                         return VK_ERROR_OUT_OF_HOST_MEMORY;
5138                     }
5139                     memcpy(temp_struct, cur_struct, sizeof(VkDeviceGroupDeviceCreateInfo));
5140                     phys_dev_array = loader_stack_alloc(sizeof(VkPhysicalDevice) * cur_struct->physicalDeviceCount);
5141                     if (NULL == phys_dev_array) {
5142                         return VK_ERROR_OUT_OF_HOST_MEMORY;
5143                     }
5144 
5145                     // Before calling down, replace the incoming physical device values (which are really loader trampoline
5146                     // physical devices) with the next layer (or possibly even the terminator) physical device values.
5147                     struct loader_physical_device_tramp *cur_tramp;
5148                     for (uint32_t phys_dev = 0; phys_dev < cur_struct->physicalDeviceCount; phys_dev++) {
5149                         cur_tramp = (struct loader_physical_device_tramp *)cur_struct->pPhysicalDevices[phys_dev];
5150                         phys_dev_array[phys_dev] = cur_tramp->phys_dev;
5151                     }
5152                     temp_struct->pPhysicalDevices = phys_dev_array;
5153 
5154                     original_device_group_create_info_struct = (VkDeviceGroupDeviceCreateInfo *)pPrev->pNext;
5155 
5156                     // Replace the old struct in the pNext chain with this one.
5157                     pPrev->pNext = (VkBaseOutStructure *)temp_struct;
5158                 }
5159                 break;
5160             }
5161 
5162             pPrev = pNext;
5163             pNext = pNext->pNext;
5164         }
5165     }
5166     if (inst->expanded_activated_layer_list.count > 0) {
5167         layer_device_link_info = loader_stack_alloc(sizeof(VkLayerDeviceLink) * inst->expanded_activated_layer_list.count);
5168         if (!layer_device_link_info) {
5169             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
5170                        "loader_create_device_chain: Failed to alloc Device objects for layer. Skipping Layer.");
5171             return VK_ERROR_OUT_OF_HOST_MEMORY;
5172         }
5173 
5174         activated_layers = loader_stack_alloc(sizeof(struct activated_layer_info) * inst->expanded_activated_layer_list.count);
5175         if (!activated_layers) {
5176             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
5177                        "loader_create_device_chain: Failed to alloc activated layer storage array");
5178             return VK_ERROR_OUT_OF_HOST_MEMORY;
5179         }
5180 
5181         chain_info.sType = VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO;
5182         chain_info.function = VK_LAYER_LINK_INFO;
5183         chain_info.u.pLayerInfo = NULL;
5184         chain_info.pNext = loader_create_info.pNext;
5185         loader_create_info.pNext = &chain_info;
5186 
5187         // Create instance chain of enabled layers
5188         for (int32_t i = inst->expanded_activated_layer_list.count - 1; i >= 0; i--) {
5189             struct loader_layer_properties *layer_prop = inst->expanded_activated_layer_list.list[i];
5190             loader_platform_dl_handle lib_handle = layer_prop->lib_handle;
5191 
5192             // Skip it if a layer with the same name has already been successfully activated
5193             if (loader_names_array_has_layer_property(&layer_prop->info, num_activated_layers, activated_layers)) {
5194                 continue;
5195             }
5196 
5197             // Skip the layer if the handle is NULL - this is likely because the library failed to load but wasn't removed from
5198             // the list.
5199             if (!lib_handle) {
5200                 continue;
5201             }
5202 
5203             // The Get*ProcAddr pointers will already be filled in if they were received from either the json file or the
5204             // version negotiation
5205             if ((fpGIPA = layer_prop->functions.get_instance_proc_addr) == NULL) {
5206                 if (layer_prop->functions.str_gipa == NULL || strlen(layer_prop->functions.str_gipa) == 0) {
5207                     fpGIPA = (PFN_vkGetInstanceProcAddr)loader_platform_get_proc_address(lib_handle, "vkGetInstanceProcAddr");
5208                     layer_prop->functions.get_instance_proc_addr = fpGIPA;
5209                 } else
5210                     fpGIPA =
5211                         (PFN_vkGetInstanceProcAddr)loader_platform_get_proc_address(lib_handle, layer_prop->functions.str_gipa);
5212                 if (!fpGIPA) {
5213                     loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_LAYER_BIT, 0,
5214                                "loader_create_device_chain: Failed to find \'vkGetInstanceProcAddr\' in layer \"%s\".  "
5215                                "Skipping layer.",
5216                                layer_prop->lib_name);
5217                     continue;
5218                 }
5219             }
5220 
5221             if (fpGIPA == callingLayer) {
5222                 if (layerNextGDPA != NULL) {
5223                     *layerNextGDPA = nextGDPA;
5224                 }
5225                 // Break here: if fpGIPA is the same as callingLayer, a layer is itself trying to create a device, and we
5226                 // don't want to continue any further because the next layer in the chain would be the calling layer itself.
5227                 break;
5228             }
5229 
5230             if ((fpGDPA = layer_prop->functions.get_device_proc_addr) == NULL) {
5231                 if (layer_prop->functions.str_gdpa == NULL || strlen(layer_prop->functions.str_gdpa) == 0) {
5232                     fpGDPA = (PFN_vkGetDeviceProcAddr)loader_platform_get_proc_address(lib_handle, "vkGetDeviceProcAddr");
5233                     layer_prop->functions.get_device_proc_addr = fpGDPA;
5234                 } else
5235                     fpGDPA = (PFN_vkGetDeviceProcAddr)loader_platform_get_proc_address(lib_handle, layer_prop->functions.str_gdpa);
5236                 if (!fpGDPA) {
5237                     loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0,
5238                                "Failed to find vkGetDeviceProcAddr in layer \"%s\"", layer_prop->lib_name);
5239                     continue;
5240                 }
5241             }
5242 
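            // Link this layer into the chain: record the current "next" GIPA/GDPA in this layer's link entry, then make
            // this layer's own entry points the "next" ones for the layer processed after it.  Because the list is walked
            // in reverse, the chain is built from the driver side up toward the application.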
5243             layer_device_link_info[num_activated_layers].pNext = chain_info.u.pLayerInfo;
5244             layer_device_link_info[num_activated_layers].pfnNextGetInstanceProcAddr = nextGIPA;
5245             layer_device_link_info[num_activated_layers].pfnNextGetDeviceProcAddr = nextGDPA;
5246             chain_info.u.pLayerInfo = &layer_device_link_info[num_activated_layers];
5247             nextGIPA = fpGIPA;
5248             nextGDPA = fpGDPA;
5249 
5250             activated_layers[num_activated_layers].name = layer_prop->info.layerName;
5251             activated_layers[num_activated_layers].manifest = layer_prop->manifest_file_name;
5252             activated_layers[num_activated_layers].library = layer_prop->lib_name;
5253             activated_layers[num_activated_layers].is_implicit = !(layer_prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER);
5254             activated_layers[num_activated_layers].enabled_by_what = layer_prop->enabled_by_what;
5255             if (activated_layers[num_activated_layers].is_implicit) {
5256                 activated_layers[num_activated_layers].disable_env = layer_prop->disable_env_var.name;
5257             }
5258 
5259             loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Inserted device layer \"%s\" (%s)",
5260                        layer_prop->info.layerName, layer_prop->lib_name);
5261 
5262             num_activated_layers++;
5263         }
5264     }
5265 
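    // At this point nextGIPA/nextGDPA refer to the topmost activated layer, or remain the values they were initialized
    // with before this block (the loader's own dispatch path) when no layers were inserted, so the vkCreateDevice call
    // below enters the chain at the correct point.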
5266     VkDevice created_device = (VkDevice)dev;
5267     PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)nextGIPA(inst->instance, "vkCreateDevice");
5268     if (fpCreateDevice) {
5269         VkLayerDeviceCreateInfo create_info_disp;
5270 
5271         create_info_disp.sType = VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO;
5272         create_info_disp.function = VK_LOADER_DATA_CALLBACK;
5273 
5274         create_info_disp.u.pfnSetDeviceLoaderData = vkSetDeviceDispatch;
5275 
5276         // If layer debugging is enabled, let's print out the full callstack with layers in their
5277         // defined order.
5278         uint32_t layer_driver_bits = VULKAN_LOADER_LAYER_BIT | VULKAN_LOADER_DRIVER_BIT;
5279         loader_log(inst, layer_driver_bits, 0, "vkCreateDevice layer callstack setup to:");
5280         loader_log(inst, layer_driver_bits, 0, "   <Application>");
5281         loader_log(inst, layer_driver_bits, 0, "     ||");
5282         loader_log(inst, layer_driver_bits, 0, "   <Loader>");
5283         loader_log(inst, layer_driver_bits, 0, "     ||");
5284         for (uint32_t cur_layer = 0; cur_layer < num_activated_layers; ++cur_layer) {
5285             uint32_t index = num_activated_layers - cur_layer - 1;
5286             loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "   %s", activated_layers[index].name);
5287             loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "           Type: %s",
5288                        activated_layers[index].is_implicit ? "Implicit" : "Explicit");
5289             loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "           Enabled By: %s",
5290                        get_enabled_by_what_str(activated_layers[index].enabled_by_what));
5291             if (activated_layers[index].is_implicit) {
5292                 loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "               Disable Env Var:  %s",
5293                            activated_layers[index].disable_env);
5294             }
5295             loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "           Manifest: %s", activated_layers[index].manifest);
5296             loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "           Library:  %s", activated_layers[index].library);
5297             loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "     ||");
5298         }
5299         loader_log(inst, layer_driver_bits, 0, "   <Device>");
5300         create_info_disp.pNext = loader_create_info.pNext;
5301         loader_create_info.pNext = &create_info_disp;
5302         res = fpCreateDevice(pd, &loader_create_info, pAllocator, &created_device);
5303         if (res != VK_SUCCESS) {
5304             return res;
5305         }
5306         dev->chain_device = created_device;
5307 
5308         // Because we changed the pNext chain to use our own VkDeviceGroupDeviceCreateInfo, we need to fixup the chain to
5309         // point back at the original VkDeviceGroupDeviceCreateInfo.
5310         VkBaseOutStructure *pNext = (VkBaseOutStructure *)loader_create_info.pNext;
5311         VkBaseOutStructure *pPrev = (VkBaseOutStructure *)&loader_create_info;
5312         while (NULL != pNext) {
5313             if (VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO == pNext->sType) {
5314                 VkDeviceGroupDeviceCreateInfo *cur_struct = (VkDeviceGroupDeviceCreateInfo *)pNext;
5315                 if (0 < cur_struct->physicalDeviceCount && NULL != cur_struct->pPhysicalDevices) {
5316                     pPrev->pNext = (VkBaseOutStructure *)original_device_group_create_info_struct;
5317                 }
5318                 break;
5319             }
5320 
5321             pPrev = pNext;
5322             pNext = pNext->pNext;
5323         }
5324 
5325     } else {
5326         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
5327                    "loader_create_device_chain: Failed to find \'vkCreateDevice\' in layers or ICD");
5328         // Couldn't find CreateDevice function!
5329         return VK_ERROR_INITIALIZATION_FAILED;
5330     }
5331 
5332     // Initialize device dispatch table
5333     loader_init_device_dispatch_table(&dev->loader_dispatch, nextGDPA, dev->chain_device);
5334     // Initialize the dispatch table to functions which need terminators
5335     // These functions point directly to the driver, not the terminator functions
5336     init_extension_device_proc_terminator_dispatch(dev);
5337 
5338     return res;
5339 }
5340 
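// Verify that every layer name requested by the application is a well-formed string, exists in the provided layer list,
// and is not explicitly blocked by the loader settings file.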
5341 VkResult loader_validate_layers(const struct loader_instance *inst, const uint32_t layer_count,
5342                                 const char *const *ppEnabledLayerNames, const struct loader_layer_list *list) {
5343     struct loader_layer_properties *prop;
5344 
5345     if (layer_count > 0 && ppEnabledLayerNames == NULL) {
5346         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
5347                    "loader_validate_layers: ppEnabledLayerNames is NULL but enabledLayerCount is greater than zero");
5348         return VK_ERROR_LAYER_NOT_PRESENT;
5349     }
5350 
5351     for (uint32_t i = 0; i < layer_count; i++) {
5352         VkStringErrorFlags result = vk_string_validate(MaxLoaderStringLength, ppEnabledLayerNames[i]);
5353         if (result != VK_STRING_ERROR_NONE) {
5354             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
5355                        "loader_validate_layers: ppEnabledLayerNames contains string that is too long or is badly formed");
5356             return VK_ERROR_LAYER_NOT_PRESENT;
5357         }
5358 
5359         prop = loader_find_layer_property(ppEnabledLayerNames[i], list);
5360         if (NULL == prop) {
5361             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
5362                        "loader_validate_layers: Layer %d does not exist in the list of available layers", i);
5363             return VK_ERROR_LAYER_NOT_PRESENT;
5364         }
5365         if (inst->settings.settings_active && prop->settings_control_value != LOADER_SETTINGS_LAYER_CONTROL_ON &&
5366             prop->settings_control_value != LOADER_SETTINGS_LAYER_CONTROL_DEFAULT) {
5367             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
5368                        "loader_validate_layers: Layer %d was explicitly prevented from being enabled by the loader settings file",
5369                        i);
5370             return VK_ERROR_LAYER_NOT_PRESENT;
5371         }
5372     }
5373     return VK_SUCCESS;
5374 }
5375 
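// Verify each requested instance extension against the ICDs' extension list and the extensions provided by the layers
// that will be activated.  Unless VK_LOADER_DISABLE_INST_EXT_FILTER is set, extensions unknown to the loader are also
// rejected.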
5376 VkResult loader_validate_instance_extensions(struct loader_instance *inst, const struct loader_extension_list *icd_exts,
5377                                              const struct loader_layer_list *instance_layers,
5378                                              const struct loader_envvar_all_filters *layer_filters,
5379                                              const VkInstanceCreateInfo *pCreateInfo) {
5380     VkExtensionProperties *extension_prop;
5381     char *env_value;
5382     bool check_if_known = true;
5383     VkResult res = VK_SUCCESS;
5384 
5385     struct loader_pointer_layer_list active_layers = {0};
5386     struct loader_pointer_layer_list expanded_layers = {0};
5387 
5388     if (pCreateInfo->enabledExtensionCount > 0 && pCreateInfo->ppEnabledExtensionNames == NULL) {
5389         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
5390                    "loader_validate_instance_extensions: Instance ppEnabledExtensionNames is NULL but enabledExtensionCount is "
5391                    "greater than zero");
5392         return VK_ERROR_EXTENSION_NOT_PRESENT;
5393     }
5394     if (!loader_init_pointer_layer_list(inst, &active_layers)) {
5395         res = VK_ERROR_OUT_OF_HOST_MEMORY;
5396         goto out;
5397     }
5398     if (!loader_init_pointer_layer_list(inst, &expanded_layers)) {
5399         res = VK_ERROR_OUT_OF_HOST_MEMORY;
5400         goto out;
5401     }
5402 
5403     if (inst->settings.settings_active) {
5404         res = enable_correct_layers_from_settings(inst, layer_filters, pCreateInfo->enabledLayerCount,
5405                                                   pCreateInfo->ppEnabledLayerNames, instance_layers, &active_layers,
5406                                                   &expanded_layers);
5407         if (res != VK_SUCCESS) {
5408             goto out;
5409         }
5410     } else {
5411         // Build the lists of active layers (including meta layers) and expanded layers (with meta layers resolved to their
5412         // components)
5413         res = loader_add_implicit_layers(inst, layer_filters, &active_layers, &expanded_layers, instance_layers);
5414         if (res != VK_SUCCESS) {
5415             goto out;
5416         }
5417         res = loader_add_environment_layers(inst, VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER, layer_filters, &active_layers,
5418                                             &expanded_layers, instance_layers);
5419         if (res != VK_SUCCESS) {
5420             goto out;
5421         }
5422         res = loader_add_layer_names_to_list(inst, layer_filters, &active_layers, &expanded_layers, pCreateInfo->enabledLayerCount,
5423                                              pCreateInfo->ppEnabledLayerNames, instance_layers);
5424         if (VK_SUCCESS != res) {
5425             goto out;
5426         }
5427     }
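    // With the active/expanded layer lists built, validate each requested instance extension against the loader's known
    // extension list, the ICD extensions, and the enabled layers' instance extension lists.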
5428     for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
5429         VkStringErrorFlags result = vk_string_validate(MaxLoaderStringLength, pCreateInfo->ppEnabledExtensionNames[i]);
5430         if (result != VK_STRING_ERROR_NONE) {
5431             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
5432                        "loader_validate_instance_extensions: Instance ppEnabledExtensionNames contains "
5433                        "string that is too long or is badly formed");
5434             res = VK_ERROR_EXTENSION_NOT_PRESENT;
5435             goto out;
5436         }
5437 
5438         // Check if a user wants to disable the instance extension filtering behavior
5439         env_value = loader_getenv("VK_LOADER_DISABLE_INST_EXT_FILTER", inst);
5440         if (NULL != env_value && atoi(env_value) != 0) {
5441             check_if_known = false;
5442         }
5443         loader_free_getenv(env_value, inst);
5444 
5445         if (check_if_known) {
5446             // See if the extension is in the list of supported extensions
5447             bool found = false;
5448             for (uint32_t j = 0; LOADER_INSTANCE_EXTENSIONS[j] != NULL; j++) {
5449                 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], LOADER_INSTANCE_EXTENSIONS[j]) == 0) {
5450                     found = true;
5451                     break;
5452                 }
5453             }
5454 
5455             // If it isn't in the list, return an error
5456             if (!found) {
5457                 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
5458                            "loader_validate_instance_extensions: Extension %s not found in list of known instance extensions.",
5459                            pCreateInfo->ppEnabledExtensionNames[i]);
5460                 res = VK_ERROR_EXTENSION_NOT_PRESENT;
5461                 goto out;
5462             }
5463         }
5464 
5465         extension_prop = get_extension_property(pCreateInfo->ppEnabledExtensionNames[i], icd_exts);
5466 
5467         if (extension_prop) {
5468             continue;
5469         }
5470 
5471         extension_prop = NULL;
5472 
5473         // Not in global list, search layer extension lists
5474         for (uint32_t j = 0; NULL == extension_prop && j < expanded_layers.count; ++j) {
5475             extension_prop =
5476                 get_extension_property(pCreateInfo->ppEnabledExtensionNames[i], &expanded_layers.list[j]->instance_extension_list);
5477             if (extension_prop) {
5478                 // Found the extension in one of the layers enabled by the app.
5479                 break;
5480             }
5481 
5482             struct loader_layer_properties *layer_prop =
5483                 loader_find_layer_property(expanded_layers.list[j]->info.layerName, instance_layers);
5484             if (NULL == layer_prop) {
5485                 // Should NOT get here, loader_validate_layers should have already filtered this case out.
5486                 continue;
5487             }
5488         }
5489 
5490         if (!extension_prop) {
5491             // Didn't find the extension in the ICD list or in any enabled layer's extension list, error out
5492             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
5493                        "loader_validate_instance_extensions: Instance extension %s not supported by available ICDs or enabled "
5494                        "layers.",
5495                        pCreateInfo->ppEnabledExtensionNames[i]);
5496             res = VK_ERROR_EXTENSION_NOT_PRESENT;
5497             goto out;
5498         }
5499     }
5500 
5501 out:
5502     loader_destroy_pointer_layer_list(inst, &active_layers);
5503     loader_destroy_pointer_layer_list(inst, &expanded_layers);
5504     return res;
5505 }
5506 
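// Verify that each requested device extension is provided either by the ICD (physical device) or by one of the
// activated device layers.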
5507 VkResult loader_validate_device_extensions(struct loader_instance *this_instance,
5508                                            const struct loader_pointer_layer_list *activated_device_layers,
5509                                            const struct loader_extension_list *icd_exts, const VkDeviceCreateInfo *pCreateInfo) {
5510     // Early out to prevent a NULL pointer dereference
5511     if (pCreateInfo->enabledExtensionCount == 0 || pCreateInfo->ppEnabledExtensionNames == NULL) {
5512         return VK_SUCCESS;
5513     }
5514     for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
5515         if (pCreateInfo->ppEnabledExtensionNames[i] == NULL) {
5516             continue;
5517         }
5518         VkStringErrorFlags result = vk_string_validate(MaxLoaderStringLength, pCreateInfo->ppEnabledExtensionNames[i]);
5519         if (result != VK_STRING_ERROR_NONE) {
5520             loader_log(this_instance, VULKAN_LOADER_ERROR_BIT, 0,
5521                        "loader_validate_device_extensions: Device ppEnabledExtensionNames contains "
5522                        "string that is too long or is badly formed");
5523             return VK_ERROR_EXTENSION_NOT_PRESENT;
5524         }
5525 
5526         const char *extension_name = pCreateInfo->ppEnabledExtensionNames[i];
5527         VkExtensionProperties *extension_prop = get_extension_property(extension_name, icd_exts);
5528 
5529         if (extension_prop) {
5530             continue;
5531         }
5532 
5533         // Not in global list, search activated layer extension lists
5534         for (uint32_t j = 0; j < activated_device_layers->count; j++) {
5535             struct loader_layer_properties *layer_prop = activated_device_layers->list[j];
5536 
5537             extension_prop = get_dev_extension_property(extension_name, &layer_prop->device_extension_list);
5538             if (extension_prop) {
5539                 // Found the extension in one of the layers enabled by the app.
5540                 break;
5541             }
5542         }
5543 
5544         if (!extension_prop) {
5545             // Didn't find extension name in any of the device layers, error out
5546             loader_log(this_instance, VULKAN_LOADER_ERROR_BIT, 0,
5547                        "loader_validate_device_extensions: Device extension %s not supported by selected physical device "
5548                        "or enabled layers.",
5549                        pCreateInfo->ppEnabledExtensionNames[i]);
5550             return VK_ERROR_EXTENSION_NOT_PRESENT;
5551         }
5552     }
5553     return VK_SUCCESS;
5554 }
5555 
5556 // Terminator functions for the Instance chain
5557 // All named terminator_<Vulkan API name>
5558 VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
5559                                                          const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
5560     struct loader_icd_term *icd_term;
5561     VkExtensionProperties *prop;
5562     char **filtered_extension_names = NULL;
5563     VkInstanceCreateInfo icd_create_info;
5564     VkResult res = VK_SUCCESS;
5565     bool one_icd_successful = false;
5566 
5567     struct loader_instance *ptr_instance = (struct loader_instance *)*pInstance;
5568     if (NULL == ptr_instance) {
5569         loader_log(ptr_instance, VULKAN_LOADER_WARN_BIT, 0,
5570                    "terminator_CreateInstance: Loader instance pointer null encountered.  Possibly set by active layer. (Policy "
5571                    "#LLP_LAYER_21)");
5572     } else if (LOADER_MAGIC_NUMBER != ptr_instance->magic) {
5573         loader_log(ptr_instance, VULKAN_LOADER_WARN_BIT, 0,
5574                    "terminator_CreateInstance: Instance pointer (%p) has invalid MAGIC value 0x%08" PRIx64
5575                    ". Instance value possibly "
5576                    "corrupted by active layer (Policy #LLP_LAYER_21).  ",
5577                    ptr_instance, ptr_instance->magic);
5578     }
5579 
5580     // Save the application version if it has been modified - layers sometimes need features in newer API versions than
5581     // what the application requested, and thus will increase the instance version to a level that suits their needs.
5582     if (pCreateInfo->pApplicationInfo && pCreateInfo->pApplicationInfo->apiVersion) {
5583         loader_api_version altered_version = loader_make_version(pCreateInfo->pApplicationInfo->apiVersion);
5584         if (altered_version.major != ptr_instance->app_api_version.major ||
5585             altered_version.minor != ptr_instance->app_api_version.minor) {
5586             ptr_instance->app_api_version = altered_version;
5587         }
5588     }
5589 
5590     memcpy(&icd_create_info, pCreateInfo, sizeof(icd_create_info));
5591 
5592     icd_create_info.enabledLayerCount = 0;
5593     icd_create_info.ppEnabledLayerNames = NULL;
5594 
5595     // NOTE: Need to filter the extensions to only those supported by the ICD.
5596     //       No ICD will advertise support for layers. An ICD library could
5597     //       support a layer, but it would be independent of the actual ICD,
5598     //       just in the same library.
5599     uint32_t extension_count = pCreateInfo->enabledExtensionCount;
5600 #if defined(LOADER_ENABLE_LINUX_SORT)
5601     extension_count += 1;
5602 #endif  // LOADER_ENABLE_LINUX_SORT
5603     filtered_extension_names = loader_stack_alloc(extension_count * sizeof(char *));
5604     if (!filtered_extension_names) {
5605         loader_log(ptr_instance, VULKAN_LOADER_ERROR_BIT, 0,
5606                    "terminator_CreateInstance: Failed to create extension name array for %d extensions", extension_count);
5607         res = VK_ERROR_OUT_OF_HOST_MEMORY;
5608         goto out;
5609     }
5610     icd_create_info.ppEnabledExtensionNames = (const char *const *)filtered_extension_names;
5611 
5612     // Determine if Get Physical Device Properties 2 is available to this Instance
5613     if (pCreateInfo->pApplicationInfo && pCreateInfo->pApplicationInfo->apiVersion >= VK_API_VERSION_1_1) {
5614         ptr_instance->supports_get_dev_prop_2 = true;
5615     } else {
5616         for (uint32_t j = 0; j < pCreateInfo->enabledExtensionCount; j++) {
5617             if (!strcmp(pCreateInfo->ppEnabledExtensionNames[j], VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
5618                 ptr_instance->supports_get_dev_prop_2 = true;
5619                 break;
5620             }
5621         }
5622     }
5623 
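    // Walk every scanned ICD, attempting to create a driver instance for each one.  Out-of-host-memory aborts the whole
    // call; any other per-ICD failure simply removes that ICD from the chain and moves on to the next driver.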
5624     for (uint32_t i = 0; i < ptr_instance->icd_tramp_list.count; i++) {
5625         icd_term = loader_icd_add(ptr_instance, &ptr_instance->icd_tramp_list.scanned_list[i]);
5626         if (NULL == icd_term) {
5627             loader_log(ptr_instance, VULKAN_LOADER_ERROR_BIT, 0,
5628                        "terminator_CreateInstance: Failed to add ICD %d to the ICD terminator list.", i);
5629             res = VK_ERROR_OUT_OF_HOST_MEMORY;
5630             goto out;
5631         }
5632 
5633         // If any error happens after here, we need to remove the ICD from the list,
5634         // because we've already added it, but haven't validated it
5635 
5636         // Make sure that we reset the pApplicationInfo so we don't get an old pointer
5637         icd_create_info.pApplicationInfo = pCreateInfo->pApplicationInfo;
5638         icd_create_info.enabledExtensionCount = 0;
5639         struct loader_extension_list icd_exts = {0};
5640 
5641         // Build the list of this ICD's instance extensions, adding non-duplicate entries as they are enumerated
5642         res = loader_init_generic_list(ptr_instance, (struct loader_generic_list *)&icd_exts, sizeof(VkExtensionProperties));
5643         if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
5644             // If out of memory, bail immediately.
5645             goto out;
5646         } else if (VK_SUCCESS != res) {
5647             // Something bad happened with this ICD, so free it and try the
5648             // next.
5649             ptr_instance->icd_terms = icd_term->next;
5650             icd_term->next = NULL;
5651             loader_icd_destroy(ptr_instance, icd_term, pAllocator);
5652             continue;
5653         }
5654 
5655         res = loader_add_instance_extensions(ptr_instance, icd_term->scanned_icd->EnumerateInstanceExtensionProperties,
5656                                              icd_term->scanned_icd->lib_name, &icd_exts);
5657         if (VK_SUCCESS != res) {
5658             loader_destroy_generic_list(ptr_instance, (struct loader_generic_list *)&icd_exts);
5659             if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
5660                 // If out of memory, bail immediately.
5661                 goto out;
5662             } else {
5663                 // Something bad happened with this ICD, so free it and try the next.
5664                 ptr_instance->icd_terms = icd_term->next;
5665                 icd_term->next = NULL;
5666                 loader_icd_destroy(ptr_instance, icd_term, pAllocator);
5667                 continue;
5668             }
5669         }
5670 
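        // Keep only the application-requested extensions that this ICD actually advertises.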
5671         for (uint32_t j = 0; j < pCreateInfo->enabledExtensionCount; j++) {
5672             prop = get_extension_property(pCreateInfo->ppEnabledExtensionNames[j], &icd_exts);
5673             if (prop) {
5674                 filtered_extension_names[icd_create_info.enabledExtensionCount] = (char *)pCreateInfo->ppEnabledExtensionNames[j];
5675                 icd_create_info.enabledExtensionCount++;
5676             }
5677         }
5678 #if defined(LOADER_ENABLE_LINUX_SORT)
5679         // Force on "VK_KHR_get_physical_device_properties2" for Linux since we use it for GPU sorting.  This is only
5680         // needed when the API version of either the application or the driver does not natively include the core
5681         // vkGetPhysicalDeviceProperties2 entrypoint.
5682         if ((ptr_instance->app_api_version.major == 1 && ptr_instance->app_api_version.minor == 0) ||
5683             (VK_API_VERSION_MAJOR(icd_term->scanned_icd->api_version) == 1 &&
5684              VK_API_VERSION_MINOR(icd_term->scanned_icd->api_version) == 0)) {
5685             prop = get_extension_property(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, &icd_exts);
5686             if (prop) {
5687                 filtered_extension_names[icd_create_info.enabledExtensionCount] =
5688                     (char *)VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME;
5689                 icd_create_info.enabledExtensionCount++;
5690 
5691                 // At least one ICD supports this, so the instance should be able to support it
5692                 ptr_instance->supports_get_dev_prop_2 = true;
5693             }
5694         }
5695 #endif  // LOADER_ENABLE_LINUX_SORT
5696 
5697         // Determine if vkGetPhysicalDeviceProperties2 is available through this ICD
5698         // Also determine if VK_EXT_surface_maintenance1 is available on the ICD
5699         if (icd_term->scanned_icd->api_version >= VK_API_VERSION_1_1) {
5700             icd_term->supports_get_dev_prop_2 = true;
5701         }
5702         for (uint32_t j = 0; j < icd_create_info.enabledExtensionCount; j++) {
5703             if (!strcmp(filtered_extension_names[j], VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
5704                 icd_term->supports_get_dev_prop_2 = true;
5705                 continue;
5706             }
5707             if (!strcmp(filtered_extension_names[j], VK_EXT_SURFACE_MAINTENANCE_1_EXTENSION_NAME)) {
5708                 icd_term->supports_ext_surface_maintenance_1 = true;
5709                 continue;
5710             }
5711         }
5712 
5713         loader_destroy_generic_list(ptr_instance, (struct loader_generic_list *)&icd_exts);
5714 
5715         // Get the driver version from vkEnumerateInstanceVersion
5716         uint32_t icd_version = VK_API_VERSION_1_0;
5717         VkResult icd_result = VK_SUCCESS;
5718         if (icd_term->scanned_icd->api_version >= VK_API_VERSION_1_1) {
5719             PFN_vkEnumerateInstanceVersion icd_enumerate_instance_version =
5720                 (PFN_vkEnumerateInstanceVersion)icd_term->scanned_icd->GetInstanceProcAddr(NULL, "vkEnumerateInstanceVersion");
5721             if (icd_enumerate_instance_version != NULL) {
5722                 icd_result = icd_enumerate_instance_version(&icd_version);
5723                 if (icd_result != VK_SUCCESS) {
5724                     icd_version = VK_API_VERSION_1_0;
5725                     loader_log(ptr_instance, VULKAN_LOADER_DEBUG_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
5726                                "terminator_CreateInstance: ICD \"%s\" vkEnumerateInstanceVersion returned error. The ICD will be "
5727                                "treated as a 1.0 ICD",
5728                                icd_term->scanned_icd->lib_name);
5729                 } else if (VK_API_VERSION_MINOR(icd_version) == 0) {
5730                     loader_log(ptr_instance, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
5731                                "terminator_CreateInstance: Manifest ICD for \"%s\" contained a 1.1 or greater API version, but "
5732                                "vkEnumerateInstanceVersion returned 1.0, treating as a 1.0 ICD",
5733                                icd_term->scanned_icd->lib_name);
5734                 }
5735             } else {
5736                 loader_log(ptr_instance, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
5737                            "terminator_CreateInstance: Manifest ICD for \"%s\" contained a 1.1 or greater API version, but does "
5738                            "not support vkEnumerateInstanceVersion, treating as a 1.0 ICD",
5739                            icd_term->scanned_icd->lib_name);
5740             }
5741         }
5742 
5743         // Remove the portability enumeration flag bit if the ICD doesn't support the extension
5744         if ((pCreateInfo->flags & VK_INSTANCE_CREATE_ENUMERATE_PORTABILITY_BIT_KHR) != 0) {
5745             bool supports_portability_enumeration = false;
5746             for (uint32_t j = 0; j < icd_create_info.enabledExtensionCount; j++) {
5747                 if (strcmp(filtered_extension_names[j], VK_KHR_PORTABILITY_ENUMERATION_EXTENSION_NAME) == 0) {
5748                     supports_portability_enumeration = true;
5749                     break;
5750                 }
5751             }
5752             // If the ICD supports the extension, use the flags as given; otherwise remove the portability bit
5753             icd_create_info.flags = supports_portability_enumeration
5754                                         ? pCreateInfo->flags
5755                                         : pCreateInfo->flags & (~VK_INSTANCE_CREATE_ENUMERATE_PORTABILITY_BIT_KHR);
5756         }
5757 
5758         // Create the instance, substituting the application's API version with the ICD's version when the driver only supports 1.0
5759         VkApplicationInfo icd_app_info;
5760         const uint32_t api_variant = 0;
5761         const uint32_t api_version_1_0 = VK_API_VERSION_1_0;
5762         uint32_t icd_version_nopatch =
5763             VK_MAKE_API_VERSION(api_variant, VK_API_VERSION_MAJOR(icd_version), VK_API_VERSION_MINOR(icd_version), 0);
5764         uint32_t requested_version = (pCreateInfo == NULL || pCreateInfo->pApplicationInfo == NULL)
5765                                          ? api_version_1_0
5766                                          : pCreateInfo->pApplicationInfo->apiVersion;
5767         if ((requested_version != 0) && (icd_version_nopatch == api_version_1_0)) {
5768             if (icd_create_info.pApplicationInfo == NULL) {
5769                 memset(&icd_app_info, 0, sizeof(icd_app_info));
5770             } else {
5771                 memmove(&icd_app_info, icd_create_info.pApplicationInfo, sizeof(icd_app_info));
5772             }
5773             icd_app_info.apiVersion = icd_version;
5774             icd_create_info.pApplicationInfo = &icd_app_info;
5775         }
5776         icd_result =
5777             ptr_instance->icd_tramp_list.scanned_list[i].CreateInstance(&icd_create_info, pAllocator, &(icd_term->instance));
5778         if (VK_ERROR_OUT_OF_HOST_MEMORY == icd_result) {
5779             // If out of memory, bail immediately.
5780             res = VK_ERROR_OUT_OF_HOST_MEMORY;
5781             goto out;
5782         } else if (VK_SUCCESS != icd_result) {
5783             loader_log(ptr_instance, VULKAN_LOADER_WARN_BIT, 0,
5784                        "terminator_CreateInstance: Received return code %i from call to vkCreateInstance in ICD %s. Skipping "
5785                        "this driver.",
5786                        icd_result, icd_term->scanned_icd->lib_name);
5787             ptr_instance->icd_terms = icd_term->next;
5788             icd_term->next = NULL;
5789             loader_icd_destroy(ptr_instance, icd_term, pAllocator);
5790             continue;
5791         }
5792 
5793         if (!loader_icd_init_entries(ptr_instance, icd_term)) {
5794             loader_log(ptr_instance, VULKAN_LOADER_WARN_BIT, 0,
5795                        "terminator_CreateInstance: Failed to find required entrypoints in ICD %s. Skipping this driver.",
5796                        icd_term->scanned_icd->lib_name);
5797             ptr_instance->icd_terms = icd_term->next;
5798             icd_term->next = NULL;
5799             loader_icd_destroy(ptr_instance, icd_term, pAllocator);
5800             continue;
5801         }
5802 
5803         if (ptr_instance->icd_tramp_list.scanned_list[i].interface_version < 3 &&
5804             (
5805 #if defined(VK_USE_PLATFORM_XLIB_KHR)
5806                 NULL != icd_term->dispatch.CreateXlibSurfaceKHR ||
5807 #endif  // VK_USE_PLATFORM_XLIB_KHR
5808 #if defined(VK_USE_PLATFORM_XCB_KHR)
5809                 NULL != icd_term->dispatch.CreateXcbSurfaceKHR ||
5810 #endif  // VK_USE_PLATFORM_XCB_KHR
5811 #if defined(VK_USE_PLATFORM_WAYLAND_KHR)
5812                 NULL != icd_term->dispatch.CreateWaylandSurfaceKHR ||
5813 #endif  // VK_USE_PLATFORM_WAYLAND_KHR
5814 #if defined(VK_USE_PLATFORM_ANDROID_KHR)
5815                 NULL != icd_term->dispatch.CreateAndroidSurfaceKHR ||
5816 #endif  // VK_USE_PLATFORM_ANDROID_KHR
5817 #if defined(VK_USE_PLATFORM_OHOS)
5818                 NULL != icd_term->dispatch.CreateSurfaceOHOS ||
5819 #endif  // VK_USE_PLATFORM_OHOS
5820 #if defined(VK_USE_PLATFORM_WIN32_KHR)
5821                 NULL != icd_term->dispatch.CreateWin32SurfaceKHR ||
5822 #endif  // VK_USE_PLATFORM_WIN32_KHR
5823                 NULL != icd_term->dispatch.DestroySurfaceKHR)) {
5824             loader_log(ptr_instance, VULKAN_LOADER_WARN_BIT, 0,
5825                        "terminator_CreateInstance: Driver %s supports interface version %u but still exposes VkSurfaceKHR"
5826                        " create/destroy entrypoints (Policy #LDP_DRIVER_8)",
5827                        ptr_instance->icd_tramp_list.scanned_list[i].lib_name,
5828                        ptr_instance->icd_tramp_list.scanned_list[i].interface_version);
5829         }
5830 
5831         // If we made it this far, at least one ICD was successful
5832         one_icd_successful = true;
5833     }
5834 
5835     // For vkGetPhysicalDeviceProperties2, at least one ICD needs to support the extension for the
5836     // instance to have it
5837     if (ptr_instance->supports_get_dev_prop_2) {
5838         bool at_least_one_supports = false;
5839         icd_term = ptr_instance->icd_terms;
5840         while (icd_term != NULL) {
5841             if (icd_term->supports_get_dev_prop_2) {
5842                 at_least_one_supports = true;
5843                 break;
5844             }
5845             icd_term = icd_term->next;
5846         }
5847         if (!at_least_one_supports) {
5848             ptr_instance->supports_get_dev_prop_2 = false;
5849         }
5850     }
5851 
5852     // If no ICDs were added to the instance list and res is unchanged from its initial value, the loader was unable to
5853     // find a suitable ICD.
5854     if (VK_SUCCESS == res && (ptr_instance->icd_terms == NULL || !one_icd_successful)) {
5855         loader_log(ptr_instance, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
5856                    "terminator_CreateInstance: Found no drivers!");
5857         res = VK_ERROR_INCOMPATIBLE_DRIVER;
5858     }
5859 
5860 out:
5861 
5862     ptr_instance->create_terminator_invalid_extension = false;
5863 
5864     if (VK_SUCCESS != res) {
5865         if (VK_ERROR_EXTENSION_NOT_PRESENT == res) {
5866             ptr_instance->create_terminator_invalid_extension = true;
5867         }
5868 
5869         while (NULL != ptr_instance->icd_terms) {
5870             icd_term = ptr_instance->icd_terms;
5871             ptr_instance->icd_terms = icd_term->next;
5872             if (NULL != icd_term->instance) {
5873                 loader_icd_close_objects(ptr_instance, icd_term);
5874                 icd_term->dispatch.DestroyInstance(icd_term->instance, pAllocator);
5875             }
5876             loader_icd_destroy(ptr_instance, icd_term, pAllocator);
5877         }
5878     } else {
5879         // Check for enabled extensions here to set up the loader structures so the loader knows what extensions
5880         // it needs to worry about.
5881         // We do it here and again above the layers in the trampoline function since the trampoline function
5882         // may think different extensions are enabled than what's down here.
5883         // This is why we don't clear inside of these function calls.
5884         // The clearing should actually be handled by the overall memset of the pInstance structure in the
5885         // trampoline.
5886         wsi_create_instance(ptr_instance, pCreateInfo);
5887         check_for_enabled_debug_extensions(ptr_instance, pCreateInfo);
5888         extensions_create_instance(ptr_instance, pCreateInfo);
5889     }
5890 
5891     return res;
5892 }
5893 
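// Tear down the instance: unlink it from the loader's global instance list, destroy each ICD's VkInstance, and free the
// loader's bookkeeping data (scanned ICD list, physical device arrays, extension tables, enabled layer names).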
5894 VKAPI_ATTR void VKAPI_CALL terminator_DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
5895     struct loader_instance *ptr_instance = loader_get_instance(instance);
5896     if (NULL == ptr_instance) {
5897         return;
5898     }
5899 
5900     // Remove this instance from the list of instances:
5901     struct loader_instance *prev = NULL;
5902     loader_platform_thread_lock_mutex(&loader_global_instance_list_lock);
5903     struct loader_instance *next = loader.instances;
5904     while (next != NULL) {
5905         if (next == ptr_instance) {
5906             // Remove this instance from the list:
5907             if (prev)
5908                 prev->next = next->next;
5909             else
5910                 loader.instances = next->next;
5911             break;
5912         }
5913         prev = next;
5914         next = next->next;
5915     }
5916     loader_platform_thread_unlock_mutex(&loader_global_instance_list_lock);
5917 
5918     struct loader_icd_term *icd_terms = ptr_instance->icd_terms;
5919     while (NULL != icd_terms) {
5920         if (icd_terms->instance) {
5921             loader_icd_close_objects(ptr_instance, icd_terms);
5922             icd_terms->dispatch.DestroyInstance(icd_terms->instance, pAllocator);
5923         }
5924         struct loader_icd_term *next_icd_term = icd_terms->next;
5925         icd_terms->instance = VK_NULL_HANDLE;
5926         loader_icd_destroy(ptr_instance, icd_terms, pAllocator);
5927 
5928         icd_terms = next_icd_term;
5929     }
5930 
5931     loader_clear_scanned_icd_list(ptr_instance, &ptr_instance->icd_tramp_list);
5932     loader_destroy_generic_list(ptr_instance, (struct loader_generic_list *)&ptr_instance->ext_list);
5933     if (NULL != ptr_instance->phys_devs_term) {
5934         for (uint32_t i = 0; i < ptr_instance->phys_dev_count_term; i++) {
5935             for (uint32_t j = i + 1; j < ptr_instance->phys_dev_count_term; j++) {
5936                 if (ptr_instance->phys_devs_term[i] == ptr_instance->phys_devs_term[j]) {
5937                     ptr_instance->phys_devs_term[j] = NULL;
5938                 }
5939             }
5940         }
5941         for (uint32_t i = 0; i < ptr_instance->phys_dev_count_term; i++) {
5942             loader_instance_heap_free(ptr_instance, ptr_instance->phys_devs_term[i]);
5943         }
5944         loader_instance_heap_free(ptr_instance, ptr_instance->phys_devs_term);
5945     }
5946     if (NULL != ptr_instance->phys_dev_groups_term) {
5947         for (uint32_t i = 0; i < ptr_instance->phys_dev_group_count_term; i++) {
5948             loader_instance_heap_free(ptr_instance, ptr_instance->phys_dev_groups_term[i]);
5949         }
5950         loader_instance_heap_free(ptr_instance, ptr_instance->phys_dev_groups_term);
5951     }
5952     loader_free_dev_ext_table(ptr_instance);
5953     loader_free_phys_dev_ext_table(ptr_instance);
5954 
5955     free_string_list(ptr_instance, &ptr_instance->enabled_layer_names);
5956 }
5957 
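// Terminator for vkCreateDevice: filters the requested extensions down to those the ICD supports, rewrites any
// VkDeviceGroupDeviceCreateInfo to use the ICD's physical device handles, then calls the driver's vkCreateDevice.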
5958 VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateDevice(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo *pCreateInfo,
5959                                                        const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
5960     VkResult res = VK_SUCCESS;
5961     struct loader_physical_device_term *phys_dev_term;
5962     phys_dev_term = (struct loader_physical_device_term *)physicalDevice;
5963     struct loader_icd_term *icd_term = phys_dev_term->this_icd_term;
5964 
5965     struct loader_device *dev = (struct loader_device *)*pDevice;
5966     PFN_vkCreateDevice fpCreateDevice = icd_term->dispatch.CreateDevice;
5967     struct loader_extension_list icd_exts;
5968 
5969     VkBaseOutStructure *caller_dgci_container = NULL;
5970     VkDeviceGroupDeviceCreateInfo *caller_dgci = NULL;
5971 
5972     if (NULL == dev) {
5973         loader_log(icd_term->this_instance, VULKAN_LOADER_WARN_BIT, 0,
5974                    "terminator_CreateDevice: Loader device pointer null encountered.  Possibly set by active layer. (Policy "
5975                    "#LLP_LAYER_22)");
5976     } else if (DEVICE_DISP_TABLE_MAGIC_NUMBER != dev->loader_dispatch.core_dispatch.magic) {
5977         loader_log(icd_term->this_instance, VULKAN_LOADER_WARN_BIT, 0,
5978                    "terminator_CreateDevice: Device pointer (%p) has invalid MAGIC value 0x%08" PRIx64
5979                    ". The expected value is "
5980                    "0x10ADED040410ADED. Device value possibly "
5981                    "corrupted by active layer (Policy #LLP_LAYER_22).  ",
5982                    dev, dev->loader_dispatch.core_dispatch.magic);
5983     }
5984 
5985     dev->phys_dev_term = phys_dev_term;
5986 
5987     icd_exts.list = NULL;
5988 
5989     if (fpCreateDevice == NULL) {
5990         loader_log(icd_term->this_instance, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
5991                    "terminator_CreateDevice: No vkCreateDevice command exposed by ICD %s", icd_term->scanned_icd->lib_name);
5992         res = VK_ERROR_INITIALIZATION_FAILED;
5993         goto out;
5994     }
5995 
5996     VkDeviceCreateInfo localCreateInfo;
5997     memcpy(&localCreateInfo, pCreateInfo, sizeof(localCreateInfo));
5998 
5999     // NOTE: Need to filter the extensions to only those supported by the ICD.
6000     //       No ICD will advertise support for layers. An ICD library could support a layer,
6001     //       but it would be independent of the actual ICD, just in the same library.
6002     char **filtered_extension_names = NULL;
6003     if (0 < pCreateInfo->enabledExtensionCount) {
6004         filtered_extension_names = loader_stack_alloc(pCreateInfo->enabledExtensionCount * sizeof(char *));
6005         if (NULL == filtered_extension_names) {
6006             loader_log(icd_term->this_instance, VULKAN_LOADER_ERROR_BIT, 0,
6007                        "terminator_CreateDevice: Failed to create extension name storage for %d extensions",
6008                        pCreateInfo->enabledExtensionCount);
6009             return VK_ERROR_OUT_OF_HOST_MEMORY;
6010         }
6011     }
6012 
6013     localCreateInfo.enabledLayerCount = 0;
6014     localCreateInfo.ppEnabledLayerNames = NULL;
6015 
6016     localCreateInfo.enabledExtensionCount = 0;
6017     localCreateInfo.ppEnabledExtensionNames = (const char *const *)filtered_extension_names;
6018 
6019     // Get the physical device (ICD) extensions
6020     res = loader_init_generic_list(icd_term->this_instance, (struct loader_generic_list *)&icd_exts, sizeof(VkExtensionProperties));
6021     if (VK_SUCCESS != res) {
6022         goto out;
6023     }
6024 
6025     res = loader_add_device_extensions(icd_term->this_instance, icd_term->dispatch.EnumerateDeviceExtensionProperties,
6026                                        phys_dev_term->phys_dev, icd_term->scanned_icd->lib_name, &icd_exts);
6027     if (res != VK_SUCCESS) {
6028         goto out;
6029     }
6030 
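    // Filter the requested device extensions down to those the ICD supports; unsupported names are logged at debug
    // level and dropped here rather than causing a failure.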
6031     for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
6032         if (pCreateInfo->ppEnabledExtensionNames == NULL) {
6033             continue;
6034         }
6035         const char *extension_name = pCreateInfo->ppEnabledExtensionNames[i];
6036         if (extension_name == NULL) {
6037             continue;
6038         }
6039         VkExtensionProperties *prop = get_extension_property(extension_name, &icd_exts);
6040         if (prop) {
6041             filtered_extension_names[localCreateInfo.enabledExtensionCount] = (char *)extension_name;
6042             localCreateInfo.enabledExtensionCount++;
6043         } else {
6044             loader_log(icd_term->this_instance, VULKAN_LOADER_DEBUG_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
6045                        "vkCreateDevice extension %s not available for devices associated with ICD %s", extension_name,
6046                        icd_term->scanned_icd->lib_name);
6047         }
6048     }
6049 
6050     // Before we continue, if VK_KHR_device_group is in the list of enabled and viable extensions, we need to look for the
6051     // corresponding VkDeviceGroupDeviceCreateInfo struct in the pNext chain and replace all the physical device values (which
6052     // are really loader physical device terminator values) with the ICD versions.
6053     // if (icd_term->this_instance->enabled_known_extensions.khr_device_group_creation == 1) {
6054     {
6055         VkBaseOutStructure *pNext = (VkBaseOutStructure *)localCreateInfo.pNext;
6056         VkBaseOutStructure *pPrev = (VkBaseOutStructure *)&localCreateInfo;
6057         while (NULL != pNext) {
6058             if (VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO == pNext->sType) {
6059                 VkDeviceGroupDeviceCreateInfo *cur_struct = (VkDeviceGroupDeviceCreateInfo *)pNext;
6060                 if (0 < cur_struct->physicalDeviceCount && NULL != cur_struct->pPhysicalDevices) {
6061                     VkDeviceGroupDeviceCreateInfo *temp_struct = loader_stack_alloc(sizeof(VkDeviceGroupDeviceCreateInfo));
6062                     VkPhysicalDevice *phys_dev_array = NULL;
6063                     if (NULL == temp_struct) {
6064                         return VK_ERROR_OUT_OF_HOST_MEMORY;
6065                     }
6066                     memcpy(temp_struct, cur_struct, sizeof(VkDeviceGroupDeviceCreateInfo));
6067                     phys_dev_array = loader_stack_alloc(sizeof(VkPhysicalDevice) * cur_struct->physicalDeviceCount);
6068                     if (NULL == phys_dev_array) {
6069                         return VK_ERROR_OUT_OF_HOST_MEMORY;
6070                     }
6071 
6072                     // Before calling down, replace the incoming physical device values (which are really loader terminator
6073                     // physical devices) with the ICDs physical device values.
6074                     struct loader_physical_device_term *cur_term;
6075                     for (uint32_t phys_dev = 0; phys_dev < cur_struct->physicalDeviceCount; phys_dev++) {
6076                         cur_term = (struct loader_physical_device_term *)cur_struct->pPhysicalDevices[phys_dev];
6077                         phys_dev_array[phys_dev] = cur_term->phys_dev;
6078                     }
6079                     temp_struct->pPhysicalDevices = phys_dev_array;
6080 
6081                     // Keep track of pointers to restore pNext chain before returning
6082                     caller_dgci_container = pPrev;
6083                     caller_dgci = cur_struct;
6084 
6085                     // Replace the old struct in the pNext chain with this one.
6086                     pPrev->pNext = (VkBaseOutStructure *)temp_struct;
6087                 }
6088                 break;
6089             }
6090 
6091             pPrev = pNext;
6092             pNext = pNext->pNext;
6093         }
6094     }
6095 
6096     // Handle loader emulation for structs that are not supported by the ICD:
6097     // Presently, the emulation leaves the pNext chain alone. This means that the ICD will receive items in the chain which
6098     // are not recognized by the ICD. If this causes the ICD to fail, then the items would have to be removed here. The current
6099     // implementation does not remove them because copying the pNext chain would be impossible if the loader does not recognize
6100     // any of the struct types, as the loader would not know the size to allocate and copy.
6101     // if (icd_term->dispatch.GetPhysicalDeviceFeatures2 == NULL && icd_term->dispatch.GetPhysicalDeviceFeatures2KHR == NULL) {
6102     {
6103         const void *pNext = localCreateInfo.pNext;
6104         while (pNext != NULL) {
6105             VkBaseInStructure pNext_in_structure = {0};
6106             memcpy(&pNext_in_structure, pNext, sizeof(VkBaseInStructure));
6107             switch (pNext_in_structure.sType) {
6108                 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2: {
6109                     const VkPhysicalDeviceFeatures2KHR *features = pNext;
6110 
6111                     if (icd_term->dispatch.GetPhysicalDeviceFeatures2 == NULL &&
6112                         icd_term->dispatch.GetPhysicalDeviceFeatures2KHR == NULL) {
6113                         loader_log(icd_term->this_instance, VULKAN_LOADER_INFO_BIT, 0,
6114                                    "vkCreateDevice: Emulating handling of VkPhysicalDeviceFeatures2 in pNext chain for ICD \"%s\"",
6115                                    icd_term->scanned_icd->lib_name);
6116 
6117                         // Verify that VK_KHR_get_physical_device_properties2 is enabled
6118                         if (icd_term->this_instance->enabled_known_extensions.khr_get_physical_device_properties2) {
6119                             localCreateInfo.pEnabledFeatures = &features->features;
6120                         }
6121                     }
6122 
6123                     // Leave this item in the pNext chain for now
6124 
6125                     pNext = features->pNext;
6126                     break;
6127                 }
6128 
6129                 case VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO: {
6130                     const VkDeviceGroupDeviceCreateInfo *group_info = pNext;
6131 
6132                     if (icd_term->dispatch.EnumeratePhysicalDeviceGroups == NULL &&
6133                         icd_term->dispatch.EnumeratePhysicalDeviceGroupsKHR == NULL) {
6134                         loader_log(icd_term->this_instance, VULKAN_LOADER_INFO_BIT, 0,
6135                                    "vkCreateDevice: Emulating handling of VkPhysicalDeviceGroupProperties in pNext chain for "
6136                                    "ICD \"%s\"",
6137                                    icd_term->scanned_icd->lib_name);
6138 
6139                         // The group must contain only this one device, since physical device groups aren't actually supported
6140                         if (group_info->physicalDeviceCount != 1) {
6141                             loader_log(icd_term->this_instance, VULKAN_LOADER_ERROR_BIT, 0,
6142                                        "vkCreateDevice: Emulation failed to create device from device group info");
6143                             res = VK_ERROR_INITIALIZATION_FAILED;
6144                             goto out;
6145                         }
6146                     }
6147 
6148                     // Nothing needs to be done here because we're leaving the item in the pNext chain and because the spec
6149                     // states that the physicalDevice argument must be included in the device group, and we've already checked
6150                     // that it is
6151 
6152                     pNext = group_info->pNext;
6153                     break;
6154                 }
6155 
6156                 // Multiview properties are also allowed, but since VK_KHR_multiview is a device extension, we'll just let the
6157                 // ICD handle that error when the user enables the extension here
6158                 default: {
6159                     pNext = pNext_in_structure.pNext;
6160                     break;
6161                 }
6162             }
6163         }
6164     }
6165 
6166     VkBool32 maintenance5_feature_enabled = false;
6167     // Look for the VkPhysicalDeviceMaintenance5FeaturesKHR struct to see if the feature was enabled
6168     {
6169         const void *pNext = localCreateInfo.pNext;
6170         while (pNext != NULL) {
6171             VkBaseInStructure pNext_in_structure = {0};
6172             memcpy(&pNext_in_structure, pNext, sizeof(VkBaseInStructure));
6173             switch (pNext_in_structure.sType) {
6174                 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_5_FEATURES_KHR: {
6175                     const VkPhysicalDeviceMaintenance5FeaturesKHR *maintenance_features = pNext;
6176                     if (maintenance_features->maintenance5 == VK_TRUE) {
6177                         maintenance5_feature_enabled = true;
6178                     }
6179                     pNext = maintenance_features->pNext;
6180                     break;
6181                 }
6182 
6183                 default: {
6184                     pNext = pNext_in_structure.pNext;
6185                     break;
6186                 }
6187             }
6188         }
6189     }
6190 
6191     // Every extension that has a loader-defined terminator needs to be marked as enabled or disabled so that we know whether or
6192     // not to return that terminator when vkGetDeviceProcAddr is called
6193     for (uint32_t i = 0; i < localCreateInfo.enabledExtensionCount; ++i) {
6194         if (!strcmp(localCreateInfo.ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME)) {
6195             dev->driver_extensions.khr_swapchain_enabled = true;
6196         } else if (!strcmp(localCreateInfo.ppEnabledExtensionNames[i], VK_KHR_DISPLAY_SWAPCHAIN_EXTENSION_NAME)) {
6197             dev->driver_extensions.khr_display_swapchain_enabled = true;
6198         } else if (!strcmp(localCreateInfo.ppEnabledExtensionNames[i], VK_KHR_DEVICE_GROUP_EXTENSION_NAME)) {
6199             dev->driver_extensions.khr_device_group_enabled = true;
6200         } else if (!strcmp(localCreateInfo.ppEnabledExtensionNames[i], VK_EXT_DEBUG_MARKER_EXTENSION_NAME)) {
6201             dev->driver_extensions.ext_debug_marker_enabled = true;
6202 #if defined(VK_USE_PLATFORM_WIN32_KHR)
6203         } else if (!strcmp(localCreateInfo.ppEnabledExtensionNames[i], VK_EXT_FULL_SCREEN_EXCLUSIVE_EXTENSION_NAME)) {
6204             dev->driver_extensions.ext_full_screen_exclusive_enabled = true;
6205 #endif
6206         } else if (!strcmp(localCreateInfo.ppEnabledExtensionNames[i], VK_KHR_MAINTENANCE_5_EXTENSION_NAME) &&
6207                    maintenance5_feature_enabled) {
6208             dev->should_ignore_device_commands_from_newer_version = true;
6209         }
6210     }
6211     dev->layer_extensions.ext_debug_utils_enabled = icd_term->this_instance->enabled_known_extensions.ext_debug_utils;
6212     dev->driver_extensions.ext_debug_utils_enabled = icd_term->this_instance->enabled_known_extensions.ext_debug_utils;
6213 
6214     VkPhysicalDeviceProperties properties;
6215     icd_term->dispatch.GetPhysicalDeviceProperties(phys_dev_term->phys_dev, &properties);
6216     if (properties.apiVersion >= VK_API_VERSION_1_1) {
6217         dev->driver_extensions.version_1_1_enabled = true;
6218     }
6219     if (properties.apiVersion >= VK_API_VERSION_1_2) {
6220         dev->driver_extensions.version_1_2_enabled = true;
6221     }
6222     if (properties.apiVersion >= VK_API_VERSION_1_3) {
6223         dev->driver_extensions.version_1_3_enabled = true;
6224     }
6225 
6226     loader_log(icd_term->this_instance, VULKAN_LOADER_LAYER_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
6227                "       Using \"%s\" with driver: \"%s\"", properties.deviceName, icd_term->scanned_icd->lib_name);
6228 
6229     res = fpCreateDevice(phys_dev_term->phys_dev, &localCreateInfo, pAllocator, &dev->icd_device);
6230     if (res != VK_SUCCESS) {
6231         loader_log(icd_term->this_instance, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
6232                    "terminator_CreateDevice: Failed in ICD %s vkCreateDevice call", icd_term->scanned_icd->lib_name);
6233         goto out;
6234     }
6235 
6236     *pDevice = dev->icd_device;
6237     loader_add_logical_device(icd_term, dev);
6238 
6239     // Init dispatch pointer in new device object
6240     loader_init_dispatch(*pDevice, &dev->loader_dispatch);
6241 
6242 out:
6243     if (NULL != icd_exts.list) {
6244         loader_destroy_generic_list(icd_term->this_instance, (struct loader_generic_list *)&icd_exts);
6245     }
6246 
6247     // Restore pNext pointer to old VkDeviceGroupDeviceCreateInfo
6248     // in the chain to maintain consistency for the caller.
6249     if (caller_dgci_container != NULL) {
6250         caller_dgci_container->pNext = (VkBaseOutStructure *)caller_dgci;
6251     }
6252 
6253     return res;
6254 }
6255 
6256 // Update the trampoline physical devices with the wrapped version.
6257 // We always want to re-use previous physical device pointers, since an application may still be
6258 // holding and using handles returned by an earlier enumeration.
6259 VkResult setup_loader_tramp_phys_devs(struct loader_instance *inst, uint32_t phys_dev_count, VkPhysicalDevice *phys_devs) {
6260     VkResult res = VK_SUCCESS;
6261     uint32_t found_count = 0;
6262     uint32_t old_count = inst->phys_dev_count_tramp;
6263     uint32_t new_count = inst->total_gpu_count;
6264     struct loader_physical_device_tramp **new_phys_devs = NULL;
6265 
6266     if (0 == phys_dev_count) {
6267         return VK_SUCCESS;
6268     }
6269     if (phys_dev_count > new_count) {
6270         new_count = phys_dev_count;
6271     }
6272 
6273     // We want an old to new index array and a new to old index array
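    // old_to_new_index[i] will hold the position of old device i in the newly enumerated list
    // (or -1 if it is no longer present); new_to_old_index[j] will hold the old position of new
    // device j (or -1 if the device is new).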
6274     int32_t *old_to_new_index = (int32_t *)loader_stack_alloc(sizeof(int32_t) * old_count);
6275     int32_t *new_to_old_index = (int32_t *)loader_stack_alloc(sizeof(int32_t) * new_count);
6276     if (NULL == old_to_new_index || NULL == new_to_old_index) {
6277         return VK_ERROR_OUT_OF_HOST_MEMORY;
6278     }
6279 
6280     // Initialize both
6281     for (uint32_t cur_idx = 0; cur_idx < old_count; ++cur_idx) {
6282         old_to_new_index[cur_idx] = -1;
6283     }
6284     for (uint32_t cur_idx = 0; cur_idx < new_count; ++cur_idx) {
6285         new_to_old_index[cur_idx] = -1;
6286     }
6287 
6288     // Figure out the old->new and new->old indices
6289     for (uint32_t cur_idx = 0; cur_idx < old_count; ++cur_idx) {
6290         for (uint32_t new_idx = 0; new_idx < phys_dev_count; ++new_idx) {
6291             if (inst->phys_devs_tramp[cur_idx]->phys_dev == phys_devs[new_idx]) {
6292                 old_to_new_index[cur_idx] = (int32_t)new_idx;
6293                 new_to_old_index[new_idx] = (int32_t)cur_idx;
6294                 found_count++;
6295                 break;
6296             }
6297         }
6298     }
6299 
6300     // If we found exactly as many items as we had before, then everything we already have is
6301     // good enough and we just need to update the array that was passed in with
6302     // the loader values.
6303     if (found_count == phys_dev_count && 0 != old_count && old_count == new_count) {
6304         for (uint32_t new_idx = 0; new_idx < phys_dev_count; ++new_idx) {
6305             for (uint32_t cur_idx = 0; cur_idx < old_count; ++cur_idx) {
6306                 if (old_to_new_index[cur_idx] == (int32_t)new_idx) {
6307                     phys_devs[new_idx] = (VkPhysicalDevice)inst->phys_devs_tramp[cur_idx];
6308                     break;
6309                 }
6310             }
6311         }
6312         // Nothing else to do for this path
6313         res = VK_SUCCESS;
6314     } else {
6315         // Something is different, so do the full path of checking every device and creating a new array to use.
6316         // This can happen if a device was added, or removed, or we hadn't previously queried all the data and we
6317         // have more to store.
6318         new_phys_devs = loader_instance_heap_calloc(inst, sizeof(struct loader_physical_device_tramp *) * new_count,
6319                                                     VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
6320         if (NULL == new_phys_devs) {
6321             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6322                        "setup_loader_tramp_phys_devs:  Failed to allocate new physical device array of size %d", new_count);
6323             res = VK_ERROR_OUT_OF_HOST_MEMORY;
6324             goto out;
6325         }
6326 
6327         if (new_count > phys_dev_count) {
6328             found_count = phys_dev_count;
6329         } else {
6330             found_count = new_count;
6331         }
6332 
6333         // First try to see if an old item exists that matches the new item.  If so, just copy it over.
6334         for (uint32_t new_idx = 0; new_idx < found_count; ++new_idx) {
6335             bool old_item_found = false;
6336             for (uint32_t cur_idx = 0; cur_idx < old_count; ++cur_idx) {
6337                 if (old_to_new_index[cur_idx] == (int32_t)new_idx) {
6338                     // Copy over old item to correct spot in the new array
6339                     new_phys_devs[new_idx] = inst->phys_devs_tramp[cur_idx];
6340                     old_item_found = true;
6341                     break;
6342                 }
6343             }
6344             // No old item was found, so this device is new; add it to the new list
6345             if (!old_item_found) {
6346                 new_phys_devs[new_idx] = loader_instance_heap_alloc(inst, sizeof(struct loader_physical_device_tramp),
6347                                                                     VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
6348                 if (NULL == new_phys_devs[new_idx]) {
6349                     loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6350                                "setup_loader_tramp_phys_devs:  Failed to allocate new trampoline physical device");
6351                     res = VK_ERROR_OUT_OF_HOST_MEMORY;
6352                     goto out;
6353                 }
6354 
6355                 // Initialize the new physicalDevice object
6356                 loader_set_dispatch((void *)new_phys_devs[new_idx], inst->disp);
6357                 new_phys_devs[new_idx]->this_instance = inst;
6358                 new_phys_devs[new_idx]->phys_dev = phys_devs[new_idx];
6359                 new_phys_devs[new_idx]->magic = PHYS_TRAMP_MAGIC_NUMBER;
6360             }
6361 
6362             phys_devs[new_idx] = (VkPhysicalDevice)new_phys_devs[new_idx];
6363         }
6364 
6365         // We usually get here if the user array is smaller than the total number of devices, so copy the
6366         // remaining devices we have over to the new array.
6367         uint32_t start = found_count;
6368         for (uint32_t new_idx = start; new_idx < new_count; ++new_idx) {
6369             for (uint32_t cur_idx = 0; cur_idx < old_count; ++cur_idx) {
6370                 if (old_to_new_index[cur_idx] == -1) {
6371                     new_phys_devs[new_idx] = inst->phys_devs_tramp[cur_idx];
6372                     old_to_new_index[cur_idx] = new_idx;
6373                     found_count++;
6374                     break;
6375                 }
6376             }
6377         }
6378     }
6379 
6380 out:
6381 
6382     if (NULL != new_phys_devs) {
6383         if (VK_SUCCESS != res) {
6384             for (uint32_t new_idx = 0; new_idx < found_count; ++new_idx) {
6385                 // If an OOM occurred while copying the new physical devices into the existing array, the
6386                 // new array may still contain some of the old physical devices, which could then end up being
6387                 // freed twice. To avoid this we just make sure not to delete physical devices which were
6388                 // copied over from the old array.
6389                 bool found = false;
6390                 for (uint32_t cur_idx = 0; cur_idx < inst->phys_dev_count_tramp; cur_idx++) {
6391                     if (new_phys_devs[new_idx] == inst->phys_devs_tramp[cur_idx]) {
6392                         found = true;
6393                         break;
6394                     }
6395                 }
6396                 if (!found) {
6397                     loader_instance_heap_free(inst, new_phys_devs[new_idx]);
6398                 }
6399             }
6400             loader_instance_heap_free(inst, new_phys_devs);
6401         } else {
6402             if (new_count > inst->total_gpu_count) {
6403                 inst->total_gpu_count = new_count;
6404             }
6405             // Free everything in the old array that was not copied into the new array.
6406             // We can't attempt to do that any earlier, since the loop before the "out:"
6407             // label may still hit an out of memory condition, which would result in a
6408             // memory leak.
6409             if (NULL != inst->phys_devs_tramp) {
6410                 for (uint32_t i = 0; i < inst->phys_dev_count_tramp; i++) {
6411                     bool found = false;
6412                     for (uint32_t j = 0; j < inst->total_gpu_count; j++) {
6413                         if (inst->phys_devs_tramp[i] == new_phys_devs[j]) {
6414                             found = true;
6415                             break;
6416                         }
6417                     }
6418                     if (!found) {
6419                         loader_instance_heap_free(inst, inst->phys_devs_tramp[i]);
6420                     }
6421                 }
6422                 loader_instance_heap_free(inst, inst->phys_devs_tramp);
6423             }
6424             inst->phys_devs_tramp = new_phys_devs;
6425             inst->phys_dev_count_tramp = found_count;
6426         }
6427     }
6428     if (VK_SUCCESS != res) {
6429         inst->total_gpu_count = 0;
6430     }
6431 
6432     return res;
6433 }
6434 
6435 #if defined(LOADER_ENABLE_LINUX_SORT)
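// Linux device sorting defaults to on when the instance supports the "get physical device properties 2"
// functionality; setting the VK_LOADER_DISABLE_SELECT environment variable to a non-zero value (for
// example, VK_LOADER_DISABLE_SELECT=1) turns the sorting off.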
6436 bool is_linux_sort_enabled(struct loader_instance *inst) {
6437     bool sort_items = inst->supports_get_dev_prop_2;
6438     char *env_value = loader_getenv("VK_LOADER_DISABLE_SELECT", inst);
6439     if (NULL != env_value) {
6440         int32_t int_env_val = atoi(env_value);
6441         loader_free_getenv(env_value, inst);
6442         if (int_env_val != 0) {
6443             sort_items = false;
6444         }
6445     }
6446     return sort_items;
6447 }
6448 #endif  // LOADER_ENABLE_LINUX_SORT
6449 
6450 // Look for physical_device in the provided phys_devs list; if it is found, write its index into out_idx and return true,
6451 // otherwise return false
6452 bool find_phys_dev(VkPhysicalDevice physical_device, uint32_t phys_devs_count, struct loader_physical_device_term **phys_devs,
6453                    uint32_t *out_idx) {
6454     if (NULL == phys_devs) return false;
6455     for (uint32_t idx = 0; idx < phys_devs_count; idx++) {
6456         if (NULL != phys_devs[idx] && physical_device == phys_devs[idx]->phys_dev) {
6457             *out_idx = idx;
6458             return true;
6459         }
6460     }
6461     return false;
6462 }
6463 
6464 // Add physical_device to new_phys_devs
6465 VkResult check_and_add_to_new_phys_devs(struct loader_instance *inst, VkPhysicalDevice physical_device,
6466                                         struct loader_icd_physical_devices *dev_array, uint32_t *cur_new_phys_dev_count,
6467                                         struct loader_physical_device_term **new_phys_devs) {
6468     uint32_t out_idx = 0;
6469     uint32_t idx = *cur_new_phys_dev_count;
6470     // Check if the physical_device already exists in the new_phys_devs buffer; if so, it was found by both
6471     // EnumerateAdapterPhysicalDevices and EnumeratePhysicalDevices and we need to skip it.
6472     if (find_phys_dev(physical_device, idx, new_phys_devs, &out_idx)) {
6473         return VK_SUCCESS;
6474     }
6475     // Check if it was found in a previous call to vkEnumeratePhysicalDevices; if so, we can just copy over the old data.
6476     if (find_phys_dev(physical_device, inst->phys_dev_count_term, inst->phys_devs_term, &out_idx)) {
6477         new_phys_devs[idx] = inst->phys_devs_term[out_idx];
6478         (*cur_new_phys_dev_count)++;
6479         return VK_SUCCESS;
6480     }
6481 
6482     // Exit if something is already present - this shouldn't happen, but it is better to be safe than to overwrite existing
6483     // data, since this code has been refactored a half dozen times.
6484     if (NULL != new_phys_devs[idx]) {
6485         return VK_SUCCESS;
6486     }
6487     // If this physical device is new, we need to allocate space for it.
6488     new_phys_devs[idx] =
6489         loader_instance_heap_alloc(inst, sizeof(struct loader_physical_device_term), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
6490     if (NULL == new_phys_devs[idx]) {
6491         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6492                    "check_and_add_to_new_phys_devs:  Failed to allocate physical device terminator object %d", idx);
6493         return VK_ERROR_OUT_OF_HOST_MEMORY;
6494     }
6495 
6496     loader_set_dispatch((void *)new_phys_devs[idx], inst->disp);
6497     new_phys_devs[idx]->this_icd_term = dev_array->icd_term;
6498     new_phys_devs[idx]->phys_dev = physical_device;
6499 
6500     // Increment the count of new physical devices
6501     (*cur_new_phys_dev_count)++;
6502     return VK_SUCCESS;
6503 }
6504 
6505 /* Enumerate all physical devices from ICDs and add them to inst->phys_devs_term
6506  *
6507  * There are two methods to find VkPhysicalDevices - vkEnumeratePhysicalDevices and vkEnumerateAdapterPhysicalDevices.
6508  * The latter is supported only on Windows and only by drivers supporting ICD Interface Version 6 and greater.
6509  *
6510  * Once all physical devices are acquired, they need to be pulled into a single list of `loader_physical_device_term`'s.
6511  * They also need to be set up - the icd_term, icd_index, phys_dev, and disp (dispatch table) all need the correct data.
6512  * Additionally, we need to keep using physical devices that were already set up, as they may be in use; thus anything
6513  * enumerated that is already in inst->phys_devs_term will be carried over.
6514  */
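// Rough flow of the function below (illustrative summary):
//   1. (Windows only) windows_read_sorted_physical_devices() fills windows_sorted_devices_array.
//   2. For each ICD, EnumeratePhysicalDevices() fills icd_phys_dev_array.
//   3. check_and_add_to_new_phys_devs() de-duplicates entries and re-uses terminators already present in
//      inst->phys_devs_term (on Linux, linux_read_sorted_physical_devices() orders the remaining entries).
//   4. On success, new_phys_devs replaces inst->phys_devs_term and any old, unreferenced entries are freed.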
6515 
6516 VkResult setup_loader_term_phys_devs(struct loader_instance *inst) {
6517     VkResult res = VK_SUCCESS;
6518     struct loader_icd_term *icd_term;
6519     uint32_t windows_sorted_devices_count = 0;
6520     struct loader_icd_physical_devices *windows_sorted_devices_array = NULL;
6521     uint32_t icd_count = 0;
6522     struct loader_icd_physical_devices *icd_phys_dev_array = NULL;
6523     uint32_t new_phys_devs_capacity = 0;
6524     uint32_t new_phys_devs_count = 0;
6525     struct loader_physical_device_term **new_phys_devs = NULL;
6526 
6527 #if defined(_WIN32)
6528     // Get the physical devices supported by the platform sorting mechanism into a separate list
6529     res = windows_read_sorted_physical_devices(inst, &windows_sorted_devices_count, &windows_sorted_devices_array);
6530     if (VK_SUCCESS != res) {
6531         goto out;
6532     }
6533 #endif
6534 
6535     icd_count = inst->icd_terms_count;
6536 
6537     // Allocate storage for the physical device information that we read from each ICD.
6538     icd_phys_dev_array =
6539         (struct loader_icd_physical_devices *)loader_stack_alloc(sizeof(struct loader_icd_physical_devices) * icd_count);
6540     if (NULL == icd_phys_dev_array) {
6541         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6542                    "setup_loader_term_phys_devs:  Failed to allocate temporary ICD Physical device info array of size %d",
6543                    icd_count);
6544         res = VK_ERROR_OUT_OF_HOST_MEMORY;
6545         goto out;
6546     }
6547     memset(icd_phys_dev_array, 0, sizeof(struct loader_icd_physical_devices) * icd_count);
6548 
6549     // For each ICD, query the number of physical devices, and then enumerate the
6550     // physical device handles themselves.
6551     icd_term = inst->icd_terms;
6552     uint32_t icd_idx = 0;
6553     while (NULL != icd_term) {
6554         res = icd_term->dispatch.EnumeratePhysicalDevices(icd_term->instance, &icd_phys_dev_array[icd_idx].device_count, NULL);
6555         if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
6556             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6557                        "setup_loader_term_phys_devs: Call to \'vkEnumeratePhysicalDevices\' in ICD %s failed with error code "
6558                        "VK_ERROR_OUT_OF_HOST_MEMORY",
6559                        icd_term->scanned_icd->lib_name);
6560             goto out;
6561         } else if (VK_SUCCESS == res) {
6562             icd_phys_dev_array[icd_idx].physical_devices =
6563                 (VkPhysicalDevice *)loader_stack_alloc(icd_phys_dev_array[icd_idx].device_count * sizeof(VkPhysicalDevice));
6564             if (NULL == icd_phys_dev_array[icd_idx].physical_devices) {
6565                 loader_log(
6566                     inst, VULKAN_LOADER_ERROR_BIT, 0,
6567                     "setup_loader_term_phys_devs: Failed to allocate temporary ICD Physical device array for ICD %s of size %d",
6568                     icd_term->scanned_icd->lib_name, icd_phys_dev_array[icd_idx].device_count);
6569                 res = VK_ERROR_OUT_OF_HOST_MEMORY;
6570                 goto out;
6571             }
6572 
6573             res = icd_term->dispatch.EnumeratePhysicalDevices(icd_term->instance, &(icd_phys_dev_array[icd_idx].device_count),
6574                                                               icd_phys_dev_array[icd_idx].physical_devices);
6575             if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
6576                 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6577                            "setup_loader_term_phys_devs: Call to \'vkEnumeratePhysicalDevices\' in ICD %s failed with error code "
6578                            "VK_ERROR_OUT_OF_HOST_MEMORY",
6579                            icd_term->scanned_icd->lib_name);
6580                 goto out;
6581             }
6582             if (VK_SUCCESS != res) {
6583                 loader_log(
6584                     inst, VULKAN_LOADER_ERROR_BIT, 0,
6585                     "setup_loader_term_phys_devs: Call to \'vkEnumeratePhysicalDevices\' in ICD %s failed with error code %d",
6586                     icd_term->scanned_icd->lib_name, res);
6587                 icd_phys_dev_array[icd_idx].device_count = 0;
6588                 icd_phys_dev_array[icd_idx].physical_devices = NULL;
6589             }
6590         } else {
6591             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6592                        "setup_loader_term_phys_devs: Call to \'vkEnumeratePhysicalDevices\' in ICD %s failed with error code %d",
6593                        icd_term->scanned_icd->lib_name, res);
6594             icd_phys_dev_array[icd_idx].device_count = 0;
6595             icd_phys_dev_array[icd_idx].physical_devices = NULL;
6596         }
6597         icd_phys_dev_array[icd_idx].icd_term = icd_term;
6598         icd_term->physical_device_count = icd_phys_dev_array[icd_idx].device_count;
6599         icd_term = icd_term->next;
6600         ++icd_idx;
6601     }
6602 
6603     // Add up both the Windows-sorted and the non-Windows physical device counts
6604     for (uint32_t i = 0; i < windows_sorted_devices_count; ++i) {
6605         new_phys_devs_capacity += windows_sorted_devices_array[i].device_count;
6606     }
6607     for (uint32_t i = 0; i < icd_count; ++i) {
6608         new_phys_devs_capacity += icd_phys_dev_array[i].device_count;
6609     }
6610 
6611     // Bail out if there are no physical devices reported
6612     if (0 == new_phys_devs_capacity) {
6613         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6614                    "setup_loader_term_phys_devs:  Failed to detect any valid GPUs in the current config");
6615         res = VK_ERROR_INITIALIZATION_FAILED;
6616         goto out;
6617     }
6618 
6619     // Create an allocation large enough to hold both the Windows sorting enumeration and the non-Windows physical device
6620     // enumeration
6621     new_phys_devs = loader_instance_heap_calloc(inst, sizeof(struct loader_physical_device_term *) * new_phys_devs_capacity,
6622                                                 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
6623     if (NULL == new_phys_devs) {
6624         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6625                    "setup_loader_term_phys_devs:  Failed to allocate new physical device array of size %d", new_phys_devs_capacity);
6626         res = VK_ERROR_OUT_OF_HOST_MEMORY;
6627         goto out;
6628     }
6629 
6630     // Copy over everything found through sorted enumeration
6631     for (uint32_t i = 0; i < windows_sorted_devices_count; ++i) {
6632         for (uint32_t j = 0; j < windows_sorted_devices_array[i].device_count; ++j) {
6633             res = check_and_add_to_new_phys_devs(inst, windows_sorted_devices_array[i].physical_devices[j],
6634                                                  &windows_sorted_devices_array[i], &new_phys_devs_count, new_phys_devs);
6635             if (res == VK_ERROR_OUT_OF_HOST_MEMORY) {
6636                 goto out;
6637             }
6638         }
6639     }
6640 
6641 // Now go through the rest of the physical devices and add them to new_phys_devs
6642 #if defined(LOADER_ENABLE_LINUX_SORT)
6643 
6644     if (is_linux_sort_enabled(inst)) {
6645         for (uint32_t dev = new_phys_devs_count; dev < new_phys_devs_capacity; ++dev) {
6646             new_phys_devs[dev] =
6647                 loader_instance_heap_alloc(inst, sizeof(struct loader_physical_device_term), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
6648             if (NULL == new_phys_devs[dev]) {
6649                 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6650                            "setup_loader_term_phys_devs:  Failed to allocate physical device terminator object %d", dev);
6651                 res = VK_ERROR_OUT_OF_HOST_MEMORY;
6652                 goto out;
6653             }
6654         }
6655 
6656         // Get the physical devices supported by the platform sorting mechanism into a separate list
6657         // Pass in a sublist to the function so it only operates on the correct elements. This means passing in a pointer to the
6658         // current next element in new_phys_devs and passing in a `count` of currently unwritten elements
6659         res = linux_read_sorted_physical_devices(inst, icd_count, icd_phys_dev_array, new_phys_devs_capacity - new_phys_devs_count,
6660                                                  &new_phys_devs[new_phys_devs_count]);
6661         if (res == VK_ERROR_OUT_OF_HOST_MEMORY) {
6662             goto out;
6663         }
6664         // Keep previously allocated physical device info since apps may already be using that!
6665         for (uint32_t new_idx = new_phys_devs_count; new_idx < new_phys_devs_capacity; new_idx++) {
6666             for (uint32_t old_idx = 0; old_idx < inst->phys_dev_count_term; old_idx++) {
6667                 if (new_phys_devs[new_idx]->phys_dev == inst->phys_devs_term[old_idx]->phys_dev) {
6668                     loader_log(inst, VULKAN_LOADER_DEBUG_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
6669                                "Copying old device %u into new device %u", old_idx, new_idx);
6670                     // Free the newly allocated new_phys_devs entry since we're going to use the old info instead
6671                     loader_instance_heap_free(inst, new_phys_devs[new_idx]);
6672                     new_phys_devs[new_idx] = inst->phys_devs_term[old_idx];
6673                     break;
6674                 }
6675             }
6676         }
6677         // Now set the count to the capacity, as the list is fully filled in
6678         new_phys_devs_count = new_phys_devs_capacity;
6679         // We want the following code to run if Linux sorting is disabled either at compile time or at runtime
6680     } else {
6681 #endif  // LOADER_ENABLE_LINUX_SORT
6682 
6683         // Copy over everything found through the non-sorted means.
6684         for (uint32_t i = 0; i < icd_count; ++i) {
6685             for (uint32_t j = 0; j < icd_phys_dev_array[i].device_count; ++j) {
6686                 res = check_and_add_to_new_phys_devs(inst, icd_phys_dev_array[i].physical_devices[j], &icd_phys_dev_array[i],
6687                                                      &new_phys_devs_count, new_phys_devs);
6688                 if (res == VK_ERROR_OUT_OF_HOST_MEMORY) {
6689                     goto out;
6690                 }
6691             }
6692         }
6693 #if defined(LOADER_ENABLE_LINUX_SORT)
6694     }
6695 #endif  // LOADER_ENABLE_LINUX_SORT
6696 out:
6697 
6698     if (VK_SUCCESS != res) {
6699         if (NULL != new_phys_devs) {
6700             // We've encountered an error, so we should free the new buffers.
6701             for (uint32_t i = 0; i < new_phys_devs_capacity; i++) {
6702                 // May not have allocated this far, skip it if we hadn't.
6703                 if (new_phys_devs[i] == NULL) continue;
6704 
6705                 // If an OOM occurred while copying the new physical devices into the existing array, the
6706                 // new array may still contain some of the old physical devices, which could then end up being
6707                 // freed twice. To avoid this we just make sure not to delete physical devices which were
6708                 // copied over from the old array.
6709                 bool found = false;
6710                 if (NULL != inst->phys_devs_term) {
6711                     for (uint32_t old_idx = 0; old_idx < inst->phys_dev_count_term; old_idx++) {
6712                         if (new_phys_devs[i] == inst->phys_devs_term[old_idx]) {
6713                             found = true;
6714                             break;
6715                         }
6716                     }
6717                 }
6718                 if (!found) {
6719                     loader_instance_heap_free(inst, new_phys_devs[i]);
6720                 }
6721             }
6722             loader_instance_heap_free(inst, new_phys_devs);
6723         }
6724         inst->total_gpu_count = 0;
6725     } else {
6726         if (NULL != inst->phys_devs_term) {
6727             // Free everything in the old array that was not copied into the new array.
6728             // We can't attempt to do that any earlier, since the loop before the "out:"
6729             // label may still hit an out of memory condition, which would result in a
6730             // memory leak.
6731             for (uint32_t i = 0; i < inst->phys_dev_count_term; i++) {
6732                 bool found = false;
6733                 for (uint32_t j = 0; j < new_phys_devs_count; j++) {
6734                     if (new_phys_devs != NULL && inst->phys_devs_term[i] == new_phys_devs[j]) {
6735                         found = true;
6736                         break;
6737                     }
6738                 }
6739                 if (!found) {
6740                     loader_instance_heap_free(inst, inst->phys_devs_term[i]);
6741                 }
6742             }
6743             loader_instance_heap_free(inst, inst->phys_devs_term);
6744         }
6745 
6746         // Swap out old and new devices list
6747         inst->phys_dev_count_term = new_phys_devs_count;
6748         inst->phys_devs_term = new_phys_devs;
6749         inst->total_gpu_count = new_phys_devs_count;
6750     }
6751 
6752     if (windows_sorted_devices_array != NULL) {
6753         for (uint32_t i = 0; i < windows_sorted_devices_count; ++i) {
6754             if (windows_sorted_devices_array[i].device_count > 0 && windows_sorted_devices_array[i].physical_devices != NULL) {
6755                 loader_instance_heap_free(inst, windows_sorted_devices_array[i].physical_devices);
6756             }
6757         }
6758         loader_instance_heap_free(inst, windows_sorted_devices_array);
6759     }
6760 
6761     return res;
6762 }
6763 /**
6764  * Iterates through all drivers and unloads any which do not expose physical devices.
6765  * This saves address space, which is scarce for 32-bit applications.
6766  * This must only be called after a call to vkEnumeratePhysicalDevices that isn't just querying the count.
6767  */
6768 void unload_drivers_without_physical_devices(struct loader_instance *inst) {
6769     struct loader_icd_term *cur_icd_term = inst->icd_terms;
6770     struct loader_icd_term *prev_icd_term = NULL;
6771 
6772     while (NULL != cur_icd_term) {
6773         struct loader_icd_term *next_icd_term = cur_icd_term->next;
6774         if (cur_icd_term->physical_device_count == 0) {
6775             uint32_t cur_scanned_icd_index = UINT32_MAX;
6776             if (inst->icd_tramp_list.scanned_list) {
6777                 for (uint32_t i = 0; i < inst->icd_tramp_list.count; i++) {
6778                     if (&(inst->icd_tramp_list.scanned_list[i]) == cur_icd_term->scanned_icd) {
6779                         cur_scanned_icd_index = i;
6780                         break;
6781                     }
6782                 }
6783             }
6784             if (cur_scanned_icd_index != UINT32_MAX) {
6785                 loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
6786                            "Removing driver %s due to not having any physical devices", cur_icd_term->scanned_icd->lib_name);
6787 
6788                 const VkAllocationCallbacks *allocation_callbacks = ignore_null_callback(&(inst->alloc_callbacks));
6789                 if (cur_icd_term->instance) {
6790                     loader_icd_close_objects(inst, cur_icd_term);
6791                     cur_icd_term->dispatch.DestroyInstance(cur_icd_term->instance, allocation_callbacks);
6792                 }
6793                 cur_icd_term->instance = VK_NULL_HANDLE;
6794                 loader_icd_destroy(inst, cur_icd_term, allocation_callbacks);
6795                 cur_icd_term = NULL;
6796                 struct loader_scanned_icd *scanned_icd_to_remove = &inst->icd_tramp_list.scanned_list[cur_scanned_icd_index];
6797                 // Iterate through preloaded ICDs and remove the corresponding driver from that list
6798                 loader_platform_thread_lock_mutex(&loader_preload_icd_lock);
6799                 if (NULL != preloaded_icds.scanned_list) {
6800                     for (uint32_t i = 0; i < preloaded_icds.count; i++) {
6801                         if (NULL != preloaded_icds.scanned_list[i].lib_name && NULL != scanned_icd_to_remove->lib_name &&
6802                             strcmp(preloaded_icds.scanned_list[i].lib_name, scanned_icd_to_remove->lib_name) == 0) {
6803                             loader_unload_scanned_icd(NULL, &preloaded_icds.scanned_list[i]);
6804                             // condense the list so that it doesn't contain empty elements.
6805                             if (i < preloaded_icds.count - 1) {
6806                                 memcpy((void *)&preloaded_icds.scanned_list[i],
6807                                        (void *)&preloaded_icds.scanned_list[preloaded_icds.count - 1],
6808                                        sizeof(struct loader_scanned_icd));
6809                                 memset((void *)&preloaded_icds.scanned_list[preloaded_icds.count - 1], 0,
6810                                        sizeof(struct loader_scanned_icd));
6811                             }
6812                             if (i > 0) {
6813                                 preloaded_icds.count--;
6814                             }
6815 
6816                             break;
6817                         }
6818                     }
6819                 }
6820                 loader_platform_thread_unlock_mutex(&loader_preload_icd_lock);
6821 
6822                 loader_unload_scanned_icd(inst, scanned_icd_to_remove);
6823             }
6824 
6825             if (NULL == prev_icd_term) {
6826                 inst->icd_terms = next_icd_term;
6827             } else {
6828                 prev_icd_term->next = next_icd_term;
6829             }
6830         } else {
6831             prev_icd_term = cur_icd_term;
6832         }
6833         cur_icd_term = next_icd_term;
6834     }
6835 }
6836 
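// Flatten the physical devices from all groups, wrap them with loader trampoline objects via
// setup_loader_tramp_phys_devs, and then write the wrapped handles back into each group's
// physicalDevices[] array.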
6837 VkResult setup_loader_tramp_phys_dev_groups(struct loader_instance *inst, uint32_t group_count,
6838                                             VkPhysicalDeviceGroupProperties *groups) {
6839     VkResult res = VK_SUCCESS;
6840     uint32_t cur_idx;
6841     uint32_t dev_idx;
6842 
6843     if (0 == group_count) {
6844         return VK_SUCCESS;
6845     }
6846 
6847     // Generate a list of all the devices and convert them to the loader ID
6848     uint32_t phys_dev_count = 0;
6849     for (cur_idx = 0; cur_idx < group_count; ++cur_idx) {
6850         phys_dev_count += groups[cur_idx].physicalDeviceCount;
6851     }
6852     VkPhysicalDevice *devices = (VkPhysicalDevice *)loader_stack_alloc(sizeof(VkPhysicalDevice) * phys_dev_count);
6853     if (NULL == devices) {
6854         return VK_ERROR_OUT_OF_HOST_MEMORY;
6855     }
6856 
6857     uint32_t cur_device = 0;
6858     for (cur_idx = 0; cur_idx < group_count; ++cur_idx) {
6859         for (dev_idx = 0; dev_idx < groups[cur_idx].physicalDeviceCount; ++dev_idx) {
6860             devices[cur_device++] = groups[cur_idx].physicalDevices[dev_idx];
6861         }
6862     }
6863 
6864     // Update the devices based on the loader physical device values.
6865     res = setup_loader_tramp_phys_devs(inst, phys_dev_count, devices);
6866     if (VK_SUCCESS != res) {
6867         return res;
6868     }
6869 
6870     // Update the devices in the group structures now
6871     cur_device = 0;
6872     for (cur_idx = 0; cur_idx < group_count; ++cur_idx) {
6873         for (dev_idx = 0; dev_idx < groups[cur_idx].physicalDeviceCount; ++dev_idx) {
6874             groups[cur_idx].physicalDevices[dev_idx] = devices[cur_device++];
6875         }
6876     }
6877 
6878     return res;
6879 }
6880 
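// Note (illustrative, not loader-specific code): this terminator backs the standard two-call idiom used
// by applications, e.g.
//     uint32_t count = 0;
//     vkEnumeratePhysicalDevices(instance, &count, NULL);     // first call: query how many devices exist
//     VkPhysicalDevice *devices = malloc(count * sizeof(VkPhysicalDevice));
//     vkEnumeratePhysicalDevices(instance, &count, devices);  // second call: fill the array; returns
//                                                             // VK_INCOMPLETE if count was too small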
6881 VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount,
6882                                                                    VkPhysicalDevice *pPhysicalDevices) {
6883     struct loader_instance *inst = (struct loader_instance *)instance;
6884     VkResult res = VK_SUCCESS;
6885 
6886     // Always call setup_loader_term_phys_devs because the set of physical devices may
6887     // have changed at any point.
6888     res = setup_loader_term_phys_devs(inst);
6889     if (VK_SUCCESS != res) {
6890         goto out;
6891     }
6892 
6893     uint32_t copy_count = inst->phys_dev_count_term;
6894     if (NULL != pPhysicalDevices) {
6895         if (copy_count > *pPhysicalDeviceCount) {
6896             copy_count = *pPhysicalDeviceCount;
6897             loader_log(inst, VULKAN_LOADER_INFO_BIT, 0,
6898                        "terminator_EnumeratePhysicalDevices : Trimming device count from %d to %d.", inst->phys_dev_count_term,
6899                        copy_count);
6900             res = VK_INCOMPLETE;
6901         }
6902 
6903         for (uint32_t i = 0; i < copy_count; i++) {
6904             pPhysicalDevices[i] = (VkPhysicalDevice)inst->phys_devs_term[i];
6905         }
6906     }
6907 
6908     *pPhysicalDeviceCount = copy_count;
6909 
6910 out:
6911 
6912     return res;
6913 }
6914 
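// Three cases are handled below: a non-empty pLayerName reports that layer's device extensions from its
// manifest; a non-NULL pProperties (with no layer name) is filled with the driver's extensions plus those
// of active implicit layers; otherwise only the de-duplicated extension count is computed and returned.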
6915 VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
6916                                                                              const char *pLayerName, uint32_t *pPropertyCount,
6917                                                                              VkExtensionProperties *pProperties) {
6918     if (NULL == pPropertyCount) {
6919         return VK_INCOMPLETE;
6920     }
6921 
6922     struct loader_physical_device_term *phys_dev_term;
6923 
6924     // Any layer or trampoline wrapping should have been removed at this point in time, so we can just cast to the expected
6925     // type for VkPhysicalDevice.
6926     phys_dev_term = (struct loader_physical_device_term *)physicalDevice;
6927 
6928     // If we got here with a non-empty pLayerName, look up the extensions
6929     // from the layer's JSON manifest
6930     if (pLayerName != NULL && strlen(pLayerName) > 0) {
6931         uint32_t count;
6932         uint32_t copy_size;
6933         const struct loader_instance *inst = phys_dev_term->this_icd_term->this_instance;
6934         struct loader_device_extension_list *dev_ext_list = NULL;
6935         struct loader_device_extension_list local_ext_list;
6936         memset(&local_ext_list, 0, sizeof(local_ext_list));
6937         if (vk_string_validate(MaxLoaderStringLength, pLayerName) == VK_STRING_ERROR_NONE) {
6938             for (uint32_t i = 0; i < inst->instance_layer_list.count; i++) {
6939                 struct loader_layer_properties *props = &inst->instance_layer_list.list[i];
6940                 if (strcmp(props->info.layerName, pLayerName) == 0) {
6941                     dev_ext_list = &props->device_extension_list;
6942                 }
6943             }
6944 
6945             count = (dev_ext_list == NULL) ? 0 : dev_ext_list->count;
6946             if (pProperties == NULL) {
6947                 *pPropertyCount = count;
6948                 loader_destroy_generic_list(inst, (struct loader_generic_list *)&local_ext_list);
6949                 return VK_SUCCESS;
6950             }
6951 
6952             copy_size = *pPropertyCount < count ? *pPropertyCount : count;
6953             for (uint32_t i = 0; i < copy_size; i++) {
6954                 memcpy(&pProperties[i], &dev_ext_list->list[i].props, sizeof(VkExtensionProperties));
6955             }
6956             *pPropertyCount = copy_size;
6957 
6958             loader_destroy_generic_list(inst, (struct loader_generic_list *)&local_ext_list);
6959             if (copy_size < count) {
6960                 return VK_INCOMPLETE;
6961             }
6962         } else {
6963             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6964                        "vkEnumerateDeviceExtensionProperties:  pLayerName is too long or is badly formed");
6965             return VK_ERROR_EXTENSION_NOT_PRESENT;
6966         }
6967 
6968         return VK_SUCCESS;
6969     }
6970 
6971     // user is querying driver extensions and has supplied their own storage - just fill it out
6972     else if (pProperties) {
6973         struct loader_icd_term *icd_term = phys_dev_term->this_icd_term;
6974         uint32_t written_count = *pPropertyCount;
6975         VkResult res =
6976             icd_term->dispatch.EnumerateDeviceExtensionProperties(phys_dev_term->phys_dev, NULL, &written_count, pProperties);
6977         if (res != VK_SUCCESS) {
6978             return res;
6979         }
6980 
6981         // Iterate over active layers; if a layer is implicit, add its device extensions.
6982         // After calling into the driver, written_count contains the number of device extensions written. We can therefore
6983         // write layer extensions starting at that point in pProperties.
6984         for (uint32_t i = 0; i < icd_term->this_instance->expanded_activated_layer_list.count; i++) {
6985             struct loader_layer_properties *layer_props = icd_term->this_instance->expanded_activated_layer_list.list[i];
6986             if (0 == (layer_props->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER)) {
6987                 struct loader_device_extension_list *layer_ext_list = &layer_props->device_extension_list;
6988                 for (uint32_t j = 0; j < layer_ext_list->count; j++) {
6989                     struct loader_dev_ext_props *cur_ext_props = &layer_ext_list->list[j];
6990                     // look for duplicates
6991                     if (has_vk_extension_property_array(&cur_ext_props->props, written_count, pProperties)) {
6992                         continue;
6993                     }
6994 
6995                     if (*pPropertyCount <= written_count) {
6996                         return VK_INCOMPLETE;
6997                     }
6998 
6999                     memcpy(&pProperties[written_count], &cur_ext_props->props, sizeof(VkExtensionProperties));
7000                     written_count++;
7001                 }
7002             }
7003         }
7004         // Make sure we update pPropertyCount with how many extensions were actually written
7005         *pPropertyCount = written_count;
7006         return res;
7007     }
7008     // Use `goto out;` for the rest of this function
7009 
7010     // This case handles the call down the instance chain with pLayerName == NULL and pProperties == NULL
7011     struct loader_icd_term *icd_term = phys_dev_term->this_icd_term;
7012     struct loader_extension_list all_exts = {0};
7013     VkResult res;
7014 
7015     // We need to find the count without duplicates. This requires querying the driver for the names of the extensions.
7016     res = icd_term->dispatch.EnumerateDeviceExtensionProperties(phys_dev_term->phys_dev, NULL, &all_exts.count, NULL);
7017     if (res != VK_SUCCESS) {
7018         goto out;
7019     }
7020     // Then allocate memory to store the physical device extension list plus the extensions that layers provide.
7021     // all_exts.count is currently the number of driver extensions.
7022     all_exts.capacity = sizeof(VkExtensionProperties) * (all_exts.count + 20);
7023     all_exts.list = loader_instance_heap_alloc(icd_term->this_instance, all_exts.capacity, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
7024     if (NULL == all_exts.list) {
7025         res = VK_ERROR_OUT_OF_HOST_MEMORY;
7026         goto out;
7027     }
7028 
7029     // Get the available device extensions and put them in all_exts.list
7030     res = icd_term->dispatch.EnumerateDeviceExtensionProperties(phys_dev_term->phys_dev, NULL, &all_exts.count, all_exts.list);
7031     if (res != VK_SUCCESS) {
7032         goto out;
7033     }
7034 
7035     // Iterate over active layers; if a layer is implicit, add its device extensions to all_exts.list
7036     for (uint32_t i = 0; i < icd_term->this_instance->expanded_activated_layer_list.count; i++) {
7037         struct loader_layer_properties *layer_props = icd_term->this_instance->expanded_activated_layer_list.list[i];
7038         if (0 == (layer_props->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER)) {
7039             struct loader_device_extension_list *layer_ext_list = &layer_props->device_extension_list;
7040             for (uint32_t j = 0; j < layer_ext_list->count; j++) {
7041                 res = loader_add_to_ext_list(icd_term->this_instance, &all_exts, 1, &layer_ext_list->list[j].props);
7042                 if (res != VK_SUCCESS) {
7043                     goto out;
7044                 }
7045             }
7046         }
7047     }
7048 
7049     // Write out the final de-duplicated count to pPropertyCount
7050     *pPropertyCount = all_exts.count;
7051     res = VK_SUCCESS;
7052 
7053 out:
7054 
7055     loader_destroy_generic_list(icd_term->this_instance, (struct loader_generic_list *)&all_exts);
7056     return res;
7057 }
7058 
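// Check that "utf8" is a NUL-terminated string of at most max_length bytes whose contents look like
// plausible UTF-8: printable ASCII, or a multi-byte lead byte followed by the expected number of
// 10xxxxxx continuation bytes. Returns a bitmask of VK_STRING_ERROR_* flags (VK_STRING_ERROR_NONE if clean).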
7059 VkStringErrorFlags vk_string_validate(const int max_length, const char *utf8) {
7060     VkStringErrorFlags result = VK_STRING_ERROR_NONE;
7061     int num_char_bytes = 0;
7062     int i, j;
7063 
7064     if (utf8 == NULL) {
7065         return VK_STRING_ERROR_NULL_PTR;
7066     }
7067 
7068     for (i = 0; i <= max_length; i++) {
7069         if (utf8[i] == 0) {
7070             break;
7071         } else if (i == max_length) {
7072             result |= VK_STRING_ERROR_LENGTH;
7073             break;
7074         } else if ((utf8[i] >= 0x20) && (utf8[i] < 0x7f)) {
7075             num_char_bytes = 0;
7076         } else if ((utf8[i] & UTF8_ONE_BYTE_MASK) == UTF8_ONE_BYTE_CODE) {
7077             num_char_bytes = 1;
7078         } else if ((utf8[i] & UTF8_TWO_BYTE_MASK) == UTF8_TWO_BYTE_CODE) {
7079             num_char_bytes = 2;
7080         } else if ((utf8[i] & UTF8_THREE_BYTE_MASK) == UTF8_THREE_BYTE_CODE) {
7081             num_char_bytes = 3;
7082         } else {
7083             result = VK_STRING_ERROR_BAD_DATA;
7084         }
7085 
7086         // Validate the following num_char_bytes of data
7087         for (j = 0; (j < num_char_bytes) && (i < max_length); j++) {
7088             if (++i == max_length) {
7089                 result |= VK_STRING_ERROR_LENGTH;
7090                 break;
7091             }
7092             if ((utf8[i] & UTF8_DATA_BYTE_MASK) != UTF8_DATA_BYTE_CODE) {
7093                 result |= VK_STRING_ERROR_BAD_DATA;
7094             }
7095         }
7096     }
7097     return result;
7098 }
7099 
7100 VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumerateInstanceVersion(uint32_t *pApiVersion) {
7101     // NOTE: The Vulkan WG doesn't want us checking pApiVersion for NULL, but instead
7102     // prefers us crashing.
7103     *pApiVersion = VK_HEADER_VERSION_COMPLETE;
7104     return VK_SUCCESS;
7105 }
7106 
7107 VKAPI_ATTR VkResult VKAPI_CALL terminator_pre_instance_EnumerateInstanceVersion(const VkEnumerateInstanceVersionChain *chain,
7108                                                                                 uint32_t *pApiVersion) {
7109     (void)chain;
7110     return terminator_EnumerateInstanceVersion(pApiVersion);
7111 }
7112 
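// If pLayerName names a known layer, report that layer's instance extensions from its manifest; otherwise
// report the union of the extensions exposed by all ICDs and by enabled implicit layers, de-duplicated into
// a single list.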
7113 VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pPropertyCount,
7114                                                                                VkExtensionProperties *pProperties) {
7115     struct loader_extension_list *global_ext_list = NULL;
7116     struct loader_layer_list instance_layers;
7117     struct loader_extension_list local_ext_list;
7118     struct loader_icd_tramp_list icd_tramp_list;
7119     uint32_t copy_size;
7120     VkResult res = VK_SUCCESS;
7121     struct loader_envvar_all_filters layer_filters = {0};
7122 
7123     memset(&local_ext_list, 0, sizeof(local_ext_list));
7124     memset(&instance_layers, 0, sizeof(instance_layers));
7125     memset(&icd_tramp_list, 0, sizeof(icd_tramp_list));
7126 
7127     res = parse_layer_environment_var_filters(NULL, &layer_filters);
7128     if (VK_SUCCESS != res) {
7129         goto out;
7130     }
7131 
7132     // Get layer libraries if needed
7133     if (pLayerName && strlen(pLayerName) != 0) {
7134         if (vk_string_validate(MaxLoaderStringLength, pLayerName) != VK_STRING_ERROR_NONE) {
7135             assert(VK_FALSE && "vkEnumerateInstanceExtensionProperties: pLayerName is too long or is badly formed");
7136             res = VK_ERROR_EXTENSION_NOT_PRESENT;
7137             goto out;
7138         }
7139 
7140         res = loader_scan_for_layers(NULL, &instance_layers, &layer_filters);
7141         if (VK_SUCCESS != res) {
7142             goto out;
7143         }
7144         for (uint32_t i = 0; i < instance_layers.count; i++) {
7145             struct loader_layer_properties *props = &instance_layers.list[i];
7146             if (strcmp(props->info.layerName, pLayerName) == 0) {
7147                 global_ext_list = &props->instance_extension_list;
7148                 break;
7149             }
7150         }
7151     } else {
7152         // Preload ICD libraries so subsequent calls to EnumerateInstanceExtensionProperties don't have to load them
7153         loader_preload_icds();
7154 
7155         // Scan/discover all ICD libraries
7156         res = loader_icd_scan(NULL, &icd_tramp_list, NULL, NULL);
7157         // EnumerateInstanceExtensionProperties can't return anything other than OOM or VK_ERROR_LAYER_NOT_PRESENT
7158         if ((VK_SUCCESS != res && icd_tramp_list.count > 0) || res == VK_ERROR_OUT_OF_HOST_MEMORY) {
7159             goto out;
7160         }
7161         // Get extensions from all ICDs and merge them so there are no duplicates
7162         res = loader_get_icd_loader_instance_extensions(NULL, &icd_tramp_list, &local_ext_list);
7163         if (VK_SUCCESS != res) {
7164             goto out;
7165         }
7166         loader_clear_scanned_icd_list(NULL, &icd_tramp_list);
7167 
7168         // Append enabled implicit layers.
7169         res = loader_scan_for_implicit_layers(NULL, &instance_layers, &layer_filters);
7170         if (VK_SUCCESS != res) {
7171             goto out;
7172         }
7173         for (uint32_t i = 0; i < instance_layers.count; i++) {
7174             struct loader_extension_list *ext_list = &instance_layers.list[i].instance_extension_list;
7175             loader_add_to_ext_list(NULL, &local_ext_list, ext_list->count, ext_list->list);
7176         }
7177 
7178         global_ext_list = &local_ext_list;
7179     }
7180 
7181     if (global_ext_list == NULL) {
7182         res = VK_ERROR_LAYER_NOT_PRESENT;
7183         goto out;
7184     }
7185 
7186     if (pProperties == NULL) {
7187         *pPropertyCount = global_ext_list->count;
7188         goto out;
7189     }
7190 
7191     copy_size = *pPropertyCount < global_ext_list->count ? *pPropertyCount : global_ext_list->count;
7192     for (uint32_t i = 0; i < copy_size; i++) {
7193         memcpy(&pProperties[i], &global_ext_list->list[i], sizeof(VkExtensionProperties));
7194     }
7195     *pPropertyCount = copy_size;
7196 
7197     if (copy_size < global_ext_list->count) {
7198         res = VK_INCOMPLETE;
7199         goto out;
7200     }
7201 
7202 out:
7203     loader_destroy_generic_list(NULL, (struct loader_generic_list *)&icd_tramp_list);
7204     loader_destroy_generic_list(NULL, (struct loader_generic_list *)&local_ext_list);
7205     loader_delete_layer_list_and_properties(NULL, &instance_layers);
7206     return res;
7207 }
7208 
7209 VKAPI_ATTR VkResult VKAPI_CALL terminator_pre_instance_EnumerateInstanceExtensionProperties(
7210     const VkEnumerateInstanceExtensionPropertiesChain *chain, const char *pLayerName, uint32_t *pPropertyCount,
7211     VkExtensionProperties *pProperties) {
7212     (void)chain;
7213     return terminator_EnumerateInstanceExtensionProperties(pLayerName, pPropertyCount, pProperties);
7214 }
7215 
7216 VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
7217                                                                            VkLayerProperties *pProperties) {
7218     VkResult result = VK_SUCCESS;
7219     struct loader_layer_list instance_layer_list;
7220     struct loader_envvar_all_filters layer_filters = {0};
7221 
7222     LOADER_PLATFORM_THREAD_ONCE(&once_init, loader_initialize);
7223 
7224     result = parse_layer_environment_var_filters(NULL, &layer_filters);
7225     if (VK_SUCCESS != result) {
7226         goto out;
7227     }
7228 
7229     // Get layer libraries
7230     memset(&instance_layer_list, 0, sizeof(instance_layer_list));
7231     result = loader_scan_for_layers(NULL, &instance_layer_list, &layer_filters);
7232     if (VK_SUCCESS != result) {
7233         goto out;
7234     }
7235 
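    // Only layers that the loader settings leave enabled (control value "on" or "default") are reported
    // back to the application.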
7236     uint32_t layers_to_write_out = 0;
7237     for (uint32_t i = 0; i < instance_layer_list.count; i++) {
7238         if (instance_layer_list.list[i].settings_control_value == LOADER_SETTINGS_LAYER_CONTROL_ON ||
7239             instance_layer_list.list[i].settings_control_value == LOADER_SETTINGS_LAYER_CONTROL_DEFAULT) {
7240             layers_to_write_out++;
7241         }
7242     }
7243 
7244     if (pProperties == NULL) {
7245         *pPropertyCount = layers_to_write_out;
7246         goto out;
7247     }
7248 
7249     uint32_t output_properties_index = 0;
7250     for (uint32_t i = 0; i < instance_layer_list.count; i++) {
7251         if (output_properties_index < *pPropertyCount &&
7252             (instance_layer_list.list[i].settings_control_value == LOADER_SETTINGS_LAYER_CONTROL_ON ||
7253              instance_layer_list.list[i].settings_control_value == LOADER_SETTINGS_LAYER_CONTROL_DEFAULT)) {
7254             memcpy(&pProperties[output_properties_index], &instance_layer_list.list[i].info, sizeof(VkLayerProperties));
7255             output_properties_index++;
7256         }
7257     }
7258     if (output_properties_index < layers_to_write_out) {
7259         // Indicates that we had more elements to write but ran out of room
7260         result = VK_INCOMPLETE;
7261     }
7262 
7263     *pPropertyCount = output_properties_index;
7264 
7265 out:
7266 
7267     loader_delete_layer_list_and_properties(NULL, &instance_layer_list);
7268     return result;
7269 }
7270 
7271 VKAPI_ATTR VkResult VKAPI_CALL terminator_pre_instance_EnumerateInstanceLayerProperties(
7272     const VkEnumerateInstanceLayerPropertiesChain *chain, uint32_t *pPropertyCount, VkLayerProperties *pProperties) {
7273     (void)chain;
7274     return terminator_EnumerateInstanceLayerProperties(pPropertyCount, pProperties);
7275 }
7276 
7277 // ---- Vulkan Core 1.1 terminators
7278 
7279 VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumeratePhysicalDeviceGroups(
7280     VkInstance instance, uint32_t *pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties) {
7281     struct loader_instance *inst = (struct loader_instance *)instance;
7282 
7283     VkResult res = VK_SUCCESS;
7284     struct loader_icd_term *icd_term;
7285     uint32_t total_count = 0;
7286     uint32_t cur_icd_group_count = 0;
7287     VkPhysicalDeviceGroupProperties **new_phys_dev_groups = NULL;
7288     struct loader_physical_device_group_term *local_phys_dev_groups = NULL;
7289     PFN_vkEnumeratePhysicalDeviceGroups fpEnumeratePhysicalDeviceGroups = NULL;
7290     struct loader_icd_physical_devices *sorted_phys_dev_array = NULL;
7291     uint32_t sorted_count = 0;
7292 
7293     // For each ICD, query the number of physical device groups it exposes and add
7294     // that to the running total.
7295     icd_term = inst->icd_terms;
7296     while (NULL != icd_term) {
7297         cur_icd_group_count = 0;
7298 
7299         // Get the function pointer to use to call into the ICD. This could be the core or KHR version
7300         if (inst->enabled_known_extensions.khr_device_group_creation) {
7301             fpEnumeratePhysicalDeviceGroups = icd_term->dispatch.EnumeratePhysicalDeviceGroupsKHR;
7302         } else {
7303             fpEnumeratePhysicalDeviceGroups = icd_term->dispatch.EnumeratePhysicalDeviceGroups;
7304         }
7305 
7306         if (NULL == fpEnumeratePhysicalDeviceGroups) {
7307             // Treat each of the ICD's GPUs as its own group if the extension isn't supported
7308             res = icd_term->dispatch.EnumeratePhysicalDevices(icd_term->instance, &cur_icd_group_count, NULL);
7309             if (res != VK_SUCCESS) {
7310                 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
7311                            "terminator_EnumeratePhysicalDeviceGroups:  Failed during dispatch call of \'EnumeratePhysicalDevices\' "
7312                            "to ICD %s to get plain phys dev count.",
7313                            icd_term->scanned_icd->lib_name);
7314                 continue;
7315             }
7316         } else {
7317             // Query the actual group info
7318             res = fpEnumeratePhysicalDeviceGroups(icd_term->instance, &cur_icd_group_count, NULL);
7319             if (res != VK_SUCCESS) {
7320                 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
7321                            "terminator_EnumeratePhysicalDeviceGroups:  Failed during dispatch call of "
7322                            "\'EnumeratePhysicalDeviceGroups\' to ICD %s to get count.",
7323                            icd_term->scanned_icd->lib_name);
7324                 continue;
7325             }
7326         }
7327         total_count += cur_icd_group_count;
7328         icd_term = icd_term->next;
7329     }
7330 
7331     // If the GPUs have not been sorted yet, look through them and generate a list of all available GPUs
7332     if (0 == total_count || 0 == inst->total_gpu_count) {
7333         res = setup_loader_term_phys_devs(inst);
7334         if (VK_SUCCESS != res) {
7335             goto out;
7336         }
7337     }
7338 
7339     if (NULL != pPhysicalDeviceGroupProperties) {
7340         // Create an array for the new physical device groups, which will be stored
7341         // in the instance for the Terminator code.
7342         new_phys_dev_groups = (VkPhysicalDeviceGroupProperties **)loader_instance_heap_calloc(
7343             inst, total_count * sizeof(VkPhysicalDeviceGroupProperties *), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
7344         if (NULL == new_phys_dev_groups) {
7345             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
7346                        "terminator_EnumeratePhysicalDeviceGroups:  Failed to allocate new physical device group array of size %d",
7347                        total_count);
7348             res = VK_ERROR_OUT_OF_HOST_MEMORY;
7349             goto out;
7350         }
7351 
7352         // Create a temporary array (on the stack) to track each returned group's properties
7353         // along with the ICD that reported it.
7354         local_phys_dev_groups = loader_stack_alloc(sizeof(struct loader_physical_device_group_term) * total_count);
             if (NULL == local_phys_dev_groups) {
                 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
                            "terminator_EnumeratePhysicalDeviceGroups:  Failed to allocate local physical device group array of size %d",
                            total_count);
                 res = VK_ERROR_OUT_OF_HOST_MEMORY;
                 goto out;
             }
7355         // Initialize the memory to something valid
7356         memset(local_phys_dev_groups, 0, sizeof(struct loader_physical_device_group_term) * total_count);
7357 
7358 #if defined(_WIN32)
7359         // Get the physical devices reported by the platform's sorting mechanism into a separate list
7360         res = windows_read_sorted_physical_devices(inst, &sorted_count, &sorted_phys_dev_array);
7361         if (VK_SUCCESS != res) {
7362             goto out;
7363         }
7364 #endif
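             // On Windows, sorted_phys_dev_array now holds the adapters in the platform's preferred
             // order; it is merged into the group list after the per-ICD queries below.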
7365 
7366         cur_icd_group_count = 0;
7367         icd_term = inst->icd_terms;
7368         while (NULL != icd_term) {
7369             uint32_t count_this_time = total_count - cur_icd_group_count;
7370 
7371             // Get the function pointer to use to call into the ICD. This could be the core or KHR version
7372             if (inst->enabled_known_extensions.khr_device_group_creation) {
7373                 fpEnumeratePhysicalDeviceGroups = icd_term->dispatch.EnumeratePhysicalDeviceGroupsKHR;
7374             } else {
7375                 fpEnumeratePhysicalDeviceGroups = icd_term->dispatch.EnumeratePhysicalDeviceGroups;
7376             }
7377 
7378             if (NULL == fpEnumeratePhysicalDeviceGroups) {
7379                 icd_term->dispatch.EnumeratePhysicalDevices(icd_term->instance, &count_this_time, NULL);
7380 
7381                 VkPhysicalDevice *phys_dev_array = loader_stack_alloc(sizeof(VkPhysicalDevice) * count_this_time);
7382                 if (NULL == phys_dev_array) {
7383                     loader_log(
7384                         inst, VULKAN_LOADER_ERROR_BIT, 0,
7385                         "terminator_EnumeratePhysicalDeviceGroups:  Failed to allocate local physical device array of size %d",
7386                         count_this_time);
7387                     res = VK_ERROR_OUT_OF_HOST_MEMORY;
7388                     goto out;
7389                 }
7390 
7391                 res = icd_term->dispatch.EnumeratePhysicalDevices(icd_term->instance, &count_this_time, phys_dev_array);
7392                 if (res != VK_SUCCESS) {
7393                     loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
7394                                "terminator_EnumeratePhysicalDeviceGroups:  Failed during dispatch call of "
7395                                "\'EnumeratePhysicalDevices\' to ICD %s to get plain phys dev count.",
7396                                icd_term->scanned_icd->lib_name);
7397                     goto out;
7398                 }
7399 
7400                 // Add each GPU as its own group
7401                 for (uint32_t indiv_gpu = 0; indiv_gpu < count_this_time; indiv_gpu++) {
7402                     uint32_t cur_index = indiv_gpu + cur_icd_group_count;
7403                     local_phys_dev_groups[cur_index].this_icd_term = icd_term;
7404                     local_phys_dev_groups[cur_index].group_props.physicalDeviceCount = 1;
7405                     local_phys_dev_groups[cur_index].group_props.physicalDevices[0] = phys_dev_array[indiv_gpu];
7406                 }
7407 
7408             } else {
7409                 res = fpEnumeratePhysicalDeviceGroups(icd_term->instance, &count_this_time, NULL);
7410                 if (res != VK_SUCCESS) {
7411                     loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
7412                                "terminator_EnumeratePhysicalDeviceGroups:  Failed during dispatch call of "
7413                                "\'EnumeratePhysicalDeviceGroups\' to ICD %s to get group count.",
7414                                icd_term->scanned_icd->lib_name);
7415                     goto out;
7416                 }
7417                 if (cur_icd_group_count + count_this_time < *pPhysicalDeviceGroupCount) {
7418                     // The running total still fits within the physical device group storage passed in
7419                     // by the caller.  Therefore, we don't have to allocate any temporary structures and we
7420                     // can just write the results directly into the caller's array.
7421                     res = fpEnumeratePhysicalDeviceGroups(icd_term->instance, &count_this_time,
7422                                                           &pPhysicalDeviceGroupProperties[cur_icd_group_count]);
7423                     if (res != VK_SUCCESS) {
7424                         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
7425                                    "terminator_EnumeratePhysicalDeviceGroups:  Failed during dispatch call of "
7426                                    "\'EnumeratePhysicalDeviceGroups\' to ICD %s to get group information.",
7427                                    icd_term->scanned_icd->lib_name);
7428                         goto out;
7429                     }
7430                     for (uint32_t group = 0; group < count_this_time; ++group) {
7431                         uint32_t cur_index = group + cur_icd_group_count;
7432                         local_phys_dev_groups[cur_index].group_props = pPhysicalDeviceGroupProperties[cur_index];
7433                         local_phys_dev_groups[cur_index].this_icd_term = icd_term;
7434                     }
7435                 } else {
7436                     // There's not enough space in the caller's allocated pPhysicalDeviceGroupProperties structs,
7437                     // so we have to allocate temporary versions to collect all the data.  However, we need to make
7438                     // sure that at least the ones we do query utilize any pNext data in the caller's version.
7439                     VkPhysicalDeviceGroupProperties *tmp_group_props =
7440                         loader_stack_alloc(count_this_time * sizeof(VkPhysicalDeviceGroupProperties));
                         if (NULL == tmp_group_props) {
                             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
                                        "terminator_EnumeratePhysicalDeviceGroups:  Failed to allocate temporary physical device "
                                        "group array of size %d",
                                        count_this_time);
                             res = VK_ERROR_OUT_OF_HOST_MEMORY;
                             goto out;
                         }
7441                     for (uint32_t group = 0; group < count_this_time; group++) {
7442                         tmp_group_props[group].sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES;
7443                         uint32_t cur_index = group + cur_icd_group_count;
7444                         if (*pPhysicalDeviceGroupCount > cur_index) {
7445                             tmp_group_props[group].pNext = pPhysicalDeviceGroupProperties[cur_index].pNext;
7446                         } else {
7447                             tmp_group_props[group].pNext = NULL;
7448                         }
7449                         tmp_group_props[group].subsetAllocation = false;
7450                     }
7451 
7452                     res = fpEnumeratePhysicalDeviceGroups(icd_term->instance, &count_this_time, tmp_group_props);
7453                     if (res != VK_SUCCESS) {
7454                         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
7455                                    "terminator_EnumeratePhysicalDeviceGroups:  Failed during dispatch call of "
7456                                    "\'EnumeratePhysicalDeviceGroups\' to ICD %s  to get group information for temp data.",
7457                                    icd_term->scanned_icd->lib_name);
7458                         goto out;
7459                     }
7460                     for (uint32_t group = 0; group < count_this_time; ++group) {
7461                         uint32_t cur_index = group + cur_icd_group_count;
7462                         local_phys_dev_groups[cur_index].group_props = tmp_group_props[group];
7463                         local_phys_dev_groups[cur_index].this_icd_term = icd_term;
7464                     }
7465                 }
7466                 if (VK_SUCCESS != res) {
7467                     loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
7468                                "terminator_EnumeratePhysicalDeviceGroups:  Failed during dispatch call of "
7469                                "\'EnumeratePhysicalDeviceGroups\' to ICD %s to get content.",
7470                                icd_term->scanned_icd->lib_name);
7471                     goto out;
7472                 }
7473             }
7474 
7475             cur_icd_group_count += count_this_time;
7476             icd_term = icd_term->next;
7477         }
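             // local_phys_dev_groups now contains one entry per reported group, still holding the
             // raw VkPhysicalDevice handles returned by the ICDs.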
7478 
7479 #if defined(LOADER_ENABLE_LINUX_SORT)
7480         if (is_linux_sort_enabled(inst)) {
7481             // Sort the physical device groups using the Linux device sorting mechanism
7482             res = linux_sort_physical_device_groups(inst, total_count, local_phys_dev_groups);
7483         }
7484 #elif defined(_WIN32)
7485         // The Windows sorting information is only on physical devices.  We need to take that and convert it to the group
7486         // information if it's present.
7487         if (sorted_count > 0) {
7488             res =
7489                 windows_sort_physical_device_groups(inst, total_count, local_phys_dev_groups, sorted_count, sorted_phys_dev_array);
7490         }
7491 #endif  // LOADER_ENABLE_LINUX_SORT
7492 
7493         // Just to be safe, make sure we successfully completed setup_loader_term_phys_devs above
7494         // before attempting to do the following.  By verifying that setup_loader_term_phys_devs ran
7495         // first, it guarantees that each physical device will have a loader-specific handle.
7496         if (NULL != inst->phys_devs_term) {
7497             for (uint32_t group = 0; group < total_count; group++) {
7498                 for (uint32_t group_gpu = 0; group_gpu < local_phys_dev_groups[group].group_props.physicalDeviceCount;
7499                      group_gpu++) {
7500                     bool found = false;
7501                     for (uint32_t term_gpu = 0; term_gpu < inst->phys_dev_count_term; term_gpu++) {
7502                         if (local_phys_dev_groups[group].group_props.physicalDevices[group_gpu] ==
7503                             inst->phys_devs_term[term_gpu]->phys_dev) {
7504                             local_phys_dev_groups[group].group_props.physicalDevices[group_gpu] =
7505                                 (VkPhysicalDevice)inst->phys_devs_term[term_gpu];
7506                             found = true;
7507                             break;
7508                         }
7509                     }
7510                     if (!found) {
7511                         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
7512                                    "terminator_EnumeratePhysicalDeviceGroups:  Failed to find GPU %d in group %d returned by "
7513                                    "\'EnumeratePhysicalDeviceGroups\' in list returned by \'EnumeratePhysicalDevices\'",
7514                                    group_gpu, group);
7515                         res = VK_ERROR_INITIALIZATION_FAILED;
7516                         goto out;
7517                     }
7518                 }
7519             }
7520         }
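             // Every group member now refers to the loader's terminator physical device object
             // rather than the raw ICD handle, matching the handles the loader hands back up the
             // call chain.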
7521 
7522         uint32_t idx = 0;
7523 
7524         // Copy or create everything to fill the new array of physical device groups
7525         for (uint32_t group = 0; group < total_count; group++) {
7526             // Skip groups which have been included through sorting
7527             if (local_phys_dev_groups[group].group_props.physicalDeviceCount == 0) {
7528                 continue;
7529             }
7530 
7531             // Find the VkPhysicalDeviceGroupProperties object in local_phys_dev_groups
7532             VkPhysicalDeviceGroupProperties *group_properties = &local_phys_dev_groups[group].group_props;
7533 
7534             // Check if this physical device group with the same contents is already in the old buffer
7535             for (uint32_t old_idx = 0; old_idx < inst->phys_dev_group_count_term; old_idx++) {
7536                 if (NULL != group_properties && NULL != inst->phys_dev_groups_term[old_idx] &&
7537                     group_properties->physicalDeviceCount == inst->phys_dev_groups_term[old_idx]->physicalDeviceCount) {
7538                     bool found_all_gpus = true;
7539                     for (uint32_t old_gpu = 0; old_gpu < inst->phys_dev_groups_term[old_idx]->physicalDeviceCount; old_gpu++) {
7540                         bool found_gpu = false;
7541                         for (uint32_t new_gpu = 0; new_gpu < group_properties->physicalDeviceCount; new_gpu++) {
7542                             if (group_properties->physicalDevices[new_gpu] ==
7543                                 inst->phys_dev_groups_term[old_idx]->physicalDevices[old_gpu]) {
7544                                 found_gpu = true;
7545                                 break;
7546                             }
7547                         }
7548 
7549                         if (!found_gpu) {
7550                             found_all_gpus = false;
7551                             break;
7552                         }
7553                     }
7554                     if (!found_all_gpus) {
7555                         continue;
7556                     } else {
7557                         new_phys_dev_groups[idx] = inst->phys_dev_groups_term[old_idx];
7558                         break;
7559                     }
7560                 }
7561             }
7562             // If this physical device group isn't in the old buffer, create it
7563             if (group_properties != NULL && NULL == new_phys_dev_groups[idx]) {
7564                 new_phys_dev_groups[idx] = (VkPhysicalDeviceGroupProperties *)loader_instance_heap_alloc(
7565                     inst, sizeof(VkPhysicalDeviceGroupProperties), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
7566                 if (NULL == new_phys_dev_groups[idx]) {
7567                     loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
7568                                "terminator_EnumeratePhysicalDeviceGroups:  Failed to allocate physical device group Terminator "
7569                                "object %d",
7570                                idx);
7571                     total_count = idx;
7572                     res = VK_ERROR_OUT_OF_HOST_MEMORY;
7573                     goto out;
7574                 }
7575                 memcpy(new_phys_dev_groups[idx], group_properties, sizeof(VkPhysicalDeviceGroupProperties));
7576             }
7577 
7578             ++idx;
7579         }
7580     }
7581 
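     // Cleanup and commit path.  On failure, free only the group objects created above; entries
     // that were reused from the instance's old list must not be freed here or they would be freed
     // twice.  On success, release any stale old groups, install new_phys_dev_groups on the
     // instance, and copy up to the caller-provided capacity, returning VK_INCOMPLETE if that
     // capacity is too small.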
7582 out:
7583 
7584     if (NULL != pPhysicalDeviceGroupProperties) {
7585         if (VK_SUCCESS != res) {
7586             if (NULL != new_phys_dev_groups) {
7587                 // We've encountered an error, so we should free the new buffers.
7588                 for (uint32_t i = 0; i < total_count; i++) {
7589                     // An OOM while copying the new physical device groups into the new array can leave entries that
7590                     // still point at old physical device groups carried over from the existing array; freeing those
7591                     // here would free them twice. To avoid this we just make sure to not delete physical device groups
7592                     // which were copied over from the old array.
7593                     bool found = false;
7594                     if (NULL != inst->phys_dev_groups_term) {
7595                         for (uint32_t old_idx = 0; old_idx < inst->phys_dev_group_count_term; old_idx++) {
7596                             if (new_phys_dev_groups[i] == inst->phys_dev_groups_term[old_idx]) {
7597                                 found = true;
7598                                 break;
7599                             }
7600                         }
7601                     }
7602                     if (!found) {
7603                         loader_instance_heap_free(inst, new_phys_dev_groups[i]);
7604                     }
7605                 }
7606                 loader_instance_heap_free(inst, new_phys_dev_groups);
7607             }
7608         } else {
7609             if (NULL != inst->phys_dev_groups_term) {
7610                 // Free everything in the old array that was not carried over into the new array.
7611                 // We can't attempt to do this any earlier, because the loop above the "out:" label
7612                 // may still hit an out-of-memory condition, and freeing the old entries before that
7613                 // point could leak memory.
7614                 for (uint32_t i = 0; i < inst->phys_dev_group_count_term; i++) {
7615                     bool found = false;
7616                     for (uint32_t j = 0; j < total_count; j++) {
7617                         if (inst->phys_dev_groups_term[i] == new_phys_dev_groups[j]) {
7618                             found = true;
7619                             break;
7620                         }
7621                     }
7622                     if (!found) {
7623                         loader_instance_heap_free(inst, inst->phys_dev_groups_term[i]);
7624                     }
7625                 }
7626                 loader_instance_heap_free(inst, inst->phys_dev_groups_term);
7627             }
7628 
7629             // Swap in the new physical device group list
7630             inst->phys_dev_group_count_term = total_count;
7631             inst->phys_dev_groups_term = new_phys_dev_groups;
7632         }
7633 
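             // The sorted physical device helper list (populated only on the Windows path above)
             // was only needed while building the groups, so release it on both success and failure.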
7634         if (sorted_phys_dev_array != NULL) {
7635             for (uint32_t i = 0; i < sorted_count; ++i) {
7636                 if (sorted_phys_dev_array[i].device_count > 0 && sorted_phys_dev_array[i].physical_devices != NULL) {
7637                     loader_instance_heap_free(inst, sorted_phys_dev_array[i].physical_devices);
7638                 }
7639             }
7640             loader_instance_heap_free(inst, sorted_phys_dev_array);
7641         }
7642 
7643         uint32_t copy_count = inst->phys_dev_group_count_term;
7644         if (NULL != pPhysicalDeviceGroupProperties) {
7645             if (copy_count > *pPhysicalDeviceGroupCount) {
7646                 copy_count = *pPhysicalDeviceGroupCount;
7647                 loader_log(inst, VULKAN_LOADER_INFO_BIT, 0,
7648                            "terminator_EnumeratePhysicalDeviceGroups : Trimming device count from %d to %d.",
7649                            inst->phys_dev_group_count_term, copy_count);
7650                 res = VK_INCOMPLETE;
7651             }
7652 
7653             for (uint32_t i = 0; i < copy_count; i++) {
7654                 memcpy(&pPhysicalDeviceGroupProperties[i], inst->phys_dev_groups_term[i], sizeof(VkPhysicalDeviceGroupProperties));
7655             }
7656         }
7657 
7658         *pPhysicalDeviceGroupCount = copy_count;
7659 
7660     } else {
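             // Count-only query: pPhysicalDeviceGroupProperties is NULL, so just report how many
             // groups are available, per the standard Vulkan two-call enumeration pattern.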
7661         *pPhysicalDeviceGroupCount = total_count;
7662     }
7663     return res;
7664 }
7665