1 /*
2  *
3  * Copyright (c) 2014-2023 The Khronos Group Inc.
4  * Copyright (c) 2014-2023 Valve Corporation
5  * Copyright (c) 2014-2023 LunarG, Inc.
6  * Copyright (C) 2015 Google Inc.
7  * Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
8  * Copyright (c) 2023-2023 RasterGrid Kft.
9  *
10  * Licensed under the Apache License, Version 2.0 (the "License");
11  * you may not use this file except in compliance with the License.
12  * You may obtain a copy of the License at
13  *
14  *     http://www.apache.org/licenses/LICENSE-2.0
15  *
16  * Unless required by applicable law or agreed to in writing, software
17  * distributed under the License is distributed on an "AS IS" BASIS,
18  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19  * See the License for the specific language governing permissions and
20  * limitations under the License.
21 
22  *
23  * Author: Jon Ashburn <jon@lunarg.com>
24  * Author: Courtney Goeltzenleuchter <courtney@LunarG.com>
25  * Author: Mark Young <marky@lunarg.com>
26  * Author: Lenny Komow <lenny@lunarg.com>
27  * Author: Charles Giessen <charles@lunarg.com>
28  *
29  */
30 
31 #include "loader.h"
32 
33 #include <ctype.h>
34 #include <inttypes.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <stdarg.h>
38 #include <stdbool.h>
39 #include <string.h>
40 #include <stddef.h>
41 
42 #if defined(__APPLE__)
43 #include <CoreFoundation/CoreFoundation.h>
44 #include <sys/param.h>
45 #endif
46 
47 #include <sys/types.h>
48 #if defined(_WIN32)
49 #include "dirent_on_windows.h"
50 #elif COMMON_UNIX_PLATFORMS
51 #include <dirent.h>
52 #else
53 #warning dirent.h not available on this platform
54 #endif  // _WIN32
55 
56 #include "allocation.h"
57 #include "cJSON.h"
58 #include "debug_utils.h"
59 #include "loader_environment.h"
60 #include "gpa_helper.h"
61 #include "log.h"
62 #include "unknown_function_handling.h"
63 #include "vk_loader_platform.h"
64 #include "wsi.h"
65 
66 #if defined(WIN32)
67 #include "loader_windows.h"
68 #endif
69 #if defined(LOADER_ENABLE_LINUX_SORT)
70 // This header is currently only used when sorting Linux devices, so don't include it otherwise.
71 #include "loader_linux.h"
72 #endif  // LOADER_ENABLE_LINUX_SORT
73 
74 // Generated file containing all the extension data
75 #include "vk_loader_extensions.c"
76 
77 #if defined(__OHOS__)
78 #define IGRAPHICS_CONFG_DIR "/system/etc/vulkan/igraphics"
79 #endif
80 
81 struct loader_struct loader = {0};
82 
83 struct activated_layer_info {
84     char *name;
85     char *manifest;
86     char *library;
87     bool is_implicit;
88     char *disable_env;
89 };
90 
91 // thread safety lock for accessing global data structures such as "loader"
92 // all entrypoints on the instance chain need to be locked except GPA
93 // additionally CreateDevice and DestroyDevice need to be locked
94 loader_platform_thread_mutex loader_lock;
95 loader_platform_thread_mutex loader_preload_icd_lock;
96 loader_platform_thread_mutex loader_global_instance_list_lock;
97 
98 // A list of ICDs that gets initialized when the loader does its global initialization. This list should never be used by anything
99 // other than EnumerateInstanceExtensionProperties(), vkDestroyInstance, and loader_release(). This list does not change
100 // functionality, but the fact that the libraries have already been loaded causes any call that needs to load ICD libraries to speed up
101 // significantly. This can have a huge impact when making repeated calls to vkEnumerateInstanceExtensionProperties and
102 // vkCreateInstance.
103 struct loader_icd_tramp_list scanned_icds;
104 
105 // controls whether loader_platform_close_library() closes the libraries or not - controlled by an environment
106 // variable - this is just the definition of the variable, usage is in vk_loader_platform.h
107 bool loader_disable_dynamic_library_unloading;
108 
109 LOADER_PLATFORM_THREAD_ONCE_DECLARATION(once_init);
110 
111 // Creates loader_api_version struct that contains the major and minor fields, setting patch to 0
112 loader_api_version loader_make_version(uint32_t version) {
113     loader_api_version out_version;
114     out_version.major = VK_API_VERSION_MAJOR(version);
115     out_version.minor = VK_API_VERSION_MINOR(version);
116     out_version.patch = 0;
117     return out_version;
118 }
119 
120 // Creates loader_api_version struct containing the major, minor, and patch fields
121 loader_api_version loader_make_full_version(uint32_t version) {
122     loader_api_version out_version;
123     out_version.major = VK_API_VERSION_MAJOR(version);
124     out_version.minor = VK_API_VERSION_MINOR(version);
125     out_version.patch = VK_API_VERSION_PATCH(version);
126     return out_version;
127 }
128 
129 loader_api_version loader_combine_version(uint32_t major, uint32_t minor, uint32_t patch) {
130     loader_api_version out_version;
131     out_version.major = (uint16_t)major;
132     out_version.minor = (uint16_t)minor;
133     out_version.patch = (uint16_t)patch;
134     return out_version;
135 }
136 
137 // Helper for determining whether a version meets a required minimum version
138 bool loader_check_version_meets_required(loader_api_version required, loader_api_version version) {
139     // major version is greater than the required major version
140     return (version.major > required.major) ||
141            // major versions are equal, minor version is greater than the required minor version
142            (version.major == required.major && version.minor > required.minor) ||
143            // major and minor versions are equal, patch version is greater than or equal to the required patch version
144            (version.major == required.major && version.minor == required.minor && version.patch >= required.patch);
145 }
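// Illustrative example (not part of the loader): with the helpers above,
//     loader_check_version_meets_required(loader_combine_version(1, 2, 0), loader_make_version(VK_API_VERSION_1_3))
// returns true because 1.3 satisfies a 1.2 minimum, while swapping the two
// arguments returns false because 1.2.x does not meet a 1.3 minimum.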
146 
147 // Wrapper around opendir so that the dirent_on_windows implementation gets the instance it needs,
148 // while the Linux opendir & readdir do not
149 DIR *loader_opendir(const struct loader_instance *instance, const char *name) {
150 #if defined(_WIN32)
151     return opendir(instance ? &instance->alloc_callbacks : NULL, name);
152 #elif COMMON_UNIX_PLATFORMS
153     (void)instance;
154     return opendir(name);
155 #else
156 #warning dirent.h - opendir not available on this platform
157 #endif  // _WIN32
158 }
159 int loader_closedir(const struct loader_instance *instance, DIR *dir) {
160 #if defined(_WIN32)
161     return closedir(instance ? &instance->alloc_callbacks : NULL, dir);
162 #elif COMMON_UNIX_PLATFORMS
163     (void)instance;
164     return closedir(dir);
165 #else
166 #warning dirent.h - closedir not available on this platform
167 #endif  // _WIN32
168 }
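// Illustrative usage sketch (assumes readdir() keeps its usual one-argument signature on
// both the dirent_on_windows shim and POSIX; "inst" and "manifest_dir_path" are placeholders):
//
//     DIR *dir = loader_opendir(inst, manifest_dir_path);
//     if (NULL != dir) {
//         struct dirent *entry;
//         while (NULL != (entry = readdir(dir))) {
//             // inspect entry->d_name here, e.g. check its suffix with is_json()
//         }
//         loader_closedir(inst, dir);
//     }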
169 
170 bool is_json(const char *path, size_t len) {
171     if (len < 5) {
172         return false;
173     }
174     return !strncmp(path, ".json", 5);
175 }
176 
177 // Handle errors from library loading
178 void loader_handle_load_library_error(const struct loader_instance *inst, const char *filename,
179                                       enum loader_layer_library_status *lib_status) {
180     const char *error_message = loader_platform_open_library_error(filename);
181     // If the error is due to incompatible architecture (eg 32 bit vs 64 bit), report it with INFO level
182     // Discussed in Github issue 262 & 644
183     // "wrong ELF class" is a linux error, " with error 193" is a windows error
184     VkFlags err_flag = VULKAN_LOADER_ERROR_BIT;
185     if (strstr(error_message, "wrong ELF class:") != NULL || strstr(error_message, " with error 193") != NULL) {
186         err_flag = VULKAN_LOADER_INFO_BIT;
187         if (NULL != lib_status) {
188             *lib_status = LOADER_LAYER_LIB_ERROR_WRONG_BIT_TYPE;
189         }
190     }
191     // Check if the error is due to lack of memory
192     // "with error 8" is the windows error code for OOM cases, aka ERROR_NOT_ENOUGH_MEMORY
193     // Linux doesn't have such a nice error message - only if there are reported issues should this be called
194     else if (strstr(error_message, " with error 8") != NULL) {
195         if (NULL != lib_status) {
196             *lib_status = LOADER_LAYER_LIB_ERROR_OUT_OF_MEMORY;
197         }
198     } else if (NULL != lib_status) {
199         *lib_status = LOADER_LAYER_LIB_ERROR_FAILED_TO_LOAD;
200     }
201     loader_log(inst, err_flag, 0, error_message);
202 }
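// Typical call pattern, shown as a sketch (assumes loader_platform_open_library() returns
// NULL on failure, as it is used elsewhere in the loader; "inst" and "filename" are placeholders):
//
//     loader_platform_dl_handle handle = loader_platform_open_library(filename);
//     if (NULL == handle) {
//         enum loader_layer_library_status lib_status;
//         loader_handle_load_library_error(inst, filename, &lib_status);
//     }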
203 
204 VKAPI_ATTR VkResult VKAPI_CALL vkSetInstanceDispatch(VkInstance instance, void *object) {
205     struct loader_instance *inst = loader_get_instance(instance);
206     if (!inst) {
207         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "vkSetInstanceDispatch: Can not retrieve Instance dispatch table.");
208         return VK_ERROR_INITIALIZATION_FAILED;
209     }
210     loader_set_dispatch(object, inst->disp);
211     return VK_SUCCESS;
212 }
213 
214 VKAPI_ATTR VkResult VKAPI_CALL vkSetDeviceDispatch(VkDevice device, void *object) {
215     struct loader_device *dev;
216     struct loader_icd_term *icd_term = loader_get_icd_and_device(device, &dev, NULL);
217 
218     if (NULL == icd_term || NULL == dev) {
219         return VK_ERROR_INITIALIZATION_FAILED;
220     }
221     loader_set_dispatch(object, &dev->loader_dispatch);
222     return VK_SUCCESS;
223 }
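// These two exports let a caller that creates a dispatchable object outside the normal
// trampoline path ask the loader to stamp the proper dispatch table into it. A hedged
// sketch (the object name is a placeholder):
//
//     VkResult r = vkSetDeviceDispatch(device, (void *)newly_created_dispatchable_object);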
224 
225 void loader_free_layer_properties(const struct loader_instance *inst, struct loader_layer_properties *layer_properties) {
226     loader_instance_heap_free(inst, layer_properties->manifest_file_name);
227     loader_instance_heap_free(inst, layer_properties->lib_name);
228     loader_instance_heap_free(inst, layer_properties->functions.str_gipa);
229     loader_instance_heap_free(inst, layer_properties->functions.str_gdpa);
230     loader_instance_heap_free(inst, layer_properties->functions.str_negotiate_interface);
231     loader_destroy_generic_list(inst, (struct loader_generic_list *)&layer_properties->instance_extension_list);
232     if (layer_properties->device_extension_list.capacity > 0 && NULL != layer_properties->device_extension_list.list) {
233         for (uint32_t i = 0; i < layer_properties->device_extension_list.count; i++) {
234             free_string_list(inst, &layer_properties->device_extension_list.list[i].entrypoints);
235         }
236     }
237     loader_destroy_generic_list(inst, (struct loader_generic_list *)&layer_properties->device_extension_list);
238     loader_instance_heap_free(inst, layer_properties->disable_env_var.name);
239     loader_instance_heap_free(inst, layer_properties->disable_env_var.value);
240     loader_instance_heap_free(inst, layer_properties->enable_env_var.name);
241     loader_instance_heap_free(inst, layer_properties->enable_env_var.value);
242     free_string_list(inst, &layer_properties->component_layer_names);
243     loader_instance_heap_free(inst, layer_properties->pre_instance_functions.enumerate_instance_extension_properties);
244     loader_instance_heap_free(inst, layer_properties->pre_instance_functions.enumerate_instance_layer_properties);
245     loader_instance_heap_free(inst, layer_properties->pre_instance_functions.enumerate_instance_version);
246     free_string_list(inst, &layer_properties->override_paths);
247     free_string_list(inst, &layer_properties->blacklist_layer_names);
248     free_string_list(inst, &layer_properties->app_key_paths);
249 
250     // Make sure to clear out the removed layer, in case new layers are added in the previous location
251     memset(layer_properties, 0, sizeof(struct loader_layer_properties));
252 }
253 
254 VkResult loader_init_library_list(struct loader_layer_list *instance_layers, loader_platform_dl_handle **libs) {
255     if (instance_layers->count > 0) {
256         *libs = loader_calloc(NULL, sizeof(loader_platform_dl_handle) * instance_layers->count, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
257         if (*libs == NULL) {
258             return VK_ERROR_OUT_OF_HOST_MEMORY;
259         }
260     }
261     return VK_SUCCESS;
262 }
263 
264 VkResult loader_copy_to_new_str(const struct loader_instance *inst, const char *source_str, char **dest_str) {
265     assert(source_str && dest_str);
266     size_t str_len = strlen(source_str) + 1;
267     *dest_str = loader_instance_heap_calloc(inst, str_len, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
268     if (NULL == *dest_str) return VK_ERROR_OUT_OF_HOST_MEMORY;
269     loader_strncpy(*dest_str, str_len, source_str, str_len);
270     (*dest_str)[str_len - 1] = 0;
271     return VK_SUCCESS;
272 }
273 
274 VkResult create_string_list(const struct loader_instance *inst, uint32_t allocated_count, struct loader_string_list *string_list) {
275     assert(string_list);
276     string_list->list = loader_instance_heap_calloc(inst, sizeof(char *) * allocated_count, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
277     if (NULL == string_list->list) {
278         return VK_ERROR_OUT_OF_HOST_MEMORY;
279     }
280     string_list->allocated_count = allocated_count;
281     string_list->count = 0;
282     return VK_SUCCESS;
283 }
284 
285 VkResult append_str_to_string_list(const struct loader_instance *inst, struct loader_string_list *string_list, char *str) {
286     assert(string_list && str);
287     if (string_list->allocated_count == 0) {
288         string_list->allocated_count = 32;
289         string_list->list =
290             loader_instance_heap_calloc(inst, sizeof(char *) * string_list->allocated_count, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
291         if (NULL == string_list->list) {
292             return VK_ERROR_OUT_OF_HOST_MEMORY;
293         }
294     } else if (string_list->count + 1 > string_list->allocated_count) {
295         uint32_t new_allocated_count = string_list->allocated_count * 2;
296         string_list->list = loader_instance_heap_realloc(inst, string_list->list, sizeof(char *) * string_list->allocated_count,
297                                                          sizeof(char *) * new_allocated_count, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
298         if (NULL == string_list->list) {
299             return VK_ERROR_OUT_OF_HOST_MEMORY;
300         }
301         // Null out the new space
302         memset(string_list->list + string_list->allocated_count, 0, sizeof(char *) * string_list->allocated_count);
303         string_list->allocated_count *= 2;
304     }
305     string_list->list[string_list->count++] = str;
306     return VK_SUCCESS;
307 }
308 
309 VkResult copy_str_to_string_list(const struct loader_instance *inst, struct loader_string_list *string_list, const char *str,
310                                  size_t str_len) {
311     assert(string_list && str);
312     char *new_str = loader_instance_heap_calloc(inst, sizeof(char *) * str_len + 1, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
313     if (NULL == new_str) {
314         return VK_ERROR_OUT_OF_HOST_MEMORY;
315     }
316     loader_strncpy(new_str, sizeof(char *) * str_len + 1, str, str_len);
317     new_str[str_len] = '\0';
318     VkResult res = append_str_to_string_list(inst, string_list, new_str);
319     if (res != VK_SUCCESS) {
320         // Cleanup new_str if the append failed - as append_str_to_string_list takes ownership but not if the function fails
321         loader_instance_heap_free(inst, new_str);
322     }
323     return res;
324 }
325 
326 void free_string_list(const struct loader_instance *inst, struct loader_string_list *string_list) {
327     assert(string_list);
328     if (string_list->list) {
329         for (uint32_t i = 0; i < string_list->count; i++) {
330             loader_instance_heap_free(inst, string_list->list[i]);
331             string_list->list[i] = NULL;
332         }
333         loader_instance_heap_free(inst, string_list->list);
334     }
335     memset(string_list, 0, sizeof(struct loader_string_list));
336 }
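// Lifecycle sketch for loader_string_list (illustrative only; a zero-initialized struct is
// valid because append_str_to_string_list allocates storage on first use):
//
//     struct loader_string_list names = {0};
//     VkResult res = copy_str_to_string_list(inst, &names, "VK_LAYER_KHRONOS_validation",
//                                            strlen("VK_LAYER_KHRONOS_validation"));
//     // ... read names.list[0] through names.list[names.count - 1] ...
//     free_string_list(inst, &names);  // frees every entry and the array itself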
337 
338 // Given a string of the three-part form "maj.min.pat", convert it to a Vulkan version number.
339 // Also understands the four-part form "variant.major.minor.patch" if provided.
340 uint32_t loader_parse_version_string(char *vers_str) {
341     uint32_t variant = 0, major = 0, minor = 0, patch = 0;
342     char *vers_tok;
343     char *context = NULL;
344     if (!vers_str) {
345         return 0;
346     }
347 
348     vers_tok = thread_safe_strtok(vers_str, ".\"\n\r", &context);
349     if (NULL != vers_tok) {
350         major = (uint16_t)atoi(vers_tok);
351         vers_tok = thread_safe_strtok(NULL, ".\"\n\r", &context);
352         if (NULL != vers_tok) {
353             minor = (uint16_t)atoi(vers_tok);
354             vers_tok = thread_safe_strtok(NULL, ".\"\n\r", &context);
355             if (NULL != vers_tok) {
356                 patch = (uint16_t)atoi(vers_tok);
357                 vers_tok = thread_safe_strtok(NULL, ".\"\n\r", &context);
358                 // check that we are using a 4 part version string
359                 if (NULL != vers_tok) {
360                     // if we are, move the values over into the correct place
361                     variant = major;
362                     major = minor;
363                     minor = patch;
364                     patch = (uint16_t)atoi(vers_tok);
365                 }
366             }
367         }
368     }
369 
370     return VK_MAKE_API_VERSION(variant, major, minor, patch);
371 }
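// Worked example: loader_parse_version_string("1.3.250") returns
// VK_MAKE_API_VERSION(0, 1, 3, 250); the four-part form "0.1.3.250" shifts the leading value
// into the variant field and yields the same result. A NULL input returns 0. Note that the
// input string is modified in place by thread_safe_strtok.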
372 
373 bool compare_vk_extension_properties(const VkExtensionProperties *op1, const VkExtensionProperties *op2) {
374     return strcmp(op1->extensionName, op2->extensionName) == 0 ? true : false;
375 }
376 
377 // Search the given ext_array for an extension matching the given vk_ext_prop
378 bool has_vk_extension_property_array(const VkExtensionProperties *vk_ext_prop, const uint32_t count,
379                                      const VkExtensionProperties *ext_array) {
380     for (uint32_t i = 0; i < count; i++) {
381         if (compare_vk_extension_properties(vk_ext_prop, &ext_array[i])) return true;
382     }
383     return false;
384 }
385 
386 // Search the given ext_list for an extension matching the given vk_ext_prop
387 bool has_vk_extension_property(const VkExtensionProperties *vk_ext_prop, const struct loader_extension_list *ext_list) {
388     for (uint32_t i = 0; i < ext_list->count; i++) {
389         if (compare_vk_extension_properties(&ext_list->list[i], vk_ext_prop)) return true;
390     }
391     return false;
392 }
393 
394 // Search the given ext_list for a device extension matching the given ext_prop
395 bool has_vk_dev_ext_property(const VkExtensionProperties *ext_prop, const struct loader_device_extension_list *ext_list) {
396     for (uint32_t i = 0; i < ext_list->count; i++) {
397         if (compare_vk_extension_properties(&ext_list->list[i].props, ext_prop)) return true;
398     }
399     return false;
400 }
401 
402 VkResult loader_append_layer_property(const struct loader_instance *inst, struct loader_layer_list *layer_list,
403                                       struct loader_layer_properties *layer_property) {
404     VkResult res = VK_SUCCESS;
405     if (layer_list->capacity == 0) {
406         res = loader_init_generic_list(inst, (struct loader_generic_list *)layer_list, sizeof(struct loader_layer_properties));
407         if (VK_SUCCESS != res) {
408             goto out;
409         }
410     }
411 
412     // Ensure enough room to add an entry
413     if ((layer_list->count + 1) * sizeof(struct loader_layer_properties) > layer_list->capacity) {
414         void *new_ptr = loader_instance_heap_realloc(inst, layer_list->list, layer_list->capacity, layer_list->capacity * 2,
415                                                      VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
416         if (NULL == new_ptr) {
417             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_append_layer_property: realloc failed for layer list");
418             res = VK_ERROR_OUT_OF_HOST_MEMORY;
419             goto out;
420         }
421         layer_list->list = new_ptr;
422         memset((uint8_t *)layer_list->list + layer_list->capacity, 0, layer_list->capacity);
423         layer_list->capacity *= 2;
424     }
425     memcpy(&layer_list->list[layer_list->count], layer_property, sizeof(struct loader_layer_properties));
426     layer_list->count++;
427     memset(layer_property, 0, sizeof(struct loader_layer_properties));
428 out:
429     if (res != VK_SUCCESS) {
430         loader_free_layer_properties(inst, layer_property);
431     }
432     return res;
433 }
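// Ownership sketch (illustrative; "instance_layers" is a placeholder): the property is copied
// into the list and the source struct is zeroed on success, or freed on failure, so the caller
// must not free its members afterwards in either case:
//
//     struct loader_layer_properties props = {0};
//     // ... fill props from a parsed manifest ...
//     VkResult res = loader_append_layer_property(inst, &instance_layers, &props);
//     // props no longer owns any allocations here, regardless of res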
434 
435 // Search the given layer list for a layer property matching the given layer name
436 struct loader_layer_properties *loader_find_layer_property(const char *name, const struct loader_layer_list *layer_list) {
437     for (uint32_t i = 0; i < layer_list->count; i++) {
438         const VkLayerProperties *item = &layer_list->list[i].info;
439         if (strcmp(name, item->layerName) == 0) return &layer_list->list[i];
440     }
441     return NULL;
442 }
443 
444 struct loader_layer_properties *loader_find_pointer_layer_property(const char *name,
445                                                                    const struct loader_pointer_layer_list *layer_list) {
446     for (uint32_t i = 0; i < layer_list->count; i++) {
447         const VkLayerProperties *item = &layer_list->list[i]->info;
448         if (strcmp(name, item->layerName) == 0) return layer_list->list[i];
449     }
450     return NULL;
451 }
452 
453 // Search the given layer list for a layer matching the given layer name
454 bool loader_find_layer_name_in_list(const char *name, const struct loader_pointer_layer_list *layer_list) {
455     if (NULL == layer_list) {
456         return false;
457     }
458     if (NULL != loader_find_pointer_layer_property(name, layer_list)) {
459         return true;
460     }
461     return false;
462 }
463 
464 // Search the given meta-layer's component list for a layer matching the given layer name
465 bool loader_find_layer_name_in_meta_layer(const struct loader_instance *inst, const char *layer_name,
466                                           struct loader_layer_list *layer_list, struct loader_layer_properties *meta_layer_props) {
467     for (uint32_t comp_layer = 0; comp_layer < meta_layer_props->component_layer_names.count; comp_layer++) {
468         if (!strcmp(meta_layer_props->component_layer_names.list[comp_layer], layer_name)) {
469             return true;
470         }
471         struct loader_layer_properties *comp_layer_props =
472             loader_find_layer_property(meta_layer_props->component_layer_names.list[comp_layer], layer_list);
473         if (NULL != comp_layer_props && (comp_layer_props->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER)) {
474             return loader_find_layer_name_in_meta_layer(inst, layer_name, layer_list, comp_layer_props);
475         }
476     }
477     return false;
478 }
479 
480 // Search the override layer's blacklist for a layer matching the given layer name
481 bool loader_find_layer_name_in_blacklist(const char *layer_name, struct loader_layer_properties *meta_layer_props) {
482     for (uint32_t black_layer = 0; black_layer < meta_layer_props->blacklist_layer_names.count; ++black_layer) {
483         if (!strcmp(meta_layer_props->blacklist_layer_names.list[black_layer], layer_name)) {
484             return true;
485         }
486     }
487     return false;
488 }
489 
490 // Remove all layer properties entries from the list
491 void loader_delete_layer_list_and_properties(const struct loader_instance *inst, struct loader_layer_list *layer_list) {
492     uint32_t i;
493     if (!layer_list) return;
494 
495     for (i = 0; i < layer_list->count; i++) {
496         if (layer_list->list[i].lib_handle) {
497             loader_platform_close_library(layer_list->list[i].lib_handle);
498             loader_log(inst, VULKAN_LOADER_DEBUG_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Unloading layer library %s",
499                        layer_list->list[i].lib_name);
500             layer_list->list[i].lib_handle = NULL;
501         }
502         loader_free_layer_properties(inst, &(layer_list->list[i]));
503     }
504     layer_list->count = 0;
505 
506     if (layer_list->capacity > 0) {
507         layer_list->capacity = 0;
508         loader_instance_heap_free(inst, layer_list->list);
509     }
510     memset(layer_list, 0, sizeof(struct loader_layer_list));
511 }
512 
513 void loader_remove_layer_in_list(const struct loader_instance *inst, struct loader_layer_list *layer_list,
514                                  uint32_t layer_to_remove) {
515     if (layer_list == NULL || layer_to_remove >= layer_list->count) {
516         return;
517     }
518     loader_free_layer_properties(inst, &(layer_list->list[layer_to_remove]));
519 
520     // Remove the current invalid meta-layer from the layer list.  Use memmove since we are
521     // overlapping the source and destination addresses.
522     memmove(&layer_list->list[layer_to_remove], &layer_list->list[layer_to_remove + 1],
523             sizeof(struct loader_layer_properties) * (layer_list->count - 1 - layer_to_remove));
524 
525     // Decrement the count (because we now have one less) and decrement the loop index since we need to
526     // re-check this index.
527     layer_list->count--;
528 }
529 
530 // Remove all layers in the layer list that are blacklisted by the override layer.
531 // NOTE: This should only be called if an override layer is found and not expired.
532 void loader_remove_layers_in_blacklist(const struct loader_instance *inst, struct loader_layer_list *layer_list) {
533     struct loader_layer_properties *override_prop = loader_find_layer_property(VK_OVERRIDE_LAYER_NAME, layer_list);
534     if (NULL == override_prop) {
535         return;
536     }
537 
538     for (int32_t j = 0; j < (int32_t)(layer_list->count); j++) {
539         struct loader_layer_properties cur_layer_prop = layer_list->list[j];
540         const char *cur_layer_name = &cur_layer_prop.info.layerName[0];
541 
542         // Skip the override layer itself.
543         if (!strcmp(VK_OVERRIDE_LAYER_NAME, cur_layer_name)) {
544             continue;
545         }
546 
547         // If found in the override layer's blacklist, remove it
548         if (loader_find_layer_name_in_blacklist(cur_layer_name, override_prop)) {
549             loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0,
550                        "loader_remove_layers_in_blacklist: Override layer is active and layer %s is in the blacklist inside of it. "
551                        "Removing that layer from current layer list.",
552                        cur_layer_name);
553             loader_remove_layer_in_list(inst, layer_list, j);
554             j--;
555 
556             // Re-do the query for the override layer
557             override_prop = loader_find_layer_property(VK_OVERRIDE_LAYER_NAME, layer_list);
558         }
559     }
560 }
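// For context, the blacklist consulted above comes from the override meta-layer's manifest.
// A minimal sketch of the relevant JSON, as produced by tools such as the Vulkan Configurator
// (field names and layer names shown for illustration only):
//
//     "name": "VK_LAYER_LUNARG_override",
//     "blacklisted_layers": [ "VK_LAYER_example_blocked" ]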
561 
562 // Remove all layers in the layer list that are not found inside any implicit meta-layers.
563 void loader_remove_layers_not_in_implicit_meta_layers(const struct loader_instance *inst, struct loader_layer_list *layer_list) {
564     int32_t i;
565     int32_t j;
566     int32_t layer_count = (int32_t)(layer_list->count);
567 
568     for (i = 0; i < layer_count; i++) {
569         layer_list->list[i].keep = false;
570     }
571 
572     for (i = 0; i < layer_count; i++) {
573         struct loader_layer_properties *cur_layer_prop = &layer_list->list[i];
574 
575         if (0 == (cur_layer_prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER)) {
576             cur_layer_prop->keep = true;
577             continue;
578         }
579         for (j = 0; j < layer_count; j++) {
580             struct loader_layer_properties *layer_to_check = &layer_list->list[j];
581 
582             if (i == j) {
583                 continue;
584             }
585 
586             if (layer_to_check->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER) {
587                 // For all layers found in this meta layer, we want to keep them as well.
588                 if (loader_find_layer_name_in_meta_layer(inst, cur_layer_prop->info.layerName, layer_list, layer_to_check)) {
589                     cur_layer_prop->keep = true;
590                 }
591             }
592         }
593     }
594 
595     // Remove any layers we don't want to keep (Don't use layer_count here as we need it to be
596     // dynamically updated if we delete a layer property in the list).
597     for (i = 0; i < (int32_t)(layer_list->count); i++) {
598         struct loader_layer_properties *cur_layer_prop = &layer_list->list[i];
599         if (!cur_layer_prop->keep) {
600             loader_log(
601                 inst, VULKAN_LOADER_DEBUG_BIT, 0,
602                 "loader_remove_layers_not_in_implicit_meta_layers : Implicit meta-layers are active, and layer %s is not listed "
603                 "inside of any.  So removing layer from current layer list.",
604                 cur_layer_prop->info.layerName);
605             loader_remove_layer_in_list(inst, layer_list, i);
606             i--;
607         }
608     }
609 }
610 
611 VkResult loader_add_instance_extensions(const struct loader_instance *inst,
612                                         const PFN_vkEnumerateInstanceExtensionProperties fp_get_props, const char *lib_name,
613                                         struct loader_extension_list *ext_list) {
614     uint32_t i, count = 0;
615     VkExtensionProperties *ext_props;
616     VkResult res = VK_SUCCESS;
617 
618     if (!fp_get_props) {
619         // No EnumerateInstanceExtensionProperties defined
620         goto out;
621     }
622 
623     // Make sure we never call ourselves by accident; this should never happen outside of error paths
624     if (fp_get_props == vkEnumerateInstanceExtensionProperties) {
625         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
626                    "loader_add_instance_extensions: %s's vkEnumerateInstanceExtensionProperties points to the loader, this would "
627                    "lead to infinite recursion.",
628                    lib_name);
629         goto out;
630     }
631 
632     res = fp_get_props(NULL, &count, NULL);
633     if (res != VK_SUCCESS) {
634         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
635                    "loader_add_instance_extensions: Error getting Instance extension count from %s", lib_name);
636         goto out;
637     }
638 
639     if (count == 0) {
640         // No ExtensionProperties to report
641         goto out;
642     }
643 
644     ext_props = loader_stack_alloc(count * sizeof(VkExtensionProperties));
645     if (NULL == ext_props) {
646         res = VK_ERROR_OUT_OF_HOST_MEMORY;
647         goto out;
648     }
649 
650     res = fp_get_props(NULL, &count, ext_props);
651     if (res != VK_SUCCESS) {
652         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_add_instance_extensions: Error getting Instance extensions from %s",
653                    lib_name);
654         goto out;
655     }
656 
657     for (i = 0; i < count; i++) {
658         bool ext_unsupported = wsi_unsupported_instance_extension(&ext_props[i]);
659         if (!ext_unsupported) {
660             res = loader_add_to_ext_list(inst, ext_list, 1, &ext_props[i]);
661             if (res != VK_SUCCESS) {
662                 goto out;
663             }
664         }
665     }
666 
667 out:
668     return res;
669 }
670 
671 VkResult loader_add_device_extensions(const struct loader_instance *inst,
672                                       PFN_vkEnumerateDeviceExtensionProperties fpEnumerateDeviceExtensionProperties,
673                                       VkPhysicalDevice physical_device, const char *lib_name,
674                                       struct loader_extension_list *ext_list) {
675     uint32_t i = 0, count = 0;
676     VkResult res = VK_SUCCESS;
677     VkExtensionProperties *ext_props = NULL;
678 
679     res = fpEnumerateDeviceExtensionProperties(physical_device, NULL, &count, NULL);
680     if (res != VK_SUCCESS) {
681         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
682                    "loader_add_device_extensions: Error getting physical device extension info count from library %s", lib_name);
683         return res;
684     }
685     if (count > 0) {
686         ext_props = loader_stack_alloc(count * sizeof(VkExtensionProperties));
687         if (!ext_props) {
688             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
689                        "loader_add_device_extensions: Failed to allocate space for device extension properties from library %s.",
690                        lib_name);
691             return VK_ERROR_OUT_OF_HOST_MEMORY;
692         }
693         res = fpEnumerateDeviceExtensionProperties(physical_device, NULL, &count, ext_props);
694         if (res != VK_SUCCESS) {
695             return res;
696         }
697         for (i = 0; i < count; i++) {
698             res = loader_add_to_ext_list(inst, ext_list, 1, &ext_props[i]);
699             if (res != VK_SUCCESS) {
700                 return res;
701             }
702         }
703     }
704 
705     return VK_SUCCESS;
706 }
707 
708 VkResult loader_init_generic_list(const struct loader_instance *inst, struct loader_generic_list *list_info, size_t element_size) {
709     size_t capacity = 32 * element_size;
710     list_info->count = 0;
711     list_info->capacity = 0;
712     list_info->list = loader_instance_heap_calloc(inst, capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
713     if (list_info->list == NULL) {
714         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_init_generic_list: Failed to allocate space for generic list");
715         return VK_ERROR_OUT_OF_HOST_MEMORY;
716     }
717     list_info->capacity = capacity;
718     return VK_SUCCESS;
719 }
720 
721 void loader_destroy_generic_list(const struct loader_instance *inst, struct loader_generic_list *list) {
722     loader_instance_heap_free(inst, list->list);
723     memset(list, 0, sizeof(struct loader_generic_list));
724 }
725 
726 // Append non-duplicate extension properties defined in props to the given ext_list.
727 // Return - VK_SUCCESS on success
728 VkResult loader_add_to_ext_list(const struct loader_instance *inst, struct loader_extension_list *ext_list,
729                                 uint32_t prop_list_count, const VkExtensionProperties *props) {
730     if (ext_list->list == NULL || ext_list->capacity == 0) {
731         VkResult res = loader_init_generic_list(inst, (struct loader_generic_list *)ext_list, sizeof(VkExtensionProperties));
732         if (VK_SUCCESS != res) {
733             return res;
734         }
735     }
736 
737     for (uint32_t i = 0; i < prop_list_count; i++) {
738         const VkExtensionProperties *cur_ext = &props[i];
739 
740         // look for duplicates
741         if (has_vk_extension_property(cur_ext, ext_list)) {
742             continue;
743         }
744 
745         // add to list at end
746         // check for enough capacity
747         if (ext_list->count * sizeof(VkExtensionProperties) >= ext_list->capacity) {
748             void *new_ptr = loader_instance_heap_realloc(inst, ext_list->list, ext_list->capacity, ext_list->capacity * 2,
749                                                          VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
750             if (new_ptr == NULL) {
751                 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
752                            "loader_add_to_ext_list: Failed to reallocate space for extension list");
753                 return VK_ERROR_OUT_OF_HOST_MEMORY;
754             }
755             ext_list->list = new_ptr;
756 
757             // double capacity
758             ext_list->capacity *= 2;
759         }
760 
761         memcpy(&ext_list->list[ext_list->count], cur_ext, sizeof(VkExtensionProperties));
762         ext_list->count++;
763     }
764     return VK_SUCCESS;
765 }
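// Illustrative usage (a sketch; "icd_exts" is a placeholder list and the extension chosen is
// just an example):
//
//     VkExtensionProperties ext = {0};
//     loader_strncpy(ext.extensionName, sizeof(ext.extensionName), VK_KHR_SURFACE_EXTENSION_NAME,
//                    sizeof(VK_KHR_SURFACE_EXTENSION_NAME));
//     ext.specVersion = 25;
//     VkResult res = loader_add_to_ext_list(inst, &icd_exts, 1, &ext);  // duplicates are silently skipped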
766 
767 // Append one extension property defined in props with entrypoints defined in entries to the given
768 // ext_list. Do not append if a duplicate.
769 // If this is a duplicate, this function frees the passed-in entries - as in, it takes ownership over that list (if it is not
770 // NULL). Return - VK_SUCCESS on success
771 VkResult loader_add_to_dev_ext_list(const struct loader_instance *inst, struct loader_device_extension_list *ext_list,
772                                     const VkExtensionProperties *props, struct loader_string_list *entrys) {
773     VkResult res = VK_SUCCESS;
774     bool should_free_entrys = true;
775     if (ext_list->list == NULL || ext_list->capacity == 0) {
776         res = loader_init_generic_list(inst, (struct loader_generic_list *)ext_list, sizeof(struct loader_dev_ext_props));
777         if (VK_SUCCESS != res) {
778             goto out;
779         }
780     }
781 
782     // look for duplicates
783     if (has_vk_dev_ext_property(props, ext_list)) {
784         goto out;
785     }
786 
787     uint32_t idx = ext_list->count;
788     // add to list at end
789     // check for enough capacity
790     if (idx * sizeof(struct loader_dev_ext_props) >= ext_list->capacity) {
791         void *new_ptr = loader_instance_heap_realloc(inst, ext_list->list, ext_list->capacity, ext_list->capacity * 2,
792                                                      VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
793 
794         if (NULL == new_ptr) {
795             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
796                        "loader_add_to_dev_ext_list: Failed to reallocate space for device extension list");
797             res = VK_ERROR_OUT_OF_HOST_MEMORY;
798             goto out;
799         }
800         ext_list->list = new_ptr;
801 
802         // double capacity
803         ext_list->capacity *= 2;
804     }
805 
806     memcpy(&ext_list->list[idx].props, props, sizeof(*props));
807     if (entrys) {
808         ext_list->list[idx].entrypoints = *entrys;
809         should_free_entrys = false;
810     }
811     ext_list->count++;
812 out:
813     if (NULL != entrys && should_free_entrys) {
814         free_string_list(inst, entrys);
815     }
816     return res;
817 }
818 
819 // Create storage for pointers to loader_layer_properties
820 bool loader_init_pointer_layer_list(const struct loader_instance *inst, struct loader_pointer_layer_list *list) {
821     list->capacity = 32 * sizeof(void *);
822     list->list = loader_instance_heap_calloc(inst, list->capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
823     if (list->list == NULL) {
824         return false;
825     }
826     list->count = 0;
827     return true;
828 }
829 
830 // Search the given array of layer names for an entry matching the given VkLayerProperties
831 bool loader_names_array_has_layer_property(const VkLayerProperties *vk_layer_prop, uint32_t layer_info_count,
832                                            struct activated_layer_info *layer_info) {
833     for (uint32_t i = 0; i < layer_info_count; i++) {
834         if (strcmp(vk_layer_prop->layerName, layer_info[i].name) == 0) {
835             return true;
836         }
837     }
838     return false;
839 }
840 
841 void loader_destroy_pointer_layer_list(const struct loader_instance *inst, struct loader_pointer_layer_list *layer_list) {
842     loader_instance_heap_free(inst, layer_list->list);
843     memset(layer_list, 0, sizeof(struct loader_pointer_layer_list));
844 }
845 
846 // Append layer properties defined in prop_list to the given layer_info list
847 VkResult loader_add_layer_properties_to_list(const struct loader_instance *inst, struct loader_pointer_layer_list *list,
848                                              struct loader_layer_properties *props) {
849     if (list->list == NULL || list->capacity == 0) {
850         if (!loader_init_pointer_layer_list(inst, list)) {
851             return VK_ERROR_OUT_OF_HOST_MEMORY;
852         }
853     }
854 
855     // Check for enough capacity
856     if (((list->count + 1) * sizeof(struct loader_layer_properties)) >= list->capacity) {
857         size_t new_capacity = list->capacity * 2;
858         void *new_ptr =
859             loader_instance_heap_realloc(inst, list->list, list->capacity, new_capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
860         if (NULL == new_ptr) {
861             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
862                        "loader_add_layer_properties_to_list: Realloc failed when attempting to add a new layer");
863             return VK_ERROR_OUT_OF_HOST_MEMORY;
864         }
865         list->list = new_ptr;
866         list->capacity = new_capacity;
867     }
868     list->list[list->count++] = props;
869 
870     return VK_SUCCESS;
871 }
872 
873 // Determine if the provided explicit layer should be available by querying the appropriate environment variables.
874 bool loader_layer_is_available(const struct loader_instance *inst, const struct loader_envvar_all_filters *filters,
875                                const struct loader_layer_properties *prop) {
876     bool available = true;
877     bool is_implicit = (0 == (prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER));
878     bool disabled_by_type =
879         (is_implicit) ? (filters->disable_filter.disable_all_implicit) : (filters->disable_filter.disable_all_explicit);
880     if ((filters->disable_filter.disable_all || disabled_by_type ||
881          check_name_matches_filter_environment_var(prop->info.layerName, &filters->disable_filter.additional_filters)) &&
882         !check_name_matches_filter_environment_var(prop->info.layerName, &filters->allow_filter)) {
883         available = false;
884     }
885     if (check_name_matches_filter_environment_var(prop->info.layerName, &filters->enable_filter)) {
886         available = true;
887     } else if (!available) {
888         loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
889                    "Layer \"%s\" forced disabled because name matches filter of env var \'%s\'.", prop->info.layerName,
890                    VK_LAYERS_DISABLE_ENV_VAR);
891     }
892 
893     return available;
894 }
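// Example of the filtering implemented above, using the loader's layer filter environment
// variables (values are illustrative glob patterns):
//
//     VK_LOADER_LAYERS_DISABLE=~all~          disable every layer,
//     VK_LOADER_LAYERS_ENABLE=*validation*    but force layers matching this back on,
//     VK_LOADER_LAYERS_ALLOW=*profiles*       and exempt matching layers from the disable list.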
895 
896 // Search the given search_list for any layers in the props list.  Add these to the
897 // output layer_list.
898 VkResult loader_add_layer_names_to_list(const struct loader_instance *inst, const struct loader_envvar_all_filters *filters,
899                                         struct loader_pointer_layer_list *output_list,
900                                         struct loader_pointer_layer_list *expanded_output_list, uint32_t name_count,
901                                         const char *const *names, const struct loader_layer_list *source_list) {
902     VkResult err = VK_SUCCESS;
903 
904     for (uint32_t i = 0; i < name_count; i++) {
905         const char *source_name = names[i];
906 
907         struct loader_layer_properties *layer_prop = loader_find_layer_property(source_name, source_list);
908         if (NULL == layer_prop) {
909             loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_LAYER_BIT, 0,
910                        "loader_add_layer_names_to_list: Unable to find layer \"%s\"", source_name);
911             err = VK_ERROR_LAYER_NOT_PRESENT;
912             continue;
913         }
914 
915         // Make sure the layer isn't already in the output_list, skip adding it if it is.
916         if (loader_find_layer_name_in_list(source_name, output_list)) {
917             continue;
918         }
919 
920         if (!loader_layer_is_available(inst, filters, layer_prop)) {
921             continue;
922         }
923 
924         // If not a meta-layer, simply add it.
925         if (0 == (layer_prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER)) {
926             err = loader_add_layer_properties_to_list(inst, output_list, layer_prop);
927             if (err == VK_ERROR_OUT_OF_HOST_MEMORY) return err;
928             err = loader_add_layer_properties_to_list(inst, expanded_output_list, layer_prop);
929             if (err == VK_ERROR_OUT_OF_HOST_MEMORY) return err;
930         } else {
931             err = loader_add_meta_layer(inst, filters, layer_prop, output_list, expanded_output_list, source_list, NULL);
932             if (err == VK_ERROR_OUT_OF_HOST_MEMORY) return err;
933         }
934     }
935 
936     return err;
937 }
938 
939 // Determine if the provided implicit layer should be enabled by querying the appropriate environment variables.
940 // For an implicit layer, at least a disable environment variable is required.
941 bool loader_implicit_layer_is_enabled(const struct loader_instance *inst, const struct loader_envvar_all_filters *filters,
942                                       const struct loader_layer_properties *prop) {
943     bool enable = false;
944     bool forced_disabled = false;
945     bool forced_enabled = false;
946 
947     if ((filters->disable_filter.disable_all || filters->disable_filter.disable_all_implicit ||
948          check_name_matches_filter_environment_var(prop->info.layerName, &filters->disable_filter.additional_filters)) &&
949         !check_name_matches_filter_environment_var(prop->info.layerName, &filters->allow_filter)) {
950         forced_disabled = true;
951     }
952     if (check_name_matches_filter_environment_var(prop->info.layerName, &filters->enable_filter)) {
953         forced_enabled = true;
954     }
955 
956     // If no enable_environment variable is specified, this implicit layer is always enabled by default.
957     if (NULL == prop->enable_env_var.name) {
958         enable = true;
959     } else {
960         char *env_value = loader_getenv(prop->enable_env_var.name, inst);
961         if (env_value && !strcmp(prop->enable_env_var.value, env_value)) {
962             enable = true;
963         }
964 
965         // Otherwise, only enable this layer if the enable environment variable is defined
966         loader_free_getenv(env_value, inst);
967     }
968 
969     if (forced_enabled) {
970         // Only report a message that we've forced on a layer if it wouldn't have been enabled
971         // normally.
972         if (!enable) {
973             enable = true;
974             loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
975                        "Implicit layer \"%s\" forced enabled due to env var \'%s\'.", prop->info.layerName,
976                        VK_LAYERS_ENABLE_ENV_VAR);
977         }
978     } else if (enable && forced_disabled) {
979         enable = false;
980         // Report a message that we've forced off a layer if it would have been enabled normally.
981         loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
982                    "Implicit layer \"%s\" forced disabled because name matches filter of env var \'%s\'.", prop->info.layerName,
983                    VK_LAYERS_DISABLE_ENV_VAR);
984         return enable;
985     }
986 
987     // The disable_environment has priority over everything else.  If it is defined, the layer is always
988     // disabled.
989     if (NULL != prop->disable_env_var.name) {
990         char *env_value = loader_getenv(prop->disable_env_var.name, inst);
991         if (NULL != env_value) {
992             enable = false;
993         }
994         loader_free_getenv(env_value, inst);
995     } else if ((prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER) == 0) {
996         loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
997                    "Implicit layer \"%s\" missing disable_environment variable!", prop->info.layerName);
998     }
999 
1000     // Enable this layer if it is included in the override layer
1001     if (inst != NULL && inst->override_layer_present) {
1002         struct loader_layer_properties *override = NULL;
1003         for (uint32_t i = 0; i < inst->instance_layer_list.count; ++i) {
1004             if (strcmp(inst->instance_layer_list.list[i].info.layerName, VK_OVERRIDE_LAYER_NAME) == 0) {
1005                 override = &inst->instance_layer_list.list[i];
1006                 break;
1007             }
1008         }
1009         if (override != NULL) {
1010             for (uint32_t i = 0; i < override->component_layer_names.count; ++i) {
1011                 if (strcmp(override->component_layer_names.list[i], prop->info.layerName) == 0) {
1012                     enable = true;
1013                     break;
1014                 }
1015             }
1016         }
1017     }
1018 
1019     return enable;
1020 }
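// For context, the enable_environment / disable_environment pairs checked above come from the
// layer's JSON manifest. A minimal sketch of the relevant fields (variable names and values
// are illustrative):
//
//     "enable_environment":  { "ENABLE_MY_IMPLICIT_LAYER": "1" },
//     "disable_environment": { "DISABLE_MY_IMPLICIT_LAYER": "1" }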
1021 
1022 // Check the individual implicit layer for the enable/disable environment variable settings.  Only add it after
1023 // every check has passed indicating it should be used, including making sure a layer of the same name hasn't already been
1024 // added.
1025 VkResult loader_add_implicit_layer(const struct loader_instance *inst, struct loader_layer_properties *prop,
1026                                    const struct loader_envvar_all_filters *filters, struct loader_pointer_layer_list *target_list,
1027                                    struct loader_pointer_layer_list *expanded_target_list,
1028                                    const struct loader_layer_list *source_list) {
1029     VkResult result = VK_SUCCESS;
1030     if (loader_implicit_layer_is_enabled(inst, filters, prop)) {
1031         if (0 == (prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER)) {
1032             // Make sure the layer isn't already in the output_list, skip adding it if it is.
1033             if (loader_find_layer_name_in_list(&prop->info.layerName[0], target_list)) {
1034                 return result;
1035             }
1036 
1037             result = loader_add_layer_properties_to_list(inst, target_list, prop);
1038             if (result == VK_ERROR_OUT_OF_HOST_MEMORY) return result;
1039             if (NULL != expanded_target_list) {
1040                 result = loader_add_layer_properties_to_list(inst, expanded_target_list, prop);
1041             }
1042         } else {
1043             result = loader_add_meta_layer(inst, filters, prop, target_list, expanded_target_list, source_list, NULL);
1044         }
1045     }
1046     return result;
1047 }
1048 
1049 // Add the component layers of a meta-layer to the active list of layers
1050 VkResult loader_add_meta_layer(const struct loader_instance *inst, const struct loader_envvar_all_filters *filters,
1051                                struct loader_layer_properties *prop, struct loader_pointer_layer_list *target_list,
1052                                struct loader_pointer_layer_list *expanded_target_list, const struct loader_layer_list *source_list,
1053                                bool *out_found_all_component_layers) {
1054     VkResult result = VK_SUCCESS;
1055     bool found_all_component_layers = true;
1056 
1057     // We need to add all the individual component layers
1058     loader_api_version meta_layer_api_version = loader_make_version(prop->info.specVersion);
1059     for (uint32_t comp_layer = 0; comp_layer < prop->component_layer_names.count; comp_layer++) {
1060         struct loader_layer_properties *search_prop =
1061             loader_find_layer_property(prop->component_layer_names.list[comp_layer], source_list);
1062         if (search_prop != NULL) {
1063             loader_api_version search_prop_version = loader_make_version(search_prop->info.specVersion);
1064             if (!loader_check_version_meets_required(meta_layer_api_version, search_prop_version)) {
1065                 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
1066                            "Meta-layer \"%s\" API version %u.%u, component layer \"%s\" version %u.%u, may have "
1067                            "incompatibilities (Policy #LLP_LAYER_8)!",
1068                            prop->info.layerName, meta_layer_api_version.major, meta_layer_api_version.minor,
1069                            search_prop->info.layerName, search_prop_version.major, search_prop_version.minor);
1070             }
1071 
1072             if (!loader_layer_is_available(inst, filters, search_prop)) {
1073                 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
1074                            "Meta Layer \"%s\" component layer \"%s\" disabled.", prop->info.layerName, search_prop->info.layerName);
1075                 continue;
1076             }
1077 
1078             // If the component layer is itself an implicit layer, we need to do the implicit layer enable
1079             // checks
1080             if (0 == (search_prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER)) {
1081                 result = loader_add_implicit_layer(inst, search_prop, filters, target_list, expanded_target_list, source_list);
1082                 if (result == VK_ERROR_OUT_OF_HOST_MEMORY) return result;
1083             } else {
1084                 if (0 != (search_prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER)) {
1085                     bool found_layers_in_component_meta_layer = true;
1086                     result = loader_add_meta_layer(inst, filters, search_prop, target_list, expanded_target_list, source_list,
1087                                                    &found_layers_in_component_meta_layer);
1088                     if (result == VK_ERROR_OUT_OF_HOST_MEMORY) return result;
1089                     if (!found_layers_in_component_meta_layer) found_all_component_layers = false;
1090                 } else if (!loader_find_layer_name_in_list(&search_prop->info.layerName[0], target_list)) {
1091                     // Make sure the layer isn't already in the output_list, skip adding it if it is.
1092                     result = loader_add_layer_properties_to_list(inst, target_list, search_prop);
1093                     if (result == VK_ERROR_OUT_OF_HOST_MEMORY) return result;
1094                     if (NULL != expanded_target_list) {
1095                         result = loader_add_layer_properties_to_list(inst, expanded_target_list, search_prop);
1096                         if (result == VK_ERROR_OUT_OF_HOST_MEMORY) return result;
1097                     }
1098                 }
1099             }
1100         } else {
1101             loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
1102                        "Failed to find layer name \"%s\" component layer \"%s\" to activate (Policy #LLP_LAYER_7)",
1103                        prop->info.layerName, prop->component_layer_names.list[comp_layer]);
1104             found_all_component_layers = false;
1105         }
1106     }
1107 
1108     // Add this layer to the overall target list (not the expanded one)
1109     if (found_all_component_layers) {
1110         result = loader_add_layer_properties_to_list(inst, target_list, prop);
1111         if (result == VK_ERROR_OUT_OF_HOST_MEMORY) return result;
1112         // Write the result to out_found_all_component_layers in case this function is being recursed
1113         if (out_found_all_component_layers) *out_found_all_component_layers = found_all_component_layers;
1114     }
1115 
1116     return result;
1117 }
1118 
1119 VkExtensionProperties *get_extension_property(const char *name, const struct loader_extension_list *list) {
1120     for (uint32_t i = 0; i < list->count; i++) {
1121         if (strcmp(name, list->list[i].extensionName) == 0) return &list->list[i];
1122     }
1123     return NULL;
1124 }
1125 
1126 VkExtensionProperties *get_dev_extension_property(const char *name, const struct loader_device_extension_list *list) {
1127     for (uint32_t i = 0; i < list->count; i++) {
1128         if (strcmp(name, list->list[i].props.extensionName) == 0) return &list->list[i].props;
1129     }
1130     return NULL;
1131 }
1132 
1133 // For Instance extensions implemented within the loader (e.g. DEBUG_REPORT),
1134 // the extension must provide two entry points for the loader to use:
1135 // - "trampoline" entry point - this is the address returned by GetProcAddr
1136 //                              and will always do what's necessary to support a
1137 //                              global call.
1138 // - "terminator" function    - this function will be put at the end of the
1139 //                              instance chain and will contain the necessary logic
1140 //                              to call / process the extension for the appropriate
1141 //                              ICDs that are available.
1142 // There is no generic mechanism for including these functions, the references
1143 // must be placed into the appropriate loader entry points.
1144 // GetInstanceProcAddr: call extension GetInstanceProcAddr to check for GetProcAddr
1145 // requests
1146 // loader_coalesce_extensions(void) - add extension records to the list of global
1147 //                                    extensions available to the app.
1148 // instance_disp                    - add function pointer for terminator function
1149 //                                    to this array.
1150 // The extension itself should be in a separate file that will be linked directly
1151 // with the loader.
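
// Illustrative sketch (not part of the loader source): what a trampoline/terminator pair for a hypothetical
// loader-implemented instance entry point "vkFooEXT" might look like. The dispatch-table members and helper
// names used here are assumptions for illustration, not the exact fields of this code base.
//
//   // Trampoline: the address handed back by vkGetInstanceProcAddr; it jumps into the instance call chain.
//   VKAPI_ATTR VkResult VKAPI_CALL vkFooEXT(VkInstance instance) {
//       struct loader_instance *ptr_instance = loader_get_instance(instance);
//       if (NULL == ptr_instance) return VK_ERROR_INITIALIZATION_FAILED;
//       return ptr_instance->disp->layer_inst_disp.FooEXT(instance);
//   }
//
//   // Terminator: sits at the end of the chain and fans the call out to every ICD that provides the entry point.
//   VKAPI_ATTR VkResult VKAPI_CALL terminator_FooEXT(VkInstance instance) {
//       struct loader_instance *ptr_instance = loader_get_instance(instance);
//       for (struct loader_icd_term *icd_term = ptr_instance->icd_terms; icd_term; icd_term = icd_term->next) {
//           if (NULL != icd_term->dispatch.FooEXT) icd_term->dispatch.FooEXT(icd_term->instance);
//       }
//       return VK_SUCCESS;
//   }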
1152 VkResult loader_get_icd_loader_instance_extensions(const struct loader_instance *inst, struct loader_icd_tramp_list *icd_tramp_list,
1153                                                    struct loader_extension_list *inst_exts) {
1154     struct loader_extension_list icd_exts;
1155     VkResult res = VK_SUCCESS;
1156     char *env_value;
1157     bool filter_extensions = true;
1158 
1159     // Check if a user wants to disable the instance extension filtering behavior
1160     env_value = loader_getenv("VK_LOADER_DISABLE_INST_EXT_FILTER", inst);
1161     if (NULL != env_value && atoi(env_value) != 0) {
1162         filter_extensions = false;
1163     }
1164     loader_free_getenv(env_value, inst);
1165 
1166     // traverse scanned icd list adding non-duplicate extensions to the list
1167     for (uint32_t i = 0; i < icd_tramp_list->count; i++) {
1168         res = loader_init_generic_list(inst, (struct loader_generic_list *)&icd_exts, sizeof(VkExtensionProperties));
1169         if (VK_SUCCESS != res) {
1170             goto out;
1171         }
1172         res = loader_add_instance_extensions(inst, icd_tramp_list->scanned_list[i].EnumerateInstanceExtensionProperties,
1173                                              icd_tramp_list->scanned_list[i].lib_name, &icd_exts);
1174         if (VK_SUCCESS == res) {
1175             if (filter_extensions) {
1176                 // Remove any extensions not recognized by the loader
1177                 for (int32_t j = 0; j < (int32_t)icd_exts.count; j++) {
1178                     // See if the extension is in the list of supported extensions
1179                     bool found = false;
1180                     for (uint32_t k = 0; LOADER_INSTANCE_EXTENSIONS[k] != NULL; k++) {
1181                         if (strcmp(icd_exts.list[j].extensionName, LOADER_INSTANCE_EXTENSIONS[k]) == 0) {
1182                             found = true;
1183                             break;
1184                         }
1185                     }
1186 
1187                     // If it isn't in the list, remove it
1188                     if (!found) {
1189                         for (uint32_t k = j + 1; k < icd_exts.count; k++) {
1190                             icd_exts.list[k - 1] = icd_exts.list[k];
1191                         }
1192                         --icd_exts.count;
1193                         --j;
1194                     }
1195                 }
1196             }
1197 
1198             res = loader_add_to_ext_list(inst, inst_exts, icd_exts.count, icd_exts.list);
1199         }
1200         loader_destroy_generic_list(inst, (struct loader_generic_list *)&icd_exts);
1201         if (VK_SUCCESS != res) {
1202             goto out;
1203         }
1204     }
1205 
1206     // Traverse loader's extensions, adding non-duplicate extensions to the list
1207     res = add_debug_extensions_to_ext_list(inst, inst_exts);
1208     if (res == VK_ERROR_OUT_OF_HOST_MEMORY) {
1209         goto out;
1210     }
1211     const VkExtensionProperties portability_enumeration_extension_info[] = {
1212         {VK_KHR_PORTABILITY_ENUMERATION_EXTENSION_NAME, VK_KHR_PORTABILITY_ENUMERATION_SPEC_VERSION}};
1213 
1214     // Add VK_KHR_portability_enumeration
1215     res = loader_add_to_ext_list(inst, inst_exts, sizeof(portability_enumeration_extension_info) / sizeof(VkExtensionProperties),
1216                                  portability_enumeration_extension_info);
1217     if (res == VK_ERROR_OUT_OF_HOST_MEMORY) {
1218         goto out;
1219     }
1220 
1221     const VkExtensionProperties direct_driver_loading_extension_info[] = {
1222         {VK_LUNARG_DIRECT_DRIVER_LOADING_EXTENSION_NAME, VK_LUNARG_DIRECT_DRIVER_LOADING_SPEC_VERSION}};
1223 
1224     // Add VK_LUNARG_direct_driver_loading
1225     res = loader_add_to_ext_list(inst, inst_exts, sizeof(direct_driver_loading_extension_info) / sizeof(VkExtensionProperties),
1226                                  direct_driver_loading_extension_info);
1227     if (res == VK_ERROR_OUT_OF_HOST_MEMORY) {
1228         goto out;
1229     }
1230 
1231 out:
1232     return res;
1233 }
1234 
1235 struct loader_icd_term *loader_get_icd_and_device(const void *device, struct loader_device **found_dev, uint32_t *icd_index) {
1236     VkLayerDispatchTable *dispatch_table_device = loader_get_dispatch(device);
1237     if (NULL == dispatch_table_device) {
1238         *found_dev = NULL;
1239         return NULL;
1240     }
1241     loader_platform_thread_lock_mutex(&loader_global_instance_list_lock);
1242     *found_dev = NULL;
1243 
1244     for (struct loader_instance *inst = loader.instances; inst; inst = inst->next) {
1245         uint32_t index = 0;
1246         for (struct loader_icd_term *icd_term = inst->icd_terms; icd_term; icd_term = icd_term->next) {
1247             for (struct loader_device *dev = icd_term->logical_device_list; dev; dev = dev->next) {
1248                 // Value comparison of device prevents object wrapping by layers
1249                 if (loader_get_dispatch(dev->icd_device) == dispatch_table_device ||
1250                     (dev->chain_device != VK_NULL_HANDLE && loader_get_dispatch(dev->chain_device) == dispatch_table_device)) {
1251                     *found_dev = dev;
1252                     if (NULL != icd_index) {
1253                         *icd_index = index;
1254                     }
1255                     loader_platform_thread_unlock_mutex(&loader_global_instance_list_lock);
1256                     return icd_term;
1257                 }
1258             }
1259             index++;
1260         }
1261     }
1262     loader_platform_thread_unlock_mutex(&loader_global_instance_list_lock);
1263     return NULL;
1264 }
1265 
1266 void loader_destroy_logical_device(struct loader_device *dev, const VkAllocationCallbacks *pAllocator) {
1267     if (pAllocator) {
1268         dev->alloc_callbacks = *pAllocator;
1269     }
1270     loader_device_heap_free(dev, dev);
1271 }
1272 
1273 struct loader_device *loader_create_logical_device(const struct loader_instance *inst, const VkAllocationCallbacks *pAllocator) {
1274     struct loader_device *new_dev;
1275     new_dev = loader_calloc(pAllocator, sizeof(struct loader_device), VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1276 
1277     if (!new_dev) {
1278         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_create_logical_device: Failed to alloc struct loader_device");
1279         return NULL;
1280     }
1281 
1282     new_dev->loader_dispatch.core_dispatch.magic = DEVICE_DISP_TABLE_MAGIC_NUMBER;
1283 
1284     if (pAllocator) {
1285         new_dev->alloc_callbacks = *pAllocator;
1286     }
1287 
1288     return new_dev;
1289 }
1290 
1291 void loader_add_logical_device(struct loader_icd_term *icd_term, struct loader_device *dev) {
1292     dev->next = icd_term->logical_device_list;
1293     icd_term->logical_device_list = dev;
1294 }
1295 
1296 void loader_remove_logical_device(struct loader_icd_term *icd_term, struct loader_device *found_dev,
1297                                   const VkAllocationCallbacks *pAllocator) {
1298     struct loader_device *dev, *prev_dev;
1299 
1300     if (!icd_term || !found_dev) return;
1301 
1302     prev_dev = NULL;
1303     dev = icd_term->logical_device_list;
1304     while (dev && dev != found_dev) {
1305         prev_dev = dev;
1306         dev = dev->next;
1307     }
1308 
1309     if (prev_dev)
1310         prev_dev->next = found_dev->next;
1311     else
1312         icd_term->logical_device_list = found_dev->next;
1313     loader_destroy_logical_device(found_dev, pAllocator);
1314 }
1315 
1316 void loader_icd_destroy(struct loader_instance *ptr_inst, struct loader_icd_term *icd_term,
1317                         const VkAllocationCallbacks *pAllocator) {
1318     ptr_inst->total_icd_count--;
1319     for (struct loader_device *dev = icd_term->logical_device_list; dev;) {
1320         struct loader_device *next_dev = dev->next;
1321         loader_destroy_logical_device(dev, pAllocator);
1322         dev = next_dev;
1323     }
1324 
1325     loader_instance_heap_free(ptr_inst, icd_term);
1326 }
1327 
1328 struct loader_icd_term *loader_icd_add(struct loader_instance *ptr_inst, const struct loader_scanned_icd *scanned_icd) {
1329     struct loader_icd_term *icd_term;
1330 
1331     icd_term = loader_instance_heap_calloc(ptr_inst, sizeof(struct loader_icd_term), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
1332     if (!icd_term) {
1333         return NULL;
1334     }
1335 
1336     icd_term->scanned_icd = scanned_icd;
1337     icd_term->this_instance = ptr_inst;
1338 
1339     // Prepend to the list
1340     icd_term->next = ptr_inst->icd_terms;
1341     ptr_inst->icd_terms = icd_term;
1342     ptr_inst->total_icd_count++;
1343 
1344     return icd_term;
1345 }
1346 
1347 // Determine the ICD interface version to use.
1348 //     @param icd
1349 //     @param pVersion Output parameter indicating which version to use or 0 if
1350 //            the negotiation API is not supported by the ICD
1351 //     @return  bool indicating true if the selected interface version is supported
1352 //            by the loader, false indicates the version is not supported
1353 bool loader_get_icd_interface_version(PFN_vkNegotiateLoaderICDInterfaceVersion fp_negotiate_icd_version, uint32_t *pVersion) {
1354     if (fp_negotiate_icd_version == NULL) {
1355         // ICD does not support the negotiation API, it supports version 0 or 1
1356         // calling code must determine if it is version 0 or 1
1357         *pVersion = 0;
1358     } else {
1359         // ICD supports the negotiation API, so call it with the loader's
1360         // latest version supported
1361         *pVersion = CURRENT_LOADER_ICD_INTERFACE_VERSION;
1362         VkResult result = fp_negotiate_icd_version(pVersion);
1363 
1364         if (result == VK_ERROR_INCOMPATIBLE_DRIVER) {
1365             // ICD no longer supports the loader's latest interface version so
1366             // fail loading the ICD
1367             return false;
1368         }
1369     }
1370 
1371 #if MIN_SUPPORTED_LOADER_ICD_INTERFACE_VERSION > 0
1372     if (*pVersion < MIN_SUPPORTED_LOADER_ICD_INTERFACE_VERSION) {
1373         // Loader no longer supports the ICD's latest interface version so fail
1374         // loading the ICD
1375         return false;
1376     }
1377 #endif
1378     return true;
1379 }
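
// Illustrative sketch (not part of the loader source): the driver-side counterpart that the negotiation above
// calls into. A typical ICD clamps the loader's proposed version down to the newest version it implements;
// the value 7 is an assumption for illustration.
//
//   VKAPI_ATTR VkResult VKAPI_CALL vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion) {
//       if (*pSupportedVersion > 7) {
//           *pSupportedVersion = 7;  // clamp to the newest interface version this driver implements
//       }
//       // A driver that cannot work with the proposed range would instead return VK_ERROR_INCOMPATIBLE_DRIVER.
//       return VK_SUCCESS;
//   }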
1380 
1381 void loader_scanned_icd_clear(const struct loader_instance *inst, struct loader_icd_tramp_list *icd_tramp_list) {
1382     if (0 != icd_tramp_list->capacity && icd_tramp_list->scanned_list) {
1383         for (uint32_t i = 0; i < icd_tramp_list->count; i++) {
1384             if (icd_tramp_list->scanned_list[i].handle) {
1385                 loader_platform_close_library(icd_tramp_list->scanned_list[i].handle);
1386                 icd_tramp_list->scanned_list[i].handle = NULL;
1387             }
1388             loader_instance_heap_free(inst, icd_tramp_list->scanned_list[i].lib_name);
1389         }
1390         loader_instance_heap_free(inst, icd_tramp_list->scanned_list);
1391     }
1392     memset(icd_tramp_list, 0, sizeof(struct loader_icd_tramp_list));
1393 }
1394 
1395 VkResult loader_scanned_icd_init(const struct loader_instance *inst, struct loader_icd_tramp_list *icd_tramp_list) {
1396     VkResult res = VK_SUCCESS;
1397     loader_scanned_icd_clear(inst, icd_tramp_list);
1398     icd_tramp_list->capacity = 8 * sizeof(struct loader_scanned_icd);
1399     icd_tramp_list->scanned_list = loader_instance_heap_alloc(inst, icd_tramp_list->capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
1400     if (NULL == icd_tramp_list->scanned_list) {
1401         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
1402                    "loader_scanned_icd_init: Failed to allocate memory for the scanned ICD list");
1403         res = VK_ERROR_OUT_OF_HOST_MEMORY;
1404     }
1405     return res;
1406 }
1407 
1408 VkResult loader_add_direct_driver(const struct loader_instance *inst, uint32_t index,
1409                                   const VkDirectDriverLoadingInfoLUNARG *pDriver, struct loader_icd_tramp_list *icd_tramp_list) {
1410     // Assume pDriver is valid, since there is no real way to check it. Calling code should make sure the pointer to the array
1411     // of VkDirectDriverLoadingInfoLUNARG structures is non-null.
1412     if (NULL == pDriver->pfnGetInstanceProcAddr) {
1413         loader_log(
1414             inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
1415             "loader_add_direct_driver: VkDirectDriverLoadingInfoLUNARG structure at index %d contains a NULL pointer for the "
1416             "pfnGetInstanceProcAddr member, skipping.",
1417             index);
1418         return VK_ERROR_INITIALIZATION_FAILED;
1419     }
1420 
1421     PFN_vkGetInstanceProcAddr fp_get_proc_addr = pDriver->pfnGetInstanceProcAddr;
1422     PFN_vkCreateInstance fp_create_inst = NULL;
1423     PFN_vkEnumerateInstanceExtensionProperties fp_get_inst_ext_props = NULL;
1424     PFN_GetPhysicalDeviceProcAddr fp_get_phys_dev_proc_addr = NULL;
1425     PFN_vkNegotiateLoaderICDInterfaceVersion fp_negotiate_icd_version = NULL;
1426 #if defined(VK_USE_PLATFORM_WIN32_KHR)
1427     PFN_vk_icdEnumerateAdapterPhysicalDevices fp_enum_dxgi_adapter_phys_devs = NULL;
1428 #endif
1429     struct loader_scanned_icd *new_scanned_icd;
1430     uint32_t interface_version = 0;
1431 
1432     // Try to get the negotiate ICD interface version function
1433     fp_negotiate_icd_version = (PFN_vk_icdNegotiateLoaderICDInterfaceVersion)pDriver->pfnGetInstanceProcAddr(
1434         NULL, "vk_icdNegotiateLoaderICDInterfaceVersion");
1435 
1436     if (NULL == fp_negotiate_icd_version) {
1437         loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
1438                    "loader_add_direct_driver: Could not get 'vk_icdNegotiateLoaderICDInterfaceVersion' from "
1439                    "VkDirectDriverLoadingInfoLUNARG structure at "
1440                    "index %d, skipping.",
1441                    index);
1442         return VK_ERROR_INITIALIZATION_FAILED;
1443     }
1444 
1445     if (!loader_get_icd_interface_version(fp_negotiate_icd_version, &interface_version)) {
1446         loader_log(
1447             inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
1448             "loader_add_direct_driver: VkDirectDriverLoadingInfoLUNARG structure at index %d supports interface version %d, "
1449             "which is incompatible with the Loader Driver Interface version that supports the VK_LUNARG_direct_driver_loading "
1450             "extension, skipping.",
1451             index, interface_version);
1452         return VK_ERROR_INITIALIZATION_FAILED;
1453     }
1454 
1455     if (interface_version < 7) {
1456         loader_log(
1457             inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
1458             "loader_add_direct_driver: VkDirectDriverLoadingInfoLUNARG structure at index %d supports interface version %d, "
1459             "but version 7 or newer is required by the VK_LUNARG_direct_driver_loading "
1460             "extension, skipping.",
1461             index, interface_version);
1462         return VK_ERROR_INITIALIZATION_FAILED;
1463     }
1464 
1465     fp_create_inst = (PFN_vkCreateInstance)pDriver->pfnGetInstanceProcAddr(NULL, "vkCreateInstance");
1466     if (NULL == fp_create_inst) {
1467         loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
1468                    "loader_add_direct_driver: Could not get 'vkCreateInstance' from VkDirectDriverLoadingInfoLUNARG structure at "
1469                    "index %d, skipping.",
1470                    index);
1471         return VK_ERROR_INITIALIZATION_FAILED;
1472     }
1473     fp_get_inst_ext_props =
1474         (PFN_vkEnumerateInstanceExtensionProperties)pDriver->pfnGetInstanceProcAddr(NULL, "vkEnumerateInstanceExtensionProperties");
1475     if (NULL == fp_get_inst_ext_props) {
1476         loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
1477                    "loader_add_direct_driver: Could not get 'vkEnumerateInstanceExtensionProperties' from "
1478                    "VkDirectDriverLoadingInfoLUNARG structure at index %d, skipping.",
1479                    index);
1480         return VK_ERROR_INITIALIZATION_FAILED;
1481     }
1482 
1483     fp_get_phys_dev_proc_addr =
1484         (PFN_vk_icdGetPhysicalDeviceProcAddr)pDriver->pfnGetInstanceProcAddr(NULL, "vk_icdGetPhysicalDeviceProcAddr");
1485 #if defined(VK_USE_PLATFORM_WIN32_KHR)
1486     // Query "vk_icdEnumerateAdapterPhysicalDevices" through the provided pfnGetInstanceProcAddr; direct drivers must
1487     // report interface version 7 or greater, so no dynamic-linker fallback is needed here
1488     fp_enum_dxgi_adapter_phys_devs =
1489         (PFN_vk_icdEnumerateAdapterPhysicalDevices)pDriver->pfnGetInstanceProcAddr(NULL, "vk_icdEnumerateAdapterPhysicalDevices");
1490 #endif
1491 
1492     // check for enough capacity
1493     if ((icd_tramp_list->count * sizeof(struct loader_scanned_icd)) >= icd_tramp_list->capacity) {
1494         void *new_ptr = loader_instance_heap_realloc(inst, icd_tramp_list->scanned_list, icd_tramp_list->capacity,
1495                                                      icd_tramp_list->capacity * 2, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
1496         if (NULL == new_ptr) {
1497             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
1498                        "loader_add_direct_driver: Realloc failed on icd library list for ICD index %u", index);
1499             return VK_ERROR_OUT_OF_HOST_MEMORY;
1500         }
1501         icd_tramp_list->scanned_list = new_ptr;
1502 
1503         // double capacity
1504         icd_tramp_list->capacity *= 2;
1505     }
1506 
1507     // Driver must be 1.1 to support version 7
1508     uint32_t api_version = VK_API_VERSION_1_1;
1509     PFN_vkEnumerateInstanceVersion icd_enumerate_instance_version =
1510         (PFN_vkEnumerateInstanceVersion)pDriver->pfnGetInstanceProcAddr(NULL, "vkEnumerateInstanceVersion");
1511 
1512     if (icd_enumerate_instance_version) {
1513         VkResult res = icd_enumerate_instance_version(&api_version);
1514         if (res != VK_SUCCESS) {
1515             return res;
1516         }
1517     }
1518 
1519     new_scanned_icd = &(icd_tramp_list->scanned_list[icd_tramp_list->count]);
1520     new_scanned_icd->handle = NULL;
1521     new_scanned_icd->api_version = api_version;
1522     new_scanned_icd->GetInstanceProcAddr = fp_get_proc_addr;
1523     new_scanned_icd->GetPhysicalDeviceProcAddr = fp_get_phys_dev_proc_addr;
1524     new_scanned_icd->EnumerateInstanceExtensionProperties = fp_get_inst_ext_props;
1525     new_scanned_icd->CreateInstance = fp_create_inst;
1526 #if defined(VK_USE_PLATFORM_WIN32_KHR)
1527     new_scanned_icd->EnumerateAdapterPhysicalDevices = fp_enum_dxgi_adapter_phys_devs;
1528 #endif
1529     new_scanned_icd->interface_version = interface_version;
1530 
1531     new_scanned_icd->lib_name = NULL;
1532     icd_tramp_list->count++;
1533 
1534     loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
1535                "loader_add_direct_driver: Adding driver found in index %d of "
1536                "VkDirectDriverLoadingListLUNARG::pDrivers structure. pfnGetInstanceProcAddr was set to %p",
1537                index, pDriver->pfnGetInstanceProcAddr);
1538 
1539     return VK_SUCCESS;
1540 }
1541 
1542 // Search through VkInstanceCreateInfo's pNext chain for any drivers from the direct driver loading extension and load them.
1543 VkResult loader_scan_for_direct_drivers(const struct loader_instance *inst, const VkInstanceCreateInfo *pCreateInfo,
1544                                         struct loader_icd_tramp_list *icd_tramp_list, bool *direct_driver_loading_exclusive_mode) {
1545     if (NULL == pCreateInfo) {
1546         // Don't do this logic unless we are being called from vkCreateInstance, when pCreateInfo will be non-null
1547         return VK_SUCCESS;
1548     }
1549     bool direct_driver_loading_enabled = false;
1550     // Check whether VK_LUNARG_direct_driver_loading is enabled and whether we are using it exclusively
1551     // Skip this step if inst is NULL, aka when this function is being called before instance creation
1552     if (inst != NULL && pCreateInfo->ppEnabledExtensionNames && pCreateInfo->enabledExtensionCount > 0) {
1553         // Look through the enabled extension list, make sure VK_LUNARG_direct_driver_loading is present
1554         for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
1555             if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_LUNARG_DIRECT_DRIVER_LOADING_EXTENSION_NAME) == 0) {
1556                 direct_driver_loading_enabled = true;
1557                 break;
1558             }
1559         }
1560     }
1561     const VkDirectDriverLoadingListLUNARG *ddl_list = NULL;
1562     // Find the VkDirectDriverLoadingListLUNARG struct in the pNext chain of vkInstanceCreateInfo
1563     const VkBaseOutStructure *chain = pCreateInfo->pNext;
1564     while (chain) {
1565         if (chain->sType == VK_STRUCTURE_TYPE_DIRECT_DRIVER_LOADING_LIST_LUNARG) {
1566             ddl_list = (VkDirectDriverLoadingListLUNARG *)chain;
1567             break;
1568         }
1569         chain = (const VkBaseOutStructure *)chain->pNext;
1570     }
1571     if (NULL == ddl_list) {
1572         if (direct_driver_loading_enabled) {
1573             loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
1574                        "loader_scan_for_direct_drivers: The VK_LUNARG_direct_driver_loading extension was enabled but the "
1575                        "pNext chain of "
1576                        "VkInstanceCreateInfo did not contain the "
1577                        "VkDirectDriverLoadingListLUNARG structure.");
1578         }
1579         // Always want to exit early if there was no VkDirectDriverLoadingListLUNARG in the pNext chain
1580         return VK_SUCCESS;
1581     }
1582 
1583     if (!direct_driver_loading_enabled) {
1584         loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
1585                    "loader_scan_for_direct_drivers: The pNext chain of VkInstanceCreateInfo contained the "
1586                    "VkDirectDriverLoadingListLUNARG structure, but the VK_LUNARG_direct_driver_loading extension was "
1587                    "not enabled.");
1588         return VK_SUCCESS;
1589     }
1590     // If we are using exclusive mode, skip looking for any more drivers from system or environment variables
1591     if (ddl_list->mode == VK_DIRECT_DRIVER_LOADING_MODE_EXCLUSIVE_LUNARG) {
1592         *direct_driver_loading_exclusive_mode = true;
1593         loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
1594                    "loader_scan_for_direct_drivers: The VK_LUNARG_direct_driver_loading extension is active and specified "
1595                    "VK_DIRECT_DRIVER_LOADING_MODE_EXCLUSIVE_LUNARG, skipping system and environment "
1596                    "variable driver search mechanisms.");
1597     }
1598     if (NULL == ddl_list->pDrivers) {
1599         loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
1600                    "loader_scan_for_direct_drivers: The VkDirectDriverLoadingListLUNARG structure in the pNext chain of "
1601                    "VkInstanceCreateInfo has a NULL pDrivers member.");
1602         return VK_SUCCESS;
1603     }
1604     if (ddl_list->driverCount == 0) {
1605         loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
1606                    "loader_scan_for_direct_drivers: The VkDirectDriverLoadingListLUNARG structure in the pNext chain of "
1607                    "VkInstanceCreateInfo has a non-null pDrivers member but a driverCount member with a value "
1608                    "of zero.");
1609         return VK_SUCCESS;
1610     }
1611     // Go through all VkDirectDriverLoadingInfoLUNARG entries and add each driver
1612     // Because icd_tramps are prepended, this will result in the drivers appearing at the end
1613     for (uint32_t i = 0; i < ddl_list->driverCount; i++) {
1614         VkResult res = loader_add_direct_driver(inst, i, &ddl_list->pDrivers[i], icd_tramp_list);
1615         if (res == VK_ERROR_OUT_OF_HOST_MEMORY) {
1616             return res;
1617         }
1618     }
1619 
1620     return VK_SUCCESS;
1621 }
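
// Illustrative sketch (not part of the loader source): how an application is expected to hand this code a
// driver through VK_LUNARG_direct_driver_loading. "my_driver_gipa" is a hypothetical vkGetInstanceProcAddr
// implementation exported by the statically linked or embedded driver.
//
//   VkDirectDriverLoadingInfoLUNARG driver_info = {
//       .sType = VK_STRUCTURE_TYPE_DIRECT_DRIVER_LOADING_INFO_LUNARG,
//       .pfnGetInstanceProcAddr = my_driver_gipa,
//   };
//   VkDirectDriverLoadingListLUNARG driver_list = {
//       .sType = VK_STRUCTURE_TYPE_DIRECT_DRIVER_LOADING_LIST_LUNARG,
//       .mode = VK_DIRECT_DRIVER_LOADING_MODE_EXCLUSIVE_LUNARG,  // also skips system/env-var driver search
//       .driverCount = 1,
//       .pDrivers = &driver_info,
//   };
//   const char *enabled_exts[] = {VK_LUNARG_DIRECT_DRIVER_LOADING_EXTENSION_NAME};
//   VkInstanceCreateInfo create_info = {
//       .sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
//       .pNext = &driver_list,
//       .enabledExtensionCount = 1,
//       .ppEnabledExtensionNames = enabled_exts,
//   };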
1622 
1623 VkResult loader_scanned_icd_add(const struct loader_instance *inst, struct loader_icd_tramp_list *icd_tramp_list,
1624                                 const char *filename, uint32_t api_version, enum loader_layer_library_status *lib_status) {
1625     loader_platform_dl_handle handle = NULL;
1626     PFN_vkCreateInstance fp_create_inst = NULL;
1627     PFN_vkEnumerateInstanceExtensionProperties fp_get_inst_ext_props = NULL;
1628     PFN_vkGetInstanceProcAddr fp_get_proc_addr = NULL;
1629     PFN_GetPhysicalDeviceProcAddr fp_get_phys_dev_proc_addr = NULL;
1630     PFN_vkNegotiateLoaderICDInterfaceVersion fp_negotiate_icd_version = NULL;
1631 #if defined(VK_USE_PLATFORM_WIN32_KHR)
1632     PFN_vk_icdEnumerateAdapterPhysicalDevices fp_enum_dxgi_adapter_phys_devs = NULL;
1633 #endif
1634     struct loader_scanned_icd *new_scanned_icd = NULL;
1635     uint32_t interface_vers;
1636     VkResult res = VK_SUCCESS;
1637 
1638     // This shouldn't happen, but the check is necessary because dlopen returns a handle to the main program when
1639     // filename is NULL
1640     if (filename == NULL) {
1641         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
1642                    "loader_scanned_icd_add: A NULL filename was used, skipping this ICD");
1643         res = VK_ERROR_INCOMPATIBLE_DRIVER;
1644         goto out;
1645     }
1646 
1647 // TODO implement smarter opening/closing of libraries. For now this
1648 // function leaves libraries open and the scanned_icd_clear closes them
1649 #if defined(__Fuchsia__)
1650     handle = loader_platform_open_driver(filename);
1651 #else
1652     handle = loader_platform_open_library(filename);
1653 #endif
1654     if (NULL == handle) {
1655         loader_handle_load_library_error(inst, filename, lib_status);
1656         if (lib_status && *lib_status == LOADER_LAYER_LIB_ERROR_OUT_OF_MEMORY) {
1657             res = VK_ERROR_OUT_OF_HOST_MEMORY;
1658         } else {
1659             res = VK_ERROR_INCOMPATIBLE_DRIVER;
1660         }
1661         goto out;
1662     }
1663 
1664     // Try to load the driver's exported vk_icdNegotiateLoaderICDInterfaceVersion
1665     fp_negotiate_icd_version = loader_platform_get_proc_address(handle, "vk_icdNegotiateLoaderICDInterfaceVersion");
1666 
1667     // If it isn't exported, we are dealing with either a v0, v1, or a v7 and up driver
1668     if (NULL == fp_negotiate_icd_version) {
1669         // Try to load the driver's exported vk_icdGetInstanceProcAddr - if this is a v7 or up driver, we can use it to get
1670         // the driver's vk_icdNegotiateLoaderICDInterfaceVersion function
1671         fp_get_proc_addr = loader_platform_get_proc_address(handle, "vk_icdGetInstanceProcAddr");
1672 
1673         // If we successfully loaded vk_icdGetInstanceProcAddr, try to get vk_icdNegotiateLoaderICDInterfaceVersion
1674         if (fp_get_proc_addr) {
1675             fp_negotiate_icd_version =
1676                 (PFN_vk_icdNegotiateLoaderICDInterfaceVersion)fp_get_proc_addr(NULL, "vk_icdNegotiateLoaderICDInterfaceVersion");
1677         }
1678     }
1679 
1680     // Try to negotiate the Loader and Driver Interface Versions
1681     // loader_get_icd_interface_version will check if fp_negotiate_icd_version is NULL, so we don't have to.
1682     // If it *is* NULL, that means this driver uses interface version 0 or 1
1683     if (!loader_get_icd_interface_version(fp_negotiate_icd_version, &interface_vers)) {
1684         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
1685                    "loader_scanned_icd_add: ICD %s doesn't support interface version compatible with loader, skip this ICD.",
1686                    filename);
1687         goto out;
1688     }
1689 
1690     // If we didn't already query vk_icdGetInstanceProcAddr, try now
1691     if (NULL == fp_get_proc_addr) {
1692         fp_get_proc_addr = loader_platform_get_proc_address(handle, "vk_icdGetInstanceProcAddr");
1693     }
1694 
1695     // If vk_icdGetInstanceProcAddr is NULL, this ICD is using version 0 and so we should respond accordingly.
1696     if (NULL == fp_get_proc_addr) {
1697         // Exporting vk_icdNegotiateLoaderICDInterfaceVersion but not vk_icdGetInstanceProcAddr violates Version 2's
1698         // requirements, as for Version 2 to be supported Version 1 must also be supported
1699         if (interface_vers != 0) {
1700             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
1701                        "loader_scanned_icd_add: ICD %s reports an interface version of %d but doesn't export "
1702                        "vk_icdGetInstanceProcAddr, skip this ICD.",
1703                        filename, interface_vers);
1704             goto out;
1705         }
1706         // Use deprecated interface from version 0
1707         fp_get_proc_addr = loader_platform_get_proc_address(handle, "vkGetInstanceProcAddr");
1708         if (NULL == fp_get_proc_addr) {
1709             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
1710                        "loader_scanned_icd_add: Attempt to retrieve either \'vkGetInstanceProcAddr\' or "
1711                        "\'vk_icdGetInstanceProcAddr\' from ICD %s failed.",
1712                        filename);
1713             goto out;
1714         } else {
1715             loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
1716                        "loader_scanned_icd_add: Using deprecated ICD interface of \'vkGetInstanceProcAddr\' instead of "
1717                        "\'vk_icdGetInstanceProcAddr\' for ICD %s",
1718                        filename);
1719         }
1720         fp_create_inst = loader_platform_get_proc_address(handle, "vkCreateInstance");
1721         if (NULL == fp_create_inst) {
1722             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
1723                        "loader_scanned_icd_add:  Failed querying \'vkCreateInstance\' via dlsym/LoadLibrary for ICD %s", filename);
1724             goto out;
1725         }
1726         fp_get_inst_ext_props = loader_platform_get_proc_address(handle, "vkEnumerateInstanceExtensionProperties");
1727         if (NULL == fp_get_inst_ext_props) {
1728             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
1729                        "loader_scanned_icd_add: Could not get \'vkEnumerateInstanceExtensionProperties\' via dlsym/LoadLibrary "
1730                        "for ICD %s",
1731                        filename);
1732             goto out;
1733         }
1734     } else {
1735         // vk_icdGetInstanceProcAddr was successfully found, we can assume the version is at least one
1736         // If vk_icdNegotiateLoaderICDInterfaceVersion was also found, interface_vers must be 2 or greater, so this check is
1737         // fine
1738         if (interface_vers == 0) {
1739             interface_vers = 1;
1740         }
1741 
1742         fp_create_inst = (PFN_vkCreateInstance)fp_get_proc_addr(NULL, "vkCreateInstance");
1743         if (NULL == fp_create_inst) {
1744             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
1745                        "loader_scanned_icd_add: Could not get \'vkCreateInstance\' via \'vk_icdGetInstanceProcAddr\' for ICD %s",
1746                        filename);
1747             goto out;
1748         }
1749         fp_get_inst_ext_props =
1750             (PFN_vkEnumerateInstanceExtensionProperties)fp_get_proc_addr(NULL, "vkEnumerateInstanceExtensionProperties");
1751         if (NULL == fp_get_inst_ext_props) {
1752             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
1753                        "loader_scanned_icd_add: Could not get \'vkEnumerateInstanceExtensionProperties\' via "
1754                        "\'vk_icdGetInstanceProcAddr\' for ICD %s",
1755                        filename);
1756             goto out;
1757         }
1758         // Query "vk_icdGetPhysicalDeviceProcAddr" with vk_icdGetInstanceProcAddr if the library reports interface version 7 or
1759         // greater, otherwise fallback to loading it from the platform dynamic linker
1760         if (interface_vers >= 7) {
1761             fp_get_phys_dev_proc_addr =
1762                 (PFN_vk_icdGetPhysicalDeviceProcAddr)fp_get_proc_addr(NULL, "vk_icdGetPhysicalDeviceProcAddr");
1763         }
1764         if (NULL == fp_get_phys_dev_proc_addr && interface_vers >= 3) {
1765             fp_get_phys_dev_proc_addr = loader_platform_get_proc_address(handle, "vk_icdGetPhysicalDeviceProcAddr");
1766         }
1767 #if defined(VK_USE_PLATFORM_WIN32_KHR)
1768         // Query "vk_icdEnumerateAdapterPhysicalDevices" with vk_icdGetInstanceProcAddr if the library reports interface version
1769         // 7 or greater, otherwise fallback to loading it from the platform dynamic linker
1770         if (interface_vers >= 7) {
1771             fp_enum_dxgi_adapter_phys_devs =
1772                 (PFN_vk_icdEnumerateAdapterPhysicalDevices)fp_get_proc_addr(NULL, "vk_icdEnumerateAdapterPhysicalDevices");
1773         }
1774         if (NULL == fp_enum_dxgi_adapter_phys_devs && interface_vers >= 6) {
1775             fp_enum_dxgi_adapter_phys_devs = loader_platform_get_proc_address(handle, "vk_icdEnumerateAdapterPhysicalDevices");
1776         }
1777 #endif
1778     }
1779 
1780     // check for enough capacity
1781     if ((icd_tramp_list->count * sizeof(struct loader_scanned_icd)) >= icd_tramp_list->capacity) {
1782         void *new_ptr = loader_instance_heap_realloc(inst, icd_tramp_list->scanned_list, icd_tramp_list->capacity,
1783                                                      icd_tramp_list->capacity * 2, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
1784         if (NULL == new_ptr) {
1785             res = VK_ERROR_OUT_OF_HOST_MEMORY;
1786             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_scanned_icd_add: Realloc failed on icd library list for ICD %s",
1787                        filename);
1788             goto out;
1789         }
1790         icd_tramp_list->scanned_list = new_ptr;
1791 
1792         // double capacity
1793         icd_tramp_list->capacity *= 2;
1794     }
1795 
1796     loader_api_version api_version_struct = loader_make_version(api_version);
1797     if (interface_vers <= 4 && loader_check_version_meets_required(LOADER_VERSION_1_1_0, api_version_struct)) {
1798         loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
1799                    "loader_scanned_icd_add: Driver %s supports Vulkan %u.%u, but only supports loader interface version %u."
1800                    " Interface version 5 or newer required to support this version of Vulkan (Policy #LDP_DRIVER_7)",
1801                    filename, api_version_struct.major, api_version_struct.minor, interface_vers);
1802     }
1803 
1804     new_scanned_icd = &(icd_tramp_list->scanned_list[icd_tramp_list->count]);
1805     new_scanned_icd->handle = handle;
1806     new_scanned_icd->api_version = api_version;
1807     new_scanned_icd->GetInstanceProcAddr = fp_get_proc_addr;
1808     new_scanned_icd->GetPhysicalDeviceProcAddr = fp_get_phys_dev_proc_addr;
1809     new_scanned_icd->EnumerateInstanceExtensionProperties = fp_get_inst_ext_props;
1810     new_scanned_icd->CreateInstance = fp_create_inst;
1811 #if defined(VK_USE_PLATFORM_WIN32_KHR)
1812     new_scanned_icd->EnumerateAdapterPhysicalDevices = fp_enum_dxgi_adapter_phys_devs;
1813 #endif
1814     new_scanned_icd->interface_version = interface_vers;
1815 
1816     res = loader_copy_to_new_str(inst, filename, &new_scanned_icd->lib_name);
1817     if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
1818         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_scanned_icd_add: Out of memory can't add ICD %s", filename);
1819         goto out;
1820     }
1821     icd_tramp_list->count++;
1822 
1823 out:
1824 
1825     return res;
1826 }
1827 
1828 void loader_initialize(void) {
1829     // initialize mutexes
1830     loader_platform_thread_create_mutex(&loader_lock);
1831     loader_platform_thread_create_mutex(&loader_preload_icd_lock);
1832     loader_platform_thread_create_mutex(&loader_global_instance_list_lock);
1833     init_global_loader_settings();
1834 
1835     // initialize logging
1836     loader_init_global_debug_level();
1837 #if defined(_WIN32)
1838     windows_initialization();
1839 #endif
1840 
1841     loader_api_version version = loader_make_full_version(VK_HEADER_VERSION_COMPLETE);
1842     loader_log(NULL, VULKAN_LOADER_INFO_BIT, 0, "Vulkan Loader Version %d.%d.%d", version.major, version.minor, version.patch);
1843 
1844 #if defined(GIT_BRANCH_NAME) && defined(GIT_TAG_INFO)
1845     loader_log(NULL, VULKAN_LOADER_INFO_BIT, 0, "[Vulkan Loader Git - Tag: " GIT_BRANCH_NAME ", Branch/Commit: " GIT_TAG_INFO "]");
1846 #endif
1847 
1848     char *loader_disable_dynamic_library_unloading_env_var = loader_getenv("VK_LOADER_DISABLE_DYNAMIC_LIBRARY_UNLOADING", NULL);
1849     if (loader_disable_dynamic_library_unloading_env_var &&
1850         0 == strncmp(loader_disable_dynamic_library_unloading_env_var, "1", 2)) {
1851         loader_disable_dynamic_library_unloading = true;
1852         loader_log(NULL, VULKAN_LOADER_WARN_BIT, 0, "Vulkan Loader: library unloading is disabled");
1853     } else {
1854         loader_disable_dynamic_library_unloading = false;
1855     }
1856     loader_free_getenv(loader_disable_dynamic_library_unloading_env_var, NULL);
1857 #if defined(LOADER_USE_UNSAFE_FILE_SEARCH)
1858     loader_log(NULL, VULKAN_LOADER_WARN_BIT, 0, "Vulkan Loader: unsafe searching is enabled");
1859 #endif
1860 }
1861 
1862 void loader_release() {
1863     // Guarantee release of the preloaded ICD libraries. This may have already been called in vkDestroyInstance.
1864     loader_unload_preloaded_icds();
1865 
1866     // release mutexes
1867     teardown_global_loader_settings();
1868     loader_platform_thread_delete_mutex(&loader_lock);
1869     loader_platform_thread_delete_mutex(&loader_preload_icd_lock);
1870     loader_platform_thread_delete_mutex(&loader_global_instance_list_lock);
1871 }
1872 
1873 // Preload the ICD libraries that are likely to be needed so we don't repeatedly load/unload them later
1874 void loader_preload_icds(void) {
1875     loader_platform_thread_lock_mutex(&loader_preload_icd_lock);
1876 
1877     // Already preloaded, skip loading again.
1878     if (scanned_icds.scanned_list != NULL) {
1879         loader_platform_thread_unlock_mutex(&loader_preload_icd_lock);
1880         return;
1881     }
1882 
1883     VkResult result = loader_icd_scan(NULL, &scanned_icds, NULL, NULL);
1884     if (result != VK_SUCCESS) {
1885         loader_scanned_icd_clear(NULL, &scanned_icds);
1886     }
1887     loader_platform_thread_unlock_mutex(&loader_preload_icd_lock);
1888 }
1889 
1890 // Release the ICD libraries that were preloaded
1891 void loader_unload_preloaded_icds(void) {
1892     loader_platform_thread_lock_mutex(&loader_preload_icd_lock);
1893     loader_scanned_icd_clear(NULL, &scanned_icds);
1894     loader_platform_thread_unlock_mutex(&loader_preload_icd_lock);
1895 }
1896 
1897 #if !defined(_WIN32)
1898 __attribute__((constructor)) void loader_init_library(void) { loader_initialize(); }
1899 
1900 __attribute__((destructor)) void loader_free_library(void) { loader_release(); }
1901 #endif
1902 
1903 // Get next file or dirname given a string list or registry key path
1904 //
1905 // \returns
1906 // A pointer to the first char of the next path in the list, or a pointer to the current path's
1907 // terminating NUL when no further separator exists (NULL only if the input is NULL).
1908 // Note: the input string is modified in place (separators are overwritten with NUL). PASS IN A COPY!
1909 char *loader_get_next_path(char *path) {
1910     uint32_t len;
1911     char *next;
1912 
1913     if (path == NULL) return NULL;
1914     next = strchr(path, PATH_SEPARATOR);
1915     if (next == NULL) {
1916         len = (uint32_t)strlen(path);
1917         next = path + len;
1918     } else {
1919         *next = '\0';
1920         next++;
1921     }
1922 
1923     return next;
1924 }
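
// Illustrative sketch (not part of the loader source): iterating a PATH_SEPARATOR-delimited list with
// loader_get_next_path. The directories and the ':' separator are assumptions; note the writable copy,
// since the function overwrites separators in its input.
//
//   char paths_copy[] = "/etc/vulkan/icd.d:/usr/share/vulkan/icd.d";
//   char *cur = paths_copy;
//   while (cur != NULL && *cur != '\0') {
//       char *next = loader_get_next_path(cur);  // terminates cur, returns the start of the next entry
//       printf("search dir: %s\n", cur);
//       cur = next;
//   }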
1925 
1926 /* Processes a json manifest's library_path and the location of the json manifest to create the path of the library
1927  * The output is stored in out_fullpath by allocating a string - so it's the caller's responsibility to free it
1928  * The output is the combination of the base path of manifest_file_path concatenated with library path
1929  * If library_path is an absolute path, we do not prepend the base path of manifest_file_path
1930  *
1931  * This function takes ownership of library_path - caller does not need to worry about freeing it.
1932  */
1933 VkResult combine_manifest_directory_and_library_path(const struct loader_instance *inst, char *library_path,
1934                                                      const char *manifest_file_path, char **out_fullpath) {
1935     assert(library_path && manifest_file_path && out_fullpath);
1936     if (loader_platform_is_path_absolute(library_path)) {
1937         *out_fullpath = library_path;
1938         return VK_SUCCESS;
1939     }
1940     VkResult res = VK_SUCCESS;
1941 
1942     size_t library_path_len = strlen(library_path);
1943     size_t manifest_file_path_str_len = strlen(manifest_file_path);
1944     bool library_path_contains_directory_symbol = false;
1945     for (size_t i = 0; i < library_path_len; i++) {
1946         if (library_path[i] == DIRECTORY_SYMBOL) {
1947             library_path_contains_directory_symbol = true;
1948             break;
1949         }
1950     }
1951     // library_path has no directory separator, so it is a bare library name resolved by the system's loader - do not modify it
1952     if (!library_path_contains_directory_symbol) {
1953         *out_fullpath = library_path;
1954         return VK_SUCCESS;
1955     }
1956     // must include both a directory symbol and the null terminator
1957     size_t new_str_len = library_path_len + manifest_file_path_str_len + 1 + 1;
1958 
1959     *out_fullpath = loader_instance_heap_calloc(inst, new_str_len, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
1960     if (NULL == *out_fullpath) {
1961         res = VK_ERROR_OUT_OF_HOST_MEMORY;
1962         goto out;
1963     }
1964     size_t cur_loc_in_out_fullpath = 0;
1965     // look for the last occurrence of DIRECTORY_SYMBOL in manifest_file_path
1966     size_t last_directory_symbol = 0;
1967     bool found_directory_symbol = false;
1968     for (size_t i = 0; i < manifest_file_path_str_len; i++) {
1969         if (manifest_file_path[i] == DIRECTORY_SYMBOL) {
1970             last_directory_symbol = i + 1;  // we want to include the symbol
1971             found_directory_symbol = true;
1972             // don't break because we want to find the last occurrence
1973         }
1974     }
1975     // Add manifest_file_path up to the last directory symbol
1976     if (found_directory_symbol) {
1977         loader_strncpy(*out_fullpath, new_str_len, manifest_file_path, last_directory_symbol);
1978         cur_loc_in_out_fullpath += last_directory_symbol;
1979     }
1980     loader_strncpy(&(*out_fullpath)[cur_loc_in_out_fullpath], new_str_len - cur_loc_in_out_fullpath, library_path,
1981                    library_path_len);
1982     cur_loc_in_out_fullpath += library_path_len + 1;
1983     (*out_fullpath)[cur_loc_in_out_fullpath] = '\0';
1984 
1985 out:
1986     loader_instance_heap_free(inst, library_path);
1987 
1988     return res;
1989 }
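
// Illustrative sketch (not part of the loader source): a typical call from manifest parsing, assuming a
// hypothetical manifest at "/usr/share/vulkan/icd.d/my_icd.json" whose "library_path" value was duplicated
// into library_path_copy (the call takes ownership of that string).
//
//   char *full_lib_path = NULL;
//   VkResult res = combine_manifest_directory_and_library_path(inst, library_path_copy,
//                                                               "/usr/share/vulkan/icd.d/my_icd.json", &full_lib_path);
//   // "./libvk_vendor.so" -> "/usr/share/vulkan/icd.d/./libvk_vendor.so"  (manifest directory prepended)
//   // "libvk_vendor.so"   -> returned as-is (no directory separator; dynamic-linker search order applies)
//   // "/opt/libvendor.so" -> returned as-is (absolute path)
//   // On success, free full_lib_path with loader_instance_heap_free when done.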
1990 
1991 // Given a filename (file)  and a list of paths (in_dirs), try to find an existing
1992 // file in the paths.  If filename already is a path then no searching in the given paths.
1993 //
1994 // @return - A string in out_fullpath of either the full path or file.
1995 void loader_get_fullpath(const char *file, const char *in_dirs, size_t out_size, char *out_fullpath) {
1996     if (!loader_platform_is_path(file) && *in_dirs) {
1997         size_t dirs_copy_len = strlen(in_dirs) + 1;
1998         char *dirs_copy = loader_stack_alloc(dirs_copy_len);
1999         loader_strncpy(dirs_copy, dirs_copy_len, in_dirs, dirs_copy_len);
2000 
2001         // find if file exists after prepending paths in given list
2002         // for (dir = dirs_copy; *dir && (next_dir = loader_get_next_path(dir)); dir = next_dir) {
2003         char *dir = dirs_copy;
2004         char *next_dir = loader_get_next_path(dir);
2005         while (*dir && next_dir) {
2006             int path_concat_ret = snprintf(out_fullpath, out_size, "%s%c%s", dir, DIRECTORY_SYMBOL, file);
2007             if (path_concat_ret < 0) {
2008                 // snprintf failed (encoding error); fall through without returning so the loop
2009                 // still advances to the next directory instead of spinning on this one
2010             } else if (loader_platform_file_exists(out_fullpath)) {
2011                 return;
2012             }
2013             dir = next_dir;
2014             next_dir = loader_get_next_path(dir);
2015         }
2016     }
2017 
2018     (void)snprintf(out_fullpath, out_size, "%s", file);
2019 }
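
// Illustrative sketch (not part of the loader source): resolving a bare filename against a search-path list.
// The directories shown are assumptions for illustration.
//
//   char fullpath[1024];
//   loader_get_fullpath("my_icd.json", "/etc/vulkan/icd.d:/usr/share/vulkan/icd.d", sizeof(fullpath), fullpath);
//   // fullpath now holds the first "<dir>/my_icd.json" that exists, or just "my_icd.json" if none of the
//   // directories contain the file (or if the name was already a path).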
2020 
2021 // Verify that all component layers in a meta-layer are valid.
2022 bool verify_meta_layer_component_layers(const struct loader_instance *inst, struct loader_layer_properties *prop,
2023                                         struct loader_layer_list *instance_layers) {
2024     loader_api_version meta_layer_version = loader_make_version(prop->info.specVersion);
2025 
2026     for (uint32_t comp_layer = 0; comp_layer < prop->component_layer_names.count; comp_layer++) {
2027         struct loader_layer_properties *comp_prop =
2028             loader_find_layer_property(prop->component_layer_names.list[comp_layer], instance_layers);
2029         if (comp_prop == NULL) {
2030             loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2031                        "verify_meta_layer_component_layers: Meta-layer %s can't find component layer %s at index %d."
2032                        "  Skipping this layer.",
2033                        prop->info.layerName, prop->component_layer_names.list[comp_layer], comp_layer);
2034 
2035             return false;
2036         }
2037 
2038         // Check the version of each layer, they need to be at least MAJOR and MINOR
2039         loader_api_version comp_prop_version = loader_make_version(comp_prop->info.specVersion);
2040         if (!loader_check_version_meets_required(meta_layer_version, comp_prop_version)) {
2041             loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2042                        "verify_meta_layer_component_layers: Meta-layer uses API version %d.%d, but component "
2043                        "layer %d has API version %d.%d that is lower.  Skipping this layer.",
2044                        meta_layer_version.major, meta_layer_version.minor, comp_layer, comp_prop_version.major,
2045                        comp_prop_version.minor);
2046 
2047             return false;
2048         }
2049 
2050         // Make sure the layer isn't using its own name
2051         if (!strcmp(prop->info.layerName, prop->component_layer_names.list[comp_layer])) {
2052             loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2053                        "verify_meta_layer_component_layers: Meta-layer %s lists itself in its component layer "
2054                        "list at index %d.  Skipping this layer.",
2055                        prop->info.layerName, comp_layer);
2056 
2057             return false;
2058         }
2059         if (comp_prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER) {
2060             loader_log(inst, VULKAN_LOADER_INFO_BIT, 0,
2061                        "verify_meta_layer_component_layers: Adding meta-layer %s which also contains meta-layer %s",
2062                        prop->info.layerName, comp_prop->info.layerName);
2063 
2064             // Make sure if the layer is using a meta-layer in its component list that we also verify that.
2065             if (!verify_meta_layer_component_layers(inst, comp_prop, instance_layers)) {
2066                 loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2067                            "Meta-layer %s component layer %s can not find all component layers."
2068                            "  Skipping this layer.",
2069                            prop->info.layerName, prop->component_layer_names.list[comp_layer]);
2070                 return false;
2071             }
2072         }
2073     }
2074     // Didn't exit early so that means it passed all checks
2075     loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0,
2076                "Meta-layer \"%s\": all %d component layers appear to be valid.", prop->info.layerName,
2077                prop->component_layer_names.count);
2078 
2079     // If layer logging is on, list the internals included in the meta-layer
2080     for (uint32_t comp_layer = 0; comp_layer < prop->component_layer_names.count; comp_layer++) {
2081         loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "  [%d] %s", comp_layer, prop->component_layer_names.list[comp_layer]);
2082     }
2083     return true;
2084 }
2085 
2086 // Add any instance and device extensions from component layers to this layer
2087 // list, so that anyone querying extensions will only need to look at the meta-layer
2088 VkResult update_meta_layer_extensions_from_component_layers(const struct loader_instance *inst, struct loader_layer_properties *prop,
2089                                                             struct loader_layer_list *instance_layers) {
2090     VkResult res = VK_SUCCESS;
2091     for (uint32_t comp_layer = 0; comp_layer < prop->component_layer_names.count; comp_layer++) {
2092         struct loader_layer_properties *comp_prop =
2093             loader_find_layer_property(prop->component_layer_names.list[comp_layer], instance_layers);
2094 
2095         if (NULL != comp_prop->instance_extension_list.list) {
2096             for (uint32_t ext = 0; ext < comp_prop->instance_extension_list.count; ext++) {
2097                 loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0, "Meta-layer %s component layer %s adding instance extension %s",
2098                            prop->info.layerName, prop->component_layer_names.list[comp_layer],
2099                            comp_prop->instance_extension_list.list[ext].extensionName);
2100 
2101                 if (!has_vk_extension_property(&comp_prop->instance_extension_list.list[ext], &prop->instance_extension_list)) {
2102                     res = loader_add_to_ext_list(inst, &prop->instance_extension_list, 1,
2103                                                  &comp_prop->instance_extension_list.list[ext]);
2104                     if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
2105                         return res;
2106                     }
2107                 }
2108             }
2109         }
2110         if (NULL != comp_prop->device_extension_list.list) {
2111             for (uint32_t ext = 0; ext < comp_prop->device_extension_list.count; ext++) {
2112                 loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0, "Meta-layer %s component layer %s adding device extension %s",
2113                            prop->info.layerName, prop->component_layer_names.list[comp_layer],
2114                            comp_prop->device_extension_list.list[ext].props.extensionName);
2115 
2116                 if (!has_vk_dev_ext_property(&comp_prop->device_extension_list.list[ext].props, &prop->device_extension_list)) {
2117                     res = loader_add_to_dev_ext_list(inst, &prop->device_extension_list,
2118                                                      &comp_prop->device_extension_list.list[ext].props, NULL);
2119                     if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
2120                         return res;
2121                     }
2122                 }
2123             }
2124         }
2125     }
2126     return res;
2127 }
2128 
2129 // Verify that all meta-layers in a layer list are valid.
2130 VkResult verify_all_meta_layers(struct loader_instance *inst, const struct loader_envvar_all_filters *filters,
2131                                 struct loader_layer_list *instance_layers, bool *override_layer_present) {
2132     VkResult res = VK_SUCCESS;
2133     *override_layer_present = false;
2134     for (int32_t i = 0; i < (int32_t)instance_layers->count; i++) {
2135         struct loader_layer_properties *prop = &instance_layers->list[i];
2136 
2137         // If this is a meta-layer, make sure it is valid
2138         if (prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER) {
2139             if (verify_meta_layer_component_layers(inst, prop, instance_layers)) {
2140                 // If any meta layer is valid, update its extension list to include the extensions from its component layers.
2141                 res = update_meta_layer_extensions_from_component_layers(inst, prop, instance_layers);
2142                 if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
2143                     return res;
2144                 }
2145                 if (prop->is_override && loader_implicit_layer_is_enabled(inst, filters, prop)) {
2146                     *override_layer_present = true;
2147                 }
2148             } else {
2149                 loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0,
2150                            "Removing meta-layer %s from instance layer list since it appears invalid.", prop->info.layerName);
2151 
2152                 loader_remove_layer_in_list(inst, instance_layers, i);
2153                 i--;
2154             }
2155         }
2156     }
2157     return res;
2158 }
2159 
2160 // If the path to the current executable matches any app_key_path of an override layer, remove all other override layers.
2161 // Otherwise, if no matching app_key was found, remove all but the global override layer, which has no app_key_path.
2162 void remove_all_non_valid_override_layers(struct loader_instance *inst, struct loader_layer_list *instance_layers) {
2163     if (instance_layers == NULL) {
2164         return;
2165     }
2166 
2167     char cur_path[1024];
2168     char *ret = loader_platform_executable_path(cur_path, 1024);
2169     if (NULL == ret) {
2170         return;
2171     }
2172     // Find out if there is an override layer with the same app_key_path as the path to the current executable.
2173     // If more than one is found, remove the duplicates and use the first layer found.
2174     // Remove any layers which aren't global and do not have the same app_key_path as the path to the current executable.
2175     bool found_active_override_layer = false;
2176     int global_layer_index = -1;
2177     for (uint32_t i = 0; i < instance_layers->count; i++) {
2178         struct loader_layer_properties *props = &instance_layers->list[i];
2179         if (strcmp(props->info.layerName, VK_OVERRIDE_LAYER_NAME) == 0) {
2180             if (props->app_key_paths.count > 0) {  // not the global layer
2181                 for (uint32_t j = 0; j < props->app_key_paths.count; j++) {
2182                     if (strcmp(props->app_key_paths.list[j], cur_path) == 0) {
2183                         if (!found_active_override_layer) {
2184                             found_active_override_layer = true;
2185                         } else {
2186                             loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
2187                                        "remove_all_non_valid_override_layers: Multiple override layers with the same "
2188                                        "path in app_keys "
2189                                        "were found. Using the first layer found");
2190 
2191                             // Remove duplicate active override layers that have the same app_key_path
2192                             loader_remove_layer_in_list(inst, instance_layers, i);
2193                             i--;
2194                         }
2195                     }
2196                 }
2197                 if (!found_active_override_layer) {
2198                     loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0,
2199                                "--Override layer found but not used because app \'%s\' is not in \'app_keys\' list!", cur_path);
2200 
2201                     // Remove non-global override layers that don't have an app_key that matches cur_path
2202                     loader_remove_layer_in_list(inst, instance_layers, i);
2203                     i--;
2204                 }
2205             } else {
2206                 if (global_layer_index == -1) {
2207                     global_layer_index = i;
2208                 } else {
2209                     loader_log(
2210                         inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
2211                         "remove_all_non_valid_override_layers: Multiple global override layers found. Using the first global "
2212                         "layer found");
2213                     loader_remove_layer_in_list(inst, instance_layers, i);
2214                     i--;
2215                 }
2216             }
2217         }
2218     }
2219     // Remove the global layer if a layer with the same app_key_path as the path to the current executable was found
2220     if (found_active_override_layer && global_layer_index >= 0) {
2221         loader_remove_layer_in_list(inst, instance_layers, global_layer_index);
2222     }
2223     // Should be at most 1 override layer in the list now.
2224     if (found_active_override_layer) {
2225         loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Using the override layer for app key %s", cur_path);
2226     } else if (global_layer_index >= 0) {
2227         loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Using the global override layer");
2228     }
2229 }
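// Illustrative only: the paths and component names below are placeholders. An override meta-layer
// manifest (named VK_OVERRIDE_LAYER_NAME, i.e. "VK_LAYER_LUNARG_override") combines "component_layers"
// with the optional "override_paths", "blacklisted_layers", and "app_keys" fields handled above and
// parsed in loader_read_layer_json() below:
//
//     "layer": {
//         "name": "VK_LAYER_LUNARG_override",
//         "type": "GLOBAL",
//         "api_version": "1.3.250",
//         "implementation_version": "1",
//         "description": "Override meta-layer",
//         "component_layers": [ "VK_LAYER_KHRONOS_validation" ],
//         "override_paths": [ "/placeholder/path/to/layer/manifests" ],
//         "blacklisted_layers": [],
//         "app_keys": [ "/placeholder/path/to/application_binary" ],
//         "disable_environment": { "DISABLE_VK_LAYER_LUNARG_override": "1" }
//     }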
2230 
2231 /* The following are required in the "layer" object:
2232  * "name"
2233  * "type"
2234  * (for non-meta layers) "library_path"
2235  * (for meta layers) "component_layers"
2236  * "api_version"
2237  * "implementation_version"
2238  * "description"
2239  * (for implicit layers) "disable_environment"
2240  */
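/*
 * Illustrative only: the field values below are placeholders, not taken from any real layer. A minimal
 * explicit (non-meta, non-implicit) layer manifest that satisfies the requirements above could look like:
 *
 *     {
 *         "file_format_version": "1.2.0",
 *         "layer": {
 *             "name": "VK_LAYER_VENDOR_example",
 *             "type": "INSTANCE",
 *             "library_path": "./libVkLayer_example.so",
 *             "api_version": "1.3.250",
 *             "implementation_version": "1",
 *             "description": "Example layer"
 *         }
 *     }
 *
 * An implicit layer additionally requires a "disable_environment" object, e.g.
 *     "disable_environment": { "DISABLE_VENDOR_EXAMPLE_LAYER": "1" }
 * and a meta-layer replaces "library_path" with a "component_layers" array.
 */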
2241 
2242 VkResult loader_read_layer_json(const struct loader_instance *inst, struct loader_layer_list *layer_instance_list,
2243                                 cJSON *layer_node, loader_api_version version, bool is_implicit, char *filename) {
2244     assert(layer_instance_list);
2245     char *type = NULL;
2246     char *api_version = NULL;
2247     char *implementation_version = NULL;
2248     VkResult result = VK_SUCCESS;
2249     struct loader_layer_properties props = {0};
2250 
2251     // Parse name
2252 
2253     result = loader_parse_json_string_to_existing_str(inst, layer_node, "name", VK_MAX_EXTENSION_NAME_SIZE, props.info.layerName);
2254     if (VK_ERROR_OUT_OF_HOST_MEMORY == result) goto out;
2255     if (VK_ERROR_INITIALIZATION_FAILED == result) {
2256         loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2257                    "Layer located at %s didn't find required layer value \"name\" in manifest JSON file, skipping this layer",
2258                    filename);
2259         goto out;
2260     }
2261 
2262     // Check if this layer's name matches the override layer name, set is_override to true if so.
2263     if (!strcmp(props.info.layerName, VK_OVERRIDE_LAYER_NAME)) {
2264         props.is_override = true;
2265     }
2266 
2267     if (0 != strncmp(props.info.layerName, "VK_LAYER_", 9)) {
2268         loader_log(inst, VULKAN_LOADER_WARN_BIT, 0, "Layer name %s does not conform to naming standard (Policy #LLP_LAYER_3)",
2269                    props.info.layerName);
2270     }
2271 
2272     // Parse type
2273 
2274     result = loader_parse_json_string(layer_node, "type", &type);
2275     if (VK_ERROR_OUT_OF_HOST_MEMORY == result) goto out;
2276     if (VK_ERROR_INITIALIZATION_FAILED == result) {
2277         loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2278                    "Layer located at %s didn't find required layer value \"type\" in manifest JSON file, skipping this layer",
2279                    filename);
2280         goto out;
2281     }
2282 
2283     // Add list entry
2284     if (!strcmp(type, "DEVICE")) {
2285         loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Device layers are deprecated. Skipping this layer");
2286         result = VK_ERROR_INITIALIZATION_FAILED;
2287         goto out;
2288     }
2289 
2290     // Allow either GLOBAL or INSTANCE type interchangeably to handle layers that must work with older loaders
2291     if (!strcmp(type, "INSTANCE") || !strcmp(type, "GLOBAL")) {
2292         props.type_flags = VK_LAYER_TYPE_FLAG_INSTANCE_LAYER;
2293         if (!is_implicit) {
2294             props.type_flags |= VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER;
2295         }
2296     } else {
2297         result = VK_ERROR_INITIALIZATION_FAILED;
2298         goto out;
2299     }
2300 
2301     // Parse api_version
2302 
2303     result = loader_parse_json_string(layer_node, "api_version", &api_version);
2304     if (VK_ERROR_OUT_OF_HOST_MEMORY == result) goto out;
2305     if (VK_ERROR_INITIALIZATION_FAILED == result) {
2306         loader_log(
2307             inst, VULKAN_LOADER_WARN_BIT, 0,
2308             "Layer located at %s didn't find required layer value \"api_version\" in manifest JSON file, skipping this layer",
2309             filename);
2310         goto out;
2311     }
2312 
2313     props.info.specVersion = loader_parse_version_string(api_version);
2314 
2315     // Make sure the layer's manifest doesn't contain a non-zero variant value
2316     if (VK_API_VERSION_VARIANT(props.info.specVersion) != 0) {
2317         loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0,
2318                    "Layer \"%s\" has an \'api_version\' field which contains a non-zero variant value of %d. "
2319                    " Skipping Layer.",
2320                    props.info.layerName, VK_API_VERSION_VARIANT(props.info.specVersion));
2321         result = VK_ERROR_INITIALIZATION_FAILED;
2322         goto out;
2323     }
2324 
2325     // Parse implementation_version
2326 
2327     result = loader_parse_json_string(layer_node, "implementation_version", &implementation_version);
2328     if (VK_ERROR_OUT_OF_HOST_MEMORY == result) goto out;
2329     if (VK_ERROR_INITIALIZATION_FAILED == result) {
2330         loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2331                    "Layer located at %s didn't find required layer value \"implementation_version\" in manifest JSON file, "
2332                    "skipping this layer",
2333                    filename);
2334         goto out;
2335     }
2336     props.info.implementationVersion = atoi(implementation_version);
2337 
2338     // Parse description
2339 
2340     result = loader_parse_json_string_to_existing_str(inst, layer_node, "description", VK_MAX_EXTENSION_NAME_SIZE,
2341                                                       props.info.description);
2342     if (VK_ERROR_OUT_OF_HOST_MEMORY == result) goto out;
2343     if (VK_ERROR_INITIALIZATION_FAILED == result) {
2344         loader_log(
2345             inst, VULKAN_LOADER_WARN_BIT, 0,
2346             "Layer located at %s didn't find required layer value \"description\" in manifest JSON file, skipping this layer",
2347             filename);
2348         goto out;
2349     }
2350 
2351     // Parse library_path
2352 
2353     // library_path is only required when component_layers is not defined
2354     cJSON *library_path = loader_cJSON_GetObjectItem(layer_node, "library_path");
2355 
2356     if (NULL != library_path) {
2357         if (NULL != loader_cJSON_GetObjectItem(layer_node, "component_layers")) {
2358             loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2359                        "Indicating meta-layer-specific component_layers, but also defining layer library path.  Both are not "
2360                        "compatible, so skipping this layer");
2361             result = VK_ERROR_INITIALIZATION_FAILED;
2362             goto out;
2363         }
2364 
2365         result = loader_copy_to_new_str(inst, filename, &props.manifest_file_name);
2366         if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2367 
2368         char *library_path_str = loader_cJSON_Print(library_path);
2369         if (NULL == library_path_str) {
2370             loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2371                        "Skipping layer due to problem accessing the library_path value in manifest JSON file %s", filename);
2372             result = VK_ERROR_OUT_OF_HOST_MEMORY;
2373             goto out;
2374         }
2375 
2376         // This function takes ownership of library_path_str - so we don't need to clean it up
2377         result = combine_manifest_directory_and_library_path(inst, library_path_str, filename, &props.lib_name);
2378         if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2379     }
2380 
2381     // Parse component_layers
2382 
2383     if (NULL == library_path) {
2384         if (!loader_check_version_meets_required(LOADER_VERSION_1_1_0, version)) {
2385             loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2386                        "Indicating meta-layer-specific component_layers, but using older JSON file version.");
2387         }
2388 
2389         result = loader_parse_json_array_of_strings(inst, layer_node, "component_layers", &(props.component_layer_names));
2390         if (VK_ERROR_OUT_OF_HOST_MEMORY == result) {
2391             goto out;
2392         }
2393         if (VK_ERROR_INITIALIZATION_FAILED == result) {
2394             loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2395                        "Layer missing both library_path and component_layers fields.  One or the other MUST be defined.  Skipping "
2396                        "this layer");
2397             goto out;
2398         }
2399         // This is now, officially, a meta-layer
2400         props.type_flags |= VK_LAYER_TYPE_FLAG_META_LAYER;
2401         loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Encountered meta-layer \"%s\"",
2402                    props.info.layerName);
2403     }
2404 
2405     // Parse blacklisted_layers
2406 
2407     if (props.is_override) {
2408         result = loader_parse_json_array_of_strings(inst, layer_node, "blacklisted_layers", &(props.blacklist_layer_names));
2409         if (VK_ERROR_OUT_OF_HOST_MEMORY == result) {
2410             goto out;
2411         }
2412     }
2413 
2414     // Parse override_paths
2415 
2416     result = loader_parse_json_array_of_strings(inst, layer_node, "override_paths", &(props.override_paths));
2417     if (VK_ERROR_OUT_OF_HOST_MEMORY == result) {
2418         goto out;
2419     }
2420     if (NULL != props.override_paths.list && !loader_check_version_meets_required(loader_combine_version(1, 1, 0), version)) {
2421         loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2422                    "Indicating meta-layer-specific override paths, but using older JSON file version.");
2423     }
2424 
2425     // Parse disable_environment
2426 
2427     if (is_implicit) {
2428         cJSON *disable_environment = loader_cJSON_GetObjectItem(layer_node, "disable_environment");
2429         if (disable_environment == NULL) {
2430             loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2431                        "Didn't find required layer object disable_environment in manifest JSON file, skipping this layer");
2432             result = VK_ERROR_INITIALIZATION_FAILED;
2433             goto out;
2434         }
2435 
2436         if (!disable_environment->child || disable_environment->child->type != cJSON_String) {
2437             loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2438                        "Didn't find required layer child value disable_environment in manifest JSON file, skipping this layer "
2439                        "(Policy #LLP_LAYER_9)");
2440             result = VK_ERROR_INITIALIZATION_FAILED;
2441             goto out;
2442         }
2443         result = loader_copy_to_new_str(inst, disable_environment->child->string, &(props.disable_env_var.name));
2444         if (VK_SUCCESS != result) goto out;
2445         result = loader_copy_to_new_str(inst, disable_environment->child->valuestring, &(props.disable_env_var.value));
2446         if (VK_SUCCESS != result) goto out;
2447     }
2448 
2449     // Now get all optional items and objects and put in list:
2450     // functions
2451     // instance_extensions
2452     // device_extensions
2453     // enable_environment (implicit layers only)
2454     // library_arch
2455 
2456     // Layer interface functions
2457     //    vkGetInstanceProcAddr
2458     //    vkGetDeviceProcAddr
2459     //    vkNegotiateLoaderLayerInterfaceVersion (starting with JSON file 1.1.0)
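    //    e.g. (illustrative; the string value is whatever symbol name the layer library exports):
    //        "functions": { "vkNegotiateLoaderLayerInterfaceVersion": "ExampleLayer_NegotiateInterfaceVersion" }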
2460     cJSON *functions = loader_cJSON_GetObjectItem(layer_node, "functions");
2461     if (functions != NULL) {
2462         if (loader_check_version_meets_required(loader_combine_version(1, 1, 0), version)) {
2463             result = loader_parse_json_string(functions, "vkNegotiateLoaderLayerInterfaceVersion",
2464                                               &props.functions.str_negotiate_interface);
2465             if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2466         }
2467         result = loader_parse_json_string(functions, "vkGetInstanceProcAddr", &props.functions.str_gipa);
2468         if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2469 
2470         if (props.functions.str_gipa && loader_check_version_meets_required(loader_combine_version(1, 1, 0), version)) {
2471             loader_log(inst, VULKAN_LOADER_INFO_BIT, 0,
2472                        "Layer \"%s\" using deprecated \'vkGetInstanceProcAddr\' tag which was deprecated starting with JSON "
2473                        "file version 1.1.0. The new vkNegotiateLoaderLayerInterfaceVersion function is preferred, though for "
2474                        "compatibility reasons it may be desirable to continue using the deprecated tag.",
2475                        props.info.layerName);
2476         }
2477 
2478         result = loader_parse_json_string(functions, "vkGetDeviceProcAddr", &props.functions.str_gdpa);
2479         if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2480 
2481         if (props.functions.str_gdpa && loader_check_version_meets_required(loader_combine_version(1, 1, 0), version)) {
2482             loader_log(inst, VULKAN_LOADER_INFO_BIT, 0,
2483                        "Layer \"%s\" using deprecated \'vkGetDeviceProcAddr\' tag which was deprecated starting with JSON "
2484                        "file version 1.1.0. The new vkNegotiateLoaderLayerInterfaceVersion function is preferred, though for "
2485                        "compatibility reasons it may be desirable to continue using the deprecated tag.",
2486                        props.info.layerName);
2487         }
2488     }
2489 
2490     // instance_extensions
2491     //   array of {
2492     //     name
2493     //     spec_version
2494     //   }
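    //   e.g. (illustrative): "instance_extensions": [ { "name": "VK_EXT_debug_utils", "spec_version": "2" } ]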
2495 
2496     cJSON *instance_extensions = loader_cJSON_GetObjectItem(layer_node, "instance_extensions");
2497     if (instance_extensions != NULL) {
2498         int count = loader_cJSON_GetArraySize(instance_extensions);
2499         for (int i = 0; i < count; i++) {
2500             VkExtensionProperties ext_prop = {0};
2501             cJSON *ext_item = loader_cJSON_GetArrayItem(instance_extensions, i);
2502             result = loader_parse_json_string_to_existing_str(inst, ext_item, "name", VK_MAX_EXTENSION_NAME_SIZE,
2503                                                               ext_prop.extensionName);
2504             if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2505             if (result == VK_ERROR_INITIALIZATION_FAILED) continue;
2506             char *spec_version = NULL;
2507             result = loader_parse_json_string(ext_item, "spec_version", &spec_version);
2508             if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2509             if (NULL != spec_version) {
2510                 ext_prop.specVersion = atoi(spec_version);
2511             }
2512             loader_instance_heap_free(inst, spec_version);
2513             bool ext_unsupported = wsi_unsupported_instance_extension(&ext_prop);
2514             if (!ext_unsupported) {
2515                 loader_add_to_ext_list(inst, &props.instance_extension_list, 1, &ext_prop);
2516             }
2517         }
2518     }
2519 
2520     // device_extensions
2521     //   array of {
2522     //     name
2523     //     spec_version
2524     //     entrypoints
2525     //   }
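    //   e.g. (illustrative): "device_extensions":
    //     [ { "name": "VK_EXT_debug_marker", "spec_version": "4", "entrypoints": [ "vkDebugMarkerSetObjectTagEXT" ] } ]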
2526     cJSON *device_extensions = loader_cJSON_GetObjectItem(layer_node, "device_extensions");
2527     if (device_extensions != NULL) {
2528         int count = loader_cJSON_GetArraySize(device_extensions);
2529         for (int i = 0; i < count; i++) {
2530             VkExtensionProperties ext_prop = {0};
2531 
2532             cJSON *ext_item = loader_cJSON_GetArrayItem(device_extensions, i);
2533 
2534             result = loader_parse_json_string_to_existing_str(inst, ext_item, "name", VK_MAX_EXTENSION_NAME_SIZE,
2535                                                               ext_prop.extensionName);
2536             if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2537 
2538             char *spec_version = NULL;
2539             result = loader_parse_json_string(ext_item, "spec_version", &spec_version);
2540             if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2541             if (NULL != spec_version) {
2542                 ext_prop.specVersion = atoi(spec_version);
2543             }
2544             loader_instance_heap_free(inst, spec_version);
2545 
2546             cJSON *entrypoints = loader_cJSON_GetObjectItem(ext_item, "entrypoints");
2547             if (entrypoints == NULL) {
2548                 result = loader_add_to_dev_ext_list(inst, &props.device_extension_list, &ext_prop, NULL);
2549                 if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2550                 continue;
2551             }
2552 
2553             struct loader_string_list entrys = {0};
2554             result = loader_parse_json_array_of_strings(inst, ext_item, "entrypoints", &entrys);
2555             if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2556             result = loader_add_to_dev_ext_list(inst, &props.device_extension_list, &ext_prop, &entrys);
2557             if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2558         }
2559     }
2560     if (is_implicit) {
2561         cJSON *enable_environment = loader_cJSON_GetObjectItem(layer_node, "enable_environment");
2562 
2563         // enable_environment is optional
2564         if (enable_environment && enable_environment->child && enable_environment->child->type == cJSON_String) {
2565             result = loader_copy_to_new_str(inst, enable_environment->child->string, &(props.enable_env_var.name));
2566             if (VK_SUCCESS != result) goto out;
2567             result = loader_copy_to_new_str(inst, enable_environment->child->valuestring, &(props.enable_env_var.value));
2568             if (VK_SUCCESS != result) goto out;
2569         }
2570     }
2571 
2572     // Read in the pre-instance stuff
2573     cJSON *pre_instance = loader_cJSON_GetObjectItem(layer_node, "pre_instance_functions");
2574     if (NULL != pre_instance) {
2575         // The pre_instance_functions section is only supported in manifest version 1.1.2 and newer
2576         if (!loader_check_version_meets_required(loader_combine_version(1, 1, 2), version)) {
2577             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
2578                        "Found pre_instance_functions section in layer from \"%s\". This section is only valid in manifest version "
2579                        "1.1.2 or later. The section will be ignored",
2580                        filename);
2581         } else if (!is_implicit) {
2582             loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2583                        "Found pre_instance_functions section in explicit layer from \"%s\". This section is only valid in implicit "
2584                        "layers. The section will be ignored",
2585                        filename);
2586         } else {
2587             result = loader_parse_json_string(pre_instance, "vkEnumerateInstanceExtensionProperties",
2588                                               &props.pre_instance_functions.enumerate_instance_extension_properties);
2589             if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2590 
2591             result = loader_parse_json_string(pre_instance, "vkEnumerateInstanceLayerProperties",
2592                                               &props.pre_instance_functions.enumerate_instance_layer_properties);
2593             if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2594 
2595             result = loader_parse_json_string(pre_instance, "vkEnumerateInstanceVersion",
2596                                               &props.pre_instance_functions.enumerate_instance_version);
2597             if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2598         }
2599     }
2600 
2601     if (loader_cJSON_GetObjectItem(layer_node, "app_keys")) {
2602         if (!props.is_override) {
2603             loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
2604                        "Layer %s contains app_keys, but app_keys can only be provided by the override meta-layer. "
2605                        "These will be ignored.",
2606                        props.info.layerName);
2607         }
2608 
2609         result = loader_parse_json_array_of_strings(inst, layer_node, "app_keys", &props.app_key_paths);
2610         if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2611     }
2612 
2613     char *library_arch = NULL;
2614     result = loader_parse_json_string(layer_node, "library_arch", &library_arch);
2615     if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2616     if (library_arch != NULL) {
2617         if ((strncmp(library_arch, "32", 2) == 0 && sizeof(void *) != 4) ||
2618             (strncmp(library_arch, "64", 2) == 0 && sizeof(void *) != 8)) {
2619             loader_log(inst, VULKAN_LOADER_INFO_BIT, 0,
2620                        "Layer library architecture doesn't match the current running architecture, skipping this layer");
2621             loader_instance_heap_free(inst, library_arch);
2622             result = VK_ERROR_INITIALIZATION_FAILED;
2623             goto out;
2624         }
2625         loader_instance_heap_free(inst, library_arch);
2626     }
2627 
2628     result = VK_SUCCESS;
2629 
2630 out:
2631     // Try to append the layer property
2632     if (VK_SUCCESS == result) {
2633         result = loader_append_layer_property(inst, layer_instance_list, &props);
2634     }
2635     // If appending fails - free all the memory allocated in it
2636     if (VK_SUCCESS != result) {
2637         loader_free_layer_properties(inst, &props);
2638     }
2639     loader_instance_heap_free(inst, type);
2640     loader_instance_heap_free(inst, api_version);
2641     loader_instance_heap_free(inst, implementation_version);
2642     return result;
2643 }
2644 
2645 bool is_valid_layer_json_version(const loader_api_version *layer_json) {
2646     // Supported versions are: 1.0.0, 1.0.1, 1.1.0 - 1.1.2, and 1.2.0 - 1.2.1.
2647     if ((layer_json->major == 1 && layer_json->minor == 2 && layer_json->patch < 2) ||
2648         (layer_json->major == 1 && layer_json->minor == 1 && layer_json->patch < 3) ||
2649         (layer_json->major == 1 && layer_json->minor == 0 && layer_json->patch < 2)) {
2650         return true;
2651     }
2652     return false;
2653 }
2654 
2655 // Given a cJSON struct (json) of the top level JSON object from layer manifest
2656 // file, add entry to the layer_list. Fill out the layer_properties in this list
2657 // entry from the input cJSON object.
2658 //
2659 // \returns
2660 // A VkResult indicating whether the manifest was parsed successfully.
2661 // On success, layer_list has a new entry that is initialized accordingly.
2662 // If the json input object does not have all the required fields no entry
2663 // is added to the list.
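//
// A manifest may hold either a single "layer" object or, starting with file format version 1.0.1, a
// "layers" array of such objects. Illustrative skeleton of the array form (layer names are placeholders
// and each object still needs the required fields listed above loader_read_layer_json):
//     { "file_format_version": "1.2.0", "layers": [ { "name": "VK_LAYER_vendor_first" }, { "name": "VK_LAYER_vendor_second" } ] }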
2664 VkResult loader_add_layer_properties(const struct loader_instance *inst, struct loader_layer_list *layer_instance_list, cJSON *json,
2665                                      bool is_implicit, char *filename) {
2666     // The following fields in the layer manifest file are required:
2667     //   - "file_format_version"
2668     //   - If more than one "layer" object are used, then the "layers" array is
2669     //     required
2670     VkResult result = VK_ERROR_INITIALIZATION_FAILED;
2671     cJSON *item, *layers_node, *layer_node;
2672     loader_api_version json_version = {0, 0, 0};
2673     char *file_vers = NULL;
2674     // Make sure the top-level JSON value is an object
2675     if (!json || json->type != 6) {
2676         goto out;
2677     }
2678     item = loader_cJSON_GetObjectItem(json, "file_format_version");
2679     if (item == NULL) {
2680         goto out;
2681     }
2682     file_vers = loader_cJSON_PrintUnformatted(item);
2683     if (NULL == file_vers) {
2684         result = VK_ERROR_OUT_OF_HOST_MEMORY;
2685         goto out;
2686     }
2687     loader_log(inst, VULKAN_LOADER_INFO_BIT, 0, "Found manifest file %s (file version %s)", filename, file_vers);
2688     // Get the major/minor/and patch as integers for easier comparison
2689     json_version = loader_make_full_version(loader_parse_version_string(file_vers));
2690 
2691     if (!is_valid_layer_json_version(&json_version)) {
2692         loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0,
2693                    "loader_add_layer_properties: %s has unknown layer manifest file version %d.%d.%d.  May cause errors.", filename,
2694                    json_version.major, json_version.minor, json_version.patch);
2695     }
2696 
2697     // If "layers" is present, read in the array of layer objects
2698     layers_node = loader_cJSON_GetObjectItem(json, "layers");
2699     if (layers_node != NULL) {
2700         int numItems = loader_cJSON_GetArraySize(layers_node);
2701         // The 'layers' array is only supported in file version 1.0.1 and newer
2702         if (!loader_check_version_meets_required(loader_combine_version(1, 0, 1), json_version)) {
2703             loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
2704                        "loader_add_layer_properties: \'layers\' tag not supported until file version 1.0.1, but %s is reporting "
2705                        "version %s",
2706                        filename, file_vers);
2707         }
2708         for (int curLayer = 0; curLayer < numItems; curLayer++) {
2709             layer_node = loader_cJSON_GetArrayItem(layers_node, curLayer);
2710             if (layer_node == NULL) {
2711                 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
2712                            "loader_add_layer_properties: Can not find 'layers' array element %d object in manifest JSON file %s.  "
2713                            "Skipping this file",
2714                            curLayer, filename);
2715                 goto out;
2716             }
2717             result = loader_read_layer_json(inst, layer_instance_list, layer_node, json_version, is_implicit, filename);
2718         }
2719     } else {
2720         // Otherwise, try to read in individual layers
2721         layer_node = loader_cJSON_GetObjectItem(json, "layer");
2722         if (layer_node == NULL) {
2723             loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
2724                        "loader_add_layer_properties: Can not find 'layer' object in manifest JSON file %s.  Skipping this file.",
2725                        filename);
2726             goto out;
2727         }
2728         // Loop through all "layer" objects in the file to get a count of them
2729         // first.
2730         uint16_t layer_count = 0;
2731         cJSON *tempNode = layer_node;
2732         do {
2733             tempNode = tempNode->next;
2734             layer_count++;
2735         } while (tempNode != NULL);
2736 
2737         // Throw a warning if we encounter multiple "layer" objects in file
2738         // versions newer than 1.0.0.  Having multiple objects with the same
2739         // name at the same level is actually a JSON standard violation.
2740         if (layer_count > 1 && loader_check_version_meets_required(loader_combine_version(1, 0, 1), json_version)) {
2741             loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_LAYER_BIT, 0,
2742                        "loader_add_layer_properties: Multiple 'layer' nodes are deprecated starting in file version \"1.0.1\".  "
2743                        "Please use 'layers' : [] array instead in %s.",
2744                        filename);
2745         } else {
2746             do {
2747                 result = loader_read_layer_json(inst, layer_instance_list, layer_node, json_version, is_implicit, filename);
2748                 layer_node = layer_node->next;
2749             } while (layer_node != NULL);
2750         }
2751     }
2752 
2753 out:
2754     loader_instance_heap_free(inst, file_vers);
2755 
2756     return result;
2757 }
2758 
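// Illustrative sizing example, assuming Unix-style ':' path separators: for cur_path "/etc/xdg:/etc" and
// a relative path of length R, the function below reserves strlen(cur_path) + 2 * (R + 2) bytes, i.e. the
// relative path plus room for one directory symbol and one path separator for each of the two folders.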
2759 size_t determine_data_file_path_size(const char *cur_path, size_t relative_path_size) {
2760     size_t path_size = 0;
2761 
2762     if (NULL != cur_path) {
2763         // For each folder in cur_path, (detected by finding additional
2764         // path separators in the string) we need to add the relative path on
2765         // the end.  Plus, leave an additional two slots on the end to add an
2766         // additional directory slash and path separator if needed
2767         path_size += strlen(cur_path) + relative_path_size + 2;
2768         for (const char *x = cur_path; *x; ++x) {
2769             if (*x == PATH_SEPARATOR) {
2770                 path_size += relative_path_size + 2;
2771             }
2772         }
2773     }
2774 
2775     return path_size;
2776 }
2777 
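// Illustrative example, assuming Unix-style separators: cur_path "/usr/local/etc:/etc" combined with
// relative_path "vulkan/icd.d" is written out as "/usr/local/etc/vulkan/icd.d:/etc/vulkan/icd.d:", i.e.
// the relative directory is appended to every folder and each entry ends with a path separator.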
2778 void copy_data_file_info(const char *cur_path, const char *relative_path, size_t relative_path_size, char **output_path) {
2779     if (NULL != cur_path) {
2780         uint32_t start = 0;
2781         uint32_t stop = 0;
2782         char *cur_write = *output_path;
2783 
2784         while (cur_path[start] != '\0') {
2785             while (cur_path[start] == PATH_SEPARATOR) {
2786                 start++;
2787             }
2788             stop = start;
2789             while (cur_path[stop] != PATH_SEPARATOR && cur_path[stop] != '\0') {
2790                 stop++;
2791             }
2792             const size_t s = stop - start;
2793             if (s) {
2794                 memcpy(cur_write, &cur_path[start], s);
2795                 cur_write += s;
2796 
2797                 // If this is a specific JSON file, just add it and don't add any
2798                 // relative path or directory symbol to it.
2799                 if (!is_json(cur_write - 5, s)) {
2800                     // Add the relative directory if present.
2801                     if (relative_path_size > 0) {
2802                         // If last symbol written was not a directory symbol, add it.
2803                         if (*(cur_write - 1) != DIRECTORY_SYMBOL) {
2804                             *cur_write++ = DIRECTORY_SYMBOL;
2805                         }
2806                         memcpy(cur_write, relative_path, relative_path_size);
2807                         cur_write += relative_path_size;
2808                     }
2809                 }
2810 
2811                 *cur_write++ = PATH_SEPARATOR;
2812                 start = stop;
2813             }
2814         }
2815         *output_path = cur_write;
2816     }
2817 }
2818 
2819 // If the file found is a manifest file name, add it to the out_files manifest list.
2820 VkResult add_if_manifest_file(const struct loader_instance *inst, const char *file_name, struct loader_string_list *out_files) {
2821     VkResult vk_result = VK_SUCCESS;
2822 
2823     assert(NULL != file_name && "add_if_manifest_file: Received NULL pointer for file_name");
2824     assert(NULL != out_files && "add_if_manifest_file: Received NULL pointer for out_files");
2825 
2826     // Look for files ending with ".json" suffix
2827     size_t name_len = strlen(file_name);
2828     const char *name_suffix = file_name + name_len - 5;
2829     if (!is_json(name_suffix, name_len)) {
2830         // Use incomplete to indicate invalid name, but to keep going.
2831         vk_result = VK_INCOMPLETE;
2832         goto out;
2833     }
2834 
2835     vk_result = copy_str_to_string_list(inst, out_files, file_name, name_len);
2836 
2837 out:
2838 
2839     return vk_result;
2840 }
2841 
2842 // Add any files found in the search_path.  If any path in the search path points to a specific JSON, attempt to
2843 // only open that one JSON.  Otherwise, if the path is a folder, search the folder for JSON files.
2844 VkResult add_data_files(const struct loader_instance *inst, char *search_path, struct loader_string_list *out_files,
2845                         bool use_first_found_manifest) {
2846     VkResult vk_result = VK_SUCCESS;
2847     DIR *dir_stream = NULL;
2848     struct dirent *dir_entry;
2849     char *cur_file;
2850     char *next_file;
2851     char *name;
2852     char full_path[2048];
2853 #if !defined(_WIN32)
2854     char temp_path[2048];
2855 #endif
2856 
2857     // Now, parse the paths
2858     next_file = search_path;
2859     while (NULL != next_file && *next_file != '\0') {
2860         name = NULL;
2861         cur_file = next_file;
2862         next_file = loader_get_next_path(cur_file);
2863 
2864         // Is this a JSON file, then try to open it.
2865         size_t len = strlen(cur_file);
2866         if (is_json(cur_file + len - 5, len)) {
2867 #if defined(_WIN32)
2868             name = cur_file;
2869 #elif COMMON_UNIX_PLATFORMS
2870             // Only Linux has relative paths; make a copy of the location so it isn't modified
2871             size_t str_len;
2872             if (NULL != next_file) {
2873                 str_len = next_file - cur_file + 1;
2874             } else {
2875                 str_len = strlen(cur_file) + 1;
2876             }
2877             if (str_len > sizeof(temp_path)) {
2878                 loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0, "add_data_files: Path to %s too long", cur_file);
2879                 continue;
2880             }
2881             strncpy(temp_path, cur_file, str_len);
2882             name = temp_path;
2883 #else
2884 #warning add_data_files must define relative path copy for this platform
2885 #endif
2886             loader_get_fullpath(cur_file, name, sizeof(full_path), full_path);
2887             name = full_path;
2888 
2889             VkResult local_res;
2890             local_res = add_if_manifest_file(inst, name, out_files);
2891 
2892             // Incomplete means this was not a valid data file.
2893             if (local_res == VK_INCOMPLETE) {
2894                 continue;
2895             } else if (local_res != VK_SUCCESS) {
2896                 vk_result = local_res;
2897                 break;
2898             }
2899         } else {  // Otherwise, treat it as a directory
2900             dir_stream = loader_opendir(inst, cur_file);
2901             if (NULL == dir_stream) {
2902                 continue;
2903             }
2904             while (1) {
2905                 dir_entry = readdir(dir_stream);
2906                 if (NULL == dir_entry) {
2907                     break;
2908                 }
2909 
2910                 name = &(dir_entry->d_name[0]);
2911                 loader_get_fullpath(name, cur_file, sizeof(full_path), full_path);
2912                 name = full_path;
2913 
2914                 VkResult local_res;
2915                 local_res = add_if_manifest_file(inst, name, out_files);
2916 
2917                 // Incomplete means this was not a valid data file.
2918                 if (local_res == VK_INCOMPLETE) {
2919                     continue;
2920                 } else if (local_res != VK_SUCCESS) {
2921                     vk_result = local_res;
2922                     break;
2923                 }
2924             }
2925             loader_closedir(inst, dir_stream);
2926             if (vk_result != VK_SUCCESS) {
2927                 goto out;
2928             }
2929         }
2930         if (use_first_found_manifest && out_files->count > 0) {
2931             break;
2932         }
2933     }
2934 
2935 out:
2936 
2937     return vk_result;
2938 }
2939 
2940 // Look for data files in the provided paths, but first check the environment override to determine if we should use that
2941 // instead.
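// For example (illustrative paths), driver discovery can be pointed at a single manifest with
//     VK_DRIVER_FILES=/home/user/my_icd.json
// while the additive variables (VK_ADD_DRIVER_FILES / VK_ADD_LAYER_PATH, read below through the
// *_ENV_VAR macros) append extra search paths without replacing the defaults.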
2942 VkResult read_data_files_in_search_paths(const struct loader_instance *inst, enum loader_data_files_type manifest_type,
2943                                          const char *path_override, bool *override_active, struct loader_string_list *out_files) {
2944     VkResult vk_result = VK_SUCCESS;
2945     char *override_env = NULL;
2946     const char *override_path = NULL;
2947     char *additional_env = NULL;
2948     size_t search_path_size = 0;
2949     char *search_path = NULL;
2950     char *cur_path_ptr = NULL;
2951     bool use_first_found_manifest = false;
2952 #if COMMON_UNIX_PLATFORMS
2953     char *relative_location = NULL;  // Only used on unix platforms
2954     size_t rel_size = 0;             // not used on Windows; don't declare it there so no compiler warnings are generated
2955 #endif
2956 
2957 #if defined(_WIN32)
2958     char *package_path = NULL;
2959 #elif COMMON_UNIX_PLATFORMS
2960     // Determine how much space is needed to generate the full search path
2961     // for the current manifest files.
2962     char *xdg_config_home = loader_secure_getenv("XDG_CONFIG_HOME", inst);
2963     char *xdg_config_dirs = loader_secure_getenv("XDG_CONFIG_DIRS", inst);
2964 
2965 #if !defined(__Fuchsia__) && !defined(__QNX__) && !defined(__OHOS__)
2966     if (NULL == xdg_config_dirs || '\0' == xdg_config_dirs[0]) {
2967         xdg_config_dirs = FALLBACK_CONFIG_DIRS;
2968     }
2969 #endif
2970 
2971     char *xdg_data_home = loader_secure_getenv("XDG_DATA_HOME", inst);
2972     char *xdg_data_dirs = loader_secure_getenv("XDG_DATA_DIRS", inst);
2973 
2974 #if !defined(__Fuchsia__) && !defined(__QNX__) && !defined(__OHOS__)
2975     if (NULL == xdg_data_dirs || '\0' == xdg_data_dirs[0]) {
2976         xdg_data_dirs = FALLBACK_DATA_DIRS;
2977     }
2978 #endif
2979 
2980     char *home = NULL;
2981     char *default_data_home = NULL;
2982     char *default_config_home = NULL;
2983     char *home_data_dir = NULL;
2984     char *home_config_dir = NULL;
2985 
2986     // Only use HOME if XDG_CONFIG_HOME or XDG_DATA_HOME are not set on the system
2987     home = loader_secure_getenv("HOME", inst);
2988     if (home != NULL) {
2989         if (NULL == xdg_config_home || '\0' == xdg_config_home[0]) {
2990             const char config_suffix[] = "/.config";
2991             size_t default_config_home_len = strlen(home) + sizeof(config_suffix) + 1;
2992             default_config_home = loader_instance_heap_calloc(inst, default_config_home_len, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
2993             if (default_config_home == NULL) {
2994                 vk_result = VK_ERROR_OUT_OF_HOST_MEMORY;
2995                 goto out;
2996             }
2997             strncpy(default_config_home, home, default_config_home_len);
2998             strncat(default_config_home, config_suffix, default_config_home_len);
2999         }
3000         if (NULL == xdg_data_home || '\0' == xdg_data_home[0]) {
3001             const char data_suffix[] = "/.local/share";
3002             size_t default_data_home_len = strlen(home) + sizeof(data_suffix) + 1;
3003             default_data_home = loader_instance_heap_calloc(inst, default_data_home_len, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
3004             if (default_data_home == NULL) {
3005                 vk_result = VK_ERROR_OUT_OF_HOST_MEMORY;
3006                 goto out;
3007             }
3008             strncpy(default_data_home, home, default_data_home_len);
3009             strncat(default_data_home, data_suffix, default_data_home_len);
3010         }
3011     }
3012 
3013     if (NULL != default_config_home) {
3014         home_config_dir = default_config_home;
3015     } else {
3016         home_config_dir = xdg_config_home;
3017     }
3018     if (NULL != default_data_home) {
3019         home_data_dir = default_data_home;
3020     } else {
3021         home_data_dir = xdg_data_home;
3022     }
3023 
3024 #if defined(__OHOS__)
3025     char *debug_layer_name = loader_secure_getenv("debug.graphic.debug_layer", inst);
3026     char *debug_hap_name = loader_secure_getenv("debug.graphic.debug_hap", inst);
3027     char *debug_layer_json_path = NULL;
3028 
3029     bool currentProcessEnableDebugLayer = false;
3030     if (NULL != debug_layer_name && '\0' != debug_layer_name[0] && InitBundleInfo(debug_hap_name)) {
3031         currentProcessEnableDebugLayer = true;
3032         currentProcessEnableDebugLayer = true;
3033         debug_layer_json_path = loader_secure_getenv("debug.graphic.vklayer_json_path", inst);
3034         if (NULL == debug_layer_json_path || '\0' == debug_layer_json_path[0]) {
3035             const char default_json_path[] = "/data/storage/el2/base/haps/entry/files/";
3036             const char json_suffix[] = ".json";
3037             size_t debug_layer_json_path_len = strlen(default_json_path) + strlen(debug_layer_name) + sizeof(json_suffix) + 1;
3038             debug_layer_json_path = loader_instance_heap_calloc(inst, debug_layer_json_path_len, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
3039             if (debug_layer_json_path == NULL) {
3040                 vk_result = VK_ERROR_OUT_OF_HOST_MEMORY;
3041                 goto out;
3042             }
3043             strncpy(debug_layer_json_path, default_json_path, debug_layer_json_path_len);
3044             strncat(debug_layer_json_path, debug_layer_name, debug_layer_json_path_len);
3045             strncat(debug_layer_json_path, json_suffix, debug_layer_json_path_len);
3046         loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0, "OHOS:: debug_layer_json_path: %s", debug_layer_json_path);
3047     }
3048 #endif
3049 
3050 #else
3051 #warning read_data_files_in_search_paths unsupported platform
3052 #endif
3053 
3054     switch (manifest_type) {
3055         case LOADER_DATA_FILE_MANIFEST_DRIVER:
3056             override_env = loader_secure_getenv(VK_DRIVER_FILES_ENV_VAR, inst);
3057             if (NULL == override_env) {
3058                 // Not there, so fall back to the old name
3059                 override_env = loader_secure_getenv(VK_ICD_FILENAMES_ENV_VAR, inst);
3060             }
3061             additional_env = loader_secure_getenv(VK_ADDITIONAL_DRIVER_FILES_ENV_VAR, inst);
3062 #if COMMON_UNIX_PLATFORMS
3063             relative_location = VK_DRIVERS_INFO_RELATIVE_DIR;
3064 #endif
3065 #if defined(_WIN32)
3066             package_path = windows_get_app_package_manifest_path(inst);
3067 #endif
3068             break;
3069         case LOADER_DATA_FILE_MANIFEST_IMPLICIT_LAYER:
3070 #if COMMON_UNIX_PLATFORMS
3071             relative_location = VK_ILAYERS_INFO_RELATIVE_DIR;
3072 #endif
3073             break;
3074         case LOADER_DATA_FILE_MANIFEST_EXPLICIT_LAYER:
3075             override_env = loader_secure_getenv(VK_LAYER_PATH_ENV_VAR, inst);
3076             additional_env = loader_secure_getenv(VK_ADDITIONAL_LAYER_PATH_ENV_VAR, inst);
3077 #if COMMON_UNIX_PLATFORMS
3078             relative_location = VK_ELAYERS_INFO_RELATIVE_DIR;
3079 #endif
3080             break;
3081         default:
3082             assert(false && "Shouldn't get here!");
3083             break;
3084     }
3085 
3086     // Log a message when VK_LAYER_PATH is set but the override layer paths take priority
3087     if (manifest_type == LOADER_DATA_FILE_MANIFEST_EXPLICIT_LAYER && NULL != override_env && NULL != path_override) {
3088         loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0,
3089                    "Ignoring VK_LAYER_PATH. The Override layer is active and has override paths set, which takes priority. "
3090                    "VK_LAYER_PATH is set to %s",
3091                    override_env);
3092     }
3093 
3094     if (path_override != NULL) {
3095         override_path = path_override;
3096     } else if (override_env != NULL) {
3097         override_path = override_env;
3098     }
3099 
3100     // Add two by default for NULL terminator and one path separator on end (just in case)
3101     search_path_size = 2;
3102 
3103     // If there's an override, use that (and the local folder if required) and nothing else
3104     if (NULL != override_path) {
3105         // Local folder and null terminator
3106         search_path_size += strlen(override_path) + 2;
3107     } else {
3108         // Add the size of any additional search paths defined in the additive environment variable
3109         if (NULL != additional_env) {
3110             search_path_size += determine_data_file_path_size(additional_env, 0) + 2;
3111 #if defined(_WIN32)
3112         }
3113         if (NULL != package_path) {
3114             search_path_size += determine_data_file_path_size(package_path, 0) + 2;
3115         }
3116         if (search_path_size == 2) {
3117             goto out;
3118         }
3119 #elif COMMON_UNIX_PLATFORMS
3120         }
3121 
3122         // Add the general search folders (with the appropriate relative folder added)
3123         rel_size = strlen(relative_location);
3124         if (rel_size > 0) {
3125 #if defined(__APPLE__)
3126             search_path_size += MAXPATHLEN;
3127 #endif
3128 
3129 #if defined (__OHOS__)
3130             search_path_size += determine_data_file_path_size(IGRAPHICS_CONFG_DIR, rel_size);
3131 #endif
3132 
3133             // Only add the home folders if defined
3134             if (NULL != home_config_dir) {
3135                 search_path_size += determine_data_file_path_size(home_config_dir, rel_size);
3136             }
3137             search_path_size += determine_data_file_path_size(xdg_config_dirs, rel_size);
3138             search_path_size += determine_data_file_path_size(SYSCONFDIR, rel_size);
3139 #if defined(EXTRASYSCONFDIR)
3140             search_path_size += determine_data_file_path_size(EXTRASYSCONFDIR, rel_size);
3141 #endif
3142             // Only add the home folders if defined
3143             if (NULL != home_data_dir) {
3144                 search_path_size += determine_data_file_path_size(home_data_dir, rel_size);
3145             }
3146             search_path_size += determine_data_file_path_size(xdg_data_dirs, rel_size);
3147 #if defined (__OHOS__)
3148             if(currentProcessEnableDebugLayer) {
3149                 search_path_size += determine_data_file_path_size(debug_layer_json_path, rel_size);
3150             }
3151             search_path_size += determine_data_file_path_size("/system/etc/vulkan/swapchain", rel_size);
3152 #endif
3153         }
3154 #else
3155 #warning read_data_files_in_search_paths unsupported platform
3156 #endif
3157     }
3158 
3159     // Allocate the required space
3160     search_path = loader_instance_heap_calloc(inst, search_path_size, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
3161     if (NULL == search_path) {
3162         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
3163                    "read_data_files_in_search_paths: Failed to allocate space for search path of length %d",
3164                    (uint32_t)search_path_size);
3165         vk_result = VK_ERROR_OUT_OF_HOST_MEMORY;
3166         goto out;
3167     }
3168 
3169     cur_path_ptr = search_path;
3170 
3171     // Add the remaining paths to the list
3172     if (NULL != override_path) {
3173         size_t override_path_len = strlen(override_path);
3174         loader_strncpy(cur_path_ptr, search_path_size, override_path, override_path_len);
3175         cur_path_ptr += override_path_len;
3176     } else {
3177         // Add any additional search paths defined in the additive environment variable
3178         if (NULL != additional_env) {
3179             copy_data_file_info(additional_env, NULL, 0, &cur_path_ptr);
3180         }
3181 
3182 #if defined(_WIN32)
3183         if (NULL != package_path) {
3184             copy_data_file_info(package_path, NULL, 0, &cur_path_ptr);
3185         }
3186 #elif COMMON_UNIX_PLATFORMS
3187         if (rel_size > 0) {
3188 #if defined(__APPLE__)
3189             // Add the bundle's Resources dir to the beginning of the search path.
3190             // Looks for manifests in the bundle first, before any system directories.
3191             // This also appears to work unmodified for iOS; it finds the app bundle on the device's
3192             // file system. (RSW)
3193             CFBundleRef main_bundle = CFBundleGetMainBundle();
3194             if (NULL != main_bundle) {
3195                 CFURLRef ref = CFBundleCopyResourcesDirectoryURL(main_bundle);
3196                 if (NULL != ref) {
3197                     if (CFURLGetFileSystemRepresentation(ref, TRUE, (UInt8 *)cur_path_ptr, search_path_size)) {
3198                         cur_path_ptr += strlen(cur_path_ptr);
3199                         *cur_path_ptr++ = DIRECTORY_SYMBOL;
3200                         memcpy(cur_path_ptr, relative_location, rel_size);
3201                         cur_path_ptr += rel_size;
3202                         *cur_path_ptr++ = PATH_SEPARATOR;
3203                         if (manifest_type == LOADER_DATA_FILE_MANIFEST_DRIVER) {
3204                             use_first_found_manifest = true;
3205                         }
3206                     }
3207                     CFRelease(ref);
3208                 }
3209             }
3210 #endif  // __APPLE__
3211 
3212 #if defined (__OHOS__)
3213             copy_data_file_info(IGRAPHICS_CONFG_DIR, relative_location, rel_size, &cur_path_ptr);
3214 #endif
3215 
3216             // Only add the home folders if not NULL
3217             if (NULL != home_config_dir) {
3218                 copy_data_file_info(home_config_dir, relative_location, rel_size, &cur_path_ptr);
3219             }
3220             copy_data_file_info(xdg_config_dirs, relative_location, rel_size, &cur_path_ptr);
3221             copy_data_file_info(SYSCONFDIR, relative_location, rel_size, &cur_path_ptr);
3222 #if defined(EXTRASYSCONFDIR)
3223             copy_data_file_info(EXTRASYSCONFDIR, relative_location, rel_size, &cur_path_ptr);
3224 #endif
3225 
3226             // Only add the home folders if not NULL
3227             if (NULL != home_data_dir) {
3228                 copy_data_file_info(home_data_dir, relative_location, rel_size, &cur_path_ptr);
3229             }
3230             copy_data_file_info(xdg_data_dirs, relative_location, rel_size, &cur_path_ptr);
3231 #if defined(__OHOS__)
3232             if (currentProcessEnableDebugLayer) {
3233                 copy_data_file_info(debug_layer_json_path, relative_location, rel_size, &cur_path_ptr);
3234             }
3235             copy_data_file_info("/system/etc/vulkan/swapchain/", relative_location, rel_size, &cur_path_ptr);
3236 #endif
3237         }
3238 
3239         // Remove the last path separator
3240         --cur_path_ptr;
3241 
3242         assert(cur_path_ptr - search_path < (ptrdiff_t)search_path_size);
3243         *cur_path_ptr = '\0';
3244 #else
3245 #warning read_data_files_in_search_paths unsupported platform
3246 #endif
3247     }
3248 
3249     // Remove duplicate paths, or it would result in duplicate extensions, duplicate devices, etc.
3250     // This uses minimal memory, but is O(N^2) on the number of paths. Expect only a few paths.
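    // For example (illustrative paths): "/etc/vulkan/icd.d:/usr/share/vulkan/icd.d:/etc/vulkan/icd.d" is reduced by
    // the loop below to "/etc/vulkan/icd.d:/usr/share/vulkan/icd.d".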
3251     char path_sep_str[2] = {PATH_SEPARATOR, '\0'};
3252     size_t search_path_updated_size = strlen(search_path);
3253     for (size_t first = 0; first < search_path_updated_size;) {
3254         // If this is an empty path, erase it
3255         if (search_path[first] == PATH_SEPARATOR) {
3256             memmove(&search_path[first], &search_path[first + 1], search_path_updated_size - first + 1);
3257             search_path_updated_size -= 1;
3258             continue;
3259         }
3260 
3261         size_t first_end = first + 1;
3262         first_end += strcspn(&search_path[first_end], path_sep_str);
3263         for (size_t second = first_end + 1; second < search_path_updated_size;) {
3264             size_t second_end = second + 1;
3265             second_end += strcspn(&search_path[second_end], path_sep_str);
3266             if (first_end - first == second_end - second &&
3267                 !strncmp(&search_path[first], &search_path[second], second_end - second)) {
3268                 // Found duplicate. Include PATH_SEPARATOR in second_end, then erase it from search_path.
3269                 if (search_path[second_end] == PATH_SEPARATOR) {
3270                     second_end++;
3271                 }
3272                 memmove(&search_path[second], &search_path[second_end], search_path_updated_size - second_end + 1);
3273                 search_path_updated_size -= second_end - second;
3274             } else {
3275                 second = second_end + 1;
3276             }
3277         }
3278         first = first_end + 1;
3279     }
3280     search_path_size = search_path_updated_size;
3281 
3282     // Print out the paths being searched if debugging is enabled
3283     uint32_t log_flags = 0;
3284     if (search_path_size > 0) {
3285         char *tmp_search_path = loader_instance_heap_alloc(inst, search_path_size + 1, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
3286         if (NULL != tmp_search_path) {
3287             loader_strncpy(tmp_search_path, search_path_size + 1, search_path, search_path_size);
3288             tmp_search_path[search_path_size] = '\0';
3289             if (manifest_type == LOADER_DATA_FILE_MANIFEST_DRIVER) {
3290                 log_flags = VULKAN_LOADER_DRIVER_BIT;
3291                 loader_log(inst, VULKAN_LOADER_DRIVER_BIT, 0, "Searching for driver manifest files");
3292             } else {
3293                 log_flags = VULKAN_LOADER_LAYER_BIT;
3294                 loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "Searching for %s layer manifest files",
3295                            manifest_type == LOADER_DATA_FILE_MANIFEST_EXPLICIT_LAYER ? "explicit" : "implicit");
3296             }
3297             loader_log(inst, log_flags, 0, "   In following locations:");
3298             char *cur_file;
3299             char *next_file = tmp_search_path;
3300             while (NULL != next_file && *next_file != '\0') {
3301                 cur_file = next_file;
3302                 next_file = loader_get_next_path(cur_file);
3303                 loader_log(inst, log_flags, 0, "      %s", cur_file);
3304             }
3305             loader_instance_heap_free(inst, tmp_search_path);
3306         }
3307     }
3308 
3309     // Now, parse the paths and add any manifest files found in them.
3310     vk_result = add_data_files(inst, search_path, out_files, use_first_found_manifest);
3311 
3312     if (log_flags != 0 && out_files->count > 0) {
3313         loader_log(inst, log_flags, 0, "   Found the following files:");
3314         for (uint32_t cur_file = 0; cur_file < out_files->count; ++cur_file) {
3315             loader_log(inst, log_flags, 0, "      %s", out_files->list[cur_file]);
3316         }
3317     } else {
3318         loader_log(inst, log_flags, 0, "   Found no files");
3319     }
3320 
3321     if (NULL != override_path) {
3322         *override_active = true;
3323     } else {
3324         *override_active = false;
3325     }
3326 
3327 out:
3328 
3329     loader_free_getenv(additional_env, inst);
3330     loader_free_getenv(override_env, inst);
3331 #if defined(_WIN32)
3332     loader_instance_heap_free(inst, package_path);
3333 #elif COMMON_UNIX_PLATFORMS
3334     loader_free_getenv(xdg_config_home, inst);
3335     loader_free_getenv(xdg_config_dirs, inst);
3336     loader_free_getenv(xdg_data_home, inst);
3337     loader_free_getenv(xdg_data_dirs, inst);
3339     loader_free_getenv(home, inst);
3340     loader_instance_heap_free(inst, default_data_home);
3341     loader_instance_heap_free(inst, default_config_home);
3342 #elif defined(__OHOS__)
3343     if (currentProcessEnableDebugLayer) {
3344         loader_free_getenv(debug_layer_json_path, inst);
3345     }
3346     loader_free_getenv(debug_layer_name, inst);
3347     loader_free_getenv(debug_hap_name, inst);
3348 #else
3349 #warning read_data_files_in_search_paths unsupported platform
3350 #endif
3351 
3352     loader_instance_heap_free(inst, search_path);
3353 
3354     return vk_result;
3355 }
3356 
3357 // Find the Vulkan library manifest files.
3358 //
3359 // This function scans the appropriate locations for a list of JSON manifest files based on the
3360 // "manifest_type".  The location is interpreted as a registry path on Windows and as one or more
3361 // directory paths on Linux.
3362 // "home_location" is an additional directory in the users home directory to look at. It is
3363 // expanded into the dir path $XDG_DATA_HOME/home_location or $HOME/.local/share/home_location
3364 // depending on environment variables. This "home_location" is only used on Linux.
3365 //
3366 // \returns
3367 // VkResult
3368 // A string list of manifest files to be opened in out_files param.
3369 // List has a pointer to string for each manifest filename.
3370 // When done using the list in out_files, pointers should be freed.
3371 // Location or override string lists can be either files or directories as
3372 // follows:
3373 //            | location | override
3374 // --------------------------------
3375 // Win ICD    | files    | files
3376 // Win Layer  | files    | dirs
3377 // Linux ICD  | dirs     | files
3378 // Linux Layer| dirs     | dirs
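//
// For example (illustrative), with home_location "vulkan/explicit_layer.d" and XDG_DATA_HOME unset, the home entry
// expands to $HOME/.local/share/vulkan/explicit_layer.d; system entries such as /etc/vulkan/explicit_layer.d are
// appended after it, following the layout built in read_data_files_in_search_paths() above.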
3379 
3380 VkResult loader_get_data_files(const struct loader_instance *inst, enum loader_data_files_type manifest_type,
3381                                const char *path_override, struct loader_string_list *out_files) {
3382     VkResult res = VK_SUCCESS;
3383     bool override_active = false;
3384 
3385     // Free and init the out_files information so there's no false data left from uninitialized variables.
3386     free_string_list(inst, out_files);
3387 
3388     res = read_data_files_in_search_paths(inst, manifest_type, path_override, &override_active, out_files);
3389     if (VK_SUCCESS != res) {
3390         goto out;
3391     }
3392 
3393 #if defined(_WIN32)
3394     // Read the registry if the override wasn't active.
3395     if (!override_active) {
3396         bool warn_if_not_present = false;
3397         char *registry_location = NULL;
3398 
3399         switch (manifest_type) {
3400             default:
3401                 goto out;
3402             case LOADER_DATA_FILE_MANIFEST_DRIVER:
3403                 warn_if_not_present = true;
3404                 registry_location = VK_DRIVERS_INFO_REGISTRY_LOC;
3405                 break;
3406             case LOADER_DATA_FILE_MANIFEST_IMPLICIT_LAYER:
3407                 registry_location = VK_ILAYERS_INFO_REGISTRY_LOC;
3408                 break;
3409             case LOADER_DATA_FILE_MANIFEST_EXPLICIT_LAYER:
3410                 warn_if_not_present = true;
3411                 registry_location = VK_ELAYERS_INFO_REGISTRY_LOC;
3412                 break;
3413         }
3414         VkResult tmp_res =
3415             windows_read_data_files_in_registry(inst, manifest_type, warn_if_not_present, registry_location, out_files);
3416         // Only return an error if there was an error this time, and no manifest files from before.
3417         if (VK_SUCCESS != tmp_res && out_files->count == 0) {
3418             res = tmp_res;
3419             goto out;
3420         }
3421     }
3422 #endif
3423 
3424 out:
3425 
3426     if (VK_SUCCESS != res) {
3427         free_string_list(inst, out_files);
3428     }
3429 
3430     return res;
3431 }
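
/*
 * Minimal illustrative sketch (not part of the original source) of how this function is typically driven by the
 * scanning code below; only declarations already visible in this file are assumed:
 *
 *     struct loader_string_list manifest_files = {0};
 *     VkResult res = loader_get_data_files(inst, LOADER_DATA_FILE_MANIFEST_DRIVER, NULL, &manifest_files);
 *     if (VK_SUCCESS == res) {
 *         for (uint32_t i = 0; i < manifest_files.count; i++) {
 *             // manifest_files.list[i] is a full path to a JSON manifest file
 *         }
 *     }
 *     free_string_list(inst, &manifest_files);
 */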
3432 
3433 struct ICDManifestInfo {
3434     char *full_library_path;
3435     uint32_t version;
3436 };
3437 
3438 // Takes a json file, opens, reads, and parses an ICD Manifest out of it.
3439 // Should only return VK_SUCCESS, VK_ERROR_INCOMPATIBLE_DRIVER, or VK_ERROR_OUT_OF_HOST_MEMORY
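// An ICD manifest of the shape this function expects looks roughly like the following (illustrative values only):
//
//     {
//         "file_format_version": "1.0.1",
//         "ICD": {
//             "library_path": "path/to/driver_library.so",
//             "api_version": "1.3.250",
//             "library_arch": "64",
//             "is_portability_driver": false
//         }
//     }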
3440 VkResult loader_parse_icd_manifest(const struct loader_instance *inst, char *file_str, struct ICDManifestInfo *icd,
3441                                    bool *skipped_portability_drivers) {
3442     VkResult res = VK_SUCCESS;
3443     cJSON *json = NULL;
3444     char *file_vers_str = NULL;
3445     char *library_arch_str = NULL;
3446     char *version_str = NULL;
3447 
3448     if (file_str == NULL) {
3449         goto out;
3450     }
3451 
3452     res = loader_get_json(inst, file_str, &json);
3453     if (res == VK_ERROR_OUT_OF_HOST_MEMORY) {
3454         goto out;
3455     }
3456     if (res != VK_SUCCESS || NULL == json) {
3457         res = VK_ERROR_INCOMPATIBLE_DRIVER;
3458         goto out;
3459     }
3460 
3461     cJSON *item = loader_cJSON_GetObjectItem(json, "file_format_version");
3462     if (item == NULL) {
3463         loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3464                    "loader_parse_icd_manifest: ICD JSON %s does not have a \'file_format_version\' field. Skipping ICD JSON.",
3465                    file_str);
3466         res = VK_ERROR_INCOMPATIBLE_DRIVER;
3467         goto out;
3468     }
3469 
3470     file_vers_str = loader_cJSON_Print(item);
3471     if (NULL == file_vers_str) {
3472         // Only reason the print can fail is if there was an allocation issue
3473         loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3474                    "loader_parse_icd_manifest: Failed retrieving ICD JSON %s \'file_format_version\' field. Skipping ICD JSON",
3475                    file_str);
3476         res = VK_ERROR_OUT_OF_HOST_MEMORY;
3477         goto out;
3478     }
3479     loader_log(inst, VULKAN_LOADER_DRIVER_BIT, 0, "Found ICD manifest file %s, version %s", file_str, file_vers_str);
3480 
3481     // Get the version of the driver manifest
3482     loader_api_version json_file_version = loader_make_full_version(loader_parse_version_string(file_vers_str));
3483 
3484     // Loader only knows versions 1.0.0 and 1.0.1, anything above it is unknown
3485     if (loader_check_version_meets_required(loader_combine_version(1, 0, 2), json_file_version)) {
3486         loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3487                    "loader_parse_icd_manifest: %s has unknown icd manifest file version %d.%d.%d. May cause errors.", file_str,
3488                    json_file_version.major, json_file_version.minor, json_file_version.patch);
3489     }
3490 
3491     cJSON *itemICD = loader_cJSON_GetObjectItem(json, "ICD");
3492     if (itemICD == NULL) {
3493         loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3494                    "loader_parse_icd_manifest: Can not find \'ICD\' object in ICD JSON file %s. Skipping ICD JSON", file_str);
3495         res = VK_ERROR_INCOMPATIBLE_DRIVER;
3496         goto out;
3497     }
3498 
3499     item = loader_cJSON_GetObjectItem(itemICD, "library_path");
3500     if (item == NULL) {
3501         loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3502                    "loader_parse_icd_manifest: Failed to find \'library_path\' object in ICD JSON file %s. Skipping ICD JSON.",
3503                    file_str);
3504         res = VK_ERROR_INCOMPATIBLE_DRIVER;
3505         goto out;
3506     }
3507     char *library_path = loader_cJSON_Print(item);
3508     if (!library_path) {
3509         loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3510                    "loader_parse_icd_manifest: Failed retrieving ICD JSON %s \'library_path\' field. Skipping ICD JSON.", file_str);
3511         res = VK_ERROR_OUT_OF_HOST_MEMORY;
3512         goto out;
3513     }
3514 
3515     if (strlen(library_path) == 0) {
3516         loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3517                    "loader_parse_icd_manifest: ICD JSON %s \'library_path\' field is empty. Skipping ICD JSON.", file_str);
3518         res = VK_ERROR_INCOMPATIBLE_DRIVER;
3519         goto out;
3520     }
3521 
3522     // Print out the paths being searched if debugging is enabled
3523     loader_log(inst, VULKAN_LOADER_DEBUG_BIT | VULKAN_LOADER_DRIVER_BIT, 0, "Searching for ICD drivers named %s", library_path);
3524     // This function takes ownership of library_path - so we don't need to clean it up
3525     res = combine_manifest_directory_and_library_path(inst, library_path, file_str, &icd->full_library_path);
3526     if (VK_SUCCESS != res) {
3527         goto out;
3528     }
3529 
3530     item = loader_cJSON_GetObjectItem(itemICD, "api_version");
3531     if (item == NULL) {
3532         loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3533                    "loader_parse_icd_manifest: ICD JSON %s does not have an \'api_version\' field. Skipping ICD JSON.", file_str);
3534         res = VK_ERROR_INCOMPATIBLE_DRIVER;
3535         goto out;
3536     }
3537     version_str = loader_cJSON_Print(item);
3538     if (NULL == version_str) {
3539         // Only reason the print can fail is if there was an allocation issue
3540         loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3541                    "loader_parse_icd_manifest: Failed retrieving ICD JSON %s \'api_version\' field. Skipping ICD JSON.", file_str);
3542 
3543         res = VK_ERROR_OUT_OF_HOST_MEMORY;
3544         goto out;
3545     }
3546     icd->version = loader_parse_version_string(version_str);
3547 
3548     if (VK_API_VERSION_VARIANT(icd->version) != 0) {
3549         loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3550                    "loader_parse_icd_manifest: Driver's ICD JSON %s \'api_version\' field contains a non-zero variant value of %d. "
3551                    " Skipping ICD JSON.",
3552                    file_str, VK_API_VERSION_VARIANT(icd->version));
3553         res = VK_ERROR_INCOMPATIBLE_DRIVER;
3554         goto out;
3555     }
3556 
3557     // Skip over ICDs which contain a true "is_portability_driver" value whenever the application doesn't enable
3558     // portability enumeration.
3559     item = loader_cJSON_GetObjectItem(itemICD, "is_portability_driver");
3560     if (item != NULL && item->type == cJSON_True && inst && !inst->portability_enumeration_enabled) {
3561         if (skipped_portability_drivers) {
3562             *skipped_portability_drivers = true;
3563         }
3564         res = VK_ERROR_INCOMPATIBLE_DRIVER;
3565         goto out;
3566     }
3567 
3568     item = loader_cJSON_GetObjectItem(itemICD, "library_arch");
3569     if (item != NULL) {
3570         library_arch_str = loader_cJSON_Print(item);
3571         if (NULL != library_arch_str) {
3572             // cJSON includes the quotes by default, so we need to look for those here
3573             if ((strncmp(library_arch_str, "\"32\"", 4) == 0 && sizeof(void *) != 4) ||
3574                 (strncmp(library_arch_str, "\"64\"", 4) == 0 && sizeof(void *) != 8)) {
3575                 loader_log(inst, VULKAN_LOADER_INFO_BIT, 0,
3576                            "loader_parse_icd_manifest: Driver library architecture doesn't match the current running "
3577                            "architecture, skipping this driver");
3578                 res = VK_ERROR_INCOMPATIBLE_DRIVER;
3579                 goto out;
3580             }
3581         } else {
3582             res = VK_ERROR_OUT_OF_HOST_MEMORY;
3583             goto out;
3584         }
3585     }
3586 out:
3587     loader_cJSON_Delete(json);
3588     loader_instance_heap_free(inst, file_vers_str);
3589     loader_instance_heap_free(inst, version_str);
3590     loader_instance_heap_free(inst, library_arch_str);
3591     return res;
3592 }
3593 
3594 // Try to find the Vulkan ICD driver(s).
3595 //
3596 // This function scans the default system loader path(s) or path specified by either the
3597 // VK_DRIVER_FILES or VK_ICD_FILENAMES environment variable in order to find loadable
3598 // VK ICDs manifest files.
3599 // From these manifest files it finds the ICD libraries.
3600 //
3601 // skipped_portability_drivers is used to report whether the loader found drivers which report
3602 // portability but the application didn't enable the bit to enumerate them
3603 // Can be NULL
3604 //
3605 // \returns
3606 // Vulkan result
3607 // (on result == VK_SUCCESS) a list of icds that were discovered
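//
// For example (illustrative paths), a POSIX user can point the loader at specific manifests with
//     VK_DRIVER_FILES=/path/to/a_icd.json:/path/to/b_icd.json
// using the platform PATH_SEPARATOR between entries (';' on Windows).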
3608 VkResult loader_icd_scan(const struct loader_instance *inst, struct loader_icd_tramp_list *icd_tramp_list,
3609                          const VkInstanceCreateInfo *pCreateInfo, bool *skipped_portability_drivers) {
3610     VkResult res = VK_SUCCESS;
3611     struct loader_string_list manifest_files = {0};
3612     struct loader_envvar_filter select_filter = {0};
3613     struct loader_envvar_filter disable_filter = {0};
3614     struct ICDManifestInfo *icd_details = NULL;
3615 
3616     // Set up the ICD Trampoline list so elements can be written into it.
3617     res = loader_scanned_icd_init(inst, icd_tramp_list);
3618     if (res == VK_ERROR_OUT_OF_HOST_MEMORY) {
3619         return res;
3620     }
3621 
3622     bool direct_driver_loading_exclusive_mode = false;
3623     res = loader_scan_for_direct_drivers(inst, pCreateInfo, icd_tramp_list, &direct_driver_loading_exclusive_mode);
3624     if (res == VK_ERROR_OUT_OF_HOST_MEMORY) {
3625         goto out;
3626     }
3627     if (direct_driver_loading_exclusive_mode) {
3628         // Make sure to jump over the system & env-var driver discovery mechanisms if exclusive mode is set, even if no drivers
3629         // were successfully found through the direct driver loading mechanism
3630         goto out;
3631     }
3632 
3633     // Parse the filter environment variables to determine if we have any special behavior
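    // For example (illustrative), setting the variable named by VK_DRIVERS_DISABLE_ENV_VAR to a glob such as
    // "*foo_icd*" would cause matching manifest filenames to be skipped in the loop below, unless the same name is
    // also matched by the select filter.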
3634     res = parse_generic_filter_environment_var(inst, VK_DRIVERS_SELECT_ENV_VAR, &select_filter);
3635     if (VK_SUCCESS != res) {
3636         goto out;
3637     }
3638     res = parse_generic_filter_environment_var(inst, VK_DRIVERS_DISABLE_ENV_VAR, &disable_filter);
3639     if (VK_SUCCESS != res) {
3640         goto out;
3641     }
3642 
3643     // Get a list of manifest files for ICDs
3644     res = loader_get_data_files(inst, LOADER_DATA_FILE_MANIFEST_DRIVER, NULL, &manifest_files);
3645     if (VK_SUCCESS != res) {
3646         goto out;
3647     }
3648 
3649     icd_details = loader_stack_alloc(sizeof(struct ICDManifestInfo) * manifest_files.count);
3650     if (NULL == icd_details) {
3651         res = VK_ERROR_OUT_OF_HOST_MEMORY;
3652         goto out;
3653     }
3654     memset(icd_details, 0, sizeof(struct ICDManifestInfo) * manifest_files.count);
3655 
3656     for (uint32_t i = 0; i < manifest_files.count; i++) {
3657         VkResult icd_res = VK_SUCCESS;
3658 
3659         icd_res = loader_parse_icd_manifest(inst, manifest_files.list[i], &icd_details[i], skipped_portability_drivers);
3660         if (VK_ERROR_OUT_OF_HOST_MEMORY == icd_res) {
3661             res = icd_res;
3662             goto out;
3663         } else if (VK_ERROR_INCOMPATIBLE_DRIVER == icd_res) {
3664             continue;
3665         }
3666 
3667         if (select_filter.count > 0 || disable_filter.count > 0) {
3668             // Get only the filename for comparing to the filters
3669             char *just_filename_str = strrchr(manifest_files.list[i], DIRECTORY_SYMBOL);
3670 
3671             // No directory symbol, just the filename
3672             if (NULL == just_filename_str) {
3673                 just_filename_str = manifest_files.list[i];
3674             } else {
3675                 just_filename_str++;
3676             }
3677 
3678             bool name_matches_select =
3679                 (select_filter.count > 0 && check_name_matches_filter_environment_var(just_filename_str, &select_filter));
3680             bool name_matches_disable =
3681                 (disable_filter.count > 0 && check_name_matches_filter_environment_var(just_filename_str, &disable_filter));
3682 
3683             if (name_matches_disable && !name_matches_select) {
3684                 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3685                            "Driver \"%s\" ignored because it was disabled by env var \'%s\'", just_filename_str,
3686                            VK_DRIVERS_DISABLE_ENV_VAR);
3687                 continue;
3688             }
3689             if (select_filter.count != 0 && !name_matches_select) {
3690                 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3691                            "Driver \"%s\" ignored because not selected by env var \'%s\'", just_filename_str,
3692                            VK_DRIVERS_SELECT_ENV_VAR);
3693                 continue;
3694             }
3695         }
3696 
3697         enum loader_layer_library_status lib_status;
3698         icd_res =
3699             loader_scanned_icd_add(inst, icd_tramp_list, icd_details[i].full_library_path, icd_details[i].version, &lib_status);
3700         if (VK_ERROR_OUT_OF_HOST_MEMORY == icd_res) {
3701             res = icd_res;
3702             goto out;
3703         } else if (VK_ERROR_INCOMPATIBLE_DRIVER == icd_res) {
3704             switch (lib_status) {
3705                 case LOADER_LAYER_LIB_NOT_LOADED:
3706                 case LOADER_LAYER_LIB_ERROR_FAILED_TO_LOAD:
3707                     loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3708                                "loader_icd_scan: Failed loading library associated with ICD JSON %s. Ignoring this JSON",
3709                                icd_details[i].full_library_path);
3710                     break;
3711                 case LOADER_LAYER_LIB_ERROR_WRONG_BIT_TYPE: {
3712                     loader_log(inst, VULKAN_LOADER_DRIVER_BIT, 0, "Requested ICD %s was wrong bit-type. Ignoring this JSON",
3713                                icd_details[i].full_library_path);
3714                     break;
3715                 }
3716                 case LOADER_LAYER_LIB_SUCCESS_LOADED:
3717                 case LOADER_LAYER_LIB_ERROR_OUT_OF_MEMORY:
3718                     // Shouldn't be able to reach this, but if we do, best to report a debug message
3719                     loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3720                                "Shouldn't reach this. A valid version of requested ICD %s was loaded but something bad "
3721                                "happened afterwards.",
3722                                icd_details[i].full_library_path);
3723                     break;
3724             }
3725         }
3726     }
3727 
3728 out:
3729     if (NULL != icd_details) {
3730         // Successfully got the icd_details structure, which means we need to free the paths contained within
3731         for (uint32_t i = 0; i < manifest_files.count; i++) {
3732             loader_instance_heap_free(inst, icd_details[i].full_library_path);
3733         }
3734     }
3735     free_string_list(inst, &manifest_files);
3736     return res;
3737 }
3738 
3739 // Gets the layer data files corresponding to manifest_type & path_override, then parses the resulting json objects
3740 // into instance_layers
3741 // Manifest type must be either implicit or explicit
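//
// A layer manifest of the kind parsed here looks roughly like the following (illustrative values only; the fields are
// consumed by loader_add_layer_properties()):
//
//     {
//         "file_format_version": "1.1.0",
//         "layer": {
//             "name": "VK_LAYER_EXAMPLE_example",
//             "type": "GLOBAL",
//             "library_path": "libVkLayer_example.so",
//             "api_version": "1.3.250",
//             "implementation_version": "1",
//             "description": "Example layer"
//         }
//     }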
3742 VkResult loader_parse_instance_layers(struct loader_instance *inst, enum loader_data_files_type manifest_type,
3743                                       const char *path_override, struct loader_layer_list *instance_layers) {
3744     assert(manifest_type == LOADER_DATA_FILE_MANIFEST_IMPLICIT_LAYER || manifest_type == LOADER_DATA_FILE_MANIFEST_EXPLICIT_LAYER);
3745     VkResult res = VK_SUCCESS;
3746     struct loader_string_list manifest_files = {0};
3747 
3748     res = loader_get_data_files(inst, manifest_type, path_override, &manifest_files);
3749     if (VK_SUCCESS != res) {
3750         goto out;
3751     }
3752 
3753     for (uint32_t i = 0; i < manifest_files.count; i++) {
3754         char *file_str = manifest_files.list[i];
3755         if (file_str == NULL) {
3756             continue;
3757         }
3758 
3759         // Parse file into JSON struct
3760         cJSON *json = NULL;
3761         VkResult local_res = loader_get_json(inst, file_str, &json);
3762         if (VK_ERROR_OUT_OF_HOST_MEMORY == local_res) {
3763             res = VK_ERROR_OUT_OF_HOST_MEMORY;
3764             goto out;
3765         } else if (VK_SUCCESS != local_res || NULL == json) {
3766             continue;
3767         }
3768 
3769         local_res = loader_add_layer_properties(inst, instance_layers, json,
3770                                                 manifest_type == LOADER_DATA_FILE_MANIFEST_IMPLICIT_LAYER, file_str);
3771         loader_cJSON_Delete(json);
3772 
3773         // If the error is anything other than out of memory we still want to try to load the other layers
3774         if (VK_ERROR_OUT_OF_HOST_MEMORY == local_res) {
3775             res = VK_ERROR_OUT_OF_HOST_MEMORY;
3776             goto out;
3777         }
3778     }
3779 out:
3780     free_string_list(inst, &manifest_files);
3781 
3782     return res;
3783 }
3784 
3785 // Given a loader_layer_properties struct that is a valid override layer, concatenate the properties override paths and put them
3786 // into the output parameter override_paths
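//
// For example (illustrative), override_paths.list = { "/opt/vk_layers", "/home/user/layers" } is concatenated into
// "/opt/vk_layers:/home/user/layers" (PATH_SEPARATOR is ';' on Windows).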
3787 VkResult get_override_layer_override_paths(struct loader_instance *inst, struct loader_layer_properties *prop,
3788                                            char **override_paths) {
3789     if (prop->override_paths.count > 0) {
3790         char *cur_write_ptr = NULL;
3791         size_t override_path_size = 0;
3792         for (uint32_t j = 0; j < prop->override_paths.count; j++) {
3793             override_path_size += determine_data_file_path_size(prop->override_paths.list[j], 0);
3794         }
3795         *override_paths = loader_instance_heap_alloc(inst, override_path_size, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
3796         if (*override_paths == NULL) {
3797             return VK_ERROR_OUT_OF_HOST_MEMORY;
3798         }
3799         cur_write_ptr = &(*override_paths)[0];
3800         for (uint32_t j = 0; j < prop->override_paths.count; j++) {
3801             copy_data_file_info(prop->override_paths.list[j], NULL, 0, &cur_write_ptr);
3802         }
3803         // Remove the last path separator
3804         --cur_write_ptr;
3805         assert(cur_write_ptr - (*override_paths) < (ptrdiff_t)override_path_size);
3806         *cur_write_ptr = '\0';
3807         loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Override layer has override paths set to %s",
3808                    *override_paths);
3809     }
3810     return VK_SUCCESS;
3811 }
3812 
3813 VkResult loader_scan_for_layers(struct loader_instance *inst, struct loader_layer_list *instance_layers,
3814                                 const struct loader_envvar_all_filters *filters) {
3815     VkResult res = VK_SUCCESS;
3816     struct loader_layer_list settings_layers = {0};
3817     struct loader_layer_list regular_instance_layers = {0};
3818     bool override_layer_valid = false;
3819     char *override_paths = NULL;
3820 
3821     bool should_search_for_other_layers = true;
3822     res = get_settings_layers(inst, &settings_layers, &should_search_for_other_layers);
3823     if (VK_SUCCESS != res) {
3824         goto out;
3825     }
3826 
3827     // If we should not look for layers using other mechanisms, assign settings_layers to instance_layers and jump to the
3828     // output
3829     if (!should_search_for_other_layers) {
3830         *instance_layers = settings_layers;
3831         memset(&settings_layers, 0, sizeof(struct loader_layer_list));
3832         goto out;
3833     }
3834 
3835     res = loader_parse_instance_layers(inst, LOADER_DATA_FILE_MANIFEST_IMPLICIT_LAYER, NULL, &regular_instance_layers);
3836     if (VK_SUCCESS != res) {
3837         goto out;
3838     }
3839 
3840     // Remove any extraneous override layers.
3841     remove_all_non_valid_override_layers(inst, &regular_instance_layers);
3842 
3843     // Check to see if the override layer is present, and use its override paths.
3844     for (uint32_t i = 0; i < regular_instance_layers.count; i++) {
3845         struct loader_layer_properties *prop = &regular_instance_layers.list[i];
3846         if (prop->is_override && loader_implicit_layer_is_enabled(inst, filters, prop) && prop->override_paths.count > 0) {
3847             res = get_override_layer_override_paths(inst, prop, &override_paths);
3848             if (VK_SUCCESS != res) {
3849                 goto out;
3850             }
3851             break;
3852         }
3853     }
3854 
3855     // Get a list of manifest files for explicit layers
3856     res = loader_parse_instance_layers(inst, LOADER_DATA_FILE_MANIFEST_EXPLICIT_LAYER, override_paths, &regular_instance_layers);
3857     if (VK_SUCCESS != res) {
3858         goto out;
3859     }
3860 
3861     // Verify any meta-layers in the list are valid and all the component layers are
3862     // actually present in the available layer list
3863     res = verify_all_meta_layers(inst, filters, &regular_instance_layers, &override_layer_valid);
3864     if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
3865         return res;
3866     }
3867 
3868     if (override_layer_valid) {
3869         loader_remove_layers_in_blacklist(inst, &regular_instance_layers);
3870         if (NULL != inst) {
3871             inst->override_layer_present = true;
3872         }
3873     }
3874 
3875     // Remove disabled layers
3876     for (uint32_t i = 0; i < regular_instance_layers.count; ++i) {
3877         if (!loader_layer_is_available(inst, filters, &regular_instance_layers.list[i])) {
3878             loader_remove_layer_in_list(inst, &regular_instance_layers, i);
3879             i--;
3880         }
3881     }
3882 
3883     res = combine_settings_layers_with_regular_layers(inst, &settings_layers, &regular_instance_layers, instance_layers);
3884 
3885 out:
3886     loader_delete_layer_list_and_properties(inst, &settings_layers);
3887     loader_delete_layer_list_and_properties(inst, &regular_instance_layers);
3888 
3889     loader_instance_heap_free(inst, override_paths);
3890     return res;
3891 }
3892 
3893 VkResult loader_scan_for_implicit_layers(struct loader_instance *inst, struct loader_layer_list *instance_layers,
3894                                          const struct loader_envvar_all_filters *layer_filters) {
3895     VkResult res = VK_SUCCESS;
3896     struct loader_layer_list settings_layers = {0};
3897     struct loader_layer_list regular_instance_layers = {0};
3898     bool override_layer_valid = false;
3899     char *override_paths = NULL;
3900     bool implicit_metalayer_present = false;
3901 
3902     bool should_search_for_other_layers = true;
3903     res = get_settings_layers(inst, &settings_layers, &should_search_for_other_layers);
3904     if (VK_SUCCESS != res) {
3905         goto out;
3906     }
3907 
3908     // If we should not look for layers using other mechanisms, assign settings_layers to instance_layers and jump to the
3909     // output
3910     if (!should_search_for_other_layers) {
3911         *instance_layers = settings_layers;
3912         memset(&settings_layers, 0, sizeof(struct loader_layer_list));
3913         goto out;
3914     }
3915 
3916     res = loader_parse_instance_layers(inst, LOADER_DATA_FILE_MANIFEST_IMPLICIT_LAYER, NULL, &regular_instance_layers);
3917     if (VK_SUCCESS != res) {
3918         goto out;
3919     }
3920 
3921     // Remove any extraneous override layers.
3922     remove_all_non_valid_override_layers(inst, &regular_instance_layers);
3923 
3924     // Check to see if either the override layer or another implicit meta-layer is present.
3925     // Each of these may require explicit layers to be enabled at this time.
3926     for (uint32_t i = 0; i < regular_instance_layers.count; i++) {
3927         struct loader_layer_properties *prop = &regular_instance_layers.list[i];
3928         if (prop->is_override && loader_implicit_layer_is_enabled(inst, layer_filters, prop)) {
3929             override_layer_valid = true;
3930             res = get_override_layer_override_paths(inst, prop, &override_paths);
3931             if (VK_SUCCESS != res) {
3932                 goto out;
3933             }
3934         } else if (!prop->is_override && prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER) {
3935             implicit_metalayer_present = true;
3936         }
3937     }
3938 
3939     // If either the override layer or an implicit meta-layer is present, we need to add
3940     // explicit layer info as well.  Not to worry, though, all explicit layers not included
3941     // in the override layer will be removed below in loader_remove_layers_in_blacklist().
3942     if (override_layer_valid || implicit_metalayer_present) {
3943         res =
3944             loader_parse_instance_layers(inst, LOADER_DATA_FILE_MANIFEST_EXPLICIT_LAYER, override_paths, &regular_instance_layers);
3945         if (VK_SUCCESS != res) {
3946             goto out;
3947         }
3948     }
3949 
3950     // Verify any meta-layers in the list are valid and all the component layers are
3951     // actually present in the available layer list
3952     res = verify_all_meta_layers(inst, layer_filters, &regular_instance_layers, &override_layer_valid);
3953     if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
3954         return res;
3955     }
3956 
3957     if (override_layer_valid || implicit_metalayer_present) {
3958         loader_remove_layers_not_in_implicit_meta_layers(inst, &regular_instance_layers);
3959         if (override_layer_valid && inst != NULL) {
3960             inst->override_layer_present = true;
3961         }
3962     }
3963 
3964     // Remove disabled layers
3965     for (uint32_t i = 0; i < regular_instance_layers.count; ++i) {
3966         if (!loader_implicit_layer_is_enabled(inst, layer_filters, &regular_instance_layers.list[i])) {
3967             loader_remove_layer_in_list(inst, &regular_instance_layers, i);
3968             i--;
3969         }
3970     }
3971 
3972     res = combine_settings_layers_with_regular_layers(inst, &settings_layers, &regular_instance_layers, instance_layers);
3973 
3974 out:
3975     loader_delete_layer_list_and_properties(inst, &settings_layers);
3976     loader_delete_layer_list_and_properties(inst, &regular_instance_layers);
3977 
3978     loader_instance_heap_free(inst, override_paths);
3979     return res;
3980 }
3981 
3982 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL loader_gpdpa_instance_terminator(VkInstance inst, const char *pName) {
3983     // inst is not wrapped
3984     if (inst == VK_NULL_HANDLE) {
3985         return NULL;
3986     }
3987 
3988     VkLayerInstanceDispatchTable *disp_table = *(VkLayerInstanceDispatchTable **)inst;
3989 
3990     if (disp_table == NULL) return NULL;
3991 
3992     struct loader_instance *loader_inst = loader_get_instance(inst);
3993 
3994     if (loader_inst->instance_finished_creation) {
3995         disp_table = &loader_inst->terminator_dispatch;
3996     }
3997 
3998     bool found_name;
3999     void *addr = loader_lookup_instance_dispatch_table(disp_table, pName, &found_name);
4000     if (found_name) {
4001         return addr;
4002     }
4003 
4004     // Check if any drivers support the function, and if so, add it to the unknown function list
4005     addr = loader_phys_dev_ext_gpa_term(loader_get_instance(inst), pName);
4006     if (NULL != addr) return addr;
4007 
4008     // Don't call down the chain, this would be an infinite loop
4009     loader_log(NULL, VULKAN_LOADER_DEBUG_BIT, 0, "loader_gpdpa_instance_terminator() unrecognized name %s", pName);
4010     return NULL;
4011 }
4012 
4013 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL loader_gpa_instance_terminator(VkInstance inst, const char *pName) {
4014     // Global functions - Do not need a valid instance handle to query
4015     if (!strcmp(pName, "vkGetInstanceProcAddr")) {
4016         return (PFN_vkVoidFunction)loader_gpa_instance_terminator;
4017     }
4018     if (!strcmp(pName, "vk_layerGetPhysicalDeviceProcAddr")) {
4019         return (PFN_vkVoidFunction)loader_gpdpa_instance_terminator;
4020     }
4021     if (!strcmp(pName, "vkCreateInstance")) {
4022         return (PFN_vkVoidFunction)terminator_CreateInstance;
4023     }
4024 
4025     // While the spec is very clear that querying vkCreateDevice requires a valid VkInstance, because the loader allowed querying
4026     // with a NULL VkInstance handle for a long enough time, it is impractical to fix this bug in the loader
4027 
4028     // As such, this bug is kept to maintain compatibility with the RTSS layer (Riva Tuner Statistics Server) but may
4029     // be depended upon by other layers out in the wild.
4030     if (!strcmp(pName, "vkCreateDevice")) {
4031         return (PFN_vkVoidFunction)terminator_CreateDevice;
4032     }
4033 
4034     // inst is not wrapped
4035     if (inst == VK_NULL_HANDLE) {
4036         return NULL;
4037     }
4038     VkLayerInstanceDispatchTable *disp_table = *(VkLayerInstanceDispatchTable **)inst;
4039 
4040     if (disp_table == NULL) return NULL;
4041 
4042     struct loader_instance *loader_inst = loader_get_instance(inst);
4043 
4044     // The VK_EXT_debug_utils functions need a special case here so the terminators can still be found from
4045     // vkGetInstanceProcAddr. This is because VK_EXT_debug_utils is an instance level extension with device level functions, and
4046     // is 'supported' by the loader.
4047     // These functions need a terminator to handle the case of a driver not supporting VK_EXT_debug_utils when there are layers
4048     // present which do not check for NULL before calling the function.
4049     if (!strcmp(pName, "vkSetDebugUtilsObjectNameEXT")) {
4050         return loader_inst->enabled_known_extensions.ext_debug_utils ? (PFN_vkVoidFunction)terminator_SetDebugUtilsObjectNameEXT
4051                                                                      : NULL;
4052     }
4053     if (!strcmp(pName, "vkSetDebugUtilsObjectTagEXT")) {
4054         return loader_inst->enabled_known_extensions.ext_debug_utils ? (PFN_vkVoidFunction)terminator_SetDebugUtilsObjectTagEXT
4055                                                                      : NULL;
4056     }
4057     if (!strcmp(pName, "vkQueueBeginDebugUtilsLabelEXT")) {
4058         return loader_inst->enabled_known_extensions.ext_debug_utils ? (PFN_vkVoidFunction)terminator_QueueBeginDebugUtilsLabelEXT
4059                                                                      : NULL;
4060     }
4061     if (!strcmp(pName, "vkQueueEndDebugUtilsLabelEXT")) {
4062         return loader_inst->enabled_known_extensions.ext_debug_utils ? (PFN_vkVoidFunction)terminator_QueueEndDebugUtilsLabelEXT
4063                                                                      : NULL;
4064     }
4065     if (!strcmp(pName, "vkQueueInsertDebugUtilsLabelEXT")) {
4066         return loader_inst->enabled_known_extensions.ext_debug_utils ? (PFN_vkVoidFunction)terminator_QueueInsertDebugUtilsLabelEXT
4067                                                                      : NULL;
4068     }
4069     if (!strcmp(pName, "vkCmdBeginDebugUtilsLabelEXT")) {
4070         return loader_inst->enabled_known_extensions.ext_debug_utils ? (PFN_vkVoidFunction)terminator_CmdBeginDebugUtilsLabelEXT
4071                                                                      : NULL;
4072     }
4073     if (!strcmp(pName, "vkCmdEndDebugUtilsLabelEXT")) {
4074         return loader_inst->enabled_known_extensions.ext_debug_utils ? (PFN_vkVoidFunction)terminator_CmdEndDebugUtilsLabelEXT
4075                                                                      : NULL;
4076     }
4077     if (!strcmp(pName, "vkCmdInsertDebugUtilsLabelEXT")) {
4078         return loader_inst->enabled_known_extensions.ext_debug_utils ? (PFN_vkVoidFunction)terminator_CmdInsertDebugUtilsLabelEXT
4079                                                                      : NULL;
4080     }
4081 
4082     if (loader_inst->instance_finished_creation) {
4083         disp_table = &loader_inst->terminator_dispatch;
4084     }
4085 
4086     bool found_name;
4087     void *addr = loader_lookup_instance_dispatch_table(disp_table, pName, &found_name);
4088     if (found_name) {
4089         return addr;
4090     }
4091 
4092     // Check if it is an unknown physical device function, to see if any drivers support it.
4093     addr = loader_phys_dev_ext_gpa_term(loader_get_instance(inst), pName);
4094     if (addr) {
4095         return addr;
4096     }
4097 
4098     // Assume it is an unknown device function, check to see if any drivers support it.
4099     addr = loader_dev_ext_gpa_term(loader_get_instance(inst), pName);
4100     if (addr) {
4101         return addr;
4102     }
4103 
4104     // Don't call down the chain, this would be an infinite loop
4105     loader_log(NULL, VULKAN_LOADER_DEBUG_BIT, 0, "loader_gpa_instance_terminator() unrecognized name %s", pName);
4106     return NULL;
4107 }
4108 
4109 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL loader_gpa_device_terminator(VkDevice device, const char *pName) {
4110     struct loader_device *dev;
4111     struct loader_icd_term *icd_term = loader_get_icd_and_device(device, &dev, NULL);
4112 
4113     // Return this function if a layer above here is asking for the vkGetDeviceProcAddr.
4114     // This is so we can properly intercept any device commands needing a terminator.
4115     if (!strcmp(pName, "vkGetDeviceProcAddr")) {
4116         return (PFN_vkVoidFunction)loader_gpa_device_terminator;
4117     }
4118 
4119     // NOTE: Device Funcs needing Trampoline/Terminator.
4120     // Overrides for device functions needing a trampoline and
4121     // a terminator because certain device entry-points still need to go
4122     // through a terminator before hitting the ICD.  This could be for
4123     // several reasons, but the main one is currently unwrapping an
4124     // object before passing the appropriate info along to the ICD.
4125     // This is why we also have to override the direct ICD call to
4126     // vkGetDeviceProcAddr to intercept those calls.
4127     // If the pName is for a 'known' function but isn't available, due to
4128     // the corresponding extension/feature not being enabled, we need to
4129     // return NULL and not call down to the driver's GetDeviceProcAddr.
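    // For example, WSI entry points such as vkCreateSwapchainKHR receive a VkSurfaceKHR that the loader created and
    // may need to unwrap before the call reaches the ICD, which is why get_extension_device_proc_terminator() below
    // can return a terminator for them.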
4130     if (NULL != dev) {
4131         bool found_name = false;
4132         PFN_vkVoidFunction addr = get_extension_device_proc_terminator(dev, pName, &found_name);
4133         if (found_name) {
4134             return addr;
4135         }
4136     }
4137 
4138     if (icd_term == NULL) {
4139         return NULL;
4140     }
4141 
4142     return icd_term->dispatch.GetDeviceProcAddr(device, pName);
4143 }
4144 
4145 struct loader_instance *loader_get_instance(const VkInstance instance) {
4146     // look up the loader_instance in our list by comparing dispatch tables, as
4147     // there is no guarantee the instance is still a loader_instance* after any
4148     // layers which wrap the instance object.
4149     const VkLayerInstanceDispatchTable *disp;
4150     struct loader_instance *ptr_instance = (struct loader_instance *)instance;
4151     if (VK_NULL_HANDLE == instance || LOADER_MAGIC_NUMBER != ptr_instance->magic) {
4152         return NULL;
4153     } else {
4154         disp = loader_get_instance_layer_dispatch(instance);
4155         loader_platform_thread_lock_mutex(&loader_global_instance_list_lock);
4156         for (struct loader_instance *inst = loader.instances; inst; inst = inst->next) {
4157             if (&inst->disp->layer_inst_disp == disp) {
4158                 ptr_instance = inst;
4159                 break;
4160             }
4161         }
4162         loader_platform_thread_unlock_mutex(&loader_global_instance_list_lock);
4163     }
4164     return ptr_instance;
4165 }
4166 
4167 loader_platform_dl_handle loader_open_layer_file(const struct loader_instance *inst, struct loader_layer_properties *prop) {
4168     char* libPath = prop->lib_name;
4169 #if defined(__OHOS__)
4170     char *debug_layer_name = loader_secure_getenv("debug.graphic.debug_layer", inst);
4171     char *debug_hap_name = loader_secure_getenv("debug.graphic.debug_hap", inst);
4172     bool isDebugLayer = false;
4173     char* debugLayerLibPath = NULL;
4174     if (NULL != debug_layer_name && '\0' != debug_layer_name[0] && InitBundleInfo(debug_hap_name)) {
4175         const char lib_prefix[] = "lib";
4176         const char so_suffix[] = ".so";
4177         size_t totalLen = strlen(debug_layer_name) + strlen(lib_prefix) + strlen(so_suffix) + 1;
4178         char* layerSoName = loader_instance_heap_calloc(inst, totalLen, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
4179         if (layerSoName == NULL) {
4180             loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_LAYER_BIT, 0, "malloc layerSoName fail");
4181             goto mallocErr;
4182         }
4183         strncpy(layerSoName, lib_prefix, totalLen);
4184         strncat(layerSoName, debug_layer_name, totalLen);
4185         strncat(layerSoName, so_suffix, totalLen);
4186         if (strcmp(layerSoName, libPath) == 0) {
4187             isDebugLayer = true;
4188             debugLayerLibPath = GetDebugLayerLibPath(inst, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
4189             if (debugLayerLibPath == NULL) {
4190                 loader_instance_heap_free(inst, layerSoName);
4191                 isDebugLayer = false;
4192                 goto mallocErr;
4193             }
4194             size_t totalLength = strlen(libPath) + strlen(debugLayerLibPath) + 1;
4195             libPath = loader_instance_heap_calloc(inst, totalLength, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
4196             if (libPath == NULL) {
4197                 loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_LAYER_BIT, 0, "malloc libPath fail");
4198                 loader_instance_heap_free(inst, layerSoName);
4199                 loader_instance_heap_free(inst, debugLayerLibPath);
4200                 libPath = prop->lib_name;
4201                 isDebugLayer = false;
4202                 goto mallocErr;
4203             }
4204             strncpy(libPath, debugLayerLibPath, totalLength);
4205             strncat(libPath, prop->lib_name, totalLength);
4206         } else {
4207             loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_LAYER_BIT, 0, "layerSoName != libPath : %s != %s",
4208                 layerSoName, libPath);
4209         }
4210         loader_instance_heap_free(inst, layerSoName);
4211     }
4212 mallocErr:
4213     loader_free_getenv(debug_layer_name, inst);
4214     loader_free_getenv(debug_hap_name, inst);
4215 #endif
4216     loader_log(inst, VULKAN_LOADER_DEBUG_BIT | VULKAN_LOADER_LAYER_BIT, 0, "try to open libPath: %s", libPath);
4217     if ((prop->lib_handle = loader_platform_open_library(libPath)) == NULL) {
4218         loader_handle_load_library_error(inst, prop->lib_name, &prop->lib_status);
4219     } else {
4220         prop->lib_status = LOADER_LAYER_LIB_SUCCESS_LOADED;
4221         loader_log(inst, VULKAN_LOADER_DEBUG_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Loading layer library %s", prop->lib_name);
4222     }
4223 #if defined(__OHOS__)
4224     if (isDebugLayer) {
4225         loader_instance_heap_free(inst, debugLayerLibPath);
4226         loader_instance_heap_free(inst, libPath);
4227     }
4228 #endif
4229     return prop->lib_handle;
4230 }
4231 
4232 // Go through the search_list and find any layers which match the type. If a layer
4233 // type match is found, then add it to ext_list.
4234 VkResult loader_add_implicit_layers(const struct loader_instance *inst, const struct loader_envvar_all_filters *filters,
4235                                     struct loader_pointer_layer_list *target_list,
4236                                     struct loader_pointer_layer_list *expanded_target_list,
4237                                     const struct loader_layer_list *source_list) {
4238     for (uint32_t src_layer = 0; src_layer < source_list->count; src_layer++) {
4239         struct loader_layer_properties *prop = &source_list->list[src_layer];
4240         if (0 == (prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER)) {
4241             VkResult result = loader_add_implicit_layer(inst, prop, filters, target_list, expanded_target_list, source_list);
4242             if (result == VK_ERROR_OUT_OF_HOST_MEMORY) return result;
4243         }
4244     }
4245     return VK_SUCCESS;
4246 }
4247 
4248 void warn_if_layers_are_older_than_application(struct loader_instance *inst) {
4249     for (uint32_t i = 0; i < inst->expanded_activated_layer_list.count; i++) {
4250         // Verify that the layer api version is at least that of the application's request; if not, throw a warning since
4251         // undefined behavior could occur.
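        // For example (illustrative), an application that requested apiVersion 1.3 while a layer reports a
        // specVersion of 1.1 triggers this warning.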
4252         struct loader_layer_properties *prop = inst->expanded_activated_layer_list.list[i];
4253         loader_api_version prop_spec_version = loader_make_version(prop->info.specVersion);
4254         if (!loader_check_version_meets_required(inst->app_api_version, prop_spec_version)) {
4255             loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
4256                        "Layer %s uses API version %u.%u which is older than the application specified "
4257                        "API version of %u.%u. May cause issues.",
4258                        prop->info.layerName, prop_spec_version.major, prop_spec_version.minor, inst->app_api_version.major,
4259                        inst->app_api_version.minor);
4260         }
4261     }
4262 }
4263 
4264 VkResult loader_enable_instance_layers(struct loader_instance *inst, const VkInstanceCreateInfo *pCreateInfo,
4265                                        const struct loader_layer_list *instance_layers,
4266                                        const struct loader_envvar_all_filters *layer_filters) {
4267     VkResult res = VK_SUCCESS;
4268 
4269     assert(inst && "Cannot have null instance");
4270 
4271     if (!loader_init_pointer_layer_list(inst, &inst->app_activated_layer_list)) {
4272         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
4273                    "loader_enable_instance_layers: Failed to initialize application version of the layer list");
4274         res = VK_ERROR_OUT_OF_HOST_MEMORY;
4275         goto out;
4276     }
4277 
4278     if (!loader_init_pointer_layer_list(inst, &inst->expanded_activated_layer_list)) {
4279         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
4280                    "loader_enable_instance_layers: Failed to initialize expanded version of the layer list");
4281         res = VK_ERROR_OUT_OF_HOST_MEMORY;
4282         goto out;
4283     }
4284 
4285     if (inst->settings.settings_active) {
4286         res = enable_correct_layers_from_settings(inst, layer_filters, pCreateInfo->enabledLayerCount,
4287                                                   pCreateInfo->ppEnabledLayerNames, &inst->instance_layer_list,
4288                                                   &inst->app_activated_layer_list, &inst->expanded_activated_layer_list);
4289         warn_if_layers_are_older_than_application(inst);
4290 
4291         goto out;
4292     }
4293 
4294     // Add any implicit layers first
4295     res = loader_add_implicit_layers(inst, layer_filters, &inst->app_activated_layer_list, &inst->expanded_activated_layer_list,
4296                                      instance_layers);
4297     if (res != VK_SUCCESS) {
4298         goto out;
4299     }
4300 
4301     // Add any layers specified via environment variable next
4302     res = loader_add_environment_layers(inst, VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER, layer_filters, &inst->app_activated_layer_list,
4303                                         &inst->expanded_activated_layer_list, instance_layers);
4304     if (res != VK_SUCCESS) {
4305         goto out;
4306     }
4307 
4308     // Add layers specified by the application
4309     res = loader_add_layer_names_to_list(inst, layer_filters, &inst->app_activated_layer_list, &inst->expanded_activated_layer_list,
4310                                          pCreateInfo->enabledLayerCount, pCreateInfo->ppEnabledLayerNames, instance_layers);
4311 
4312     warn_if_layers_are_older_than_application(inst);
4313 out:
4314     return res;
4315 }
4316 
4317 // Determine the layer interface version to use.
4318 bool loader_get_layer_interface_version(PFN_vkNegotiateLoaderLayerInterfaceVersion fp_negotiate_layer_version,
4319                                         VkNegotiateLayerInterface *interface_struct) {
4320     memset(interface_struct, 0, sizeof(VkNegotiateLayerInterface));
4321     interface_struct->sType = LAYER_NEGOTIATE_INTERFACE_STRUCT;
4322     interface_struct->loaderLayerInterfaceVersion = 1;
4323     interface_struct->pNext = NULL;
4324 
4325     if (fp_negotiate_layer_version != NULL) {
4326         // Layer supports the negotiation API, so call it with the loader's
4327         // latest version supported
4328         interface_struct->loaderLayerInterfaceVersion = CURRENT_LOADER_LAYER_INTERFACE_VERSION;
4329         VkResult result = fp_negotiate_layer_version(interface_struct);
4330 
4331         if (result != VK_SUCCESS) {
4332             // Layer no longer supports the loader's latest interface version so
4333             // fail loading the Layer
4334             return false;
4335         }
4336     }
4337 
4338     if (interface_struct->loaderLayerInterfaceVersion < MIN_SUPPORTED_LOADER_LAYER_INTERFACE_VERSION) {
4339         // Loader no longer supports the layer's latest interface version so
4340         // fail loading the layer
4341         return false;
4342     }
4343 
4344     return true;
4345 }
4346 
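/*
 * Illustrative sketch only (not part of this file): roughly how a layer implements the negotiation
 * entry point that loader_get_layer_interface_version() calls above. Names prefixed with "example_"
 * are hypothetical stand-ins for the layer's own dispatch functions; the version 2 choice is an
 * assumption for the sketch.
 */
#if 0  // example, not compiled into the loader
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL example_layer_GetInstanceProcAddr(VkInstance instance, const char *pName);  // hypothetical
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL example_layer_GetDeviceProcAddr(VkDevice device, const char *pName);        // hypothetical

VKAPI_ATTR VkResult VKAPI_CALL vkNegotiateLoaderLayerInterfaceVersion(VkNegotiateLayerInterface *pVersionStruct) {
    if (pVersionStruct == NULL || pVersionStruct->sType != LAYER_NEGOTIATE_INTERFACE_STRUCT) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }
    // The loader passes in the newest interface version it supports; the layer lowers it to the
    // version it actually implements and, for version 2 or newer, fills in its entry points.
    if (pVersionStruct->loaderLayerInterfaceVersion >= 2) {
        pVersionStruct->loaderLayerInterfaceVersion = 2;
        pVersionStruct->pfnGetInstanceProcAddr = example_layer_GetInstanceProcAddr;
        pVersionStruct->pfnGetDeviceProcAddr = example_layer_GetDeviceProcAddr;
        pVersionStruct->pfnGetPhysicalDeviceProcAddr = NULL;  // optional for a layer to provide
    }
    return VK_SUCCESS;
}
#endif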
4347 // Every extension that has a loader-defined trampoline needs to be marked as enabled or disabled so that we know whether or
4348 // not to return that trampoline when vkGetDeviceProcAddr is called
4349 void setup_logical_device_enabled_layer_extensions(const struct loader_instance *inst, struct loader_device *dev,
4350                                                    const struct loader_extension_list *icd_exts,
4351                                                    const VkDeviceCreateInfo *pCreateInfo) {
4352     // Can only set up debug marker here, since debug utils is an instance extension.
4353     for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; ++i) {
4354         if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_EXT_DEBUG_MARKER_EXTENSION_NAME)) {
4355             // Check if it's supported by the driver
4356             for (uint32_t j = 0; j < icd_exts->count; ++j) {
4357                 if (!strcmp(icd_exts->list[j].extensionName, VK_EXT_DEBUG_MARKER_EXTENSION_NAME)) {
4358                     dev->layer_extensions.ext_debug_marker_enabled = true;
4359                 }
4360             }
4361             // also check if any layers support it.
4362             for (uint32_t j = 0; j < inst->app_activated_layer_list.count; j++) {
4363                 struct loader_layer_properties *layer = inst->app_activated_layer_list.list[j];
4364                 for (uint32_t k = 0; k < layer->device_extension_list.count; k++) {
4365                     if (!strcmp(layer->device_extension_list.list[k].props.extensionName, VK_EXT_DEBUG_MARKER_EXTENSION_NAME)) {
4366                         dev->layer_extensions.ext_debug_marker_enabled = true;
4367                     }
4368                 }
4369             }
4370         }
4371     }
4372 }
4373 
4374 VKAPI_ATTR VkResult VKAPI_CALL loader_layer_create_device(VkInstance instance, VkPhysicalDevice physicalDevice,
4375                                                           const VkDeviceCreateInfo *pCreateInfo,
4376                                                           const VkAllocationCallbacks *pAllocator, VkDevice *pDevice,
4377                                                           PFN_vkGetInstanceProcAddr layerGIPA, PFN_vkGetDeviceProcAddr *nextGDPA) {
4378     VkResult res;
4379     VkPhysicalDevice internal_device = VK_NULL_HANDLE;
4380     struct loader_device *dev = NULL;
4381     struct loader_instance *inst = NULL;
4382 
4383     if (instance != VK_NULL_HANDLE) {
4384         inst = loader_get_instance(instance);
4385         internal_device = physicalDevice;
4386     } else {
4387         struct loader_physical_device_tramp *phys_dev = (struct loader_physical_device_tramp *)physicalDevice;
4388         internal_device = phys_dev->phys_dev;
4389         inst = (struct loader_instance *)phys_dev->this_instance;
4390     }
4391 
4392     // Get the physical device (ICD) extensions
4393     struct loader_extension_list icd_exts = {0};
4394     icd_exts.list = NULL;
4395     res = loader_init_generic_list(inst, (struct loader_generic_list *)&icd_exts, sizeof(VkExtensionProperties));
4396     if (VK_SUCCESS != res) {
4397         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "vkCreateDevice: Failed to create ICD extension list");
4398         goto out;
4399     }
4400 
4401     PFN_vkEnumerateDeviceExtensionProperties enumDeviceExtensionProperties = NULL;
4402     if (layerGIPA != NULL) {
4403         enumDeviceExtensionProperties =
4404             (PFN_vkEnumerateDeviceExtensionProperties)layerGIPA(instance, "vkEnumerateDeviceExtensionProperties");
4405     } else {
4406         enumDeviceExtensionProperties = inst->disp->layer_inst_disp.EnumerateDeviceExtensionProperties;
4407     }
4408     res = loader_add_device_extensions(inst, enumDeviceExtensionProperties, internal_device, "Unknown", &icd_exts);
4409     if (res != VK_SUCCESS) {
4410         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "vkCreateDevice: Failed to add extensions to list");
4411         goto out;
4412     }
4413 
4414     // Make sure requested extensions to be enabled are supported
4415     res = loader_validate_device_extensions(inst, &inst->expanded_activated_layer_list, &icd_exts, pCreateInfo);
4416     if (res != VK_SUCCESS) {
4417         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "vkCreateDevice: Failed to validate extensions in list");
4418         goto out;
4419     }
4420 
4421     dev = loader_create_logical_device(inst, pAllocator);
4422     if (dev == NULL) {
4423         res = VK_ERROR_OUT_OF_HOST_MEMORY;
4424         goto out;
4425     }
4426 
4427     setup_logical_device_enabled_layer_extensions(inst, dev, &icd_exts, pCreateInfo);
4428 
4429     res = loader_create_device_chain(internal_device, pCreateInfo, pAllocator, inst, dev, layerGIPA, nextGDPA);
4430     if (res != VK_SUCCESS) {
4431         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "vkCreateDevice:  Failed to create device chain.");
4432         goto out;
4433     }
4434 
4435     *pDevice = dev->chain_device;
4436 
4437     // Initialize any device extension dispatch entries from the instance list
4438     loader_init_dispatch_dev_ext(inst, dev);
4439 
4440     // Initialize WSI device extensions as part of core dispatch since loader
4441     // has dedicated trampoline code for these
4442     loader_init_device_extension_dispatch_table(&dev->loader_dispatch, inst->disp->layer_inst_disp.GetInstanceProcAddr,
4443                                                 dev->loader_dispatch.core_dispatch.GetDeviceProcAddr, inst->instance, *pDevice);
4444 
4445 out:
4446 
4447     // Failure cleanup
4448     if (VK_SUCCESS != res) {
4449         if (NULL != dev) {
4450             // Find the icd_term this device belongs to then remove it from that icd_term.
4451             // Need to iterate the linked lists and remove the device from it. Don't delete
4452             // the device here since it may not have been added to the icd_term and there
4453             // are other allocations attached to it.
4454             struct loader_icd_term *icd_term = inst->icd_terms;
4455             bool found = false;
4456             while (!found && NULL != icd_term) {
4457                 struct loader_device *cur_dev = icd_term->logical_device_list;
4458                 struct loader_device *prev_dev = NULL;
4459                 while (NULL != cur_dev) {
4460                     if (cur_dev == dev) {
4461                         if (cur_dev == icd_term->logical_device_list) {
4462                             icd_term->logical_device_list = cur_dev->next;
4463                         } else if (prev_dev) {
4464                             prev_dev->next = cur_dev->next;
4465                         }
4466 
4467                         found = true;
4468                         break;
4469                     }
4470                     prev_dev = cur_dev;
4471                     cur_dev = cur_dev->next;
4472                 }
4473                 icd_term = icd_term->next;
4474             }
4475             // Now destroy the device and the allocations associated with it.
4476             loader_destroy_logical_device(dev, pAllocator);
4477         }
4478     }
4479 
4480     if (NULL != icd_exts.list) {
4481         loader_destroy_generic_list(inst, (struct loader_generic_list *)&icd_exts);
4482     }
4483     return res;
4484 }
4485 
4486 VKAPI_ATTR void VKAPI_CALL loader_layer_destroy_device(VkDevice device, const VkAllocationCallbacks *pAllocator,
4487                                                        PFN_vkDestroyDevice destroyFunction) {
4488     struct loader_device *dev;
4489 
4490     if (device == VK_NULL_HANDLE) {
4491         return;
4492     }
4493 
4494     struct loader_icd_term *icd_term = loader_get_icd_and_device(device, &dev, NULL);
4495 
4496     destroyFunction(device, pAllocator);
4497     if (NULL != dev) {
4498         dev->chain_device = NULL;
4499         dev->icd_device = NULL;
4500         loader_remove_logical_device(icd_term, dev, pAllocator);
4501     }
4502 }
4503 
4504 // Given the list of layers to activate in the loader_instance
4505 // structure, this function adds a VkLayerInstanceCreateInfo
4506 // structure to the VkInstanceCreateInfo.pNext pointer.
4507 // Each activated layer gets its own VkLayerInstanceLink
4508 // structure that tells the layer which Get*ProcAddr to call to
4509 // get function pointers to the next layer down.
4510 // Once the chain info has been created, this function
4511 // executes the CreateInstance call chain. Each layer then
4512 // has an opportunity in its CreateInstance function
4513 // to set up its dispatch table when the lower layer returns
4514 // successfully.
4515 // Each layer can wrap or not wrap the returned VkInstance object
4516 // as it sees fit.
4517 // The instance chain is terminated by a loader function
4518 // that calls CreateInstance on all available ICDs and
4519 // caches those VkInstance objects for future use.
4520 VkResult loader_create_instance_chain(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
4521                                       struct loader_instance *inst, VkInstance *created_instance) {
4522     uint32_t num_activated_layers = 0;
4523     struct activated_layer_info *activated_layers = NULL;
4524     VkLayerInstanceCreateInfo chain_info;
4525     VkLayerInstanceLink *layer_instance_link_info = NULL;
4526     VkInstanceCreateInfo loader_create_info;
4527     VkResult res;
4528 
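    // These all start out pointing at the loader's terminator functions; as each layer is wired into the
    // chain below, the "next_*" pointers are advanced so that every layer calls down into the layer (or
    // terminator) beneath it.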
4529     PFN_vkGetInstanceProcAddr next_gipa = loader_gpa_instance_terminator;
4530     PFN_vkGetInstanceProcAddr cur_gipa = loader_gpa_instance_terminator;
4531     PFN_vkGetDeviceProcAddr cur_gdpa = loader_gpa_device_terminator;
4532     PFN_GetPhysicalDeviceProcAddr next_gpdpa = loader_gpdpa_instance_terminator;
4533     PFN_GetPhysicalDeviceProcAddr cur_gpdpa = loader_gpdpa_instance_terminator;
4534 
4535     memcpy(&loader_create_info, pCreateInfo, sizeof(VkInstanceCreateInfo));
4536 
4537     if (inst->expanded_activated_layer_list.count > 0) {
4538         chain_info.u.pLayerInfo = NULL;
4539         chain_info.pNext = pCreateInfo->pNext;
4540         chain_info.sType = VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO;
4541         chain_info.function = VK_LAYER_LINK_INFO;
4542         loader_create_info.pNext = &chain_info;
4543 
4544         layer_instance_link_info = loader_stack_alloc(sizeof(VkLayerInstanceLink) * inst->expanded_activated_layer_list.count);
4545         if (!layer_instance_link_info) {
4546             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
4547                        "loader_create_instance_chain: Failed to alloc Instance objects for layer");
4548             return VK_ERROR_OUT_OF_HOST_MEMORY;
4549         }
4550 
4551         activated_layers = loader_stack_alloc(sizeof(struct activated_layer_info) * inst->expanded_activated_layer_list.count);
4552         if (!activated_layers) {
4553             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
4554                        "loader_create_instance_chain: Failed to alloc activated layer storage array");
4555             return VK_ERROR_OUT_OF_HOST_MEMORY;
4556         }
4557 
4558         // Create instance chain of enabled layers
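        // Iterate in reverse so the chain is built bottom-up: the last entry processed (index 0) ends up
        // closest to the application, while the first layer linked sits directly above the terminator.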
4559         for (int32_t i = inst->expanded_activated_layer_list.count - 1; i >= 0; i--) {
4560             struct loader_layer_properties *layer_prop = inst->expanded_activated_layer_list.list[i];
4561             loader_platform_dl_handle lib_handle;
4562 
4563             // Skip it if a layer with the same name has already been successfully activated
4564             if (loader_names_array_has_layer_property(&layer_prop->info, num_activated_layers, activated_layers)) {
4565                 continue;
4566             }
4567 
4568             lib_handle = loader_open_layer_file(inst, layer_prop);
4569             if (layer_prop->lib_status == LOADER_LAYER_LIB_ERROR_OUT_OF_MEMORY) {
4570                 return VK_ERROR_OUT_OF_HOST_MEMORY;
4571             }
4572             if (!lib_handle) {
4573                 continue;
4574             }
4575 
4576             if (NULL == layer_prop->functions.negotiate_layer_interface) {
4577                 PFN_vkNegotiateLoaderLayerInterfaceVersion negotiate_interface = NULL;
4578                 bool functions_in_interface = false;
4579                 if (!layer_prop->functions.str_negotiate_interface || strlen(layer_prop->functions.str_negotiate_interface) == 0) {
4580                     negotiate_interface = (PFN_vkNegotiateLoaderLayerInterfaceVersion)loader_platform_get_proc_address(
4581                         lib_handle, "vkNegotiateLoaderLayerInterfaceVersion");
4582                 } else {
4583                     negotiate_interface = (PFN_vkNegotiateLoaderLayerInterfaceVersion)loader_platform_get_proc_address(
4584                         lib_handle, layer_prop->functions.str_negotiate_interface);
4585                 }
4586 
4587                 // If we can negotiate an interface version, then we can also
4588                 // get everything we need from the one function call, so try
4589                 // that first, and see if we can get all the function pointers
4590                 // necessary from that one call.
4591                 if (NULL != negotiate_interface) {
4592                     layer_prop->functions.negotiate_layer_interface = negotiate_interface;
4593 
4594                     VkNegotiateLayerInterface interface_struct;
4595 
4596                     if (loader_get_layer_interface_version(negotiate_interface, &interface_struct)) {
4597                         // Go ahead and set the properties version to the
4598                         // correct value.
4599                         layer_prop->interface_version = interface_struct.loaderLayerInterfaceVersion;
4600 
4601                         // If the interface is 2 or newer, we have access to the
4602                         // new GetPhysicalDeviceProcAddr function, so grab it,
4603                         // and the other necessary functions, from the
4604                         // structure.
4605                         if (interface_struct.loaderLayerInterfaceVersion > 1) {
4606                             cur_gipa = interface_struct.pfnGetInstanceProcAddr;
4607                             cur_gdpa = interface_struct.pfnGetDeviceProcAddr;
4608                             cur_gpdpa = interface_struct.pfnGetPhysicalDeviceProcAddr;
4609                             if (cur_gipa != NULL) {
4610                                 // We've set the functions, so make sure we
4611                                 // don't do the unnecessary calls later.
4612                                 functions_in_interface = true;
4613                             }
4614                         }
4615                     }
4616                 }
4617 
4618                 if (!functions_in_interface) {
4619                     if ((cur_gipa = layer_prop->functions.get_instance_proc_addr) == NULL) {
4620                         if (layer_prop->functions.str_gipa == NULL || strlen(layer_prop->functions.str_gipa) == 0) {
4621                             cur_gipa =
4622                                 (PFN_vkGetInstanceProcAddr)loader_platform_get_proc_address(lib_handle, "vkGetInstanceProcAddr");
4623                             layer_prop->functions.get_instance_proc_addr = cur_gipa;
4624 
4625                             if (NULL == cur_gipa) {
4626                                 loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_LAYER_BIT, 0,
4627                                            "loader_create_instance_chain: Failed to find \'vkGetInstanceProcAddr\' in layer \"%s\"",
4628                                            layer_prop->lib_name);
4629                                 continue;
4630                             }
4631                         } else {
4632                             cur_gipa = (PFN_vkGetInstanceProcAddr)loader_platform_get_proc_address(lib_handle,
4633                                                                                                    layer_prop->functions.str_gipa);
4634 
4635                             if (NULL == cur_gipa) {
4636                                 loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_LAYER_BIT, 0,
4637                                            "loader_create_instance_chain: Failed to find \'%s\' in layer \"%s\"",
4638                                            layer_prop->functions.str_gipa, layer_prop->lib_name);
4639                                 continue;
4640                             }
4641                         }
4642                     }
4643                 }
4644             }
4645 
4646             layer_instance_link_info[num_activated_layers].pNext = chain_info.u.pLayerInfo;
4647             layer_instance_link_info[num_activated_layers].pfnNextGetInstanceProcAddr = next_gipa;
4648             layer_instance_link_info[num_activated_layers].pfnNextGetPhysicalDeviceProcAddr = next_gpdpa;
4649             next_gipa = cur_gipa;
4650             if (layer_prop->interface_version > 1 && cur_gpdpa != NULL) {
4651                 layer_prop->functions.get_physical_device_proc_addr = cur_gpdpa;
4652                 next_gpdpa = cur_gpdpa;
4653             }
4654             if (layer_prop->interface_version > 1 && cur_gipa != NULL) {
4655                 layer_prop->functions.get_instance_proc_addr = cur_gipa;
4656             }
4657             if (layer_prop->interface_version > 1 && cur_gdpa != NULL) {
4658                 layer_prop->functions.get_device_proc_addr = cur_gdpa;
4659             }
4660 
4661             chain_info.u.pLayerInfo = &layer_instance_link_info[num_activated_layers];
4662 
4663             activated_layers[num_activated_layers].name = layer_prop->info.layerName;
4664             activated_layers[num_activated_layers].manifest = layer_prop->manifest_file_name;
4665             activated_layers[num_activated_layers].library = layer_prop->lib_name;
4666             activated_layers[num_activated_layers].is_implicit = !(layer_prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER);
4667             if (activated_layers[num_activated_layers].is_implicit) {
4668                 activated_layers[num_activated_layers].disable_env = layer_prop->disable_env_var.name;
4669             }
4670 
4671             loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Insert instance layer \"%s\" (%s)",
4672                        layer_prop->info.layerName, layer_prop->lib_name);
4673 
4674             num_activated_layers++;
4675         }
4676     }
4677 
4678     // Make sure each layer requested by the application was actually loaded
4679     for (uint32_t exp = 0; exp < inst->expanded_activated_layer_list.count; ++exp) {
4680         struct loader_layer_properties *exp_layer_prop = inst->expanded_activated_layer_list.list[exp];
4681         bool found = false;
4682         for (uint32_t act = 0; act < num_activated_layers; ++act) {
4683             if (!strcmp(activated_layers[act].name, exp_layer_prop->info.layerName)) {
4684                 found = true;
4685                 break;
4686             }
4687         }
4688         // If it wasn't found, we want to at least log an error.  However, if it was enabled by the application directly,
4689         // we want to return a bad layer error.
4690         if (!found) {
4691             bool app_requested = false;
4692             for (uint32_t act = 0; act < pCreateInfo->enabledLayerCount; ++act) {
4693                 if (!strcmp(pCreateInfo->ppEnabledLayerNames[act], exp_layer_prop->info.layerName)) {
4694                     app_requested = true;
4695                     break;
4696                 }
4697             }
4698             VkFlags log_flag = VULKAN_LOADER_LAYER_BIT;
4699             char ending = '.';
4700             if (app_requested) {
4701                 log_flag |= VULKAN_LOADER_ERROR_BIT;
4702                 ending = '!';
4703             } else {
4704                 log_flag |= VULKAN_LOADER_INFO_BIT;
4705             }
4706             switch (exp_layer_prop->lib_status) {
4707                 case LOADER_LAYER_LIB_NOT_LOADED:
4708                     loader_log(inst, log_flag, 0, "Requested layer \"%s\" was not loaded%c", exp_layer_prop->info.layerName,
4709                                ending);
4710                     break;
4711                 case LOADER_LAYER_LIB_ERROR_WRONG_BIT_TYPE: {
4712                     loader_log(inst, log_flag, 0, "Requested layer \"%s\" was wrong bit-type%c", exp_layer_prop->info.layerName,
4713                                ending);
4714                     break;
4715                 }
4716                 case LOADER_LAYER_LIB_ERROR_FAILED_TO_LOAD:
4717                     loader_log(inst, log_flag, 0, "Requested layer \"%s\" failed to load%c", exp_layer_prop->info.layerName,
4718                                ending);
4719                     break;
4720                 case LOADER_LAYER_LIB_SUCCESS_LOADED:
4721                 case LOADER_LAYER_LIB_ERROR_OUT_OF_MEMORY:
4722                     // Shouldn't be able to reach this, but if we do, it's best to log a debug message
4723                     loader_log(inst, log_flag, 0,
4724                                "Shouldn't reach this. A valid version of requested layer %s was loaded but was not found in the "
4725                                "list of activated layers%c",
4726                                exp_layer_prop->info.layerName, ending);
4727                     break;
4728             }
4729             if (app_requested) {
4730                 return VK_ERROR_LAYER_NOT_PRESENT;
4731             }
4732         }
4733     }
4734 
4735     VkLoaderFeatureFlags feature_flags = 0;
4736 #if defined(_WIN32)
4737     feature_flags = windows_initialize_dxgi();
4738 #endif
4739 
4740     PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)next_gipa(*created_instance, "vkCreateInstance");
4741     if (fpCreateInstance) {
4742         VkLayerInstanceCreateInfo instance_dispatch;
4743         instance_dispatch.sType = VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO;
4744         instance_dispatch.pNext = loader_create_info.pNext;
4745         instance_dispatch.function = VK_LOADER_DATA_CALLBACK;
4746         instance_dispatch.u.pfnSetInstanceLoaderData = vkSetInstanceDispatch;
4747 
4748         VkLayerInstanceCreateInfo device_callback;
4749         device_callback.sType = VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO;
4750         device_callback.pNext = &instance_dispatch;
4751         device_callback.function = VK_LOADER_LAYER_CREATE_DEVICE_CALLBACK;
4752         device_callback.u.layerDevice.pfnLayerCreateDevice = loader_layer_create_device;
4753         device_callback.u.layerDevice.pfnLayerDestroyDevice = loader_layer_destroy_device;
4754 
4755         VkLayerInstanceCreateInfo loader_features;
4756         loader_features.sType = VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO;
4757         loader_features.pNext = &device_callback;
4758         loader_features.function = VK_LOADER_FEATURES;
4759         loader_features.u.loaderFeatures = feature_flags;
4760 
4761         loader_create_info.pNext = &loader_features;
4762 
4763         // If layer debugging is enabled, let's print out the full callstack with layers in their
4764         // defined order.
4765         loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "vkCreateInstance layer callstack setup to:");
4766         loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "   <Application>");
4767         loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "     ||");
4768         loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "   <Loader>");
4769         loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "     ||");
4770         for (uint32_t cur_layer = 0; cur_layer < num_activated_layers; ++cur_layer) {
4771             uint32_t index = num_activated_layers - cur_layer - 1;
4772             loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "   %s", activated_layers[index].name);
4773             loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "           Type: %s",
4774                        activated_layers[index].is_implicit ? "Implicit" : "Explicit");
4775             if (activated_layers[index].is_implicit) {
4776                 loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "               Disable Env Var:  %s",
4777                            activated_layers[index].disable_env);
4778             }
4779             loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "           Manifest: %s", activated_layers[index].manifest);
4780             loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "           Library:  %s", activated_layers[index].library);
4781             loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "     ||");
4782         }
4783         loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "   <Drivers>");
4784 
4785         res = fpCreateInstance(&loader_create_info, pAllocator, created_instance);
4786     } else {
4787         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_create_instance_chain: Failed to find \'vkCreateInstance\'");
4788         // Couldn't find CreateInstance function!
4789         res = VK_ERROR_INITIALIZATION_FAILED;
4790     }
4791 
4792     if (res == VK_SUCCESS) {
4793         // Copy the current disp table into the terminator_dispatch table so we can use it in loader_gpa_instance_terminator()
4794         memcpy(&inst->terminator_dispatch, &inst->disp->layer_inst_disp, sizeof(VkLayerInstanceDispatchTable));
4795 
4796         loader_init_instance_core_dispatch_table(&inst->disp->layer_inst_disp, next_gipa, *created_instance);
4797         inst->instance = *created_instance;
4798 
4799         if (pCreateInfo->enabledLayerCount > 0 && pCreateInfo->ppEnabledLayerNames != NULL) {
4800             res = create_string_list(inst, pCreateInfo->enabledLayerCount, &inst->enabled_layer_names);
4801             if (res != VK_SUCCESS) {
4802                 return res;
4803             }
4804 
4805             for (uint32_t i = 0; i < pCreateInfo->enabledLayerCount; ++i) {
4806                 res = copy_str_to_string_list(inst, &inst->enabled_layer_names, pCreateInfo->ppEnabledLayerNames[i],
4807                                               strlen(pCreateInfo->ppEnabledLayerNames[i]));
4808                 if (res != VK_SUCCESS) return res;
4809             }
4810         }
4811     }
4812 
4813     return res;
4814 }
4815 
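/*
 * Illustrative sketch only (not loader code): the layer-side counterpart of the chain built above.
 * A layer's vkCreateInstance finds the VK_LAYER_LINK_INFO structure the loader placed in the pNext
 * chain, grabs the next GetInstanceProcAddr, advances the link for the layer below, and calls down.
 * "example_layer_CreateInstance" is a hypothetical name.
 */
#if 0  // example, not compiled into the loader
VKAPI_ATTR VkResult VKAPI_CALL example_layer_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
                                                            const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
    VkLayerInstanceCreateInfo *chain_info = (VkLayerInstanceCreateInfo *)pCreateInfo->pNext;
    while (chain_info != NULL && !(chain_info->sType == VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO &&
                                   chain_info->function == VK_LAYER_LINK_INFO)) {
        chain_info = (VkLayerInstanceCreateInfo *)chain_info->pNext;
    }
    if (chain_info == NULL || chain_info->u.pLayerInfo == NULL) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }
    PFN_vkGetInstanceProcAddr next_gipa = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    // Advance the link so the next layer down finds its own entry when it repeats this walk.
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
    PFN_vkCreateInstance next_create_instance = (PFN_vkCreateInstance)next_gipa(VK_NULL_HANDLE, "vkCreateInstance");
    if (next_create_instance == NULL) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }
    return next_create_instance(pCreateInfo, pAllocator, pInstance);
}
#endif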
4816 void loader_activate_instance_layer_extensions(struct loader_instance *inst, VkInstance created_inst) {
4817     loader_init_instance_extension_dispatch_table(&inst->disp->layer_inst_disp, inst->disp->layer_inst_disp.GetInstanceProcAddr,
4818                                                   created_inst);
4819 }
4820 
4821 #if defined(__APPLE__)
4822 VkResult loader_create_device_chain(const VkPhysicalDevice pd, const VkDeviceCreateInfo *pCreateInfo,
4823                                     const VkAllocationCallbacks *pAllocator, const struct loader_instance *inst,
4824                                     struct loader_device *dev, PFN_vkGetInstanceProcAddr callingLayer,
4825                                     PFN_vkGetDeviceProcAddr *layerNextGDPA) __attribute__((optnone)) {
4826 #else
4827 VkResult loader_create_device_chain(const VkPhysicalDevice pd, const VkDeviceCreateInfo *pCreateInfo,
4828                                     const VkAllocationCallbacks *pAllocator, const struct loader_instance *inst,
4829                                     struct loader_device *dev, PFN_vkGetInstanceProcAddr callingLayer,
4830                                     PFN_vkGetDeviceProcAddr *layerNextGDPA) {
4831 #endif
4832     uint32_t num_activated_layers = 0;
4833     struct activated_layer_info *activated_layers = NULL;
4834     VkLayerDeviceLink *layer_device_link_info;
4835     VkLayerDeviceCreateInfo chain_info;
4836     VkDeviceCreateInfo loader_create_info;
4837     VkDeviceGroupDeviceCreateInfo *original_device_group_create_info_struct = NULL;
4838     VkResult res;
4839 
4840     PFN_vkGetDeviceProcAddr fpGDPA = NULL, nextGDPA = loader_gpa_device_terminator;
4841     PFN_vkGetInstanceProcAddr fpGIPA = NULL, nextGIPA = loader_gpa_instance_terminator;
4842 
4843     memcpy(&loader_create_info, pCreateInfo, sizeof(VkDeviceCreateInfo));
4844 
4845     if (loader_create_info.enabledLayerCount > 0 && loader_create_info.ppEnabledLayerNames != NULL) {
4846         bool invalid_device_layer_usage = false;
4847 
4848         if (loader_create_info.enabledLayerCount != inst->enabled_layer_names.count && loader_create_info.enabledLayerCount > 0) {
4849             invalid_device_layer_usage = true;
4850         } else if (loader_create_info.enabledLayerCount > 0 && loader_create_info.ppEnabledLayerNames == NULL) {
4851             invalid_device_layer_usage = true;
4852         } else if (loader_create_info.enabledLayerCount == 0 && loader_create_info.ppEnabledLayerNames != NULL) {
4853             invalid_device_layer_usage = true;
4854         } else if (inst->enabled_layer_names.list != NULL) {
4855             for (uint32_t i = 0; i < loader_create_info.enabledLayerCount; i++) {
4856                 const char *device_layer_names = loader_create_info.ppEnabledLayerNames[i];
4857 
4858                 if (strcmp(device_layer_names, inst->enabled_layer_names.list[i]) != 0) {
4859                     invalid_device_layer_usage = true;
4860                     break;
4861                 }
4862             }
4863         }
4864 
4865         if (invalid_device_layer_usage) {
4866             loader_log(
4867                 inst, VULKAN_LOADER_WARN_BIT, 0,
4868                 "loader_create_device_chain: Using deprecated and ignored 'ppEnabledLayerNames' member of 'VkDeviceCreateInfo' "
4869                 "when creating a Vulkan device.");
4870         }
4871     }
4872 
4873     // Before we continue, we need to find out if the KHR_device_group extension is in the enabled list.  If it is, we then
4874     // need to look for the corresponding VkDeviceGroupDeviceCreateInfo struct in the device list.  This is because we
4875     // need to replace all the incoming physical device values (which are really loader trampoline physical device values)
4876     // with the layer/ICD version.
4877     {
4878         VkBaseOutStructure *pNext = (VkBaseOutStructure *)loader_create_info.pNext;
4879         VkBaseOutStructure *pPrev = (VkBaseOutStructure *)&loader_create_info;
4880         while (NULL != pNext) {
4881             if (VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO == pNext->sType) {
4882                 VkDeviceGroupDeviceCreateInfo *cur_struct = (VkDeviceGroupDeviceCreateInfo *)pNext;
4883                 if (0 < cur_struct->physicalDeviceCount && NULL != cur_struct->pPhysicalDevices) {
4884                     VkDeviceGroupDeviceCreateInfo *temp_struct = loader_stack_alloc(sizeof(VkDeviceGroupDeviceCreateInfo));
4885                     VkPhysicalDevice *phys_dev_array = NULL;
4886                     if (NULL == temp_struct) {
4887                         return VK_ERROR_OUT_OF_HOST_MEMORY;
4888                     }
4889                     memcpy(temp_struct, cur_struct, sizeof(VkDeviceGroupDeviceCreateInfo));
4890                     phys_dev_array = loader_stack_alloc(sizeof(VkPhysicalDevice) * cur_struct->physicalDeviceCount);
4891                     if (NULL == phys_dev_array) {
4892                         return VK_ERROR_OUT_OF_HOST_MEMORY;
4893                     }
4894 
4895                     // Before calling down, replace the incoming physical device values (which are really loader trampoline
4896                     // physical devices) with the next layer (or possibly even the terminator) physical device values.
4897                     struct loader_physical_device_tramp *cur_tramp;
4898                     for (uint32_t phys_dev = 0; phys_dev < cur_struct->physicalDeviceCount; phys_dev++) {
4899                         cur_tramp = (struct loader_physical_device_tramp *)cur_struct->pPhysicalDevices[phys_dev];
4900                         phys_dev_array[phys_dev] = cur_tramp->phys_dev;
4901                     }
4902                     temp_struct->pPhysicalDevices = phys_dev_array;
4903 
4904                     original_device_group_create_info_struct = (VkDeviceGroupDeviceCreateInfo *)pPrev->pNext;
4905 
4906                     // Replace the old struct in the pNext chain with this one.
4907                     pPrev->pNext = (VkBaseOutStructure *)temp_struct;
4908                 }
4909                 break;
4910             }
4911 
4912             pPrev = pNext;
4913             pNext = pNext->pNext;
4914         }
4915     }
4916     if (inst->expanded_activated_layer_list.count > 0) {
4917         layer_device_link_info = loader_stack_alloc(sizeof(VkLayerDeviceLink) * inst->expanded_activated_layer_list.count);
4918         if (!layer_device_link_info) {
4919             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
4920                        "loader_create_device_chain: Failed to alloc Device objects for layer. Skipping Layer.");
4921             return VK_ERROR_OUT_OF_HOST_MEMORY;
4922         }
4923 
4924         activated_layers = loader_stack_alloc(sizeof(struct activated_layer_info) * inst->expanded_activated_layer_list.count);
4925         if (!activated_layers) {
4926             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
4927                        "loader_create_device_chain: Failed to alloc activated layer storage array");
4928             return VK_ERROR_OUT_OF_HOST_MEMORY;
4929         }
4930 
4931         chain_info.sType = VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO;
4932         chain_info.function = VK_LAYER_LINK_INFO;
4933         chain_info.u.pLayerInfo = NULL;
4934         chain_info.pNext = loader_create_info.pNext;
4935         loader_create_info.pNext = &chain_info;
4936 
4937         // Create device chain of enabled layers
4938         for (int32_t i = inst->expanded_activated_layer_list.count - 1; i >= 0; i--) {
4939             struct loader_layer_properties *layer_prop = inst->expanded_activated_layer_list.list[i];
4940             loader_platform_dl_handle lib_handle = layer_prop->lib_handle;
4941 
4942             // Skip it if a layer with the same name has already been successfully activated
4943             if (loader_names_array_has_layer_property(&layer_prop->info, num_activated_layers, activated_layers)) {
4944                 continue;
4945             }
4946 
4947             // Skip the layer if the handle is NULL - this is likely because the library failed to load but wasn't removed from
4948             // the list.
4949             if (!lib_handle) {
4950                 continue;
4951             }
4952 
4953             // The Get*ProcAddr pointers will already be filled in if they were received from either the json file or the
4954             // version negotiation
4955             if ((fpGIPA = layer_prop->functions.get_instance_proc_addr) == NULL) {
4956                 if (layer_prop->functions.str_gipa == NULL || strlen(layer_prop->functions.str_gipa) == 0) {
4957                     fpGIPA = (PFN_vkGetInstanceProcAddr)loader_platform_get_proc_address(lib_handle, "vkGetInstanceProcAddr");
4958                     layer_prop->functions.get_instance_proc_addr = fpGIPA;
4959                 } else
4960                     fpGIPA =
4961                         (PFN_vkGetInstanceProcAddr)loader_platform_get_proc_address(lib_handle, layer_prop->functions.str_gipa);
4962                 if (!fpGIPA) {
4963                     loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_LAYER_BIT, 0,
4964                                "loader_create_device_chain: Failed to find \'vkGetInstanceProcAddr\' in layer \"%s\".  "
4965                                "Skipping layer.",
4966                                layer_prop->lib_name);
4967                     continue;
4968                 }
4969             }
4970 
4971             if (fpGIPA == callingLayer) {
4972                 if (layerNextGDPA != NULL) {
4973                     *layerNextGDPA = nextGDPA;
4974                 }
4975                 // Break here because if fpGIPA is the same as callingLayer, that means a layer is trying to create a device,
4976                 // and we don't want to continue any further, since the next layer down would be the calling layer itself
4977                 break;
4978             }
4979 
4980             if ((fpGDPA = layer_prop->functions.get_device_proc_addr) == NULL) {
4981                 if (layer_prop->functions.str_gdpa == NULL || strlen(layer_prop->functions.str_gdpa) == 0) {
4982                     fpGDPA = (PFN_vkGetDeviceProcAddr)loader_platform_get_proc_address(lib_handle, "vkGetDeviceProcAddr");
4983                     layer_prop->functions.get_device_proc_addr = fpGDPA;
4984                 } else
4985                     fpGDPA = (PFN_vkGetDeviceProcAddr)loader_platform_get_proc_address(lib_handle, layer_prop->functions.str_gdpa);
4986                 if (!fpGDPA) {
4987                     loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0,
4988                                "Failed to find vkGetDeviceProcAddr in layer \"%s\"", layer_prop->lib_name);
4989                     continue;
4990                 }
4991             }
4992 
4993             layer_device_link_info[num_activated_layers].pNext = chain_info.u.pLayerInfo;
4994             layer_device_link_info[num_activated_layers].pfnNextGetInstanceProcAddr = nextGIPA;
4995             layer_device_link_info[num_activated_layers].pfnNextGetDeviceProcAddr = nextGDPA;
4996             chain_info.u.pLayerInfo = &layer_device_link_info[num_activated_layers];
4997             nextGIPA = fpGIPA;
4998             nextGDPA = fpGDPA;
4999 
5000             activated_layers[num_activated_layers].name = layer_prop->info.layerName;
5001             activated_layers[num_activated_layers].manifest = layer_prop->manifest_file_name;
5002             activated_layers[num_activated_layers].library = layer_prop->lib_name;
5003             activated_layers[num_activated_layers].is_implicit = !(layer_prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER);
5004             if (activated_layers[num_activated_layers].is_implicit) {
5005                 activated_layers[num_activated_layers].disable_env = layer_prop->disable_env_var.name;
5006             }
5007 
5008             loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Inserted device layer \"%s\" (%s)",
5009                        layer_prop->info.layerName, layer_prop->lib_name);
5010 
5011             num_activated_layers++;
5012         }
5013     }
5014 
5015     VkDevice created_device = (VkDevice)dev;
5016     PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)nextGIPA(inst->instance, "vkCreateDevice");
5017     if (fpCreateDevice) {
5018         VkLayerDeviceCreateInfo create_info_disp;
5019 
5020         create_info_disp.sType = VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO;
5021         create_info_disp.function = VK_LOADER_DATA_CALLBACK;
5022 
5023         create_info_disp.u.pfnSetDeviceLoaderData = vkSetDeviceDispatch;
5024 
5025         // If layer debugging is enabled, let's print out the full callstack with layers in their
5026         // defined order.
5027         uint32_t layer_driver_bits = VULKAN_LOADER_LAYER_BIT | VULKAN_LOADER_DRIVER_BIT;
5028         loader_log(inst, layer_driver_bits, 0, "vkCreateDevice layer callstack setup to:");
5029         loader_log(inst, layer_driver_bits, 0, "   <Application>");
5030         loader_log(inst, layer_driver_bits, 0, "     ||");
5031         loader_log(inst, layer_driver_bits, 0, "   <Loader>");
5032         loader_log(inst, layer_driver_bits, 0, "     ||");
5033         for (uint32_t cur_layer = 0; cur_layer < num_activated_layers; ++cur_layer) {
5034             uint32_t index = num_activated_layers - cur_layer - 1;
5035             loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "   %s", activated_layers[index].name);
5036             loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "           Type: %s",
5037                        activated_layers[index].is_implicit ? "Implicit" : "Explicit");
5038             if (activated_layers[index].is_implicit) {
5039                 loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "               Disable Env Var:  %s",
5040                            activated_layers[index].disable_env);
5041             }
5042             loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "           Manifest: %s", activated_layers[index].manifest);
5043             loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "           Library:  %s", activated_layers[index].library);
5044             loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "     ||");
5045         }
5046         loader_log(inst, layer_driver_bits, 0, "   <Device>");
5047         create_info_disp.pNext = loader_create_info.pNext;
5048         loader_create_info.pNext = &create_info_disp;
5049         res = fpCreateDevice(pd, &loader_create_info, pAllocator, &created_device);
5050         if (res != VK_SUCCESS) {
5051             return res;
5052         }
5053         dev->chain_device = created_device;
5054 
5055         // Because we changed the pNext chain to use our own VkDeviceGroupDeviceCreateInfo, we need to fixup the chain to
5056         // point back at the original VkDeviceGroupDeviceCreateInfo.
5057         VkBaseOutStructure *pNext = (VkBaseOutStructure *)loader_create_info.pNext;
5058         VkBaseOutStructure *pPrev = (VkBaseOutStructure *)&loader_create_info;
5059         while (NULL != pNext) {
5060             if (VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO == pNext->sType) {
5061                 VkDeviceGroupDeviceCreateInfo *cur_struct = (VkDeviceGroupDeviceCreateInfo *)pNext;
5062                 if (0 < cur_struct->physicalDeviceCount && NULL != cur_struct->pPhysicalDevices) {
5063                     pPrev->pNext = (VkBaseOutStructure *)original_device_group_create_info_struct;
5064                 }
5065                 break;
5066             }
5067 
5068             pPrev = pNext;
5069             pNext = pNext->pNext;
5070         }
5071 
5072     } else {
5073         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
5074                    "loader_create_device_chain: Failed to find \'vkCreateDevice\' in layers or ICD");
5075         // Couldn't find CreateDevice function!
5076         return VK_ERROR_INITIALIZATION_FAILED;
5077     }
5078 
5079     // Initialize device dispatch table
5080     loader_init_device_dispatch_table(&dev->loader_dispatch, nextGDPA, dev->chain_device);
5081     // Initialize the dispatch table to functions which need terminators
5082     // These functions point directly to the driver, not the terminator functions
5083     init_extension_device_proc_terminator_dispatch(dev);
5084 
5085     return res;
5086 }
5087 
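/*
 * Illustrative sketch only (application-side usage, not loader code): the kind of pNext chain that the
 * VkDeviceGroupDeviceCreateInfo handling inside loader_create_device_chain() above exists to support.
 * The physical-device handles the application supplies are loader trampoline handles, which is why the
 * loader swaps them for the next layer's (or the terminator's) handles before calling down.
 * "example_create_device_on_group" and the queue-family choice are hypothetical.
 */
#if 0  // example, not compiled into the loader
static VkResult example_create_device_on_group(const VkPhysicalDeviceGroupProperties *group, VkDevice *out_device) {
    VkDeviceGroupDeviceCreateInfo group_info = {
        .sType = VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO,
        .physicalDeviceCount = group->physicalDeviceCount,
        .pPhysicalDevices = group->physicalDevices,
    };
    float priority = 1.0f;
    VkDeviceQueueCreateInfo queue_info = {
        .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,
        .queueFamilyIndex = 0,  // assumption: family 0 is suitable on this device
        .queueCount = 1,
        .pQueuePriorities = &priority,
    };
    VkDeviceCreateInfo create_info = {
        .sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
        .pNext = &group_info,
        .queueCreateInfoCount = 1,
        .pQueueCreateInfos = &queue_info,
    };
    return vkCreateDevice(group->physicalDevices[0], &create_info, NULL, out_device);
}
#endif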
5088 VkResult loader_validate_layers(const struct loader_instance *inst, const uint32_t layer_count,
5089                                 const char *const *ppEnabledLayerNames, const struct loader_layer_list *list) {
5090     struct loader_layer_properties *prop;
5091 
5092     if (layer_count > 0 && ppEnabledLayerNames == NULL) {
5093         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
5094                    "loader_validate_layers: ppEnabledLayerNames is NULL but enabledLayerCount is greater than zero");
5095         return VK_ERROR_LAYER_NOT_PRESENT;
5096     }
5097 
5098     for (uint32_t i = 0; i < layer_count; i++) {
5099         VkStringErrorFlags result = vk_string_validate(MaxLoaderStringLength, ppEnabledLayerNames[i]);
5100         if (result != VK_STRING_ERROR_NONE) {
5101             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
5102                        "loader_validate_layers: ppEnabledLayerNames contains string that is too long or is badly formed");
5103             return VK_ERROR_LAYER_NOT_PRESENT;
5104         }
5105 
5106         prop = loader_find_layer_property(ppEnabledLayerNames[i], list);
5107         if (NULL == prop) {
5108             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
5109                        "loader_validate_layers: Layer %d does not exist in the list of available layers", i);
5110             return VK_ERROR_LAYER_NOT_PRESENT;
5111         }
5112         if (inst->settings.settings_active && prop->settings_control_value != LOADER_SETTINGS_LAYER_CONTROL_ON &&
5113             prop->settings_control_value != LOADER_SETTINGS_LAYER_CONTROL_DEFAULT) {
5114             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
5115                        "loader_validate_layers: Layer %d was explicitly prevented from being enabled by the loader settings file",
5116                        i);
5117             return VK_ERROR_LAYER_NOT_PRESENT;
5118         }
5119     }
5120     return VK_SUCCESS;
5121 }
5122 
5123 VkResult loader_validate_instance_extensions(struct loader_instance *inst, const struct loader_extension_list *icd_exts,
5124                                              const struct loader_layer_list *instance_layers,
5125                                              const struct loader_envvar_all_filters *layer_filters,
5126                                              const VkInstanceCreateInfo *pCreateInfo) {
5127     VkExtensionProperties *extension_prop;
5128     char *env_value;
5129     bool check_if_known = true;
5130     VkResult res = VK_SUCCESS;
5131 
5132     struct loader_pointer_layer_list active_layers = {0};
5133     struct loader_pointer_layer_list expanded_layers = {0};
5134 
5135     if (pCreateInfo->enabledExtensionCount > 0 && pCreateInfo->ppEnabledExtensionNames == NULL) {
5136         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
5137                    "loader_validate_instance_extensions: Instance ppEnabledExtensionNames is NULL but enabledExtensionCount is "
5138                    "greater than zero");
5139         return VK_ERROR_EXTENSION_NOT_PRESENT;
5140     }
5141     if (!loader_init_pointer_layer_list(inst, &active_layers)) {
5142         res = VK_ERROR_OUT_OF_HOST_MEMORY;
5143         goto out;
5144     }
5145     if (!loader_init_pointer_layer_list(inst, &expanded_layers)) {
5146         res = VK_ERROR_OUT_OF_HOST_MEMORY;
5147         goto out;
5148     }
5149 
5150     if (inst->settings.settings_active) {
5151         res = enable_correct_layers_from_settings(inst, layer_filters, pCreateInfo->enabledLayerCount,
5152                                                   pCreateInfo->ppEnabledLayerNames, instance_layers, &active_layers,
5153                                                   &expanded_layers);
5154         if (res != VK_SUCCESS) {
5155             goto out;
5156         }
5157     } else {
5158         // Build the lists of active layers (including meta layers) and expanded layers (with meta layers resolved to their
5159         // components)
5160         res = loader_add_implicit_layers(inst, layer_filters, &active_layers, &expanded_layers, instance_layers);
5161         if (res != VK_SUCCESS) {
5162             goto out;
5163         }
5164         res = loader_add_environment_layers(inst, VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER, layer_filters, &active_layers,
5165                                             &expanded_layers, instance_layers);
5166         if (res != VK_SUCCESS) {
5167             goto out;
5168         }
5169         res = loader_add_layer_names_to_list(inst, layer_filters, &active_layers, &expanded_layers, pCreateInfo->enabledLayerCount,
5170                                              pCreateInfo->ppEnabledLayerNames, instance_layers);
5171         if (VK_SUCCESS != res) {
5172             goto out;
5173         }
5174     }
5175     for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
5176         VkStringErrorFlags result = vk_string_validate(MaxLoaderStringLength, pCreateInfo->ppEnabledExtensionNames[i]);
5177         if (result != VK_STRING_ERROR_NONE) {
5178             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
5179                        "loader_validate_instance_extensions: Instance ppEnabledExtensionNames contains "
5180                        "string that is too long or is badly formed");
5181             res = VK_ERROR_EXTENSION_NOT_PRESENT;
5182             goto out;
5183         }
5184 
5185         // Check if a user wants to disable the instance extension filtering behavior
5186         env_value = loader_getenv("VK_LOADER_DISABLE_INST_EXT_FILTER", inst);
5187         if (NULL != env_value && atoi(env_value) != 0) {
5188             check_if_known = false;
5189         }
5190         loader_free_getenv(env_value, inst);
5191 
5192         if (check_if_known) {
5193             // See if the extension is in the list of supported extensions
5194             bool found = false;
5195             for (uint32_t j = 0; LOADER_INSTANCE_EXTENSIONS[j] != NULL; j++) {
5196                 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], LOADER_INSTANCE_EXTENSIONS[j]) == 0) {
5197                     found = true;
5198                     break;
5199                 }
5200             }
5201 
5202             // If it isn't in the list, return an error
5203             if (!found) {
5204                 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
5205                            "loader_validate_instance_extensions: Extension %s not found in list of known instance extensions.",
5206                            pCreateInfo->ppEnabledExtensionNames[i]);
5207                 res = VK_ERROR_EXTENSION_NOT_PRESENT;
5208                 goto out;
5209             }
5210         }
5211 
5212         extension_prop = get_extension_property(pCreateInfo->ppEnabledExtensionNames[i], icd_exts);
5213 
5214         if (extension_prop) {
5215             continue;
5216         }
5217 
5218         extension_prop = NULL;
5219 
5220         // Not in global list, search layer extension lists
5221         for (uint32_t j = 0; NULL == extension_prop && j < expanded_layers.count; ++j) {
5222             extension_prop =
5223                 get_extension_property(pCreateInfo->ppEnabledExtensionNames[i], &expanded_layers.list[j]->instance_extension_list);
5224             if (extension_prop) {
5225                 // Found the extension in one of the layers enabled by the app.
5226                 break;
5227             }
5228 
5229             struct loader_layer_properties *layer_prop =
5230                 loader_find_layer_property(expanded_layers.list[j]->info.layerName, instance_layers);
5231             if (NULL == layer_prop) {
5232                 // Should NOT get here, loader_validate_layers should have already filtered this case out.
5233                 continue;
5234             }
5235         }
5236 
5237         if (!extension_prop) {
5238             // Didn't find extension name in any of the global layers, error out
5239             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
5240                        "loader_validate_instance_extensions: Instance extension %s not supported by available ICDs or enabled "
5241                        "layers.",
5242                        pCreateInfo->ppEnabledExtensionNames[i]);
5243             res = VK_ERROR_EXTENSION_NOT_PRESENT;
5244             goto out;
5245         }
5246     }
5247 
5248 out:
5249     loader_destroy_pointer_layer_list(inst, &active_layers);
5250     loader_destroy_pointer_layer_list(inst, &expanded_layers);
5251     return res;
5252 }
5253 
5254 VkResult loader_validate_device_extensions(struct loader_instance *this_instance,
5255                                            const struct loader_pointer_layer_list *activated_device_layers,
5256                                            const struct loader_extension_list *icd_exts, const VkDeviceCreateInfo *pCreateInfo) {
5257     for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
5258         VkStringErrorFlags result = vk_string_validate(MaxLoaderStringLength, pCreateInfo->ppEnabledExtensionNames[i]);
5259         if (result != VK_STRING_ERROR_NONE) {
5260             loader_log(this_instance, VULKAN_LOADER_ERROR_BIT, 0,
5261                        "loader_validate_device_extensions: Device ppEnabledExtensionNames contains "
5262                        "string that is too long or is badly formed");
5263             return VK_ERROR_EXTENSION_NOT_PRESENT;
5264         }
5265 
5266         const char *extension_name = pCreateInfo->ppEnabledExtensionNames[i];
5267         VkExtensionProperties *extension_prop = get_extension_property(extension_name, icd_exts);
5268 
5269         if (extension_prop) {
5270             continue;
5271         }
5272 
5273         // Not in global list, search activated layer extension lists
5274         for (uint32_t j = 0; j < activated_device_layers->count; j++) {
5275             struct loader_layer_properties *layer_prop = activated_device_layers->list[j];
5276 
5277             extension_prop = get_dev_extension_property(extension_name, &layer_prop->device_extension_list);
5278             if (extension_prop) {
5279                 // Found the extension in one of the layers enabled by the app.
5280                 break;
5281             }
5282         }
5283 
5284         if (!extension_prop) {
5285             // Didn't find extension name in any of the device layers, error out
5286             loader_log(this_instance, VULKAN_LOADER_ERROR_BIT, 0,
5287                        "loader_validate_device_extensions: Device extension %s not supported by selected physical device "
5288                        "or enabled layers.",
5289                        pCreateInfo->ppEnabledExtensionNames[i]);
5290             return VK_ERROR_EXTENSION_NOT_PRESENT;
5291         }
5292     }
5293     return VK_SUCCESS;
5294 }
5295 
5296 // Terminator functions for the Instance chain
5297 // All named terminator_<Vulkan API name>
5298 VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
5299                                                          const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
5300     struct loader_icd_term *icd_term;
5301     VkExtensionProperties *prop;
5302     char **filtered_extension_names = NULL;
5303     VkInstanceCreateInfo icd_create_info;
5304     VkResult res = VK_SUCCESS;
5305     bool one_icd_successful = false;
5306 
5307     struct loader_instance *ptr_instance = (struct loader_instance *)*pInstance;
5308     if (NULL == ptr_instance) {
5309         loader_log(ptr_instance, VULKAN_LOADER_WARN_BIT, 0,
5310                    "terminator_CreateInstance: Loader instance pointer null encountered.  Possibly set by active layer. (Policy "
5311                    "#LLP_LAYER_21)");
5312     } else if (LOADER_MAGIC_NUMBER != ptr_instance->magic) {
5313         loader_log(ptr_instance, VULKAN_LOADER_WARN_BIT, 0,
5314                    "terminator_CreateInstance: Instance pointer (%p) has invalid MAGIC value 0x%08lx. Instance value possibly "
5315                    "corrupted by active layer (Policy #LLP_LAYER_21).  ",
5316                    ptr_instance, ptr_instance->magic);
5317     }
5318 
5319     // Save the application version if it has been modified - layers sometimes need features in newer API versions than
5320     // what the application requested, and thus will increase the instance version to a level that suits their needs.
5321     if (pCreateInfo->pApplicationInfo && pCreateInfo->pApplicationInfo->apiVersion) {
5322         loader_api_version altered_version = loader_make_version(pCreateInfo->pApplicationInfo->apiVersion);
5323         if (altered_version.major != ptr_instance->app_api_version.major ||
5324             altered_version.minor != ptr_instance->app_api_version.minor) {
5325             ptr_instance->app_api_version = altered_version;
5326         }
5327     }
5328 
5329     memcpy(&icd_create_info, pCreateInfo, sizeof(icd_create_info));
5330 
5331     icd_create_info.enabledLayerCount = 0;
5332     icd_create_info.ppEnabledLayerNames = NULL;
5333 
5334     // NOTE: Need to filter the extensions to only those supported by the ICD.
5335     //       No ICD will advertise support for layers. An ICD library could
5336     //       support a layer, but it would be independent of the actual ICD,
5337     //       just in the same library.
5338     uint32_t extension_count = pCreateInfo->enabledExtensionCount;
5339 #if defined(LOADER_ENABLE_LINUX_SORT)
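    // Reserve one extra slot: VK_KHR_get_physical_device_properties2 may be force-enabled below for Linux GPU sorting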
5340     extension_count += 1;
5341 #endif  // LOADER_ENABLE_LINUX_SORT
5342     filtered_extension_names = loader_stack_alloc(extension_count * sizeof(char *));
5343     if (!filtered_extension_names) {
5344         loader_log(ptr_instance, VULKAN_LOADER_ERROR_BIT, 0,
5345                    "terminator_CreateInstance: Failed to create extension name array for %d extensions", extension_count);
5346         res = VK_ERROR_OUT_OF_HOST_MEMORY;
5347         goto out;
5348     }
5349     icd_create_info.ppEnabledExtensionNames = (const char *const *)filtered_extension_names;
5350 
5351     // Determine if Get Physical Device Properties 2 is available to this Instance
5352     if (pCreateInfo->pApplicationInfo && pCreateInfo->pApplicationInfo->apiVersion >= VK_API_VERSION_1_1) {
5353         ptr_instance->supports_get_dev_prop_2 = true;
5354     } else {
5355         for (uint32_t j = 0; j < pCreateInfo->enabledExtensionCount; j++) {
5356             if (!strcmp(pCreateInfo->ppEnabledExtensionNames[j], VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
5357                 ptr_instance->supports_get_dev_prop_2 = true;
5358                 break;
5359             }
5360         }
5361     }
5362 
5363     for (uint32_t i = 0; i < ptr_instance->icd_tramp_list.count; i++) {
5364         icd_term = loader_icd_add(ptr_instance, &ptr_instance->icd_tramp_list.scanned_list[i]);
5365         if (NULL == icd_term) {
5366             loader_log(ptr_instance, VULKAN_LOADER_ERROR_BIT, 0,
5367                        "terminator_CreateInstance: Failed to add ICD %d to ICD trampoline list.", i);
5368             res = VK_ERROR_OUT_OF_HOST_MEMORY;
5369             goto out;
5370         }
5371 
5372         // If any error happens after here, we need to remove the ICD from the list,
5373         // because we've already added it, but haven't validated it
5374 
5375         // Make sure that we reset the pApplicationInfo so we don't get an old pointer
5376         icd_create_info.pApplicationInfo = pCreateInfo->pApplicationInfo;
5377         icd_create_info.enabledExtensionCount = 0;
5378         struct loader_extension_list icd_exts = {0};
5379 
5380         // traverse scanned icd list adding non-duplicate extensions to the list
5381         res = loader_init_generic_list(ptr_instance, (struct loader_generic_list *)&icd_exts, sizeof(VkExtensionProperties));
5382         if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
5383             // If out of memory, bail immediately.
5384             goto out;
5385         } else if (VK_SUCCESS != res) {
5386             // Something bad happened with this ICD, so free it and try the
5387             // next.
5388             ptr_instance->icd_terms = icd_term->next;
5389             icd_term->next = NULL;
5390             loader_icd_destroy(ptr_instance, icd_term, pAllocator);
5391             continue;
5392         }
5393 
5394         res = loader_add_instance_extensions(ptr_instance, icd_term->scanned_icd->EnumerateInstanceExtensionProperties,
5395                                              icd_term->scanned_icd->lib_name, &icd_exts);
5396         if (VK_SUCCESS != res) {
5397             loader_destroy_generic_list(ptr_instance, (struct loader_generic_list *)&icd_exts);
5398             if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
5399                 // If out of memory, bail immediately.
5400                 goto out;
5401             } else {
5402                 // Something bad happened with this ICD, so free it and try the next.
5403                 ptr_instance->icd_terms = icd_term->next;
5404                 icd_term->next = NULL;
5405                 loader_icd_destroy(ptr_instance, icd_term, pAllocator);
5406                 continue;
5407             }
5408         }
5409 
5410         for (uint32_t j = 0; j < pCreateInfo->enabledExtensionCount; j++) {
5411             prop = get_extension_property(pCreateInfo->ppEnabledExtensionNames[j], &icd_exts);
5412             if (prop) {
5413                 filtered_extension_names[icd_create_info.enabledExtensionCount] = (char *)pCreateInfo->ppEnabledExtensionNames[j];
5414                 icd_create_info.enabledExtensionCount++;
5415             }
5416         }
5417 #if defined(LOADER_ENABLE_LINUX_SORT)
5418         // Force on "VK_KHR_get_physical_device_properties2" for Linux as we use it for GPU sorting.  This
5419         // should be done if the API version of either the application or the driver does not natively support
5420         // the core version of the vkGetPhysicalDeviceProperties2 entrypoint.
5421         if ((ptr_instance->app_api_version.major == 1 && ptr_instance->app_api_version.minor == 0) ||
5422             (VK_API_VERSION_MAJOR(icd_term->scanned_icd->api_version) == 1 &&
5423              VK_API_VERSION_MINOR(icd_term->scanned_icd->api_version) == 0)) {
5424             prop = get_extension_property(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, &icd_exts);
5425             if (prop) {
5426                 filtered_extension_names[icd_create_info.enabledExtensionCount] =
5427                     (char *)VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME;
5428                 icd_create_info.enabledExtensionCount++;
5429 
5430                 // At least one ICD supports this, so the instance should be able to support it
5431                 ptr_instance->supports_get_dev_prop_2 = true;
5432             }
5433         }
5434 #endif  // LOADER_ENABLE_LINUX_SORT
5435 
5436         // Determine if vkGetPhysicalDeviceProperties2 is available to this Instance
5437         if (icd_term->scanned_icd->api_version >= VK_API_VERSION_1_1) {
5438             icd_term->supports_get_dev_prop_2 = true;
5439         } else {
5440             for (uint32_t j = 0; j < icd_create_info.enabledExtensionCount; j++) {
5441                 if (!strcmp(filtered_extension_names[j], VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
5442                     icd_term->supports_get_dev_prop_2 = true;
5443                     break;
5444                 }
5445             }
5446         }
5447 
5448         loader_destroy_generic_list(ptr_instance, (struct loader_generic_list *)&icd_exts);
5449 
5450         // Get the driver version from vkEnumerateInstanceVersion
5451         uint32_t icd_version = VK_API_VERSION_1_0;
5452         VkResult icd_result = VK_SUCCESS;
5453         if (icd_term->scanned_icd->api_version >= VK_API_VERSION_1_1) {
5454             PFN_vkEnumerateInstanceVersion icd_enumerate_instance_version =
5455                 (PFN_vkEnumerateInstanceVersion)icd_term->scanned_icd->GetInstanceProcAddr(NULL, "vkEnumerateInstanceVersion");
5456             if (icd_enumerate_instance_version != NULL) {
5457                 icd_result = icd_enumerate_instance_version(&icd_version);
5458                 if (icd_result != VK_SUCCESS) {
5459                     icd_version = VK_API_VERSION_1_0;
5460                     loader_log(ptr_instance, VULKAN_LOADER_DEBUG_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
5461                                "terminator_CreateInstance: ICD \"%s\" vkEnumerateInstanceVersion returned error. The ICD will be "
5462                                "treated as a 1.0 ICD",
5463                                icd_term->scanned_icd->lib_name);
5464                 } else if (VK_API_VERSION_MINOR(icd_version) == 0) {
5465                     loader_log(ptr_instance, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
5466                                "terminator_CreateInstance: Manifest ICD for \"%s\" contained a 1.1 or greater API version, but "
5467                                "vkEnumerateInstanceVersion returned 1.0, treating as a 1.0 ICD",
5468                                icd_term->scanned_icd->lib_name);
5469                 }
5470             } else {
5471                 loader_log(ptr_instance, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
5472                            "terminator_CreateInstance: Manifest ICD for \"%s\" contained a 1.1 or greater API version, but does "
5473                            "not support vkEnumerateInstanceVersion, treating as a 1.0 ICD",
5474                            icd_term->scanned_icd->lib_name);
5475             }
5476         }
5477 
5478         // Remove the portability enumeration flag bit if the ICD doesn't support the extension
5479         if ((pCreateInfo->flags & VK_INSTANCE_CREATE_ENUMERATE_PORTABILITY_BIT_KHR) != 0) {
5480             bool supports_portability_enumeration = false;
5481             for (uint32_t j = 0; j < icd_create_info.enabledExtensionCount; j++) {
5482                 if (strcmp(filtered_extension_names[j], VK_KHR_PORTABILITY_ENUMERATION_EXTENSION_NAME) == 0) {
5483                     supports_portability_enumeration = true;
5484                     break;
5485                 }
5486             }
5487             // If the icd supports the extension, use the flags as given, otherwise remove the portability bit
5488             icd_create_info.flags = supports_portability_enumeration
5489                                         ? pCreateInfo->flags
5490                                         : pCreateInfo->flags & (~VK_INSTANCE_CREATE_ENUMERATE_PORTABILITY_BIT_KHR);
5491         }
5492 
5493         // Create an instance, substituting the application's requested API version with the ICD's own version when the ICD only supports Vulkan 1.0
5494         VkApplicationInfo icd_app_info;
5495         const uint32_t api_variant = 0;
5496         const uint32_t api_version_1_0 = VK_API_VERSION_1_0;
5497         uint32_t icd_version_nopatch =
5498             VK_MAKE_API_VERSION(api_variant, VK_API_VERSION_MAJOR(icd_version), VK_API_VERSION_MINOR(icd_version), 0);
5499         uint32_t requested_version = (pCreateInfo == NULL || pCreateInfo->pApplicationInfo == NULL)
5500                                          ? api_version_1_0
5501                                          : pCreateInfo->pApplicationInfo->apiVersion;
5502         if ((requested_version != 0) && (icd_version_nopatch == api_version_1_0)) {
5503             if (icd_create_info.pApplicationInfo == NULL) {
5504                 memset(&icd_app_info, 0, sizeof(icd_app_info));
5505             } else {
5506                 memmove(&icd_app_info, icd_create_info.pApplicationInfo, sizeof(icd_app_info));
5507             }
5508             icd_app_info.apiVersion = icd_version;
5509             icd_create_info.pApplicationInfo = &icd_app_info;
5510         }
5511         icd_result =
5512             ptr_instance->icd_tramp_list.scanned_list[i].CreateInstance(&icd_create_info, pAllocator, &(icd_term->instance));
5513         if (VK_ERROR_OUT_OF_HOST_MEMORY == icd_result) {
5514             // If out of memory, bail immediately.
5515             res = VK_ERROR_OUT_OF_HOST_MEMORY;
5516             goto out;
5517         } else if (VK_SUCCESS != icd_result) {
5518             loader_log(ptr_instance, VULKAN_LOADER_WARN_BIT, 0,
5519                        "terminator_CreateInstance: Received return code %i from call to vkCreateInstance in ICD %s. Skipping "
5520                        "this driver.",
5521                        icd_result, icd_term->scanned_icd->lib_name);
5522             ptr_instance->icd_terms = icd_term->next;
5523             icd_term->next = NULL;
5524             loader_icd_destroy(ptr_instance, icd_term, pAllocator);
5525             continue;
5526         }
5527 
5528         if (!loader_icd_init_entries(ptr_instance, icd_term)) {
5529             loader_log(ptr_instance, VULKAN_LOADER_WARN_BIT, 0,
5530                        "terminator_CreateInstance: Failed to find required entrypoints in ICD %s. Skipping this driver.",
5531                        icd_term->scanned_icd->lib_name);
5532             ptr_instance->icd_terms = icd_term->next;
5533             icd_term->next = NULL;
5534             loader_icd_destroy(ptr_instance, icd_term, pAllocator);
5535             continue;
5536         }
5537 
5538         if (ptr_instance->icd_tramp_list.scanned_list[i].interface_version < 3 &&
5539             (
5540 #if defined(VK_USE_PLATFORM_XLIB_KHR)
5541                 NULL != icd_term->dispatch.CreateXlibSurfaceKHR ||
5542 #endif  // VK_USE_PLATFORM_XLIB_KHR
5543 #if defined(VK_USE_PLATFORM_XCB_KHR)
5544                 NULL != icd_term->dispatch.CreateXcbSurfaceKHR ||
5545 #endif  // VK_USE_PLATFORM_XCB_KHR
5546 #if defined(VK_USE_PLATFORM_WAYLAND_KHR)
5547                 NULL != icd_term->dispatch.CreateWaylandSurfaceKHR ||
5548 #endif  // VK_USE_PLATFORM_WAYLAND_KHR
5549 #if defined(VK_USE_PLATFORM_ANDROID_KHR)
5550                 NULL != icd_term->dispatch.CreateAndroidSurfaceKHR ||
5551 #endif  // VK_USE_PLATFORM_ANDROID_KHR
5552 #if defined(VK_USE_PLATFORM_OHOS)
5553                 NULL != icd_term->dispatch.CreateSurfaceOHOS ||
5554 #endif  // VK_USE_PLATFORM_OHOS
5555 #if defined(VK_USE_PLATFORM_WIN32_KHR)
5556                 NULL != icd_term->dispatch.CreateWin32SurfaceKHR ||
5557 #endif  // VK_USE_PLATFORM_WIN32_KHR
5558                 NULL != icd_term->dispatch.DestroySurfaceKHR)) {
5559             loader_log(ptr_instance, VULKAN_LOADER_WARN_BIT, 0,
5560                        "terminator_CreateInstance: Driver %s supports interface version %u but still exposes VkSurfaceKHR"
5561                        " create/destroy entrypoints (Policy #LDP_DRIVER_8)",
5562                        ptr_instance->icd_tramp_list.scanned_list[i].lib_name,
5563                        ptr_instance->icd_tramp_list.scanned_list[i].interface_version);
5564         }
5565 
5566         // If we made it this far, at least one ICD was successful
5567         one_icd_successful = true;
5568     }
5569 
5570     // For vkGetPhysicalDeviceProperties2, at least one ICD needs to support the extension for the
5571     // instance to have it
5572     if (ptr_instance->supports_get_dev_prop_2) {
5573         bool at_least_one_supports = false;
5574         icd_term = ptr_instance->icd_terms;
5575         while (icd_term != NULL) {
5576             if (icd_term->supports_get_dev_prop_2) {
5577                 at_least_one_supports = true;
5578                 break;
5579             }
5580             icd_term = icd_term->next;
5581         }
5582         if (!at_least_one_supports) {
5583             ptr_instance->supports_get_dev_prop_2 = false;
5584         }
5585     }
5586 
5587     // If no ICDs were added to the instance list and res is unchanged from its initial value, the loader was unable to
5588     // find a suitable ICD.
5589     if (VK_SUCCESS == res && (ptr_instance->icd_terms == NULL || !one_icd_successful)) {
5590         loader_log(ptr_instance, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
5591                    "terminator_CreateInstance: Found no drivers!");
5592         res = VK_ERROR_INCOMPATIBLE_DRIVER;
5593     }
5594 
5595 out:
5596 
5597     ptr_instance->create_terminator_invalid_extension = false;
5598 
5599     if (VK_SUCCESS != res) {
5600         if (VK_ERROR_EXTENSION_NOT_PRESENT == res) {
5601             ptr_instance->create_terminator_invalid_extension = true;
5602         }
5603 
5604         while (NULL != ptr_instance->icd_terms) {
5605             icd_term = ptr_instance->icd_terms;
5606             ptr_instance->icd_terms = icd_term->next;
5607             if (NULL != icd_term->instance) {
5608                 icd_term->dispatch.DestroyInstance(icd_term->instance, pAllocator);
5609             }
5610             loader_icd_destroy(ptr_instance, icd_term, pAllocator);
5611         }
5612     } else {
5613         // Check for enabled extensions here to setup the loader structures so the loader knows what extensions
5614         // it needs to worry about.
5615         // We do it here and again above the layers in the trampoline function since the trampoline function
5616         // may think different extensions are enabled than what's down here.
5617         // This is why we don't clear inside of these function calls.
5618         // The clearing should actually be handled by the overall memset of the pInstance structure in the
5619         // trampoline.
5620         wsi_create_instance(ptr_instance, pCreateInfo);
5621         check_for_enabled_debug_extensions(ptr_instance, pCreateInfo);
5622         extensions_create_instance(ptr_instance, pCreateInfo);
5623     }
5624 
5625     return res;
5626 }
5627 
5628 VKAPI_ATTR void VKAPI_CALL terminator_DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
5629     struct loader_instance *ptr_instance = loader_get_instance(instance);
5630     if (NULL == ptr_instance) {
5631         return;
5632     }
5633     struct loader_icd_term *icd_terms = ptr_instance->icd_terms;
5634     struct loader_icd_term *next_icd_term;
5635 
5636     // Remove this instance from the list of instances:
5637     struct loader_instance *prev = NULL;
5638     loader_platform_thread_lock_mutex(&loader_global_instance_list_lock);
5639     struct loader_instance *next = loader.instances;
5640     while (next != NULL) {
5641         if (next == ptr_instance) {
5642             // Remove this instance from the list:
5643             if (prev)
5644                 prev->next = next->next;
5645             else
5646                 loader.instances = next->next;
5647             break;
5648         }
5649         prev = next;
5650         next = next->next;
5651     }
5652     loader_platform_thread_unlock_mutex(&loader_global_instance_list_lock);
5653 
5654     while (NULL != icd_terms) {
5655         if (icd_terms->instance) {
5656             icd_terms->dispatch.DestroyInstance(icd_terms->instance, pAllocator);
5657         }
5658         next_icd_term = icd_terms->next;
5659         icd_terms->instance = VK_NULL_HANDLE;
5660         loader_icd_destroy(ptr_instance, icd_terms, pAllocator);
5661 
5662         icd_terms = next_icd_term;
5663     }
5664 
5665     loader_scanned_icd_clear(ptr_instance, &ptr_instance->icd_tramp_list);
5666     loader_destroy_generic_list(ptr_instance, (struct loader_generic_list *)&ptr_instance->ext_list);
5667     if (NULL != ptr_instance->phys_devs_term) {
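        // The same physical device can appear more than once in the terminator list, so NULL out the duplicates first to ensure each one is freed exactly once below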
5668         for (uint32_t i = 0; i < ptr_instance->phys_dev_count_term; i++) {
5669             for (uint32_t j = i + 1; j < ptr_instance->phys_dev_count_term; j++) {
5670                 if (ptr_instance->phys_devs_term[i] == ptr_instance->phys_devs_term[j]) {
5671                     ptr_instance->phys_devs_term[j] = NULL;
5672                 }
5673             }
5674         }
5675         for (uint32_t i = 0; i < ptr_instance->phys_dev_count_term; i++) {
5676             loader_instance_heap_free(ptr_instance, ptr_instance->phys_devs_term[i]);
5677         }
5678         loader_instance_heap_free(ptr_instance, ptr_instance->phys_devs_term);
5679     }
5680     if (NULL != ptr_instance->phys_dev_groups_term) {
5681         for (uint32_t i = 0; i < ptr_instance->phys_dev_group_count_term; i++) {
5682             loader_instance_heap_free(ptr_instance, ptr_instance->phys_dev_groups_term[i]);
5683         }
5684         loader_instance_heap_free(ptr_instance, ptr_instance->phys_dev_groups_term);
5685     }
5686     loader_free_dev_ext_table(ptr_instance);
5687     loader_free_phys_dev_ext_table(ptr_instance);
5688 
5689     free_string_list(ptr_instance, &ptr_instance->enabled_layer_names);
5690 }
5691 
5692 VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateDevice(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo *pCreateInfo,
5693                                                        const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
5694     VkResult res = VK_SUCCESS;
5695     struct loader_physical_device_term *phys_dev_term;
5696     phys_dev_term = (struct loader_physical_device_term *)physicalDevice;
5697     struct loader_icd_term *icd_term = phys_dev_term->this_icd_term;
5698 
5699     struct loader_device *dev = (struct loader_device *)*pDevice;
5700     PFN_vkCreateDevice fpCreateDevice = icd_term->dispatch.CreateDevice;
5701     struct loader_extension_list icd_exts;
5702 
5703     VkBaseOutStructure *caller_dgci_container = NULL;
5704     VkDeviceGroupDeviceCreateInfo *caller_dgci = NULL;
5705 
5706     if (NULL == dev) {
5707         loader_log(icd_term->this_instance, VULKAN_LOADER_WARN_BIT, 0,
5708                    "terminator_CreateDevice: Loader device pointer null encountered.  Possibly set by active layer. (Policy "
5709                    "#LLP_LAYER_22)");
5710     } else if (DEVICE_DISP_TABLE_MAGIC_NUMBER != dev->loader_dispatch.core_dispatch.magic) {
5711         loader_log(icd_term->this_instance, VULKAN_LOADER_WARN_BIT, 0,
5712                    "terminator_CreateDevice: Device pointer (%p) has invalid MAGIC value 0x%08lx. The expected value is "
5713                    "0x10ADED040410ADED. Device value possibly "
5714                    "corrupted by active layer (Policy #LLP_LAYER_22).  ",
5715                    dev, dev->loader_dispatch.core_dispatch.magic);
5716     }
5717 
5718     dev->phys_dev_term = phys_dev_term;
5719 
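    // Mark the ICD extension list as uninitialized so the cleanup code at the 'out' label knows whether it needs to be freed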
5720     icd_exts.list = NULL;
5721 
5722     if (fpCreateDevice == NULL) {
5723         loader_log(icd_term->this_instance, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
5724                    "terminator_CreateDevice: No vkCreateDevice command exposed by ICD %s", icd_term->scanned_icd->lib_name);
5725         res = VK_ERROR_INITIALIZATION_FAILED;
5726         goto out;
5727     }
5728 
5729     VkDeviceCreateInfo localCreateInfo;
5730     memcpy(&localCreateInfo, pCreateInfo, sizeof(localCreateInfo));
5731 
5732     // NOTE: Need to filter the extensions to only those supported by the ICD.
5733     //       No ICD will advertise support for layers. An ICD library could support a layer,
5734     //       but it would be independent of the actual ICD, just in the same library.
5735     char **filtered_extension_names = NULL;
5736     if (0 < pCreateInfo->enabledExtensionCount) {
5737         filtered_extension_names = loader_stack_alloc(pCreateInfo->enabledExtensionCount * sizeof(char *));
5738         if (NULL == filtered_extension_names) {
5739             loader_log(icd_term->this_instance, VULKAN_LOADER_ERROR_BIT, 0,
5740                        "terminator_CreateDevice: Failed to create extension name storage for %d extensions",
5741                        pCreateInfo->enabledExtensionCount);
5742             return VK_ERROR_OUT_OF_HOST_MEMORY;
5743         }
5744     }
5745 
5746     localCreateInfo.enabledLayerCount = 0;
5747     localCreateInfo.ppEnabledLayerNames = NULL;
5748 
5749     localCreateInfo.enabledExtensionCount = 0;
5750     localCreateInfo.ppEnabledExtensionNames = (const char *const *)filtered_extension_names;
5751 
5752     // Get the physical device (ICD) extensions
5753     res = loader_init_generic_list(icd_term->this_instance, (struct loader_generic_list *)&icd_exts, sizeof(VkExtensionProperties));
5754     if (VK_SUCCESS != res) {
5755         goto out;
5756     }
5757 
5758     res = loader_add_device_extensions(icd_term->this_instance, icd_term->dispatch.EnumerateDeviceExtensionProperties,
5759                                        phys_dev_term->phys_dev, icd_term->scanned_icd->lib_name, &icd_exts);
5760     if (res != VK_SUCCESS) {
5761         goto out;
5762     }
5763 
5764     for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
5765         const char *extension_name = pCreateInfo->ppEnabledExtensionNames[i];
5766         VkExtensionProperties *prop = get_extension_property(extension_name, &icd_exts);
5767         if (prop) {
5768             filtered_extension_names[localCreateInfo.enabledExtensionCount] = (char *)extension_name;
5769             localCreateInfo.enabledExtensionCount++;
5770         } else {
5771             loader_log(icd_term->this_instance, VULKAN_LOADER_DEBUG_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
5772                        "vkCreateDevice extension %s not available for devices associated with ICD %s", extension_name,
5773                        icd_term->scanned_icd->lib_name);
5774         }
5775     }
5776 
5777     // Before we continue, if VK_KHR_device_group is in the list of enabled and viable extensions, then we need to look for the
5778     // corresponding VkDeviceGroupDeviceCreateInfo struct in the pNext chain and replace all the physical device values (which
5779     // are really loader physical device terminator values) with the ICD versions.
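    // For example, a chain of VkDeviceCreateInfo -> VkDeviceGroupDeviceCreateInfo whose pPhysicalDevices holds loader terminator
    // handles is rewritten below so the ICD sees its own VkPhysicalDevice handles; the original chain is restored at 'out'.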
5780     // if (icd_term->this_instance->enabled_known_extensions.khr_device_group_creation == 1) {
5781     {
5782         VkBaseOutStructure *pNext = (VkBaseOutStructure *)localCreateInfo.pNext;
5783         VkBaseOutStructure *pPrev = (VkBaseOutStructure *)&localCreateInfo;
5784         while (NULL != pNext) {
5785             if (VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO == pNext->sType) {
5786                 VkDeviceGroupDeviceCreateInfo *cur_struct = (VkDeviceGroupDeviceCreateInfo *)pNext;
5787                 if (0 < cur_struct->physicalDeviceCount && NULL != cur_struct->pPhysicalDevices) {
5788                     VkDeviceGroupDeviceCreateInfo *temp_struct = loader_stack_alloc(sizeof(VkDeviceGroupDeviceCreateInfo));
5789                     VkPhysicalDevice *phys_dev_array = NULL;
5790                     if (NULL == temp_struct) {
5791                         return VK_ERROR_OUT_OF_HOST_MEMORY;
5792                     }
5793                     memcpy(temp_struct, cur_struct, sizeof(VkDeviceGroupDeviceCreateInfo));
5794                     phys_dev_array = loader_stack_alloc(sizeof(VkPhysicalDevice) * cur_struct->physicalDeviceCount);
5795                     if (NULL == phys_dev_array) {
5796                         return VK_ERROR_OUT_OF_HOST_MEMORY;
5797                     }
5798 
5799                     // Before calling down, replace the incoming physical device values (which are really loader terminator
5800                     // physical devices) with the ICD's physical device values.
5801                     struct loader_physical_device_term *cur_term;
5802                     for (uint32_t phys_dev = 0; phys_dev < cur_struct->physicalDeviceCount; phys_dev++) {
5803                         cur_term = (struct loader_physical_device_term *)cur_struct->pPhysicalDevices[phys_dev];
5804                         phys_dev_array[phys_dev] = cur_term->phys_dev;
5805                     }
5806                     temp_struct->pPhysicalDevices = phys_dev_array;
5807 
5808                     // Keep track of pointers to restore pNext chain before returning
5809                     caller_dgci_container = pPrev;
5810                     caller_dgci = cur_struct;
5811 
5812                     // Replace the old struct in the pNext chain with this one.
5813                     pPrev->pNext = (VkBaseOutStructure *)temp_struct;
5814                 }
5815                 break;
5816             }
5817 
5818             pPrev = pNext;
5819             pNext = pNext->pNext;
5820         }
5821     }
5822 
5823     // Handle loader emulation for structs that are not supported by the ICD:
5824     // Presently, the emulation leaves the pNext chain alone. This means that the ICD will receive items in the chain which
5825     // are not recognized by the ICD. If this causes the ICD to fail, then the items would have to be removed here. The current
5826     // implementation does not remove them because copying the pNext chain would be impossible if the loader does not recognize
5827     // any of the struct types, as the loader would not know the size to allocate and copy.
5828     // if (icd_term->dispatch.GetPhysicalDeviceFeatures2 == NULL && icd_term->dispatch.GetPhysicalDeviceFeatures2KHR == NULL) {
5829     {
5830         const void *pNext = localCreateInfo.pNext;
5831         while (pNext != NULL) {
5832             switch (*(VkStructureType *)pNext) {
5833                 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2: {
5834                     const VkPhysicalDeviceFeatures2KHR *features = pNext;
5835 
5836                     if (icd_term->dispatch.GetPhysicalDeviceFeatures2 == NULL &&
5837                         icd_term->dispatch.GetPhysicalDeviceFeatures2KHR == NULL) {
5838                         loader_log(icd_term->this_instance, VULKAN_LOADER_INFO_BIT, 0,
5839                                    "vkCreateDevice: Emulating handling of VkPhysicalDeviceFeatures2 in pNext chain for ICD \"%s\"",
5840                                    icd_term->scanned_icd->lib_name);
5841 
5842                         // Verify that VK_KHR_get_physical_device_properties2 is enabled
5843                         if (icd_term->this_instance->enabled_known_extensions.khr_get_physical_device_properties2) {
5844                             localCreateInfo.pEnabledFeatures = &features->features;
5845                         }
5846                     }
5847 
5848                     // Leave this item in the pNext chain for now
5849 
5850                     pNext = features->pNext;
5851                     break;
5852                 }
5853 
5854                 case VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO: {
5855                     const VkDeviceGroupDeviceCreateInfo *group_info = pNext;
5856 
5857                     if (icd_term->dispatch.EnumeratePhysicalDeviceGroups == NULL &&
5858                         icd_term->dispatch.EnumeratePhysicalDeviceGroupsKHR == NULL) {
5859                         loader_log(icd_term->this_instance, VULKAN_LOADER_INFO_BIT, 0,
5860                                    "vkCreateDevice: Emulating handling of VkPhysicalDeviceGroupProperties in pNext chain for "
5861                                    "ICD \"%s\"",
5862                                    icd_term->scanned_icd->lib_name);
5863 
5864                         // The group must contain only this one device, since physical device groups aren't actually supported
5865                         if (group_info->physicalDeviceCount != 1) {
5866                             loader_log(icd_term->this_instance, VULKAN_LOADER_ERROR_BIT, 0,
5867                                        "vkCreateDevice: Emulation failed to create device from device group info");
5868                             res = VK_ERROR_INITIALIZATION_FAILED;
5869                             goto out;
5870                         }
5871                     }
5872 
5873                     // Nothing needs to be done here because we're leaving the item in the pNext chain and because the spec
5874                     // states that the physicalDevice argument must be included in the device group, and we've already checked
5875                     // that it is
5876 
5877                     pNext = group_info->pNext;
5878                     break;
5879                 }
5880 
5881                 // Multiview properties are also allowed, but since VK_KHX_multiview is a device extension, we'll just let the
5882                 // ICD handle that error when the user enables the extension here
5883                 default: {
5884                     const VkBaseInStructure *header = pNext;
5885                     pNext = header->pNext;
5886                     break;
5887                 }
5888             }
5889         }
5890     }
5891 
5892     VkBool32 maintenance5_feature_enabled = false;
5893     // Look for the VkPhysicalDeviceMaintenance5FeaturesKHR struct to see if the feature was enabled
5894     {
5895         const void *pNext = localCreateInfo.pNext;
5896         while (pNext != NULL) {
5897             switch (*(VkStructureType *)pNext) {
5898                 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_5_FEATURES_KHR: {
5899                     const VkPhysicalDeviceMaintenance5FeaturesKHR *maintenance_features = pNext;
5900                     if (maintenance_features->maintenance5 == VK_TRUE) {
5901                         maintenance5_feature_enabled = true;
5902                     }
5903                     pNext = maintenance_features->pNext;
5904                     break;
5905                 }
5906 
5907                 default: {
5908                     const VkBaseInStructure *header = pNext;
5909                     pNext = header->pNext;
5910                     break;
5911                 }
5912             }
5913         }
5914     }
5915 
5916     // Every extension that has a loader-defined terminator needs to be marked as enabled or disabled so that we know whether or
5917     // not to return that terminator when vkGetDeviceProcAddr is called
5918     for (uint32_t i = 0; i < localCreateInfo.enabledExtensionCount; ++i) {
5919         if (!strcmp(localCreateInfo.ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME)) {
5920             dev->driver_extensions.khr_swapchain_enabled = true;
5921         } else if (!strcmp(localCreateInfo.ppEnabledExtensionNames[i], VK_KHR_DISPLAY_SWAPCHAIN_EXTENSION_NAME)) {
5922             dev->driver_extensions.khr_display_swapchain_enabled = true;
5923         } else if (!strcmp(localCreateInfo.ppEnabledExtensionNames[i], VK_KHR_DEVICE_GROUP_EXTENSION_NAME)) {
5924             dev->driver_extensions.khr_device_group_enabled = true;
5925         } else if (!strcmp(localCreateInfo.ppEnabledExtensionNames[i], VK_EXT_DEBUG_MARKER_EXTENSION_NAME)) {
5926             dev->driver_extensions.ext_debug_marker_enabled = true;
5927         } else if (!strcmp(localCreateInfo.ppEnabledExtensionNames[i], "VK_EXT_full_screen_exclusive")) {
5928             dev->driver_extensions.ext_full_screen_exclusive_enabled = true;
5929         } else if (!strcmp(localCreateInfo.ppEnabledExtensionNames[i], VK_KHR_MAINTENANCE_5_EXTENSION_NAME) &&
5930                    maintenance5_feature_enabled) {
5931             dev->should_ignore_device_commands_from_newer_version = true;
5932         }
5933     }
5934     dev->layer_extensions.ext_debug_utils_enabled = icd_term->this_instance->enabled_known_extensions.ext_debug_utils;
5935     dev->driver_extensions.ext_debug_utils_enabled = icd_term->this_instance->enabled_known_extensions.ext_debug_utils;
5936 
5937     VkPhysicalDeviceProperties properties;
5938     icd_term->dispatch.GetPhysicalDeviceProperties(phys_dev_term->phys_dev, &properties);
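    // VK_KHR_device_group was promoted to core in Vulkan 1.1, so treat it as enabled for any 1.1+ device even when the
    // application did not request the extension explicitly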
5939     if (!dev->driver_extensions.khr_device_group_enabled) {
5940         if (properties.apiVersion >= VK_API_VERSION_1_1) {
5941             dev->driver_extensions.khr_device_group_enabled = true;
5942         }
5943     }
5944 
5945     loader_log(icd_term->this_instance, VULKAN_LOADER_LAYER_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
5946                "       Using \"%s\" with driver: \"%s\"", properties.deviceName, icd_term->scanned_icd->lib_name);
5947 
5948     res = fpCreateDevice(phys_dev_term->phys_dev, &localCreateInfo, pAllocator, &dev->icd_device);
5949     if (res != VK_SUCCESS) {
5950         loader_log(icd_term->this_instance, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
5951                    "terminator_CreateDevice: Failed in ICD %s vkCreateDevice call", icd_term->scanned_icd->lib_name);
5952         goto out;
5953     }
5954 
5955     *pDevice = dev->icd_device;
5956     loader_add_logical_device(icd_term, dev);
5957 
5958     // Init dispatch pointer in new device object
5959     loader_init_dispatch(*pDevice, &dev->loader_dispatch);
5960 
5961 out:
5962     if (NULL != icd_exts.list) {
5963         loader_destroy_generic_list(icd_term->this_instance, (struct loader_generic_list *)&icd_exts);
5964     }
5965 
5966     // Restore pNext pointer to old VkDeviceGroupDeviceCreateInfo
5967     // in the chain to maintain consistency for the caller.
5968     if (caller_dgci_container != NULL) {
5969         caller_dgci_container->pNext = (VkBaseOutStructure *)caller_dgci;
5970     }
5971 
5972     return res;
5973 }
5974 
5975 // Update the trampoline physical devices with the wrapped version.
5976 // We always want to re-use previous physical device pointers since an application may still be holding
5977 // handles that were returned by an earlier enumeration.
5978 VkResult setup_loader_tramp_phys_devs(struct loader_instance *inst, uint32_t phys_dev_count, VkPhysicalDevice *phys_devs) {
5979     VkResult res = VK_SUCCESS;
5980     uint32_t found_count = 0;
5981     uint32_t old_count = inst->phys_dev_count_tramp;
5982     uint32_t new_count = inst->total_gpu_count;
5983     struct loader_physical_device_tramp **new_phys_devs = NULL;
5984 
5985     if (0 == phys_dev_count) {
5986         return VK_SUCCESS;
5987     }
5988     if (phys_dev_count > new_count) {
5989         new_count = phys_dev_count;
5990     }
5991 
5992     // We want an old to new index array and a new to old index array
5993     int32_t *old_to_new_index = (int32_t *)loader_stack_alloc(sizeof(int32_t) * old_count);
5994     int32_t *new_to_old_index = (int32_t *)loader_stack_alloc(sizeof(int32_t) * new_count);
5995     if (NULL == old_to_new_index || NULL == new_to_old_index) {
5996         return VK_ERROR_OUT_OF_HOST_MEMORY;
5997     }
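    // Illustrative example (hypothetical handles): if the old trampoline list wrapped {A, B} and the new enumeration
    // returns {B, C}, the loops below produce old_to_new_index = {-1, 0} and new_to_old_index = {1, -1}:
    // B is re-used, A has disappeared, and C needs a fresh trampoline object.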
5998 
5999     // Initialize both
6000     for (uint32_t cur_idx = 0; cur_idx < old_count; ++cur_idx) {
6001         old_to_new_index[cur_idx] = -1;
6002     }
6003     for (uint32_t cur_idx = 0; cur_idx < new_count; ++cur_idx) {
6004         new_to_old_index[cur_idx] = -1;
6005     }
6006 
6007     // Figure out the old->new and new->old indices
6008     for (uint32_t cur_idx = 0; cur_idx < old_count; ++cur_idx) {
6009         for (uint32_t new_idx = 0; new_idx < phys_dev_count; ++new_idx) {
6010             if (inst->phys_devs_tramp[cur_idx]->phys_dev == phys_devs[new_idx]) {
6011                 old_to_new_index[cur_idx] = (int32_t)new_idx;
6012                 new_to_old_index[new_idx] = (int32_t)cur_idx;
6013                 found_count++;
6014                 break;
6015             }
6016         }
6017     }
6018 
6019     // If we found exactly as many items as we had before, then everything
6020     // we already have is good enough and we just need to update the array that was passed in with
6021     // the loader values.
6022     if (found_count == phys_dev_count && 0 != old_count && old_count == new_count) {
6023         for (uint32_t new_idx = 0; new_idx < phys_dev_count; ++new_idx) {
6024             for (uint32_t cur_idx = 0; cur_idx < old_count; ++cur_idx) {
6025                 if (old_to_new_index[cur_idx] == (int32_t)new_idx) {
6026                     phys_devs[new_idx] = (VkPhysicalDevice)inst->phys_devs_tramp[cur_idx];
6027                     break;
6028                 }
6029             }
6030         }
6031         // Nothing else to do for this path
6032         res = VK_SUCCESS;
6033     } else {
6034         // Something is different, so do the full path of checking every device and creating a new array to use.
6035         // This can happen if a device was added, or removed, or we hadn't previously queried all the data and we
6036         // have more to store.
6037         new_phys_devs = loader_instance_heap_calloc(inst, sizeof(struct loader_physical_device_tramp *) * new_count,
6038                                                     VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
6039         if (NULL == new_phys_devs) {
6040             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6041                        "setup_loader_tramp_phys_devs:  Failed to allocate new physical device array of size %d", new_count);
6042             res = VK_ERROR_OUT_OF_HOST_MEMORY;
6043             goto out;
6044         }
6045 
6046         if (new_count > phys_dev_count) {
6047             found_count = phys_dev_count;
6048         } else {
6049             found_count = new_count;
6050         }
6051 
6052         // First try to see if an old item exists that matches the new item.  If so, just copy it over.
6053         for (uint32_t new_idx = 0; new_idx < found_count; ++new_idx) {
6054             bool old_item_found = false;
6055             for (uint32_t cur_idx = 0; cur_idx < old_count; ++cur_idx) {
6056                 if (old_to_new_index[cur_idx] == (int32_t)new_idx) {
6057                     // Copy over old item to correct spot in the new array
6058                     new_phys_devs[new_idx] = inst->phys_devs_tramp[cur_idx];
6059                     old_item_found = true;
6060                     break;
6061                 }
6062             }
6063             // Something wasn't found, so it's new; add it to the new list
6064             if (!old_item_found) {
6065                 new_phys_devs[new_idx] = loader_instance_heap_alloc(inst, sizeof(struct loader_physical_device_tramp),
6066                                                                     VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
6067                 if (NULL == new_phys_devs[new_idx]) {
6068                     loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6069                                "setup_loader_tramp_phys_devs:  Failed to allocate new trampoline physical device");
6070                     res = VK_ERROR_OUT_OF_HOST_MEMORY;
6071                     goto out;
6072                 }
6073 
6074                 // Initialize the new physicalDevice object
6075                 loader_set_dispatch((void *)new_phys_devs[new_idx], inst->disp);
6076                 new_phys_devs[new_idx]->this_instance = inst;
6077                 new_phys_devs[new_idx]->phys_dev = phys_devs[new_idx];
6078                 new_phys_devs[new_idx]->magic = PHYS_TRAMP_MAGIC_NUMBER;
6079             }
6080 
6081             phys_devs[new_idx] = (VkPhysicalDevice)new_phys_devs[new_idx];
6082         }
6083 
6084         // We usually get here if the user array is smaller than the total number of devices, so copy the
6085         // remaining devices we have over to the new array.
6086         uint32_t start = found_count;
6087         for (uint32_t new_idx = start; new_idx < new_count; ++new_idx) {
6088             for (uint32_t cur_idx = 0; cur_idx < old_count; ++cur_idx) {
6089                 if (old_to_new_index[cur_idx] == -1) {
6090                     new_phys_devs[new_idx] = inst->phys_devs_tramp[cur_idx];
6091                     old_to_new_index[cur_idx] = new_idx;
6092                     found_count++;
6093                     break;
6094                 }
6095             }
6096         }
6097     }
6098 
6099 out:
6100 
6101     if (NULL != new_phys_devs) {
6102         if (VK_SUCCESS != res) {
6103             for (uint32_t new_idx = 0; new_idx < found_count; ++new_idx) {
6104                 // If an OOM occurred while copying the new physical devices into the existing array,
6105                 // some of the old physical devices may already have been copied into the new array,
6106                 // leading to them being freed twice. To avoid this we just make sure not to delete
6107                 // physical devices which were copied.
6108                 bool found = false;
6109                 for (uint32_t cur_idx = 0; cur_idx < inst->phys_dev_count_tramp; cur_idx++) {
6110                     if (new_phys_devs[new_idx] == inst->phys_devs_tramp[cur_idx]) {
6111                         found = true;
6112                         break;
6113                     }
6114                 }
6115                 if (!found) {
6116                     loader_instance_heap_free(inst, new_phys_devs[new_idx]);
6117                 }
6118             }
6119             loader_instance_heap_free(inst, new_phys_devs);
6120         } else {
6121             if (new_count > inst->total_gpu_count) {
6122                 inst->total_gpu_count = new_count;
6123             }
6124             // Free everything in the old array that was not copied into the new array
6125             // here.  We can't attempt to do that any earlier since the loop before the
6126             // "out:" label may hit an out-of-memory condition, which would result in
6127             // memory being leaked.
6128             if (NULL != inst->phys_devs_tramp) {
6129                 for (uint32_t i = 0; i < inst->phys_dev_count_tramp; i++) {
6130                     bool found = false;
6131                     for (uint32_t j = 0; j < inst->total_gpu_count; j++) {
6132                         if (inst->phys_devs_tramp[i] == new_phys_devs[j]) {
6133                             found = true;
6134                             break;
6135                         }
6136                     }
6137                     if (!found) {
6138                         loader_instance_heap_free(inst, inst->phys_devs_tramp[i]);
6139                     }
6140                 }
6141                 loader_instance_heap_free(inst, inst->phys_devs_tramp);
6142             }
6143             inst->phys_devs_tramp = new_phys_devs;
6144             inst->phys_dev_count_tramp = found_count;
6145         }
6146     }
6147     if (VK_SUCCESS != res) {
6148         inst->total_gpu_count = 0;
6149     }
6150 
6151     return res;
6152 }
6153 
6154 #if defined(LOADER_ENABLE_LINUX_SORT)
6155 bool is_linux_sort_enabled(struct loader_instance *inst) {
6156     bool sort_items = inst->supports_get_dev_prop_2;
6157     char *env_value = loader_getenv("VK_LOADER_DISABLE_SELECT", inst);
6158     if (NULL != env_value) {
6159         int32_t int_env_val = atoi(env_value);
6160         loader_free_getenv(env_value, inst);
6161         if (int_env_val != 0) {
6162             sort_items = false;
6163         }
6164     }
6165     return sort_items;
6166 }
6167 #endif  // LOADER_ENABLE_LINUX_SORT
6168 
6169 // Look for physical_device in the provided phys_devs list, return true if found and put the index into out_idx, otherwise
6170 // return false
6171 bool find_phys_dev(VkPhysicalDevice physical_device, uint32_t phys_devs_count, struct loader_physical_device_term **phys_devs,
6172                    uint32_t *out_idx) {
6173     if (NULL == phys_devs) return false;
6174     for (uint32_t idx = 0; idx < phys_devs_count; idx++) {
6175         if (NULL != phys_devs[idx] && physical_device == phys_devs[idx]->phys_dev) {
6176             *out_idx = idx;
6177             return true;
6178         }
6179     }
6180     return false;
6181 }
6182 
6183 // Add physical_device to new_phys_devs
6184 VkResult check_and_add_to_new_phys_devs(struct loader_instance *inst, VkPhysicalDevice physical_device,
6185                                         struct loader_icd_physical_devices *dev_array, uint32_t *cur_new_phys_dev_count,
6186                                         struct loader_physical_device_term **new_phys_devs) {
6187     uint32_t out_idx = 0;
6188     uint32_t idx = *cur_new_phys_dev_count;
6189     // Check if the physical_device already exists in the new_phys_devs buffer, that means it was found from both
6190     // EnumerateAdapterPhysicalDevices and EnumeratePhysicalDevices and we need to skip it.
6191     if (find_phys_dev(physical_device, idx, new_phys_devs, &out_idx)) {
6192         return VK_SUCCESS;
6193     }
6194     // Check if it was found in a previous call to vkEnumeratePhysicalDevices, we can just copy over the old data.
6195     if (find_phys_dev(physical_device, inst->phys_dev_count_term, inst->phys_devs_term, &out_idx)) {
6196         new_phys_devs[idx] = inst->phys_devs_term[out_idx];
6197         (*cur_new_phys_dev_count)++;
6198         return VK_SUCCESS;
6199     }
6200 
6201     // Exit in case something is already present - this shouldn't happen but better to be safe than overwrite existing data
6202     // since this code has been refactored a half dozen times.
6203     if (NULL != new_phys_devs[idx]) {
6204         return VK_SUCCESS;
6205     }
6206     // If this physical device is new, we need to allocate space for it.
6207     new_phys_devs[idx] =
6208         loader_instance_heap_alloc(inst, sizeof(struct loader_physical_device_term), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
6209     if (NULL == new_phys_devs[idx]) {
6210         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6211                    "check_and_add_to_new_phys_devs:  Failed to allocate physical device terminator object %d", idx);
6212         return VK_ERROR_OUT_OF_HOST_MEMORY;
6213     }
6214 
6215     loader_set_dispatch((void *)new_phys_devs[idx], inst->disp);
6216     new_phys_devs[idx]->this_icd_term = dev_array->icd_term;
6217     new_phys_devs[idx]->icd_index = (uint8_t)(dev_array->icd_index);
6218     new_phys_devs[idx]->phys_dev = physical_device;
6219 
6220     // Increment the count of new physical devices
6221     (*cur_new_phys_dev_count)++;
6222     return VK_SUCCESS;
6223 }
6224 
6225 /* Enumerate all physical devices from ICDs and add them to inst->phys_devs_term
6226  *
6227  * There are two methods to find VkPhysicalDevices - vkEnumeratePhysicalDevices and vkEnumerateAdapterPhysicalDevices
6228  * The latter is supported on Windows only, and only by drivers supporting ICD Interface Version 6 and greater.
6229  *
6230  * Once all physical devices are acquired, they need to be pulled into a single list of `loader_physical_device_term`'s.
6231  * They also need to be setup - the icd_term, icd_index, phys_dev, and disp (dispatch table) all need the correct data.
6232  * Additionally, we need to keep using already setup physical devices as they may be in use, thus anything enumerated
6233  * that is already in inst->phys_devs_term will be carried over.
6234  */
6235 
6236 VkResult setup_loader_term_phys_devs(struct loader_instance *inst) {
6237     VkResult res = VK_SUCCESS;
6238     struct loader_icd_term *icd_term;
6239     uint32_t icd_idx = 0;
6240     uint32_t windows_sorted_devices_count = 0;
6241     struct loader_icd_physical_devices *windows_sorted_devices_array = NULL;
6242     uint32_t icd_count = 0;
6243     struct loader_icd_physical_devices *icd_phys_dev_array = NULL;
6244     uint32_t new_phys_devs_capacity = 0;
6245     uint32_t new_phys_devs_count = 0;
6246     struct loader_physical_device_term **new_phys_devs = NULL;
6247 
6248 #if defined(_WIN32)
6249     // Get the physical devices supported by platform sorting mechanism into a separate list
6250     res = windows_read_sorted_physical_devices(inst, &windows_sorted_devices_count, &windows_sorted_devices_array);
6251     if (VK_SUCCESS != res) {
6252         goto out;
6253     }
6254 #endif
6255 
6256     icd_count = inst->total_icd_count;
6257 
6258     // Allocate something to store the physical device characteristics that we read from each ICD.
6259     icd_phys_dev_array =
6260         (struct loader_icd_physical_devices *)loader_stack_alloc(sizeof(struct loader_icd_physical_devices) * icd_count);
6261     if (NULL == icd_phys_dev_array) {
6262         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6263                    "setup_loader_term_phys_devs:  Failed to allocate temporary ICD Physical device info array of size %d",
6264                    icd_count);
6265         res = VK_ERROR_OUT_OF_HOST_MEMORY;
6266         goto out;
6267     }
6268     memset(icd_phys_dev_array, 0, sizeof(struct loader_icd_physical_devices) * icd_count);
6269 
6270     // For each ICD, query the number of physical devices, and then get an
6271     // internal value for those physical devices.
6272     icd_term = inst->icd_terms;
6273     while (NULL != icd_term) {
6274         res = icd_term->dispatch.EnumeratePhysicalDevices(icd_term->instance, &icd_phys_dev_array[icd_idx].device_count, NULL);
6275         if (VK_SUCCESS != res) {
6276             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6277                        "setup_loader_term_phys_devs:  Call to ICD %d's \'vkEnumeratePhysicalDevices\' failed with error 0x%08x",
6278                        icd_idx, res);
6279             goto out;
6280         }
6281 
6282         icd_phys_dev_array[icd_idx].physical_devices =
6283             (VkPhysicalDevice *)loader_stack_alloc(icd_phys_dev_array[icd_idx].device_count * sizeof(VkPhysicalDevice));
6284         if (NULL == icd_phys_dev_array[icd_idx].physical_devices) {
6285             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6286                        "setup_loader_term_phys_devs:  Failed to allocate temporary ICD Physical device array for ICD %d of size %d",
6287                        icd_idx, icd_phys_dev_array[icd_idx].device_count);
6288             res = VK_ERROR_OUT_OF_HOST_MEMORY;
6289             goto out;
6290         }
6291 
6292         res = icd_term->dispatch.EnumeratePhysicalDevices(icd_term->instance, &(icd_phys_dev_array[icd_idx].device_count),
6293                                                           icd_phys_dev_array[icd_idx].physical_devices);
6294         if (VK_SUCCESS != res) {
6295             goto out;
6296         }
6297         icd_phys_dev_array[icd_idx].icd_term = icd_term;
6298         icd_phys_dev_array[icd_idx].icd_index = icd_idx;
6299         icd_term = icd_term->next;
6300         ++icd_idx;
6301     }
6302 
6303     // Add up the device counts from both the Windows-sorted list and the regular per-ICD enumeration
6304     for (uint32_t i = 0; i < windows_sorted_devices_count; ++i) {
6305         new_phys_devs_capacity += windows_sorted_devices_array[i].device_count;
6306     }
6307     for (uint32_t i = 0; i < icd_count; ++i) {
6308         new_phys_devs_capacity += icd_phys_dev_array[i].device_count;
6309     }
6310 
6311     // Bail out if there are no physical devices reported
6312     if (0 == new_phys_devs_capacity) {
6313         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6314                    "setup_loader_term_phys_devs:  Failed to detect any valid GPUs in the current config");
6315         res = VK_ERROR_INITIALIZATION_FAILED;
6316         goto out;
6317     }
6318 
6319     // Create an allocation large enough to hold both the Windows sorted enumeration and the non-Windows physical device
6320     // enumeration
6321     new_phys_devs = loader_instance_heap_calloc(inst, sizeof(struct loader_physical_device_term *) * new_phys_devs_capacity,
6322                                                 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
6323     if (NULL == new_phys_devs) {
6324         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6325                    "setup_loader_term_phys_devs:  Failed to allocate new physical device array of size %d", new_phys_devs_capacity);
6326         res = VK_ERROR_OUT_OF_HOST_MEMORY;
6327         goto out;
6328     }
6329 
6330     // Copy over everything found through sorted enumeration
6331     for (uint32_t i = 0; i < windows_sorted_devices_count; ++i) {
6332         for (uint32_t j = 0; j < windows_sorted_devices_array[i].device_count; ++j) {
6333             res = check_and_add_to_new_phys_devs(inst, windows_sorted_devices_array[i].physical_devices[j],
6334                                                  &windows_sorted_devices_array[i], &new_phys_devs_count, new_phys_devs);
6335             if (res == VK_ERROR_OUT_OF_HOST_MEMORY) {
6336                 goto out;
6337             }
6338         }
6339     }
6340 
6341 // Now go through the rest of the physical devices and add them to new_phys_devs
6342 #if defined(LOADER_ENABLE_LINUX_SORT)
6343 
6344     if (is_linux_sort_enabled(inst)) {
6345         for (uint32_t dev = new_phys_devs_count; dev < new_phys_devs_capacity; ++dev) {
6346             new_phys_devs[dev] =
6347                 loader_instance_heap_alloc(inst, sizeof(struct loader_physical_device_term), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
6348             if (NULL == new_phys_devs[dev]) {
6349                 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6350                            "setup_loader_term_phys_devs:  Failed to allocate physical device terminator object %d", dev);
6351                 res = VK_ERROR_OUT_OF_HOST_MEMORY;
6352                 goto out;
6353             }
6354         }
6355 
6356         // Get the physical devices supported by platform sorting mechanism into a separate list
6357         // Pass in a sublist to the function so it only operates on the correct elements. This means passing in a pointer to the
6358         // current next element in new_phys_devs and passing in a `count` of currently unwritten elements
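        // For example (hypothetical numbers): with new_phys_devs_capacity == 8 and new_phys_devs_count == 3,
        // the call below is handed &new_phys_devs[3] and a count of 5, so it only ever touches the five
        // not-yet-written slots.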
6359         res = linux_read_sorted_physical_devices(inst, icd_count, icd_phys_dev_array, new_phys_devs_capacity - new_phys_devs_count,
6360                                                  &new_phys_devs[new_phys_devs_count]);
6361         if (res == VK_ERROR_OUT_OF_HOST_MEMORY) {
6362             goto out;
6363         }
6364         // Keep previously allocated physical device info since apps may already be using that!
6365         for (uint32_t new_idx = new_phys_devs_count; new_idx < new_phys_devs_capacity; new_idx++) {
6366             for (uint32_t old_idx = 0; old_idx < inst->phys_dev_count_term; old_idx++) {
6367                 if (new_phys_devs[new_idx]->phys_dev == inst->phys_devs_term[old_idx]->phys_dev) {
6368                     loader_log(inst, VULKAN_LOADER_DEBUG_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
6369                                "Copying old device %u into new device %u", old_idx, new_idx);
6370                     // Free the entry we just allocated in new_phys_devs since we're reusing the existing terminator object instead
6371                     loader_instance_heap_free(inst, new_phys_devs[new_idx]);
6372                     new_phys_devs[new_idx] = inst->phys_devs_term[old_idx];
6373                     break;
6374                 }
6375             }
6376         }
6377         // now set the count to the capacity, as now the list is filled in
6378         new_phys_devs_count = new_phys_devs_capacity;
6379         // We want the following code to run if Linux sorting is disabled either at compile time or at runtime
6380     } else {
6381 #endif  // LOADER_ENABLE_LINUX_SORT
6382 
6383         // Copy over everything found through the non-sorted means.
6384         for (uint32_t i = 0; i < icd_count; ++i) {
6385             for (uint32_t j = 0; j < icd_phys_dev_array[i].device_count; ++j) {
6386                 res = check_and_add_to_new_phys_devs(inst, icd_phys_dev_array[i].physical_devices[j], &icd_phys_dev_array[i],
6387                                                      &new_phys_devs_count, new_phys_devs);
6388                 if (res == VK_ERROR_OUT_OF_HOST_MEMORY) {
6389                     goto out;
6390                 }
6391             }
6392         }
6393 #if defined(LOADER_ENABLE_LINUX_SORT)
6394     }
6395 #endif  // LOADER_ENABLE_LINUX_SORT
6396 out:
6397 
6398     if (VK_SUCCESS != res) {
6399         if (NULL != new_phys_devs) {
6400             // We've encountered an error, so we should free the new buffers.
6401             for (uint32_t i = 0; i < new_phys_devs_capacity; i++) {
6402                 // We may not have allocated this far; skip any entry that was never allocated.
6403                 if (new_phys_devs[i] == NULL) continue;
6404 
6405                 // If an OOM occurred while copying the new physical devices into the existing array,
6406                 // some of the old physical devices may already have been carried over into the new array,
6407                 // and freeing them here would free them twice. To avoid this, skip any physical device
6408                 // that was carried over from the old array.
6409                 bool found = false;
6410                 if (NULL != inst->phys_devs_term) {
6411                     for (uint32_t old_idx = 0; old_idx < inst->phys_dev_count_term; old_idx++) {
6412                         if (new_phys_devs[i] == inst->phys_devs_term[old_idx]) {
6413                             found = true;
6414                             break;
6415                         }
6416                     }
6417                 }
6418                 if (!found) {
6419                     loader_instance_heap_free(inst, new_phys_devs[i]);
6420                 }
6421             }
6422             loader_instance_heap_free(inst, new_phys_devs);
6423         }
6424         inst->total_gpu_count = 0;
6425     } else {
6426         if (NULL != inst->phys_devs_term) {
6427             // Free everything in the old array that was not copied into the new array.
6428             // We can't attempt to do this any earlier because the loop above the "out:"
6429             // label may still hit an out-of-memory condition, which would otherwise
6430             // result in leaked memory.
6431             for (uint32_t i = 0; i < inst->phys_dev_count_term; i++) {
6432                 bool found = false;
6433                 for (uint32_t j = 0; j < new_phys_devs_count; j++) {
6434                     if (new_phys_devs != NULL && inst->phys_devs_term[i] == new_phys_devs[j]) {
6435                         found = true;
6436                         break;
6437                     }
6438                 }
6439                 if (!found) {
6440                     loader_instance_heap_free(inst, inst->phys_devs_term[i]);
6441                 }
6442             }
6443             loader_instance_heap_free(inst, inst->phys_devs_term);
6444         }
6445 
6446         // Swap out old and new devices list
6447         inst->phys_dev_count_term = new_phys_devs_count;
6448         inst->phys_devs_term = new_phys_devs;
6449         inst->total_gpu_count = new_phys_devs_count;
6450     }
6451 
6452     if (windows_sorted_devices_array != NULL) {
6453         for (uint32_t i = 0; i < windows_sorted_devices_count; ++i) {
6454             if (windows_sorted_devices_array[i].device_count > 0 && windows_sorted_devices_array[i].physical_devices != NULL) {
6455                 loader_instance_heap_free(inst, windows_sorted_devices_array[i].physical_devices);
6456             }
6457         }
6458         loader_instance_heap_free(inst, windows_sorted_devices_array);
6459     }
6460 
6461     return res;
6462 }
6463 
6464 VkResult setup_loader_tramp_phys_dev_groups(struct loader_instance *inst, uint32_t group_count,
6465                                             VkPhysicalDeviceGroupProperties *groups) {
6466     VkResult res = VK_SUCCESS;
6467     uint32_t cur_idx;
6468     uint32_t dev_idx;
6469 
6470     if (0 == group_count) {
6471         return VK_SUCCESS;
6472     }
6473 
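    // The conversion below happens in three steps: flatten every group's handles into one array,
    // translate that array to loader trampoline handles, then scatter the translated handles back
    // into the group structures.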
6474     // Generate a list of all the devices and convert them to the loader ID
6475     uint32_t phys_dev_count = 0;
6476     for (cur_idx = 0; cur_idx < group_count; ++cur_idx) {
6477         phys_dev_count += groups[cur_idx].physicalDeviceCount;
6478     }
6479     VkPhysicalDevice *devices = (VkPhysicalDevice *)loader_stack_alloc(sizeof(VkPhysicalDevice) * phys_dev_count);
6480     if (NULL == devices) {
6481         return VK_ERROR_OUT_OF_HOST_MEMORY;
6482     }
6483 
6484     uint32_t cur_device = 0;
6485     for (cur_idx = 0; cur_idx < group_count; ++cur_idx) {
6486         for (dev_idx = 0; dev_idx < groups[cur_idx].physicalDeviceCount; ++dev_idx) {
6487             devices[cur_device++] = groups[cur_idx].physicalDevices[dev_idx];
6488         }
6489     }
6490 
6491     // Update the devices based on the loader physical device values.
6492     res = setup_loader_tramp_phys_devs(inst, phys_dev_count, devices);
6493     if (VK_SUCCESS != res) {
6494         return res;
6495     }
6496 
6497     // Update the devices in the group structures now
6498     cur_device = 0;
6499     for (cur_idx = 0; cur_idx < group_count; ++cur_idx) {
6500         for (dev_idx = 0; dev_idx < groups[cur_idx].physicalDeviceCount; ++dev_idx) {
6501             groups[cur_idx].physicalDevices[dev_idx] = devices[cur_device++];
6502         }
6503     }
6504 
6505     return res;
6506 }
6507 
6508 VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount,
6509                                                                    VkPhysicalDevice *pPhysicalDevices) {
6510     struct loader_instance *inst = (struct loader_instance *)instance;
6511     VkResult res = VK_SUCCESS;
6512 
6513     // Always call setup_loader_term_phys_devs because the set of physical devices
6514     // may have changed at any point.
6515     res = setup_loader_term_phys_devs(inst);
6516     if (VK_SUCCESS != res) {
6517         goto out;
6518     }
6519 
6520     uint32_t copy_count = inst->phys_dev_count_term;
6521     if (NULL != pPhysicalDevices) {
6522         if (copy_count > *pPhysicalDeviceCount) {
6523             copy_count = *pPhysicalDeviceCount;
6524             loader_log(inst, VULKAN_LOADER_INFO_BIT, 0,
6525                        "terminator_EnumeratePhysicalDevices : Trimming device count from %d to %d.", inst->phys_dev_count_term,
6526                        copy_count);
6527             res = VK_INCOMPLETE;
6528         }
6529 
6530         for (uint32_t i = 0; i < copy_count; i++) {
6531             pPhysicalDevices[i] = (VkPhysicalDevice)inst->phys_devs_term[i];
6532         }
6533     }
6534 
6535     *pPhysicalDeviceCount = copy_count;
6536 
6537 out:
6538 
6539     return res;
6540 }
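/* For reference, applications typically reach the terminator above through the usual two-call
 * pattern; a minimal application-side sketch (error handling omitted, not part of the loader):
 *
 *     uint32_t count = 0;
 *     vkEnumeratePhysicalDevices(instance, &count, NULL);
 *     VkPhysicalDevice *devices = malloc(count * sizeof(VkPhysicalDevice));
 *     VkResult result = vkEnumeratePhysicalDevices(instance, &count, devices);
 *
 * A result of VK_INCOMPLETE indicates the count passed in was smaller than the number of available
 * devices, matching the trimming logic above.
 */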
6541 
6542 VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
6543                                                                              const char *pLayerName, uint32_t *pPropertyCount,
6544                                                                              VkExtensionProperties *pProperties) {
6545     if (NULL == pPropertyCount) {
6546         return VK_INCOMPLETE;
6547     }
6548 
6549     struct loader_physical_device_term *phys_dev_term;
6550 
6551     // Any layer or trampoline wrapping has been removed by this point, so we can just cast to the
6552     // terminator's expected type for VkPhysicalDevice.
6553     phys_dev_term = (struct loader_physical_device_term *)physicalDevice;
6554 
6555     // If we got here with a non-empty pLayerName, look up the extensions
6556     // from the layer's JSON manifest
6557     if (pLayerName != NULL && strlen(pLayerName) > 0) {
6558         uint32_t count;
6559         uint32_t copy_size;
6560         const struct loader_instance *inst = phys_dev_term->this_icd_term->this_instance;
6561         struct loader_device_extension_list *dev_ext_list = NULL;
6562         struct loader_device_extension_list local_ext_list;
6563         memset(&local_ext_list, 0, sizeof(local_ext_list));
6564         if (vk_string_validate(MaxLoaderStringLength, pLayerName) == VK_STRING_ERROR_NONE) {
6565             for (uint32_t i = 0; i < inst->instance_layer_list.count; i++) {
6566                 struct loader_layer_properties *props = &inst->instance_layer_list.list[i];
6567                 if (strcmp(props->info.layerName, pLayerName) == 0) {
6568                     dev_ext_list = &props->device_extension_list;
6569                 }
6570             }
6571 
6572             count = (dev_ext_list == NULL) ? 0 : dev_ext_list->count;
6573             if (pProperties == NULL) {
6574                 *pPropertyCount = count;
6575                 loader_destroy_generic_list(inst, (struct loader_generic_list *)&local_ext_list);
6576                 return VK_SUCCESS;
6577             }
6578 
6579             copy_size = *pPropertyCount < count ? *pPropertyCount : count;
6580             for (uint32_t i = 0; i < copy_size; i++) {
6581                 memcpy(&pProperties[i], &dev_ext_list->list[i].props, sizeof(VkExtensionProperties));
6582             }
6583             *pPropertyCount = copy_size;
6584 
6585             loader_destroy_generic_list(inst, (struct loader_generic_list *)&local_ext_list);
6586             if (copy_size < count) {
6587                 return VK_INCOMPLETE;
6588             }
6589         } else {
6590             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6591                        "vkEnumerateDeviceExtensionProperties:  pLayerName is too long or is badly formed");
6592             return VK_ERROR_EXTENSION_NOT_PRESENT;
6593         }
6594 
6595         return VK_SUCCESS;
6596     }
6597 
6598     // The user is querying driver extensions and has supplied their own storage - just fill it out
6599     else if (pProperties) {
6600         struct loader_icd_term *icd_term = phys_dev_term->this_icd_term;
6601         uint32_t written_count = *pPropertyCount;
6602         VkResult res =
6603             icd_term->dispatch.EnumerateDeviceExtensionProperties(phys_dev_term->phys_dev, NULL, &written_count, pProperties);
6604         if (res != VK_SUCCESS) {
6605             return res;
6606         }
6607 
6608         // Iterate over the active layers; for each implicit layer, add its device extensions.
6609         // After calling into the driver, written_count contains the number of device extensions written, so we can append
6610         // layer extensions starting at that index in pProperties
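        // For example (hypothetical counts): if the driver wrote 10 extensions and an implicit layer adds
        // 2 more that aren't duplicates, written_count ends at 12, provided the caller passed in a
        // *pPropertyCount of at least 12; otherwise VK_INCOMPLETE is returned below.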
6611         for (uint32_t i = 0; i < icd_term->this_instance->expanded_activated_layer_list.count; i++) {
6612             struct loader_layer_properties *layer_props = icd_term->this_instance->expanded_activated_layer_list.list[i];
6613             if (0 == (layer_props->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER)) {
6614                 struct loader_device_extension_list *layer_ext_list = &layer_props->device_extension_list;
6615                 for (uint32_t j = 0; j < layer_ext_list->count; j++) {
6616                     struct loader_dev_ext_props *cur_ext_props = &layer_ext_list->list[j];
6617                     // look for duplicates
6618                     if (has_vk_extension_property_array(&cur_ext_props->props, written_count, pProperties)) {
6619                         continue;
6620                     }
6621 
6622                     if (*pPropertyCount <= written_count) {
6623                         return VK_INCOMPLETE;
6624                     }
6625 
6626                     memcpy(&pProperties[written_count], &cur_ext_props->props, sizeof(VkExtensionProperties));
6627                     written_count++;
6628                 }
6629             }
6630         }
6631         // Make sure we update pPropertyCount with how many extensions were actually written
6632         *pPropertyCount = written_count;
6633         return res;
6634     }
6635     // Use `goto out;` for the rest of this function
6636 
6637     // This case handles the call down the instance chain with pLayerName == NULL and pProperties == NULL (count-only query)
6638     struct loader_icd_term *icd_term = phys_dev_term->this_icd_term;
6639     struct loader_extension_list all_exts = {0};
6640     VkResult res;
6641 
6642     // We need to find the count without duplicates. This requires querying the driver for the names of the extensions.
6643     res = icd_term->dispatch.EnumerateDeviceExtensionProperties(phys_dev_term->phys_dev, NULL, &all_exts.count, NULL);
6644     if (res != VK_SUCCESS) {
6645         goto out;
6646     }
6647     // Then allocate memory to store the physical device extension list plus the extensions that layers provide.
6648     // At this point, all_exts.count is the number of driver extensions.
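    // The extra 20 entries are headroom so that the implicit-layer device extensions appended below
    // usually fit without any further allocation.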
6649     all_exts.capacity = sizeof(VkExtensionProperties) * (all_exts.count + 20);
6650     all_exts.list = loader_instance_heap_alloc(icd_term->this_instance, all_exts.capacity, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
6651     if (NULL == all_exts.list) {
6652         res = VK_ERROR_OUT_OF_HOST_MEMORY;
6653         goto out;
6654     }
6655 
6656     // Get the available device extensions and put them in all_exts.list
6657     res = icd_term->dispatch.EnumerateDeviceExtensionProperties(phys_dev_term->phys_dev, NULL, &all_exts.count, all_exts.list);
6658     if (res != VK_SUCCESS) {
6659         goto out;
6660     }
6661 
6662     // Iterate over active layers, if they are an implicit layer, add their device extensions to all_exts.list
6663     for (uint32_t i = 0; i < icd_term->this_instance->expanded_activated_layer_list.count; i++) {
6664         struct loader_layer_properties *layer_props = icd_term->this_instance->expanded_activated_layer_list.list[i];
6665         if (0 == (layer_props->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER)) {
6666             struct loader_device_extension_list *layer_ext_list = &layer_props->device_extension_list;
6667             for (uint32_t j = 0; j < layer_ext_list->count; j++) {
6668                 res = loader_add_to_ext_list(icd_term->this_instance, &all_exts, 1, &layer_ext_list->list[j].props);
6669                 if (res != VK_SUCCESS) {
6670                     goto out;
6671                 }
6672             }
6673         }
6674     }
6675 
6676     // Write out the final de-duplicated count to pPropertyCount
6677     *pPropertyCount = all_exts.count;
6678     res = VK_SUCCESS;
6679 
6680 out:
6681 
6682     loader_destroy_generic_list(icd_term->this_instance, (struct loader_generic_list *)&all_exts);
6683     return res;
6684 }
6685 
6686 VkStringErrorFlags vk_string_validate(const int max_length, const char *utf8) {
6687     VkStringErrorFlags result = VK_STRING_ERROR_NONE;
6688     int num_char_bytes = 0;
6689     int i, j;
6690 
6691     if (utf8 == NULL) {
6692         return VK_STRING_ERROR_NULL_PTR;
6693     }
6694 
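    // Walk the string one byte at a time. For each lead byte, num_char_bytes is the number of UTF-8
    // continuation bytes (matching UTF8_DATA_BYTE_CODE) expected to follow it; plain ASCII needs none.
    // For example, the two-byte encoding of U+00E9 (0xC3 0xA9) has a lead byte matching
    // UTF8_ONE_BYTE_CODE followed by a single continuation byte.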
6695     for (i = 0; i <= max_length; i++) {
6696         if (utf8[i] == 0) {
6697             break;
6698         } else if (i == max_length) {
6699             result |= VK_STRING_ERROR_LENGTH;
6700             break;
6701         } else if ((utf8[i] >= 0x20) && (utf8[i] < 0x7f)) {
6702             num_char_bytes = 0;
6703         } else if ((utf8[i] & UTF8_ONE_BYTE_MASK) == UTF8_ONE_BYTE_CODE) {
6704             num_char_bytes = 1;
6705         } else if ((utf8[i] & UTF8_TWO_BYTE_MASK) == UTF8_TWO_BYTE_CODE) {
6706             num_char_bytes = 2;
6707         } else if ((utf8[i] & UTF8_THREE_BYTE_MASK) == UTF8_THREE_BYTE_CODE) {
6708             num_char_bytes = 3;
6709         } else {
6710             result = VK_STRING_ERROR_BAD_DATA;
6711         }
6712 
6713         // Validate the following num_char_bytes of data
6714         for (j = 0; (j < num_char_bytes) && (i < max_length); j++) {
6715             if (++i == max_length) {
6716                 result |= VK_STRING_ERROR_LENGTH;
6717                 break;
6718             }
6719             if ((utf8[i] & UTF8_DATA_BYTE_MASK) != UTF8_DATA_BYTE_CODE) {
6720                 result |= VK_STRING_ERROR_BAD_DATA;
6721             }
6722         }
6723     }
6724     return result;
6725 }
6726 
6727 VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumerateInstanceVersion(const VkEnumerateInstanceVersionChain *chain,
6728                                                                    uint32_t *pApiVersion) {
6729     (void)chain;
6730     // NOTE: The Vulkan WG doesn't want us checking pApiVersion for NULL, but instead
6731     // prefers us crashing.
6732     *pApiVersion = VK_HEADER_VERSION_COMPLETE;
6733     return VK_SUCCESS;
6734 }
6735 
6736 VKAPI_ATTR VkResult VKAPI_CALL
6737 terminator_EnumerateInstanceExtensionProperties(const VkEnumerateInstanceExtensionPropertiesChain *chain, const char *pLayerName,
6738                                                 uint32_t *pPropertyCount, VkExtensionProperties *pProperties) {
6739     (void)chain;
6740     struct loader_extension_list *global_ext_list = NULL;
6741     struct loader_layer_list instance_layers;
6742     struct loader_extension_list local_ext_list;
6743     struct loader_icd_tramp_list icd_tramp_list;
6744     uint32_t copy_size;
6745     VkResult res = VK_SUCCESS;
6746     struct loader_envvar_all_filters layer_filters = {0};
6747 
6748     memset(&local_ext_list, 0, sizeof(local_ext_list));
6749     memset(&instance_layers, 0, sizeof(instance_layers));
6750     memset(&icd_tramp_list, 0, sizeof(icd_tramp_list));
6751 
6752     res = parse_layer_environment_var_filters(NULL, &layer_filters);
6753     if (VK_SUCCESS != res) {
6754         goto out;
6755     }
6756 
6757     // Get layer libraries if needed
6758     if (pLayerName && strlen(pLayerName) != 0) {
6759         if (vk_string_validate(MaxLoaderStringLength, pLayerName) != VK_STRING_ERROR_NONE) {
6760             assert(VK_FALSE && "vkEnumerateInstanceExtensionProperties: pLayerName is too long or is badly formed");
6761             res = VK_ERROR_EXTENSION_NOT_PRESENT;
6762             goto out;
6763         }
6764 
6765         res = loader_scan_for_layers(NULL, &instance_layers, &layer_filters);
6766         if (VK_SUCCESS != res) {
6767             goto out;
6768         }
6769         for (uint32_t i = 0; i < instance_layers.count; i++) {
6770             struct loader_layer_properties *props = &instance_layers.list[i];
6771             if (strcmp(props->info.layerName, pLayerName) == 0) {
6772                 global_ext_list = &props->instance_extension_list;
6773                 break;
6774             }
6775         }
6776     } else {
6777         // Preload ICD libraries so subsequent calls to EnumerateInstanceExtensionProperties don't have to load them
6778         loader_preload_icds();
6779 
6780         // Scan/discover all ICD libraries
6781         res = loader_icd_scan(NULL, &icd_tramp_list, NULL, NULL);
6782         // EnumerateInstanceExtensionProperties can't return anything other than OOM or VK_ERROR_LAYER_NOT_PRESENT
6783         if ((VK_SUCCESS != res && icd_tramp_list.count > 0) || res == VK_ERROR_OUT_OF_HOST_MEMORY) {
6784             goto out;
6785         }
6786         // Get extensions from all ICDs and merge them so there are no duplicates
6787         res = loader_get_icd_loader_instance_extensions(NULL, &icd_tramp_list, &local_ext_list);
6788         if (VK_SUCCESS != res) {
6789             goto out;
6790         }
6791         loader_scanned_icd_clear(NULL, &icd_tramp_list);
6792 
6793         // Append enabled implicit layers.
6794         res = loader_scan_for_implicit_layers(NULL, &instance_layers, &layer_filters);
6795         if (VK_SUCCESS != res) {
6796             goto out;
6797         }
6798         for (uint32_t i = 0; i < instance_layers.count; i++) {
6799             struct loader_extension_list *ext_list = &instance_layers.list[i].instance_extension_list;
6800             loader_add_to_ext_list(NULL, &local_ext_list, ext_list->count, ext_list->list);
6801         }
6802 
6803         global_ext_list = &local_ext_list;
6804     }
6805 
6806     if (global_ext_list == NULL) {
6807         res = VK_ERROR_LAYER_NOT_PRESENT;
6808         goto out;
6809     }
6810 
6811     if (pProperties == NULL) {
6812         *pPropertyCount = global_ext_list->count;
6813         goto out;
6814     }
6815 
6816     copy_size = *pPropertyCount < global_ext_list->count ? *pPropertyCount : global_ext_list->count;
6817     for (uint32_t i = 0; i < copy_size; i++) {
6818         memcpy(&pProperties[i], &global_ext_list->list[i], sizeof(VkExtensionProperties));
6819     }
6820     *pPropertyCount = copy_size;
6821 
6822     if (copy_size < global_ext_list->count) {
6823         res = VK_INCOMPLETE;
6824         goto out;
6825     }
6826 
6827 out:
6828     loader_destroy_generic_list(NULL, (struct loader_generic_list *)&icd_tramp_list);
6829     loader_destroy_generic_list(NULL, (struct loader_generic_list *)&local_ext_list);
6830     loader_delete_layer_list_and_properties(NULL, &instance_layers);
6831     return res;
6832 }
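/* For reference, both query modes of the terminator above are reached through the same entry point;
 * a minimal application-side sketch (error handling omitted, layer name chosen only as an example,
 * not part of the loader):
 *
 *     uint32_t count = 0;
 *     // Global query: ICD extensions merged with those of enabled implicit layers
 *     vkEnumerateInstanceExtensionProperties(NULL, &count, NULL);
 *
 *     // Layer-specific query: extensions listed in that layer's manifest
 *     vkEnumerateInstanceExtensionProperties("VK_LAYER_KHRONOS_validation", &count, NULL);
 */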
6833 
6834 VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumerateInstanceLayerProperties(const VkEnumerateInstanceLayerPropertiesChain *chain,
6835                                                                            uint32_t *pPropertyCount,
6836                                                                            VkLayerProperties *pProperties) {
6837     (void)chain;
6838     VkResult result = VK_SUCCESS;
6839     struct loader_layer_list instance_layer_list;
6840     struct loader_envvar_all_filters layer_filters = {0};
6841 
6842     LOADER_PLATFORM_THREAD_ONCE(&once_init, loader_initialize);
6843 
6844     uint32_t copy_size;
6845 
6846     result = parse_layer_environment_var_filters(NULL, &layer_filters);
6847     if (VK_SUCCESS != result) {
6848         goto out;
6849     }
6850 
6851     // Get layer libraries
6852     memset(&instance_layer_list, 0, sizeof(instance_layer_list));
6853     result = loader_scan_for_layers(NULL, &instance_layer_list, &layer_filters);
6854     if (VK_SUCCESS != result) {
6855         goto out;
6856     }
6857 
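    // Only layers that the loader settings leave enabled (control value ON or DEFAULT) are counted and
    // reported back to the application; layers forced off are filtered out of the results below.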
6858     uint32_t active_layer_count = 0;
6859     for (uint32_t i = 0; i < instance_layer_list.count; i++) {
6860         if (instance_layer_list.list[i].settings_control_value == LOADER_SETTINGS_LAYER_CONTROL_ON ||
6861             instance_layer_list.list[i].settings_control_value == LOADER_SETTINGS_LAYER_CONTROL_DEFAULT) {
6862             active_layer_count++;
6863         }
6864     }
6865 
6866     if (pProperties == NULL) {
6867         *pPropertyCount = active_layer_count;
6868         goto out;
6869     }
6870 
6871     copy_size = (*pPropertyCount < active_layer_count) ? *pPropertyCount : active_layer_count;
6872     uint32_t output_properties_index = 0;
6873     for (uint32_t i = 0; i < copy_size; i++) {
6874         if (instance_layer_list.list[i].settings_control_value == LOADER_SETTINGS_LAYER_CONTROL_ON ||
6875             instance_layer_list.list[i].settings_control_value == LOADER_SETTINGS_LAYER_CONTROL_DEFAULT) {
6876             memcpy(&pProperties[output_properties_index], &instance_layer_list.list[i].info, sizeof(VkLayerProperties));
6877             output_properties_index++;
6878         }
6879     }
6880 
6881     *pPropertyCount = copy_size;
6882 
6883     if (copy_size < instance_layer_list.count) {
6884         result = VK_INCOMPLETE;
6885         goto out;
6886     }
6887 
6888 out:
6889 
6890     loader_delete_layer_list_and_properties(NULL, &instance_layer_list);
6891     return result;
6892 }
6893 
6894 // ---- Vulkan Core 1.1 terminators
6895 
6896 VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumeratePhysicalDeviceGroups(
6897     VkInstance instance, uint32_t *pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties) {
6898     struct loader_instance *inst = (struct loader_instance *)instance;
6899 
6900     VkResult res = VK_SUCCESS;
6901     struct loader_icd_term *icd_term;
6902     uint32_t total_count = 0;
6903     uint32_t cur_icd_group_count = 0;
6904     VkPhysicalDeviceGroupProperties **new_phys_dev_groups = NULL;
6905     struct loader_physical_device_group_term *local_phys_dev_groups = NULL;
6906     PFN_vkEnumeratePhysicalDeviceGroups fpEnumeratePhysicalDeviceGroups = NULL;
6907     struct loader_icd_physical_devices *sorted_phys_dev_array = NULL;
6908     uint32_t sorted_count = 0;
6909 
6910     // For each ICD, query the number of physical device groups it exposes so the
6911     // totals can be added up across all ICDs.
6912     icd_term = inst->icd_terms;
6913     for (uint32_t icd_idx = 0; NULL != icd_term; icd_term = icd_term->next, icd_idx++) {
6914         cur_icd_group_count = 0;
6915 
6916         // Get the function pointer to use to call into the ICD. This could be the core or KHR version
6917         if (inst->enabled_known_extensions.khr_device_group_creation) {
6918             fpEnumeratePhysicalDeviceGroups = icd_term->dispatch.EnumeratePhysicalDeviceGroupsKHR;
6919         } else {
6920             fpEnumeratePhysicalDeviceGroups = icd_term->dispatch.EnumeratePhysicalDeviceGroups;
6921         }
6922 
6923         if (NULL == fpEnumeratePhysicalDeviceGroups) {
6924             // Treat each ICD's GPU as its own group if the extension isn't supported
6925             res = icd_term->dispatch.EnumeratePhysicalDevices(icd_term->instance, &cur_icd_group_count, NULL);
6926             if (res != VK_SUCCESS) {
6927                 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6928                            "terminator_EnumeratePhysicalDeviceGroups:  Failed during dispatch call of \'EnumeratePhysicalDevices\' "
6929                            "to ICD %d to get plain phys dev count.",
6930                            icd_idx);
6931                 continue;
6932             }
6933         } else {
6934             // Query the actual group info
6935             res = fpEnumeratePhysicalDeviceGroups(icd_term->instance, &cur_icd_group_count, NULL);
6936             if (res != VK_SUCCESS) {
6937                 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6938                            "terminator_EnumeratePhysicalDeviceGroups:  Failed during dispatch call of "
6939                            "\'EnumeratePhysicalDeviceGroups\' to ICD %d to get count.",
6940                            icd_idx);
6941                 continue;
6942             }
6943         }
6944         total_count += cur_icd_group_count;
6945     }
6946 
6947     // If no groups were reported, or the GPU list hasn't been set up yet, enumerate the GPUs and generate the list of all available ones
6948     if (0 == total_count || 0 == inst->total_gpu_count) {
6949         res = setup_loader_term_phys_devs(inst);
6950         if (VK_SUCCESS != res) {
6951             goto out;
6952         }
6953     }
6954 
6955     if (NULL != pPhysicalDeviceGroupProperties) {
6956         // Create an array for the new physical device groups, which will be stored
6957         // in the instance for the Terminator code.
6958         new_phys_dev_groups = (VkPhysicalDeviceGroupProperties **)loader_instance_heap_calloc(
6959             inst, total_count * sizeof(VkPhysicalDeviceGroupProperties *), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
6960         if (NULL == new_phys_dev_groups) {
6961             loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6962                        "terminator_EnumeratePhysicalDeviceGroups:  Failed to allocate new physical device group array of size %d",
6963                        total_count);
6964             res = VK_ERROR_OUT_OF_HOST_MEMORY;
6965             goto out;
6966         }
6967 
6968         // Create a temporary array (on the stack) to keep track of the
6969         // physical device group information returned by each ICD.
6970         local_phys_dev_groups = loader_stack_alloc(sizeof(struct loader_physical_device_group_term) * total_count);
6971         // Initialize the memory to something valid
6972         memset(local_phys_dev_groups, 0, sizeof(struct loader_physical_device_group_term) * total_count);
6973 
6974 #if defined(_WIN32)
6975         // Get the physical devices supported by platform sorting mechanism into a separate list
6976         res = windows_read_sorted_physical_devices(inst, &sorted_count, &sorted_phys_dev_array);
6977         if (VK_SUCCESS != res) {
6978             goto out;
6979         }
6980 #endif
6981 
6982         cur_icd_group_count = 0;
6983         icd_term = inst->icd_terms;
6984         for (uint8_t icd_idx = 0; NULL != icd_term; icd_term = icd_term->next, icd_idx++) {
6985             uint32_t count_this_time = total_count - cur_icd_group_count;
6986 
6987             // Get the function pointer to use to call into the ICD. This could be the core or KHR version
6988             if (inst->enabled_known_extensions.khr_device_group_creation) {
6989                 fpEnumeratePhysicalDeviceGroups = icd_term->dispatch.EnumeratePhysicalDeviceGroupsKHR;
6990             } else {
6991                 fpEnumeratePhysicalDeviceGroups = icd_term->dispatch.EnumeratePhysicalDeviceGroups;
6992             }
6993 
6994             if (NULL == fpEnumeratePhysicalDeviceGroups) {
6995                 icd_term->dispatch.EnumeratePhysicalDevices(icd_term->instance, &count_this_time, NULL);
6996 
6997                 VkPhysicalDevice *phys_dev_array = loader_stack_alloc(sizeof(VkPhysicalDevice) * count_this_time);
6998                 if (NULL == phys_dev_array) {
6999                     loader_log(
7000                         inst, VULKAN_LOADER_ERROR_BIT, 0,
7001                         "terminator_EnumeratePhysicalDeviceGroups:  Failed to allocate local physical device array of size %d",
7002                         count_this_time);
7003                     res = VK_ERROR_OUT_OF_HOST_MEMORY;
7004                     goto out;
7005                 }
7006 
7007                 res = icd_term->dispatch.EnumeratePhysicalDevices(icd_term->instance, &count_this_time, phys_dev_array);
7008                 if (res != VK_SUCCESS) {
7009                     loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
7010                                "terminator_EnumeratePhysicalDeviceGroups:  Failed during dispatch call of "
7011                                "\'EnumeratePhysicalDevices\' to ICD %d to get plain phys dev count.",
7012                                icd_idx);
7013                     goto out;
7014                 }
7015 
7016                 // Add each GPU as its own group
7017                 for (uint32_t indiv_gpu = 0; indiv_gpu < count_this_time; indiv_gpu++) {
7018                     uint32_t cur_index = indiv_gpu + cur_icd_group_count;
7019                     local_phys_dev_groups[cur_index].this_icd_term = icd_term;
7020                     local_phys_dev_groups[cur_index].icd_index = icd_idx;
7021                     local_phys_dev_groups[cur_index].group_props.physicalDeviceCount = 1;
7022                     local_phys_dev_groups[cur_index].group_props.physicalDevices[0] = phys_dev_array[indiv_gpu];
7023                 }
7024 
7025             } else {
7026                 res = fpEnumeratePhysicalDeviceGroups(icd_term->instance, &count_this_time, NULL);
7027                 if (res != VK_SUCCESS) {
7028                     loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
7029                                "terminator_EnumeratePhysicalDeviceGroups:  Failed during dispatch call of "
7030                                "\'EnumeratePhysicalDeviceGroups\' to ICD %d to get group count.",
7031                                icd_idx);
7032                     goto out;
7033                 }
7034                 if (cur_icd_group_count + count_this_time < *pPhysicalDeviceGroupCount) {
7035                     // The running total is still less than the number of physical device group entries passed in
7036                     // by the caller.  Therefore, we don't have to allocate any temporary structures and we
7037                     // can just write into the storage that was passed in.
7038                     res = fpEnumeratePhysicalDeviceGroups(icd_term->instance, &count_this_time,
7039                                                           &pPhysicalDeviceGroupProperties[cur_icd_group_count]);
7040                     if (res != VK_SUCCESS) {
7041                         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
7042                                    "terminator_EnumeratePhysicalDeviceGroups:  Failed during dispatch call of "
7043                                    "\'EnumeratePhysicalDeviceGroups\' to ICD %d to get group information.",
7044                                    icd_idx);
7045                         goto out;
7046                     }
7047                     for (uint32_t group = 0; group < count_this_time; ++group) {
7048                         uint32_t cur_index = group + cur_icd_group_count;
7049                         local_phys_dev_groups[cur_index].group_props = pPhysicalDeviceGroupProperties[cur_index];
7050                         local_phys_dev_groups[cur_index].this_icd_term = icd_term;
7051                         local_phys_dev_groups[cur_index].icd_index = icd_idx;
7052                     }
7053                 } else {
7054                     // There's not enough space in the caller's allocated pPhysicalDeviceGroupProperties structs,
7055                     // so we have to allocate temporary versions to collect all the data.  However, we need to make
7056                     // sure that at least the ones we do query pick up any pNext data from the caller's version.
7057                     VkPhysicalDeviceGroupProperties *tmp_group_props =
7058                         loader_stack_alloc(count_this_time * sizeof(VkPhysicalDeviceGroupProperties));
7059                     for (uint32_t group = 0; group < count_this_time; group++) {
7060                         tmp_group_props[group].sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES;
7061                         uint32_t cur_index = group + cur_icd_group_count;
7062                         if (*pPhysicalDeviceGroupCount > cur_index) {
7063                             tmp_group_props[group].pNext = pPhysicalDeviceGroupProperties[cur_index].pNext;
7064                         } else {
7065                             tmp_group_props[group].pNext = NULL;
7066                         }
7067                         tmp_group_props[group].subsetAllocation = false;
7068                     }
7069 
7070                     res = fpEnumeratePhysicalDeviceGroups(icd_term->instance, &count_this_time, tmp_group_props);
7071                     if (res != VK_SUCCESS) {
7072                         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
7073                                    "terminator_EnumeratePhysicalDeviceGroups:  Failed during dispatch call of "
7074                                    "\'EnumeratePhysicalDeviceGroups\' to ICD %d  to get group information for temp data.",
7075                                    icd_idx);
7076                         goto out;
7077                     }
7078                     for (uint32_t group = 0; group < count_this_time; ++group) {
7079                         uint32_t cur_index = group + cur_icd_group_count;
7080                         local_phys_dev_groups[cur_index].group_props = tmp_group_props[group];
7081                         local_phys_dev_groups[cur_index].this_icd_term = icd_term;
7082                         local_phys_dev_groups[cur_index].icd_index = icd_idx;
7083                     }
7084                 }
7085                 if (VK_SUCCESS != res) {
7086                     loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
7087                                "terminator_EnumeratePhysicalDeviceGroups:  Failed during dispatch call of "
7088                                "\'EnumeratePhysicalDeviceGroups\' to ICD %d to get content.",
7089                                icd_idx);
7090                     goto out;
7091                 }
7092             }
7093 
7094             cur_icd_group_count += count_this_time;
7095         }
7096 
7097 #if defined(LOADER_ENABLE_LINUX_SORT)
7098         if (is_linux_sort_enabled(inst)) {
7099             // Get the physical devices supported by platform sorting mechanism into a separate list
7100             res = linux_sort_physical_device_groups(inst, total_count, local_phys_dev_groups);
7101         }
7102 #elif defined(_WIN32)
7103         // The Windows sorting information applies only to physical devices.  If it's present, we need to take it
7104         // and convert it into group information.
7105         if (sorted_count > 0) {
7106             res =
7107                 windows_sort_physical_device_groups(inst, total_count, local_phys_dev_groups, sorted_count, sorted_phys_dev_array);
7108         }
7109 #endif  // LOADER_ENABLE_LINUX_SORT
7110 
7111         // Just to be safe, make sure we successfully completed setup_loader_term_phys_devs above
7112         // before attempting to do the following.  By verifying that setup_loader_term_phys_devs ran
7113         // first, it guarantees that each physical device will have a loader-specific handle.
7114         if (NULL != inst->phys_devs_term) {
7115             for (uint32_t group = 0; group < total_count; group++) {
7116                 for (uint32_t group_gpu = 0; group_gpu < local_phys_dev_groups[group].group_props.physicalDeviceCount;
7117                      group_gpu++) {
7118                     bool found = false;
7119                     for (uint32_t term_gpu = 0; term_gpu < inst->phys_dev_count_term; term_gpu++) {
7120                         if (local_phys_dev_groups[group].group_props.physicalDevices[group_gpu] ==
7121                             inst->phys_devs_term[term_gpu]->phys_dev) {
7122                             local_phys_dev_groups[group].group_props.physicalDevices[group_gpu] =
7123                                 (VkPhysicalDevice)inst->phys_devs_term[term_gpu];
7124                             found = true;
7125                             break;
7126                         }
7127                     }
7128                     if (!found) {
7129                         loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
7130                                    "terminator_EnumeratePhysicalDeviceGroups:  Failed to find GPU %d in group %d returned by "
7131                                    "\'EnumeratePhysicalDeviceGroups\' in list returned by \'EnumeratePhysicalDevices\'",
7132                                    group_gpu, group);
7133                         res = VK_ERROR_INITIALIZATION_FAILED;
7134                         goto out;
7135                     }
7136                 }
7137             }
7138         }
7139 
7140         uint32_t idx = 0;
7141 
7142         // Copy or create everything to fill the new array of physical device groups
7143         for (uint32_t group = 0; group < total_count; group++) {
7144             // Skip groups which have been included through sorting
7145             if (local_phys_dev_groups[group].group_props.physicalDeviceCount == 0) {
7146                 continue;
7147             }
7148 
7149             // Find the VkPhysicalDeviceGroupProperties object in local_phys_dev_groups
7150             VkPhysicalDeviceGroupProperties *group_properties = &local_phys_dev_groups[group].group_props;
7151 
7152             // Check whether a physical device group with the same contents already exists in the old buffer
7153             for (uint32_t old_idx = 0; old_idx < inst->phys_dev_group_count_term; old_idx++) {
7154                 if (NULL != group_properties && NULL != inst->phys_dev_groups_term[old_idx] &&
7155                     group_properties->physicalDeviceCount == inst->phys_dev_groups_term[old_idx]->physicalDeviceCount) {
7156                     bool found_all_gpus = true;
7157                     for (uint32_t old_gpu = 0; old_gpu < inst->phys_dev_groups_term[old_idx]->physicalDeviceCount; old_gpu++) {
7158                         bool found_gpu = false;
7159                         for (uint32_t new_gpu = 0; new_gpu < group_properties->physicalDeviceCount; new_gpu++) {
7160                             if (group_properties->physicalDevices[new_gpu] ==
7161                                 inst->phys_dev_groups_term[old_idx]->physicalDevices[old_gpu]) {
7162                                 found_gpu = true;
7163                                 break;
7164                             }
7165                         }
7166 
7167                         if (!found_gpu) {
7168                             found_all_gpus = false;
7169                             break;
7170                         }
7171                     }
7172                     if (!found_all_gpus) {
7173                         continue;
7174                     } else {
7175                         new_phys_dev_groups[idx] = inst->phys_dev_groups_term[old_idx];
7176                         break;
7177                     }
7178                 }
7179             }
7180             // If this physical device group isn't in the old buffer, create it
7181             if (group_properties != NULL && NULL == new_phys_dev_groups[idx]) {
7182                 new_phys_dev_groups[idx] = (VkPhysicalDeviceGroupProperties *)loader_instance_heap_alloc(
7183                     inst, sizeof(VkPhysicalDeviceGroupProperties), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
7184                 if (NULL == new_phys_dev_groups[idx]) {
7185                     loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
7186                                "terminator_EnumeratePhysicalDeviceGroups:  Failed to allocate physical device group Terminator "
7187                                "object %d",
7188                                idx);
7189                     total_count = idx;
7190                     res = VK_ERROR_OUT_OF_HOST_MEMORY;
7191                     goto out;
7192                 }
7193                 memcpy(new_phys_dev_groups[idx], group_properties, sizeof(VkPhysicalDeviceGroupProperties));
7194             }
7195 
7196             ++idx;
7197         }
7198     }
7199 
7200 out:
7201 
7202     if (NULL != pPhysicalDeviceGroupProperties) {
7203         if (VK_SUCCESS != res) {
7204             if (NULL != new_phys_dev_groups) {
7205                 // We've encountered an error, so we should free the new buffers.
7206                 for (uint32_t i = 0; i < total_count; i++) {
7207                     // If an OOM occurred while copying the new physical device groups into the existing array,
7208                     // some of the old physical device groups may already have been carried over into the new array,
7209                     // and freeing them here would free them twice. To avoid this, skip any physical device group
7210                     // that was carried over from the old array.
7211                     bool found = false;
7212                     if (NULL != inst->phys_devs_term) {
7213                         for (uint32_t old_idx = 0; old_idx < inst->phys_dev_group_count_term; old_idx++) {
7214                             if (new_phys_dev_groups[i] == inst->phys_dev_groups_term[old_idx]) {
7215                                 found = true;
7216                                 break;
7217                             }
7218                         }
7219                     }
7220                     if (!found) {
7221                         loader_instance_heap_free(inst, new_phys_dev_groups[i]);
7222                     }
7223                 }
7224                 loader_instance_heap_free(inst, new_phys_dev_groups);
7225             }
7226         } else {
7227             if (NULL != inst->phys_dev_groups_term) {
7228                 // Free everything in the old array that was not copied into the new array.
7229                 // We can't attempt to do this any earlier because the loop above the "out:"
7230                 // label may still hit an out-of-memory condition, which would otherwise
7231                 // result in leaked memory.
7232                 for (uint32_t i = 0; i < inst->phys_dev_group_count_term; i++) {
7233                     bool found = false;
7234                     for (uint32_t j = 0; j < total_count; j++) {
7235                         if (inst->phys_dev_groups_term[i] == new_phys_dev_groups[j]) {
7236                             found = true;
7237                             break;
7238                         }
7239                     }
7240                     if (!found) {
7241                         loader_instance_heap_free(inst, inst->phys_dev_groups_term[i]);
7242                     }
7243                 }
7244                 loader_instance_heap_free(inst, inst->phys_dev_groups_term);
7245             }
7246 
7247             // Swap in the new physical device group list
7248             inst->phys_dev_group_count_term = total_count;
7249             inst->phys_dev_groups_term = new_phys_dev_groups;
7250         }
7251 
7252         if (sorted_phys_dev_array != NULL) {
7253             for (uint32_t i = 0; i < sorted_count; ++i) {
7254                 if (sorted_phys_dev_array[i].device_count > 0 && sorted_phys_dev_array[i].physical_devices != NULL) {
7255                     loader_instance_heap_free(inst, sorted_phys_dev_array[i].physical_devices);
7256                 }
7257             }
7258             loader_instance_heap_free(inst, sorted_phys_dev_array);
7259         }
7260 
7261         uint32_t copy_count = inst->phys_dev_group_count_term;
7262         if (NULL != pPhysicalDeviceGroupProperties) {
7263             if (copy_count > *pPhysicalDeviceGroupCount) {
7264                 copy_count = *pPhysicalDeviceGroupCount;
7265                 loader_log(inst, VULKAN_LOADER_INFO_BIT, 0,
7266                            "terminator_EnumeratePhysicalDeviceGroups : Trimming device count from %d to %d.",
7267                            inst->phys_dev_group_count_term, copy_count);
7268                 res = VK_INCOMPLETE;
7269             }
7270 
7271             for (uint32_t i = 0; i < copy_count; i++) {
7272                 memcpy(&pPhysicalDeviceGroupProperties[i], inst->phys_dev_groups_term[i], sizeof(VkPhysicalDeviceGroupProperties));
7273             }
7274         }
7275 
7276         *pPhysicalDeviceGroupCount = copy_count;
7277 
7278     } else {
7279         *pPhysicalDeviceGroupCount = total_count;
7280     }
7281     return res;
7282 }
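/* For reference, the group terminator above preserves any pNext chains supplied by the caller, which
 * is why the temporary structures are seeded from pPhysicalDeviceGroupProperties. A minimal
 * application-side sketch of the expected usage (error handling omitted, not part of the loader):
 *
 *     uint32_t group_count = 0;
 *     vkEnumeratePhysicalDeviceGroups(instance, &group_count, NULL);
 *     VkPhysicalDeviceGroupProperties *groups = calloc(group_count, sizeof(*groups));
 *     for (uint32_t i = 0; i < group_count; i++) {
 *         groups[i].sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES;
 *         groups[i].pNext = NULL;
 *     }
 *     vkEnumeratePhysicalDeviceGroups(instance, &group_count, groups);
 */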
7283