1 /*
2 *
3 * Copyright (c) 2014-2023 The Khronos Group Inc.
4 * Copyright (c) 2014-2023 Valve Corporation
5 * Copyright (c) 2014-2023 LunarG, Inc.
6 * Copyright (C) 2015 Google Inc.
7 * Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
8 * Copyright (c) 2023-2023 RasterGrid Kft.
9 *
10 * Licensed under the Apache License, Version 2.0 (the "License");
11 * you may not use this file except in compliance with the License.
12 * You may obtain a copy of the License at
13 *
14 * http://www.apache.org/licenses/LICENSE-2.0
15 *
16 * Unless required by applicable law or agreed to in writing, software
17 * distributed under the License is distributed on an "AS IS" BASIS,
18 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 * See the License for the specific language governing permissions and
20 * limitations under the License.
21
22 *
23 * Author: Jon Ashburn <jon@lunarg.com>
24 * Author: Courtney Goeltzenleuchter <courtney@LunarG.com>
25 * Author: Mark Young <marky@lunarg.com>
26 * Author: Lenny Komow <lenny@lunarg.com>
27 * Author: Charles Giessen <charles@lunarg.com>
28 *
29 */
30
31 #include "loader.h"
32
33 #include <ctype.h>
34 #include <inttypes.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <stdarg.h>
38 #include <stdbool.h>
39 #include <string.h>
40 #include <stddef.h>
41
42 #if defined(__APPLE__)
43 #include <CoreFoundation/CoreFoundation.h>
44 #include <sys/param.h>
45 #endif
46
47 #include <sys/types.h>
48 #if defined(_WIN32)
49 #include "dirent_on_windows.h"
50 #elif COMMON_UNIX_PLATFORMS
51 #include <dirent.h>
52 #else
53 #warning dirent.h not available on this platform
54 #endif // _WIN32
55
56 #include "allocation.h"
57 #include "cJSON.h"
58 #include "debug_utils.h"
59 #include "loader_environment.h"
60 #include "gpa_helper.h"
61 #include "log.h"
62 #include "unknown_function_handling.h"
63 #include "vk_loader_platform.h"
64 #include "wsi.h"
65
66 #if defined(WIN32)
67 #include "loader_windows.h"
68 #endif
69 #if defined(LOADER_ENABLE_LINUX_SORT)
70 // This header is currently only used when sorting Linux devices, so don't include it otherwise.
71 #include "loader_linux.h"
72 #endif // LOADER_ENABLE_LINUX_SORT
73
74 // Generated file containing all the extension data
75 #include "vk_loader_extensions.c"
76
77 struct loader_struct loader = {0};
78
// Bookkeeping record for a layer that has been activated.
// NOTE(review): the fields mirror manifest-derived layer data (name, manifest path,
// library path, implicit flag, disable environment variable), but no users of this
// struct are visible in this portion of the file — confirm semantics at the use sites.
struct activated_layer_info {
    char *name;         // layer name (presumably the manifest "name" field)
    char *manifest;     // path to the layer's JSON manifest — TODO confirm
    char *library;      // path to the layer's shared library — TODO confirm
    bool is_implicit;   // true when the layer was enabled implicitly rather than by the app
    char *disable_env;  // environment variable that disables the layer — TODO confirm
};
86
87 // thread safety lock for accessing global data structures such as "loader"
88 // all entrypoints on the instance chain need to be locked except GPA
89 // additionally CreateDevice and DestroyDevice needs to be locked
90 loader_platform_thread_mutex loader_lock;
91 loader_platform_thread_mutex loader_preload_icd_lock;
92 loader_platform_thread_mutex loader_global_instance_list_lock;
93
94 // A list of ICDs that gets initialized when the loader does its global initialization. This list should never be used by anything
95 // other than EnumerateInstanceExtensionProperties(), vkDestroyInstance, and loader_release(). This list does not change
96 // functionality, but the fact that the libraries already been loaded causes any call that needs to load ICD libraries to speed up
97 // significantly. This can have a huge impact when making repeated calls to vkEnumerateInstanceExtensionProperties and
98 // vkCreateInstance.
99 struct loader_icd_tramp_list scanned_icds;
100
101 // controls whether loader_platform_close_library() closes the libraries or not - controlled by an environment
102 // variables - this is just the definition of the variable, usage is in vk_loader_platform.h
103 bool loader_disable_dynamic_library_unloading;
104
105 LOADER_PLATFORM_THREAD_ONCE_DECLARATION(once_init);
106
107 // Creates loader_api_version struct that contains the major and minor fields, setting patch to 0
loader_make_version(uint32_t version)108 loader_api_version loader_make_version(uint32_t version) {
109 loader_api_version out_version;
110 out_version.major = VK_API_VERSION_MAJOR(version);
111 out_version.minor = VK_API_VERSION_MINOR(version);
112 out_version.patch = 0;
113 return out_version;
114 }
115
116 // Creates loader_api_version struct containing the major, minor, and patch fields
loader_make_full_version(uint32_t version)117 loader_api_version loader_make_full_version(uint32_t version) {
118 loader_api_version out_version;
119 out_version.major = VK_API_VERSION_MAJOR(version);
120 out_version.minor = VK_API_VERSION_MINOR(version);
121 out_version.patch = VK_API_VERSION_PATCH(version);
122 return out_version;
123 }
124
loader_combine_version(uint32_t major,uint32_t minor,uint32_t patch)125 loader_api_version loader_combine_version(uint32_t major, uint32_t minor, uint32_t patch) {
126 loader_api_version out_version;
127 out_version.major = (uint16_t)major;
128 out_version.minor = (uint16_t)minor;
129 out_version.patch = (uint16_t)patch;
130 return out_version;
131 }
132
133 // Helper macros for determining if a version is valid or not
loader_check_version_meets_required(loader_api_version required,loader_api_version version)134 bool loader_check_version_meets_required(loader_api_version required, loader_api_version version) {
135 // major version is satisfied
136 return (version.major > required.major) ||
137 // major version is equal, minor version is patch version is greater to minimum minor
138 (version.major == required.major && version.minor > required.minor) ||
139 // major and minor version are equal, patch version is greater or equal to minimum patch
140 (version.major == required.major && version.minor == required.minor && version.patch >= required.patch);
141 }
142
// Wrapper around opendir: the Windows implementation (dirent_on_windows) needs the
// instance's allocation callbacks, while the POSIX opendir takes only the path.
// Returns NULL on failure, matching the underlying opendir behavior.
DIR *loader_opendir(const struct loader_instance *instance, const char *name) {
#if defined(_WIN32)
    // instance may be NULL (e.g. pre-instance scanning); fall back to default allocation
    return opendir(instance ? &instance->alloc_callbacks : NULL, name);
#elif COMMON_UNIX_PLATFORMS
    (void)instance;
    return opendir(name);
#else
#warning dirent.h - opendir not available on this platform
#endif  // _WIN32
}
// Counterpart to loader_opendir: closes a directory handle, routing the instance's
// allocation callbacks to the Windows dirent implementation when needed.
int loader_closedir(const struct loader_instance *instance, DIR *dir) {
#if defined(_WIN32)
    return closedir(instance ? &instance->alloc_callbacks : NULL, dir);
#elif COMMON_UNIX_PLATFORMS
    (void)instance;
    return closedir(dir);
#else
#warning dirent.h - closedir not available on this platform
#endif  // _WIN32
}
165
// Returns true when the first five characters of `path` are exactly ".json".
// Note this is a prefix comparison, not a suffix check — callers presumably pass a
// pointer already positioned at the expected extension start (verify at call sites).
bool is_json(const char *path, size_t len) {
    if (len < 5) {
        return false;
    }
    return strncmp(path, ".json", 5) == 0;
}
172
173 // Handle error from to library loading
loader_handle_load_library_error(const struct loader_instance * inst,const char * filename,enum loader_layer_library_status * lib_status)174 void loader_handle_load_library_error(const struct loader_instance *inst, const char *filename,
175 enum loader_layer_library_status *lib_status) {
176 const char *error_message = loader_platform_open_library_error(filename);
177 // If the error is due to incompatible architecture (eg 32 bit vs 64 bit), report it with INFO level
178 // Discussed in Github issue 262 & 644
179 // "wrong ELF class" is a linux error, " with error 193" is a windows error
180 VkFlags err_flag = VULKAN_LOADER_ERROR_BIT;
181 if (strstr(error_message, "wrong ELF class:") != NULL || strstr(error_message, " with error 193") != NULL) {
182 err_flag = VULKAN_LOADER_INFO_BIT;
183 if (NULL != lib_status) {
184 *lib_status = LOADER_LAYER_LIB_ERROR_WRONG_BIT_TYPE;
185 }
186 }
187 // Check if the error is due to lack of memory
188 // "with error 8" is the windows error code for OOM cases, aka ERROR_NOT_ENOUGH_MEMORY
189 // Linux doesn't have such a nice error message - only if there are reported issues should this be called
190 else if (strstr(error_message, " with error 8") != NULL) {
191 if (NULL != lib_status) {
192 *lib_status = LOADER_LAYER_LIB_ERROR_OUT_OF_MEMORY;
193 }
194 } else if (NULL != lib_status) {
195 *lib_status = LOADER_LAYER_LIB_ERROR_FAILED_TO_LOAD;
196 }
197 loader_log(inst, err_flag, 0, error_message);
198 }
199
vkSetInstanceDispatch(VkInstance instance,void * object)200 VKAPI_ATTR VkResult VKAPI_CALL vkSetInstanceDispatch(VkInstance instance, void *object) {
201 struct loader_instance *inst = loader_get_instance(instance);
202 if (!inst) {
203 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "vkSetInstanceDispatch: Can not retrieve Instance dispatch table.");
204 return VK_ERROR_INITIALIZATION_FAILED;
205 }
206 loader_set_dispatch(object, inst->disp);
207 return VK_SUCCESS;
208 }
209
vkSetDeviceDispatch(VkDevice device,void * object)210 VKAPI_ATTR VkResult VKAPI_CALL vkSetDeviceDispatch(VkDevice device, void *object) {
211 struct loader_device *dev;
212 struct loader_icd_term *icd_term = loader_get_icd_and_device(device, &dev, NULL);
213
214 if (NULL == icd_term || NULL == dev) {
215 return VK_ERROR_INITIALIZATION_FAILED;
216 }
217 loader_set_dispatch(object, &dev->loader_dispatch);
218 return VK_SUCCESS;
219 }
220
// Free every heap allocation owned by a loader_layer_properties structure, then zero
// the structure so the slot can be reused.
// NOTE(review): members that were never populated are expected to be NULL/empty —
// the free helpers presumably tolerate that; confirm in allocation.c.
void loader_free_layer_properties(const struct loader_instance *inst, struct loader_layer_properties *layer_properties) {
    loader_instance_heap_free(inst, layer_properties->manifest_file_name);
    loader_instance_heap_free(inst, layer_properties->lib_name);
    loader_instance_heap_free(inst, layer_properties->functions.str_gipa);
    loader_instance_heap_free(inst, layer_properties->functions.str_gdpa);
    loader_instance_heap_free(inst, layer_properties->functions.str_negotiate_interface);
    loader_destroy_generic_list(inst, (struct loader_generic_list *)&layer_properties->instance_extension_list);
    // Each device extension entry owns a string list of entrypoints; free those before
    // destroying the containing list.
    if (layer_properties->device_extension_list.capacity > 0 && NULL != layer_properties->device_extension_list.list) {
        for (uint32_t i = 0; i < layer_properties->device_extension_list.count; i++) {
            free_string_list(inst, &layer_properties->device_extension_list.list[i].entrypoints);
        }
    }
    loader_destroy_generic_list(inst, (struct loader_generic_list *)&layer_properties->device_extension_list);
    loader_instance_heap_free(inst, layer_properties->disable_env_var.name);
    loader_instance_heap_free(inst, layer_properties->disable_env_var.value);
    loader_instance_heap_free(inst, layer_properties->enable_env_var.name);
    loader_instance_heap_free(inst, layer_properties->enable_env_var.value);
    free_string_list(inst, &layer_properties->component_layer_names);
    loader_instance_heap_free(inst, layer_properties->pre_instance_functions.enumerate_instance_extension_properties);
    loader_instance_heap_free(inst, layer_properties->pre_instance_functions.enumerate_instance_layer_properties);
    loader_instance_heap_free(inst, layer_properties->pre_instance_functions.enumerate_instance_version);
    free_string_list(inst, &layer_properties->override_paths);
    free_string_list(inst, &layer_properties->blacklist_layer_names);
    free_string_list(inst, &layer_properties->app_key_paths);

    // Make sure to clear out the removed layer, in case new layers are added in the previous location
    memset(layer_properties, 0, sizeof(struct loader_layer_properties));
}
249
loader_init_library_list(struct loader_layer_list * instance_layers,loader_platform_dl_handle ** libs)250 VkResult loader_init_library_list(struct loader_layer_list *instance_layers, loader_platform_dl_handle **libs) {
251 if (instance_layers->count > 0) {
252 *libs = loader_calloc(NULL, sizeof(loader_platform_dl_handle) * instance_layers->count, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
253 if (*libs == NULL) {
254 return VK_ERROR_OUT_OF_HOST_MEMORY;
255 }
256 }
257 return VK_SUCCESS;
258 }
259
loader_copy_to_new_str(const struct loader_instance * inst,const char * source_str,char ** dest_str)260 VkResult loader_copy_to_new_str(const struct loader_instance *inst, const char *source_str, char **dest_str) {
261 assert(source_str && dest_str);
262 size_t str_len = strlen(source_str) + 1;
263 *dest_str = loader_instance_heap_calloc(inst, str_len, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
264 if (NULL == *dest_str) return VK_ERROR_OUT_OF_HOST_MEMORY;
265 loader_strncpy(*dest_str, str_len, source_str, str_len);
266 (*dest_str)[str_len - 1] = 0;
267 return VK_SUCCESS;
268 }
269
create_string_list(const struct loader_instance * inst,uint32_t allocated_count,struct loader_string_list * string_list)270 VkResult create_string_list(const struct loader_instance *inst, uint32_t allocated_count, struct loader_string_list *string_list) {
271 assert(string_list);
272 string_list->list = loader_instance_heap_calloc(inst, sizeof(char *) * allocated_count, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
273 if (NULL == string_list->list) {
274 return VK_ERROR_OUT_OF_HOST_MEMORY;
275 }
276 string_list->allocated_count = allocated_count;
277 string_list->count = 0;
278 return VK_SUCCESS;
279 }
280
append_str_to_string_list(const struct loader_instance * inst,struct loader_string_list * string_list,char * str)281 VkResult append_str_to_string_list(const struct loader_instance *inst, struct loader_string_list *string_list, char *str) {
282 assert(string_list && str);
283 if (string_list->allocated_count == 0) {
284 string_list->allocated_count = 32;
285 string_list->list =
286 loader_instance_heap_calloc(inst, sizeof(char *) * string_list->allocated_count, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
287 if (NULL == string_list->list) {
288 return VK_ERROR_OUT_OF_HOST_MEMORY;
289 }
290 } else if (string_list->count + 1 > string_list->allocated_count) {
291 uint32_t new_allocated_count = string_list->allocated_count * 2;
292 string_list->list = loader_instance_heap_realloc(inst, string_list->list, sizeof(char *) * string_list->allocated_count,
293 sizeof(char *) * new_allocated_count, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
294 if (NULL == string_list->list) {
295 return VK_ERROR_OUT_OF_HOST_MEMORY;
296 }
297 // Null out the new space
298 memset(string_list->list + string_list->allocated_count, 0, string_list->allocated_count);
299 string_list->allocated_count *= 2;
300 }
301 string_list->list[string_list->count++] = str;
302 return VK_SUCCESS;
303 }
304
copy_str_to_string_list(const struct loader_instance * inst,struct loader_string_list * string_list,const char * str,size_t str_len)305 VkResult copy_str_to_string_list(const struct loader_instance *inst, struct loader_string_list *string_list, const char *str,
306 size_t str_len) {
307 assert(string_list && str);
308 char *new_str = loader_instance_heap_calloc(inst, sizeof(char *) * str_len + 1, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
309 if (NULL == new_str) {
310 return VK_ERROR_OUT_OF_HOST_MEMORY;
311 }
312 loader_strncpy(new_str, sizeof(char *) * str_len + 1, str, str_len);
313 new_str[str_len] = '\0';
314 VkResult res = append_str_to_string_list(inst, string_list, new_str);
315 if (res != VK_SUCCESS) {
316 // Cleanup new_str if the append failed - as append_str_to_string_list takes ownership but not if the function fails
317 loader_instance_heap_free(inst, new_str);
318 }
319 return res;
320 }
321
free_string_list(const struct loader_instance * inst,struct loader_string_list * string_list)322 void free_string_list(const struct loader_instance *inst, struct loader_string_list *string_list) {
323 assert(string_list);
324 if (string_list->list) {
325 for (uint32_t i = 0; i < string_list->count; i++) {
326 loader_instance_heap_free(inst, string_list->list[i]);
327 string_list->list[i] = NULL;
328 }
329 loader_instance_heap_free(inst, string_list->list);
330 }
331 memset(string_list, 0, sizeof(struct loader_string_list));
332 }
333
// Given string of three part form "maj.min.pat" convert to a vulkan version number.
// Also can understand four part form "variant.major.minor.patch" if provided.
// Returns 0 when vers_str is NULL; non-numeric components parse as 0 via atoi.
// NOTE: the input string is modified in place by thread_safe_strtok.
uint32_t loader_parse_version_string(char *vers_str) {
    uint32_t variant = 0, major = 0, minor = 0, patch = 0;
    char *vers_tok;
    char *context = NULL;  // tokenizer state, kept local for thread safety
    if (!vers_str) {
        return 0;
    }

    // Delimiters include quotes and line endings — presumably so values copied out of
    // JSON manifests ("1.2.3") tokenize cleanly; confirm with callers.
    vers_tok = thread_safe_strtok(vers_str, ".\"\n\r", &context);
    if (NULL != vers_tok) {
        major = (uint16_t)atoi(vers_tok);
        vers_tok = thread_safe_strtok(NULL, ".\"\n\r", &context);
        if (NULL != vers_tok) {
            minor = (uint16_t)atoi(vers_tok);
            vers_tok = thread_safe_strtok(NULL, ".\"\n\r", &context);
            if (NULL != vers_tok) {
                patch = (uint16_t)atoi(vers_tok);
                vers_tok = thread_safe_strtok(NULL, ".\"\n\r", &context);
                // check that we are using a 4 part version string
                if (NULL != vers_tok) {
                    // if we are, move the values over into the correct place
                    variant = major;
                    major = minor;
                    minor = patch;
                    patch = (uint16_t)atoi(vers_tok);
                }
            }
        }
    }

    return VK_MAKE_API_VERSION(variant, major, minor, patch);
}
368
compare_vk_extension_properties(const VkExtensionProperties * op1,const VkExtensionProperties * op2)369 bool compare_vk_extension_properties(const VkExtensionProperties *op1, const VkExtensionProperties *op2) {
370 return strcmp(op1->extensionName, op2->extensionName) == 0 ? true : false;
371 }
372
373 // Search the given ext_array for an extension matching the given vk_ext_prop
has_vk_extension_property_array(const VkExtensionProperties * vk_ext_prop,const uint32_t count,const VkExtensionProperties * ext_array)374 bool has_vk_extension_property_array(const VkExtensionProperties *vk_ext_prop, const uint32_t count,
375 const VkExtensionProperties *ext_array) {
376 for (uint32_t i = 0; i < count; i++) {
377 if (compare_vk_extension_properties(vk_ext_prop, &ext_array[i])) return true;
378 }
379 return false;
380 }
381
382 // Search the given ext_list for an extension matching the given vk_ext_prop
has_vk_extension_property(const VkExtensionProperties * vk_ext_prop,const struct loader_extension_list * ext_list)383 bool has_vk_extension_property(const VkExtensionProperties *vk_ext_prop, const struct loader_extension_list *ext_list) {
384 for (uint32_t i = 0; i < ext_list->count; i++) {
385 if (compare_vk_extension_properties(&ext_list->list[i], vk_ext_prop)) return true;
386 }
387 return false;
388 }
389
390 // Search the given ext_list for a device extension matching the given ext_prop
has_vk_dev_ext_property(const VkExtensionProperties * ext_prop,const struct loader_device_extension_list * ext_list)391 bool has_vk_dev_ext_property(const VkExtensionProperties *ext_prop, const struct loader_device_extension_list *ext_list) {
392 for (uint32_t i = 0; i < ext_list->count; i++) {
393 if (compare_vk_extension_properties(&ext_list->list[i].props, ext_prop)) return true;
394 }
395 return false;
396 }
397
// Append the given layer property onto layer_list, initializing or doubling the list's
// backing storage as needed. On success ownership of layer_property's contents moves
// into the list and *layer_property is zeroed; on failure layer_property's allocations
// are freed, so the caller never needs to clean up after this call.
VkResult loader_append_layer_property(const struct loader_instance *inst, struct loader_layer_list *layer_list,
                                      struct loader_layer_properties *layer_property) {
    VkResult res = VK_SUCCESS;
    if (layer_list->capacity == 0) {
        res = loader_init_generic_list(inst, (struct loader_generic_list *)layer_list, sizeof(struct loader_layer_properties));
        if (VK_SUCCESS != res) {
            goto out;
        }
    }

    // Ensure enough room to add an entry
    if ((layer_list->count + 1) * sizeof(struct loader_layer_properties) > layer_list->capacity) {
        void *new_ptr = loader_instance_heap_realloc(inst, layer_list->list, layer_list->capacity, layer_list->capacity * 2,
                                                     VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
        if (NULL == new_ptr) {
            loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_append_layer_property: realloc failed for layer list");
            res = VK_ERROR_OUT_OF_HOST_MEMORY;
            goto out;
        }
        layer_list->list = new_ptr;
        // Zero the newly grown half of the buffer (realloc leaves it uninitialized)
        memset((uint8_t *)layer_list->list + layer_list->capacity, 0, layer_list->capacity);
        layer_list->capacity *= 2;
    }
    // Shallow-copy into the list, then zero the source so its heap allocations are
    // owned solely by the list entry (prevents double frees)
    memcpy(&layer_list->list[layer_list->count], layer_property, sizeof(struct loader_layer_properties));
    layer_list->count++;
    memset(layer_property, 0, sizeof(struct loader_layer_properties));
out:
    if (res != VK_SUCCESS) {
        loader_free_layer_properties(inst, layer_property);
    }
    return res;
}
430
431 // Search the given layer list for a layer property matching the given layer name
loader_find_layer_property(const char * name,const struct loader_layer_list * layer_list)432 struct loader_layer_properties *loader_find_layer_property(const char *name, const struct loader_layer_list *layer_list) {
433 for (uint32_t i = 0; i < layer_list->count; i++) {
434 const VkLayerProperties *item = &layer_list->list[i].info;
435 if (strcmp(name, item->layerName) == 0) return &layer_list->list[i];
436 }
437 return NULL;
438 }
439
loader_find_pointer_layer_property(const char * name,const struct loader_pointer_layer_list * layer_list)440 struct loader_layer_properties *loader_find_pointer_layer_property(const char *name,
441 const struct loader_pointer_layer_list *layer_list) {
442 for (uint32_t i = 0; i < layer_list->count; i++) {
443 const VkLayerProperties *item = &layer_list->list[i]->info;
444 if (strcmp(name, item->layerName) == 0) return layer_list->list[i];
445 }
446 return NULL;
447 }
448
// Search the given layer list for a layer matching the given layer name.
// Tolerates a NULL layer_list (returns false).
bool loader_find_layer_name_in_list(const char *name, const struct loader_pointer_layer_list *layer_list) {
    return NULL != layer_list && NULL != loader_find_pointer_layer_property(name, layer_list);
}
459
460 // Search the given meta-layer's component list for a layer matching the given layer name
loader_find_layer_name_in_meta_layer(const struct loader_instance * inst,const char * layer_name,struct loader_layer_list * layer_list,struct loader_layer_properties * meta_layer_props)461 bool loader_find_layer_name_in_meta_layer(const struct loader_instance *inst, const char *layer_name,
462 struct loader_layer_list *layer_list, struct loader_layer_properties *meta_layer_props) {
463 for (uint32_t comp_layer = 0; comp_layer < meta_layer_props->component_layer_names.count; comp_layer++) {
464 if (!strcmp(meta_layer_props->component_layer_names.list[comp_layer], layer_name)) {
465 return true;
466 }
467 struct loader_layer_properties *comp_layer_props =
468 loader_find_layer_property(meta_layer_props->component_layer_names.list[comp_layer], layer_list);
469 if (comp_layer_props->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER) {
470 return loader_find_layer_name_in_meta_layer(inst, layer_name, layer_list, comp_layer_props);
471 }
472 }
473 return false;
474 }
475
476 // Search the override layer's blacklist for a layer matching the given layer name
loader_find_layer_name_in_blacklist(const char * layer_name,struct loader_layer_properties * meta_layer_props)477 bool loader_find_layer_name_in_blacklist(const char *layer_name, struct loader_layer_properties *meta_layer_props) {
478 for (uint32_t black_layer = 0; black_layer < meta_layer_props->blacklist_layer_names.count; ++black_layer) {
479 if (!strcmp(meta_layer_props->blacklist_layer_names.list[black_layer], layer_name)) {
480 return true;
481 }
482 }
483 return false;
484 }
485
// Remove all layer properties entries from the list: unload any still-open layer
// libraries, free each entry's allocations, release the backing storage, and zero
// the list structure. Safe to call with a NULL layer_list.
void loader_delete_layer_list_and_properties(const struct loader_instance *inst, struct loader_layer_list *layer_list) {
    uint32_t i;
    if (!layer_list) return;

    for (i = 0; i < layer_list->count; i++) {
        // Close the layer's library handle first so the log message can still read lib_name
        if (layer_list->list[i].lib_handle) {
            loader_platform_close_library(layer_list->list[i].lib_handle);
            loader_log(inst, VULKAN_LOADER_DEBUG_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Unloading layer library %s",
                       layer_list->list[i].lib_name);
            layer_list->list[i].lib_handle = NULL;
        }
        loader_free_layer_properties(inst, &(layer_list->list[i]));
    }
    layer_list->count = 0;

    if (layer_list->capacity > 0) {
        layer_list->capacity = 0;
        loader_instance_heap_free(inst, layer_list->list);
    }
    memset(layer_list, 0, sizeof(struct loader_layer_list));
}
508
// Remove the entry at index layer_to_remove from layer_list: free its allocations and
// shift the remaining entries down by one. Out-of-range indices and NULL lists are
// ignored. Callers iterating the list must re-check the current index after this call.
void loader_remove_layer_in_list(const struct loader_instance *inst, struct loader_layer_list *layer_list,
                                 uint32_t layer_to_remove) {
    if (layer_list == NULL || layer_to_remove >= layer_list->count) {
        return;
    }
    loader_free_layer_properties(inst, &(layer_list->list[layer_to_remove]));

    // Remove the current invalid meta-layer from the layer list. Use memmove since we are
    // overlapping the source and destination addresses.
    memmove(&layer_list->list[layer_to_remove], &layer_list->list[layer_to_remove + 1],
            sizeof(struct loader_layer_properties) * (layer_list->count - 1 - layer_to_remove));

    // Decrement the count (because we now have one less) and decrement the loop index since we need to
    // re-check this index.
    layer_list->count--;
}
525
// Remove all layers in the layer list that are blacklisted by the override layer.
// NOTE: This should only be called if an override layer is found and not expired.
void loader_remove_layers_in_blacklist(const struct loader_instance *inst, struct loader_layer_list *layer_list) {
    struct loader_layer_properties *override_prop = loader_find_layer_property(VK_OVERRIDE_LAYER_NAME, layer_list);
    if (NULL == override_prop) {
        return;
    }

    // Signed index so the post-removal `j--` re-checks the shifted-down entry
    for (int32_t j = 0; j < (int32_t)(layer_list->count); j++) {
        // By-value copy: keeps the layer name readable even while list entries are
        // freed and shifted by loader_remove_layer_in_list below
        struct loader_layer_properties cur_layer_prop = layer_list->list[j];
        const char *cur_layer_name = &cur_layer_prop.info.layerName[0];

        // Skip the override layer itself.
        if (!strcmp(VK_OVERRIDE_LAYER_NAME, cur_layer_name)) {
            continue;
        }

        // If found in the override layer's blacklist, remove it
        if (loader_find_layer_name_in_blacklist(cur_layer_name, override_prop)) {
            loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0,
                       "loader_remove_layers_in_blacklist: Override layer is active and layer %s is in the blacklist inside of it. "
                       "Removing that layer from current layer list.",
                       cur_layer_name);
            loader_remove_layer_in_list(inst, layer_list, j);
            j--;

            // Re-do the query for the override layer — removal shifted list entries, so
            // the previously cached pointer may be stale
            override_prop = loader_find_layer_property(VK_OVERRIDE_LAYER_NAME, layer_list);
        }
    }
}
557
// Remove all layers in the layer list that are not found inside any implicit meta-layers.
// Pass 1: mark every non-explicit layer as kept. Pass 2: mark explicit layers that any
// meta-layer (transitively) references. Pass 3: delete everything still unmarked.
void loader_remove_layers_not_in_implicit_meta_layers(const struct loader_instance *inst, struct loader_layer_list *layer_list) {
    int32_t i;
    int32_t j;
    int32_t layer_count = (int32_t)(layer_list->count);

    for (i = 0; i < layer_count; i++) {
        layer_list->list[i].keep = false;
    }

    for (i = 0; i < layer_count; i++) {
        struct loader_layer_properties *cur_layer_prop = &layer_list->list[i];

        // Implicit (non-explicit) layers are always kept
        if (0 == (cur_layer_prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER)) {
            cur_layer_prop->keep = true;
            continue;
        }
        for (j = 0; j < layer_count; j++) {
            struct loader_layer_properties *layer_to_check = &layer_list->list[j];

            if (i == j) {
                continue;
            }

            if (layer_to_check->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER) {
                // For all layers found in this meta layer, we want to keep them as well.
                if (loader_find_layer_name_in_meta_layer(inst, cur_layer_prop->info.layerName, layer_list, layer_to_check)) {
                    cur_layer_prop->keep = true;
                }
            }
        }
    }

    // Remove any layers we don't want to keep (Don't use layer_count here as we need it to be
    // dynamically updated if we delete a layer property in the list).
    for (i = 0; i < (int32_t)(layer_list->count); i++) {
        struct loader_layer_properties *cur_layer_prop = &layer_list->list[i];
        if (!cur_layer_prop->keep) {
            loader_log(
                inst, VULKAN_LOADER_DEBUG_BIT, 0,
                "loader_remove_layers_not_in_implicit_meta_layers : Implicit meta-layers are active, and layer %s is not list "
                "inside of any. So removing layer from current layer list.",
                cur_layer_prop->info.layerName);
            loader_remove_layer_in_list(inst, layer_list, i);
            i--;  // removal shifted entries down; re-check this index
        }
    }
}
606
// Query instance extensions from fp_get_props (a library's
// vkEnumerateInstanceExtensionProperties) and append the supported ones to ext_list.
// Silently succeeds when fp_get_props is NULL or (after logging) when it points back
// at the loader's own entrypoint. Extensions the loader's WSI support rejects are
// filtered out. lib_name is used only for log messages.
VkResult loader_add_instance_extensions(const struct loader_instance *inst,
                                        const PFN_vkEnumerateInstanceExtensionProperties fp_get_props, const char *lib_name,
                                        struct loader_extension_list *ext_list) {
    uint32_t i, count = 0;
    VkExtensionProperties *ext_props;
    VkResult res = VK_SUCCESS;

    if (!fp_get_props) {
        // No EnumerateInstanceExtensionProperties defined
        goto out;
    }

    // Make sure we never call ourself by accident, this should never happen outside of error paths
    if (fp_get_props == vkEnumerateInstanceExtensionProperties) {
        loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
                   "loader_add_instance_extensions: %s's vkEnumerateInstanceExtensionProperties points to the loader, this would "
                   "lead to infinite recursion.",
                   lib_name);
        goto out;
    }

    // Standard two-call pattern: first get the count, then the properties
    res = fp_get_props(NULL, &count, NULL);
    if (res != VK_SUCCESS) {
        loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
                   "loader_add_instance_extensions: Error getting Instance extension count from %s", lib_name);
        goto out;
    }

    if (count == 0) {
        // No ExtensionProperties to report
        goto out;
    }

    // Stack allocation: freed automatically on return, no cleanup path needed
    ext_props = loader_stack_alloc(count * sizeof(VkExtensionProperties));
    if (NULL == ext_props) {
        res = VK_ERROR_OUT_OF_HOST_MEMORY;
        goto out;
    }

    res = fp_get_props(NULL, &count, ext_props);
    if (res != VK_SUCCESS) {
        loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_add_instance_extensions: Error getting Instance extensions from %s",
                   lib_name);
        goto out;
    }

    for (i = 0; i < count; i++) {
        // Skip WSI extensions the loader does not support on this platform
        bool ext_unsupported = wsi_unsupported_instance_extension(&ext_props[i]);
        if (!ext_unsupported) {
            res = loader_add_to_ext_list(inst, ext_list, 1, &ext_props[i]);
            if (res != VK_SUCCESS) {
                goto out;
            }
        }
    }

out:
    return res;
}
666
// Query device extensions for physical_device via the provided
// vkEnumerateDeviceExtensionProperties pointer and append them all to ext_list.
// lib_name is used only for log messages. Returns the first failing VkResult.
VkResult loader_add_device_extensions(const struct loader_instance *inst,
                                      PFN_vkEnumerateDeviceExtensionProperties fpEnumerateDeviceExtensionProperties,
                                      VkPhysicalDevice physical_device, const char *lib_name,
                                      struct loader_extension_list *ext_list) {
    uint32_t i = 0, count = 0;
    VkResult res = VK_SUCCESS;
    VkExtensionProperties *ext_props = NULL;

    // Standard two-call pattern: count first, then fetch into a stack buffer
    res = fpEnumerateDeviceExtensionProperties(physical_device, NULL, &count, NULL);
    if (res != VK_SUCCESS) {
        loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
                   "loader_add_device_extensions: Error getting physical device extension info count from library %s", lib_name);
        return res;
    }
    if (count > 0) {
        // Stack allocation: no explicit free needed on the early-return paths below
        ext_props = loader_stack_alloc(count * sizeof(VkExtensionProperties));
        if (!ext_props) {
            loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
                       "loader_add_device_extensions: Failed to allocate space for device extension properties from library %s.",
                       lib_name);
            return VK_ERROR_OUT_OF_HOST_MEMORY;
        }
        res = fpEnumerateDeviceExtensionProperties(physical_device, NULL, &count, ext_props);
        if (res != VK_SUCCESS) {
            return res;
        }
        for (i = 0; i < count; i++) {
            res = loader_add_to_ext_list(inst, ext_list, 1, &ext_props[i]);
            if (res != VK_SUCCESS) {
                return res;
            }
        }
    }

    return VK_SUCCESS;
}
703
loader_init_generic_list(const struct loader_instance * inst,struct loader_generic_list * list_info,size_t element_size)704 VkResult loader_init_generic_list(const struct loader_instance *inst, struct loader_generic_list *list_info, size_t element_size) {
705 size_t capacity = 32 * element_size;
706 list_info->count = 0;
707 list_info->capacity = 0;
708 list_info->list = loader_instance_heap_calloc(inst, capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
709 if (list_info->list == NULL) {
710 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_init_generic_list: Failed to allocate space for generic list");
711 return VK_ERROR_OUT_OF_HOST_MEMORY;
712 }
713 list_info->capacity = capacity;
714 return VK_SUCCESS;
715 }
716
// Free a generic list's backing storage and zero the whole struct so the list
// can safely be re-initialized or destroyed again.
void loader_destroy_generic_list(const struct loader_instance *inst, struct loader_generic_list *list) {
    loader_instance_heap_free(inst, list->list);
    memset(list, 0, sizeof(struct loader_generic_list));
}
721
722 // Append non-duplicate extension properties defined in props to the given ext_list.
723 // Return - Vk_SUCCESS on success
loader_add_to_ext_list(const struct loader_instance * inst,struct loader_extension_list * ext_list,uint32_t prop_list_count,const VkExtensionProperties * props)724 VkResult loader_add_to_ext_list(const struct loader_instance *inst, struct loader_extension_list *ext_list,
725 uint32_t prop_list_count, const VkExtensionProperties *props) {
726 if (ext_list->list == NULL || ext_list->capacity == 0) {
727 VkResult res = loader_init_generic_list(inst, (struct loader_generic_list *)ext_list, sizeof(VkExtensionProperties));
728 if (VK_SUCCESS != res) {
729 return res;
730 }
731 }
732
733 for (uint32_t i = 0; i < prop_list_count; i++) {
734 const VkExtensionProperties *cur_ext = &props[i];
735
736 // look for duplicates
737 if (has_vk_extension_property(cur_ext, ext_list)) {
738 continue;
739 }
740
741 // add to list at end
742 // check for enough capacity
743 if (ext_list->count * sizeof(VkExtensionProperties) >= ext_list->capacity) {
744 void *new_ptr = loader_instance_heap_realloc(inst, ext_list->list, ext_list->capacity, ext_list->capacity * 2,
745 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
746 if (new_ptr == NULL) {
747 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
748 "loader_add_to_ext_list: Failed to reallocate space for extension list");
749 return VK_ERROR_OUT_OF_HOST_MEMORY;
750 }
751 ext_list->list = new_ptr;
752
753 // double capacity
754 ext_list->capacity *= 2;
755 }
756
757 memcpy(&ext_list->list[ext_list->count], cur_ext, sizeof(VkExtensionProperties));
758 ext_list->count++;
759 }
760 return VK_SUCCESS;
761 }
762
// Append one extension property defined in props with entrypoints defined in entries to the given
// ext_list. Do not append if a duplicate.
// If this is a duplicate, this function free's the passed in entries - as in it takes ownership over that list (if it is not
// NULL) Return - Vk_SUCCESS on success
VkResult loader_add_to_dev_ext_list(const struct loader_instance *inst, struct loader_device_extension_list *ext_list,
                                    const VkExtensionProperties *props, struct loader_string_list *entrys) {
    VkResult res = VK_SUCCESS;
    // Ownership flag for 'entrys': cleared only after the list has been stored
    // into the new element, so every other exit path frees it below.
    bool should_free_entrys = true;
    if (ext_list->list == NULL || ext_list->capacity == 0) {
        // Lazily create the backing storage on first use
        res = loader_init_generic_list(inst, (struct loader_generic_list *)ext_list, sizeof(struct loader_dev_ext_props));
        if (VK_SUCCESS != res) {
            goto out;
        }
    }

    // look for duplicates
    if (has_vk_dev_ext_property(props, ext_list)) {
        goto out;
    }

    uint32_t idx = ext_list->count;
    // add to list at end
    // check for enough capacity (capacity is tracked in bytes)
    if (idx * sizeof(struct loader_dev_ext_props) >= ext_list->capacity) {
        void *new_ptr = loader_instance_heap_realloc(inst, ext_list->list, ext_list->capacity, ext_list->capacity * 2,
                                                     VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);

        if (NULL == new_ptr) {
            loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
                       "loader_add_to_dev_ext_list: Failed to reallocate space for device extension list");
            res = VK_ERROR_OUT_OF_HOST_MEMORY;
            goto out;
        }
        ext_list->list = new_ptr;

        // double capacity
        ext_list->capacity *= 2;
    }

    memcpy(&ext_list->list[idx].props, props, sizeof(*props));
    if (entrys) {
        // Transfer ownership of the entrypoint string list to the new element
        ext_list->list[idx].entrypoints = *entrys;
        should_free_entrys = false;
    }
    ext_list->count++;
out:
    // Duplicate or failure: this function still owns 'entrys' and must free it
    if (NULL != entrys && should_free_entrys) {
        free_string_list(inst, entrys);
    }
    return res;
}
814
815 // Create storage for pointers to loader_layer_properties
loader_init_pointer_layer_list(const struct loader_instance * inst,struct loader_pointer_layer_list * list)816 bool loader_init_pointer_layer_list(const struct loader_instance *inst, struct loader_pointer_layer_list *list) {
817 list->capacity = 32 * sizeof(void *);
818 list->list = loader_instance_heap_calloc(inst, list->capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
819 if (list->list == NULL) {
820 return false;
821 }
822 list->count = 0;
823 return true;
824 }
825
826 // Search the given array of layer names for an entry matching the given VkLayerProperties
loader_names_array_has_layer_property(const VkLayerProperties * vk_layer_prop,uint32_t layer_info_count,struct activated_layer_info * layer_info)827 bool loader_names_array_has_layer_property(const VkLayerProperties *vk_layer_prop, uint32_t layer_info_count,
828 struct activated_layer_info *layer_info) {
829 for (uint32_t i = 0; i < layer_info_count; i++) {
830 if (strcmp(vk_layer_prop->layerName, layer_info[i].name) == 0) {
831 return true;
832 }
833 }
834 return false;
835 }
836
// Free the pointer-layer list's backing storage and zero the struct so it can
// safely be re-initialized or destroyed again.  Does NOT free the pointed-to
// layer properties themselves - they are owned elsewhere.
void loader_destroy_pointer_layer_list(const struct loader_instance *inst, struct loader_pointer_layer_list *layer_list) {
    loader_instance_heap_free(inst, layer_list->list);
    memset(layer_list, 0, sizeof(struct loader_pointer_layer_list));
}
841
842 // Append layer properties defined in prop_list to the given layer_info list
loader_add_layer_properties_to_list(const struct loader_instance * inst,struct loader_pointer_layer_list * list,struct loader_layer_properties * props)843 VkResult loader_add_layer_properties_to_list(const struct loader_instance *inst, struct loader_pointer_layer_list *list,
844 struct loader_layer_properties *props) {
845 if (list->list == NULL || list->capacity == 0) {
846 if (!loader_init_pointer_layer_list(inst, list)) {
847 return VK_ERROR_OUT_OF_HOST_MEMORY;
848 }
849 }
850
851 // Check for enough capacity
852 if (((list->count + 1) * sizeof(struct loader_layer_properties)) >= list->capacity) {
853 size_t new_capacity = list->capacity * 2;
854 void *new_ptr =
855 loader_instance_heap_realloc(inst, list->list, list->capacity, new_capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
856 if (NULL == new_ptr) {
857 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
858 "loader_add_layer_properties_to_list: Realloc failed for when attempting to add new layer");
859 return VK_ERROR_OUT_OF_HOST_MEMORY;
860 }
861 list->list = new_ptr;
862 list->capacity = new_capacity;
863 }
864 list->list[list->count++] = props;
865
866 return VK_SUCCESS;
867 }
868
// Determine if the provided explicit layer should be available by querying the appropriate environmental variables.
bool loader_layer_is_available(const struct loader_instance *inst, const struct loader_envvar_all_filters *filters,
                               const struct loader_layer_properties *prop) {
    bool available = true;
    // Any layer without the explicit flag counts as implicit for filtering
    bool is_implicit = (0 == (prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER));
    bool disabled_by_type =
        (is_implicit) ? (filters->disable_filter.disable_all_implicit) : (filters->disable_filter.disable_all_explicit);
    // Disable when the disable filter matches (globally, by layer type, or by
    // name) unless the allow filter explicitly matches this layer's name
    if ((filters->disable_filter.disable_all || disabled_by_type ||
         check_name_matches_filter_environment_var(prop->info.layerName, &filters->disable_filter.additional_filters)) &&
        !check_name_matches_filter_environment_var(prop->info.layerName, &filters->allow_filter)) {
        available = false;
    }
    // The enable filter overrides any disable decision made above
    if (check_name_matches_filter_environment_var(prop->info.layerName, &filters->enable_filter)) {
        available = true;
    } else if (!available) {
        loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
                   "Layer \"%s\" forced disabled because name matches filter of env var \'%s\'.", prop->info.layerName,
                   VK_LAYERS_DISABLE_ENV_VAR);
    }

    return available;
}
891
// Search the given search_list for any layers in the props list. Add these to the
// output layer_list.  Returns VK_ERROR_LAYER_NOT_PRESENT if any requested name
// could not be found (processing still continues for the remaining names), or
// VK_ERROR_OUT_OF_HOST_MEMORY immediately on allocation failure.
VkResult loader_add_layer_names_to_list(const struct loader_instance *inst, const struct loader_envvar_all_filters *filters,
                                        struct loader_pointer_layer_list *output_list,
                                        struct loader_pointer_layer_list *expanded_output_list, uint32_t name_count,
                                        const char *const *names, const struct loader_layer_list *source_list) {
    VkResult err = VK_SUCCESS;

    for (uint32_t i = 0; i < name_count; i++) {
        const char *source_name = names[i];

        struct loader_layer_properties *layer_prop = loader_find_layer_property(source_name, source_list);
        if (NULL == layer_prop) {
            loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_LAYER_BIT, 0,
                       "loader_add_layer_names_to_list: Unable to find layer \"%s\"", source_name);
            // Remember the failure but keep processing the remaining names
            err = VK_ERROR_LAYER_NOT_PRESENT;
            continue;
        }

        // Make sure the layer isn't already in the output_list, skip adding it if it is.
        if (loader_find_layer_name_in_list(source_name, output_list)) {
            continue;
        }

        // Skip layers disabled via the environment variable filters
        if (!loader_layer_is_available(inst, filters, layer_prop)) {
            continue;
        }

        // If not a meta-layer, simply add it.
        if (0 == (layer_prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER)) {
            err = loader_add_layer_properties_to_list(inst, output_list, layer_prop);
            if (err == VK_ERROR_OUT_OF_HOST_MEMORY) return err;
            err = loader_add_layer_properties_to_list(inst, expanded_output_list, layer_prop);
            if (err == VK_ERROR_OUT_OF_HOST_MEMORY) return err;
        } else {
            // Meta-layers are expanded into their component layers
            err = loader_add_meta_layer(inst, filters, layer_prop, output_list, expanded_output_list, source_list, NULL);
            if (err == VK_ERROR_OUT_OF_HOST_MEMORY) return err;
        }
    }

    return err;
}
934
935 // Determine if the provided implicit layer should be enabled by querying the appropriate environmental variables.
936 // For an implicit layer, at least a disable environment variable is required.
loader_implicit_layer_is_enabled(const struct loader_instance * inst,const struct loader_envvar_all_filters * filters,const struct loader_layer_properties * prop)937 bool loader_implicit_layer_is_enabled(const struct loader_instance *inst, const struct loader_envvar_all_filters *filters,
938 const struct loader_layer_properties *prop) {
939 bool enable = false;
940 bool forced_disabled = false;
941 bool forced_enabled = false;
942
943 if ((filters->disable_filter.disable_all || filters->disable_filter.disable_all_implicit ||
944 check_name_matches_filter_environment_var(prop->info.layerName, &filters->disable_filter.additional_filters)) &&
945 !check_name_matches_filter_environment_var(prop->info.layerName, &filters->allow_filter)) {
946 forced_disabled = true;
947 }
948 if (check_name_matches_filter_environment_var(prop->info.layerName, &filters->enable_filter)) {
949 forced_enabled = true;
950 }
951
952 // If no enable_environment variable is specified, this implicit layer is always be enabled by default.
953 if (NULL == prop->enable_env_var.name) {
954 enable = true;
955 } else {
956 char *env_value = loader_getenv(prop->enable_env_var.name, inst);
957 if (env_value && !strcmp(prop->enable_env_var.value, env_value)) {
958 enable = true;
959 }
960
961 // Otherwise, only enable this layer if the enable environment variable is defined
962 loader_free_getenv(env_value, inst);
963 }
964
965 if (forced_enabled) {
966 // Only report a message that we've forced on a layer if it wouldn't have been enabled
967 // normally.
968 if (!enable) {
969 enable = true;
970 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
971 "Implicit layer \"%s\" forced enabled due to env var \'%s\'.", prop->info.layerName,
972 VK_LAYERS_ENABLE_ENV_VAR);
973 }
974 } else if (enable && forced_disabled) {
975 enable = false;
976 // Report a message that we've forced off a layer if it would have been enabled normally.
977 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
978 "Implicit layer \"%s\" forced disabled because name matches filter of env var \'%s\'.", prop->info.layerName,
979 VK_LAYERS_DISABLE_ENV_VAR);
980 return enable;
981 }
982
983 // The disable_environment has priority over everything else. If it is defined, the layer is always
984 // disabled.
985 if (NULL != prop->disable_env_var.name) {
986 char *env_value = loader_getenv(prop->disable_env_var.name, inst);
987 if (NULL != env_value) {
988 enable = false;
989 }
990 loader_free_getenv(env_value, inst);
991 } else if ((prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER) == 0) {
992 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
993 "Implicit layer \"%s\" missing disabled environment variable!", prop->info.layerName, VK_LAYERS_DISABLE_ENV_VAR);
994 }
995
996 // Enable this layer if it is included in the override layer
997 if (inst != NULL && inst->override_layer_present) {
998 struct loader_layer_properties *override = NULL;
999 for (uint32_t i = 0; i < inst->instance_layer_list.count; ++i) {
1000 if (strcmp(inst->instance_layer_list.list[i].info.layerName, VK_OVERRIDE_LAYER_NAME) == 0) {
1001 override = &inst->instance_layer_list.list[i];
1002 break;
1003 }
1004 }
1005 if (override != NULL) {
1006 for (uint32_t i = 0; i < override->component_layer_names.count; ++i) {
1007 if (strcmp(override->component_layer_names.list[i], prop->info.layerName) == 0) {
1008 enable = true;
1009 break;
1010 }
1011 }
1012 }
1013 }
1014
1015 return enable;
1016 }
1017
// Check the individual implicit layer for the enable/disable environment variable settings. Only add it after
// every check has passed indicating it should be used, including making sure a layer of the same name hasn't already been
// added.
VkResult loader_add_implicit_layer(const struct loader_instance *inst, struct loader_layer_properties *prop,
                                   const struct loader_envvar_all_filters *filters, struct loader_pointer_layer_list *target_list,
                                   struct loader_pointer_layer_list *expanded_target_list,
                                   const struct loader_layer_list *source_list) {
    VkResult result = VK_SUCCESS;
    if (loader_implicit_layer_is_enabled(inst, filters, prop)) {
        if (0 == (prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER)) {
            // Make sure the layer isn't already in the output_list, skip adding it if it is.
            if (loader_find_layer_name_in_list(&prop->info.layerName[0], target_list)) {
                return result;
            }

            result = loader_add_layer_properties_to_list(inst, target_list, prop);
            if (result == VK_ERROR_OUT_OF_HOST_MEMORY) return result;
            // The expanded list is optional - only populate it when provided
            if (NULL != expanded_target_list) {
                result = loader_add_layer_properties_to_list(inst, expanded_target_list, prop);
            }
        } else {
            // Meta-layers are expanded into their component layers
            result = loader_add_meta_layer(inst, filters, prop, target_list, expanded_target_list, source_list, NULL);
        }
    }
    return result;
}
1044
1045 // Add the component layers of a meta-layer to the active list of layers
loader_add_meta_layer(const struct loader_instance * inst,const struct loader_envvar_all_filters * filters,struct loader_layer_properties * prop,struct loader_pointer_layer_list * target_list,struct loader_pointer_layer_list * expanded_target_list,const struct loader_layer_list * source_list,bool * out_found_all_component_layers)1046 VkResult loader_add_meta_layer(const struct loader_instance *inst, const struct loader_envvar_all_filters *filters,
1047 struct loader_layer_properties *prop, struct loader_pointer_layer_list *target_list,
1048 struct loader_pointer_layer_list *expanded_target_list, const struct loader_layer_list *source_list,
1049 bool *out_found_all_component_layers) {
1050 VkResult result = VK_SUCCESS;
1051 bool found_all_component_layers = true;
1052
1053 // We need to add all the individual component layers
1054 loader_api_version meta_layer_api_version = loader_make_version(prop->info.specVersion);
1055 for (uint32_t comp_layer = 0; comp_layer < prop->component_layer_names.count; comp_layer++) {
1056 struct loader_layer_properties *search_prop =
1057 loader_find_layer_property(prop->component_layer_names.list[comp_layer], source_list);
1058 if (search_prop != NULL) {
1059 loader_api_version search_prop_version = loader_make_version(prop->info.specVersion);
1060 if (!loader_check_version_meets_required(meta_layer_api_version, search_prop_version)) {
1061 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
1062 "Meta-layer \"%s\" API version %u.%u, component layer \"%s\" version %u.%u, may have "
1063 "incompatibilities (Policy #LLP_LAYER_8)!",
1064 prop->info.layerName, meta_layer_api_version.major, meta_layer_api_version.minor,
1065 search_prop->info.layerName, search_prop_version.major, search_prop_version.minor);
1066 }
1067
1068 if (!loader_layer_is_available(inst, filters, search_prop)) {
1069 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
1070 "Meta Layer \"%s\" component layer \"%s\" disabled.", prop->info.layerName, search_prop->info.layerName);
1071 continue;
1072 }
1073
1074 // If the component layer is itself an implicit layer, we need to do the implicit layer enable
1075 // checks
1076 if (0 == (search_prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER)) {
1077 result = loader_add_implicit_layer(inst, search_prop, filters, target_list, expanded_target_list, source_list);
1078 if (result == VK_ERROR_OUT_OF_HOST_MEMORY) return result;
1079 } else {
1080 if (0 != (search_prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER)) {
1081 bool found_layers_in_component_meta_layer = true;
1082 result = loader_add_meta_layer(inst, filters, search_prop, target_list, expanded_target_list, source_list,
1083 &found_layers_in_component_meta_layer);
1084 if (result == VK_ERROR_OUT_OF_HOST_MEMORY) return result;
1085 if (!found_layers_in_component_meta_layer) found_all_component_layers = false;
1086 } else if (!loader_find_layer_name_in_list(&search_prop->info.layerName[0], target_list)) {
1087 // Make sure the layer isn't already in the output_list, skip adding it if it is.
1088 result = loader_add_layer_properties_to_list(inst, target_list, search_prop);
1089 if (result == VK_ERROR_OUT_OF_HOST_MEMORY) return result;
1090 if (NULL != expanded_target_list) {
1091 result = loader_add_layer_properties_to_list(inst, expanded_target_list, search_prop);
1092 if (result == VK_ERROR_OUT_OF_HOST_MEMORY) return result;
1093 }
1094 }
1095 }
1096 } else {
1097 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
1098 "Failed to find layer name \"%s\" component layer \"%s\" to activate (Policy #LLP_LAYER_7)",
1099 prop->component_layer_names.list[comp_layer], prop->component_layer_names.list[comp_layer]);
1100 found_all_component_layers = false;
1101 }
1102 }
1103
1104 // Add this layer to the overall target list (not the expanded one)
1105 if (found_all_component_layers) {
1106 result = loader_add_layer_properties_to_list(inst, target_list, prop);
1107 if (result == VK_ERROR_OUT_OF_HOST_MEMORY) return result;
1108 // Write the result to out_found_all_component_layers in case this function is being recursed
1109 if (out_found_all_component_layers) *out_found_all_component_layers = found_all_component_layers;
1110 }
1111
1112 return result;
1113 }
1114
get_extension_property(const char * name,const struct loader_extension_list * list)1115 VkExtensionProperties *get_extension_property(const char *name, const struct loader_extension_list *list) {
1116 for (uint32_t i = 0; i < list->count; i++) {
1117 if (strcmp(name, list->list[i].extensionName) == 0) return &list->list[i];
1118 }
1119 return NULL;
1120 }
1121
get_dev_extension_property(const char * name,const struct loader_device_extension_list * list)1122 VkExtensionProperties *get_dev_extension_property(const char *name, const struct loader_device_extension_list *list) {
1123 for (uint32_t i = 0; i < list->count; i++) {
1124 if (strcmp(name, list->list[i].props.extensionName) == 0) return &list->list[i].props;
1125 }
1126 return NULL;
1127 }
1128
// For Instance extensions implemented within the loader (e.g. DEBUG_REPORT),
1130 // the extension must provide two entry points for the loader to use:
1131 // - "trampoline" entry point - this is the address returned by GetProcAddr
1132 // and will always do what's necessary to support a
1133 // global call.
1134 // - "terminator" function - this function will be put at the end of the
1135 // instance chain and will contain the necessary logic
1136 // to call / process the extension for the appropriate
1137 // ICDs that are available.
1138 // There is no generic mechanism for including these functions, the references
1139 // must be placed into the appropriate loader entry points.
1140 // GetInstanceProcAddr: call extension GetInstanceProcAddr to check for GetProcAddr
1141 // requests
// loader_coalesce_extensions(void) - add extension records to the list of global
// extensions available to the app.
1144 // instance_disp - add function pointer for terminator function
1145 // to this array.
1146 // The extension itself should be in a separate file that will be linked directly
1147 // with the loader.
loader_get_icd_loader_instance_extensions(const struct loader_instance * inst,struct loader_icd_tramp_list * icd_tramp_list,struct loader_extension_list * inst_exts)1148 VkResult loader_get_icd_loader_instance_extensions(const struct loader_instance *inst, struct loader_icd_tramp_list *icd_tramp_list,
1149 struct loader_extension_list *inst_exts) {
1150 struct loader_extension_list icd_exts;
1151 VkResult res = VK_SUCCESS;
1152 char *env_value;
1153 bool filter_extensions = true;
1154
1155 // Check if a user wants to disable the instance extension filtering behavior
1156 env_value = loader_getenv("VK_LOADER_DISABLE_INST_EXT_FILTER", inst);
1157 if (NULL != env_value && atoi(env_value) != 0) {
1158 filter_extensions = false;
1159 }
1160 loader_free_getenv(env_value, inst);
1161
1162 // traverse scanned icd list adding non-duplicate extensions to the list
1163 for (uint32_t i = 0; i < icd_tramp_list->count; i++) {
1164 res = loader_init_generic_list(inst, (struct loader_generic_list *)&icd_exts, sizeof(VkExtensionProperties));
1165 if (VK_SUCCESS != res) {
1166 goto out;
1167 }
1168 res = loader_add_instance_extensions(inst, icd_tramp_list->scanned_list[i].EnumerateInstanceExtensionProperties,
1169 icd_tramp_list->scanned_list[i].lib_name, &icd_exts);
1170 if (VK_SUCCESS == res) {
1171 if (filter_extensions) {
1172 // Remove any extensions not recognized by the loader
1173 for (int32_t j = 0; j < (int32_t)icd_exts.count; j++) {
1174 // See if the extension is in the list of supported extensions
1175 bool found = false;
1176 for (uint32_t k = 0; LOADER_INSTANCE_EXTENSIONS[k] != NULL; k++) {
1177 if (strcmp(icd_exts.list[j].extensionName, LOADER_INSTANCE_EXTENSIONS[k]) == 0) {
1178 found = true;
1179 break;
1180 }
1181 }
1182
1183 // If it isn't in the list, remove it
1184 if (!found) {
1185 for (uint32_t k = j + 1; k < icd_exts.count; k++) {
1186 icd_exts.list[k - 1] = icd_exts.list[k];
1187 }
1188 --icd_exts.count;
1189 --j;
1190 }
1191 }
1192 }
1193
1194 res = loader_add_to_ext_list(inst, inst_exts, icd_exts.count, icd_exts.list);
1195 }
1196 loader_destroy_generic_list(inst, (struct loader_generic_list *)&icd_exts);
1197 if (VK_SUCCESS != res) {
1198 goto out;
1199 }
1200 };
1201
1202 // Traverse loader's extensions, adding non-duplicate extensions to the list
1203 res = add_debug_extensions_to_ext_list(inst, inst_exts);
1204 if (res == VK_ERROR_OUT_OF_HOST_MEMORY) {
1205 goto out;
1206 }
1207 const VkExtensionProperties portability_enumeration_extension_info[] = {
1208 {VK_KHR_PORTABILITY_ENUMERATION_EXTENSION_NAME, VK_KHR_PORTABILITY_ENUMERATION_SPEC_VERSION}};
1209
1210 // Add VK_KHR_portability_subset
1211 res = loader_add_to_ext_list(inst, inst_exts, sizeof(portability_enumeration_extension_info) / sizeof(VkExtensionProperties),
1212 portability_enumeration_extension_info);
1213 if (res == VK_ERROR_OUT_OF_HOST_MEMORY) {
1214 goto out;
1215 }
1216
1217 const VkExtensionProperties direct_driver_loading_extension_info[] = {
1218 {VK_LUNARG_DIRECT_DRIVER_LOADING_EXTENSION_NAME, VK_LUNARG_DIRECT_DRIVER_LOADING_SPEC_VERSION}};
1219
1220 // Add VK_LUNARG_direct_driver_loading
1221 res = loader_add_to_ext_list(inst, inst_exts, sizeof(direct_driver_loading_extension_info) / sizeof(VkExtensionProperties),
1222 direct_driver_loading_extension_info);
1223 if (res == VK_ERROR_OUT_OF_HOST_MEMORY) {
1224 goto out;
1225 }
1226
1227 out:
1228 return res;
1229 }
1230
// Find the loader_icd_term and loader_device that own the given (possibly
// layer-wrapped) device handle by scanning every instance's ICD terminators and
// their logical device lists.  On success *found_dev is set (and *icd_index,
// when requested, receives the ICD's position within its instance) and the
// owning ICD terminator is returned; otherwise returns NULL with *found_dev
// set to NULL.
struct loader_icd_term *loader_get_icd_and_device(const void *device, struct loader_device **found_dev, uint32_t *icd_index) {
    VkLayerDispatchTable *dispatch_table_device = loader_get_dispatch(device);
    if (NULL == dispatch_table_device) {
        *found_dev = NULL;
        return NULL;
    }
    // Hold the global instance list lock for the entire walk of loader.instances
    loader_platform_thread_lock_mutex(&loader_global_instance_list_lock);
    *found_dev = NULL;

    for (struct loader_instance *inst = loader.instances; inst; inst = inst->next) {
        uint32_t index = 0;
        for (struct loader_icd_term *icd_term = inst->icd_terms; icd_term; icd_term = icd_term->next) {
            for (struct loader_device *dev = icd_term->logical_device_list; dev; dev = dev->next) {
                // Value comparison of device prevents object wrapping by layers
                if (loader_get_dispatch(dev->icd_device) == dispatch_table_device ||
                    (dev->chain_device != VK_NULL_HANDLE && loader_get_dispatch(dev->chain_device) == dispatch_table_device)) {
                    *found_dev = dev;
                    if (NULL != icd_index) {
                        *icd_index = index;
                    }
                    // Unlock before the early return on a match
                    loader_platform_thread_unlock_mutex(&loader_global_instance_list_lock);
                    return icd_term;
                }
            }
            index++;
        }
    }
    loader_platform_thread_unlock_mutex(&loader_global_instance_list_lock);
    return NULL;
}
1261
// Free the given loader_device.  If pAllocator is provided it is copied into
// the device first - presumably so loader_device_heap_free uses the
// destroy-time allocation callbacks rather than the creation-time ones
// (NOTE(review): confirm against loader_device_heap_free in allocation.h).
void loader_destroy_logical_device(struct loader_device *dev, const VkAllocationCallbacks *pAllocator) {
    if (pAllocator) {
        dev->alloc_callbacks = *pAllocator;
    }
    loader_device_heap_free(dev, dev);
}
1268
loader_create_logical_device(const struct loader_instance * inst,const VkAllocationCallbacks * pAllocator)1269 struct loader_device *loader_create_logical_device(const struct loader_instance *inst, const VkAllocationCallbacks *pAllocator) {
1270 struct loader_device *new_dev;
1271 new_dev = loader_calloc(pAllocator, sizeof(struct loader_device), VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1272
1273 if (!new_dev) {
1274 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_create_logical_device: Failed to alloc struct loader_device");
1275 return NULL;
1276 }
1277
1278 new_dev->loader_dispatch.core_dispatch.magic = DEVICE_DISP_TABLE_MAGIC_NUMBER;
1279
1280 if (pAllocator) {
1281 new_dev->alloc_callbacks = *pAllocator;
1282 }
1283
1284 return new_dev;
1285 }
1286
// Prepend the given device to the ICD terminator's singly-linked list of
// logical devices.
void loader_add_logical_device(struct loader_icd_term *icd_term, struct loader_device *dev) {
    dev->next = icd_term->logical_device_list;
    icd_term->logical_device_list = dev;
}
1291
loader_remove_logical_device(struct loader_icd_term * icd_term,struct loader_device * found_dev,const VkAllocationCallbacks * pAllocator)1292 void loader_remove_logical_device(struct loader_icd_term *icd_term, struct loader_device *found_dev,
1293 const VkAllocationCallbacks *pAllocator) {
1294 struct loader_device *dev, *prev_dev;
1295
1296 if (!icd_term || !found_dev) return;
1297
1298 prev_dev = NULL;
1299 dev = icd_term->logical_device_list;
1300 while (dev && dev != found_dev) {
1301 prev_dev = dev;
1302 dev = dev->next;
1303 }
1304
1305 if (prev_dev)
1306 prev_dev->next = found_dev->next;
1307 else
1308 icd_term->logical_device_list = found_dev->next;
1309 loader_destroy_logical_device(found_dev, pAllocator);
1310 }
1311
loader_icd_destroy(struct loader_instance * ptr_inst,struct loader_icd_term * icd_term,const VkAllocationCallbacks * pAllocator)1312 void loader_icd_destroy(struct loader_instance *ptr_inst, struct loader_icd_term *icd_term,
1313 const VkAllocationCallbacks *pAllocator) {
1314 ptr_inst->total_icd_count--;
1315 for (struct loader_device *dev = icd_term->logical_device_list; dev;) {
1316 struct loader_device *next_dev = dev->next;
1317 loader_destroy_logical_device(dev, pAllocator);
1318 dev = next_dev;
1319 }
1320
1321 loader_instance_heap_free(ptr_inst, icd_term);
1322 }
1323
loader_icd_add(struct loader_instance * ptr_inst,const struct loader_scanned_icd * scanned_icd)1324 struct loader_icd_term *loader_icd_add(struct loader_instance *ptr_inst, const struct loader_scanned_icd *scanned_icd) {
1325 struct loader_icd_term *icd_term;
1326
1327 icd_term = loader_instance_heap_calloc(ptr_inst, sizeof(struct loader_icd_term), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
1328 if (!icd_term) {
1329 return NULL;
1330 }
1331
1332 icd_term->scanned_icd = scanned_icd;
1333 icd_term->this_instance = ptr_inst;
1334
1335 // Prepend to the list
1336 icd_term->next = ptr_inst->icd_terms;
1337 ptr_inst->icd_terms = icd_term;
1338 ptr_inst->total_icd_count++;
1339
1340 return icd_term;
1341 }
1342
1343 // Determine the ICD interface version to use.
1344 // @param icd
1345 // @param pVersion Output parameter indicating which version to use or 0 if
1346 // the negotiation API is not supported by the ICD
1347 // @return bool indicating true if the selected interface version is supported
1348 // by the loader, false indicates the version is not supported
bool loader_get_icd_interface_version(PFN_vkNegotiateLoaderICDInterfaceVersion fp_negotiate_icd_version, uint32_t *pVersion) {
    if (NULL == fp_negotiate_icd_version) {
        // Driver does not export the negotiation entry point; it speaks
        // interface version 0 or 1 and the caller must work out which.
        *pVersion = 0;
    } else {
        // Offer the loader's newest supported version; the driver may lower it.
        *pVersion = CURRENT_LOADER_ICD_INTERFACE_VERSION;
        // Only VK_ERROR_INCOMPATIBLE_DRIVER is treated as a refusal; any other
        // result leaves *pVersion as negotiated.
        if (VK_ERROR_INCOMPATIBLE_DRIVER == fp_negotiate_icd_version(pVersion)) {
            return false;
        }
    }

#if MIN_SUPPORTED_LOADER_ICD_INTERFACE_VERSION > 0
    // The loader itself has a floor; reject drivers below it.
    if (*pVersion < MIN_SUPPORTED_LOADER_ICD_INTERFACE_VERSION) {
        return false;
    }
#endif
    return true;
}
1376
loader_scanned_icd_clear(const struct loader_instance * inst,struct loader_icd_tramp_list * icd_tramp_list)1377 void loader_scanned_icd_clear(const struct loader_instance *inst, struct loader_icd_tramp_list *icd_tramp_list) {
1378 if (0 != icd_tramp_list->capacity && icd_tramp_list->scanned_list) {
1379 for (uint32_t i = 0; i < icd_tramp_list->count; i++) {
1380 if (icd_tramp_list->scanned_list[i].handle) {
1381 loader_platform_close_library(icd_tramp_list->scanned_list[i].handle);
1382 icd_tramp_list->scanned_list[i].handle = NULL;
1383 }
1384 loader_instance_heap_free(inst, icd_tramp_list->scanned_list[i].lib_name);
1385 }
1386 loader_instance_heap_free(inst, icd_tramp_list->scanned_list);
1387 }
1388 memset(icd_tramp_list, 0, sizeof(struct loader_icd_tramp_list));
1389 }
1390
loader_scanned_icd_init(const struct loader_instance * inst,struct loader_icd_tramp_list * icd_tramp_list)1391 VkResult loader_scanned_icd_init(const struct loader_instance *inst, struct loader_icd_tramp_list *icd_tramp_list) {
1392 VkResult res = VK_SUCCESS;
1393 loader_scanned_icd_clear(inst, icd_tramp_list);
1394 icd_tramp_list->capacity = 8 * sizeof(struct loader_scanned_icd);
1395 icd_tramp_list->scanned_list = loader_instance_heap_alloc(inst, icd_tramp_list->capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
1396 if (NULL == icd_tramp_list->scanned_list) {
1397 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
1398 "loader_scanned_icd_init: Realloc failed for layer list when attempting to add new layer");
1399 res = VK_ERROR_OUT_OF_HOST_MEMORY;
1400 }
1401 return res;
1402 }
1403
// Add a single application-supplied driver (VK_LUNARG_direct_driver_loading) to the
// scanned ICD list. `index` is the driver's position in
// VkDirectDriverLoadingListLUNARG::pDrivers and is used only in log messages.
// Returns VK_ERROR_INITIALIZATION_FAILED when this driver must be skipped (missing
// entry points or incompatible interface version), VK_ERROR_OUT_OF_HOST_MEMORY when
// the list cannot grow, a failure code from the driver's vkEnumerateInstanceVersion,
// or VK_SUCCESS.
VkResult loader_add_direct_driver(const struct loader_instance *inst, uint32_t index,
                                  const VkDirectDriverLoadingInfoLUNARG *pDriver, struct loader_icd_tramp_list *icd_tramp_list) {
    // Assume pDriver is valid, since there is no real way to check it. Calling code should make sure the pointer to the array
    // of VkDirectDriverLoadingInfoLUNARG structures is non-null.
    if (NULL == pDriver->pfnGetInstanceProcAddr) {
        loader_log(
            inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
            "loader_add_direct_driver: VkDirectDriverLoadingInfoLUNARG structure at index %d contains a NULL pointer for the "
            "pfnGetInstanceProcAddr member, skipping.",
            index);
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    PFN_vkGetInstanceProcAddr fp_get_proc_addr = pDriver->pfnGetInstanceProcAddr;
    PFN_vkCreateInstance fp_create_inst = NULL;
    PFN_vkEnumerateInstanceExtensionProperties fp_get_inst_ext_props = NULL;
    PFN_GetPhysicalDeviceProcAddr fp_get_phys_dev_proc_addr = NULL;
    PFN_vkNegotiateLoaderICDInterfaceVersion fp_negotiate_icd_version = NULL;
#if defined(VK_USE_PLATFORM_WIN32_KHR)
    PFN_vk_icdEnumerateAdapterPhysicalDevices fp_enum_dxgi_adapter_phys_devs = NULL;
#endif
    struct loader_scanned_icd *new_scanned_icd;
    uint32_t interface_version = 0;

    // Try to get the negotiate ICD interface version function
    fp_negotiate_icd_version = (PFN_vk_icdNegotiateLoaderICDInterfaceVersion)pDriver->pfnGetInstanceProcAddr(
        NULL, "vk_icdNegotiateLoaderICDInterfaceVersion");

    // Unlike file-based drivers, direct drivers MUST expose the negotiation entry point.
    if (NULL == fp_negotiate_icd_version) {
        loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
                   "loader_add_direct_driver: Could not get 'vk_icdNegotiateLoaderICDInterfaceVersion' from "
                   "VkDirectDriverLoadingInfoLUNARG structure at "
                   "index %d, skipping.",
                   index);
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    if (!loader_get_icd_interface_version(fp_negotiate_icd_version, &interface_version)) {
        loader_log(
            inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
            "loader_add_direct_driver: VkDirectDriverLoadingInfoLUNARG structure at index %d supports interface version %d, "
            "which is incompatible with the Loader Driver Interface version that supports the VK_LUNARG_direct_driver_loading "
            "extension, skipping.",
            index, interface_version);
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Direct driver loading requires Loader/Driver interface version 7 or newer.
    // NOTE(review): this log message is identical to the negotiation-failure message
    // above; it should probably call out the version-7 requirement explicitly.
    if (interface_version < 7) {
        loader_log(
            inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
            "loader_add_direct_driver: VkDirectDriverLoadingInfoLUNARG structure at index %d supports interface version %d, "
            "which is incompatible with the Loader Driver Interface version that supports the VK_LUNARG_direct_driver_loading "
            "extension, skipping.",
            index, interface_version);
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Mandatory entry points: vkCreateInstance and vkEnumerateInstanceExtensionProperties.
    fp_create_inst = (PFN_vkCreateInstance)pDriver->pfnGetInstanceProcAddr(NULL, "vkCreateInstance");
    if (NULL == fp_create_inst) {
        loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
                   "loader_add_direct_driver: Could not get 'vkCreateInstance' from VkDirectDriverLoadingInfoLUNARG structure at "
                   "index %d, skipping.",
                   index);
        return VK_ERROR_INITIALIZATION_FAILED;
    }
    fp_get_inst_ext_props =
        (PFN_vkEnumerateInstanceExtensionProperties)pDriver->pfnGetInstanceProcAddr(NULL, "vkEnumerateInstanceExtensionProperties");
    if (NULL == fp_get_inst_ext_props) {
        loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
                   "loader_add_direct_driver: Could not get 'vkEnumerateInstanceExtensionProperties' from "
                   "VkDirectDriverLoadingInfoLUNARG structure at index %d, skipping.",
                   index);
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Optional entry point: a NULL result here is not an error.
    fp_get_phys_dev_proc_addr =
        (PFN_vk_icdGetPhysicalDeviceProcAddr)pDriver->pfnGetInstanceProcAddr(NULL, "vk_icdGetPhysicalDeviceProcAddr");
#if defined(VK_USE_PLATFORM_WIN32_KHR)
    // Query "vk_icdEnumerateAdapterPhysicalDevices" with vk_icdGetInstanceProcAddr if the library reports interface version
    // 7 or greater, otherwise fallback to loading it from the platform dynamic linker
    fp_enum_dxgi_adapter_phys_devs =
        (PFN_vk_icdEnumerateAdapterPhysicalDevices)pDriver->pfnGetInstanceProcAddr(NULL, "vk_icdEnumerateAdapterPhysicalDevices");
#endif

    // check for enough capacity (capacity is tracked in bytes, count in entries)
    if ((icd_tramp_list->count * sizeof(struct loader_scanned_icd)) >= icd_tramp_list->capacity) {
        void *new_ptr = loader_instance_heap_realloc(inst, icd_tramp_list->scanned_list, icd_tramp_list->capacity,
                                                     icd_tramp_list->capacity * 2, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
        if (NULL == new_ptr) {
            loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
                       "loader_add_direct_driver: Realloc failed on icd library list for ICD index %u", index);
            return VK_ERROR_OUT_OF_HOST_MEMORY;
        }
        icd_tramp_list->scanned_list = new_ptr;

        // double capacity
        icd_tramp_list->capacity *= 2;
    }

    // Driver must be 1.1 to support version 7
    uint32_t api_version = VK_API_VERSION_1_1;
    PFN_vkEnumerateInstanceVersion icd_enumerate_instance_version =
        (PFN_vkEnumerateInstanceVersion)pDriver->pfnGetInstanceProcAddr(NULL, "vkEnumerateInstanceVersion");

    if (icd_enumerate_instance_version) {
        VkResult res = icd_enumerate_instance_version(&api_version);
        if (res != VK_SUCCESS) {
            return res;
        }
    }

    // Fill out the new entry; direct-loaded drivers have no library handle or file name.
    new_scanned_icd = &(icd_tramp_list->scanned_list[icd_tramp_list->count]);
    new_scanned_icd->handle = NULL;
    new_scanned_icd->api_version = api_version;
    new_scanned_icd->GetInstanceProcAddr = fp_get_proc_addr;
    new_scanned_icd->GetPhysicalDeviceProcAddr = fp_get_phys_dev_proc_addr;
    new_scanned_icd->EnumerateInstanceExtensionProperties = fp_get_inst_ext_props;
    new_scanned_icd->CreateInstance = fp_create_inst;
#if defined(VK_USE_PLATFORM_WIN32_KHR)
    new_scanned_icd->EnumerateAdapterPhysicalDevices = fp_enum_dxgi_adapter_phys_devs;
#endif
    new_scanned_icd->interface_version = interface_version;

    new_scanned_icd->lib_name = NULL;
    icd_tramp_list->count++;

    loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
               "loader_add_direct_driver: Adding driver found in index %d of "
               "VkDirectDriverLoadingListLUNARG::pDrivers structure. pfnGetInstanceProcAddr was set to %p",
               index, pDriver->pfnGetInstanceProcAddr);

    return VK_SUCCESS;
}
1537
// Search through VkInstanceCreateInfo's pNext chain for any drivers from the direct driver loading extension and load them.
// Sets *direct_driver_loading_exclusive_mode to true when the application requested
// VK_DIRECT_DRIVER_LOADING_MODE_EXCLUSIVE_LUNARG, which tells the caller to skip the
// system/environment-variable driver search. Returns VK_SUCCESS in every case except
// an out-of-memory failure while adding a driver.
VkResult loader_scan_for_direct_drivers(const struct loader_instance *inst, const VkInstanceCreateInfo *pCreateInfo,
                                        struct loader_icd_tramp_list *icd_tramp_list, bool *direct_driver_loading_exclusive_mode) {
    if (NULL == pCreateInfo) {
        // Don't do this logic unless we are being called from vkCreateInstance, when pCreateInfo will be non-null
        return VK_SUCCESS;
    }
    bool direct_driver_loading_enabled = false;
    // Check whether VK_LUNARG_direct_driver_loading is enabled and if we are using it exclusively
    // Skip this step if inst is NULL, aka when this function is being called before instance creation
    if (inst != NULL && pCreateInfo->ppEnabledExtensionNames && pCreateInfo->enabledExtensionCount > 0) {
        // Look through the enabled extension list, make sure VK_LUNARG_direct_driver_loading is present
        for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
            if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_LUNARG_DIRECT_DRIVER_LOADING_EXTENSION_NAME) == 0) {
                direct_driver_loading_enabled = true;
                break;
            }
        }
    }
    const VkDirectDriverLoadingListLUNARG *ddl_list = NULL;
    // Find the VkDirectDriverLoadingListLUNARG struct in the pNext chain of vkInstanceCreateInfo
    const VkBaseOutStructure *chain = pCreateInfo->pNext;
    while (chain) {
        if (chain->sType == VK_STRUCTURE_TYPE_DIRECT_DRIVER_LOADING_LIST_LUNARG) {
            ddl_list = (VkDirectDriverLoadingListLUNARG *)chain;
            break;
        }
        chain = (const VkBaseOutStructure *)chain->pNext;
    }
    if (NULL == ddl_list) {
        // Warn only when the extension was enabled but the required struct is missing.
        if (direct_driver_loading_enabled) {
            loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
                       "loader_scan_for_direct_drivers: The VK_LUNARG_direct_driver_loading extension was enabled but the "
                       "pNext chain of "
                       "VkInstanceCreateInfo did not contain the "
                       "VkDirectDriverLoadingListLUNARG structure.");
        }
        // Always want to exit early if there was no VkDirectDriverLoadingListLUNARG in the pNext chain
        return VK_SUCCESS;
    }

    // The inverse misuse: struct present but extension not enabled.
    if (!direct_driver_loading_enabled) {
        loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
                   "loader_scan_for_direct_drivers: The pNext chain of VkInstanceCreateInfo contained the "
                   "VkDirectDriverLoadingListLUNARG structure, but the VK_LUNARG_direct_driver_loading extension was "
                   "not enabled.");
        return VK_SUCCESS;
    }
    // If we are using exclusive mode, skip looking for any more drivers from system or environment variables
    if (ddl_list->mode == VK_DIRECT_DRIVER_LOADING_MODE_EXCLUSIVE_LUNARG) {
        *direct_driver_loading_exclusive_mode = true;
        loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
                   "loader_scan_for_direct_drivers: The VK_LUNARG_direct_driver_loading extension is active and specified "
                   "VK_DIRECT_DRIVER_LOADING_MODE_EXCLUSIVE_LUNARG, skipping system and environment "
                   "variable driver search mechanisms.");
    }
    if (NULL == ddl_list->pDrivers) {
        loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
                   "loader_scan_for_direct_drivers: The VkDirectDriverLoadingListLUNARG structure in the pNext chain of "
                   "VkInstanceCreateInfo has a NULL pDrivers member.");
        return VK_SUCCESS;
    }
    if (ddl_list->driverCount == 0) {
        loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
                   "loader_scan_for_direct_drivers: The VkDirectDriverLoadingListLUNARG structure in the pNext chain of "
                   "VkInstanceCreateInfo has a non-null pDrivers member but a driverCount member with a value "
                   "of zero.");
        return VK_SUCCESS;
    }
    // Go through all VkDirectDriverLoadingInfoLUNARG entries and add each driver
    // Because icd_tramp's are prepended, this will result in the drivers appearing at the end
    for (uint32_t i = 0; i < ddl_list->driverCount; i++) {
        VkResult res = loader_add_direct_driver(inst, i, &ddl_list->pDrivers[i], icd_tramp_list);
        // Only out-of-memory aborts the scan; drivers that fail validation are simply skipped.
        if (res == VK_ERROR_OUT_OF_HOST_MEMORY) {
            return res;
        }
    }

    return VK_SUCCESS;
}
1618
// Load the driver shared library at `filename`, negotiate the loader/driver
// interface version, resolve the required entry points (falling back to the
// deprecated version-0 exports when necessary), and append the driver to
// icd_tramp_list. `api_version` is the Vulkan version the driver's manifest
// advertised. `lib_status` (optional) receives detail when the library itself
// fails to load. The library handle is intentionally left open on success
// (see TODO below); loader_scanned_icd_clear closes it later.
VkResult loader_scanned_icd_add(const struct loader_instance *inst, struct loader_icd_tramp_list *icd_tramp_list,
                                const char *filename, uint32_t api_version, enum loader_layer_library_status *lib_status) {
    loader_platform_dl_handle handle = NULL;
    PFN_vkCreateInstance fp_create_inst = NULL;
    PFN_vkEnumerateInstanceExtensionProperties fp_get_inst_ext_props = NULL;
    PFN_vkGetInstanceProcAddr fp_get_proc_addr = NULL;
    PFN_GetPhysicalDeviceProcAddr fp_get_phys_dev_proc_addr = NULL;
    PFN_vkNegotiateLoaderICDInterfaceVersion fp_negotiate_icd_version = NULL;
#if defined(VK_USE_PLATFORM_WIN32_KHR)
    PFN_vk_icdEnumerateAdapterPhysicalDevices fp_enum_dxgi_adapter_phys_devs = NULL;
#endif
    struct loader_scanned_icd *new_scanned_icd = NULL;
    uint32_t interface_vers;
    VkResult res = VK_SUCCESS;

    // This shouldn't happen, but the check is necessary because dlopen returns a handle to the main program when
    // filename is NULL
    if (filename == NULL) {
        // NOTE(review): the format string has no conversion specifier, so the trailing
        // `filename` argument (NULL here anyway) is ignored by the formatter - it
        // looks like a leftover from an earlier "%s" message.
        loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_scanned_icd_add: A NULL filename was used, skipping this ICD",
                   filename);
        res = VK_ERROR_INCOMPATIBLE_DRIVER;
        goto out;
    }

    // TODO implement smarter opening/closing of libraries. For now this
    // function leaves libraries open and the scanned_icd_clear closes them
#if defined(__Fuchsia__)
    handle = loader_platform_open_driver(filename);
#else
    handle = loader_platform_open_library(filename);
#endif
    if (NULL == handle) {
        // Logs the platform error and classifies it into *lib_status when provided.
        loader_handle_load_library_error(inst, filename, lib_status);
        if (lib_status && *lib_status == LOADER_LAYER_LIB_ERROR_OUT_OF_MEMORY) {
            res = VK_ERROR_OUT_OF_HOST_MEMORY;
        } else {
            res = VK_ERROR_INCOMPATIBLE_DRIVER;
        }
        goto out;
    }

    // Try to load the driver's exported vk_icdNegotiateLoaderICDInterfaceVersion
    fp_negotiate_icd_version = loader_platform_get_proc_address(handle, "vk_icdNegotiateLoaderICDInterfaceVersion");

    // If it isn't exported, we are dealing with either a v0, v1, or a v7 and up driver
    if (NULL == fp_negotiate_icd_version) {
        // Try to load the driver's exported vk_icdGetInstanceProcAddr - if this is a v7 or up driver, we can use it to get
        // the driver's vk_icdNegotiateLoaderICDInterfaceVersion function
        fp_get_proc_addr = loader_platform_get_proc_address(handle, "vk_icdGetInstanceProcAddr");

        // If we successfully loaded vk_icdGetInstanceProcAddr, try to get vk_icdNegotiateLoaderICDInterfaceVersion
        if (fp_get_proc_addr) {
            fp_negotiate_icd_version =
                (PFN_vk_icdNegotiateLoaderICDInterfaceVersion)fp_get_proc_addr(NULL, "vk_icdNegotiateLoaderICDInterfaceVersion");
        }
    }

    // Try to negotiate the Loader and Driver Interface Versions
    // loader_get_icd_interface_version will check if fp_negotiate_icd_version is NULL, so we don't have to.
    // If it *is* NULL, that means this driver uses interface version 0 or 1
    if (!loader_get_icd_interface_version(fp_negotiate_icd_version, &interface_vers)) {
        // NOTE(review): res is still VK_SUCCESS on this and the following "skip"
        // paths, and `handle` is not closed here (it was never added to the list
        // that loader_scanned_icd_clear tears down) - confirm both are intentional.
        loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
                   "loader_scanned_icd_add: ICD %s doesn't support interface version compatible with loader, skip this ICD.",
                   filename);
        goto out;
    }

    // If we didn't already query vk_icdGetInstanceProcAddr, try now
    if (NULL == fp_get_proc_addr) {
        fp_get_proc_addr = loader_platform_get_proc_address(handle, "vk_icdGetInstanceProcAddr");
    }

    // If vk_icdGetInstanceProcAddr is NULL, this ICD is using version 0 and so we should respond accordingly.
    if (NULL == fp_get_proc_addr) {
        // Exporting vk_icdNegotiateLoaderICDInterfaceVersion but not vk_icdGetInstanceProcAddr violates Version 2's
        // requirements, as for Version 2 to be supported Version 1 must also be supported
        if (interface_vers != 0) {
            loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
                       "loader_scanned_icd_add: ICD %s reports an interface version of %d but doesn't export "
                       "vk_icdGetInstanceProcAddr, skip this ICD.",
                       filename, interface_vers);
            goto out;
        }
        // Use deprecated interface from version 0
        fp_get_proc_addr = loader_platform_get_proc_address(handle, "vkGetInstanceProcAddr");
        if (NULL == fp_get_proc_addr) {
            loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
                       "loader_scanned_icd_add: Attempt to retrieve either \'vkGetInstanceProcAddr\' or "
                       "\'vk_icdGetInstanceProcAddr\' from ICD %s failed.",
                       filename);
            goto out;
        } else {
            loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
                       "loader_scanned_icd_add: Using deprecated ICD interface of \'vkGetInstanceProcAddr\' instead of "
                       "\'vk_icdGetInstanceProcAddr\' for ICD %s",
                       filename);
        }
        // Version 0 drivers export these directly from the library rather than
        // routing through GetInstanceProcAddr.
        fp_create_inst = loader_platform_get_proc_address(handle, "vkCreateInstance");
        if (NULL == fp_create_inst) {
            loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
                       "loader_scanned_icd_add: Failed querying \'vkCreateInstance\' via dlsym/LoadLibrary for ICD %s", filename);
            goto out;
        }
        fp_get_inst_ext_props = loader_platform_get_proc_address(handle, "vkEnumerateInstanceExtensionProperties");
        if (NULL == fp_get_inst_ext_props) {
            loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
                       "loader_scanned_icd_add: Could not get \'vkEnumerateInstanceExtensionProperties\' via dlsym/LoadLibrary "
                       "for ICD %s",
                       filename);
            goto out;
        }
    } else {
        // vk_icdGetInstanceProcAddr was successfully found, we can assume the version is at least one
        // If vk_icdNegotiateLoaderICDInterfaceVersion was also found, interface_vers must be 2 or greater, so this check is
        // fine
        if (interface_vers == 0) {
            interface_vers = 1;
        }

        fp_create_inst = (PFN_vkCreateInstance)fp_get_proc_addr(NULL, "vkCreateInstance");
        if (NULL == fp_create_inst) {
            loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
                       "loader_scanned_icd_add: Could not get \'vkCreateInstance\' via \'vk_icdGetInstanceProcAddr\' for ICD %s",
                       filename);
            goto out;
        }
        fp_get_inst_ext_props =
            (PFN_vkEnumerateInstanceExtensionProperties)fp_get_proc_addr(NULL, "vkEnumerateInstanceExtensionProperties");
        if (NULL == fp_get_inst_ext_props) {
            loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
                       "loader_scanned_icd_add: Could not get \'vkEnumerateInstanceExtensionProperties\' via "
                       "\'vk_icdGetInstanceProcAddr\' for ICD %s",
                       filename);
            goto out;
        }
        // Query "vk_icdGetPhysicalDeviceProcAddr" with vk_icdGetInstanceProcAddr if the library reports interface version 7 or
        // greater, otherwise fallback to loading it from the platform dynamic linker
        if (interface_vers >= 7) {
            fp_get_phys_dev_proc_addr =
                (PFN_vk_icdGetPhysicalDeviceProcAddr)fp_get_proc_addr(NULL, "vk_icdGetPhysicalDeviceProcAddr");
        }
        if (NULL == fp_get_phys_dev_proc_addr && interface_vers >= 3) {
            fp_get_phys_dev_proc_addr = loader_platform_get_proc_address(handle, "vk_icdGetPhysicalDeviceProcAddr");
        }
#if defined(VK_USE_PLATFORM_WIN32_KHR)
        // Query "vk_icdEnumerateAdapterPhysicalDevices" with vk_icdGetInstanceProcAddr if the library reports interface version
        // 7 or greater, otherwise fallback to loading it from the platform dynamic linker
        if (interface_vers >= 7) {
            fp_enum_dxgi_adapter_phys_devs =
                (PFN_vk_icdEnumerateAdapterPhysicalDevices)fp_get_proc_addr(NULL, "vk_icdEnumerateAdapterPhysicalDevices");
        }
        if (NULL == fp_enum_dxgi_adapter_phys_devs && interface_vers >= 6) {
            fp_enum_dxgi_adapter_phys_devs = loader_platform_get_proc_address(handle, "vk_icdEnumerateAdapterPhysicalDevices");
        }
#endif
    }

    // check for enough capacity (capacity is tracked in bytes, count in entries)
    if ((icd_tramp_list->count * sizeof(struct loader_scanned_icd)) >= icd_tramp_list->capacity) {
        void *new_ptr = loader_instance_heap_realloc(inst, icd_tramp_list->scanned_list, icd_tramp_list->capacity,
                                                     icd_tramp_list->capacity * 2, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
        if (NULL == new_ptr) {
            res = VK_ERROR_OUT_OF_HOST_MEMORY;
            loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_scanned_icd_add: Realloc failed on icd library list for ICD %s",
                       filename);
            goto out;
        }
        icd_tramp_list->scanned_list = new_ptr;

        // double capacity
        icd_tramp_list->capacity *= 2;
    }

    // Policy LDP_DRIVER_7: Vulkan 1.1+ drivers should speak interface version 5+.
    loader_api_version api_version_struct = loader_make_version(api_version);
    if (interface_vers <= 4 && loader_check_version_meets_required(LOADER_VERSION_1_1_0, api_version_struct)) {
        loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
                   "loader_scanned_icd_add: Driver %s supports Vulkan %u.%u, but only supports loader interface version %u."
                   " Interface version 5 or newer required to support this version of Vulkan (Policy #LDP_DRIVER_7)",
                   filename, api_version_struct.major, api_version_struct.minor, interface_vers);
    }

    // Fill out the new entry; the open library handle is stored so that
    // loader_scanned_icd_clear can close it later.
    new_scanned_icd = &(icd_tramp_list->scanned_list[icd_tramp_list->count]);
    new_scanned_icd->handle = handle;
    new_scanned_icd->api_version = api_version;
    new_scanned_icd->GetInstanceProcAddr = fp_get_proc_addr;
    new_scanned_icd->GetPhysicalDeviceProcAddr = fp_get_phys_dev_proc_addr;
    new_scanned_icd->EnumerateInstanceExtensionProperties = fp_get_inst_ext_props;
    new_scanned_icd->CreateInstance = fp_create_inst;
#if defined(VK_USE_PLATFORM_WIN32_KHR)
    new_scanned_icd->EnumerateAdapterPhysicalDevices = fp_enum_dxgi_adapter_phys_devs;
#endif
    new_scanned_icd->interface_version = interface_vers;

    res = loader_copy_to_new_str(inst, filename, &new_scanned_icd->lib_name);
    if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
        loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_scanned_icd_add: Out of memory can't add ICD %s", filename);
        goto out;
    }
    icd_tramp_list->count++;

out:

    return res;
}
1823
// One-time process-wide loader setup: global mutexes, loader settings, logging,
// platform hooks, and the library-unloading opt-out. Invoked from the library
// constructor on non-Windows platforms (see loader_init_library below).
void loader_initialize(void) {
    // initialize mutexes
    loader_platform_thread_create_mutex(&loader_lock);
    loader_platform_thread_create_mutex(&loader_preload_icd_lock);
    loader_platform_thread_create_mutex(&loader_global_instance_list_lock);
    init_global_loader_settings();

    // initialize logging
    loader_init_global_debug_level();
#if defined(_WIN32)
    windows_initialization();
#endif

    loader_api_version version = loader_make_full_version(VK_HEADER_VERSION_COMPLETE);
    loader_log(NULL, VULKAN_LOADER_INFO_BIT, 0, "Vulkan Loader Version %d.%d.%d", version.major, version.minor, version.patch);

#if defined(GIT_BRANCH_NAME) && defined(GIT_TAG_INFO)
    loader_log(NULL, VULKAN_LOADER_INFO_BIT, 0, "[Vulkan Loader Git - Tag: " GIT_BRANCH_NAME ", Branch/Commit: " GIT_TAG_INFO "]");
#endif

    // Opt-in env var: when set to exactly "1", driver/layer libraries are kept
    // loaded for the lifetime of the process (strncmp with length 2 also checks
    // the terminating NUL, so "10" etc. does not match).
    char *loader_disable_dynamic_library_unloading_env_var = loader_getenv("VK_LOADER_DISABLE_DYNAMIC_LIBRARY_UNLOADING", NULL);
    if (loader_disable_dynamic_library_unloading_env_var &&
        0 == strncmp(loader_disable_dynamic_library_unloading_env_var, "1", 2)) {
        loader_disable_dynamic_library_unloading = true;
        loader_log(NULL, VULKAN_LOADER_WARN_BIT, 0, "Vulkan Loader: library unloading is disabled");
    } else {
        loader_disable_dynamic_library_unloading = false;
    }
    loader_free_getenv(loader_disable_dynamic_library_unloading_env_var, NULL);
#if defined(LOADER_USE_UNSAFE_FILE_SEARCH)
    loader_log(NULL, VULKAN_LOADER_WARN_BIT, 0, "Vulkan Loader: unsafe searching is enabled");
#endif
}
1857
// Process-wide teardown counterpart to loader_initialize: releases preloaded
// ICD libraries, then destroys the global settings and mutexes. Invoked from
// the library destructor on non-Windows platforms (see loader_free_library).
void loader_release() {
    // Guarantee release of the preloaded ICD libraries. This may have already been called in vkDestroyInstance.
    loader_unload_preloaded_icds();

    // release mutexes
    teardown_global_loader_settings();
    loader_platform_thread_delete_mutex(&loader_lock);
    loader_platform_thread_delete_mutex(&loader_preload_icd_lock);
    loader_platform_thread_delete_mutex(&loader_global_instance_list_lock);
}
1868
1869 // Preload the ICD libraries that are likely to be needed so we don't repeatedly load/unload them later
loader_preload_icds(void)1870 void loader_preload_icds(void) {
1871 loader_platform_thread_lock_mutex(&loader_preload_icd_lock);
1872
1873 // Already preloaded, skip loading again.
1874 if (scanned_icds.scanned_list != NULL) {
1875 loader_platform_thread_unlock_mutex(&loader_preload_icd_lock);
1876 return;
1877 }
1878
1879 VkResult result = loader_icd_scan(NULL, &scanned_icds, NULL, NULL);
1880 if (result != VK_SUCCESS) {
1881 loader_scanned_icd_clear(NULL, &scanned_icds);
1882 }
1883 loader_platform_thread_unlock_mutex(&loader_preload_icd_lock);
1884 }
1885
1886 // Release the ICD libraries that were preloaded
void loader_unload_preloaded_icds(void) {
    // Clearing the list closes each library handle and frees all entries;
    // the preload lock serializes this against loader_preload_icds.
    loader_platform_thread_lock_mutex(&loader_preload_icd_lock);
    loader_scanned_icd_clear(NULL, &scanned_icds);
    loader_platform_thread_unlock_mutex(&loader_preload_icd_lock);
}
1892
#if !defined(_WIN32)
// Run global loader setup/teardown automatically when the shared library is
// loaded/unloaded, via GCC/Clang constructor/destructor attributes.
__attribute__((constructor)) void loader_init_library(void) { loader_initialize(); }

__attribute__((destructor)) void loader_free_library(void) { loader_release(); }
#endif
1898
1899 // Get next file or dirname given a string list or registry key path
1900 //
1901 // \returns
1902 // A pointer to first char in the next path.
1903 // The next path (or NULL) in the list is returned in next_path.
1904 // Note: input string is modified in some cases. PASS IN A COPY!
loader_get_next_path(char * path)1905 char *loader_get_next_path(char *path) {
1906 uint32_t len;
1907 char *next;
1908
1909 if (path == NULL) return NULL;
1910 next = strchr(path, PATH_SEPARATOR);
1911 if (next == NULL) {
1912 len = (uint32_t)strlen(path);
1913 next = path + len;
1914 } else {
1915 *next = '\0';
1916 next++;
1917 }
1918
1919 return next;
1920 }
1921
1922 /* Processes a json manifest's library_path and the location of the json manifest to create the path of the library
1923 * The output is stored in out_fullpath by allocating a string - so its the caller's responsibility to free it
1924 * The output is the combination of the base path of manifest_file_path concatenated with library path
1925 * If library_path is an absolute path, we do not prepend the base path of manifest_file_path
1926 *
1927 * This function takes ownership of library_path - caller does not need to worry about freeing it.
1928 */
VkResult combine_manifest_directory_and_library_path(const struct loader_instance *inst, char *library_path,
                                                     const char *manifest_file_path, char **out_fullpath) {
    assert(library_path && manifest_file_path && out_fullpath);
    // Absolute library_path: hand ownership straight through to out_fullpath.
    if (loader_platform_is_path_absolute(library_path)) {
        *out_fullpath = library_path;
        return VK_SUCCESS;
    }
    VkResult res = VK_SUCCESS;

    size_t library_path_len = strlen(library_path);
    size_t manifest_file_path_str_len = strlen(manifest_file_path);
    bool library_path_contains_directory_symbol = false;
    for (size_t i = 0; i < library_path_len; i++) {
        if (library_path[i] == DIRECTORY_SYMBOL) {
            library_path_contains_directory_symbol = true;
            break;
        }
    }
    // Means that the library_path is neither absolute nor relative - thus we should not modify it at all
    // (a bare file name is left for the dynamic linker's own search).
    if (!library_path_contains_directory_symbol) {
        *out_fullpath = library_path;
        return VK_SUCCESS;
    }
    // must include both a directory symbol and the null terminator
    size_t new_str_len = library_path_len + manifest_file_path_str_len + 1 + 1;

    // calloc (not plain alloc) matters below: the byte immediately after the
    // copied library_path is never written explicitly and relies on zero-fill
    // to terminate the string.
    *out_fullpath = loader_instance_heap_calloc(inst, new_str_len, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
    if (NULL == *out_fullpath) {
        res = VK_ERROR_OUT_OF_HOST_MEMORY;
        goto out;
    }
    size_t cur_loc_in_out_fullpath = 0;
    // look for the last occurrence of DIRECTORY_SYMBOL in manifest_file_path
    size_t last_directory_symbol = 0;
    bool found_directory_symbol = false;
    for (size_t i = 0; i < manifest_file_path_str_len; i++) {
        if (manifest_file_path[i] == DIRECTORY_SYMBOL) {
            last_directory_symbol = i + 1;  // we want to include the symbol
            found_directory_symbol = true;
            // dont break because we want to find the last occurrence
        }
    }
    // Add manifest_file_path up to the last directory symbol
    if (found_directory_symbol) {
        loader_strncpy(*out_fullpath, new_str_len, manifest_file_path, last_directory_symbol);
        cur_loc_in_out_fullpath += last_directory_symbol;
    }
    loader_strncpy(&(*out_fullpath)[cur_loc_in_out_fullpath], new_str_len - cur_loc_in_out_fullpath, library_path,
                   library_path_len);
    // NOTE(review): the "+ 1" skips one byte past the copied library_path; the
    // explicit '\0' below therefore lands one byte beyond the string, and the
    // actual terminator is the calloc zero-fill at the skipped byte. The write
    // stays within new_str_len, but confirm the extra offset is intentional.
    cur_loc_in_out_fullpath += library_path_len + 1;
    (*out_fullpath)[cur_loc_in_out_fullpath] = '\0';

out:
    // This function owns library_path (see contract above); the early-return
    // paths that hand it to out_fullpath never reach here.
    loader_instance_heap_free(inst, library_path);

    return res;
}
1986
1987 // Given a filename (file) and a list of paths (in_dirs), try to find an existing
1988 // file in the paths. If filename already is a path then no searching in the given paths.
1989 //
1990 // @return - A string in out_fullpath of either the full path or file.
loader_get_fullpath(const char * file,const char * in_dirs,size_t out_size,char * out_fullpath)1991 void loader_get_fullpath(const char *file, const char *in_dirs, size_t out_size, char *out_fullpath) {
1992 if (!loader_platform_is_path(file) && *in_dirs) {
1993 size_t dirs_copy_len = strlen(in_dirs) + 1;
1994 char *dirs_copy = loader_stack_alloc(dirs_copy_len);
1995 loader_strncpy(dirs_copy, dirs_copy_len, in_dirs, dirs_copy_len);
1996
1997 // find if file exists after prepending paths in given list
1998 // for (dir = dirs_copy; *dir && (next_dir = loader_get_next_path(dir)); dir = next_dir) {
1999 char *dir = dirs_copy;
2000 char *next_dir = loader_get_next_path(dir);
2001 while (*dir && next_dir) {
2002 int path_concat_ret = snprintf(out_fullpath, out_size, "%s%c%s", dir, DIRECTORY_SYMBOL, file);
2003 if (path_concat_ret < 0) {
2004 continue;
2005 }
2006 if (loader_platform_file_exists(out_fullpath)) {
2007 return;
2008 }
2009 dir = next_dir;
2010 next_dir = loader_get_next_path(dir);
2011 }
2012 }
2013
2014 (void)snprintf(out_fullpath, out_size, "%s", file);
2015 }
2016
2017 // Verify that all component layers in a meta-layer are valid.
// Returns true only when every component layer named by the meta-layer 'prop' exists in
// 'instance_layers', meets the meta-layer's API version, does not self-reference, and -
// when a component is itself a meta-layer - recursively passes the same checks.
bool verify_meta_layer_component_layers(const struct loader_instance *inst, struct loader_layer_properties *prop,
                                        struct loader_layer_list *instance_layers) {
    loader_api_version meta_layer_version = loader_make_version(prop->info.specVersion);

    for (uint32_t comp_layer = 0; comp_layer < prop->component_layer_names.count; comp_layer++) {
        // Look the component layer up by name in the full instance layer list.
        struct loader_layer_properties *comp_prop =
            loader_find_layer_property(prop->component_layer_names.list[comp_layer], instance_layers);
        if (comp_prop == NULL) {
            loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
                       "verify_meta_layer_component_layers: Meta-layer %s can't find component layer %s at index %d."
                       " Skipping this layer.",
                       prop->info.layerName, prop->component_layer_names.list[comp_layer], comp_layer);

            return false;
        }

        // Check the version of each layer, they need to be at least MAJOR and MINOR
        loader_api_version comp_prop_version = loader_make_version(comp_prop->info.specVersion);
        if (!loader_check_version_meets_required(meta_layer_version, comp_prop_version)) {
            loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
                       "verify_meta_layer_component_layers: Meta-layer uses API version %d.%d, but component "
                       "layer %d has API version %d.%d that is lower. Skipping this layer.",
                       meta_layer_version.major, meta_layer_version.minor, comp_layer, comp_prop_version.major,
                       comp_prop_version.minor);

            return false;
        }

        // Make sure the layer isn't using its own name - a self-referencing meta-layer
        // would otherwise recurse forever below.
        if (!strcmp(prop->info.layerName, prop->component_layer_names.list[comp_layer])) {
            loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
                       "verify_meta_layer_component_layers: Meta-layer %s lists itself in its component layer "
                       "list at index %d. Skipping this layer.",
                       prop->info.layerName, comp_layer);

            return false;
        }
        if (comp_prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER) {
            loader_log(inst, VULKAN_LOADER_INFO_BIT, 0,
                       "verify_meta_layer_component_layers: Adding meta-layer %s which also contains meta-layer %s",
                       prop->info.layerName, comp_prop->info.layerName);

            // Make sure if the layer is using a meta-layer in its component list that we also verify that.
            // (Recursion terminates because the self-reference check above rejects cycles of length one;
            // longer cycles would still be a hazard but are not constructible via the override layer.)
            if (!verify_meta_layer_component_layers(inst, comp_prop, instance_layers)) {
                loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
                           "Meta-layer %s component layer %s can not find all component layers."
                           " Skipping this layer.",
                           prop->info.layerName, prop->component_layer_names.list[comp_layer]);
                return false;
            }
        }
    }
    // Didn't exit early so that means it passed all checks
    loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0,
               "Meta-layer \"%s\" all %d component layers appear to be valid.", prop->info.layerName,
               prop->component_layer_names.count);

    // If layer logging is on, list the internals included in the meta-layer
    for (uint32_t comp_layer = 0; comp_layer < prop->component_layer_names.count; comp_layer++) {
        loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "  [%d] %s", comp_layer, prop->component_layer_names.list[comp_layer]);
    }
    return true;
}
2081
2082 // Add any instance and device extensions from component layers to this layer
2083 // list, so that anyone querying extensions will only need to look at the meta-layer
update_meta_layer_extensions_from_component_layers(const struct loader_instance * inst,struct loader_layer_properties * prop,struct loader_layer_list * instance_layers)2084 bool update_meta_layer_extensions_from_component_layers(const struct loader_instance *inst, struct loader_layer_properties *prop,
2085 struct loader_layer_list *instance_layers) {
2086 VkResult res = VK_SUCCESS;
2087 for (uint32_t comp_layer = 0; comp_layer < prop->component_layer_names.count; comp_layer++) {
2088 struct loader_layer_properties *comp_prop =
2089 loader_find_layer_property(prop->component_layer_names.list[comp_layer], instance_layers);
2090
2091 if (NULL != comp_prop->instance_extension_list.list) {
2092 for (uint32_t ext = 0; ext < comp_prop->instance_extension_list.count; ext++) {
2093 loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0, "Meta-layer %s component layer %s adding instance extension %s",
2094 prop->info.layerName, prop->component_layer_names.list[comp_layer],
2095 comp_prop->instance_extension_list.list[ext].extensionName);
2096
2097 if (!has_vk_extension_property(&comp_prop->instance_extension_list.list[ext], &prop->instance_extension_list)) {
2098 res = loader_add_to_ext_list(inst, &prop->instance_extension_list, 1,
2099 &comp_prop->instance_extension_list.list[ext]);
2100 if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
2101 return res;
2102 }
2103 }
2104 }
2105 }
2106 if (NULL != comp_prop->device_extension_list.list) {
2107 for (uint32_t ext = 0; ext < comp_prop->device_extension_list.count; ext++) {
2108 loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0, "Meta-layer %s component layer %s adding device extension %s",
2109 prop->info.layerName, prop->component_layer_names.list[comp_layer],
2110 comp_prop->device_extension_list.list[ext].props.extensionName);
2111
2112 if (!has_vk_dev_ext_property(&comp_prop->device_extension_list.list[ext].props, &prop->device_extension_list)) {
2113 loader_add_to_dev_ext_list(inst, &prop->device_extension_list,
2114 &comp_prop->device_extension_list.list[ext].props, NULL);
2115 if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
2116 return res;
2117 }
2118 }
2119 }
2120 }
2121 }
2122 return res;
2123 }
2124
2125 // Verify that all meta-layers in a layer list are valid.
verify_all_meta_layers(struct loader_instance * inst,const struct loader_envvar_all_filters * filters,struct loader_layer_list * instance_layers,bool * override_layer_present)2126 VkResult verify_all_meta_layers(struct loader_instance *inst, const struct loader_envvar_all_filters *filters,
2127 struct loader_layer_list *instance_layers, bool *override_layer_present) {
2128 VkResult res = VK_SUCCESS;
2129 *override_layer_present = false;
2130 for (int32_t i = 0; i < (int32_t)instance_layers->count; i++) {
2131 struct loader_layer_properties *prop = &instance_layers->list[i];
2132
2133 // If this is a meta-layer, make sure it is valid
2134 if (prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER) {
2135 if (verify_meta_layer_component_layers(inst, prop, instance_layers)) {
2136 // If any meta layer is valid, update its extension list to include the extensions from its component layers.
2137 res = update_meta_layer_extensions_from_component_layers(inst, prop, instance_layers);
2138 if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
2139 return res;
2140 }
2141 if (prop->is_override && loader_implicit_layer_is_enabled(inst, filters, prop)) {
2142 *override_layer_present = true;
2143 }
2144 } else {
2145 loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0,
2146 "Removing meta-layer %s from instance layer list since it appears invalid.", prop->info.layerName);
2147
2148 loader_remove_layer_in_list(inst, instance_layers, i);
2149 i--;
2150 }
2151 }
2152 }
2153 return res;
2154 }
2155
2156 // If the current working directory matches any app_key_path of the layers, remove all other override layers.
2157 // Otherwise if no matching app_key was found, remove all but the global override layer, which has no app_key_path.
// Prune the instance layer list so at most one override layer remains:
//  - if an override layer's app_keys contains the current executable's path, keep the
//    first such layer and drop every other override layer (including the global one);
//  - otherwise keep only the first global override layer (one with no app_key_paths).
void remove_all_non_valid_override_layers(struct loader_instance *inst, struct loader_layer_list *instance_layers) {
    if (instance_layers == NULL) {
        return;
    }

    // Path of the running executable - used to match against app_key entries.
    char cur_path[1024];
    char *ret = loader_platform_executable_path(cur_path, 1024);
    if (NULL == ret) {
        // Cannot determine the executable path; leave the list untouched.
        return;
    }
    // Find out if there is an override layer with same the app_key_path as the path to the current executable.
    // If more than one is found, remove it and use the first layer
    // Remove any layers which aren't global and do not have the same app_key_path as the path to the current executable.
    bool found_active_override_layer = false;
    int global_layer_index = -1;
    for (uint32_t i = 0; i < instance_layers->count; i++) {
        struct loader_layer_properties *props = &instance_layers->list[i];
        if (strcmp(props->info.layerName, VK_OVERRIDE_LAYER_NAME) == 0) {
            if (props->app_key_paths.count > 0) {  // not the global layer
                for (uint32_t j = 0; j < props->app_key_paths.count; j++) {
                    if (strcmp(props->app_key_paths.list[j], cur_path) == 0) {
                        if (!found_active_override_layer) {
                            // First matching override layer wins.
                            found_active_override_layer = true;
                        } else {
                            loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
                                       "remove_all_non_valid_override_layers: Multiple override layers where the same path in "
                                       "app_keys "
                                       "was found. Using the first layer found");

                            // Remove duplicate active override layers that have the same app_key_path
                            // NOTE(review): after this removal 'props' refers to the layer shifted into
                            // slot i, yet the j loop keeps iterating its app_key_paths - confirm
                            // loader_remove_layer_in_list semantics make this safe.
                            loader_remove_layer_in_list(inst, instance_layers, i);
                            i--;
                        }
                    }
                }
                if (!found_active_override_layer) {
                    loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0,
                               "--Override layer found but not used because app \'%s\' is not in \'app_keys\' list!", cur_path);

                    // Remove non-global override layers that don't have an app_key that matches cur_path
                    loader_remove_layer_in_list(inst, instance_layers, i);
                    i--;
                }
            } else {
                // Global override layer (no app_keys): remember the first, drop any others.
                if (global_layer_index == -1) {
                    global_layer_index = i;
                } else {
                    loader_log(
                        inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
                        "remove_all_non_valid_override_layers: Multiple global override layers found. Using the first global "
                        "layer found");
                    loader_remove_layer_in_list(inst, instance_layers, i);
                    i--;
                }
            }
        }
    }
    // Remove global layer if layer with same the app_key_path as the path to the current executable is found
    if (found_active_override_layer && global_layer_index >= 0) {
        loader_remove_layer_in_list(inst, instance_layers, global_layer_index);
    }
    // Should be at most 1 override layer in the list now.
    if (found_active_override_layer) {
        loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Using the override layer for app key %s", cur_path);
    } else if (global_layer_index >= 0) {
        loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Using the global override layer");
    }
}
2226
2227 /* The following are required in the "layer" object:
2228 * "name"
2229 * "type"
2230 * (for non-meta layers) "library_path"
2231 * (for meta layers) "component_layers"
2232 * "api_version"
2233 * "implementation_version"
2234 * "description"
2235 * (for implicit layers) "disable_environment"
2236 */
2237
loader_read_layer_json(const struct loader_instance * inst,struct loader_layer_list * layer_instance_list,cJSON * layer_node,loader_api_version version,bool is_implicit,char * filename)2238 VkResult loader_read_layer_json(const struct loader_instance *inst, struct loader_layer_list *layer_instance_list,
2239 cJSON *layer_node, loader_api_version version, bool is_implicit, char *filename) {
2240 assert(layer_instance_list);
2241 char *type = NULL;
2242 char *api_version = NULL;
2243 char *implementation_version = NULL;
2244 VkResult result = VK_SUCCESS;
2245 struct loader_layer_properties props = {0};
2246
2247 // Parse name
2248
2249 result = loader_parse_json_string_to_existing_str(inst, layer_node, "name", VK_MAX_EXTENSION_NAME_SIZE, props.info.layerName);
2250 if (VK_ERROR_OUT_OF_HOST_MEMORY == result) goto out;
2251 if (VK_ERROR_INITIALIZATION_FAILED == result) {
2252 loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2253 "Layer located at %s didn't find required layer value \"name\" in manifest JSON file, skipping this layer",
2254 filename);
2255 goto out;
2256 }
2257
2258 // Check if this layer's name matches the override layer name, set is_override to true if so.
2259 if (!strcmp(props.info.layerName, VK_OVERRIDE_LAYER_NAME)) {
2260 props.is_override = true;
2261 }
2262
2263 if (0 != strncmp(props.info.layerName, "VK_LAYER_", 9)) {
2264 loader_log(inst, VULKAN_LOADER_WARN_BIT, 0, "Layer name %s does not conform to naming standard (Policy #LLP_LAYER_3)",
2265 props.info.layerName);
2266 }
2267
2268 // Parse type
2269
2270 result = loader_parse_json_string(layer_node, "type", &type);
2271 if (VK_ERROR_OUT_OF_HOST_MEMORY == result) goto out;
2272 if (VK_ERROR_INITIALIZATION_FAILED == result) {
2273 loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2274 "Layer located at %s didn't find required layer value \"type\" in manifest JSON file, skipping this layer",
2275 filename);
2276 goto out;
2277 }
2278
2279 // Add list entry
2280 if (!strcmp(type, "DEVICE")) {
2281 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Device layers are deprecated. Skipping this layer");
2282 result = VK_ERROR_INITIALIZATION_FAILED;
2283 goto out;
2284 }
2285
2286 // Allow either GLOBAL or INSTANCE type interchangeably to handle layers that must work with older loaders
2287 if (!strcmp(type, "INSTANCE") || !strcmp(type, "GLOBAL")) {
2288 props.type_flags = VK_LAYER_TYPE_FLAG_INSTANCE_LAYER;
2289 if (!is_implicit) {
2290 props.type_flags |= VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER;
2291 }
2292 } else {
2293 result = VK_ERROR_INITIALIZATION_FAILED;
2294 goto out;
2295 }
2296
2297 // Parse api_version
2298
2299 result = loader_parse_json_string(layer_node, "api_version", &api_version);
2300 if (VK_ERROR_OUT_OF_HOST_MEMORY == result) goto out;
2301 if (VK_ERROR_INITIALIZATION_FAILED == result) {
2302 loader_log(
2303 inst, VULKAN_LOADER_WARN_BIT, 0,
2304 "Layer located at %s didn't find required layer value \"api_version\" in manifest JSON file, skipping this layer",
2305 filename);
2306 goto out;
2307 }
2308
2309 props.info.specVersion = loader_parse_version_string(api_version);
2310
2311 // Make sure the layer's manifest doesn't contain a non zero variant value
2312 if (VK_API_VERSION_VARIANT(props.info.specVersion) != 0) {
2313 loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0,
2314 "Layer \"%s\" has an \'api_version\' field which contains a non-zero variant value of %d. "
2315 " Skipping Layer.",
2316 props.info.layerName, VK_API_VERSION_VARIANT(props.info.specVersion));
2317 result = VK_ERROR_INITIALIZATION_FAILED;
2318 goto out;
2319 }
2320
2321 // Parse implementation_version
2322
2323 result = loader_parse_json_string(layer_node, "implementation_version", &implementation_version);
2324 if (VK_ERROR_OUT_OF_HOST_MEMORY == result) goto out;
2325 if (VK_ERROR_INITIALIZATION_FAILED == result) {
2326 loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2327 "Layer located at %s didn't find required layer value \"implementation_version\" in manifest JSON file, "
2328 "skipping this layer",
2329 filename);
2330 goto out;
2331 }
2332 props.info.implementationVersion = atoi(implementation_version);
2333
2334 // Parse description
2335
2336 result = loader_parse_json_string_to_existing_str(inst, layer_node, "description", VK_MAX_EXTENSION_NAME_SIZE,
2337 props.info.description);
2338 if (VK_ERROR_OUT_OF_HOST_MEMORY == result) goto out;
2339 if (VK_ERROR_INITIALIZATION_FAILED == result) {
2340 loader_log(
2341 inst, VULKAN_LOADER_WARN_BIT, 0,
2342 "Layer located at %s didn't find required layer value \"description\" in manifest JSON file, skipping this layer",
2343 filename);
2344 goto out;
2345 }
2346
2347 // Parse library_path
2348
2349 // Library path no longer required unless component_layers is also not defined
2350 cJSON *library_path = loader_cJSON_GetObjectItem(layer_node, "library_path");
2351
2352 if (NULL != library_path) {
2353 if (NULL != loader_cJSON_GetObjectItem(layer_node, "component_layers")) {
2354 loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2355 "Indicating meta-layer-specific component_layers, but also defining layer library path. Both are not "
2356 "compatible, so skipping this layer");
2357 result = VK_ERROR_INITIALIZATION_FAILED;
2358 goto out;
2359 }
2360
2361 result = loader_copy_to_new_str(inst, filename, &props.manifest_file_name);
2362 if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2363
2364 char *library_path_str = loader_cJSON_Print(library_path);
2365 if (NULL == library_path_str) {
2366 loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2367 "Skipping layer due to problem accessing the library_path value in manifest JSON file %s", filename);
2368 result = VK_ERROR_OUT_OF_HOST_MEMORY;
2369 goto out;
2370 }
2371
2372 // This function takes ownership of library_path_str - so we don't need to clean it up
2373 result = combine_manifest_directory_and_library_path(inst, library_path_str, filename, &props.lib_name);
2374 if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2375 }
2376
2377 // Parse component_layers
2378
2379 if (NULL == library_path) {
2380 if (!loader_check_version_meets_required(LOADER_VERSION_1_1_0, version)) {
2381 loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2382 "Indicating meta-layer-specific component_layers, but using older JSON file version.");
2383 }
2384
2385 result = loader_parse_json_array_of_strings(inst, layer_node, "component_layers", &(props.component_layer_names));
2386 if (VK_ERROR_OUT_OF_HOST_MEMORY == result) {
2387 goto out;
2388 }
2389 if (VK_ERROR_INITIALIZATION_FAILED == result) {
2390 loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2391 "Layer missing both library_path and component_layers fields. One or the other MUST be defined. Skipping "
2392 "this layer");
2393 goto out;
2394 }
2395 // This is now, officially, a meta-layer
2396 props.type_flags |= VK_LAYER_TYPE_FLAG_META_LAYER;
2397 loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Encountered meta-layer \"%s\"",
2398 props.info.layerName);
2399 }
2400
2401 // Parse blacklisted_layers
2402
2403 if (props.is_override) {
2404 result = loader_parse_json_array_of_strings(inst, layer_node, "blacklisted_layers", &(props.blacklist_layer_names));
2405 if (VK_ERROR_OUT_OF_HOST_MEMORY == result) {
2406 goto out;
2407 }
2408 }
2409
2410 // Parse override_paths
2411
2412 result = loader_parse_json_array_of_strings(inst, layer_node, "override_paths", &(props.override_paths));
2413 if (VK_ERROR_OUT_OF_HOST_MEMORY == result) {
2414 goto out;
2415 }
2416 if (NULL != props.override_paths.list && !loader_check_version_meets_required(loader_combine_version(1, 1, 0), version)) {
2417 loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2418 "Indicating meta-layer-specific override paths, but using older JSON file version.");
2419 }
2420
2421 // Parse disable_environment
2422
2423 if (is_implicit) {
2424 cJSON *disable_environment = loader_cJSON_GetObjectItem(layer_node, "disable_environment");
2425 if (disable_environment == NULL) {
2426 loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2427 "Didn't find required layer object disable_environment in manifest JSON file, skipping this layer");
2428 result = VK_ERROR_INITIALIZATION_FAILED;
2429 goto out;
2430 }
2431
2432 if (!disable_environment->child || disable_environment->child->type != cJSON_String) {
2433 loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2434 "Didn't find required layer child value disable_environment in manifest JSON file, skipping this layer "
2435 "(Policy #LLP_LAYER_9)");
2436 result = VK_ERROR_INITIALIZATION_FAILED;
2437 goto out;
2438 }
2439 result = loader_copy_to_new_str(inst, disable_environment->child->string, &(props.disable_env_var.name));
2440 if (VK_SUCCESS != result) goto out;
2441 result = loader_copy_to_new_str(inst, disable_environment->child->valuestring, &(props.disable_env_var.value));
2442 if (VK_SUCCESS != result) goto out;
2443 }
2444
2445 // Now get all optional items and objects and put in list:
2446 // functions
2447 // instance_extensions
2448 // device_extensions
2449 // enable_environment (implicit layers only)
2450 // library_arch
2451
2452 // Layer interface functions
2453 // vkGetInstanceProcAddr
2454 // vkGetDeviceProcAddr
2455 // vkNegotiateLoaderLayerInterfaceVersion (starting with JSON file 1.1.0)
2456 cJSON *functions = loader_cJSON_GetObjectItem(layer_node, "functions");
2457 if (functions != NULL) {
2458 if (loader_check_version_meets_required(loader_combine_version(1, 1, 0), version)) {
2459 result = loader_parse_json_string(functions, "vkNegotiateLoaderLayerInterfaceVersion",
2460 &props.functions.str_negotiate_interface);
2461 if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2462 }
2463 result = loader_parse_json_string(functions, "vkGetInstanceProcAddr", &props.functions.str_gipa);
2464 if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2465
2466 if (props.functions.str_gipa && loader_check_version_meets_required(loader_combine_version(1, 1, 0), version)) {
2467 loader_log(inst, VULKAN_LOADER_INFO_BIT, 0,
2468 "Layer \"%s\" using deprecated \'vkGetInstanceProcAddr\' tag which was deprecated starting with JSON "
2469 "file version 1.1.0. The new vkNegotiateLoaderLayerInterfaceVersion function is preferred, though for "
2470 "compatibility reasons it may be desirable to continue using the deprecated tag.",
2471 props.info.layerName);
2472 }
2473
2474 result = loader_parse_json_string(functions, "vkGetDeviceProcAddr", &props.functions.str_gdpa);
2475 if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2476
2477 if (props.functions.str_gdpa && loader_check_version_meets_required(loader_combine_version(1, 1, 0), version)) {
2478 loader_log(inst, VULKAN_LOADER_INFO_BIT, 0,
2479 "Layer \"%s\" using deprecated \'vkGetDeviceProcAddr\' tag which was deprecated starting with JSON "
2480 "file version 1.1.0. The new vkNegotiateLoaderLayerInterfaceVersion function is preferred, though for "
2481 "compatibility reasons it may be desirable to continue using the deprecated tag.",
2482 props.info.layerName);
2483 }
2484 }
2485
2486 // instance_extensions
2487 // array of {
2488 // name
2489 // spec_version
2490 // }
2491
2492 cJSON *instance_extensions = loader_cJSON_GetObjectItem(layer_node, "instance_extensions");
2493 if (instance_extensions != NULL) {
2494 int count = loader_cJSON_GetArraySize(instance_extensions);
2495 for (int i = 0; i < count; i++) {
2496 VkExtensionProperties ext_prop = {0};
2497 cJSON *ext_item = loader_cJSON_GetArrayItem(instance_extensions, i);
2498 result = loader_parse_json_string_to_existing_str(inst, ext_item, "name", VK_MAX_EXTENSION_NAME_SIZE,
2499 ext_prop.extensionName);
2500 if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2501 if (result == VK_ERROR_INITIALIZATION_FAILED) continue;
2502 char *spec_version = NULL;
2503 result = loader_parse_json_string(ext_item, "spec_version", &spec_version);
2504 if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2505 if (NULL != spec_version) {
2506 ext_prop.specVersion = atoi(spec_version);
2507 }
2508 loader_instance_heap_free(inst, spec_version);
2509 bool ext_unsupported = wsi_unsupported_instance_extension(&ext_prop);
2510 if (!ext_unsupported) {
2511 loader_add_to_ext_list(inst, &props.instance_extension_list, 1, &ext_prop);
2512 }
2513 }
2514 }
2515
2516 // device_extensions
2517 // array of {
2518 // name
2519 // spec_version
2520 // entrypoints
2521 // }
2522 cJSON *device_extensions = loader_cJSON_GetObjectItem(layer_node, "device_extensions");
2523 if (device_extensions != NULL) {
2524 int count = loader_cJSON_GetArraySize(device_extensions);
2525 for (int i = 0; i < count; i++) {
2526 VkExtensionProperties ext_prop = {0};
2527
2528 cJSON *ext_item = loader_cJSON_GetArrayItem(device_extensions, i);
2529
2530 result = loader_parse_json_string_to_existing_str(inst, ext_item, "name", VK_MAX_EXTENSION_NAME_SIZE,
2531 ext_prop.extensionName);
2532 if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2533
2534 char *spec_version = NULL;
2535 result = loader_parse_json_string(ext_item, "spec_version", &spec_version);
2536 if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2537 if (NULL != spec_version) {
2538 ext_prop.specVersion = atoi(spec_version);
2539 }
2540 loader_instance_heap_free(inst, spec_version);
2541
2542 cJSON *entrypoints = loader_cJSON_GetObjectItem(ext_item, "entrypoints");
2543 if (entrypoints == NULL) {
2544 result = loader_add_to_dev_ext_list(inst, &props.device_extension_list, &ext_prop, NULL);
2545 if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2546 continue;
2547 }
2548
2549 struct loader_string_list entrys = {0};
2550 result = loader_parse_json_array_of_strings(inst, ext_item, "entrypoints", &entrys);
2551 if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2552 result = loader_add_to_dev_ext_list(inst, &props.device_extension_list, &ext_prop, &entrys);
2553 if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2554 }
2555 }
2556 if (is_implicit) {
2557 cJSON *enable_environment = loader_cJSON_GetObjectItem(layer_node, "enable_environment");
2558
2559 // enable_environment is optional
2560 if (enable_environment && enable_environment->child && enable_environment->child->type == cJSON_String) {
2561 result = loader_copy_to_new_str(inst, enable_environment->child->string, &(props.enable_env_var.name));
2562 if (VK_SUCCESS != result) goto out;
2563 result = loader_copy_to_new_str(inst, enable_environment->child->valuestring, &(props.enable_env_var.value));
2564 if (VK_SUCCESS != result) goto out;
2565 }
2566 }
2567
2568 // Read in the pre-instance stuff
2569 cJSON *pre_instance = loader_cJSON_GetObjectItem(layer_node, "pre_instance_functions");
2570 if (NULL != pre_instance) {
2571 // Supported versions started in 1.1.2, so anything newer
2572 if (!loader_check_version_meets_required(loader_combine_version(1, 1, 2), version)) {
2573 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
2574 "Found pre_instance_functions section in layer from \"%s\". This section is only valid in manifest version "
2575 "1.1.2 or later. The section will be ignored",
2576 filename);
2577 } else if (!is_implicit) {
2578 loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2579 "Found pre_instance_functions section in explicit layer from \"%s\". This section is only valid in implicit "
2580 "layers. The section will be ignored",
2581 filename);
2582 } else {
2583 result = loader_parse_json_string(pre_instance, "vkEnumerateInstanceExtensionProperties",
2584 &props.pre_instance_functions.enumerate_instance_extension_properties);
2585 if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2586
2587 result = loader_parse_json_string(pre_instance, "vkEnumerateInstanceLayerProperties",
2588 &props.pre_instance_functions.enumerate_instance_layer_properties);
2589 if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2590
2591 result = loader_parse_json_string(pre_instance, "vkEnumerateInstanceVersion",
2592 &props.pre_instance_functions.enumerate_instance_version);
2593 if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2594 }
2595 }
2596
2597 if (loader_cJSON_GetObjectItem(layer_node, "app_keys")) {
2598 if (!props.is_override) {
2599 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
2600 "Layer %s contains app_keys, but any app_keys can only be provided by the override meta layer. "
2601 "These will be ignored.",
2602 props.info.layerName);
2603 }
2604
2605 result = loader_parse_json_array_of_strings(inst, layer_node, "app_keys", &props.app_key_paths);
2606 if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2607 }
2608
2609 char *library_arch = NULL;
2610 result = loader_parse_json_string(layer_node, "library_arch", &library_arch);
2611 if (result == VK_ERROR_OUT_OF_HOST_MEMORY) goto out;
2612 if (library_arch != NULL) {
2613 if ((strncmp(library_arch, "32", 2) == 0 && sizeof(void *) != 4) ||
2614 (strncmp(library_arch, "64", 2) == 0 && sizeof(void *) != 8)) {
2615 loader_log(inst, VULKAN_LOADER_INFO_BIT, 0,
2616 "Layer library architecture doesn't match the current running architecture, skipping this layer");
2617 loader_instance_heap_free(inst, library_arch);
2618 result = VK_ERROR_INITIALIZATION_FAILED;
2619 goto out;
2620 }
2621 loader_instance_heap_free(inst, library_arch);
2622 }
2623
2624 result = VK_SUCCESS;
2625
2626 out:
2627 // Try to append the layer property
2628 if (VK_SUCCESS == result) {
2629 result = loader_append_layer_property(inst, layer_instance_list, &props);
2630 }
2631 // If appending fails - free all the memory allocated in it
2632 if (VK_SUCCESS != result) {
2633 loader_free_layer_properties(inst, &props);
2634 }
2635 loader_instance_heap_free(inst, type);
2636 loader_instance_heap_free(inst, api_version);
2637 loader_instance_heap_free(inst, implementation_version);
2638 return result;
2639 }
2640
is_valid_layer_json_version(const loader_api_version * layer_json)2641 bool is_valid_layer_json_version(const loader_api_version *layer_json) {
2642 // Supported versions are: 1.0.0, 1.0.1, 1.1.0 - 1.1.2, and 1.2.0 - 1.2.1.
2643 if ((layer_json->major == 1 && layer_json->minor == 2 && layer_json->patch < 2) ||
2644 (layer_json->major == 1 && layer_json->minor == 1 && layer_json->patch < 3) ||
2645 (layer_json->major == 1 && layer_json->minor == 0 && layer_json->patch < 2)) {
2646 return true;
2647 }
2648 return false;
2649 }
2650
2651 // Given a cJSON struct (json) of the top level JSON object from layer manifest
2652 // file, add entry to the layer_list. Fill out the layer_properties in this list
2653 // entry from the input cJSON object.
2654 //
2655 // \returns
2656 // void
2657 // layer_list has a new entry and initialized accordingly.
2658 // If the json input object does not have all the required fields no entry
2659 // is added to the list.
loader_add_layer_properties(const struct loader_instance * inst,struct loader_layer_list * layer_instance_list,cJSON * json,bool is_implicit,char * filename)2660 VkResult loader_add_layer_properties(const struct loader_instance *inst, struct loader_layer_list *layer_instance_list, cJSON *json,
2661 bool is_implicit, char *filename) {
2662 // The following Fields in layer manifest file that are required:
2663 // - "file_format_version"
2664 // - If more than one "layer" object are used, then the "layers" array is
2665 // required
2666 VkResult result = VK_ERROR_INITIALIZATION_FAILED;
2667 cJSON *item, *layers_node, *layer_node;
2668 loader_api_version json_version = {0, 0, 0};
2669 char *file_vers = NULL;
2670 // Make sure sure the top level json value is an object
2671 if (!json || json->type != 6) {
2672 goto out;
2673 }
2674 item = loader_cJSON_GetObjectItem(json, "file_format_version");
2675 if (item == NULL) {
2676 goto out;
2677 }
2678 file_vers = loader_cJSON_PrintUnformatted(item);
2679 if (NULL == file_vers) {
2680 result = VK_ERROR_OUT_OF_HOST_MEMORY;
2681 goto out;
2682 }
2683 loader_log(inst, VULKAN_LOADER_INFO_BIT, 0, "Found manifest file %s (file version %s)", filename, file_vers);
2684 // Get the major/minor/and patch as integers for easier comparison
2685 json_version = loader_make_full_version(loader_parse_version_string(file_vers));
2686
2687 if (!is_valid_layer_json_version(&json_version)) {
2688 loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0,
2689 "loader_add_layer_properties: %s has unknown layer manifest file version %d.%d.%d. May cause errors.", filename,
2690 json_version.major, json_version.minor, json_version.patch);
2691 }
2692
2693 // If "layers" is present, read in the array of layer objects
2694 layers_node = loader_cJSON_GetObjectItem(json, "layers");
2695 if (layers_node != NULL) {
2696 int numItems = loader_cJSON_GetArraySize(layers_node);
2697 // Supported versions started in 1.0.1, so anything newer
2698 if (!loader_check_version_meets_required(loader_combine_version(1, 0, 1), json_version)) {
2699 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
2700 "loader_add_layer_properties: \'layers\' tag not supported until file version 1.0.1, but %s is reporting "
2701 "version %s",
2702 filename, file_vers);
2703 }
2704 for (int curLayer = 0; curLayer < numItems; curLayer++) {
2705 layer_node = loader_cJSON_GetArrayItem(layers_node, curLayer);
2706 if (layer_node == NULL) {
2707 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
2708 "loader_add_layer_properties: Can not find 'layers' array element %d object in manifest JSON file %s. "
2709 "Skipping this file",
2710 curLayer, filename);
2711 goto out;
2712 }
2713 result = loader_read_layer_json(inst, layer_instance_list, layer_node, json_version, is_implicit, filename);
2714 }
2715 } else {
2716 // Otherwise, try to read in individual layers
2717 layer_node = loader_cJSON_GetObjectItem(json, "layer");
2718 if (layer_node == NULL) {
2719 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
2720 "loader_add_layer_properties: Can not find 'layer' object in manifest JSON file %s. Skipping this file.",
2721 filename);
2722 goto out;
2723 }
2724 // Loop through all "layer" objects in the file to get a count of them
2725 // first.
2726 uint16_t layer_count = 0;
2727 cJSON *tempNode = layer_node;
2728 do {
2729 tempNode = tempNode->next;
2730 layer_count++;
2731 } while (tempNode != NULL);
2732
2733 // Throw a warning if we encounter multiple "layer" objects in file
2734 // versions newer than 1.0.0. Having multiple objects with the same
2735 // name at the same level is actually a JSON standard violation.
2736 if (layer_count > 1 && loader_check_version_meets_required(loader_combine_version(1, 0, 1), json_version)) {
2737 loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_LAYER_BIT, 0,
2738 "loader_add_layer_properties: Multiple 'layer' nodes are deprecated starting in file version \"1.0.1\". "
2739 "Please use 'layers' : [] array instead in %s.",
2740 filename);
2741 } else {
2742 do {
2743 result = loader_read_layer_json(inst, layer_instance_list, layer_node, json_version, is_implicit, filename);
2744 layer_node = layer_node->next;
2745 } while (layer_node != NULL);
2746 }
2747 }
2748
2749 out:
2750 loader_instance_heap_free(inst, file_vers);
2751
2752 return result;
2753 }
2754
determine_data_file_path_size(const char * cur_path,size_t relative_path_size)2755 size_t determine_data_file_path_size(const char *cur_path, size_t relative_path_size) {
2756 size_t path_size = 0;
2757
2758 if (NULL != cur_path) {
2759 // For each folder in cur_path, (detected by finding additional
2760 // path separators in the string) we need to add the relative path on
2761 // the end. Plus, leave an additional two slots on the end to add an
2762 // additional directory slash and path separator if needed
2763 path_size += strlen(cur_path) + relative_path_size + 2;
2764 for (const char *x = cur_path; *x; ++x) {
2765 if (*x == PATH_SEPARATOR) {
2766 path_size += relative_path_size + 2;
2767 }
2768 }
2769 }
2770
2771 return path_size;
2772 }
2773
// Expand each PATH_SEPARATOR-delimited entry of 'cur_path' into *output_path:
// entries that already name a JSON file are copied verbatim, any other entry
// gets the relative path appended. Advances *output_path past everything
// written. No bounds checking happens here -- the caller must have sized the
// destination using determine_data_file_path_size().
void copy_data_file_info(const char *cur_path, const char *relative_path, size_t relative_path_size, char **output_path) {
    if (NULL != cur_path) {
        uint32_t start = 0;
        uint32_t stop = 0;
        char *cur_write = *output_path;

        while (cur_path[start] != '\0') {
            // Skip over leading (or consecutive) path separators.
            while (cur_path[start] == PATH_SEPARATOR) {
                start++;
            }
            // Find the end of the current entry.
            stop = start;
            while (cur_path[stop] != PATH_SEPARATOR && cur_path[stop] != '\0') {
                stop++;
            }
            const size_t s = stop - start;
            if (s) {
                memcpy(cur_write, &cur_path[start], s);
                cur_write += s;

                // If this is a specific JSON file, just add it and don't add any
                // relative path or directory symbol to it.
                // NOTE(review): 'cur_write - 5' points at the last five bytes just
                // written; presumably is_json() uses 's' to reject entries shorter
                // than ".json" before reading there -- confirm against its definition.
                if (!is_json(cur_write - 5, s)) {
                    // Add the relative directory if present.
                    if (relative_path_size > 0) {
                        // If last symbol written was not a directory symbol, add it.
                        if (*(cur_write - 1) != DIRECTORY_SYMBOL) {
                            *cur_write++ = DIRECTORY_SYMBOL;
                        }
                        memcpy(cur_write, relative_path, relative_path_size);
                        cur_write += relative_path_size;
                    }
                }

                // Each finished entry is terminated with a separator; the caller
                // trims or overwrites the trailing one.
                *cur_write++ = PATH_SEPARATOR;
                start = stop;
            }
        }
        // Hand the advanced write cursor back to the caller.
        *output_path = cur_write;
    }
}
2814
2815 // If the file found is a manifest file name, add it to the out_files manifest list.
add_if_manifest_file(const struct loader_instance * inst,const char * file_name,struct loader_string_list * out_files)2816 VkResult add_if_manifest_file(const struct loader_instance *inst, const char *file_name, struct loader_string_list *out_files) {
2817 VkResult vk_result = VK_SUCCESS;
2818
2819 assert(NULL != file_name && "add_if_manifest_file: Received NULL pointer for file_name");
2820 assert(NULL != out_files && "add_if_manifest_file: Received NULL pointer for out_files");
2821
2822 // Look for files ending with ".json" suffix
2823 size_t name_len = strlen(file_name);
2824 const char *name_suffix = file_name + name_len - 5;
2825 if (!is_json(name_suffix, name_len)) {
2826 // Use incomplete to indicate invalid name, but to keep going.
2827 vk_result = VK_INCOMPLETE;
2828 goto out;
2829 }
2830
2831 vk_result = copy_str_to_string_list(inst, out_files, file_name, name_len);
2832
2833 out:
2834
2835 return vk_result;
2836 }
2837
2838 // Add any files found in the search_path. If any path in the search path points to a specific JSON, attempt to
2839 // only open that one JSON. Otherwise, if the path is a folder, search the folder for JSON files.
VkResult add_data_files(const struct loader_instance *inst, char *search_path, struct loader_string_list *out_files,
                        bool use_first_found_manifest) {
    VkResult vk_result = VK_SUCCESS;
    DIR *dir_stream = NULL;
    struct dirent *dir_entry;
    char *cur_file;
    char *next_file;
    char *name;
    // Fixed-size scratch buffers for building absolute paths.
    char full_path[2048];
#if !defined(_WIN32)
    char temp_path[2048];
#endif

    // Now, parse the paths
    next_file = search_path;
    while (NULL != next_file && *next_file != '\0') {
        name = NULL;
        cur_file = next_file;
        // NOTE(review): presumably loader_get_next_path() terminates the current
        // entry at its path separator and returns the following entry (or NULL),
        // i.e. it mutates search_path -- confirm against its definition.
        next_file = loader_get_next_path(cur_file);

        // Is this a JSON file, then try to open it.
        size_t len = strlen(cur_file);
        if (is_json(cur_file + len - 5, len)) {
#if defined(_WIN32)
            name = cur_file;
#elif COMMON_UNIX_PLATFORMS
            // Only Linux has relative paths, make a copy of location so it isn't modified
            size_t str_len;
            if (NULL != next_file) {
                str_len = next_file - cur_file + 1;
            } else {
                str_len = strlen(cur_file) + 1;
            }
            if (str_len > sizeof(temp_path)) {
                loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0, "add_data_files: Path to %s too long", cur_file);
                continue;
            }
            // str_len includes the terminator of the (already split) entry.
            strncpy(temp_path, cur_file, str_len);
            name = temp_path;
#else
#warning add_data_files must define relative path copy for this platform
#endif
            // Resolve to an absolute path before recording the manifest.
            loader_get_fullpath(cur_file, name, sizeof(full_path), full_path);
            name = full_path;

            VkResult local_res;
            local_res = add_if_manifest_file(inst, name, out_files);

            // Incomplete means this was not a valid data file.
            if (local_res == VK_INCOMPLETE) {
                continue;
            } else if (local_res != VK_SUCCESS) {
                // Real failures (e.g. out of memory) end the scan.
                vk_result = local_res;
                break;
            }
        } else {  // Otherwise, treat it as a directory
            dir_stream = loader_opendir(inst, cur_file);
            if (NULL == dir_stream) {
                // Missing or unreadable directories are skipped silently.
                continue;
            }
            while (1) {
                dir_entry = readdir(dir_stream);
                if (NULL == dir_entry) {
                    break;
                }

                name = &(dir_entry->d_name[0]);
                loader_get_fullpath(name, cur_file, sizeof(full_path), full_path);
                name = full_path;

                VkResult local_res;
                local_res = add_if_manifest_file(inst, name, out_files);

                // Incomplete means this was not a valid data file.
                if (local_res == VK_INCOMPLETE) {
                    continue;
                } else if (local_res != VK_SUCCESS) {
                    vk_result = local_res;
                    break;
                }
            }
            // Always close the stream, even when the inner loop broke on error.
            loader_closedir(inst, dir_stream);
            if (vk_result != VK_SUCCESS) {
                goto out;
            }
        }
        // Callers such as macOS bundle scanning only want the first hit.
        if (use_first_found_manifest && out_files->count > 0) {
            break;
        }
    }

out:

    return vk_result;
}
2935
2936 // Look for data files in the provided paths, but first check the environment override to determine if we should use that
2937 // instead.
read_data_files_in_search_paths(const struct loader_instance * inst,enum loader_data_files_type manifest_type,const char * path_override,bool * override_active,struct loader_string_list * out_files)2938 VkResult read_data_files_in_search_paths(const struct loader_instance *inst, enum loader_data_files_type manifest_type,
2939 const char *path_override, bool *override_active, struct loader_string_list *out_files) {
2940 VkResult vk_result = VK_SUCCESS;
2941 char *override_env = NULL;
2942 const char *override_path = NULL;
2943 char *additional_env = NULL;
2944 size_t search_path_size = 0;
2945 char *search_path = NULL;
2946 char *cur_path_ptr = NULL;
2947 bool use_first_found_manifest = false;
2948 #if COMMON_UNIX_PLATFORMS
2949 char *relative_location = NULL; // Only used on unix platforms
2950 size_t rel_size = 0; // unused in windows, dont declare so no compiler warnings are generated
2951 #endif
2952
2953 #if defined(_WIN32)
2954 char *package_path = NULL;
2955 #elif COMMON_UNIX_PLATFORMS
2956 // Determine how much space is needed to generate the full search path
2957 // for the current manifest files.
2958 char *xdg_config_home = loader_secure_getenv("XDG_CONFIG_HOME", inst);
2959 char *xdg_config_dirs = loader_secure_getenv("XDG_CONFIG_DIRS", inst);
2960
2961 #if !defined(__Fuchsia__) && !defined(__QNX__) && !defined(__OHOS__)
2962 if (NULL == xdg_config_dirs || '\0' == xdg_config_dirs[0]) {
2963 xdg_config_dirs = FALLBACK_CONFIG_DIRS;
2964 }
2965 #endif
2966
2967 char *xdg_data_home = loader_secure_getenv("XDG_DATA_HOME", inst);
2968 char *xdg_data_dirs = loader_secure_getenv("XDG_DATA_DIRS", inst);
2969
2970 #if !defined(__Fuchsia__) && !defined(__QNX__) && !defined(__OHOS__)
2971 if (NULL == xdg_data_dirs || '\0' == xdg_data_dirs[0]) {
2972 xdg_data_dirs = FALLBACK_DATA_DIRS;
2973 }
2974 #endif
2975
2976 char *home = NULL;
2977 char *default_data_home = NULL;
2978 char *default_config_home = NULL;
2979 char *home_data_dir = NULL;
2980 char *home_config_dir = NULL;
2981
2982 // Only use HOME if XDG_DATA_HOME is not present on the system
2983 home = loader_secure_getenv("HOME", inst);
2984 if (home != NULL) {
2985 if (NULL == xdg_config_home || '\0' == xdg_config_home[0]) {
2986 const char config_suffix[] = "/.config";
2987 size_t default_config_home_len = strlen(home) + sizeof(config_suffix) + 1;
2988 default_config_home = loader_instance_heap_calloc(inst, default_config_home_len, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
2989 if (default_config_home == NULL) {
2990 vk_result = VK_ERROR_OUT_OF_HOST_MEMORY;
2991 goto out;
2992 }
2993 strncpy(default_config_home, home, default_config_home_len);
2994 strncat(default_config_home, config_suffix, default_config_home_len);
2995 }
2996 if (NULL == xdg_data_home || '\0' == xdg_data_home[0]) {
2997 const char data_suffix[] = "/.local/share";
2998 size_t default_data_home_len = strlen(home) + sizeof(data_suffix) + 1;
2999 default_data_home = loader_instance_heap_calloc(inst, default_data_home_len, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
3000 if (default_data_home == NULL) {
3001 vk_result = VK_ERROR_OUT_OF_HOST_MEMORY;
3002 goto out;
3003 }
3004 strncpy(default_data_home, home, default_data_home_len);
3005 strncat(default_data_home, data_suffix, default_data_home_len);
3006 }
3007 }
3008
3009 if (NULL != default_config_home) {
3010 home_config_dir = default_config_home;
3011 } else {
3012 home_config_dir = xdg_config_home;
3013 }
3014 if (NULL != default_data_home) {
3015 home_data_dir = default_data_home;
3016 } else {
3017 home_data_dir = xdg_data_home;
3018 }
3019
3020 #if defined(__OHOS__)
3021 char *debug_layer_name = loader_secure_getenv("debug.graphic.debug_layer", inst); // squid squidsubcapture
3022 char *debug_hap_name = loader_secure_getenv("debug.graphic.debug_hap", inst);
3023 char *debug_layer_json_path = NULL;
3024
3025 bool currentProcessEnableDebugLayer = false;
3026 if(NULL != debug_hap_name && '\0' != debug_hap_name[0] && NULL != debug_layer_name && '\0' != debug_layer_name[0]){
3027 currentProcessEnableDebugLayer = true;
3028 debug_layer_json_path = loader_secure_getenv("debug.graphic.vklayer_json_path",inst);
3029 if (NULL == debug_layer_json_path || '\0' == debug_layer_json_path[0]){
3030 const char default_json_path[] = "/data/storage/el2/base/haps/entry/files/";
3031 const char json_suffix[] = ".json";
3032 size_t debug_layer_json_path_len = strlen(default_json_path) + sizeof(debug_layer_name) + sizeof(json_suffix) +1;
3033 debug_layer_json_path = loader_instance_heap_calloc(inst,debug_layer_json_path_len,VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
3034 if(debug_layer_json_path == NULL){
3035 vk_result = VK_ERROR_OUT_OF_HOST_MEMORY;
3036 goto out;
3037 }
3038 strncpy(debug_layer_json_path,default_json_path,debug_layer_json_path_len);
3039 strncat(debug_layer_json_path,debug_layer_name,debug_layer_json_path_len);
3040 strncat(debug_layer_json_path,json_suffix,debug_layer_json_path_len);
3041 }
3042 }
3043 #endif
3044
3045 #else
3046 #warning read_data_files_in_search_paths unsupported platform
3047 #endif
3048
3049 switch (manifest_type) {
3050 case LOADER_DATA_FILE_MANIFEST_DRIVER:
3051 override_env = loader_secure_getenv(VK_DRIVER_FILES_ENV_VAR, inst);
3052 if (NULL == override_env) {
3053 // Not there, so fall back to the old name
3054 override_env = loader_secure_getenv(VK_ICD_FILENAMES_ENV_VAR, inst);
3055 }
3056 additional_env = loader_secure_getenv(VK_ADDITIONAL_DRIVER_FILES_ENV_VAR, inst);
3057 #if COMMON_UNIX_PLATFORMS
3058 relative_location = VK_DRIVERS_INFO_RELATIVE_DIR;
3059 #endif
3060 #if defined(_WIN32)
3061 package_path = windows_get_app_package_manifest_path(inst);
3062 #endif
3063 break;
3064 case LOADER_DATA_FILE_MANIFEST_IMPLICIT_LAYER:
3065 #if COMMON_UNIX_PLATFORMS
3066 relative_location = VK_ILAYERS_INFO_RELATIVE_DIR;
3067 #endif
3068 break;
3069 case LOADER_DATA_FILE_MANIFEST_EXPLICIT_LAYER:
3070 override_env = loader_secure_getenv(VK_LAYER_PATH_ENV_VAR, inst);
3071 additional_env = loader_secure_getenv(VK_ADDITIONAL_LAYER_PATH_ENV_VAR, inst);
3072 #if COMMON_UNIX_PLATFORMS
3073 relative_location = VK_ELAYERS_INFO_RELATIVE_DIR;
3074 #endif
3075 break;
3076 default:
3077 assert(false && "Shouldn't get here!");
3078 break;
3079 }
3080
3081 // Log a message when VK_LAYER_PATH is set but the override layer paths take priority
3082 if (manifest_type == LOADER_DATA_FILE_MANIFEST_EXPLICIT_LAYER && NULL != override_env && NULL != path_override) {
3083 loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0,
3084 "Ignoring VK_LAYER_PATH. The Override layer is active and has override paths set, which takes priority. "
3085 "VK_LAYER_PATH is set to %s",
3086 override_env);
3087 }
3088
3089 if (path_override != NULL) {
3090 override_path = path_override;
3091 } else if (override_env != NULL) {
3092 override_path = override_env;
3093 }
3094
3095 // Add two by default for NULL terminator and one path separator on end (just in case)
3096 search_path_size = 2;
3097
3098 // If there's an override, use that (and the local folder if required) and nothing else
3099 if (NULL != override_path) {
3100 // Local folder and null terminator
3101 search_path_size += strlen(override_path) + 2;
3102 } else {
3103 // Add the size of any additional search paths defined in the additive environment variable
3104 if (NULL != additional_env) {
3105 search_path_size += determine_data_file_path_size(additional_env, 0) + 2;
3106 #if defined(_WIN32)
3107 }
3108 if (NULL != package_path) {
3109 search_path_size += determine_data_file_path_size(package_path, 0) + 2;
3110 }
3111 if (search_path_size == 2) {
3112 goto out;
3113 }
3114 #elif COMMON_UNIX_PLATFORMS
3115 }
3116
3117 // Add the general search folders (with the appropriate relative folder added)
3118 rel_size = strlen(relative_location);
3119 if (rel_size > 0) {
3120 #if defined(__APPLE__)
3121 search_path_size += MAXPATHLEN;
3122 #endif
3123 // Only add the home folders if defined
3124 if (NULL != home_config_dir) {
3125 search_path_size += determine_data_file_path_size(home_config_dir, rel_size);
3126 }
3127 search_path_size += determine_data_file_path_size(xdg_config_dirs, rel_size);
3128 search_path_size += determine_data_file_path_size(SYSCONFDIR, rel_size);
3129 #if defined(EXTRASYSCONFDIR)
3130 search_path_size += determine_data_file_path_size(EXTRASYSCONFDIR, rel_size);
3131 #endif
3132 // Only add the home folders if defined
3133 if (NULL != home_data_dir) {
3134 search_path_size += determine_data_file_path_size(home_data_dir, rel_size);
3135 }
3136 search_path_size += determine_data_file_path_size(xdg_data_dirs, rel_size);
3137 #if defined (__OHOS__)
3138 if(currentProcessEnableDebugLayer) {
3139 search_path_size += determine_data_file_path_size(debug_layer_json_path, rel_size);
3140 }
3141 search_path_size += determine_data_file_path_size("/system/etc/vulkan/swapchain", rel_size);
3142 #endif
3143 }
3144 #else
3145 #warning read_data_files_in_search_paths unsupported platform
3146 #endif
3147 }
3148
3149 // Allocate the required space
3150 search_path = loader_instance_heap_calloc(inst, search_path_size, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
3151 if (NULL == search_path) {
3152 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
3153 "read_data_files_in_search_paths: Failed to allocate space for search path of length %d",
3154 (uint32_t)search_path_size);
3155 vk_result = VK_ERROR_OUT_OF_HOST_MEMORY;
3156 goto out;
3157 }
3158
3159 cur_path_ptr = search_path;
3160
3161 // Add the remaining paths to the list
3162 if (NULL != override_path) {
3163 size_t override_path_len = strlen(override_path);
3164 loader_strncpy(cur_path_ptr, search_path_size, override_path, override_path_len);
3165 cur_path_ptr += override_path_len;
3166 } else {
3167 // Add any additional search paths defined in the additive environment variable
3168 if (NULL != additional_env) {
3169 copy_data_file_info(additional_env, NULL, 0, &cur_path_ptr);
3170 }
3171
3172 #if defined(_WIN32)
3173 if (NULL != package_path) {
3174 copy_data_file_info(package_path, NULL, 0, &cur_path_ptr);
3175 }
3176 #elif COMMON_UNIX_PLATFORMS
3177 if (rel_size > 0) {
3178 #if defined(__APPLE__)
3179 // Add the bundle's Resources dir to the beginning of the search path.
3180 // Looks for manifests in the bundle first, before any system directories.
3181 // This also appears to work unmodified for iOS, it finds the app bundle on the devices
3182 // file system. (RSW)
3183 CFBundleRef main_bundle = CFBundleGetMainBundle();
3184 if (NULL != main_bundle) {
3185 CFURLRef ref = CFBundleCopyResourcesDirectoryURL(main_bundle);
3186 if (NULL != ref) {
3187 if (CFURLGetFileSystemRepresentation(ref, TRUE, (UInt8 *)cur_path_ptr, search_path_size)) {
3188 cur_path_ptr += strlen(cur_path_ptr);
3189 *cur_path_ptr++ = DIRECTORY_SYMBOL;
3190 memcpy(cur_path_ptr, relative_location, rel_size);
3191 cur_path_ptr += rel_size;
3192 *cur_path_ptr++ = PATH_SEPARATOR;
3193 if (manifest_type == LOADER_DATA_FILE_MANIFEST_DRIVER) {
3194 use_first_found_manifest = true;
3195 }
3196 }
3197 CFRelease(ref);
3198 }
3199 }
3200 #endif // __APPLE__
3201
3202 // Only add the home folders if not NULL
3203 if (NULL != home_config_dir) {
3204 copy_data_file_info(home_config_dir, relative_location, rel_size, &cur_path_ptr);
3205 }
3206 copy_data_file_info(xdg_config_dirs, relative_location, rel_size, &cur_path_ptr);
3207 copy_data_file_info(SYSCONFDIR, relative_location, rel_size, &cur_path_ptr);
3208 #if defined(EXTRASYSCONFDIR)
3209 copy_data_file_info(EXTRASYSCONFDIR, relative_location, rel_size, &cur_path_ptr);
3210 #endif
3211
3212 // Only add the home folders if not NULL
3213 if (NULL != home_data_dir) {
3214 copy_data_file_info(home_data_dir, relative_location, rel_size, &cur_path_ptr);
3215 }
3216 copy_data_file_info(xdg_data_dirs, relative_location, rel_size, &cur_path_ptr);
3217 #if defined (__OHOS__)
3218 if(currentProcessEnableDebugLayer){
3219 copy_data_file_info(debug_layer_json_path,relative_location,rel_size,&cur_path_ptr);
3220 }
3221 copy_data_file_info("/system/etc/vulkan/swapchain/",relative_location,rel_size,&cur_path_ptr);
3222 #endif
3223 }
3224
3225 // Remove the last path separator
3226 --cur_path_ptr;
3227
3228 assert(cur_path_ptr - search_path < (ptrdiff_t)search_path_size);
3229 *cur_path_ptr = '\0';
3230 #else
3231 #warning read_data_files_in_search_paths unsupported platform
3232 #endif
3233 }
3234
3235 // Remove duplicate paths, or it would result in duplicate extensions, duplicate devices, etc.
3236 // This uses minimal memory, but is O(N^2) on the number of paths. Expect only a few paths.
3237 char path_sep_str[2] = {PATH_SEPARATOR, '\0'};
3238 size_t search_path_updated_size = strlen(search_path);
3239 for (size_t first = 0; first < search_path_updated_size;) {
3240 // If this is an empty path, erase it
3241 if (search_path[first] == PATH_SEPARATOR) {
3242 memmove(&search_path[first], &search_path[first + 1], search_path_updated_size - first + 1);
3243 search_path_updated_size -= 1;
3244 continue;
3245 }
3246
3247 size_t first_end = first + 1;
3248 first_end += strcspn(&search_path[first_end], path_sep_str);
3249 for (size_t second = first_end + 1; second < search_path_updated_size;) {
3250 size_t second_end = second + 1;
3251 second_end += strcspn(&search_path[second_end], path_sep_str);
3252 if (first_end - first == second_end - second &&
3253 !strncmp(&search_path[first], &search_path[second], second_end - second)) {
3254 // Found duplicate. Include PATH_SEPARATOR in second_end, then erase it from search_path.
3255 if (search_path[second_end] == PATH_SEPARATOR) {
3256 second_end++;
3257 }
3258 memmove(&search_path[second], &search_path[second_end], search_path_updated_size - second_end + 1);
3259 search_path_updated_size -= second_end - second;
3260 } else {
3261 second = second_end + 1;
3262 }
3263 }
3264 first = first_end + 1;
3265 }
3266 search_path_size = search_path_updated_size;
3267
3268 // Print out the paths being searched if debugging is enabled
3269 uint32_t log_flags = 0;
3270 if (search_path_size > 0) {
3271 char *tmp_search_path = loader_instance_heap_alloc(inst, search_path_size + 1, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
3272 if (NULL != tmp_search_path) {
3273 loader_strncpy(tmp_search_path, search_path_size + 1, search_path, search_path_size);
3274 tmp_search_path[search_path_size] = '\0';
3275 if (manifest_type == LOADER_DATA_FILE_MANIFEST_DRIVER) {
3276 log_flags = VULKAN_LOADER_DRIVER_BIT;
3277 loader_log(inst, VULKAN_LOADER_DRIVER_BIT, 0, "Searching for driver manifest files");
3278 } else {
3279 log_flags = VULKAN_LOADER_LAYER_BIT;
3280 loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "Searching for %s layer manifest files",
3281 manifest_type == LOADER_DATA_FILE_MANIFEST_EXPLICIT_LAYER ? "explicit" : "implicit");
3282 }
3283 loader_log(inst, log_flags, 0, " In following locations:");
3284 char *cur_file;
3285 char *next_file = tmp_search_path;
3286 while (NULL != next_file && *next_file != '\0') {
3287 cur_file = next_file;
3288 next_file = loader_get_next_path(cur_file);
3289 loader_log(inst, log_flags, 0, " %s", cur_file);
3290 }
3291 loader_instance_heap_free(inst, tmp_search_path);
3292 }
3293 }
3294
3295 // Now, parse the paths and add any manifest files found in them.
3296 vk_result = add_data_files(inst, search_path, out_files, use_first_found_manifest);
3297
3298 if (log_flags != 0 && out_files->count > 0) {
3299 loader_log(inst, log_flags, 0, " Found the following files:");
3300 for (uint32_t cur_file = 0; cur_file < out_files->count; ++cur_file) {
3301 loader_log(inst, log_flags, 0, " %s", out_files->list[cur_file]);
3302 }
3303 } else {
3304 loader_log(inst, log_flags, 0, " Found no files");
3305 }
3306
3307 if (NULL != override_path) {
3308 *override_active = true;
3309 } else {
3310 *override_active = false;
3311 }
3312
3313 out:
3314
3315 loader_free_getenv(additional_env, inst);
3316 loader_free_getenv(override_env, inst);
3317 #if defined(_WIN32)
3318 loader_instance_heap_free(inst, package_path);
3319 #elif COMMON_UNIX_PLATFORMS
3320 loader_free_getenv(xdg_config_home, inst);
3321 loader_free_getenv(xdg_config_dirs, inst);
3322 loader_free_getenv(xdg_data_home, inst);
3323 loader_free_getenv(xdg_data_dirs, inst);
3324 loader_free_getenv(xdg_data_home, inst);
3325 loader_free_getenv(home, inst);
3326 loader_instance_heap_free(inst, default_data_home);
3327 loader_instance_heap_free(inst, default_config_home);
3328 #elif defined(__OHOS__)
3329 if(currentProcessEnableDebugLayer){
3330 loader_free_getenv(debug_layer_json_path, inst);
3331 }
3332 loader_free_getenv(debug_layer_name, inst);
3333 loader_free_getenv(debug_hap_name, inst);
3334 #else
3335 #warning read_data_files_in_search_paths unsupported platform
3336 #endif
3337
3338 loader_instance_heap_free(inst, search_path);
3339
3340 return vk_result;
3341 }
3342
3343 // Find the Vulkan library manifest files.
3344 //
3345 // This function scans the appropriate locations for a list of JSON manifest files based on the
3346 // "manifest_type". The location is interpreted as Registry path on Windows and a directory path(s)
3347 // on Linux.
3348 // "home_location" is an additional directory in the users home directory to look at. It is
3349 // expanded into the dir path $XDG_DATA_HOME/home_location or $HOME/.local/share/home_location
3350 // depending on environment variables. This "home_location" is only used on Linux.
3351 //
3352 // \returns
// VkResult
3354 // A string list of manifest files to be opened in out_files param.
3355 // List has a pointer to string for each manifest filename.
3356 // When done using the list in out_files, pointers should be freed.
3357 // Location or override string lists can be either files or directories as
3358 // follows:
3359 // | location | override
3360 // --------------------------------
3361 // Win ICD | files | files
3362 // Win Layer | files | dirs
3363 // Linux ICD | dirs | files
3364 // Linux Layer| dirs | dirs
3365
loader_get_data_files(const struct loader_instance * inst,enum loader_data_files_type manifest_type,const char * path_override,struct loader_string_list * out_files)3366 VkResult loader_get_data_files(const struct loader_instance *inst, enum loader_data_files_type manifest_type,
3367 const char *path_override, struct loader_string_list *out_files) {
3368 VkResult res = VK_SUCCESS;
3369 bool override_active = false;
3370
3371 // Free and init the out_files information so there's no false data left from uninitialized variables.
3372 free_string_list(inst, out_files);
3373
3374 res = read_data_files_in_search_paths(inst, manifest_type, path_override, &override_active, out_files);
3375 if (VK_SUCCESS != res) {
3376 goto out;
3377 }
3378
3379 #if defined(_WIN32)
3380 // Read the registry if the override wasn't active.
3381 if (!override_active) {
3382 bool warn_if_not_present = false;
3383 char *registry_location = NULL;
3384
3385 switch (manifest_type) {
3386 default:
3387 goto out;
3388 case LOADER_DATA_FILE_MANIFEST_DRIVER:
3389 warn_if_not_present = true;
3390 registry_location = VK_DRIVERS_INFO_REGISTRY_LOC;
3391 break;
3392 case LOADER_DATA_FILE_MANIFEST_IMPLICIT_LAYER:
3393 registry_location = VK_ILAYERS_INFO_REGISTRY_LOC;
3394 break;
3395 case LOADER_DATA_FILE_MANIFEST_EXPLICIT_LAYER:
3396 warn_if_not_present = true;
3397 registry_location = VK_ELAYERS_INFO_REGISTRY_LOC;
3398 break;
3399 }
3400 VkResult tmp_res =
3401 windows_read_data_files_in_registry(inst, manifest_type, warn_if_not_present, registry_location, out_files);
3402 // Only return an error if there was an error this time, and no manifest files from before.
3403 if (VK_SUCCESS != tmp_res && out_files->count == 0) {
3404 res = tmp_res;
3405 goto out;
3406 }
3407 }
3408 #endif
3409
3410 out:
3411
3412 if (VK_SUCCESS != res) {
3413 free_string_list(inst, out_files);
3414 }
3415
3416 return res;
3417 }
3418
// Parsed results of a single driver (ICD) manifest JSON file,
// filled in by loader_parse_icd_manifest().
struct ICDManifestInfo {
    // The manifest's 'library_path' combined with the manifest file's directory
    // (see combine_manifest_directory_and_library_path); heap-allocated --
    // presumably owned (and freed) by the caller, confirm at the use sites.
    char *full_library_path;
    // Parsed from the manifest's 'api_version' field -- presumably a packed
    // Vulkan version number; confirm at the assignment site.
    uint32_t version;
};
3423
3424 // Takes a json file, opens, reads, and parses an ICD Manifest out of it.
3425 // Should only return VK_SUCCESS, VK_ERROR_INCOMPATIBLE_DRIVER, or VK_ERROR_OUT_OF_HOST_MEMORY
loader_parse_icd_manifest(const struct loader_instance * inst,char * file_str,struct ICDManifestInfo * icd,bool * skipped_portability_drivers)3426 VkResult loader_parse_icd_manifest(const struct loader_instance *inst, char *file_str, struct ICDManifestInfo *icd,
3427 bool *skipped_portability_drivers) {
3428 VkResult res = VK_SUCCESS;
3429 cJSON *json = NULL;
3430 char *file_vers_str = NULL;
3431 char *library_arch_str = NULL;
3432 char *version_str = NULL;
3433
3434 if (file_str == NULL) {
3435 goto out;
3436 }
3437
3438 res = loader_get_json(inst, file_str, &json);
3439 if (res == VK_ERROR_OUT_OF_HOST_MEMORY) {
3440 goto out;
3441 }
3442 if (res != VK_SUCCESS || NULL == json) {
3443 res = VK_ERROR_INCOMPATIBLE_DRIVER;
3444 goto out;
3445 }
3446
3447 cJSON *item = loader_cJSON_GetObjectItem(json, "file_format_version");
3448 if (item == NULL) {
3449 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3450 "loader_parse_icd_manifest: ICD JSON %s does not have a \'file_format_version\' field. Skipping ICD JSON.",
3451 file_str);
3452 res = VK_ERROR_INCOMPATIBLE_DRIVER;
3453 goto out;
3454 }
3455
3456 file_vers_str = loader_cJSON_Print(item);
3457 if (NULL == file_vers_str) {
3458 // Only reason the print can fail is if there was an allocation issue
3459 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3460 "loader_parse_icd_manifest: Failed retrieving ICD JSON %s \'file_format_version\' field. Skipping ICD JSON",
3461 file_str);
3462 res = VK_ERROR_OUT_OF_HOST_MEMORY;
3463 goto out;
3464 }
3465 loader_log(inst, VULKAN_LOADER_DRIVER_BIT, 0, "Found ICD manifest file %s, version %s", file_str, file_vers_str);
3466
3467 // Get the version of the driver manifest
3468 loader_api_version json_file_version = loader_make_full_version(loader_parse_version_string(file_vers_str));
3469
3470 // Loader only knows versions 1.0.0 and 1.0.1, anything above it is unknown
3471 if (loader_check_version_meets_required(loader_combine_version(1, 0, 2), json_file_version)) {
3472 loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3473 "loader_parse_icd_manifest: %s has unknown icd manifest file version %d.%d.%d. May cause errors.", file_str,
3474 json_file_version.major, json_file_version.minor, json_file_version.patch);
3475 }
3476
3477 cJSON *itemICD = loader_cJSON_GetObjectItem(json, "ICD");
3478 if (itemICD == NULL) {
3479 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3480 "loader_parse_icd_manifest: Can not find \'ICD\' object in ICD JSON file %s. Skipping ICD JSON", file_str);
3481 res = VK_ERROR_INCOMPATIBLE_DRIVER;
3482 goto out;
3483 }
3484
3485 item = loader_cJSON_GetObjectItem(itemICD, "library_path");
3486 if (item == NULL) {
3487 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3488 "loader_parse_icd_manifest: Failed to find \'library_path\' object in ICD JSON file %s. Skipping ICD JSON.",
3489 file_str);
3490 res = VK_ERROR_INCOMPATIBLE_DRIVER;
3491 goto out;
3492 }
3493 char *library_path = loader_cJSON_Print(item);
3494 if (!library_path) {
3495 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3496 "loader_parse_icd_manifest: Failed retrieving ICD JSON %s \'library_path\' field. Skipping ICD JSON.", file_str);
3497 res = VK_ERROR_OUT_OF_HOST_MEMORY;
3498 goto out;
3499 }
3500
3501 if (strlen(library_path) == 0) {
3502 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3503 "loader_parse_icd_manifest: ICD JSON %s \'library_path\' field is empty. Skipping ICD JSON.", file_str);
3504 res = VK_ERROR_INCOMPATIBLE_DRIVER;
3505 goto out;
3506 }
3507
3508 // Print out the paths being searched if debugging is enabled
3509 loader_log(inst, VULKAN_LOADER_DEBUG_BIT | VULKAN_LOADER_DRIVER_BIT, 0, "Searching for ICD drivers named %s", library_path);
3510 // This function takes ownership of library_path - so we don't need to clean it up
3511 res = combine_manifest_directory_and_library_path(inst, library_path, file_str, &icd->full_library_path);
3512 if (VK_SUCCESS != res) {
3513 goto out;
3514 }
3515
3516 item = loader_cJSON_GetObjectItem(itemICD, "api_version");
3517 if (item == NULL) {
3518 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3519 "loader_parse_icd_manifest: ICD JSON %s does not have an \'api_version\' field. Skipping ICD JSON.", file_str);
3520 res = VK_ERROR_INCOMPATIBLE_DRIVER;
3521 goto out;
3522 }
3523 version_str = loader_cJSON_Print(item);
3524 if (NULL == version_str) {
3525 // Only reason the print can fail is if there was an allocation issue
3526 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3527 "loader_parse_icd_manifest: Failed retrieving ICD JSON %s \'api_version\' field. Skipping ICD JSON.", file_str);
3528
3529 res = VK_ERROR_OUT_OF_HOST_MEMORY;
3530 goto out;
3531 }
3532 icd->version = loader_parse_version_string(version_str);
3533
3534 if (VK_API_VERSION_VARIANT(icd->version) != 0) {
3535 loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3536 "loader_parse_icd_manifest: Driver's ICD JSON %s \'api_version\' field contains a non-zero variant value of %d. "
3537 " Skipping ICD JSON.",
3538 file_str, VK_API_VERSION_VARIANT(icd->version));
3539 res = VK_ERROR_INCOMPATIBLE_DRIVER;
3540 goto out;
3541 }
3542
3543 // Skip over ICD's which contain a true "is_portability_driver" value whenever the application doesn't enable
3544 // portability enumeration.
3545 item = loader_cJSON_GetObjectItem(itemICD, "is_portability_driver");
3546 if (item != NULL && item->type == cJSON_True && inst && !inst->portability_enumeration_enabled) {
3547 if (skipped_portability_drivers) {
3548 *skipped_portability_drivers = true;
3549 }
3550 res = VK_ERROR_INCOMPATIBLE_DRIVER;
3551 goto out;
3552 }
3553
3554 item = loader_cJSON_GetObjectItem(itemICD, "library_arch");
3555 if (item != NULL) {
3556 library_arch_str = loader_cJSON_Print(item);
3557 if (NULL != library_arch_str) {
3558 // cJSON includes the quotes by default, so we need to look for those here
3559 if ((strncmp(library_arch_str, "32", 4) == 0 && sizeof(void *) != 4) ||
3560 (strncmp(library_arch_str, "64", 4) == 0 && sizeof(void *) != 8)) {
3561 loader_log(inst, VULKAN_LOADER_INFO_BIT, 0,
3562 "loader_parse_icd_manifest: Driver library architecture doesn't match the current running "
3563 "architecture, skipping this driver");
3564 res = VK_ERROR_INCOMPATIBLE_DRIVER;
3565 goto out;
3566 }
3567 } else {
3568 res = VK_ERROR_OUT_OF_HOST_MEMORY;
3569 goto out;
3570 }
3571 }
3572 out:
3573 loader_cJSON_Delete(json);
3574 loader_instance_heap_free(inst, file_vers_str);
3575 loader_instance_heap_free(inst, version_str);
3576 loader_instance_heap_free(inst, library_arch_str);
3577 return res;
3578 }
3579
3580 // Try to find the Vulkan ICD driver(s).
3581 //
3582 // This function scans the default system loader path(s) or path specified by either the
3583 // VK_DRIVER_FILES or VK_ICD_FILENAMES environment variable in order to find loadable
3584 // VK ICDs manifest files.
3585 // From these manifest files it finds the ICD libraries.
3586 //
3587 // skipped_portability_drivers is used to report whether the loader found drivers which report
3588 // portability but the application didn't enable the bit to enumerate them
3589 // Can be NULL
3590 //
3591 // \returns
3592 // Vulkan result
3593 // (on result == VK_SUCCESS) a list of icds that were discovered
VkResult loader_icd_scan(const struct loader_instance *inst, struct loader_icd_tramp_list *icd_tramp_list,
                         const VkInstanceCreateInfo *pCreateInfo, bool *skipped_portability_drivers) {
    VkResult res = VK_SUCCESS;
    struct loader_string_list manifest_files = {0};
    struct loader_envvar_filter select_filter = {0};
    struct loader_envvar_filter disable_filter = {0};
    struct ICDManifestInfo *icd_details = NULL;

    // Set up the ICD Trampoline list so elements can be written into it.
    res = loader_scanned_icd_init(inst, icd_tramp_list);
    if (res == VK_ERROR_OUT_OF_HOST_MEMORY) {
        return res;
    }

    // Direct driver loading (VK_LUNARG_direct_driver_loading) is checked first; in
    // exclusive mode it completely replaces manifest-based discovery below.
    bool direct_driver_loading_exclusive_mode = false;
    res = loader_scan_for_direct_drivers(inst, pCreateInfo, icd_tramp_list, &direct_driver_loading_exclusive_mode);
    if (res == VK_ERROR_OUT_OF_HOST_MEMORY) {
        goto out;
    }
    if (direct_driver_loading_exclusive_mode) {
        // Make sure to jump over the system & env-var driver discovery mechanisms if exclusive mode is set, even if no drivers
        // were successfully found through the direct driver loading mechanism
        goto out;
    }

    // Parse the filter environment variables to determine if we have any special behavior
    res = parse_generic_filter_environment_var(inst, VK_DRIVERS_SELECT_ENV_VAR, &select_filter);
    if (VK_SUCCESS != res) {
        goto out;
    }
    res = parse_generic_filter_environment_var(inst, VK_DRIVERS_DISABLE_ENV_VAR, &disable_filter);
    if (VK_SUCCESS != res) {
        goto out;
    }

    // Get a list of manifest files for ICDs
    res = loader_get_data_files(inst, LOADER_DATA_FILE_MANIFEST_DRIVER, NULL, &manifest_files);
    if (VK_SUCCESS != res) {
        goto out;
    }

    // icd_details is stack-allocated, so only the heap strings inside each element
    // need freeing in the cleanup block below.
    icd_details = loader_stack_alloc(sizeof(struct ICDManifestInfo) * manifest_files.count);
    if (NULL == icd_details) {
        res = VK_ERROR_OUT_OF_HOST_MEMORY;
        goto out;
    }
    memset(icd_details, 0, sizeof(struct ICDManifestInfo) * manifest_files.count);

    for (uint32_t i = 0; i < manifest_files.count; i++) {
        VkResult icd_res = VK_SUCCESS;

        // OOM aborts the whole scan; an incompatible/unparsable manifest only skips
        // this one entry so the remaining drivers are still considered.
        icd_res = loader_parse_icd_manifest(inst, manifest_files.list[i], &icd_details[i], skipped_portability_drivers);
        if (VK_ERROR_OUT_OF_HOST_MEMORY == icd_res) {
            res = icd_res;
            goto out;
        } else if (VK_ERROR_INCOMPATIBLE_DRIVER == icd_res) {
            continue;
        }

        if (select_filter.count > 0 || disable_filter.count > 0) {
            // Get only the filename for comparing to the filters
            char *just_filename_str = strrchr(manifest_files.list[i], DIRECTORY_SYMBOL);

            // No directory symbol, just the filename
            if (NULL == just_filename_str) {
                just_filename_str = manifest_files.list[i];
            } else {
                just_filename_str++;
            }

            bool name_matches_select =
                (select_filter.count > 0 && check_name_matches_filter_environment_var(just_filename_str, &select_filter));
            bool name_matches_disable =
                (disable_filter.count > 0 && check_name_matches_filter_environment_var(just_filename_str, &disable_filter));

            // Select takes precedence over disable when both match the same driver.
            if (name_matches_disable && !name_matches_select) {
                loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
                           "Driver \"%s\" ignored because it was disabled by env var \'%s\'", just_filename_str,
                           VK_DRIVERS_DISABLE_ENV_VAR);
                continue;
            }
            if (select_filter.count != 0 && !name_matches_select) {
                loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
                           "Driver \"%s\" ignored because not selected by env var \'%s\'", just_filename_str,
                           VK_DRIVERS_SELECT_ENV_VAR);
                continue;
            }
        }

        // NOTE(review): lib_status is presumably always written by loader_scanned_icd_add
        // before it returns VK_ERROR_INCOMPATIBLE_DRIVER -- confirm, since it is read
        // uninitialized otherwise.
        enum loader_layer_library_status lib_status;
        icd_res =
            loader_scanned_icd_add(inst, icd_tramp_list, icd_details[i].full_library_path, icd_details[i].version, &lib_status);
        if (VK_ERROR_OUT_OF_HOST_MEMORY == icd_res) {
            res = icd_res;
            goto out;
        } else if (VK_ERROR_INCOMPATIBLE_DRIVER == icd_res) {
            switch (lib_status) {
                case LOADER_LAYER_LIB_NOT_LOADED:
                case LOADER_LAYER_LIB_ERROR_FAILED_TO_LOAD:
                    loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
                               "loader_icd_scan: Failed loading library associated with ICD JSON %s. Ignoring this JSON",
                               icd_details[i].full_library_path);
                    break;
                case LOADER_LAYER_LIB_ERROR_WRONG_BIT_TYPE: {
                    loader_log(inst, VULKAN_LOADER_DRIVER_BIT, 0, "Requested ICD %s was wrong bit-type. Ignoring this JSON",
                               icd_details[i].full_library_path);
                    break;
                }
                case LOADER_LAYER_LIB_SUCCESS_LOADED:
                case LOADER_LAYER_LIB_ERROR_OUT_OF_MEMORY:
                    // Shouldn't be able to reach this but if it is, best to report a debug
                    loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
                               "Shouldn't reach this. A valid version of requested ICD %s was loaded but something bad "
                               "happened afterwards.",
                               icd_details[i].full_library_path);
                    break;
            }
        }
    }

out:
    if (NULL != icd_details) {
        // Successfully got the icd_details structure, which means we need to free the paths contained within
        for (uint32_t i = 0; i < manifest_files.count; i++) {
            loader_instance_heap_free(inst, icd_details[i].full_library_path);
        }
    }
    free_string_list(inst, &manifest_files);
    return res;
}
3724
3725 // Gets the layer data files corresponding to manifest_type & path_override, then parses the resulting json objects
3726 // into instance_layers
3727 // Manifest type must be either implicit or explicit
loader_parse_instance_layers(struct loader_instance * inst,enum loader_data_files_type manifest_type,const char * path_override,struct loader_layer_list * instance_layers)3728 VkResult loader_parse_instance_layers(struct loader_instance *inst, enum loader_data_files_type manifest_type,
3729 const char *path_override, struct loader_layer_list *instance_layers) {
3730 assert(manifest_type == LOADER_DATA_FILE_MANIFEST_IMPLICIT_LAYER || manifest_type == LOADER_DATA_FILE_MANIFEST_EXPLICIT_LAYER);
3731 VkResult res = VK_SUCCESS;
3732 struct loader_string_list manifest_files = {0};
3733
3734 res = loader_get_data_files(inst, manifest_type, path_override, &manifest_files);
3735 if (VK_SUCCESS != res) {
3736 goto out;
3737 }
3738
3739 for (uint32_t i = 0; i < manifest_files.count; i++) {
3740 char *file_str = manifest_files.list[i];
3741 if (file_str == NULL) {
3742 continue;
3743 }
3744
3745 // Parse file into JSON struct
3746 cJSON *json = NULL;
3747 VkResult local_res = loader_get_json(inst, file_str, &json);
3748 if (VK_ERROR_OUT_OF_HOST_MEMORY == local_res) {
3749 res = VK_ERROR_OUT_OF_HOST_MEMORY;
3750 goto out;
3751 } else if (VK_SUCCESS != local_res || NULL == json) {
3752 continue;
3753 }
3754
3755 local_res = loader_add_layer_properties(inst, instance_layers, json,
3756 manifest_type == LOADER_DATA_FILE_MANIFEST_IMPLICIT_LAYER, file_str);
3757 loader_cJSON_Delete(json);
3758
3759 // If the error is anything other than out of memory we still want to try to load the other layers
3760 if (VK_ERROR_OUT_OF_HOST_MEMORY == local_res) {
3761 res = VK_ERROR_OUT_OF_HOST_MEMORY;
3762 goto out;
3763 }
3764 }
3765 out:
3766 free_string_list(inst, &manifest_files);
3767
3768 return res;
3769 }
3770
3771 // Given a loader_layer_properties struct that is a valid override layer, concatenate the properties override paths and put them
3772 // into the output parameter override_paths
get_override_layer_override_paths(struct loader_instance * inst,struct loader_layer_properties * prop,char ** override_paths)3773 VkResult get_override_layer_override_paths(struct loader_instance *inst, struct loader_layer_properties *prop,
3774 char **override_paths) {
3775 if (prop->override_paths.count > 0) {
3776 char *cur_write_ptr = NULL;
3777 size_t override_path_size = 0;
3778 for (uint32_t j = 0; j < prop->override_paths.count; j++) {
3779 override_path_size += determine_data_file_path_size(prop->override_paths.list[j], 0);
3780 }
3781 *override_paths = loader_instance_heap_alloc(inst, override_path_size, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
3782 if (*override_paths == NULL) {
3783 return VK_ERROR_OUT_OF_HOST_MEMORY;
3784 }
3785 cur_write_ptr = &(*override_paths)[0];
3786 for (uint32_t j = 0; j < prop->override_paths.count; j++) {
3787 copy_data_file_info(prop->override_paths.list[j], NULL, 0, &cur_write_ptr);
3788 }
3789 // Remove the last path separator
3790 --cur_write_ptr;
3791 assert(cur_write_ptr - (*override_paths) < (ptrdiff_t)override_path_size);
3792 *cur_write_ptr = '\0';
3793 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Override layer has override paths set to %s",
3794 *override_paths);
3795 }
3796 return VK_SUCCESS;
3797 }
3798
loader_scan_for_layers(struct loader_instance * inst,struct loader_layer_list * instance_layers,const struct loader_envvar_all_filters * filters)3799 VkResult loader_scan_for_layers(struct loader_instance *inst, struct loader_layer_list *instance_layers,
3800 const struct loader_envvar_all_filters *filters) {
3801 VkResult res = VK_SUCCESS;
3802 struct loader_layer_list settings_layers = {0};
3803 struct loader_layer_list regular_instance_layers = {0};
3804 bool override_layer_valid = false;
3805 char *override_paths = NULL;
3806
3807 bool should_search_for_other_layers = true;
3808 res = get_settings_layers(inst, &settings_layers, &should_search_for_other_layers);
3809 if (VK_SUCCESS != res) {
3810 goto out;
3811 }
3812
3813 // If we should not look for layers using other mechanisms, assign settings_layers to instance_layers and jump to the
3814 // output
3815 if (!should_search_for_other_layers) {
3816 *instance_layers = settings_layers;
3817 memset(&settings_layers, 0, sizeof(struct loader_layer_list));
3818 goto out;
3819 }
3820
3821 res = loader_parse_instance_layers(inst, LOADER_DATA_FILE_MANIFEST_IMPLICIT_LAYER, NULL, ®ular_instance_layers);
3822 if (VK_SUCCESS != res) {
3823 goto out;
3824 }
3825
3826 // Remove any extraneous override layers.
3827 remove_all_non_valid_override_layers(inst, ®ular_instance_layers);
3828
3829 // Check to see if the override layer is present, and use it's override paths.
3830 for (uint32_t i = 0; i < regular_instance_layers.count; i++) {
3831 struct loader_layer_properties *prop = ®ular_instance_layers.list[i];
3832 if (prop->is_override && loader_implicit_layer_is_enabled(inst, filters, prop) && prop->override_paths.count > 0) {
3833 res = get_override_layer_override_paths(inst, prop, &override_paths);
3834 if (VK_SUCCESS != res) {
3835 goto out;
3836 }
3837 break;
3838 }
3839 }
3840
3841 // Get a list of manifest files for explicit layers
3842 res = loader_parse_instance_layers(inst, LOADER_DATA_FILE_MANIFEST_EXPLICIT_LAYER, override_paths, ®ular_instance_layers);
3843 if (VK_SUCCESS != res) {
3844 goto out;
3845 }
3846
3847 // Verify any meta-layers in the list are valid and all the component layers are
3848 // actually present in the available layer list
3849 res = verify_all_meta_layers(inst, filters, ®ular_instance_layers, &override_layer_valid);
3850 if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
3851 return res;
3852 }
3853
3854 if (override_layer_valid) {
3855 loader_remove_layers_in_blacklist(inst, ®ular_instance_layers);
3856 if (NULL != inst) {
3857 inst->override_layer_present = true;
3858 }
3859 }
3860
3861 // Remove disabled layers
3862 for (uint32_t i = 0; i < regular_instance_layers.count; ++i) {
3863 if (!loader_layer_is_available(inst, filters, ®ular_instance_layers.list[i])) {
3864 loader_remove_layer_in_list(inst, ®ular_instance_layers, i);
3865 i--;
3866 }
3867 }
3868
3869 res = combine_settings_layers_with_regular_layers(inst, &settings_layers, ®ular_instance_layers, instance_layers);
3870
3871 out:
3872 loader_delete_layer_list_and_properties(inst, &settings_layers);
3873 loader_delete_layer_list_and_properties(inst, ®ular_instance_layers);
3874
3875 loader_instance_heap_free(inst, override_paths);
3876 return res;
3877 }
3878
loader_scan_for_implicit_layers(struct loader_instance * inst,struct loader_layer_list * instance_layers,const struct loader_envvar_all_filters * layer_filters)3879 VkResult loader_scan_for_implicit_layers(struct loader_instance *inst, struct loader_layer_list *instance_layers,
3880 const struct loader_envvar_all_filters *layer_filters) {
3881 VkResult res = VK_SUCCESS;
3882 struct loader_layer_list settings_layers = {0};
3883 struct loader_layer_list regular_instance_layers = {0};
3884 bool override_layer_valid = false;
3885 char *override_paths = NULL;
3886 bool implicit_metalayer_present = false;
3887
3888 bool should_search_for_other_layers = true;
3889 res = get_settings_layers(inst, &settings_layers, &should_search_for_other_layers);
3890 if (VK_SUCCESS != res) {
3891 goto out;
3892 }
3893
3894 // If we should not look for layers using other mechanisms, assign settings_layers to instance_layers and jump to the
3895 // output
3896 if (!should_search_for_other_layers) {
3897 *instance_layers = settings_layers;
3898 memset(&settings_layers, 0, sizeof(struct loader_layer_list));
3899 goto out;
3900 }
3901
3902 res = loader_parse_instance_layers(inst, LOADER_DATA_FILE_MANIFEST_IMPLICIT_LAYER, NULL, ®ular_instance_layers);
3903 if (VK_SUCCESS != res) {
3904 goto out;
3905 }
3906
3907 // Remove any extraneous override layers.
3908 remove_all_non_valid_override_layers(inst, ®ular_instance_layers);
3909
3910 // Check to see if either the override layer is present, or another implicit meta-layer.
3911 // Each of these may require explicit layers to be enabled at this time.
3912 for (uint32_t i = 0; i < regular_instance_layers.count; i++) {
3913 struct loader_layer_properties *prop = ®ular_instance_layers.list[i];
3914 if (prop->is_override && loader_implicit_layer_is_enabled(inst, layer_filters, prop)) {
3915 override_layer_valid = true;
3916 res = get_override_layer_override_paths(inst, prop, &override_paths);
3917 if (VK_SUCCESS != res) {
3918 goto out;
3919 }
3920 } else if (!prop->is_override && prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER) {
3921 implicit_metalayer_present = true;
3922 }
3923 }
3924
3925 // If either the override layer or an implicit meta-layer are present, we need to add
3926 // explicit layer info as well. Not to worry, though, all explicit layers not included
3927 // in the override layer will be removed below in loader_remove_layers_in_blacklist().
3928 if (override_layer_valid || implicit_metalayer_present) {
3929 res =
3930 loader_parse_instance_layers(inst, LOADER_DATA_FILE_MANIFEST_EXPLICIT_LAYER, override_paths, ®ular_instance_layers);
3931 if (VK_SUCCESS != res) {
3932 goto out;
3933 }
3934 }
3935
3936 // Verify any meta-layers in the list are valid and all the component layers are
3937 // actually present in the available layer list
3938 res = verify_all_meta_layers(inst, layer_filters, ®ular_instance_layers, &override_layer_valid);
3939 if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
3940 return res;
3941 }
3942
3943 if (override_layer_valid || implicit_metalayer_present) {
3944 loader_remove_layers_not_in_implicit_meta_layers(inst, ®ular_instance_layers);
3945 if (override_layer_valid && inst != NULL) {
3946 inst->override_layer_present = true;
3947 }
3948 }
3949
3950 // Remove disabled layers
3951 for (uint32_t i = 0; i < regular_instance_layers.count; ++i) {
3952 if (!loader_implicit_layer_is_enabled(inst, layer_filters, ®ular_instance_layers.list[i])) {
3953 loader_remove_layer_in_list(inst, ®ular_instance_layers, i);
3954 i--;
3955 }
3956 }
3957
3958 res = combine_settings_layers_with_regular_layers(inst, &settings_layers, ®ular_instance_layers, instance_layers);
3959
3960 out:
3961 loader_delete_layer_list_and_properties(inst, &settings_layers);
3962 loader_delete_layer_list_and_properties(inst, ®ular_instance_layers);
3963
3964 loader_instance_heap_free(inst, override_paths);
3965 return res;
3966 }
3967
loader_gpdpa_instance_terminator(VkInstance inst,const char * pName)3968 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL loader_gpdpa_instance_terminator(VkInstance inst, const char *pName) {
3969 // inst is not wrapped
3970 if (inst == VK_NULL_HANDLE) {
3971 return NULL;
3972 }
3973
3974 VkLayerInstanceDispatchTable *disp_table = *(VkLayerInstanceDispatchTable **)inst;
3975
3976 if (disp_table == NULL) return NULL;
3977
3978 struct loader_instance *loader_inst = loader_get_instance(inst);
3979
3980 if (loader_inst->instance_finished_creation) {
3981 disp_table = &loader_inst->terminator_dispatch;
3982 }
3983
3984 bool found_name;
3985 void *addr = loader_lookup_instance_dispatch_table(disp_table, pName, &found_name);
3986 if (found_name) {
3987 return addr;
3988 }
3989
3990 // Check if any drivers support the function, and if so, add it to the unknown function list
3991 addr = loader_phys_dev_ext_gpa_term(loader_get_instance(inst), pName);
3992 if (NULL != addr) return addr;
3993
3994 // Don't call down the chain, this would be an infinite loop
3995 loader_log(NULL, VULKAN_LOADER_DEBUG_BIT, 0, "loader_gpdpa_instance_terminator() unrecognized name %s", pName);
3996 return NULL;
3997 }
3998
// Terminator for vkGetInstanceProcAddr: resolves a name to the loader's terminator
// functions or, failing that, to driver-supplied unknown-function stubs.
// NOTE: the ordering below is load-bearing - global names are resolved before the
// NULL-handle check so they can be queried without a valid instance.
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL loader_gpa_instance_terminator(VkInstance inst, const char *pName) {
    // Global functions - Do not need a valid instance handle to query
    if (!strcmp(pName, "vkGetInstanceProcAddr")) {
        return (PFN_vkVoidFunction)loader_gpa_instance_terminator;
    }
    if (!strcmp(pName, "vk_layerGetPhysicalDeviceProcAddr")) {
        return (PFN_vkVoidFunction)loader_gpdpa_instance_terminator;
    }
    if (!strcmp(pName, "vkCreateInstance")) {
        return (PFN_vkVoidFunction)terminator_CreateInstance;
    }

    // While the spec is very clear that querying vkCreateDevice requires a valid VkInstance, because the loader allowed querying
    // with a NULL VkInstance handle for a long enough time, it is impractical to fix this bug in the loader

    // As such, this is a bug to maintain compatibility for the RTSS layer (Riva Tuner Statistics Server) but may
    // be depended upon by other layers out in the wild.
    if (!strcmp(pName, "vkCreateDevice")) {
        return (PFN_vkVoidFunction)terminator_CreateDevice;
    }

    // inst is not wrapped; everything below requires dereferencing the handle
    if (inst == VK_NULL_HANDLE) {
        return NULL;
    }
    VkLayerInstanceDispatchTable *disp_table = *(VkLayerInstanceDispatchTable **)inst;

    if (disp_table == NULL) return NULL;

    struct loader_instance *loader_inst = loader_get_instance(inst);

    // The VK_EXT_debug_utils functions need a special case here so the terminators can still be found from
    // vkGetInstanceProcAddr. This is because VK_EXT_debug_utils is an instance level extension with device level functions, and
    // is 'supported' by the loader.
    // These functions need a terminator to handle the case of a driver not supporting VK_EXT_debug_utils when there are layers
    // present which do not check for NULL before calling the function.
    // Each terminator is only returned when the extension was actually enabled.
    if (!strcmp(pName, "vkSetDebugUtilsObjectNameEXT")) {
        return loader_inst->enabled_known_extensions.ext_debug_utils ? (PFN_vkVoidFunction)terminator_SetDebugUtilsObjectNameEXT
                                                                     : NULL;
    }
    if (!strcmp(pName, "vkSetDebugUtilsObjectTagEXT")) {
        return loader_inst->enabled_known_extensions.ext_debug_utils ? (PFN_vkVoidFunction)terminator_SetDebugUtilsObjectTagEXT
                                                                     : NULL;
    }
    if (!strcmp(pName, "vkQueueBeginDebugUtilsLabelEXT")) {
        return loader_inst->enabled_known_extensions.ext_debug_utils ? (PFN_vkVoidFunction)terminator_QueueBeginDebugUtilsLabelEXT
                                                                     : NULL;
    }
    if (!strcmp(pName, "vkQueueEndDebugUtilsLabelEXT")) {
        return loader_inst->enabled_known_extensions.ext_debug_utils ? (PFN_vkVoidFunction)terminator_QueueEndDebugUtilsLabelEXT
                                                                     : NULL;
    }
    if (!strcmp(pName, "vkQueueInsertDebugUtilsLabelEXT")) {
        return loader_inst->enabled_known_extensions.ext_debug_utils ? (PFN_vkVoidFunction)terminator_QueueInsertDebugUtilsLabelEXT
                                                                     : NULL;
    }
    if (!strcmp(pName, "vkCmdBeginDebugUtilsLabelEXT")) {
        return loader_inst->enabled_known_extensions.ext_debug_utils ? (PFN_vkVoidFunction)terminator_CmdBeginDebugUtilsLabelEXT
                                                                     : NULL;
    }
    if (!strcmp(pName, "vkCmdEndDebugUtilsLabelEXT")) {
        return loader_inst->enabled_known_extensions.ext_debug_utils ? (PFN_vkVoidFunction)terminator_CmdEndDebugUtilsLabelEXT
                                                                     : NULL;
    }
    if (!strcmp(pName, "vkCmdInsertDebugUtilsLabelEXT")) {
        return loader_inst->enabled_known_extensions.ext_debug_utils ? (PFN_vkVoidFunction)terminator_CmdInsertDebugUtilsLabelEXT
                                                                     : NULL;
    }

    // Once instance creation has completed, resolve through the terminator dispatch table
    if (loader_inst->instance_finished_creation) {
        disp_table = &loader_inst->terminator_dispatch;
    }

    bool found_name;
    void *addr = loader_lookup_instance_dispatch_table(disp_table, pName, &found_name);
    if (found_name) {
        return addr;
    }

    // Check if it is an unknown physical device function, to see if any drivers support it.
    addr = loader_phys_dev_ext_gpa_term(loader_get_instance(inst), pName);
    if (addr) {
        return addr;
    }

    // Assume it is an unknown device function, check to see if any drivers support it.
    addr = loader_dev_ext_gpa_term(loader_get_instance(inst), pName);
    if (addr) {
        return addr;
    }

    // Don't call down the chain, this would be an infinite loop
    loader_log(NULL, VULKAN_LOADER_DEBUG_BIT, 0, "loader_gpa_instance_terminator() unrecognized name %s", pName);
    return NULL;
}
4094
loader_gpa_device_terminator(VkDevice device,const char * pName)4095 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL loader_gpa_device_terminator(VkDevice device, const char *pName) {
4096 struct loader_device *dev;
4097 struct loader_icd_term *icd_term = loader_get_icd_and_device(device, &dev, NULL);
4098
4099 // Return this function if a layer above here is asking for the vkGetDeviceProcAddr.
4100 // This is so we can properly intercept any device commands needing a terminator.
4101 if (!strcmp(pName, "vkGetDeviceProcAddr")) {
4102 return (PFN_vkVoidFunction)loader_gpa_device_terminator;
4103 }
4104
4105 // NOTE: Device Funcs needing Trampoline/Terminator.
4106 // Overrides for device functions needing a trampoline and
4107 // a terminator because certain device entry-points still need to go
4108 // through a terminator before hitting the ICD. This could be for
4109 // several reasons, but the main one is currently unwrapping an
4110 // object before passing the appropriate info along to the ICD.
4111 // This is why we also have to override the direct ICD call to
4112 // vkGetDeviceProcAddr to intercept those calls.
4113 // If the pName is for a 'known' function but isn't available, due to
4114 // the corresponding extension/feature not being enabled, we need to
4115 // return NULL and not call down to the driver's GetDeviceProcAddr.
4116 if (NULL != dev) {
4117 bool found_name = false;
4118 PFN_vkVoidFunction addr = get_extension_device_proc_terminator(dev, pName, &found_name);
4119 if (found_name) {
4120 return addr;
4121 }
4122 }
4123
4124 if (icd_term == NULL) {
4125 return NULL;
4126 }
4127
4128 return icd_term->dispatch.GetDeviceProcAddr(device, pName);
4129 }
4130
loader_get_instance(const VkInstance instance)4131 struct loader_instance *loader_get_instance(const VkInstance instance) {
4132 // look up the loader_instance in our list by comparing dispatch tables, as
4133 // there is no guarantee the instance is still a loader_instance* after any
4134 // layers which wrap the instance object.
4135 const VkLayerInstanceDispatchTable *disp;
4136 struct loader_instance *ptr_instance = (struct loader_instance *)instance;
4137 if (VK_NULL_HANDLE == instance || LOADER_MAGIC_NUMBER != ptr_instance->magic) {
4138 return NULL;
4139 } else {
4140 disp = loader_get_instance_layer_dispatch(instance);
4141 loader_platform_thread_lock_mutex(&loader_global_instance_list_lock);
4142 for (struct loader_instance *inst = loader.instances; inst; inst = inst->next) {
4143 if (&inst->disp->layer_inst_disp == disp) {
4144 ptr_instance = inst;
4145 break;
4146 }
4147 }
4148 loader_platform_thread_unlock_mutex(&loader_global_instance_list_lock);
4149 }
4150 return ptr_instance;
4151 }
4152
loader_open_layer_file(const struct loader_instance * inst,struct loader_layer_properties * prop)4153 loader_platform_dl_handle loader_open_layer_file(const struct loader_instance *inst, struct loader_layer_properties *prop) {
4154 if ((prop->lib_handle = loader_platform_open_library(prop->lib_name)) == NULL) {
4155 loader_handle_load_library_error(inst, prop->lib_name, &prop->lib_status);
4156 } else {
4157 prop->lib_status = LOADER_LAYER_LIB_SUCCESS_LOADED;
4158 loader_log(inst, VULKAN_LOADER_DEBUG_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Loading layer library %s", prop->lib_name);
4159 }
4160
4161 return prop->lib_handle;
4162 }
4163
4164 // Go through the search_list and find any layers which match type. If layer
4165 // type match is found in then add it to ext_list.
loader_add_implicit_layers(const struct loader_instance * inst,const struct loader_envvar_all_filters * filters,struct loader_pointer_layer_list * target_list,struct loader_pointer_layer_list * expanded_target_list,const struct loader_layer_list * source_list)4166 VkResult loader_add_implicit_layers(const struct loader_instance *inst, const struct loader_envvar_all_filters *filters,
4167 struct loader_pointer_layer_list *target_list,
4168 struct loader_pointer_layer_list *expanded_target_list,
4169 const struct loader_layer_list *source_list) {
4170 for (uint32_t src_layer = 0; src_layer < source_list->count; src_layer++) {
4171 struct loader_layer_properties *prop = &source_list->list[src_layer];
4172 if (0 == (prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER)) {
4173 VkResult result = loader_add_implicit_layer(inst, prop, filters, target_list, expanded_target_list, source_list);
4174 if (result == VK_ERROR_OUT_OF_HOST_MEMORY) return result;
4175 }
4176 }
4177 return VK_SUCCESS;
4178 }
4179
warn_if_layers_are_older_than_application(struct loader_instance * inst)4180 void warn_if_layers_are_older_than_application(struct loader_instance *inst) {
4181 for (uint32_t i = 0; i < inst->expanded_activated_layer_list.count; i++) {
4182 // Verify that the layer api version is at least that of the application's request, if not, throw a warning since
4183 // undefined behavior could occur.
4184 struct loader_layer_properties *prop = inst->expanded_activated_layer_list.list[i];
4185 loader_api_version prop_spec_version = loader_make_version(prop->info.specVersion);
4186 if (!loader_check_version_meets_required(inst->app_api_version, prop_spec_version)) {
4187 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
4188 "Layer %s uses API version %u.%u which is older than the application specified "
4189 "API version of %u.%u. May cause issues.",
4190 prop->info.layerName, prop_spec_version.major, prop_spec_version.minor, inst->app_api_version.major,
4191 inst->app_api_version.minor);
4192 }
4193 }
4194 }
4195
loader_enable_instance_layers(struct loader_instance * inst,const VkInstanceCreateInfo * pCreateInfo,const struct loader_layer_list * instance_layers,const struct loader_envvar_all_filters * layer_filters)4196 VkResult loader_enable_instance_layers(struct loader_instance *inst, const VkInstanceCreateInfo *pCreateInfo,
4197 const struct loader_layer_list *instance_layers,
4198 const struct loader_envvar_all_filters *layer_filters) {
4199 VkResult res = VK_SUCCESS;
4200
4201 assert(inst && "Cannot have null instance");
4202
4203 if (!loader_init_pointer_layer_list(inst, &inst->app_activated_layer_list)) {
4204 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
4205 "loader_enable_instance_layers: Failed to initialize application version of the layer list");
4206 res = VK_ERROR_OUT_OF_HOST_MEMORY;
4207 goto out;
4208 }
4209
4210 if (!loader_init_pointer_layer_list(inst, &inst->expanded_activated_layer_list)) {
4211 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
4212 "loader_enable_instance_layers: Failed to initialize expanded version of the layer list");
4213 res = VK_ERROR_OUT_OF_HOST_MEMORY;
4214 goto out;
4215 }
4216
4217 if (inst->settings.settings_active) {
4218 res = enable_correct_layers_from_settings(inst, layer_filters, pCreateInfo->enabledLayerCount,
4219 pCreateInfo->ppEnabledLayerNames, &inst->instance_layer_list,
4220 &inst->app_activated_layer_list, &inst->expanded_activated_layer_list);
4221 warn_if_layers_are_older_than_application(inst);
4222
4223 goto out;
4224 }
4225
4226 // Add any implicit layers first
4227 res = loader_add_implicit_layers(inst, layer_filters, &inst->app_activated_layer_list, &inst->expanded_activated_layer_list,
4228 instance_layers);
4229 if (res != VK_SUCCESS) {
4230 goto out;
4231 }
4232
4233 // Add any layers specified via environment variable next
4234 res = loader_add_environment_layers(inst, VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER, layer_filters, &inst->app_activated_layer_list,
4235 &inst->expanded_activated_layer_list, instance_layers);
4236 if (res != VK_SUCCESS) {
4237 goto out;
4238 }
4239
4240 // Add layers specified by the application
4241 res = loader_add_layer_names_to_list(inst, layer_filters, &inst->app_activated_layer_list, &inst->expanded_activated_layer_list,
4242 pCreateInfo->enabledLayerCount, pCreateInfo->ppEnabledLayerNames, instance_layers);
4243
4244 warn_if_layers_are_older_than_application(inst);
4245 out:
4246 return res;
4247 }
4248
4249 // Determine the layer interface version to use.
loader_get_layer_interface_version(PFN_vkNegotiateLoaderLayerInterfaceVersion fp_negotiate_layer_version,VkNegotiateLayerInterface * interface_struct)4250 bool loader_get_layer_interface_version(PFN_vkNegotiateLoaderLayerInterfaceVersion fp_negotiate_layer_version,
4251 VkNegotiateLayerInterface *interface_struct) {
4252 memset(interface_struct, 0, sizeof(VkNegotiateLayerInterface));
4253 interface_struct->sType = LAYER_NEGOTIATE_INTERFACE_STRUCT;
4254 interface_struct->loaderLayerInterfaceVersion = 1;
4255 interface_struct->pNext = NULL;
4256
4257 if (fp_negotiate_layer_version != NULL) {
4258 // Layer supports the negotiation API, so call it with the loader's
4259 // latest version supported
4260 interface_struct->loaderLayerInterfaceVersion = CURRENT_LOADER_LAYER_INTERFACE_VERSION;
4261 VkResult result = fp_negotiate_layer_version(interface_struct);
4262
4263 if (result != VK_SUCCESS) {
4264 // Layer no longer supports the loader's latest interface version so
4265 // fail loading the Layer
4266 return false;
4267 }
4268 }
4269
4270 if (interface_struct->loaderLayerInterfaceVersion < MIN_SUPPORTED_LOADER_LAYER_INTERFACE_VERSION) {
4271 // Loader no longer supports the layer's latest interface version so
4272 // fail loading the layer
4273 return false;
4274 }
4275
4276 return true;
4277 }
4278
4279 // Every extension that has a loader-defined trampoline needs to be marked as enabled or disabled so that we know whether or
4280 // not to return that trampoline when vkGetDeviceProcAddr is called
setup_logical_device_enabled_layer_extensions(const struct loader_instance * inst,struct loader_device * dev,const struct loader_extension_list * icd_exts,const VkDeviceCreateInfo * pCreateInfo)4281 void setup_logical_device_enabled_layer_extensions(const struct loader_instance *inst, struct loader_device *dev,
4282 const struct loader_extension_list *icd_exts,
4283 const VkDeviceCreateInfo *pCreateInfo) {
4284 // Can only setup debug marker as debug utils is an instance extensions.
4285 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; ++i) {
4286 if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_EXT_DEBUG_MARKER_EXTENSION_NAME)) {
4287 // Check if its supported by the driver
4288 for (uint32_t j = 0; j < icd_exts->count; ++j) {
4289 if (!strcmp(icd_exts->list[j].extensionName, VK_EXT_DEBUG_MARKER_EXTENSION_NAME)) {
4290 dev->layer_extensions.ext_debug_marker_enabled = true;
4291 }
4292 }
4293 // also check if any layers support it.
4294 for (uint32_t j = 0; j < inst->app_activated_layer_list.count; j++) {
4295 struct loader_layer_properties *layer = inst->app_activated_layer_list.list[j];
4296 for (uint32_t k = 0; k < layer->device_extension_list.count; k++) {
4297 if (!strcmp(layer->device_extension_list.list[k].props.extensionName, VK_EXT_DEBUG_MARKER_EXTENSION_NAME)) {
4298 dev->layer_extensions.ext_debug_marker_enabled = true;
4299 }
4300 }
4301 }
4302 }
4303 }
4304 }
4305
// Callback handed to layers (via VK_LOADER_LAYER_CREATE_DEVICE_CALLBACK) so a layer can create a
// VkDevice through the loader. When `instance` is VK_NULL_HANDLE, `physicalDevice` is a loader
// trampoline physical device and both the real physical device and the owning instance are
// recovered from it. `layerGIPA`, when non-NULL, is the calling layer's GetInstanceProcAddr;
// `nextGDPA` receives the next GetDeviceProcAddr in the chain.
// On failure, any partially-created loader_device is unlinked from its ICD and destroyed.
VKAPI_ATTR VkResult VKAPI_CALL loader_layer_create_device(VkInstance instance, VkPhysicalDevice physicalDevice,
                                                          const VkDeviceCreateInfo *pCreateInfo,
                                                          const VkAllocationCallbacks *pAllocator, VkDevice *pDevice,
                                                          PFN_vkGetInstanceProcAddr layerGIPA, PFN_vkGetDeviceProcAddr *nextGDPA) {
    VkResult res;
    VkPhysicalDevice internal_device = VK_NULL_HANDLE;
    struct loader_device *dev = NULL;
    struct loader_instance *inst = NULL;

    if (instance != VK_NULL_HANDLE) {
        inst = loader_get_instance(instance);
        internal_device = physicalDevice;
    } else {
        // No instance handle given: unwrap the trampoline physical device to get the
        // underlying physical device and the instance it belongs to.
        struct loader_physical_device_tramp *phys_dev = (struct loader_physical_device_tramp *)physicalDevice;
        internal_device = phys_dev->phys_dev;
        inst = (struct loader_instance *)phys_dev->this_instance;
    }

    // Get the physical device (ICD) extensions
    struct loader_extension_list icd_exts = {0};
    icd_exts.list = NULL;
    res = loader_init_generic_list(inst, (struct loader_generic_list *)&icd_exts, sizeof(VkExtensionProperties));
    if (VK_SUCCESS != res) {
        loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "vkCreateDevice: Failed to create ICD extension list");
        goto out;
    }

    // Enumerate device extensions through the calling layer when one was provided,
    // otherwise through the instance dispatch table.
    PFN_vkEnumerateDeviceExtensionProperties enumDeviceExtensionProperties = NULL;
    if (layerGIPA != NULL) {
        enumDeviceExtensionProperties =
            (PFN_vkEnumerateDeviceExtensionProperties)layerGIPA(instance, "vkEnumerateDeviceExtensionProperties");
    } else {
        enumDeviceExtensionProperties = inst->disp->layer_inst_disp.EnumerateDeviceExtensionProperties;
    }
    res = loader_add_device_extensions(inst, enumDeviceExtensionProperties, internal_device, "Unknown", &icd_exts);
    if (res != VK_SUCCESS) {
        loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "vkCreateDevice: Failed to add extensions to list");
        goto out;
    }

    // Make sure requested extensions to be enabled are supported
    res = loader_validate_device_extensions(inst, &inst->expanded_activated_layer_list, &icd_exts, pCreateInfo);
    if (res != VK_SUCCESS) {
        loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "vkCreateDevice: Failed to validate extensions in list");
        goto out;
    }

    dev = loader_create_logical_device(inst, pAllocator);
    if (dev == NULL) {
        res = VK_ERROR_OUT_OF_HOST_MEMORY;
        goto out;
    }

    // Record which trampolined extensions (e.g. VK_EXT_debug_marker) are enabled for this device.
    setup_logical_device_enabled_layer_extensions(inst, dev, &icd_exts, pCreateInfo);

    res = loader_create_device_chain(internal_device, pCreateInfo, pAllocator, inst, dev, layerGIPA, nextGDPA);
    if (res != VK_SUCCESS) {
        loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "vkCreateDevice: Failed to create device chain.");
        goto out;
    }

    *pDevice = dev->chain_device;

    // Initialize any device extension dispatch entry's from the instance list
    loader_init_dispatch_dev_ext(inst, dev);

    // Initialize WSI device extensions as part of core dispatch since loader
    // has dedicated trampoline code for these
    loader_init_device_extension_dispatch_table(&dev->loader_dispatch, inst->disp->layer_inst_disp.GetInstanceProcAddr,
                                                dev->loader_dispatch.core_dispatch.GetDeviceProcAddr, inst->instance, *pDevice);

out:

    // Failure cleanup
    if (VK_SUCCESS != res) {
        if (NULL != dev) {
            // Find the icd_term this device belongs to then remove it from that icd_term.
            // Need to iterate the linked lists and remove the device from it. Don't delete
            // the device here since it may not have been added to the icd_term and there
            // are other allocations attached to it.
            struct loader_icd_term *icd_term = inst->icd_terms;
            bool found = false;
            while (!found && NULL != icd_term) {
                struct loader_device *cur_dev = icd_term->logical_device_list;
                struct loader_device *prev_dev = NULL;
                while (NULL != cur_dev) {
                    if (cur_dev == dev) {
                        // Unlink: either pop the list head or splice around cur_dev.
                        if (cur_dev == icd_term->logical_device_list) {
                            icd_term->logical_device_list = cur_dev->next;
                        } else if (prev_dev) {
                            prev_dev->next = cur_dev->next;
                        }

                        found = true;
                        break;
                    }
                    prev_dev = cur_dev;
                    cur_dev = cur_dev->next;
                }
                icd_term = icd_term->next;
            }
            // Now destroy the device and the allocations associated with it.
            loader_destroy_logical_device(dev, pAllocator);
        }
    }

    if (NULL != icd_exts.list) {
        loader_destroy_generic_list(inst, (struct loader_generic_list *)&icd_exts);
    }
    return res;
}
4417
loader_layer_destroy_device(VkDevice device,const VkAllocationCallbacks * pAllocator,PFN_vkDestroyDevice destroyFunction)4418 VKAPI_ATTR void VKAPI_CALL loader_layer_destroy_device(VkDevice device, const VkAllocationCallbacks *pAllocator,
4419 PFN_vkDestroyDevice destroyFunction) {
4420 struct loader_device *dev;
4421
4422 if (device == VK_NULL_HANDLE) {
4423 return;
4424 }
4425
4426 struct loader_icd_term *icd_term = loader_get_icd_and_device(device, &dev, NULL);
4427
4428 destroyFunction(device, pAllocator);
4429 if (NULL != dev) {
4430 dev->chain_device = NULL;
4431 dev->icd_device = NULL;
4432 loader_remove_logical_device(icd_term, dev, pAllocator);
4433 }
4434 }
4435
// Given the list of layers to activate in the loader_instance
// structure. This function will add a VkLayerInstanceCreateInfo
// structure to the VkInstanceCreateInfo.pNext pointer.
// Each activated layer will have it's own VkLayerInstanceLink
// structure that tells the layer what Get*ProcAddr to call to
// get function pointers to the next layer down.
// Once the chain info has been created this function will
// execute the CreateInstance call chain. Each layer will
// then have an opportunity in it's CreateInstance function
// to setup it's dispatch table when the lower layer returns
// successfully.
// Each layer can wrap or not-wrap the returned VkInstance object
// as it sees fit.
// The instance chain is terminated by a loader function
// that will call CreateInstance on all available ICD's and
// cache those VkInstance objects for future use.
VkResult loader_create_instance_chain(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
                                      struct loader_instance *inst, VkInstance *created_instance) {
    uint32_t num_activated_layers = 0;
    struct activated_layer_info *activated_layers = NULL;
    VkLayerInstanceCreateInfo chain_info;
    VkLayerInstanceLink *layer_instance_link_info = NULL;
    VkInstanceCreateInfo loader_create_info;
    VkResult res;

    // The chain bottoms out at the loader terminators; each successfully loaded
    // layer then becomes the new "next" entry point for the layer above it.
    PFN_vkGetInstanceProcAddr next_gipa = loader_gpa_instance_terminator;
    PFN_vkGetInstanceProcAddr cur_gipa = loader_gpa_instance_terminator;
    PFN_vkGetDeviceProcAddr cur_gdpa = loader_gpa_device_terminator;
    PFN_GetPhysicalDeviceProcAddr next_gpdpa = loader_gpdpa_instance_terminator;
    PFN_GetPhysicalDeviceProcAddr cur_gpdpa = loader_gpdpa_instance_terminator;

    // Work on a shallow copy so the application's create info is left untouched.
    memcpy(&loader_create_info, pCreateInfo, sizeof(VkInstanceCreateInfo));

    if (inst->expanded_activated_layer_list.count > 0) {
        // Prepend the loader's link-info structure to the pNext chain; layers use it
        // to find their next Get*ProcAddr.
        chain_info.u.pLayerInfo = NULL;
        chain_info.pNext = pCreateInfo->pNext;
        chain_info.sType = VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO;
        chain_info.function = VK_LAYER_LINK_INFO;
        loader_create_info.pNext = &chain_info;

        layer_instance_link_info = loader_stack_alloc(sizeof(VkLayerInstanceLink) * inst->expanded_activated_layer_list.count);
        if (!layer_instance_link_info) {
            loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
                       "loader_create_instance_chain: Failed to alloc Instance objects for layer");
            return VK_ERROR_OUT_OF_HOST_MEMORY;
        }

        activated_layers = loader_stack_alloc(sizeof(struct activated_layer_info) * inst->expanded_activated_layer_list.count);
        if (!activated_layers) {
            loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
                       "loader_create_instance_chain: Failed to alloc activated layer storage array");
            return VK_ERROR_OUT_OF_HOST_MEMORY;
        }

        // Create instance chain of enabled layers.
        // Iterate in reverse so the layer closest to the application is processed
        // last and ends up at the head of the link list.
        for (int32_t i = inst->expanded_activated_layer_list.count - 1; i >= 0; i--) {
            struct loader_layer_properties *layer_prop = inst->expanded_activated_layer_list.list[i];
            loader_platform_dl_handle lib_handle;

            // Skip it if a Layer with the same name has been already successfully activated
            if (loader_names_array_has_layer_property(&layer_prop->info, num_activated_layers, activated_layers)) {
                continue;
            }

            lib_handle = loader_open_layer_file(inst, layer_prop);
            if (layer_prop->lib_status == LOADER_LAYER_LIB_ERROR_OUT_OF_MEMORY) {
                return VK_ERROR_OUT_OF_HOST_MEMORY;
            }
            if (!lib_handle) {
                // Library failed to load for a non-OOM reason; the verification loop
                // below reports it via layer_prop->lib_status.
                continue;
            }

            if (NULL == layer_prop->functions.negotiate_layer_interface) {
                PFN_vkNegotiateLoaderLayerInterfaceVersion negotiate_interface = NULL;
                bool functions_in_interface = false;
                // Resolve the negotiation entry point: use the name from the layer's
                // manifest if one was given, otherwise the standard symbol name.
                if (!layer_prop->functions.str_negotiate_interface || strlen(layer_prop->functions.str_negotiate_interface) == 0) {
                    negotiate_interface = (PFN_vkNegotiateLoaderLayerInterfaceVersion)loader_platform_get_proc_address(
                        lib_handle, "vkNegotiateLoaderLayerInterfaceVersion");
                } else {
                    negotiate_interface = (PFN_vkNegotiateLoaderLayerInterfaceVersion)loader_platform_get_proc_address(
                        lib_handle, layer_prop->functions.str_negotiate_interface);
                }

                // If we can negotiate an interface version, then we can also
                // get everything we need from the one function call, so try
                // that first, and see if we can get all the function pointers
                // necessary from that one call.
                if (NULL != negotiate_interface) {
                    layer_prop->functions.negotiate_layer_interface = negotiate_interface;

                    VkNegotiateLayerInterface interface_struct;

                    if (loader_get_layer_interface_version(negotiate_interface, &interface_struct)) {
                        // Go ahead and set the properties version to the
                        // correct value.
                        layer_prop->interface_version = interface_struct.loaderLayerInterfaceVersion;

                        // If the interface is 2 or newer, we have access to the
                        // new GetPhysicalDeviceProcAddr function, so grab it,
                        // and the other necessary functions, from the
                        // structure.
                        if (interface_struct.loaderLayerInterfaceVersion > 1) {
                            cur_gipa = interface_struct.pfnGetInstanceProcAddr;
                            cur_gdpa = interface_struct.pfnGetDeviceProcAddr;
                            cur_gpdpa = interface_struct.pfnGetPhysicalDeviceProcAddr;
                            if (cur_gipa != NULL) {
                                // We've set the functions, so make sure we
                                // don't do the unnecessary calls later.
                                functions_in_interface = true;
                            }
                        }
                    }
                }

                if (!functions_in_interface) {
                    // Fall back to resolving vkGetInstanceProcAddr directly, using the
                    // manifest-specified symbol name when present.
                    if ((cur_gipa = layer_prop->functions.get_instance_proc_addr) == NULL) {
                        if (layer_prop->functions.str_gipa == NULL || strlen(layer_prop->functions.str_gipa) == 0) {
                            cur_gipa =
                                (PFN_vkGetInstanceProcAddr)loader_platform_get_proc_address(lib_handle, "vkGetInstanceProcAddr");
                            layer_prop->functions.get_instance_proc_addr = cur_gipa;

                            if (NULL == cur_gipa) {
                                loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_LAYER_BIT, 0,
                                           "loader_create_instance_chain: Failed to find \'vkGetInstanceProcAddr\' in layer \"%s\"",
                                           layer_prop->lib_name);
                                continue;
                            }
                        } else {
                            cur_gipa = (PFN_vkGetInstanceProcAddr)loader_platform_get_proc_address(lib_handle,
                                                                                                   layer_prop->functions.str_gipa);

                            if (NULL == cur_gipa) {
                                loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_LAYER_BIT, 0,
                                           "loader_create_instance_chain: Failed to find \'%s\' in layer \"%s\"",
                                           layer_prop->functions.str_gipa, layer_prop->lib_name);
                                continue;
                            }
                        }
                    }
                }
            }

            // Link this layer into the chain: its "next" pointers are whatever the
            // previously processed (lower) layer exposed.
            layer_instance_link_info[num_activated_layers].pNext = chain_info.u.pLayerInfo;
            layer_instance_link_info[num_activated_layers].pfnNextGetInstanceProcAddr = next_gipa;
            layer_instance_link_info[num_activated_layers].pfnNextGetPhysicalDeviceProcAddr = next_gpdpa;
            next_gipa = cur_gipa;
            if (layer_prop->interface_version > 1 && cur_gpdpa != NULL) {
                layer_prop->functions.get_physical_device_proc_addr = cur_gpdpa;
                next_gpdpa = cur_gpdpa;
            }
            if (layer_prop->interface_version > 1 && cur_gipa != NULL) {
                layer_prop->functions.get_instance_proc_addr = cur_gipa;
            }
            if (layer_prop->interface_version > 1 && cur_gdpa != NULL) {
                layer_prop->functions.get_device_proc_addr = cur_gdpa;
            }

            chain_info.u.pLayerInfo = &layer_instance_link_info[num_activated_layers];

            // Record the activated layer so duplicates are skipped and for later logging.
            activated_layers[num_activated_layers].name = layer_prop->info.layerName;
            activated_layers[num_activated_layers].manifest = layer_prop->manifest_file_name;
            activated_layers[num_activated_layers].library = layer_prop->lib_name;
            activated_layers[num_activated_layers].is_implicit = !(layer_prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER);
            if (activated_layers[num_activated_layers].is_implicit) {
                activated_layers[num_activated_layers].disable_env = layer_prop->disable_env_var.name;
            }

            loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Insert instance layer \"%s\" (%s)",
                       layer_prop->info.layerName, layer_prop->lib_name);

            num_activated_layers++;
        }
    }

    // Make sure each layer requested by the application was actually loaded
    for (uint32_t exp = 0; exp < inst->expanded_activated_layer_list.count; ++exp) {
        struct loader_layer_properties *exp_layer_prop = inst->expanded_activated_layer_list.list[exp];
        bool found = false;
        for (uint32_t act = 0; act < num_activated_layers; ++act) {
            if (!strcmp(activated_layers[act].name, exp_layer_prop->info.layerName)) {
                found = true;
                break;
            }
        }
        // If it wasn't found, we want to at least log an error. However, if it was enabled by the application directly,
        // we want to return a bad layer error.
        if (!found) {
            bool app_requested = false;
            for (uint32_t act = 0; act < pCreateInfo->enabledLayerCount; ++act) {
                if (!strcmp(pCreateInfo->ppEnabledLayerNames[act], exp_layer_prop->info.layerName)) {
                    app_requested = true;
                    break;
                }
            }
            // App-requested layers get an error; implicit/env layers only an info message.
            VkFlags log_flag = VULKAN_LOADER_LAYER_BIT;
            char ending = '.';
            if (app_requested) {
                log_flag |= VULKAN_LOADER_ERROR_BIT;
                ending = '!';
            } else {
                log_flag |= VULKAN_LOADER_INFO_BIT;
            }
            switch (exp_layer_prop->lib_status) {
                case LOADER_LAYER_LIB_NOT_LOADED:
                    loader_log(inst, log_flag, 0, "Requested layer \"%s\" was not loaded%c", exp_layer_prop->info.layerName,
                               ending);
                    break;
                case LOADER_LAYER_LIB_ERROR_WRONG_BIT_TYPE: {
                    loader_log(inst, log_flag, 0, "Requested layer \"%s\" was wrong bit-type%c", exp_layer_prop->info.layerName,
                               ending);
                    break;
                }
                case LOADER_LAYER_LIB_ERROR_FAILED_TO_LOAD:
                    loader_log(inst, log_flag, 0, "Requested layer \"%s\" failed to load%c", exp_layer_prop->info.layerName,
                               ending);
                    break;
                case LOADER_LAYER_LIB_SUCCESS_LOADED:
                case LOADER_LAYER_LIB_ERROR_OUT_OF_MEMORY:
                    // Shouldn't be able to reach this but if it is, best to report a debug
                    loader_log(inst, log_flag, 0,
                               "Shouldn't reach this. A valid version of requested layer %s was loaded but was not found in the "
                               "list of activated layers%c",
                               exp_layer_prop->info.layerName, ending);
                    break;
            }
            if (app_requested) {
                return VK_ERROR_LAYER_NOT_PRESENT;
            }
        }
    }

    VkLoaderFeatureFlags feature_flags = 0;
#if defined(_WIN32)
    feature_flags = windows_initialize_dxgi();
#endif

    // Resolve vkCreateInstance from the top of the chain (first layer, or the
    // terminator when no layers are active).
    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)next_gipa(*created_instance, "vkCreateInstance");
    if (fpCreateInstance) {
        // Push three more loader-private structures onto the pNext chain:
        // the set-dispatch callback, the layer create/destroy-device callbacks,
        // and the loader feature flags.
        VkLayerInstanceCreateInfo instance_dispatch;
        instance_dispatch.sType = VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO;
        instance_dispatch.pNext = loader_create_info.pNext;
        instance_dispatch.function = VK_LOADER_DATA_CALLBACK;
        instance_dispatch.u.pfnSetInstanceLoaderData = vkSetInstanceDispatch;

        VkLayerInstanceCreateInfo device_callback;
        device_callback.sType = VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO;
        device_callback.pNext = &instance_dispatch;
        device_callback.function = VK_LOADER_LAYER_CREATE_DEVICE_CALLBACK;
        device_callback.u.layerDevice.pfnLayerCreateDevice = loader_layer_create_device;
        device_callback.u.layerDevice.pfnLayerDestroyDevice = loader_layer_destroy_device;

        VkLayerInstanceCreateInfo loader_features;
        loader_features.sType = VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO;
        loader_features.pNext = &device_callback;
        loader_features.function = VK_LOADER_FEATURES;
        loader_features.u.loaderFeatures = feature_flags;

        loader_create_info.pNext = &loader_features;

        // If layer debugging is enabled, let's print out the full callstack with layers in their
        // defined order.
        loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "vkCreateInstance layer callstack setup to:");
        loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "   <Application>");
        loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "     ||");
        loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "   <Loader>");
        loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "     ||");
        for (uint32_t cur_layer = 0; cur_layer < num_activated_layers; ++cur_layer) {
            uint32_t index = num_activated_layers - cur_layer - 1;
            loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "   %s", activated_layers[index].name);
            loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "           Type: %s",
                       activated_layers[index].is_implicit ? "Implicit" : "Explicit");
            if (activated_layers[index].is_implicit) {
                loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "               Disable Env Var:  %s",
                           activated_layers[index].disable_env);
            }
            loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "           Manifest: %s", activated_layers[index].manifest);
            loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "           Library:  %s", activated_layers[index].library);
            loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "     ||");
        }
        loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "   <Drivers>");

        res = fpCreateInstance(&loader_create_info, pAllocator, created_instance);
    } else {
        loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_create_instance_chain: Failed to find \'vkCreateInstance\'");
        // Couldn't find CreateInstance function!
        res = VK_ERROR_INITIALIZATION_FAILED;
    }

    if (res == VK_SUCCESS) {
        // Copy the current disp table into the terminator_dispatch table so we can use it in loader_gpa_instance_terminator()
        memcpy(&inst->terminator_dispatch, &inst->disp->layer_inst_disp, sizeof(VkLayerInstanceDispatchTable));

        loader_init_instance_core_dispatch_table(&inst->disp->layer_inst_disp, next_gipa, *created_instance);
        inst->instance = *created_instance;

        // Remember the application's enabled layer names so device creation can
        // detect (deprecated) mismatched ppEnabledLayerNames usage later.
        if (pCreateInfo->enabledLayerCount > 0 && pCreateInfo->ppEnabledLayerNames != NULL) {
            res = create_string_list(inst, pCreateInfo->enabledLayerCount, &inst->enabled_layer_names);
            if (res != VK_SUCCESS) {
                return res;
            }

            for (uint32_t i = 0; i < pCreateInfo->enabledLayerCount; ++i) {
                res = copy_str_to_string_list(inst, &inst->enabled_layer_names, pCreateInfo->ppEnabledLayerNames[i],
                                              strlen(pCreateInfo->ppEnabledLayerNames[i]));
                if (res != VK_SUCCESS) return res;
            }
        }
    }

    return res;
}
4747
// Fill in the instance extension entries of the layer dispatch table, querying
// through the top of the now-active layer chain.
void loader_activate_instance_layer_extensions(struct loader_instance *inst, VkInstance created_inst) {
    PFN_vkGetInstanceProcAddr chain_gipa = inst->disp->layer_inst_disp.GetInstanceProcAddr;
    loader_init_instance_extension_dispatch_table(&inst->disp->layer_inst_disp, chain_gipa, created_inst);
}
4752
4753 #if defined(__APPLE__)
loader_create_device_chain(const VkPhysicalDevice pd,const VkDeviceCreateInfo * pCreateInfo,const VkAllocationCallbacks * pAllocator,const struct loader_instance * inst,struct loader_device * dev,PFN_vkGetInstanceProcAddr callingLayer,PFN_vkGetDeviceProcAddr * layerNextGDPA)4754 VkResult loader_create_device_chain(const VkPhysicalDevice pd, const VkDeviceCreateInfo *pCreateInfo,
4755 const VkAllocationCallbacks *pAllocator, const struct loader_instance *inst,
4756 struct loader_device *dev, PFN_vkGetInstanceProcAddr callingLayer,
4757 PFN_vkGetDeviceProcAddr *layerNextGDPA) __attribute__((optnone)) {
4758 #else
4759 VkResult loader_create_device_chain(const VkPhysicalDevice pd, const VkDeviceCreateInfo *pCreateInfo,
4760 const VkAllocationCallbacks *pAllocator, const struct loader_instance *inst,
4761 struct loader_device *dev, PFN_vkGetInstanceProcAddr callingLayer,
4762 PFN_vkGetDeviceProcAddr *layerNextGDPA) {
4763 #endif
4764 uint32_t num_activated_layers = 0;
4765 struct activated_layer_info *activated_layers = NULL;
4766 VkLayerDeviceLink *layer_device_link_info;
4767 VkLayerDeviceCreateInfo chain_info;
4768 VkDeviceCreateInfo loader_create_info;
4769 VkDeviceGroupDeviceCreateInfo *original_device_group_create_info_struct = NULL;
4770 VkResult res;
4771
4772 PFN_vkGetDeviceProcAddr fpGDPA = NULL, nextGDPA = loader_gpa_device_terminator;
4773 PFN_vkGetInstanceProcAddr fpGIPA = NULL, nextGIPA = loader_gpa_instance_terminator;
4774
4775 memcpy(&loader_create_info, pCreateInfo, sizeof(VkDeviceCreateInfo));
4776
4777 if (loader_create_info.enabledLayerCount > 0 && loader_create_info.ppEnabledLayerNames != NULL) {
4778 bool invalid_device_layer_usage = false;
4779
4780 if (loader_create_info.enabledLayerCount != inst->enabled_layer_names.count && loader_create_info.enabledLayerCount > 0) {
4781 invalid_device_layer_usage = true;
4782 } else if (loader_create_info.enabledLayerCount > 0 && loader_create_info.ppEnabledLayerNames == NULL) {
4783 invalid_device_layer_usage = true;
4784 } else if (loader_create_info.enabledLayerCount == 0 && loader_create_info.ppEnabledLayerNames != NULL) {
4785 invalid_device_layer_usage = true;
4786 } else if (inst->enabled_layer_names.list != NULL) {
4787 for (uint32_t i = 0; i < loader_create_info.enabledLayerCount; i++) {
4788 const char *device_layer_names = loader_create_info.ppEnabledLayerNames[i];
4789
4790 if (strcmp(device_layer_names, inst->enabled_layer_names.list[i]) != 0) {
4791 invalid_device_layer_usage = true;
4792 break;
4793 }
4794 }
4795 }
4796
4797 if (invalid_device_layer_usage) {
4798 loader_log(
4799 inst, VULKAN_LOADER_WARN_BIT, 0,
4800 "loader_create_device_chain: Using deprecated and ignored 'ppEnabledLayerNames' member of 'VkDeviceCreateInfo' "
4801 "when creating a Vulkan device.");
4802 }
4803 }
4804
4805 // Before we continue, we need to find out if the KHR_device_group extension is in the enabled list. If it is, we then
4806 // need to look for the corresponding VkDeviceGroupDeviceCreateInfo struct in the device list. This is because we
4807 // need to replace all the incoming physical device values (which are really loader trampoline physical device values)
4808 // with the layer/ICD version.
4809 {
4810 VkBaseOutStructure *pNext = (VkBaseOutStructure *)loader_create_info.pNext;
4811 VkBaseOutStructure *pPrev = (VkBaseOutStructure *)&loader_create_info;
4812 while (NULL != pNext) {
4813 if (VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO == pNext->sType) {
4814 VkDeviceGroupDeviceCreateInfo *cur_struct = (VkDeviceGroupDeviceCreateInfo *)pNext;
4815 if (0 < cur_struct->physicalDeviceCount && NULL != cur_struct->pPhysicalDevices) {
4816 VkDeviceGroupDeviceCreateInfo *temp_struct = loader_stack_alloc(sizeof(VkDeviceGroupDeviceCreateInfo));
4817 VkPhysicalDevice *phys_dev_array = NULL;
4818 if (NULL == temp_struct) {
4819 return VK_ERROR_OUT_OF_HOST_MEMORY;
4820 }
4821 memcpy(temp_struct, cur_struct, sizeof(VkDeviceGroupDeviceCreateInfo));
4822 phys_dev_array = loader_stack_alloc(sizeof(VkPhysicalDevice) * cur_struct->physicalDeviceCount);
4823 if (NULL == phys_dev_array) {
4824 return VK_ERROR_OUT_OF_HOST_MEMORY;
4825 }
4826
4827 // Before calling down, replace the incoming physical device values (which are really loader trampoline
4828 // physical devices) with the next layer (or possibly even the terminator) physical device values.
4829 struct loader_physical_device_tramp *cur_tramp;
4830 for (uint32_t phys_dev = 0; phys_dev < cur_struct->physicalDeviceCount; phys_dev++) {
4831 cur_tramp = (struct loader_physical_device_tramp *)cur_struct->pPhysicalDevices[phys_dev];
4832 phys_dev_array[phys_dev] = cur_tramp->phys_dev;
4833 }
4834 temp_struct->pPhysicalDevices = phys_dev_array;
4835
4836 original_device_group_create_info_struct = (VkDeviceGroupDeviceCreateInfo *)pPrev->pNext;
4837
4838 // Replace the old struct in the pNext chain with this one.
4839 pPrev->pNext = (VkBaseOutStructure *)temp_struct;
4840 }
4841 break;
4842 }
4843
4844 pPrev = pNext;
4845 pNext = pNext->pNext;
4846 }
4847 }
4848 if (inst->expanded_activated_layer_list.count > 0) {
4849 layer_device_link_info = loader_stack_alloc(sizeof(VkLayerDeviceLink) * inst->expanded_activated_layer_list.count);
4850 if (!layer_device_link_info) {
4851 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
4852 "loader_create_device_chain: Failed to alloc Device objects for layer. Skipping Layer.");
4853 return VK_ERROR_OUT_OF_HOST_MEMORY;
4854 }
4855
4856 activated_layers = loader_stack_alloc(sizeof(struct activated_layer_info) * inst->expanded_activated_layer_list.count);
4857 if (!activated_layers) {
4858 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
4859 "loader_create_device_chain: Failed to alloc activated layer storage array");
4860 return VK_ERROR_OUT_OF_HOST_MEMORY;
4861 }
4862
4863 chain_info.sType = VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO;
4864 chain_info.function = VK_LAYER_LINK_INFO;
4865 chain_info.u.pLayerInfo = NULL;
4866 chain_info.pNext = loader_create_info.pNext;
4867 loader_create_info.pNext = &chain_info;
4868
4869 // Create instance chain of enabled layers
4870 for (int32_t i = inst->expanded_activated_layer_list.count - 1; i >= 0; i--) {
4871 struct loader_layer_properties *layer_prop = inst->expanded_activated_layer_list.list[i];
4872 loader_platform_dl_handle lib_handle = layer_prop->lib_handle;
4873
4874 // Skip it if a Layer with the same name has been already successfully activated
4875 if (loader_names_array_has_layer_property(&layer_prop->info, num_activated_layers, activated_layers)) {
4876 continue;
4877 }
4878
4879 // Skip the layer if the handle is NULL - this is likely because the library failed to load but wasn't removed from
4880 // the list.
4881 if (!lib_handle) {
4882 continue;
4883 }
4884
4885 // The Get*ProcAddr pointers will already be filled in if they were received from either the json file or the
4886 // version negotiation
4887 if ((fpGIPA = layer_prop->functions.get_instance_proc_addr) == NULL) {
4888 if (layer_prop->functions.str_gipa == NULL || strlen(layer_prop->functions.str_gipa) == 0) {
4889 fpGIPA = (PFN_vkGetInstanceProcAddr)loader_platform_get_proc_address(lib_handle, "vkGetInstanceProcAddr");
4890 layer_prop->functions.get_instance_proc_addr = fpGIPA;
4891 } else
4892 fpGIPA =
4893 (PFN_vkGetInstanceProcAddr)loader_platform_get_proc_address(lib_handle, layer_prop->functions.str_gipa);
4894 if (!fpGIPA) {
4895 loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_LAYER_BIT, 0,
4896 "loader_create_device_chain: Failed to find \'vkGetInstanceProcAddr\' in layer \"%s\". "
4897 "Skipping layer.",
4898 layer_prop->lib_name);
4899 continue;
4900 }
4901 }
4902
4903 if (fpGIPA == callingLayer) {
4904 if (layerNextGDPA != NULL) {
4905 *layerNextGDPA = nextGDPA;
4906 }
4907 // Break here because if fpGIPA is the same as callingLayer, that means a layer is trying to create a device,
4908 // and once we don't want to continue any further as the next layer will be the calling layer
4909 break;
4910 }
4911
4912 if ((fpGDPA = layer_prop->functions.get_device_proc_addr) == NULL) {
4913 if (layer_prop->functions.str_gdpa == NULL || strlen(layer_prop->functions.str_gdpa) == 0) {
4914 fpGDPA = (PFN_vkGetDeviceProcAddr)loader_platform_get_proc_address(lib_handle, "vkGetDeviceProcAddr");
4915 layer_prop->functions.get_device_proc_addr = fpGDPA;
4916 } else
4917 fpGDPA = (PFN_vkGetDeviceProcAddr)loader_platform_get_proc_address(lib_handle, layer_prop->functions.str_gdpa);
4918 if (!fpGDPA) {
4919 loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0,
4920 "Failed to find vkGetDeviceProcAddr in layer \"%s\"", layer_prop->lib_name);
4921 continue;
4922 }
4923 }
4924
4925 layer_device_link_info[num_activated_layers].pNext = chain_info.u.pLayerInfo;
4926 layer_device_link_info[num_activated_layers].pfnNextGetInstanceProcAddr = nextGIPA;
4927 layer_device_link_info[num_activated_layers].pfnNextGetDeviceProcAddr = nextGDPA;
4928 chain_info.u.pLayerInfo = &layer_device_link_info[num_activated_layers];
4929 nextGIPA = fpGIPA;
4930 nextGDPA = fpGDPA;
4931
4932 activated_layers[num_activated_layers].name = layer_prop->info.layerName;
4933 activated_layers[num_activated_layers].manifest = layer_prop->manifest_file_name;
4934 activated_layers[num_activated_layers].library = layer_prop->lib_name;
4935 activated_layers[num_activated_layers].is_implicit = !(layer_prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER);
4936 if (activated_layers[num_activated_layers].is_implicit) {
4937 activated_layers[num_activated_layers].disable_env = layer_prop->disable_env_var.name;
4938 }
4939
4940 loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Inserted device layer \"%s\" (%s)",
4941 layer_prop->info.layerName, layer_prop->lib_name);
4942
4943 num_activated_layers++;
4944 }
4945 }
4946
4947 VkDevice created_device = (VkDevice)dev;
4948 PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)nextGIPA(inst->instance, "vkCreateDevice");
4949 if (fpCreateDevice) {
4950 VkLayerDeviceCreateInfo create_info_disp;
4951
4952 create_info_disp.sType = VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO;
4953 create_info_disp.function = VK_LOADER_DATA_CALLBACK;
4954
4955 create_info_disp.u.pfnSetDeviceLoaderData = vkSetDeviceDispatch;
4956
4957 // If layer debugging is enabled, let's print out the full callstack with layers in their
4958 // defined order.
4959 uint32_t layer_driver_bits = VULKAN_LOADER_LAYER_BIT | VULKAN_LOADER_DRIVER_BIT;
4960 loader_log(inst, layer_driver_bits, 0, "vkCreateDevice layer callstack setup to:");
4961 loader_log(inst, layer_driver_bits, 0, " <Application>");
4962 loader_log(inst, layer_driver_bits, 0, " ||");
4963 loader_log(inst, layer_driver_bits, 0, " <Loader>");
4964 loader_log(inst, layer_driver_bits, 0, " ||");
4965 for (uint32_t cur_layer = 0; cur_layer < num_activated_layers; ++cur_layer) {
4966 uint32_t index = num_activated_layers - cur_layer - 1;
4967 loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " %s", activated_layers[index].name);
4968 loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " Type: %s",
4969 activated_layers[index].is_implicit ? "Implicit" : "Explicit");
4970 if (activated_layers[index].is_implicit) {
4971 loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " Disable Env Var: %s",
4972 activated_layers[index].disable_env);
4973 }
4974 loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " Manifest: %s", activated_layers[index].manifest);
4975 loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " Library: %s", activated_layers[index].library);
4976 loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " ||");
4977 }
4978 loader_log(inst, layer_driver_bits, 0, " <Device>");
4979 create_info_disp.pNext = loader_create_info.pNext;
4980 loader_create_info.pNext = &create_info_disp;
4981 res = fpCreateDevice(pd, &loader_create_info, pAllocator, &created_device);
4982 if (res != VK_SUCCESS) {
4983 return res;
4984 }
4985 dev->chain_device = created_device;
4986
4987 // Because we changed the pNext chain to use our own VkDeviceGroupDeviceCreateInfo, we need to fixup the chain to
4988 // point back at the original VkDeviceGroupDeviceCreateInfo.
4989 VkBaseOutStructure *pNext = (VkBaseOutStructure *)loader_create_info.pNext;
4990 VkBaseOutStructure *pPrev = (VkBaseOutStructure *)&loader_create_info;
4991 while (NULL != pNext) {
4992 if (VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO == pNext->sType) {
4993 VkDeviceGroupDeviceCreateInfo *cur_struct = (VkDeviceGroupDeviceCreateInfo *)pNext;
4994 if (0 < cur_struct->physicalDeviceCount && NULL != cur_struct->pPhysicalDevices) {
4995 pPrev->pNext = (VkBaseOutStructure *)original_device_group_create_info_struct;
4996 }
4997 break;
4998 }
4999
5000 pPrev = pNext;
5001 pNext = pNext->pNext;
5002 }
5003
5004 } else {
5005 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
5006 "loader_create_device_chain: Failed to find \'vkCreateDevice\' in layers or ICD");
5007 // Couldn't find CreateDevice function!
5008 return VK_ERROR_INITIALIZATION_FAILED;
5009 }
5010
5011 // Initialize device dispatch table
5012 loader_init_device_dispatch_table(&dev->loader_dispatch, nextGDPA, dev->chain_device);
5013 // Initialize the dispatch table to functions which need terminators
5014 // These functions point directly to the driver, not the terminator functions
5015 init_extension_device_proc_terminator_dispatch(dev);
5016
5017 return res;
5018 }
5019
5020 VkResult loader_validate_layers(const struct loader_instance *inst, const uint32_t layer_count,
5021 const char *const *ppEnabledLayerNames, const struct loader_layer_list *list) {
5022 struct loader_layer_properties *prop;
5023
5024 if (layer_count > 0 && ppEnabledLayerNames == NULL) {
5025 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
5026 "loader_validate_layers: ppEnabledLayerNames is NULL but enabledLayerCount is greater than zero");
5027 return VK_ERROR_LAYER_NOT_PRESENT;
5028 }
5029
5030 for (uint32_t i = 0; i < layer_count; i++) {
5031 VkStringErrorFlags result = vk_string_validate(MaxLoaderStringLength, ppEnabledLayerNames[i]);
5032 if (result != VK_STRING_ERROR_NONE) {
5033 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
5034 "loader_validate_layers: ppEnabledLayerNames contains string that is too long or is badly formed");
5035 return VK_ERROR_LAYER_NOT_PRESENT;
5036 }
5037
5038 prop = loader_find_layer_property(ppEnabledLayerNames[i], list);
5039 if (NULL == prop) {
5040 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
5041 "loader_validate_layers: Layer %d does not exist in the list of available layers", i);
5042 return VK_ERROR_LAYER_NOT_PRESENT;
5043 }
5044 if (inst->settings.settings_active && prop->settings_control_value != LOADER_SETTINGS_LAYER_CONTROL_ON &&
5045 prop->settings_control_value != LOADER_SETTINGS_LAYER_CONTROL_DEFAULT) {
5046 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
5047 "loader_validate_layers: Layer %d was explicitly prevented from being enabled by the loader settings file",
5048 i);
5049 return VK_ERROR_LAYER_NOT_PRESENT;
5050 }
5051 }
5052 return VK_SUCCESS;
5053 }
5054
5055 VkResult loader_validate_instance_extensions(struct loader_instance *inst, const struct loader_extension_list *icd_exts,
5056 const struct loader_layer_list *instance_layers,
5057 const struct loader_envvar_all_filters *layer_filters,
5058 const VkInstanceCreateInfo *pCreateInfo) {
5059 VkExtensionProperties *extension_prop;
5060 char *env_value;
5061 bool check_if_known = true;
5062 VkResult res = VK_SUCCESS;
5063
5064 struct loader_pointer_layer_list active_layers = {0};
5065 struct loader_pointer_layer_list expanded_layers = {0};
5066
5067 if (pCreateInfo->enabledExtensionCount > 0 && pCreateInfo->ppEnabledExtensionNames == NULL) {
5068 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
5069 "loader_validate_instance_extensions: Instance ppEnabledExtensionNames is NULL but enabledExtensionCount is "
5070 "greater than zero");
5071 return VK_ERROR_EXTENSION_NOT_PRESENT;
5072 }
5073 if (!loader_init_pointer_layer_list(inst, &active_layers)) {
5074 res = VK_ERROR_OUT_OF_HOST_MEMORY;
5075 goto out;
5076 }
5077 if (!loader_init_pointer_layer_list(inst, &expanded_layers)) {
5078 res = VK_ERROR_OUT_OF_HOST_MEMORY;
5079 goto out;
5080 }
5081
5082 if (inst->settings.settings_active) {
5083 res = enable_correct_layers_from_settings(inst, layer_filters, pCreateInfo->enabledLayerCount,
5084 pCreateInfo->ppEnabledLayerNames, instance_layers, &active_layers,
5085 &expanded_layers);
5086 if (res != VK_SUCCESS) {
5087 goto out;
5088 }
5089 } else {
5090 // Build the lists of active layers (including meta layers) and expanded layers (with meta layers resolved to their
5091 // components)
5092 res = loader_add_implicit_layers(inst, layer_filters, &active_layers, &expanded_layers, instance_layers);
5093 if (res != VK_SUCCESS) {
5094 goto out;
5095 }
5096 res = loader_add_environment_layers(inst, VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER, layer_filters, &active_layers,
5097 &expanded_layers, instance_layers);
5098 if (res != VK_SUCCESS) {
5099 goto out;
5100 }
5101 res = loader_add_layer_names_to_list(inst, layer_filters, &active_layers, &expanded_layers, pCreateInfo->enabledLayerCount,
5102 pCreateInfo->ppEnabledLayerNames, instance_layers);
5103 if (VK_SUCCESS != res) {
5104 goto out;
5105 }
5106 }
5107 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
5108 VkStringErrorFlags result = vk_string_validate(MaxLoaderStringLength, pCreateInfo->ppEnabledExtensionNames[i]);
5109 if (result != VK_STRING_ERROR_NONE) {
5110 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
5111 "loader_validate_instance_extensions: Instance ppEnabledExtensionNames contains "
5112 "string that is too long or is badly formed");
5113 res = VK_ERROR_EXTENSION_NOT_PRESENT;
5114 goto out;
5115 }
5116
5117 // Check if a user wants to disable the instance extension filtering behavior
5118 env_value = loader_getenv("VK_LOADER_DISABLE_INST_EXT_FILTER", inst);
5119 if (NULL != env_value && atoi(env_value) != 0) {
5120 check_if_known = false;
5121 }
5122 loader_free_getenv(env_value, inst);
5123
5124 if (check_if_known) {
5125 // See if the extension is in the list of supported extensions
5126 bool found = false;
5127 for (uint32_t j = 0; LOADER_INSTANCE_EXTENSIONS[j] != NULL; j++) {
5128 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], LOADER_INSTANCE_EXTENSIONS[j]) == 0) {
5129 found = true;
5130 break;
5131 }
5132 }
5133
5134 // If it isn't in the list, return an error
5135 if (!found) {
5136 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
5137 "loader_validate_instance_extensions: Extension %s not found in list of known instance extensions.",
5138 pCreateInfo->ppEnabledExtensionNames[i]);
5139 res = VK_ERROR_EXTENSION_NOT_PRESENT;
5140 goto out;
5141 }
5142 }
5143
5144 extension_prop = get_extension_property(pCreateInfo->ppEnabledExtensionNames[i], icd_exts);
5145
5146 if (extension_prop) {
5147 continue;
5148 }
5149
5150 extension_prop = NULL;
5151
5152 // Not in global list, search layer extension lists
5153 for (uint32_t j = 0; NULL == extension_prop && j < expanded_layers.count; ++j) {
5154 extension_prop =
5155 get_extension_property(pCreateInfo->ppEnabledExtensionNames[i], &expanded_layers.list[j]->instance_extension_list);
5156 if (extension_prop) {
5157 // Found the extension in one of the layers enabled by the app.
5158 break;
5159 }
5160
5161 struct loader_layer_properties *layer_prop =
5162 loader_find_layer_property(expanded_layers.list[j]->info.layerName, instance_layers);
5163 if (NULL == layer_prop) {
5164 // Should NOT get here, loader_validate_layers should have already filtered this case out.
5165 continue;
5166 }
5167 }
5168
5169 if (!extension_prop) {
5170 // Didn't find extension name in any of the global layers, error out
5171 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
5172 "loader_validate_instance_extensions: Instance extension %s not supported by available ICDs or enabled "
5173 "layers.",
5174 pCreateInfo->ppEnabledExtensionNames[i]);
5175 res = VK_ERROR_EXTENSION_NOT_PRESENT;
5176 goto out;
5177 }
5178 }
5179
5180 out:
5181 loader_destroy_pointer_layer_list(inst, &active_layers);
5182 loader_destroy_pointer_layer_list(inst, &expanded_layers);
5183 return res;
5184 }
5185
5186 VkResult loader_validate_device_extensions(struct loader_instance *this_instance,
5187 const struct loader_pointer_layer_list *activated_device_layers,
5188 const struct loader_extension_list *icd_exts, const VkDeviceCreateInfo *pCreateInfo) {
5189 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
5190 VkStringErrorFlags result = vk_string_validate(MaxLoaderStringLength, pCreateInfo->ppEnabledExtensionNames[i]);
5191 if (result != VK_STRING_ERROR_NONE) {
5192 loader_log(this_instance, VULKAN_LOADER_ERROR_BIT, 0,
5193 "loader_validate_device_extensions: Device ppEnabledExtensionNames contains "
5194 "string that is too long or is badly formed");
5195 return VK_ERROR_EXTENSION_NOT_PRESENT;
5196 }
5197
5198 const char *extension_name = pCreateInfo->ppEnabledExtensionNames[i];
5199 VkExtensionProperties *extension_prop = get_extension_property(extension_name, icd_exts);
5200
5201 if (extension_prop) {
5202 continue;
5203 }
5204
5205 // Not in global list, search activated layer extension lists
5206 for (uint32_t j = 0; j < activated_device_layers->count; j++) {
5207 struct loader_layer_properties *layer_prop = activated_device_layers->list[j];
5208
5209 extension_prop = get_dev_extension_property(extension_name, &layer_prop->device_extension_list);
5210 if (extension_prop) {
5211 // Found the extension in one of the layers enabled by the app.
5212 break;
5213 }
5214 }
5215
5216 if (!extension_prop) {
5217 // Didn't find extension name in any of the device layers, error out
5218 loader_log(this_instance, VULKAN_LOADER_ERROR_BIT, 0,
5219 "loader_validate_device_extensions: Device extension %s not supported by selected physical device "
5220 "or enabled layers.",
5221 pCreateInfo->ppEnabledExtensionNames[i]);
5222 return VK_ERROR_EXTENSION_NOT_PRESENT;
5223 }
5224 }
5225 return VK_SUCCESS;
5226 }
5227
5228 // Terminator functions for the Instance chain
5229 // All named terminator_<Vulkan API name>
5230 VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
5231 const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
5232 struct loader_icd_term *icd_term;
5233 VkExtensionProperties *prop;
5234 char **filtered_extension_names = NULL;
5235 VkInstanceCreateInfo icd_create_info;
5236 VkResult res = VK_SUCCESS;
5237 bool one_icd_successful = false;
5238
5239 struct loader_instance *ptr_instance = (struct loader_instance *)*pInstance;
5240 if (NULL == ptr_instance) {
5241 loader_log(ptr_instance, VULKAN_LOADER_WARN_BIT, 0,
5242 "terminator_CreateInstance: Loader instance pointer null encountered. Possibly set by active layer. (Policy "
5243 "#LLP_LAYER_21)");
5244 } else if (LOADER_MAGIC_NUMBER != ptr_instance->magic) {
5245 loader_log(ptr_instance, VULKAN_LOADER_WARN_BIT, 0,
5246 "terminator_CreateInstance: Instance pointer (%p) has invalid MAGIC value 0x%08lx. Instance value possibly "
5247 "corrupted by active layer (Policy #LLP_LAYER_21). ",
5248 ptr_instance, ptr_instance->magic);
5249 }
5250
5251 // Save the application version if it has been modified - layers sometimes needs features in newer API versions than
5252 // what the application requested, and thus will increase the instance version to a level that suites their needs.
5253 if (pCreateInfo->pApplicationInfo && pCreateInfo->pApplicationInfo->apiVersion) {
5254 loader_api_version altered_version = loader_make_version(pCreateInfo->pApplicationInfo->apiVersion);
5255 if (altered_version.major != ptr_instance->app_api_version.major ||
5256 altered_version.minor != ptr_instance->app_api_version.minor) {
5257 ptr_instance->app_api_version = altered_version;
5258 }
5259 }
5260
5261 memcpy(&icd_create_info, pCreateInfo, sizeof(icd_create_info));
5262
5263 icd_create_info.enabledLayerCount = 0;
5264 icd_create_info.ppEnabledLayerNames = NULL;
5265
5266 // NOTE: Need to filter the extensions to only those supported by the ICD.
5267 // No ICD will advertise support for layers. An ICD library could
5268 // support a layer, but it would be independent of the actual ICD,
5269 // just in the same library.
5270 uint32_t extension_count = pCreateInfo->enabledExtensionCount;
5271 #if defined(LOADER_ENABLE_LINUX_SORT)
5272 extension_count += 1;
5273 #endif // LOADER_ENABLE_LINUX_SORT
5274 filtered_extension_names = loader_stack_alloc(extension_count * sizeof(char *));
5275 if (!filtered_extension_names) {
5276 loader_log(ptr_instance, VULKAN_LOADER_ERROR_BIT, 0,
5277 "terminator_CreateInstance: Failed create extension name array for %d extensions", extension_count);
5278 res = VK_ERROR_OUT_OF_HOST_MEMORY;
5279 goto out;
5280 }
5281 icd_create_info.ppEnabledExtensionNames = (const char *const *)filtered_extension_names;
5282
5283 // Determine if Get Physical Device Properties 2 is available to this Instance
5284 if (pCreateInfo->pApplicationInfo && pCreateInfo->pApplicationInfo->apiVersion >= VK_API_VERSION_1_1) {
5285 ptr_instance->supports_get_dev_prop_2 = true;
5286 } else {
5287 for (uint32_t j = 0; j < pCreateInfo->enabledExtensionCount; j++) {
5288 if (!strcmp(pCreateInfo->ppEnabledExtensionNames[j], VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
5289 ptr_instance->supports_get_dev_prop_2 = true;
5290 break;
5291 }
5292 }
5293 }
5294
5295 for (uint32_t i = 0; i < ptr_instance->icd_tramp_list.count; i++) {
5296 icd_term = loader_icd_add(ptr_instance, &ptr_instance->icd_tramp_list.scanned_list[i]);
5297 if (NULL == icd_term) {
5298 loader_log(ptr_instance, VULKAN_LOADER_ERROR_BIT, 0,
5299 "terminator_CreateInstance: Failed to add ICD %d to ICD trampoline list.", i);
5300 res = VK_ERROR_OUT_OF_HOST_MEMORY;
5301 goto out;
5302 }
5303
5304 // If any error happens after here, we need to remove the ICD from the list,
5305 // because we've already added it, but haven't validated it
5306
5307 // Make sure that we reset the pApplicationInfo so we don't get an old pointer
5308 icd_create_info.pApplicationInfo = pCreateInfo->pApplicationInfo;
5309 icd_create_info.enabledExtensionCount = 0;
5310 struct loader_extension_list icd_exts = {0};
5311
5312 // traverse scanned icd list adding non-duplicate extensions to the list
5313 res = loader_init_generic_list(ptr_instance, (struct loader_generic_list *)&icd_exts, sizeof(VkExtensionProperties));
5314 if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
5315 // If out of memory, bail immediately.
5316 goto out;
5317 } else if (VK_SUCCESS != res) {
5318 // Something bad happened with this ICD, so free it and try the
5319 // next.
5320 ptr_instance->icd_terms = icd_term->next;
5321 icd_term->next = NULL;
5322 loader_icd_destroy(ptr_instance, icd_term, pAllocator);
5323 continue;
5324 }
5325
5326 res = loader_add_instance_extensions(ptr_instance, icd_term->scanned_icd->EnumerateInstanceExtensionProperties,
5327 icd_term->scanned_icd->lib_name, &icd_exts);
5328 if (VK_SUCCESS != res) {
5329 loader_destroy_generic_list(ptr_instance, (struct loader_generic_list *)&icd_exts);
5330 if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
5331 // If out of memory, bail immediately.
5332 goto out;
5333 } else {
5334 // Something bad happened with this ICD, so free it and try the next.
5335 ptr_instance->icd_terms = icd_term->next;
5336 icd_term->next = NULL;
5337 loader_icd_destroy(ptr_instance, icd_term, pAllocator);
5338 continue;
5339 }
5340 }
5341
5342 for (uint32_t j = 0; j < pCreateInfo->enabledExtensionCount; j++) {
5343 prop = get_extension_property(pCreateInfo->ppEnabledExtensionNames[j], &icd_exts);
5344 if (prop) {
5345 filtered_extension_names[icd_create_info.enabledExtensionCount] = (char *)pCreateInfo->ppEnabledExtensionNames[j];
5346 icd_create_info.enabledExtensionCount++;
5347 }
5348 }
5349 #if defined(LOADER_ENABLE_LINUX_SORT)
5350 // Force on "VK_KHR_get_physical_device_properties2" for Linux as we use it for GPU sorting. This
5351 // should be done if the API version of either the application or the driver does not natively support
5352 // the core version of vkGetPhysicalDeviceProperties2 entrypoint.
5353 if ((ptr_instance->app_api_version.major == 1 && ptr_instance->app_api_version.minor == 0) ||
5354 (VK_API_VERSION_MAJOR(icd_term->scanned_icd->api_version) == 1 &&
5355 VK_API_VERSION_MINOR(icd_term->scanned_icd->api_version) == 0)) {
5356 prop = get_extension_property(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, &icd_exts);
5357 if (prop) {
5358 filtered_extension_names[icd_create_info.enabledExtensionCount] =
5359 (char *)VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME;
5360 icd_create_info.enabledExtensionCount++;
5361
5362 // At least one ICD supports this, so the instance should be able to support it
5363 ptr_instance->supports_get_dev_prop_2 = true;
5364 }
5365 }
5366 #endif // LOADER_ENABLE_LINUX_SORT
5367
5368 // Determine if vkGetPhysicalDeviceProperties2 is available to this Instance
5369 if (icd_term->scanned_icd->api_version >= VK_API_VERSION_1_1) {
5370 icd_term->supports_get_dev_prop_2 = true;
5371 } else {
5372 for (uint32_t j = 0; j < icd_create_info.enabledExtensionCount; j++) {
5373 if (!strcmp(filtered_extension_names[j], VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
5374 icd_term->supports_get_dev_prop_2 = true;
5375 break;
5376 }
5377 }
5378 }
5379
5380 loader_destroy_generic_list(ptr_instance, (struct loader_generic_list *)&icd_exts);
5381
5382 // Get the driver version from vkEnumerateInstanceVersion
5383 uint32_t icd_version = VK_API_VERSION_1_0;
5384 VkResult icd_result = VK_SUCCESS;
5385 if (icd_term->scanned_icd->api_version >= VK_API_VERSION_1_1) {
5386 PFN_vkEnumerateInstanceVersion icd_enumerate_instance_version =
5387 (PFN_vkEnumerateInstanceVersion)icd_term->scanned_icd->GetInstanceProcAddr(NULL, "vkEnumerateInstanceVersion");
5388 if (icd_enumerate_instance_version != NULL) {
5389 icd_result = icd_enumerate_instance_version(&icd_version);
5390 if (icd_result != VK_SUCCESS) {
5391 icd_version = VK_API_VERSION_1_0;
5392 loader_log(ptr_instance, VULKAN_LOADER_DEBUG_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
5393 "terminator_CreateInstance: ICD \"%s\" vkEnumerateInstanceVersion returned error. The ICD will be "
5394 "treated as a 1.0 ICD",
5395 icd_term->scanned_icd->lib_name);
5396 } else if (VK_API_VERSION_MINOR(icd_version) == 0) {
5397 loader_log(ptr_instance, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
5398 "terminator_CreateInstance: Manifest ICD for \"%s\" contained a 1.1 or greater API version, but "
5399 "vkEnumerateInstanceVersion returned 1.0, treating as a 1.0 ICD",
5400 icd_term->scanned_icd->lib_name);
5401 }
5402 } else {
5403 loader_log(ptr_instance, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
5404 "terminator_CreateInstance: Manifest ICD for \"%s\" contained a 1.1 or greater API version, but does "
5405 "not support vkEnumerateInstanceVersion, treating as a 1.0 ICD",
5406 icd_term->scanned_icd->lib_name);
5407 }
5408 }
5409
5410 // Remove the portability enumeration flag bit if the ICD doesn't support the extension
5411 if ((pCreateInfo->flags & VK_INSTANCE_CREATE_ENUMERATE_PORTABILITY_BIT_KHR) == 1) {
5412 bool supports_portability_enumeration = false;
5413 for (uint32_t j = 0; j < icd_create_info.enabledExtensionCount; j++) {
5414 if (strcmp(filtered_extension_names[j], VK_KHR_PORTABILITY_ENUMERATION_EXTENSION_NAME) == 0) {
5415 supports_portability_enumeration = true;
5416 break;
5417 }
5418 }
5419 // If the icd supports the extension, use the flags as given, otherwise remove the portability bit
5420 icd_create_info.flags = supports_portability_enumeration
5421 ? pCreateInfo->flags
5422 : pCreateInfo->flags & (~VK_INSTANCE_CREATE_ENUMERATE_PORTABILITY_BIT_KHR);
5423 }
5424
5425 // Create an instance, substituting the version to 1.0 if necessary
5426 VkApplicationInfo icd_app_info;
5427 const uint32_t api_variant = 0;
5428 const uint32_t api_version_1_0 = VK_API_VERSION_1_0;
5429 uint32_t icd_version_nopatch =
5430 VK_MAKE_API_VERSION(api_variant, VK_API_VERSION_MAJOR(icd_version), VK_API_VERSION_MINOR(icd_version), 0);
5431 uint32_t requested_version = (pCreateInfo == NULL || pCreateInfo->pApplicationInfo == NULL)
5432 ? api_version_1_0
5433 : pCreateInfo->pApplicationInfo->apiVersion;
5434 if ((requested_version != 0) && (icd_version_nopatch == api_version_1_0)) {
5435 if (icd_create_info.pApplicationInfo == NULL) {
5436 memset(&icd_app_info, 0, sizeof(icd_app_info));
5437 } else {
5438 memmove(&icd_app_info, icd_create_info.pApplicationInfo, sizeof(icd_app_info));
5439 }
5440 icd_app_info.apiVersion = icd_version;
5441 icd_create_info.pApplicationInfo = &icd_app_info;
5442 }
5443 icd_result =
5444 ptr_instance->icd_tramp_list.scanned_list[i].CreateInstance(&icd_create_info, pAllocator, &(icd_term->instance));
5445 if (VK_ERROR_OUT_OF_HOST_MEMORY == icd_result) {
5446 // If out of memory, bail immediately.
5447 res = VK_ERROR_OUT_OF_HOST_MEMORY;
5448 goto out;
5449 } else if (VK_SUCCESS != icd_result) {
5450 loader_log(ptr_instance, VULKAN_LOADER_WARN_BIT, 0,
5451 "terminator_CreateInstance: Received return code %i from call to vkCreateInstance in ICD %s. Skipping "
5452 "this driver.",
5453 icd_result, icd_term->scanned_icd->lib_name);
5454 ptr_instance->icd_terms = icd_term->next;
5455 icd_term->next = NULL;
5456 loader_icd_destroy(ptr_instance, icd_term, pAllocator);
5457 continue;
5458 }
5459
5460 if (!loader_icd_init_entries(ptr_instance, icd_term)) {
5461 loader_log(ptr_instance, VULKAN_LOADER_WARN_BIT, 0,
5462 "terminator_CreateInstance: Failed to find required entrypoints in ICD %s. Skipping this driver.",
5463 icd_term->scanned_icd->lib_name);
5464 ptr_instance->icd_terms = icd_term->next;
5465 icd_term->next = NULL;
5466 loader_icd_destroy(ptr_instance, icd_term, pAllocator);
5467 continue;
5468 }
5469
5470 if (ptr_instance->icd_tramp_list.scanned_list[i].interface_version < 3 &&
5471 (
5472 #if defined(VK_USE_PLATFORM_XLIB_KHR)
5473 NULL != icd_term->dispatch.CreateXlibSurfaceKHR ||
5474 #endif // VK_USE_PLATFORM_XLIB_KHR
5475 #if defined(VK_USE_PLATFORM_XCB_KHR)
5476 NULL != icd_term->dispatch.CreateXcbSurfaceKHR ||
5477 #endif // VK_USE_PLATFORM_XCB_KHR
5478 #if defined(VK_USE_PLATFORM_WAYLAND_KHR)
5479 NULL != icd_term->dispatch.CreateWaylandSurfaceKHR ||
5480 #endif // VK_USE_PLATFORM_WAYLAND_KHR
5481 #if defined(VK_USE_PLATFORM_ANDROID_KHR)
5482 NULL != icd_term->dispatch.CreateAndroidSurfaceKHR ||
5483 #endif // VK_USE_PLATFORM_ANDROID_KHR
5484 #if defined(VK_USE_PLATFORM_OHOS)
5485 NULL != icd_term->dispatch.CreateSurfaceOHOS ||
5486 #endif // VK_USE_PLATFORM_OHOS
5487 #if defined(VK_USE_PLATFORM_WIN32_KHR)
5488 NULL != icd_term->dispatch.CreateWin32SurfaceKHR ||
5489 #endif // VK_USE_PLATFORM_WIN32_KHR
5490 NULL != icd_term->dispatch.DestroySurfaceKHR)) {
5491 loader_log(ptr_instance, VULKAN_LOADER_WARN_BIT, 0,
5492 "terminator_CreateInstance: Driver %s supports interface version %u but still exposes VkSurfaceKHR"
5493 " create/destroy entrypoints (Policy #LDP_DRIVER_8)",
5494 ptr_instance->icd_tramp_list.scanned_list[i].lib_name,
5495 ptr_instance->icd_tramp_list.scanned_list[i].interface_version);
5496 }
5497
5498 // If we made it this far, at least one ICD was successful
5499 one_icd_successful = true;
5500 }
5501
5502 // For vkGetPhysicalDeviceProperties2, at least one ICD needs to support the extension for the
5503 // instance to have it
5504 if (ptr_instance->supports_get_dev_prop_2) {
5505 bool at_least_one_supports = false;
5506 icd_term = ptr_instance->icd_terms;
5507 while (icd_term != NULL) {
5508 if (icd_term->supports_get_dev_prop_2) {
5509 at_least_one_supports = true;
5510 break;
5511 }
5512 icd_term = icd_term->next;
5513 }
5514 if (!at_least_one_supports) {
5515 ptr_instance->supports_get_dev_prop_2 = false;
5516 }
5517 }
5518
5519 // If no ICDs were added to instance list and res is unchanged from it's initial value, the loader was unable to
5520 // find a suitable ICD.
5521 if (VK_SUCCESS == res && (ptr_instance->icd_terms == NULL || !one_icd_successful)) {
5522 loader_log(ptr_instance, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
5523 "terminator_CreateInstance: Found no drivers!");
5524 res = VK_ERROR_INCOMPATIBLE_DRIVER;
5525 }
5526
5527 out:
5528
5529 ptr_instance->create_terminator_invalid_extension = false;
5530
5531 if (VK_SUCCESS != res) {
5532 if (VK_ERROR_EXTENSION_NOT_PRESENT == res) {
5533 ptr_instance->create_terminator_invalid_extension = true;
5534 }
5535
5536 while (NULL != ptr_instance->icd_terms) {
5537 icd_term = ptr_instance->icd_terms;
5538 ptr_instance->icd_terms = icd_term->next;
5539 if (NULL != icd_term->instance) {
5540 icd_term->dispatch.DestroyInstance(icd_term->instance, pAllocator);
5541 }
5542 loader_icd_destroy(ptr_instance, icd_term, pAllocator);
5543 }
5544 } else {
5545 // Check for enabled extensions here to setup the loader structures so the loader knows what extensions
5546 // it needs to worry about.
5547 // We do it here and again above the layers in the trampoline function since the trampoline function
5548 // may think different extensions are enabled than what's down here.
5549 // This is why we don't clear inside of these function calls.
5550 // The clearing should actually be handled by the overall memset of the pInstance structure in the
5551 // trampoline.
5552 wsi_create_instance(ptr_instance, pCreateInfo);
5553 check_for_enabled_debug_extensions(ptr_instance, pCreateInfo);
5554 extensions_create_instance(ptr_instance, pCreateInfo);
5555 }
5556
5557 return res;
5558 }
5559
// Terminator for vkDestroyInstance.
// Unlinks the instance from the loader's global instance list, calls DestroyInstance on every
// ICD instance that was created, then frees all loader-owned state attached to the instance
// (scanned ICD list, extension list, terminator physical device and device-group arrays,
// extension dispatch tables, and the enabled-layer name list).
VKAPI_ATTR void VKAPI_CALL terminator_DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
    struct loader_instance *ptr_instance = loader_get_instance(instance);
    if (NULL == ptr_instance) {
        return;
    }
    struct loader_icd_term *icd_terms = ptr_instance->icd_terms;
    struct loader_icd_term *next_icd_term;

    // Remove this instance from the list of instances:
    struct loader_instance *prev = NULL;
    loader_platform_thread_lock_mutex(&loader_global_instance_list_lock);
    struct loader_instance *next = loader.instances;
    while (next != NULL) {
        if (next == ptr_instance) {
            // Remove this instance from the list:
            if (prev)
                prev->next = next->next;
            else
                loader.instances = next->next;
            break;
        }
        prev = next;
        next = next->next;
    }
    loader_platform_thread_unlock_mutex(&loader_global_instance_list_lock);

    // Tear down each ICD: destroy its VkInstance (if one was created), then free the
    // terminator object itself. The next pointer is saved first because loader_icd_destroy
    // frees the node we are standing on.
    while (NULL != icd_terms) {
        if (icd_terms->instance) {
            icd_terms->dispatch.DestroyInstance(icd_terms->instance, pAllocator);
        }
        next_icd_term = icd_terms->next;
        icd_terms->instance = VK_NULL_HANDLE;
        loader_icd_destroy(ptr_instance, icd_terms, pAllocator);

        icd_terms = next_icd_term;
    }

    loader_scanned_icd_clear(ptr_instance, &ptr_instance->icd_tramp_list);
    loader_destroy_generic_list(ptr_instance, (struct loader_generic_list *)&ptr_instance->ext_list);
    if (NULL != ptr_instance->phys_devs_term) {
        // The same terminator object may appear more than once in phys_devs_term; NULL out
        // duplicate entries first so each object is freed exactly once in the loop below.
        for (uint32_t i = 0; i < ptr_instance->phys_dev_count_term; i++) {
            for (uint32_t j = i + 1; j < ptr_instance->phys_dev_count_term; j++) {
                if (ptr_instance->phys_devs_term[i] == ptr_instance->phys_devs_term[j]) {
                    ptr_instance->phys_devs_term[j] = NULL;
                }
            }
        }
        for (uint32_t i = 0; i < ptr_instance->phys_dev_count_term; i++) {
            loader_instance_heap_free(ptr_instance, ptr_instance->phys_devs_term[i]);
        }
        loader_instance_heap_free(ptr_instance, ptr_instance->phys_devs_term);
    }
    if (NULL != ptr_instance->phys_dev_groups_term) {
        for (uint32_t i = 0; i < ptr_instance->phys_dev_group_count_term; i++) {
            loader_instance_heap_free(ptr_instance, ptr_instance->phys_dev_groups_term[i]);
        }
        loader_instance_heap_free(ptr_instance, ptr_instance->phys_dev_groups_term);
    }
    // Free the per-instance tables for device / physical-device extension functions.
    loader_free_dev_ext_table(ptr_instance);
    loader_free_phys_dev_ext_table(ptr_instance);

    free_string_list(ptr_instance, &ptr_instance->enabled_layer_names);
}
5623
5624 VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateDevice(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo *pCreateInfo,
5625 const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
5626 VkResult res = VK_SUCCESS;
5627 struct loader_physical_device_term *phys_dev_term;
5628 phys_dev_term = (struct loader_physical_device_term *)physicalDevice;
5629 struct loader_icd_term *icd_term = phys_dev_term->this_icd_term;
5630
5631 struct loader_device *dev = (struct loader_device *)*pDevice;
5632 PFN_vkCreateDevice fpCreateDevice = icd_term->dispatch.CreateDevice;
5633 struct loader_extension_list icd_exts;
5634
5635 VkBaseOutStructure *caller_dgci_container = NULL;
5636 VkDeviceGroupDeviceCreateInfo *caller_dgci = NULL;
5637
5638 if (NULL == dev) {
5639 loader_log(icd_term->this_instance, VULKAN_LOADER_WARN_BIT, 0,
5640 "terminator_CreateDevice: Loader device pointer null encountered. Possibly set by active layer. (Policy "
5641 "#LLP_LAYER_22)");
5642 } else if (DEVICE_DISP_TABLE_MAGIC_NUMBER != dev->loader_dispatch.core_dispatch.magic) {
5643 loader_log(icd_term->this_instance, VULKAN_LOADER_WARN_BIT, 0,
5644 "terminator_CreateDevice: Device pointer (%p) has invalid MAGIC value 0x%08lx. The expected value is "
5645 "0x10ADED040410ADED. Device value possibly "
5646 "corrupted by active layer (Policy #LLP_LAYER_22). ",
5647 dev, dev->loader_dispatch.core_dispatch.magic);
5648 }
5649
5650 dev->phys_dev_term = phys_dev_term;
5651
5652 icd_exts.list = NULL;
5653
5654 if (fpCreateDevice == NULL) {
5655 loader_log(icd_term->this_instance, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
5656 "terminator_CreateDevice: No vkCreateDevice command exposed by ICD %s", icd_term->scanned_icd->lib_name);
5657 res = VK_ERROR_INITIALIZATION_FAILED;
5658 goto out;
5659 }
5660
5661 VkDeviceCreateInfo localCreateInfo;
5662 memcpy(&localCreateInfo, pCreateInfo, sizeof(localCreateInfo));
5663
5664 // NOTE: Need to filter the extensions to only those supported by the ICD.
5665 // No ICD will advertise support for layers. An ICD library could support a layer,
5666 // but it would be independent of the actual ICD, just in the same library.
5667 char **filtered_extension_names = NULL;
5668 if (0 < pCreateInfo->enabledExtensionCount) {
5669 filtered_extension_names = loader_stack_alloc(pCreateInfo->enabledExtensionCount * sizeof(char *));
5670 if (NULL == filtered_extension_names) {
5671 loader_log(icd_term->this_instance, VULKAN_LOADER_ERROR_BIT, 0,
5672 "terminator_CreateDevice: Failed to create extension name storage for %d extensions",
5673 pCreateInfo->enabledExtensionCount);
5674 return VK_ERROR_OUT_OF_HOST_MEMORY;
5675 }
5676 }
5677
5678 localCreateInfo.enabledLayerCount = 0;
5679 localCreateInfo.ppEnabledLayerNames = NULL;
5680
5681 localCreateInfo.enabledExtensionCount = 0;
5682 localCreateInfo.ppEnabledExtensionNames = (const char *const *)filtered_extension_names;
5683
5684 // Get the physical device (ICD) extensions
5685 res = loader_init_generic_list(icd_term->this_instance, (struct loader_generic_list *)&icd_exts, sizeof(VkExtensionProperties));
5686 if (VK_SUCCESS != res) {
5687 goto out;
5688 }
5689
5690 res = loader_add_device_extensions(icd_term->this_instance, icd_term->dispatch.EnumerateDeviceExtensionProperties,
5691 phys_dev_term->phys_dev, icd_term->scanned_icd->lib_name, &icd_exts);
5692 if (res != VK_SUCCESS) {
5693 goto out;
5694 }
5695
5696 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
5697 const char *extension_name = pCreateInfo->ppEnabledExtensionNames[i];
5698 VkExtensionProperties *prop = get_extension_property(extension_name, &icd_exts);
5699 if (prop) {
5700 filtered_extension_names[localCreateInfo.enabledExtensionCount] = (char *)extension_name;
5701 localCreateInfo.enabledExtensionCount++;
5702 } else {
5703 loader_log(icd_term->this_instance, VULKAN_LOADER_DEBUG_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
5704 "vkCreateDevice extension %s not available for devices associated with ICD %s", extension_name,
5705 icd_term->scanned_icd->lib_name);
5706 }
5707 }
5708
5709 // Before we continue, If KHX_device_group is the list of enabled and viable extensions, then we then need to look for the
5710 // corresponding VkDeviceGroupDeviceCreateInfo struct in the device list and replace all the physical device values (which
5711 // are really loader physical device terminator values) with the ICD versions.
5712 // if (icd_term->this_instance->enabled_known_extensions.khr_device_group_creation == 1) {
5713 {
5714 VkBaseOutStructure *pNext = (VkBaseOutStructure *)localCreateInfo.pNext;
5715 VkBaseOutStructure *pPrev = (VkBaseOutStructure *)&localCreateInfo;
5716 while (NULL != pNext) {
5717 if (VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO == pNext->sType) {
5718 VkDeviceGroupDeviceCreateInfo *cur_struct = (VkDeviceGroupDeviceCreateInfo *)pNext;
5719 if (0 < cur_struct->physicalDeviceCount && NULL != cur_struct->pPhysicalDevices) {
5720 VkDeviceGroupDeviceCreateInfo *temp_struct = loader_stack_alloc(sizeof(VkDeviceGroupDeviceCreateInfo));
5721 VkPhysicalDevice *phys_dev_array = NULL;
5722 if (NULL == temp_struct) {
5723 return VK_ERROR_OUT_OF_HOST_MEMORY;
5724 }
5725 memcpy(temp_struct, cur_struct, sizeof(VkDeviceGroupDeviceCreateInfo));
5726 phys_dev_array = loader_stack_alloc(sizeof(VkPhysicalDevice) * cur_struct->physicalDeviceCount);
5727 if (NULL == phys_dev_array) {
5728 return VK_ERROR_OUT_OF_HOST_MEMORY;
5729 }
5730
5731 // Before calling down, replace the incoming physical device values (which are really loader terminator
5732 // physical devices) with the ICDs physical device values.
5733 struct loader_physical_device_term *cur_term;
5734 for (uint32_t phys_dev = 0; phys_dev < cur_struct->physicalDeviceCount; phys_dev++) {
5735 cur_term = (struct loader_physical_device_term *)cur_struct->pPhysicalDevices[phys_dev];
5736 phys_dev_array[phys_dev] = cur_term->phys_dev;
5737 }
5738 temp_struct->pPhysicalDevices = phys_dev_array;
5739
5740 // Keep track of pointers to restore pNext chain before returning
5741 caller_dgci_container = pPrev;
5742 caller_dgci = cur_struct;
5743
5744 // Replace the old struct in the pNext chain with this one.
5745 pPrev->pNext = (VkBaseOutStructure *)temp_struct;
5746 }
5747 break;
5748 }
5749
5750 pPrev = pNext;
5751 pNext = pNext->pNext;
5752 }
5753 }
5754
5755 // Handle loader emulation for structs that are not supported by the ICD:
5756 // Presently, the emulation leaves the pNext chain alone. This means that the ICD will receive items in the chain which
5757 // are not recognized by the ICD. If this causes the ICD to fail, then the items would have to be removed here. The current
5758 // implementation does not remove them because copying the pNext chain would be impossible if the loader does not recognize
5759 // the any of the struct types, as the loader would not know the size to allocate and copy.
5760 // if (icd_term->dispatch.GetPhysicalDeviceFeatures2 == NULL && icd_term->dispatch.GetPhysicalDeviceFeatures2KHR == NULL) {
5761 {
5762 const void *pNext = localCreateInfo.pNext;
5763 while (pNext != NULL) {
5764 switch (*(VkStructureType *)pNext) {
5765 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2: {
5766 const VkPhysicalDeviceFeatures2KHR *features = pNext;
5767
5768 if (icd_term->dispatch.GetPhysicalDeviceFeatures2 == NULL &&
5769 icd_term->dispatch.GetPhysicalDeviceFeatures2KHR == NULL) {
5770 loader_log(icd_term->this_instance, VULKAN_LOADER_INFO_BIT, 0,
5771 "vkCreateDevice: Emulating handling of VkPhysicalDeviceFeatures2 in pNext chain for ICD \"%s\"",
5772 icd_term->scanned_icd->lib_name);
5773
5774 // Verify that VK_KHR_get_physical_device_properties2 is enabled
5775 if (icd_term->this_instance->enabled_known_extensions.khr_get_physical_device_properties2) {
5776 localCreateInfo.pEnabledFeatures = &features->features;
5777 }
5778 }
5779
5780 // Leave this item in the pNext chain for now
5781
5782 pNext = features->pNext;
5783 break;
5784 }
5785
5786 case VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO: {
5787 const VkDeviceGroupDeviceCreateInfo *group_info = pNext;
5788
5789 if (icd_term->dispatch.EnumeratePhysicalDeviceGroups == NULL &&
5790 icd_term->dispatch.EnumeratePhysicalDeviceGroupsKHR == NULL) {
5791 loader_log(icd_term->this_instance, VULKAN_LOADER_INFO_BIT, 0,
5792 "vkCreateDevice: Emulating handling of VkPhysicalDeviceGroupProperties in pNext chain for "
5793 "ICD \"%s\"",
5794 icd_term->scanned_icd->lib_name);
5795
5796 // The group must contain only this one device, since physical device groups aren't actually supported
5797 if (group_info->physicalDeviceCount != 1) {
5798 loader_log(icd_term->this_instance, VULKAN_LOADER_ERROR_BIT, 0,
5799 "vkCreateDevice: Emulation failed to create device from device group info");
5800 res = VK_ERROR_INITIALIZATION_FAILED;
5801 goto out;
5802 }
5803 }
5804
5805 // Nothing needs to be done here because we're leaving the item in the pNext chain and because the spec
5806 // states that the physicalDevice argument must be included in the device group, and we've already checked
5807 // that it is
5808
5809 pNext = group_info->pNext;
5810 break;
5811 }
5812
5813 // Multiview properties are also allowed, but since VK_KHX_multiview is a device extension, we'll just let the
5814 // ICD handle that error when the user enables the extension here
5815 default: {
5816 const VkBaseInStructure *header = pNext;
5817 pNext = header->pNext;
5818 break;
5819 }
5820 }
5821 }
5822 }
5823
5824 VkBool32 maintenance5_feature_enabled = false;
5825 // Look for the VkPhysicalDeviceMaintenance5FeaturesKHR struct to see if the feature was enabled
5826 {
5827 const void *pNext = localCreateInfo.pNext;
5828 while (pNext != NULL) {
5829 switch (*(VkStructureType *)pNext) {
5830 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_5_FEATURES_KHR: {
5831 const VkPhysicalDeviceMaintenance5FeaturesKHR *maintenance_features = pNext;
5832 if (maintenance_features->maintenance5 == VK_TRUE) {
5833 maintenance5_feature_enabled = true;
5834 }
5835 pNext = maintenance_features->pNext;
5836 break;
5837 }
5838
5839 default: {
5840 const VkBaseInStructure *header = pNext;
5841 pNext = header->pNext;
5842 break;
5843 }
5844 }
5845 }
5846 }
5847
5848 // Every extension that has a loader-defined terminator needs to be marked as enabled or disabled so that we know whether or
5849 // not to return that terminator when vkGetDeviceProcAddr is called
5850 for (uint32_t i = 0; i < localCreateInfo.enabledExtensionCount; ++i) {
5851 if (!strcmp(localCreateInfo.ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME)) {
5852 dev->driver_extensions.khr_swapchain_enabled = true;
5853 } else if (!strcmp(localCreateInfo.ppEnabledExtensionNames[i], VK_KHR_DISPLAY_SWAPCHAIN_EXTENSION_NAME)) {
5854 dev->driver_extensions.khr_display_swapchain_enabled = true;
5855 } else if (!strcmp(localCreateInfo.ppEnabledExtensionNames[i], VK_KHR_DEVICE_GROUP_EXTENSION_NAME)) {
5856 dev->driver_extensions.khr_device_group_enabled = true;
5857 } else if (!strcmp(localCreateInfo.ppEnabledExtensionNames[i], VK_EXT_DEBUG_MARKER_EXTENSION_NAME)) {
5858 dev->driver_extensions.ext_debug_marker_enabled = true;
5859 } else if (!strcmp(localCreateInfo.ppEnabledExtensionNames[i], "VK_EXT_full_screen_exclusive")) {
5860 dev->driver_extensions.ext_full_screen_exclusive_enabled = true;
5861 } else if (!strcmp(localCreateInfo.ppEnabledExtensionNames[i], VK_KHR_MAINTENANCE_5_EXTENSION_NAME) &&
5862 maintenance5_feature_enabled) {
5863 dev->should_ignore_device_commands_from_newer_version = true;
5864 }
5865 }
5866 dev->layer_extensions.ext_debug_utils_enabled = icd_term->this_instance->enabled_known_extensions.ext_debug_utils;
5867 dev->driver_extensions.ext_debug_utils_enabled = icd_term->this_instance->enabled_known_extensions.ext_debug_utils;
5868
5869 VkPhysicalDeviceProperties properties;
5870 icd_term->dispatch.GetPhysicalDeviceProperties(phys_dev_term->phys_dev, &properties);
5871 if (!dev->driver_extensions.khr_device_group_enabled) {
5872 if (properties.apiVersion >= VK_API_VERSION_1_1) {
5873 dev->driver_extensions.khr_device_group_enabled = true;
5874 }
5875 }
5876
5877 loader_log(icd_term->this_instance, VULKAN_LOADER_LAYER_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
5878 " Using \"%s\" with driver: \"%s\"", properties.deviceName, icd_term->scanned_icd->lib_name);
5879
5880 res = fpCreateDevice(phys_dev_term->phys_dev, &localCreateInfo, pAllocator, &dev->icd_device);
5881 if (res != VK_SUCCESS) {
5882 loader_log(icd_term->this_instance, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
5883 "terminator_CreateDevice: Failed in ICD %s vkCreateDevice call", icd_term->scanned_icd->lib_name);
5884 goto out;
5885 }
5886
5887 *pDevice = dev->icd_device;
5888 loader_add_logical_device(icd_term, dev);
5889
5890 // Init dispatch pointer in new device object
5891 loader_init_dispatch(*pDevice, &dev->loader_dispatch);
5892
5893 out:
5894 if (NULL != icd_exts.list) {
5895 loader_destroy_generic_list(icd_term->this_instance, (struct loader_generic_list *)&icd_exts);
5896 }
5897
5898 // Restore pNext pointer to old VkDeviceGroupDeviceCreateInfo
5899 // in the chain to maintain consistency for the caller.
5900 if (caller_dgci_container != NULL) {
5901 caller_dgci_container->pNext = (VkBaseOutStructure *)caller_dgci;
5902 }
5903
5904 return res;
5905 }
5906
// Update the trampoline physical devices with the wrapped version.
// We always want to re-use previously created physical device pointers, since the application
// may still be holding objects returned by an earlier enumeration.
VkResult setup_loader_tramp_phys_devs(struct loader_instance *inst, uint32_t phys_dev_count, VkPhysicalDevice *phys_devs) {
    VkResult res = VK_SUCCESS;
    uint32_t found_count = 0;
    uint32_t old_count = inst->phys_dev_count_tramp;
    uint32_t new_count = inst->total_gpu_count;
    struct loader_physical_device_tramp **new_phys_devs = NULL;

    // Nothing to wrap if the caller passed no devices.
    if (0 == phys_dev_count) {
        return VK_SUCCESS;
    }
    if (phys_dev_count > new_count) {
        new_count = phys_dev_count;
    }

    // We want an old to new index array and a new to old index array
    int32_t *old_to_new_index = (int32_t *)loader_stack_alloc(sizeof(int32_t) * old_count);
    int32_t *new_to_old_index = (int32_t *)loader_stack_alloc(sizeof(int32_t) * new_count);
    if (NULL == old_to_new_index || NULL == new_to_old_index) {
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }

    // Initialize both index maps to "no match" (-1)
    for (uint32_t cur_idx = 0; cur_idx < old_count; ++cur_idx) {
        old_to_new_index[cur_idx] = -1;
    }
    for (uint32_t cur_idx = 0; cur_idx < new_count; ++cur_idx) {
        new_to_old_index[cur_idx] = -1;
    }

    // Figure out the old->new and new->old indices by matching the underlying ICD
    // VkPhysicalDevice handles.
    for (uint32_t cur_idx = 0; cur_idx < old_count; ++cur_idx) {
        for (uint32_t new_idx = 0; new_idx < phys_dev_count; ++new_idx) {
            if (inst->phys_devs_tramp[cur_idx]->phys_dev == phys_devs[new_idx]) {
                old_to_new_index[cur_idx] = (int32_t)new_idx;
                new_to_old_index[new_idx] = (int32_t)cur_idx;
                found_count++;
                break;
            }
        }
    }

    // If we found exactly the number of items we were looking for as we had before. Then everything
    // we already have is good enough and we just need to update the array that was passed in with
    // the loader values.
    if (found_count == phys_dev_count && 0 != old_count && old_count == new_count) {
        for (uint32_t new_idx = 0; new_idx < phys_dev_count; ++new_idx) {
            for (uint32_t cur_idx = 0; cur_idx < old_count; ++cur_idx) {
                if (old_to_new_index[cur_idx] == (int32_t)new_idx) {
                    phys_devs[new_idx] = (VkPhysicalDevice)inst->phys_devs_tramp[cur_idx];
                    break;
                }
            }
        }
        // Nothing else to do for this path
        res = VK_SUCCESS;
    } else {
        // Something is different, so do the full path of checking every device and creating a new array to use.
        // This can happen if a device was added, or removed, or we hadn't previously queried all the data and we
        // have more to store.
        new_phys_devs = loader_instance_heap_calloc(inst, sizeof(struct loader_physical_device_tramp *) * new_count,
                                                    VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
        if (NULL == new_phys_devs) {
            loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
                       "setup_loader_tramp_phys_devs: Failed to allocate new physical device array of size %d", new_count);
            res = VK_ERROR_OUT_OF_HOST_MEMORY;
            goto out;
        }

        // Only the first min(new_count, phys_dev_count) slots are filled from the caller's array.
        if (new_count > phys_dev_count) {
            found_count = phys_dev_count;
        } else {
            found_count = new_count;
        }

        // First try to see if an old item exists that matches the new item. If so, just copy it over.
        for (uint32_t new_idx = 0; new_idx < found_count; ++new_idx) {
            bool old_item_found = false;
            for (uint32_t cur_idx = 0; cur_idx < old_count; ++cur_idx) {
                if (old_to_new_index[cur_idx] == (int32_t)new_idx) {
                    // Copy over old item to correct spot in the new array
                    new_phys_devs[new_idx] = inst->phys_devs_tramp[cur_idx];
                    old_item_found = true;
                    break;
                }
            }
            // Something wasn't found, so it's new so add it to the new list
            if (!old_item_found) {
                new_phys_devs[new_idx] = loader_instance_heap_alloc(inst, sizeof(struct loader_physical_device_tramp),
                                                                    VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
                if (NULL == new_phys_devs[new_idx]) {
                    loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
                               "setup_loader_tramp_phys_devs: Failed to allocate new trampoline physical device");
                    res = VK_ERROR_OUT_OF_HOST_MEMORY;
                    goto out;
                }

                // Initialize the new physicalDevice object
                loader_set_dispatch((void *)new_phys_devs[new_idx], inst->disp);
                new_phys_devs[new_idx]->this_instance = inst;
                new_phys_devs[new_idx]->phys_dev = phys_devs[new_idx];
                new_phys_devs[new_idx]->magic = PHYS_TRAMP_MAGIC_NUMBER;
            }

            // Hand the (possibly re-used) trampoline object back to the caller.
            phys_devs[new_idx] = (VkPhysicalDevice)new_phys_devs[new_idx];
        }

        // We usually get here if the user array is smaller than the total number of devices, so copy the
        // remaining devices we have over to the new array.
        uint32_t start = found_count;
        for (uint32_t new_idx = start; new_idx < new_count; ++new_idx) {
            for (uint32_t cur_idx = 0; cur_idx < old_count; ++cur_idx) {
                if (old_to_new_index[cur_idx] == -1) {
                    new_phys_devs[new_idx] = inst->phys_devs_tramp[cur_idx];
                    old_to_new_index[cur_idx] = new_idx;
                    found_count++;
                    break;
                }
            }
        }
    }

out:

    if (NULL != new_phys_devs) {
        if (VK_SUCCESS != res) {
            for (uint32_t new_idx = 0; new_idx < found_count; ++new_idx) {
                // If an OOM occurred inside the copying of the new physical devices into the existing array
                // will leave some of the old physical devices in the array which may have been copied into
                // the new array, leading to them being freed twice. To avoid this we just make sure to not
                // delete physical devices which were copied.
                bool found = false;
                for (uint32_t cur_idx = 0; cur_idx < inst->phys_dev_count_tramp; cur_idx++) {
                    if (new_phys_devs[new_idx] == inst->phys_devs_tramp[cur_idx]) {
                        found = true;
                        break;
                    }
                }
                if (!found) {
                    loader_instance_heap_free(inst, new_phys_devs[new_idx]);
                }
            }
            loader_instance_heap_free(inst, new_phys_devs);
        } else {
            if (new_count > inst->total_gpu_count) {
                inst->total_gpu_count = new_count;
            }
            // Free everything in the old array that was not copied into the new array
            // here. We can't attempt to do that before here since the previous loop
            // looking before the "out:" label may hit an out of memory condition resulting
            // in memory leaking.
            if (NULL != inst->phys_devs_tramp) {
                for (uint32_t i = 0; i < inst->phys_dev_count_tramp; i++) {
                    bool found = false;
                    for (uint32_t j = 0; j < inst->total_gpu_count; j++) {
                        if (inst->phys_devs_tramp[i] == new_phys_devs[j]) {
                            found = true;
                            break;
                        }
                    }
                    if (!found) {
                        loader_instance_heap_free(inst, inst->phys_devs_tramp[i]);
                    }
                }
                loader_instance_heap_free(inst, inst->phys_devs_tramp);
            }
            // Adopt the new array as the instance's trampoline physical device list.
            inst->phys_devs_tramp = new_phys_devs;
            inst->phys_dev_count_tramp = found_count;
        }
    }
    if (VK_SUCCESS != res) {
        inst->total_gpu_count = 0;
    }

    return res;
}
6085
6086 #if defined(LOADER_ENABLE_LINUX_SORT)
6087 bool is_linux_sort_enabled(struct loader_instance *inst) {
6088 bool sort_items = inst->supports_get_dev_prop_2;
6089 char *env_value = loader_getenv("VK_LOADER_DISABLE_SELECT", inst);
6090 if (NULL != env_value) {
6091 int32_t int_env_val = atoi(env_value);
6092 loader_free_getenv(env_value, inst);
6093 if (int_env_val != 0) {
6094 sort_items = false;
6095 }
6096 }
6097 return sort_items;
6098 }
6099 #endif // LOADER_ENABLE_LINUX_SORT
6100
6101 // Look for physical_device in the provided phys_devs list, return true if found and put the index into out_idx, otherwise
6102 // return false
6103 bool find_phys_dev(VkPhysicalDevice physical_device, uint32_t phys_devs_count, struct loader_physical_device_term **phys_devs,
6104 uint32_t *out_idx) {
6105 if (NULL == phys_devs) return false;
6106 for (uint32_t idx = 0; idx < phys_devs_count; idx++) {
6107 if (NULL != phys_devs[idx] && physical_device == phys_devs[idx]->phys_dev) {
6108 *out_idx = idx;
6109 return true;
6110 }
6111 }
6112 return false;
6113 }
6114
6115 // Add physical_device to new_phys_devs
6116 VkResult check_and_add_to_new_phys_devs(struct loader_instance *inst, VkPhysicalDevice physical_device,
6117 struct loader_icd_physical_devices *dev_array, uint32_t *cur_new_phys_dev_count,
6118 struct loader_physical_device_term **new_phys_devs) {
6119 uint32_t out_idx = 0;
6120 uint32_t idx = *cur_new_phys_dev_count;
6121 // Check if the physical_device already exists in the new_phys_devs buffer, that means it was found from both
6122 // EnumerateAdapterPhysicalDevices and EnumeratePhysicalDevices and we need to skip it.
6123 if (find_phys_dev(physical_device, idx, new_phys_devs, &out_idx)) {
6124 return VK_SUCCESS;
6125 }
6126 // Check if it was found in a previous call to vkEnumeratePhysicalDevices, we can just copy over the old data.
6127 if (find_phys_dev(physical_device, inst->phys_dev_count_term, inst->phys_devs_term, &out_idx)) {
6128 new_phys_devs[idx] = inst->phys_devs_term[out_idx];
6129 (*cur_new_phys_dev_count)++;
6130 return VK_SUCCESS;
6131 }
6132
6133 // Exit in case something is already present - this shouldn't happen but better to be safe than overwrite existing data
6134 // since this code has been refactored a half dozen times.
6135 if (NULL != new_phys_devs[idx]) {
6136 return VK_SUCCESS;
6137 }
6138 // If this physical device is new, we need to allocate space for it.
6139 new_phys_devs[idx] =
6140 loader_instance_heap_alloc(inst, sizeof(struct loader_physical_device_term), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
6141 if (NULL == new_phys_devs[idx]) {
6142 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6143 "check_and_add_to_new_phys_devs: Failed to allocate physical device terminator object %d", idx);
6144 return VK_ERROR_OUT_OF_HOST_MEMORY;
6145 }
6146
6147 loader_set_dispatch((void *)new_phys_devs[idx], inst->disp);
6148 new_phys_devs[idx]->this_icd_term = dev_array->icd_term;
6149 new_phys_devs[idx]->icd_index = (uint8_t)(dev_array->icd_index);
6150 new_phys_devs[idx]->phys_dev = physical_device;
6151
6152 // Increment the count of new physical devices
6153 (*cur_new_phys_dev_count)++;
6154 return VK_SUCCESS;
6155 }
6156
6157 /* Enumerate all physical devices from ICDs and add them to inst->phys_devs_term
6158 *
6159 * There are two methods to find VkPhysicalDevices - vkEnumeratePhysicalDevices and vkEnumerateAdapterPhysicalDevices
 * The latter is supported only on Windows, and only by drivers supporting ICD Interface Version 6 and greater.
6161 *
6162 * Once all physical devices are acquired, they need to be pulled into a single list of `loader_physical_device_term`'s.
6163 * They also need to be setup - the icd_term, icd_index, phys_dev, and disp (dispatch table) all need the correct data.
6164 * Additionally, we need to keep using already setup physical devices as they may be in use, thus anything enumerated
6165 * that is already in inst->phys_devs_term will be carried over.
6166 */
6167
6168 VkResult setup_loader_term_phys_devs(struct loader_instance *inst) {
6169 VkResult res = VK_SUCCESS;
6170 struct loader_icd_term *icd_term;
6171 uint32_t icd_idx = 0;
6172 uint32_t windows_sorted_devices_count = 0;
6173 struct loader_icd_physical_devices *windows_sorted_devices_array = NULL;
6174 uint32_t icd_count = 0;
6175 struct loader_icd_physical_devices *icd_phys_dev_array = NULL;
6176 uint32_t new_phys_devs_capacity = 0;
6177 uint32_t new_phys_devs_count = 0;
6178 struct loader_physical_device_term **new_phys_devs = NULL;
6179
6180 #if defined(_WIN32)
6181 // Get the physical devices supported by platform sorting mechanism into a separate list
6182 res = windows_read_sorted_physical_devices(inst, &windows_sorted_devices_count, &windows_sorted_devices_array);
6183 if (VK_SUCCESS != res) {
6184 goto out;
6185 }
6186 #endif
6187
6188 icd_count = inst->total_icd_count;
6189
6190 // Allocate something to store the physical device characteristics that we read from each ICD.
6191 icd_phys_dev_array =
6192 (struct loader_icd_physical_devices *)loader_stack_alloc(sizeof(struct loader_icd_physical_devices) * icd_count);
6193 if (NULL == icd_phys_dev_array) {
6194 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6195 "setup_loader_term_phys_devs: Failed to allocate temporary ICD Physical device info array of size %d",
6196 icd_count);
6197 res = VK_ERROR_OUT_OF_HOST_MEMORY;
6198 goto out;
6199 }
6200 memset(icd_phys_dev_array, 0, sizeof(struct loader_icd_physical_devices) * icd_count);
6201
6202 // For each ICD, query the number of physical devices, and then get an
6203 // internal value for those physical devices.
6204 icd_term = inst->icd_terms;
6205 while (NULL != icd_term) {
6206 res = icd_term->dispatch.EnumeratePhysicalDevices(icd_term->instance, &icd_phys_dev_array[icd_idx].device_count, NULL);
6207 if (VK_SUCCESS != res) {
6208 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6209 "setup_loader_term_phys_devs: Call to ICD %d's \'vkEnumeratePhysicalDevices\' failed with error 0x%08x",
6210 icd_idx, res);
6211 goto out;
6212 }
6213
6214 icd_phys_dev_array[icd_idx].physical_devices =
6215 (VkPhysicalDevice *)loader_stack_alloc(icd_phys_dev_array[icd_idx].device_count * sizeof(VkPhysicalDevice));
6216 if (NULL == icd_phys_dev_array[icd_idx].physical_devices) {
6217 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6218 "setup_loader_term_phys_devs: Failed to allocate temporary ICD Physical device array for ICD %d of size %d",
6219 icd_idx, icd_phys_dev_array[icd_idx].device_count);
6220 res = VK_ERROR_OUT_OF_HOST_MEMORY;
6221 goto out;
6222 }
6223
6224 res = icd_term->dispatch.EnumeratePhysicalDevices(icd_term->instance, &(icd_phys_dev_array[icd_idx].device_count),
6225 icd_phys_dev_array[icd_idx].physical_devices);
6226 if (VK_SUCCESS != res) {
6227 goto out;
6228 }
6229 icd_phys_dev_array[icd_idx].icd_term = icd_term;
6230 icd_phys_dev_array[icd_idx].icd_index = icd_idx;
6231 icd_term = icd_term->next;
6232 ++icd_idx;
6233 }
6234
6235 // Add up both the windows sorted and non windows found physical device counts
6236 for (uint32_t i = 0; i < windows_sorted_devices_count; ++i) {
6237 new_phys_devs_capacity += windows_sorted_devices_array[i].device_count;
6238 }
6239 for (uint32_t i = 0; i < icd_count; ++i) {
6240 new_phys_devs_capacity += icd_phys_dev_array[i].device_count;
6241 }
6242
6243 // Bail out if there are no physical devices reported
6244 if (0 == new_phys_devs_capacity) {
6245 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6246 "setup_loader_term_phys_devs: Failed to detect any valid GPUs in the current config");
6247 res = VK_ERROR_INITIALIZATION_FAILED;
6248 goto out;
6249 }
6250
6251 // Create an allocation large enough to hold both the windows sorting enumeration and non-windows physical device
6252 // enumeration
6253 new_phys_devs = loader_instance_heap_calloc(inst, sizeof(struct loader_physical_device_term *) * new_phys_devs_capacity,
6254 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
6255 if (NULL == new_phys_devs) {
6256 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6257 "setup_loader_term_phys_devs: Failed to allocate new physical device array of size %d", new_phys_devs_capacity);
6258 res = VK_ERROR_OUT_OF_HOST_MEMORY;
6259 goto out;
6260 }
6261
6262 // Copy over everything found through sorted enumeration
6263 for (uint32_t i = 0; i < windows_sorted_devices_count; ++i) {
6264 for (uint32_t j = 0; j < windows_sorted_devices_array[i].device_count; ++j) {
6265 res = check_and_add_to_new_phys_devs(inst, windows_sorted_devices_array[i].physical_devices[j],
6266 &windows_sorted_devices_array[i], &new_phys_devs_count, new_phys_devs);
6267 if (res == VK_ERROR_OUT_OF_HOST_MEMORY) {
6268 goto out;
6269 }
6270 }
6271 }
6272
6273 // Now go through the rest of the physical devices and add them to new_phys_devs
6274 #if defined(LOADER_ENABLE_LINUX_SORT)
6275
6276 if (is_linux_sort_enabled(inst)) {
6277 for (uint32_t dev = new_phys_devs_count; dev < new_phys_devs_capacity; ++dev) {
6278 new_phys_devs[dev] =
6279 loader_instance_heap_alloc(inst, sizeof(struct loader_physical_device_term), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
6280 if (NULL == new_phys_devs[dev]) {
6281 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6282 "setup_loader_term_phys_devs: Failed to allocate physical device terminator object %d", dev);
6283 res = VK_ERROR_OUT_OF_HOST_MEMORY;
6284 goto out;
6285 }
6286 }
6287
6288 // Get the physical devices supported by platform sorting mechanism into a separate list
6289 // Pass in a sublist to the function so it only operates on the correct elements. This means passing in a pointer to the
6290 // current next element in new_phys_devs and passing in a `count` of currently unwritten elements
6291 res = linux_read_sorted_physical_devices(inst, icd_count, icd_phys_dev_array, new_phys_devs_capacity - new_phys_devs_count,
6292 &new_phys_devs[new_phys_devs_count]);
6293 if (res == VK_ERROR_OUT_OF_HOST_MEMORY) {
6294 goto out;
6295 }
6296 // Keep previously allocated physical device info since apps may already be using that!
6297 for (uint32_t new_idx = new_phys_devs_count; new_idx < new_phys_devs_capacity; new_idx++) {
6298 for (uint32_t old_idx = 0; old_idx < inst->phys_dev_count_term; old_idx++) {
6299 if (new_phys_devs[new_idx]->phys_dev == inst->phys_devs_term[old_idx]->phys_dev) {
6300 loader_log(inst, VULKAN_LOADER_DEBUG_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
6301 "Copying old device %u into new device %u", old_idx, new_idx);
6302 // Free the old new_phys_devs info since we're not using it before we assign the new info
6303 loader_instance_heap_free(inst, new_phys_devs[new_idx]);
6304 new_phys_devs[new_idx] = inst->phys_devs_term[old_idx];
6305 break;
6306 }
6307 }
6308 }
6309 // now set the count to the capacity, as now the list is filled in
6310 new_phys_devs_count = new_phys_devs_capacity;
6311 // We want the following code to run if either linux sorting is disabled at compile time or runtime
6312 } else {
6313 #endif // LOADER_ENABLE_LINUX_SORT
6314
6315 // Copy over everything found through the non-sorted means.
6316 for (uint32_t i = 0; i < icd_count; ++i) {
6317 for (uint32_t j = 0; j < icd_phys_dev_array[i].device_count; ++j) {
6318 res = check_and_add_to_new_phys_devs(inst, icd_phys_dev_array[i].physical_devices[j], &icd_phys_dev_array[i],
6319 &new_phys_devs_count, new_phys_devs);
6320 if (res == VK_ERROR_OUT_OF_HOST_MEMORY) {
6321 goto out;
6322 }
6323 }
6324 }
6325 #if defined(LOADER_ENABLE_LINUX_SORT)
6326 }
6327 #endif // LOADER_ENABLE_LINUX_SORT
6328 out:
6329
6330 if (VK_SUCCESS != res) {
6331 if (NULL != new_phys_devs) {
6332 // We've encountered an error, so we should free the new buffers.
6333 for (uint32_t i = 0; i < new_phys_devs_capacity; i++) {
6334 // May not have allocated this far, skip it if we hadn't.
6335 if (new_phys_devs[i] == NULL) continue;
6336
6337 // If an OOM occurred inside the copying of the new physical devices into the existing array
6338 // will leave some of the old physical devices in the array which may have been copied into
6339 // the new array, leading to them being freed twice. To avoid this we just make sure to not
6340 // delete physical devices which were copied.
6341 bool found = false;
6342 if (NULL != inst->phys_devs_term) {
6343 for (uint32_t old_idx = 0; old_idx < inst->phys_dev_count_term; old_idx++) {
6344 if (new_phys_devs[i] == inst->phys_devs_term[old_idx]) {
6345 found = true;
6346 break;
6347 }
6348 }
6349 }
6350 if (!found) {
6351 loader_instance_heap_free(inst, new_phys_devs[i]);
6352 }
6353 }
6354 loader_instance_heap_free(inst, new_phys_devs);
6355 }
6356 inst->total_gpu_count = 0;
6357 } else {
6358 if (NULL != inst->phys_devs_term) {
6359 // Free everything in the old array that was not copied into the new array
6360 // here. We can't attempt to do that before here since the previous loop
6361 // looking before the "out:" label may hit an out of memory condition resulting
6362 // in memory leaking.
6363 for (uint32_t i = 0; i < inst->phys_dev_count_term; i++) {
6364 bool found = false;
6365 for (uint32_t j = 0; j < new_phys_devs_count; j++) {
6366 if (new_phys_devs != NULL && inst->phys_devs_term[i] == new_phys_devs[j]) {
6367 found = true;
6368 break;
6369 }
6370 }
6371 if (!found) {
6372 loader_instance_heap_free(inst, inst->phys_devs_term[i]);
6373 }
6374 }
6375 loader_instance_heap_free(inst, inst->phys_devs_term);
6376 }
6377
6378 // Swap out old and new devices list
6379 inst->phys_dev_count_term = new_phys_devs_count;
6380 inst->phys_devs_term = new_phys_devs;
6381 inst->total_gpu_count = new_phys_devs_count;
6382 }
6383
6384 if (windows_sorted_devices_array != NULL) {
6385 for (uint32_t i = 0; i < windows_sorted_devices_count; ++i) {
6386 if (windows_sorted_devices_array[i].device_count > 0 && windows_sorted_devices_array[i].physical_devices != NULL) {
6387 loader_instance_heap_free(inst, windows_sorted_devices_array[i].physical_devices);
6388 }
6389 }
6390 loader_instance_heap_free(inst, windows_sorted_devices_array);
6391 }
6392
6393 return res;
6394 }
6395
6396 VkResult setup_loader_tramp_phys_dev_groups(struct loader_instance *inst, uint32_t group_count,
6397 VkPhysicalDeviceGroupProperties *groups) {
6398 VkResult res = VK_SUCCESS;
6399 uint32_t cur_idx;
6400 uint32_t dev_idx;
6401
6402 if (0 == group_count) {
6403 return VK_SUCCESS;
6404 }
6405
6406 // Generate a list of all the devices and convert them to the loader ID
6407 uint32_t phys_dev_count = 0;
6408 for (cur_idx = 0; cur_idx < group_count; ++cur_idx) {
6409 phys_dev_count += groups[cur_idx].physicalDeviceCount;
6410 }
6411 VkPhysicalDevice *devices = (VkPhysicalDevice *)loader_stack_alloc(sizeof(VkPhysicalDevice) * phys_dev_count);
6412 if (NULL == devices) {
6413 return VK_ERROR_OUT_OF_HOST_MEMORY;
6414 }
6415
6416 uint32_t cur_device = 0;
6417 for (cur_idx = 0; cur_idx < group_count; ++cur_idx) {
6418 for (dev_idx = 0; dev_idx < groups[cur_idx].physicalDeviceCount; ++dev_idx) {
6419 devices[cur_device++] = groups[cur_idx].physicalDevices[dev_idx];
6420 }
6421 }
6422
6423 // Update the devices based on the loader physical device values.
6424 res = setup_loader_tramp_phys_devs(inst, phys_dev_count, devices);
6425 if (VK_SUCCESS != res) {
6426 return res;
6427 }
6428
6429 // Update the devices in the group structures now
6430 cur_device = 0;
6431 for (cur_idx = 0; cur_idx < group_count; ++cur_idx) {
6432 for (dev_idx = 0; dev_idx < groups[cur_idx].physicalDeviceCount; ++dev_idx) {
6433 groups[cur_idx].physicalDevices[dev_idx] = devices[cur_device++];
6434 }
6435 }
6436
6437 return res;
6438 }
6439
6440 VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount,
6441 VkPhysicalDevice *pPhysicalDevices) {
6442 struct loader_instance *inst = (struct loader_instance *)instance;
6443 VkResult res = VK_SUCCESS;
6444
6445 // Always call the setup loader terminator physical devices because they may
6446 // have changed at any point.
6447 res = setup_loader_term_phys_devs(inst);
6448 if (VK_SUCCESS != res) {
6449 goto out;
6450 }
6451
6452 uint32_t copy_count = inst->phys_dev_count_term;
6453 if (NULL != pPhysicalDevices) {
6454 if (copy_count > *pPhysicalDeviceCount) {
6455 copy_count = *pPhysicalDeviceCount;
6456 loader_log(inst, VULKAN_LOADER_INFO_BIT, 0,
6457 "terminator_EnumeratePhysicalDevices : Trimming device count from %d to %d.", inst->phys_dev_count_term,
6458 copy_count);
6459 res = VK_INCOMPLETE;
6460 }
6461
6462 for (uint32_t i = 0; i < copy_count; i++) {
6463 pPhysicalDevices[i] = (VkPhysicalDevice)inst->phys_devs_term[i];
6464 }
6465 }
6466
6467 *pPhysicalDeviceCount = copy_count;
6468
6469 out:
6470
6471 return res;
6472 }
6473
// Terminator for vkEnumerateDeviceExtensionProperties. Handles three distinct cases:
//   1. pLayerName != "" : report the device extensions a named layer provides (from its JSON).
//   2. pProperties != NULL : fill the caller's array with driver extensions plus the device
//      extensions of active implicit layers (de-duplicated).
//   3. pProperties == NULL : report only the de-duplicated count.
VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                                                             const char *pLayerName, uint32_t *pPropertyCount,
                                                                             VkExtensionProperties *pProperties) {
    // NOTE(review): returning VK_INCOMPLETE for a NULL pPropertyCount is unusual - the spec
    // requires a valid pointer - but this guard avoids a crash on invalid usage.
    if (NULL == pPropertyCount) {
        return VK_INCOMPLETE;
    }

    struct loader_physical_device_term *phys_dev_term;

    // Any layer or trampoline wrapping should be removed at this point in time can just cast to the expected
    // type for VkPhysicalDevice.
    phys_dev_term = (struct loader_physical_device_term *)physicalDevice;

    // if we got here with a non-empty pLayerName, look up the extensions
    // from the json
    if (pLayerName != NULL && strlen(pLayerName) > 0) {
        uint32_t count;
        uint32_t copy_size;
        const struct loader_instance *inst = phys_dev_term->this_icd_term->this_instance;
        struct loader_device_extension_list *dev_ext_list = NULL;
        struct loader_device_extension_list local_ext_list;
        memset(&local_ext_list, 0, sizeof(local_ext_list));
        if (vk_string_validate(MaxLoaderStringLength, pLayerName) == VK_STRING_ERROR_NONE) {
            // Find the named layer's device extension list parsed from its manifest.
            for (uint32_t i = 0; i < inst->instance_layer_list.count; i++) {
                struct loader_layer_properties *props = &inst->instance_layer_list.list[i];
                if (strcmp(props->info.layerName, pLayerName) == 0) {
                    dev_ext_list = &props->device_extension_list;
                }
            }

            count = (dev_ext_list == NULL) ? 0 : dev_ext_list->count;
            // Count-query only: report the count and exit.
            if (pProperties == NULL) {
                *pPropertyCount = count;
                loader_destroy_generic_list(inst, (struct loader_generic_list *)&local_ext_list);
                return VK_SUCCESS;
            }

            // Copy at most the caller-provided capacity.
            copy_size = *pPropertyCount < count ? *pPropertyCount : count;
            for (uint32_t i = 0; i < copy_size; i++) {
                memcpy(&pProperties[i], &dev_ext_list->list[i].props, sizeof(VkExtensionProperties));
            }
            *pPropertyCount = copy_size;

            // local_ext_list is never populated on this path, so this destroy is effectively a no-op.
            loader_destroy_generic_list(inst, (struct loader_generic_list *)&local_ext_list);
            if (copy_size < count) {
                return VK_INCOMPLETE;
            }
        } else {
            loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
                       "vkEnumerateDeviceExtensionProperties: pLayerName is too long or is badly formed");
            return VK_ERROR_EXTENSION_NOT_PRESENT;
        }

        return VK_SUCCESS;
    }

    // user is querying driver extensions and has supplied their own storage - just fill it out
    else if (pProperties) {
        struct loader_icd_term *icd_term = phys_dev_term->this_icd_term;
        uint32_t written_count = *pPropertyCount;
        VkResult res =
            icd_term->dispatch.EnumerateDeviceExtensionProperties(phys_dev_term->phys_dev, NULL, &written_count, pProperties);
        if (res != VK_SUCCESS) {
            return res;
        }

        // Iterate over active layers, if they are an implicit layer, add their device extensions
        // After calling into the driver, written_count contains the amount of device extensions written. We can therefore write
        // layer extensions starting at that point in pProperties
        for (uint32_t i = 0; i < icd_term->this_instance->expanded_activated_layer_list.count; i++) {
            struct loader_layer_properties *layer_props = icd_term->this_instance->expanded_activated_layer_list.list[i];
            // Implicit layers are those without the explicit flag.
            if (0 == (layer_props->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER)) {
                struct loader_device_extension_list *layer_ext_list = &layer_props->device_extension_list;
                for (uint32_t j = 0; j < layer_ext_list->count; j++) {
                    struct loader_dev_ext_props *cur_ext_props = &layer_ext_list->list[j];
                    // look for duplicates
                    if (has_vk_extension_property_array(&cur_ext_props->props, written_count, pProperties)) {
                        continue;
                    }

                    // Caller's array is full - can't append any more layer extensions.
                    if (*pPropertyCount <= written_count) {
                        return VK_INCOMPLETE;
                    }

                    memcpy(&pProperties[written_count], &cur_ext_props->props, sizeof(VkExtensionProperties));
                    written_count++;
                }
            }
        }
        // Make sure we update the pPropertyCount with the how many were written
        *pPropertyCount = written_count;
        return res;
    }
    // Use `goto out;` for rest of this function

    // This case is during the call down the instance chain with pLayerName == NULL and pProperties == NULL
    struct loader_icd_term *icd_term = phys_dev_term->this_icd_term;
    struct loader_extension_list all_exts = {0};
    VkResult res;

    // We need to find the count without duplicates. This requires querying the driver for the names of the extensions.
    res = icd_term->dispatch.EnumerateDeviceExtensionProperties(phys_dev_term->phys_dev, NULL, &all_exts.count, NULL);
    if (res != VK_SUCCESS) {
        goto out;
    }
    // Then allocate memory to store the physical device extension list + the extensions layers provide
    // all_exts.count currently is the number of driver extensions
    // The +20 leaves headroom for layer-provided extensions so loader_add_to_ext_list rarely reallocates.
    all_exts.capacity = sizeof(VkExtensionProperties) * (all_exts.count + 20);
    all_exts.list = loader_instance_heap_alloc(icd_term->this_instance, all_exts.capacity, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
    if (NULL == all_exts.list) {
        res = VK_ERROR_OUT_OF_HOST_MEMORY;
        goto out;
    }

    // Get the available device extensions and put them in all_exts.list
    res = icd_term->dispatch.EnumerateDeviceExtensionProperties(phys_dev_term->phys_dev, NULL, &all_exts.count, all_exts.list);
    if (res != VK_SUCCESS) {
        goto out;
    }

    // Iterate over active layers, if they are an implicit layer, add their device extensions to all_exts.list
    for (uint32_t i = 0; i < icd_term->this_instance->expanded_activated_layer_list.count; i++) {
        struct loader_layer_properties *layer_props = icd_term->this_instance->expanded_activated_layer_list.list[i];
        if (0 == (layer_props->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER)) {
            struct loader_device_extension_list *layer_ext_list = &layer_props->device_extension_list;
            for (uint32_t j = 0; j < layer_ext_list->count; j++) {
                // loader_add_to_ext_list skips duplicates, keeping the count de-duplicated.
                res = loader_add_to_ext_list(icd_term->this_instance, &all_exts, 1, &layer_ext_list->list[j].props);
                if (res != VK_SUCCESS) {
                    goto out;
                }
            }
        }
    }

    // Write out the final de-duplicated count to pPropertyCount
    *pPropertyCount = all_exts.count;
    res = VK_SUCCESS;

out:

    loader_destroy_generic_list(icd_term->this_instance, (struct loader_generic_list *)&all_exts);
    return res;
}
6617
6618 VkStringErrorFlags vk_string_validate(const int max_length, const char *utf8) {
6619 VkStringErrorFlags result = VK_STRING_ERROR_NONE;
6620 int num_char_bytes = 0;
6621 int i, j;
6622
6623 if (utf8 == NULL) {
6624 return VK_STRING_ERROR_NULL_PTR;
6625 }
6626
6627 for (i = 0; i <= max_length; i++) {
6628 if (utf8[i] == 0) {
6629 break;
6630 } else if (i == max_length) {
6631 result |= VK_STRING_ERROR_LENGTH;
6632 break;
6633 } else if ((utf8[i] >= 0x20) && (utf8[i] < 0x7f)) {
6634 num_char_bytes = 0;
6635 } else if ((utf8[i] & UTF8_ONE_BYTE_MASK) == UTF8_ONE_BYTE_CODE) {
6636 num_char_bytes = 1;
6637 } else if ((utf8[i] & UTF8_TWO_BYTE_MASK) == UTF8_TWO_BYTE_CODE) {
6638 num_char_bytes = 2;
6639 } else if ((utf8[i] & UTF8_THREE_BYTE_MASK) == UTF8_THREE_BYTE_CODE) {
6640 num_char_bytes = 3;
6641 } else {
6642 result = VK_STRING_ERROR_BAD_DATA;
6643 }
6644
6645 // Validate the following num_char_bytes of data
6646 for (j = 0; (j < num_char_bytes) && (i < max_length); j++) {
6647 if (++i == max_length) {
6648 result |= VK_STRING_ERROR_LENGTH;
6649 break;
6650 }
6651 if ((utf8[i] & UTF8_DATA_BYTE_MASK) != UTF8_DATA_BYTE_CODE) {
6652 result |= VK_STRING_ERROR_BAD_DATA;
6653 }
6654 }
6655 }
6656 return result;
6657 }
6658
6659 VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumerateInstanceVersion(const VkEnumerateInstanceVersionChain *chain,
6660 uint32_t *pApiVersion) {
6661 (void)chain;
6662 // NOTE: The Vulkan WG doesn't want us checking pApiVersion for NULL, but instead
6663 // prefers us crashing.
6664 *pApiVersion = VK_HEADER_VERSION_COMPLETE;
6665 return VK_SUCCESS;
6666 }
6667
6668 VKAPI_ATTR VkResult VKAPI_CALL
6669 terminator_EnumerateInstanceExtensionProperties(const VkEnumerateInstanceExtensionPropertiesChain *chain, const char *pLayerName,
6670 uint32_t *pPropertyCount, VkExtensionProperties *pProperties) {
6671 (void)chain;
6672 struct loader_extension_list *global_ext_list = NULL;
6673 struct loader_layer_list instance_layers;
6674 struct loader_extension_list local_ext_list;
6675 struct loader_icd_tramp_list icd_tramp_list;
6676 uint32_t copy_size;
6677 VkResult res = VK_SUCCESS;
6678 struct loader_envvar_all_filters layer_filters = {0};
6679
6680 memset(&local_ext_list, 0, sizeof(local_ext_list));
6681 memset(&instance_layers, 0, sizeof(instance_layers));
6682 memset(&icd_tramp_list, 0, sizeof(icd_tramp_list));
6683
6684 res = parse_layer_environment_var_filters(NULL, &layer_filters);
6685 if (VK_SUCCESS != res) {
6686 goto out;
6687 }
6688
6689 // Get layer libraries if needed
6690 if (pLayerName && strlen(pLayerName) != 0) {
6691 if (vk_string_validate(MaxLoaderStringLength, pLayerName) != VK_STRING_ERROR_NONE) {
6692 assert(VK_FALSE && "vkEnumerateInstanceExtensionProperties: pLayerName is too long or is badly formed");
6693 res = VK_ERROR_EXTENSION_NOT_PRESENT;
6694 goto out;
6695 }
6696
6697 res = loader_scan_for_layers(NULL, &instance_layers, &layer_filters);
6698 if (VK_SUCCESS != res) {
6699 goto out;
6700 }
6701 for (uint32_t i = 0; i < instance_layers.count; i++) {
6702 struct loader_layer_properties *props = &instance_layers.list[i];
6703 if (strcmp(props->info.layerName, pLayerName) == 0) {
6704 global_ext_list = &props->instance_extension_list;
6705 break;
6706 }
6707 }
6708 } else {
6709 // Preload ICD libraries so subsequent calls to EnumerateInstanceExtensionProperties don't have to load them
6710 loader_preload_icds();
6711
6712 // Scan/discover all ICD libraries
6713 res = loader_icd_scan(NULL, &icd_tramp_list, NULL, NULL);
6714 // EnumerateInstanceExtensionProperties can't return anything other than OOM or VK_ERROR_LAYER_NOT_PRESENT
6715 if ((VK_SUCCESS != res && icd_tramp_list.count > 0) || res == VK_ERROR_OUT_OF_HOST_MEMORY) {
6716 goto out;
6717 }
6718 // Get extensions from all ICD's, merge so no duplicates
6719 res = loader_get_icd_loader_instance_extensions(NULL, &icd_tramp_list, &local_ext_list);
6720 if (VK_SUCCESS != res) {
6721 goto out;
6722 }
6723 loader_scanned_icd_clear(NULL, &icd_tramp_list);
6724
6725 // Append enabled implicit layers.
6726 res = loader_scan_for_implicit_layers(NULL, &instance_layers, &layer_filters);
6727 if (VK_SUCCESS != res) {
6728 goto out;
6729 }
6730 for (uint32_t i = 0; i < instance_layers.count; i++) {
6731 struct loader_extension_list *ext_list = &instance_layers.list[i].instance_extension_list;
6732 loader_add_to_ext_list(NULL, &local_ext_list, ext_list->count, ext_list->list);
6733 }
6734
6735 global_ext_list = &local_ext_list;
6736 }
6737
6738 if (global_ext_list == NULL) {
6739 res = VK_ERROR_LAYER_NOT_PRESENT;
6740 goto out;
6741 }
6742
6743 if (pProperties == NULL) {
6744 *pPropertyCount = global_ext_list->count;
6745 goto out;
6746 }
6747
6748 copy_size = *pPropertyCount < global_ext_list->count ? *pPropertyCount : global_ext_list->count;
6749 for (uint32_t i = 0; i < copy_size; i++) {
6750 memcpy(&pProperties[i], &global_ext_list->list[i], sizeof(VkExtensionProperties));
6751 }
6752 *pPropertyCount = copy_size;
6753
6754 if (copy_size < global_ext_list->count) {
6755 res = VK_INCOMPLETE;
6756 goto out;
6757 }
6758
6759 out:
6760 loader_destroy_generic_list(NULL, (struct loader_generic_list *)&icd_tramp_list);
6761 loader_destroy_generic_list(NULL, (struct loader_generic_list *)&local_ext_list);
6762 loader_delete_layer_list_and_properties(NULL, &instance_layers);
6763 return res;
6764 }
6765
6766 VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumerateInstanceLayerProperties(const VkEnumerateInstanceLayerPropertiesChain *chain,
6767 uint32_t *pPropertyCount,
6768 VkLayerProperties *pProperties) {
6769 (void)chain;
6770 VkResult result = VK_SUCCESS;
6771 struct loader_layer_list instance_layer_list;
6772 struct loader_envvar_all_filters layer_filters = {0};
6773
6774 LOADER_PLATFORM_THREAD_ONCE(&once_init, loader_initialize);
6775
6776 uint32_t copy_size;
6777
6778 result = parse_layer_environment_var_filters(NULL, &layer_filters);
6779 if (VK_SUCCESS != result) {
6780 goto out;
6781 }
6782
6783 // Get layer libraries
6784 memset(&instance_layer_list, 0, sizeof(instance_layer_list));
6785 result = loader_scan_for_layers(NULL, &instance_layer_list, &layer_filters);
6786 if (VK_SUCCESS != result) {
6787 goto out;
6788 }
6789
6790 uint32_t active_layer_count = 0;
6791 for (uint32_t i = 0; i < instance_layer_list.count; i++) {
6792 if (instance_layer_list.list[i].settings_control_value == LOADER_SETTINGS_LAYER_CONTROL_ON ||
6793 instance_layer_list.list[i].settings_control_value == LOADER_SETTINGS_LAYER_CONTROL_DEFAULT) {
6794 active_layer_count++;
6795 }
6796 }
6797
6798 if (pProperties == NULL) {
6799 *pPropertyCount = active_layer_count;
6800 goto out;
6801 }
6802
6803 copy_size = (*pPropertyCount < active_layer_count) ? *pPropertyCount : active_layer_count;
6804 uint32_t output_properties_index = 0;
6805 for (uint32_t i = 0; i < copy_size; i++) {
6806 if (instance_layer_list.list[i].settings_control_value == LOADER_SETTINGS_LAYER_CONTROL_ON ||
6807 instance_layer_list.list[i].settings_control_value == LOADER_SETTINGS_LAYER_CONTROL_DEFAULT) {
6808 memcpy(&pProperties[output_properties_index], &instance_layer_list.list[i].info, sizeof(VkLayerProperties));
6809 output_properties_index++;
6810 }
6811 }
6812
6813 *pPropertyCount = copy_size;
6814
6815 if (copy_size < instance_layer_list.count) {
6816 result = VK_INCOMPLETE;
6817 goto out;
6818 }
6819
6820 out:
6821
6822 loader_delete_layer_list_and_properties(NULL, &instance_layer_list);
6823 return result;
6824 }
6825
6826 // ---- Vulkan Core 1.1 terminators
6827
6828 VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumeratePhysicalDeviceGroups(
6829 VkInstance instance, uint32_t *pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties) {
6830 struct loader_instance *inst = (struct loader_instance *)instance;
6831
6832 VkResult res = VK_SUCCESS;
6833 struct loader_icd_term *icd_term;
6834 uint32_t total_count = 0;
6835 uint32_t cur_icd_group_count = 0;
6836 VkPhysicalDeviceGroupProperties **new_phys_dev_groups = NULL;
6837 struct loader_physical_device_group_term *local_phys_dev_groups = NULL;
6838 PFN_vkEnumeratePhysicalDeviceGroups fpEnumeratePhysicalDeviceGroups = NULL;
6839 struct loader_icd_physical_devices *sorted_phys_dev_array = NULL;
6840 uint32_t sorted_count = 0;
6841
6842 // For each ICD, query the number of physical device groups, and then get an
6843 // internal value for those physical devices.
6844 icd_term = inst->icd_terms;
6845 for (uint32_t icd_idx = 0; NULL != icd_term; icd_term = icd_term->next, icd_idx++) {
6846 cur_icd_group_count = 0;
6847
6848 // Get the function pointer to use to call into the ICD. This could be the core or KHR version
6849 if (inst->enabled_known_extensions.khr_device_group_creation) {
6850 fpEnumeratePhysicalDeviceGroups = icd_term->dispatch.EnumeratePhysicalDeviceGroupsKHR;
6851 } else {
6852 fpEnumeratePhysicalDeviceGroups = icd_term->dispatch.EnumeratePhysicalDeviceGroups;
6853 }
6854
6855 if (NULL == fpEnumeratePhysicalDeviceGroups) {
6856 // Treat each ICD's GPU as it's own group if the extension isn't supported
6857 res = icd_term->dispatch.EnumeratePhysicalDevices(icd_term->instance, &cur_icd_group_count, NULL);
6858 if (res != VK_SUCCESS) {
6859 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6860 "terminator_EnumeratePhysicalDeviceGroups: Failed during dispatch call of \'EnumeratePhysicalDevices\' "
6861 "to ICD %d to get plain phys dev count.",
6862 icd_idx);
6863 continue;
6864 }
6865 } else {
6866 // Query the actual group info
6867 res = fpEnumeratePhysicalDeviceGroups(icd_term->instance, &cur_icd_group_count, NULL);
6868 if (res != VK_SUCCESS) {
6869 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6870 "terminator_EnumeratePhysicalDeviceGroups: Failed during dispatch call of "
6871 "\'EnumeratePhysicalDeviceGroups\' to ICD %d to get count.",
6872 icd_idx);
6873 continue;
6874 }
6875 }
6876 total_count += cur_icd_group_count;
6877 }
6878
6879 // If GPUs not sorted yet, look through them and generate list of all available GPUs
6880 if (0 == total_count || 0 == inst->total_gpu_count) {
6881 res = setup_loader_term_phys_devs(inst);
6882 if (VK_SUCCESS != res) {
6883 goto out;
6884 }
6885 }
6886
6887 if (NULL != pPhysicalDeviceGroupProperties) {
6888 // Create an array for the new physical device groups, which will be stored
6889 // in the instance for the Terminator code.
6890 new_phys_dev_groups = (VkPhysicalDeviceGroupProperties **)loader_instance_heap_calloc(
6891 inst, total_count * sizeof(VkPhysicalDeviceGroupProperties *), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
6892 if (NULL == new_phys_dev_groups) {
6893 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6894 "terminator_EnumeratePhysicalDeviceGroups: Failed to allocate new physical device group array of size %d",
6895 total_count);
6896 res = VK_ERROR_OUT_OF_HOST_MEMORY;
6897 goto out;
6898 }
6899
6900 // Create a temporary array (on the stack) to keep track of the
6901 // returned VkPhysicalDevice values.
6902 local_phys_dev_groups = loader_stack_alloc(sizeof(struct loader_physical_device_group_term) * total_count);
6903 // Initialize the memory to something valid
6904 memset(local_phys_dev_groups, 0, sizeof(struct loader_physical_device_group_term) * total_count);
6905
6906 #if defined(_WIN32)
6907 // Get the physical devices supported by platform sorting mechanism into a separate list
6908 res = windows_read_sorted_physical_devices(inst, &sorted_count, &sorted_phys_dev_array);
6909 if (VK_SUCCESS != res) {
6910 goto out;
6911 }
6912 #endif
6913
6914 cur_icd_group_count = 0;
6915 icd_term = inst->icd_terms;
6916 for (uint8_t icd_idx = 0; NULL != icd_term; icd_term = icd_term->next, icd_idx++) {
6917 uint32_t count_this_time = total_count - cur_icd_group_count;
6918
6919 // Get the function pointer to use to call into the ICD. This could be the core or KHR version
6920 if (inst->enabled_known_extensions.khr_device_group_creation) {
6921 fpEnumeratePhysicalDeviceGroups = icd_term->dispatch.EnumeratePhysicalDeviceGroupsKHR;
6922 } else {
6923 fpEnumeratePhysicalDeviceGroups = icd_term->dispatch.EnumeratePhysicalDeviceGroups;
6924 }
6925
6926 if (NULL == fpEnumeratePhysicalDeviceGroups) {
6927 icd_term->dispatch.EnumeratePhysicalDevices(icd_term->instance, &count_this_time, NULL);
6928
6929 VkPhysicalDevice *phys_dev_array = loader_stack_alloc(sizeof(VkPhysicalDevice) * count_this_time);
6930 if (NULL == phys_dev_array) {
6931 loader_log(
6932 inst, VULKAN_LOADER_ERROR_BIT, 0,
6933 "terminator_EnumeratePhysicalDeviceGroups: Failed to allocate local physical device array of size %d",
6934 count_this_time);
6935 res = VK_ERROR_OUT_OF_HOST_MEMORY;
6936 goto out;
6937 }
6938
6939 res = icd_term->dispatch.EnumeratePhysicalDevices(icd_term->instance, &count_this_time, phys_dev_array);
6940 if (res != VK_SUCCESS) {
6941 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6942 "terminator_EnumeratePhysicalDeviceGroups: Failed during dispatch call of "
6943 "\'EnumeratePhysicalDevices\' to ICD %d to get plain phys dev count.",
6944 icd_idx);
6945 goto out;
6946 }
6947
6948 // Add each GPU as it's own group
6949 for (uint32_t indiv_gpu = 0; indiv_gpu < count_this_time; indiv_gpu++) {
6950 uint32_t cur_index = indiv_gpu + cur_icd_group_count;
6951 local_phys_dev_groups[cur_index].this_icd_term = icd_term;
6952 local_phys_dev_groups[cur_index].icd_index = icd_idx;
6953 local_phys_dev_groups[cur_index].group_props.physicalDeviceCount = 1;
6954 local_phys_dev_groups[cur_index].group_props.physicalDevices[0] = phys_dev_array[indiv_gpu];
6955 }
6956
6957 } else {
6958 res = fpEnumeratePhysicalDeviceGroups(icd_term->instance, &count_this_time, NULL);
6959 if (res != VK_SUCCESS) {
6960 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6961 "terminator_EnumeratePhysicalDeviceGroups: Failed during dispatch call of "
6962 "\'EnumeratePhysicalDeviceGroups\' to ICD %d to get group count.",
6963 icd_idx);
6964 goto out;
6965 }
6966 if (cur_icd_group_count + count_this_time < *pPhysicalDeviceGroupCount) {
6967 // The total amount is still less than the amount of physical device group data passed in
6968 // by the callee. Therefore, we don't have to allocate any temporary structures and we
6969 // can just use the data that was passed in.
6970 res = fpEnumeratePhysicalDeviceGroups(icd_term->instance, &count_this_time,
6971 &pPhysicalDeviceGroupProperties[cur_icd_group_count]);
6972 if (res != VK_SUCCESS) {
6973 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6974 "terminator_EnumeratePhysicalDeviceGroups: Failed during dispatch call of "
6975 "\'EnumeratePhysicalDeviceGroups\' to ICD %d to get group information.",
6976 icd_idx);
6977 goto out;
6978 }
6979 for (uint32_t group = 0; group < count_this_time; ++group) {
6980 uint32_t cur_index = group + cur_icd_group_count;
6981 local_phys_dev_groups[cur_index].group_props = pPhysicalDeviceGroupProperties[cur_index];
6982 local_phys_dev_groups[cur_index].this_icd_term = icd_term;
6983 local_phys_dev_groups[cur_index].icd_index = icd_idx;
6984 }
6985 } else {
6986 // There's not enough space in the callee's allocated pPhysicalDeviceGroupProperties structs,
6987 // so we have to allocate temporary versions to collect all the data. However, we need to make
6988 // sure that at least the ones we do query utilize any pNext data in the callee's version.
6989 VkPhysicalDeviceGroupProperties *tmp_group_props =
6990 loader_stack_alloc(count_this_time * sizeof(VkPhysicalDeviceGroupProperties));
6991 for (uint32_t group = 0; group < count_this_time; group++) {
6992 tmp_group_props[group].sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES;
6993 uint32_t cur_index = group + cur_icd_group_count;
6994 if (*pPhysicalDeviceGroupCount > cur_index) {
6995 tmp_group_props[group].pNext = pPhysicalDeviceGroupProperties[cur_index].pNext;
6996 } else {
6997 tmp_group_props[group].pNext = NULL;
6998 }
6999 tmp_group_props[group].subsetAllocation = false;
7000 }
7001
7002 res = fpEnumeratePhysicalDeviceGroups(icd_term->instance, &count_this_time, tmp_group_props);
7003 if (res != VK_SUCCESS) {
7004 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
7005 "terminator_EnumeratePhysicalDeviceGroups: Failed during dispatch call of "
7006 "\'EnumeratePhysicalDeviceGroups\' to ICD %d to get group information for temp data.",
7007 icd_idx);
7008 goto out;
7009 }
7010 for (uint32_t group = 0; group < count_this_time; ++group) {
7011 uint32_t cur_index = group + cur_icd_group_count;
7012 local_phys_dev_groups[cur_index].group_props = tmp_group_props[group];
7013 local_phys_dev_groups[cur_index].this_icd_term = icd_term;
7014 local_phys_dev_groups[cur_index].icd_index = icd_idx;
7015 }
7016 }
7017 if (VK_SUCCESS != res) {
7018 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
7019 "terminator_EnumeratePhysicalDeviceGroups: Failed during dispatch call of "
7020 "\'EnumeratePhysicalDeviceGroups\' to ICD %d to get content.",
7021 icd_idx);
7022 goto out;
7023 }
7024 }
7025
7026 cur_icd_group_count += count_this_time;
7027 }
7028
7029 #if defined(LOADER_ENABLE_LINUX_SORT)
7030 if (is_linux_sort_enabled(inst)) {
7031 // Get the physical devices supported by platform sorting mechanism into a separate list
7032 res = linux_sort_physical_device_groups(inst, total_count, local_phys_dev_groups);
7033 }
7034 #elif defined(_WIN32)
7035 // The Windows sorting information is only on physical devices. We need to take that and convert it to the group
7036 // information if it's present.
7037 if (sorted_count > 0) {
7038 res =
7039 windows_sort_physical_device_groups(inst, total_count, local_phys_dev_groups, sorted_count, sorted_phys_dev_array);
7040 }
7041 #endif // LOADER_ENABLE_LINUX_SORT
7042
7043 // Just to be safe, make sure we successfully completed setup_loader_term_phys_devs above
7044 // before attempting to do the following. By verifying that setup_loader_term_phys_devs ran
7045 // first, it guarantees that each physical device will have a loader-specific handle.
7046 if (NULL != inst->phys_devs_term) {
7047 for (uint32_t group = 0; group < total_count; group++) {
7048 for (uint32_t group_gpu = 0; group_gpu < local_phys_dev_groups[group].group_props.physicalDeviceCount;
7049 group_gpu++) {
7050 bool found = false;
7051 for (uint32_t term_gpu = 0; term_gpu < inst->phys_dev_count_term; term_gpu++) {
7052 if (local_phys_dev_groups[group].group_props.physicalDevices[group_gpu] ==
7053 inst->phys_devs_term[term_gpu]->phys_dev) {
7054 local_phys_dev_groups[group].group_props.physicalDevices[group_gpu] =
7055 (VkPhysicalDevice)inst->phys_devs_term[term_gpu];
7056 found = true;
7057 break;
7058 }
7059 }
7060 if (!found) {
7061 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
7062 "terminator_EnumeratePhysicalDeviceGroups: Failed to find GPU %d in group %d returned by "
7063 "\'EnumeratePhysicalDeviceGroups\' in list returned by \'EnumeratePhysicalDevices\'",
7064 group_gpu, group);
7065 res = VK_ERROR_INITIALIZATION_FAILED;
7066 goto out;
7067 }
7068 }
7069 }
7070 }
7071
7072 uint32_t idx = 0;
7073
7074 // Copy or create everything to fill the new array of physical device groups
7075 for (uint32_t group = 0; group < total_count; group++) {
7076 // Skip groups which have been included through sorting
7077 if (local_phys_dev_groups[group].group_props.physicalDeviceCount == 0) {
7078 continue;
7079 }
7080
7081 // Find the VkPhysicalDeviceGroupProperties object in local_phys_dev_groups
7082 VkPhysicalDeviceGroupProperties *group_properties = &local_phys_dev_groups[group].group_props;
7083
7084 // Check if this physical device group with the same contents is already in the old buffer
7085 for (uint32_t old_idx = 0; old_idx < inst->phys_dev_group_count_term; old_idx++) {
7086 if (NULL != group_properties && NULL != inst->phys_dev_groups_term[old_idx] &&
7087 group_properties->physicalDeviceCount == inst->phys_dev_groups_term[old_idx]->physicalDeviceCount) {
7088 bool found_all_gpus = true;
7089 for (uint32_t old_gpu = 0; old_gpu < inst->phys_dev_groups_term[old_idx]->physicalDeviceCount; old_gpu++) {
7090 bool found_gpu = false;
7091 for (uint32_t new_gpu = 0; new_gpu < group_properties->physicalDeviceCount; new_gpu++) {
7092 if (group_properties->physicalDevices[new_gpu] ==
7093 inst->phys_dev_groups_term[old_idx]->physicalDevices[old_gpu]) {
7094 found_gpu = true;
7095 break;
7096 }
7097 }
7098
7099 if (!found_gpu) {
7100 found_all_gpus = false;
7101 break;
7102 }
7103 }
7104 if (!found_all_gpus) {
7105 continue;
7106 } else {
7107 new_phys_dev_groups[idx] = inst->phys_dev_groups_term[old_idx];
7108 break;
7109 }
7110 }
7111 }
7112 // If this physical device group isn't in the old buffer, create it
7113 if (group_properties != NULL && NULL == new_phys_dev_groups[idx]) {
7114 new_phys_dev_groups[idx] = (VkPhysicalDeviceGroupProperties *)loader_instance_heap_alloc(
7115 inst, sizeof(VkPhysicalDeviceGroupProperties), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
7116 if (NULL == new_phys_dev_groups[idx]) {
7117 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
7118 "terminator_EnumeratePhysicalDeviceGroups: Failed to allocate physical device group Terminator "
7119 "object %d",
7120 idx);
7121 total_count = idx;
7122 res = VK_ERROR_OUT_OF_HOST_MEMORY;
7123 goto out;
7124 }
7125 memcpy(new_phys_dev_groups[idx], group_properties, sizeof(VkPhysicalDeviceGroupProperties));
7126 }
7127
7128 ++idx;
7129 }
7130 }
7131
7132 out:
7133
7134 if (NULL != pPhysicalDeviceGroupProperties) {
7135 if (VK_SUCCESS != res) {
7136 if (NULL != new_phys_dev_groups) {
7137 // We've encountered an error, so we should free the new buffers.
7138 for (uint32_t i = 0; i < total_count; i++) {
7139 // If an OOM occurred inside the copying of the new physical device groups into the existing array will
7140 // leave some of the old physical device groups in the array which may have been copied into the new array,
7141 // leading to them being freed twice. To avoid this we just make sure to not delete physical device groups
7142 // which were copied.
7143 bool found = false;
7144 if (NULL != inst->phys_devs_term) {
7145 for (uint32_t old_idx = 0; old_idx < inst->phys_dev_group_count_term; old_idx++) {
7146 if (new_phys_dev_groups[i] == inst->phys_dev_groups_term[old_idx]) {
7147 found = true;
7148 break;
7149 }
7150 }
7151 }
7152 if (!found) {
7153 loader_instance_heap_free(inst, new_phys_dev_groups[i]);
7154 }
7155 }
7156 loader_instance_heap_free(inst, new_phys_dev_groups);
7157 }
7158 } else {
7159 if (NULL != inst->phys_dev_groups_term) {
7160 // Free everything in the old array that was not copied into the new array
7161 // here. We can't attempt to do that before here since the previous loop
7162 // looking before the "out:" label may hit an out of memory condition resulting
7163 // in memory leaking.
7164 for (uint32_t i = 0; i < inst->phys_dev_group_count_term; i++) {
7165 bool found = false;
7166 for (uint32_t j = 0; j < total_count; j++) {
7167 if (inst->phys_dev_groups_term[i] == new_phys_dev_groups[j]) {
7168 found = true;
7169 break;
7170 }
7171 }
7172 if (!found) {
7173 loader_instance_heap_free(inst, inst->phys_dev_groups_term[i]);
7174 }
7175 }
7176 loader_instance_heap_free(inst, inst->phys_dev_groups_term);
7177 }
7178
7179 // Swap in the new physical device group list
7180 inst->phys_dev_group_count_term = total_count;
7181 inst->phys_dev_groups_term = new_phys_dev_groups;
7182 }
7183
7184 if (sorted_phys_dev_array != NULL) {
7185 for (uint32_t i = 0; i < sorted_count; ++i) {
7186 if (sorted_phys_dev_array[i].device_count > 0 && sorted_phys_dev_array[i].physical_devices != NULL) {
7187 loader_instance_heap_free(inst, sorted_phys_dev_array[i].physical_devices);
7188 }
7189 }
7190 loader_instance_heap_free(inst, sorted_phys_dev_array);
7191 }
7192
7193 uint32_t copy_count = inst->phys_dev_group_count_term;
7194 if (NULL != pPhysicalDeviceGroupProperties) {
7195 if (copy_count > *pPhysicalDeviceGroupCount) {
7196 copy_count = *pPhysicalDeviceGroupCount;
7197 loader_log(inst, VULKAN_LOADER_INFO_BIT, 0,
7198 "terminator_EnumeratePhysicalDeviceGroups : Trimming device count from %d to %d.",
7199 inst->phys_dev_group_count_term, copy_count);
7200 res = VK_INCOMPLETE;
7201 }
7202
7203 for (uint32_t i = 0; i < copy_count; i++) {
7204 memcpy(&pPhysicalDeviceGroupProperties[i], inst->phys_dev_groups_term[i], sizeof(VkPhysicalDeviceGroupProperties));
7205 }
7206 }
7207
7208 *pPhysicalDeviceGroupCount = copy_count;
7209
7210 } else {
7211 *pPhysicalDeviceGroupCount = total_count;
7212 }
7213 return res;
7214 }
7215