1 /*
2 *
3 * Copyright (c) 2014-2022 The Khronos Group Inc.
4 * Copyright (c) 2014-2022 Valve Corporation
5 * Copyright (c) 2014-2022 LunarG, Inc.
6 * Copyright (C) 2015 Google Inc.
7 *
8 * Licensed under the Apache License, Version 2.0 (the "License");
9 * you may not use this file except in compliance with the License.
10 * You may obtain a copy of the License at
11 *
12 * http://www.apache.org/licenses/LICENSE-2.0
13 *
14 * Unless required by applicable law or agreed to in writing, software
15 * distributed under the License is distributed on an "AS IS" BASIS,
16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 * See the License for the specific language governing permissions and
18 * limitations under the License.
19
20 *
21 * Author: Jon Ashburn <jon@lunarg.com>
22 * Author: Courtney Goeltzenleuchter <courtney@LunarG.com>
23 * Author: Mark Young <marky@lunarg.com>
24 * Author: Lenny Komow <lenny@lunarg.com>
25 * Author: Charles Giessen <charles@lunarg.com>
26 *
27 */
28
29 #include "loader.h"
30
31 #include <inttypes.h>
32 #include <stdio.h>
33 #include <stdlib.h>
34 #include <stdarg.h>
35 #include <stdbool.h>
36 #include <string.h>
37 #include <stddef.h>
38
39 #if defined(__APPLE__)
40 #include <CoreFoundation/CoreFoundation.h>
41 #include <sys/param.h>
42 #endif
43
44 // Time related functions
45 #include <time.h>
46
47 #include <sys/types.h>
48 #if defined(_WIN32)
49 #include "dirent_on_windows.h"
50 #else // _WIN32
51 #include <dirent.h>
52 #endif // _WIN32
53
54 #include "allocation.h"
55 #include "cJSON.h"
56 #include "debug_utils.h"
57 #include "get_environment.h"
58 #include "gpa_helper.h"
59 #include "log.h"
60 #include "unknown_function_handling.h"
61 #include "vk_loader_platform.h"
62 #include "wsi.h"
63
64 #if defined(WIN32)
65 #include "loader_windows.h"
66 #endif
67 #ifdef LOADER_ENABLE_LINUX_SORT
68 // This header is currently only used when sorting Linux devices, so don't include it otherwise.
69 #include "loader_linux.h"
70 #endif // LOADER_ENABLE_LINUX_SORT
71
72 // Generated file containing all the extension data
73 #include "vk_loader_extensions.c"
74
75 struct loader_struct loader = {0};
76
77 struct activated_layer_info {
78 char *name;
79 char *manifest;
80 char *library;
81 bool is_implicit;
82 char *disable_env;
83 };
84
85 // thread safety lock for accessing global data structures such as "loader"
86 // all entrypoints on the instance chain need to be locked except GPA
87 // additionally CreateDevice and DestroyDevice needs to be locked
88 loader_platform_thread_mutex loader_lock;
89 loader_platform_thread_mutex loader_json_lock;
90 loader_platform_thread_mutex loader_preload_icd_lock;
91 loader_platform_thread_mutex loader_global_instance_list_lock;
92
93 // A list of ICDs that gets initialized when the loader does its global initialization. This list should never be used by anything
94 // other than EnumerateInstanceExtensionProperties(), vkDestroyInstance, and loader_release(). This list does not change
95 // functionality, but the fact that the libraries already been loaded causes any call that needs to load ICD libraries to speed up
96 // significantly. This can have a huge impact when making repeated calls to vkEnumerateInstanceExtensionProperties and
97 // vkCreateInstance.
98 static struct loader_icd_tramp_list scanned_icds;
99
100 LOADER_PLATFORM_THREAD_ONCE_DECLARATION(once_init);
101
102 // Creates loader_api_version struct that contains the major and minor fields, setting patch to 0
loader_make_version(uint32_t version)103 loader_api_version loader_make_version(uint32_t version) {
104 loader_api_version out_version;
105 out_version.major = VK_API_VERSION_MAJOR(version);
106 out_version.minor = VK_API_VERSION_MINOR(version);
107 out_version.patch = 0;
108 return out_version;
109 }
110
111 // Creates loader_api_version struct containing the major, minor, and patch fields
loader_make_full_version(uint32_t version)112 loader_api_version loader_make_full_version(uint32_t version) {
113 loader_api_version out_version;
114 out_version.major = VK_API_VERSION_MAJOR(version);
115 out_version.minor = VK_API_VERSION_MINOR(version);
116 out_version.patch = VK_API_VERSION_PATCH(version);
117 return out_version;
118 }
119
loader_combine_version(uint32_t major,uint32_t minor,uint32_t patch)120 loader_api_version loader_combine_version(uint32_t major, uint32_t minor, uint32_t patch) {
121 loader_api_version out_version;
122 out_version.major = (uint16_t)major;
123 out_version.minor = (uint16_t)minor;
124 out_version.patch = (uint16_t)patch;
125 return out_version;
126 }
127
128 // Helper macros for determining if a version is valid or not
loader_check_version_meets_required(loader_api_version required,loader_api_version version)129 bool loader_check_version_meets_required(loader_api_version required, loader_api_version version) {
130 // major version is satisfied
131 return (version.major > required.major) ||
132 // major version is equal, minor version is patch version is gerater to minimum minor
133 (version.major == required.major && version.minor > required.minor) ||
134 // major and minor version are equal, patch version is greater or equal to minimum patch
135 (version.major == required.major && version.minor == required.minor && version.patch >= required.patch);
136 }
137
138 // Wrapper around opendir so that the dirent_on_windows gets the instance it needs
139 // while linux opendir & readdir does not
loader_opendir(const struct loader_instance * instance,const char * name)140 DIR *loader_opendir(const struct loader_instance *instance, const char *name) {
141 #if defined(_WIN32)
142 return opendir(instance ? &instance->alloc_callbacks : NULL, name);
143 #else // _WIN32
144 return opendir(name);
145 #endif // _WIN32
146 }
loader_closedir(const struct loader_instance * instance,DIR * dir)147 int loader_closedir(const struct loader_instance *instance, DIR *dir) {
148 #if defined(_WIN32)
149 return closedir(instance ? &instance->alloc_callbacks : NULL, dir);
150 #else // _WIN32
151 return closedir(dir);
152 #endif // _WIN32
153 }
154
is_json(const char * path,size_t len)155 static bool is_json(const char *path, size_t len) {
156 if (len < 5) {
157 return false;
158 }
159 return !strncmp(path, ".json", 5);
160 }
161
162 // Handle error from to library loading
loader_handle_load_library_error(const struct loader_instance * inst,const char * filename,enum loader_layer_library_status * lib_status)163 void loader_handle_load_library_error(const struct loader_instance *inst, const char *filename,
164 enum loader_layer_library_status *lib_status) {
165 const char *error_message = loader_platform_open_library_error(filename);
166 // If the error is due to incompatible architecture (eg 32 bit vs 64 bit), report it with INFO level
167 // Discussed in Github issue 262 & 644
168 // "wrong ELF class" is a linux error, " with error 193" is a windows error
169 VkFlags err_flag = VULKAN_LOADER_ERROR_BIT;
170 if (strstr(error_message, "wrong ELF class:") != NULL || strstr(error_message, " with error 193") != NULL) {
171 err_flag = VULKAN_LOADER_INFO_BIT;
172 if (NULL != lib_status) {
173 *lib_status = LOADER_LAYER_LIB_ERROR_WRONG_BIT_TYPE;
174 }
175 } else if (NULL != lib_status) {
176 *lib_status = LOADER_LAYER_LIB_ERROR_FAILED_TO_LOAD;
177 }
178 loader_log(inst, err_flag, 0, error_message);
179 }
180
vkSetInstanceDispatch(VkInstance instance,void * object)181 VKAPI_ATTR VkResult VKAPI_CALL vkSetInstanceDispatch(VkInstance instance, void *object) {
182 struct loader_instance *inst = loader_get_instance(instance);
183 if (!inst) {
184 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "vkSetInstanceDispatch: Can not retrieve Instance dispatch table.");
185 return VK_ERROR_INITIALIZATION_FAILED;
186 }
187 loader_set_dispatch(object, inst->disp);
188 return VK_SUCCESS;
189 }
190
vkSetDeviceDispatch(VkDevice device,void * object)191 VKAPI_ATTR VkResult VKAPI_CALL vkSetDeviceDispatch(VkDevice device, void *object) {
192 struct loader_device *dev;
193 struct loader_icd_term *icd_term = loader_get_icd_and_device(device, &dev, NULL);
194
195 if (NULL == icd_term) {
196 return VK_ERROR_INITIALIZATION_FAILED;
197 }
198 loader_set_dispatch(object, &dev->loader_dispatch);
199 return VK_SUCCESS;
200 }
201
loader_free_layer_properties(const struct loader_instance * inst,struct loader_layer_properties * layer_properties)202 void loader_free_layer_properties(const struct loader_instance *inst, struct loader_layer_properties *layer_properties) {
203 loader_instance_heap_free(inst, layer_properties->component_layer_names);
204 loader_instance_heap_free(inst, layer_properties->override_paths);
205 loader_instance_heap_free(inst, layer_properties->blacklist_layer_names);
206 loader_instance_heap_free(inst, layer_properties->app_key_paths);
207
208 loader_destroy_generic_list(inst, (struct loader_generic_list *)&layer_properties->instance_extension_list);
209
210 if (layer_properties->device_extension_list.capacity > 0 && NULL != layer_properties->device_extension_list.list) {
211 for (uint32_t i = 0; i < layer_properties->device_extension_list.count; i++) {
212 struct loader_dev_ext_props *ext_props = &layer_properties->device_extension_list.list[i];
213 if (ext_props->entrypoint_count > 0) {
214 for (uint32_t j = 0; j < ext_props->entrypoint_count; j++) {
215 loader_instance_heap_free(inst, ext_props->entrypoints[j]);
216 }
217 loader_instance_heap_free(inst, ext_props->entrypoints);
218 }
219 }
220 }
221 loader_destroy_generic_list(inst, (struct loader_generic_list *)&layer_properties->device_extension_list);
222
223 // Make sure to clear out the removed layer, in case new layers are added in the previous location
224 memset(layer_properties, 0, sizeof(struct loader_layer_properties));
225 }
226
227 // Combine path elements, separating each element with the platform-specific
228 // directory separator, and save the combined string to a destination buffer,
229 // not exceeding the given length. Path elements are given as variable args,
230 // with a NULL element terminating the list.
231 //
232 // \returns the total length of the combined string, not including an ASCII
233 // NUL termination character. This length may exceed the available storage:
234 // in this case, the written string will be truncated to avoid a buffer
235 // overrun, and the return value will greater than or equal to the storage
236 // size. A NULL argument may be provided as the destination buffer in order
237 // to determine the required string length without actually writing a string.
loader_platform_combine_path(char * dest,size_t len,...)238 static size_t loader_platform_combine_path(char *dest, size_t len, ...) {
239 size_t required_len = 0;
240 va_list ap;
241 const char *component;
242
243 va_start(ap, len);
244 component = va_arg(ap, const char *);
245 while (component) {
246 if (required_len > 0) {
247 // This path element is not the first non-empty element; prepend
248 // a directory separator if space allows
249 if (dest && required_len + 1 < len) {
250 (void)snprintf(dest + required_len, len - required_len, "%c", DIRECTORY_SYMBOL);
251 }
252 required_len++;
253 }
254
255 if (dest && required_len < len) {
256 strncpy(dest + required_len, component, len - required_len);
257 }
258 required_len += strlen(component);
259 component = va_arg(ap, const char *);
260 }
261
262 va_end(ap);
263
264 // strncpy(3) won't add a NUL terminating byte in the event of truncation.
265 if (dest && required_len >= len) {
266 dest[len - 1] = '\0';
267 }
268
269 return required_len;
270 }
271
272 // Given string of three part form "maj.min.pat" convert to a vulkan version number.
273 // Also can understand four part form "variant.major.minor.patch" if provided.
loader_parse_version_string(char * vers_str)274 static uint32_t loader_parse_version_string(char *vers_str) {
275 uint32_t variant = 0, major = 0, minor = 0, patch = 0;
276 char *vers_tok;
277
278 if (!vers_str) {
279 return 0;
280 }
281
282 vers_tok = strtok(vers_str, ".\"\n\r");
283 if (NULL != vers_tok) {
284 major = (uint16_t)atoi(vers_tok);
285 vers_tok = strtok(NULL, ".\"\n\r");
286 if (NULL != vers_tok) {
287 minor = (uint16_t)atoi(vers_tok);
288 vers_tok = strtok(NULL, ".\"\n\r");
289 if (NULL != vers_tok) {
290 patch = (uint16_t)atoi(vers_tok);
291 vers_tok = strtok(NULL, ".\"\n\r");
292 // check that we are using a 4 part version string
293 if (NULL != vers_tok) {
294 // if we are, move the values over into the correct place
295 variant = major;
296 major = minor;
297 minor = patch;
298 patch = (uint16_t)atoi(vers_tok);
299 }
300 }
301 }
302 }
303
304 return VK_MAKE_API_VERSION(variant, major, minor, patch);
305 }
306
compare_vk_extension_properties(const VkExtensionProperties * op1,const VkExtensionProperties * op2)307 bool compare_vk_extension_properties(const VkExtensionProperties *op1, const VkExtensionProperties *op2) {
308 return strcmp(op1->extensionName, op2->extensionName) == 0 ? true : false;
309 }
310
311 // Search the given ext_array for an extension matching the given vk_ext_prop
has_vk_extension_property_array(const VkExtensionProperties * vk_ext_prop,const uint32_t count,const VkExtensionProperties * ext_array)312 bool has_vk_extension_property_array(const VkExtensionProperties *vk_ext_prop, const uint32_t count,
313 const VkExtensionProperties *ext_array) {
314 for (uint32_t i = 0; i < count; i++) {
315 if (compare_vk_extension_properties(vk_ext_prop, &ext_array[i])) return true;
316 }
317 return false;
318 }
319
320 // Search the given ext_list for an extension matching the given vk_ext_prop
has_vk_extension_property(const VkExtensionProperties * vk_ext_prop,const struct loader_extension_list * ext_list)321 bool has_vk_extension_property(const VkExtensionProperties *vk_ext_prop, const struct loader_extension_list *ext_list) {
322 for (uint32_t i = 0; i < ext_list->count; i++) {
323 if (compare_vk_extension_properties(&ext_list->list[i], vk_ext_prop)) return true;
324 }
325 return false;
326 }
327
328 // Search the given ext_list for a device extension matching the given ext_prop
has_vk_dev_ext_property(const VkExtensionProperties * ext_prop,const struct loader_device_extension_list * ext_list)329 bool has_vk_dev_ext_property(const VkExtensionProperties *ext_prop, const struct loader_device_extension_list *ext_list) {
330 for (uint32_t i = 0; i < ext_list->count; i++) {
331 if (compare_vk_extension_properties(&ext_list->list[i].props, ext_prop)) return true;
332 }
333 return false;
334 }
335
336 // Get the next unused layer property in the list. Init the property to zero.
loader_get_next_layer_property_slot(const struct loader_instance * inst,struct loader_layer_list * layer_list)337 static struct loader_layer_properties *loader_get_next_layer_property_slot(const struct loader_instance *inst,
338 struct loader_layer_list *layer_list) {
339 if (layer_list->capacity == 0) {
340 layer_list->list =
341 loader_instance_heap_calloc(inst, sizeof(struct loader_layer_properties) * 64, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
342 if (layer_list->list == NULL) {
343 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
344 "loader_get_next_layer_property_slot: Out of memory can not add any layer properties to list");
345 return NULL;
346 }
347 layer_list->capacity = sizeof(struct loader_layer_properties) * 64;
348 }
349
350 // Ensure enough room to add an entry
351 if ((layer_list->count + 1) * sizeof(struct loader_layer_properties) > layer_list->capacity) {
352 void *new_ptr = loader_instance_heap_realloc(inst, layer_list->list, layer_list->capacity, layer_list->capacity * 2,
353 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
354 if (NULL == new_ptr) {
355 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_get_next_layer_property_slot: realloc failed for layer list");
356 return NULL;
357 }
358 layer_list->list = new_ptr;
359 memset((uint8_t *)layer_list->list + layer_list->capacity, 0, layer_list->capacity);
360 layer_list->capacity *= 2;
361 }
362
363 layer_list->count++;
364 return &(layer_list->list[layer_list->count - 1]);
365 }
366
367 // Search the given layer list for a layer property matching the given layer name
loader_find_layer_property(const char * name,const struct loader_layer_list * layer_list)368 static struct loader_layer_properties *loader_find_layer_property(const char *name, const struct loader_layer_list *layer_list) {
369 for (uint32_t i = 0; i < layer_list->count; i++) {
370 const VkLayerProperties *item = &layer_list->list[i].info;
371 if (strcmp(name, item->layerName) == 0) return &layer_list->list[i];
372 }
373 return NULL;
374 }
375
376 // Search the given layer list for a layer matching the given layer name
loader_find_layer_name_in_list(const char * name,const struct loader_layer_list * layer_list)377 static bool loader_find_layer_name_in_list(const char *name, const struct loader_layer_list *layer_list) {
378 if (NULL == layer_list) {
379 return false;
380 }
381 if (NULL != loader_find_layer_property(name, layer_list)) {
382 return true;
383 }
384 return false;
385 }
386
387 // Search the given meta-layer's component list for a layer matching the given layer name
loader_find_layer_name_in_meta_layer(const struct loader_instance * inst,const char * layer_name,struct loader_layer_list * layer_list,struct loader_layer_properties * meta_layer_props)388 static bool loader_find_layer_name_in_meta_layer(const struct loader_instance *inst, const char *layer_name,
389 struct loader_layer_list *layer_list,
390 struct loader_layer_properties *meta_layer_props) {
391 for (uint32_t comp_layer = 0; comp_layer < meta_layer_props->num_component_layers; comp_layer++) {
392 if (!strcmp(meta_layer_props->component_layer_names[comp_layer], layer_name)) {
393 return true;
394 }
395 struct loader_layer_properties *comp_layer_props =
396 loader_find_layer_property(meta_layer_props->component_layer_names[comp_layer], layer_list);
397 if (comp_layer_props->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER) {
398 return loader_find_layer_name_in_meta_layer(inst, layer_name, layer_list, comp_layer_props);
399 }
400 }
401 return false;
402 }
403
404 // Search the override layer's blacklist for a layer matching the given layer name
loader_find_layer_name_in_blacklist(const struct loader_instance * inst,const char * layer_name,struct loader_layer_list * layer_list,struct loader_layer_properties * meta_layer_props)405 static bool loader_find_layer_name_in_blacklist(const struct loader_instance *inst, const char *layer_name,
406 struct loader_layer_list *layer_list,
407 struct loader_layer_properties *meta_layer_props) {
408 for (uint32_t black_layer = 0; black_layer < meta_layer_props->num_blacklist_layers; ++black_layer) {
409 if (!strcmp(meta_layer_props->blacklist_layer_names[black_layer], layer_name)) {
410 return true;
411 }
412 }
413 return false;
414 }
415
416 // Remove all layer properties entries from the list
loader_delete_layer_list_and_properties(const struct loader_instance * inst,struct loader_layer_list * layer_list)417 void loader_delete_layer_list_and_properties(const struct loader_instance *inst, struct loader_layer_list *layer_list) {
418 uint32_t i;
419 if (!layer_list) return;
420
421 for (i = 0; i < layer_list->count; i++) {
422 loader_free_layer_properties(inst, &(layer_list->list[i]));
423 }
424 layer_list->count = 0;
425
426 if (layer_list->capacity > 0) {
427 layer_list->capacity = 0;
428 loader_instance_heap_free(inst, layer_list->list);
429 }
430 }
431
loader_remove_layer_in_list(const struct loader_instance * inst,struct loader_layer_list * layer_list,uint32_t layer_to_remove)432 void loader_remove_layer_in_list(const struct loader_instance *inst, struct loader_layer_list *layer_list,
433 uint32_t layer_to_remove) {
434 if (layer_list == NULL || layer_to_remove >= layer_list->count) {
435 return;
436 }
437 loader_free_layer_properties(inst, &(layer_list->list[layer_to_remove]));
438
439 // Remove the current invalid meta-layer from the layer list. Use memmove since we are
440 // overlapping the source and destination addresses.
441 memmove(&layer_list->list[layer_to_remove], &layer_list->list[layer_to_remove + 1],
442 sizeof(struct loader_layer_properties) * (layer_list->count - 1 - layer_to_remove));
443
444 // Decrement the count (because we now have one less) and decrement the loop index since we need to
445 // re-check this index.
446 layer_list->count--;
447 }
448
449 // Remove all layers in the layer list that are blacklisted by the override layer.
450 // NOTE: This should only be called if an override layer is found and not expired.
loader_remove_layers_in_blacklist(const struct loader_instance * inst,struct loader_layer_list * layer_list)451 void loader_remove_layers_in_blacklist(const struct loader_instance *inst, struct loader_layer_list *layer_list) {
452 struct loader_layer_properties *override_prop = loader_find_layer_property(VK_OVERRIDE_LAYER_NAME, layer_list);
453 if (NULL == override_prop) {
454 return;
455 }
456
457 for (int32_t j = 0; j < (int32_t)(layer_list->count); j++) {
458 struct loader_layer_properties cur_layer_prop = layer_list->list[j];
459 const char *cur_layer_name = &cur_layer_prop.info.layerName[0];
460
461 // Skip the override layer itself.
462 if (!strcmp(VK_OVERRIDE_LAYER_NAME, cur_layer_name)) {
463 continue;
464 }
465
466 // If found in the override layer's blacklist, remove it
467 if (loader_find_layer_name_in_blacklist(inst, cur_layer_name, layer_list, override_prop)) {
468 loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0,
469 "loader_remove_layers_in_blacklist: Override layer is active and layer %s is in the blacklist inside of it. "
470 "Removing that layer from current layer list.",
471 cur_layer_name);
472 loader_remove_layer_in_list(inst, layer_list, j);
473 j--;
474
475 // Re-do the query for the override layer
476 override_prop = loader_find_layer_property(VK_OVERRIDE_LAYER_NAME, layer_list);
477 }
478 }
479 }
480
481 // Remove all layers in the layer list that are not found inside any implicit meta-layers.
loader_remove_layers_not_in_implicit_meta_layers(const struct loader_instance * inst,struct loader_layer_list * layer_list)482 void loader_remove_layers_not_in_implicit_meta_layers(const struct loader_instance *inst, struct loader_layer_list *layer_list) {
483 int32_t i;
484 int32_t j;
485 int32_t layer_count = (int32_t)(layer_list->count);
486
487 for (i = 0; i < layer_count; i++) {
488 layer_list->list[i].keep = false;
489 }
490
491 for (i = 0; i < layer_count; i++) {
492 struct loader_layer_properties *cur_layer_prop = &layer_list->list[i];
493
494 if (0 == (cur_layer_prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER)) {
495 cur_layer_prop->keep = true;
496 continue;
497 }
498 for (j = 0; j < layer_count; j++) {
499 struct loader_layer_properties *layer_to_check = &layer_list->list[j];
500
501 if (i == j) {
502 continue;
503 }
504
505 if (layer_to_check->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER) {
506 // For all layers found in this meta layer, we want to keep them as well.
507 if (loader_find_layer_name_in_meta_layer(inst, cur_layer_prop->info.layerName, layer_list, layer_to_check)) {
508 cur_layer_prop->keep = true;
509 }
510 }
511 }
512 }
513
514 // Remove any layers we don't want to keep (Don't use layer_count here as we need it to be
515 // dynamically updated if we delete a layer property in the list).
516 for (i = 0; i < (int32_t)(layer_list->count); i++) {
517 struct loader_layer_properties *cur_layer_prop = &layer_list->list[i];
518 if (!cur_layer_prop->keep) {
519 loader_log(
520 inst, VULKAN_LOADER_DEBUG_BIT, 0,
521 "loader_remove_layers_not_in_implicit_meta_layers : Implicit meta-layers are active, and layer %s is not list "
522 "inside of any. So removing layer from current layer list.",
523 cur_layer_prop->info.layerName);
524 loader_remove_layer_in_list(inst, layer_list, i);
525 i--;
526 }
527 }
528 }
529
loader_add_instance_extensions(const struct loader_instance * inst,const PFN_vkEnumerateInstanceExtensionProperties fp_get_props,const char * lib_name,struct loader_extension_list * ext_list)530 static VkResult loader_add_instance_extensions(const struct loader_instance *inst,
531 const PFN_vkEnumerateInstanceExtensionProperties fp_get_props, const char *lib_name,
532 struct loader_extension_list *ext_list) {
533 uint32_t i, count = 0;
534 VkExtensionProperties *ext_props;
535 VkResult res = VK_SUCCESS;
536
537 if (!fp_get_props) {
538 // No EnumerateInstanceExtensionProperties defined
539 goto out;
540 }
541
542 res = fp_get_props(NULL, &count, NULL);
543 if (res != VK_SUCCESS) {
544 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
545 "loader_add_instance_extensions: Error getting Instance extension count from %s", lib_name);
546 goto out;
547 }
548
549 if (count == 0) {
550 // No ExtensionProperties to report
551 goto out;
552 }
553
554 ext_props = loader_stack_alloc(count * sizeof(VkExtensionProperties));
555 if (NULL == ext_props) {
556 res = VK_ERROR_OUT_OF_HOST_MEMORY;
557 goto out;
558 }
559
560 res = fp_get_props(NULL, &count, ext_props);
561 if (res != VK_SUCCESS) {
562 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_add_instance_extensions: Error getting Instance extensions from %s",
563 lib_name);
564 goto out;
565 }
566
567 for (i = 0; i < count; i++) {
568 bool ext_unsupported = wsi_unsupported_instance_extension(&ext_props[i]);
569 if (!ext_unsupported) {
570 res = loader_add_to_ext_list(inst, ext_list, 1, &ext_props[i]);
571 if (res != VK_SUCCESS) {
572 goto out;
573 }
574 }
575 }
576
577 out:
578 return res;
579 }
580
581 // Initialize ext_list with the physical device extensions.
582 // The extension properties are passed as inputs in count and ext_props.
loader_init_device_extensions(const struct loader_instance * inst,struct loader_physical_device_term * phys_dev_term,uint32_t count,VkExtensionProperties * ext_props,struct loader_extension_list * ext_list)583 static VkResult loader_init_device_extensions(const struct loader_instance *inst, struct loader_physical_device_term *phys_dev_term,
584 uint32_t count, VkExtensionProperties *ext_props,
585 struct loader_extension_list *ext_list) {
586 VkResult res;
587 uint32_t i;
588
589 res = loader_init_generic_list(inst, (struct loader_generic_list *)ext_list, sizeof(VkExtensionProperties));
590 if (VK_SUCCESS != res) {
591 return res;
592 }
593
594 for (i = 0; i < count; i++) {
595 res = loader_add_to_ext_list(inst, ext_list, 1, &ext_props[i]);
596 if (res != VK_SUCCESS) return res;
597 }
598
599 return VK_SUCCESS;
600 }
601
loader_add_device_extensions(const struct loader_instance * inst,PFN_vkEnumerateDeviceExtensionProperties fpEnumerateDeviceExtensionProperties,VkPhysicalDevice physical_device,const char * lib_name,struct loader_extension_list * ext_list)602 VkResult loader_add_device_extensions(const struct loader_instance *inst,
603 PFN_vkEnumerateDeviceExtensionProperties fpEnumerateDeviceExtensionProperties,
604 VkPhysicalDevice physical_device, const char *lib_name,
605 struct loader_extension_list *ext_list) {
606 uint32_t i = 0, count = 0;
607 VkResult res = VK_SUCCESS;
608 VkExtensionProperties *ext_props = NULL;
609
610 res = fpEnumerateDeviceExtensionProperties(physical_device, NULL, &count, NULL);
611 if (res != VK_SUCCESS) {
612 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
613 "loader_add_device_extensions: Error getting physical device extension info count from library %s", lib_name);
614 return res;
615 }
616 if (count > 0) {
617 ext_props = loader_stack_alloc(count * sizeof(VkExtensionProperties));
618 if (!ext_props) {
619 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
620 "loader_add_device_extensions: Failed to allocate space for device extension properties from library %s.",
621 lib_name);
622 return VK_ERROR_OUT_OF_HOST_MEMORY;
623 }
624 res = fpEnumerateDeviceExtensionProperties(physical_device, NULL, &count, ext_props);
625 if (res != VK_SUCCESS) {
626 return res;
627 }
628 for (i = 0; i < count; i++) {
629 res = loader_add_to_ext_list(inst, ext_list, 1, &ext_props[i]);
630 if (res != VK_SUCCESS) {
631 return res;
632 }
633 }
634 }
635
636 return VK_SUCCESS;
637 }
638
loader_init_generic_list(const struct loader_instance * inst,struct loader_generic_list * list_info,size_t element_size)639 VkResult loader_init_generic_list(const struct loader_instance *inst, struct loader_generic_list *list_info, size_t element_size) {
640 size_t capacity = 32 * element_size;
641 list_info->count = 0;
642 list_info->capacity = 0;
643 list_info->list = loader_instance_heap_calloc(inst, capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
644 if (list_info->list == NULL) {
645 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_init_generic_list: Failed to allocate space for generic list");
646 return VK_ERROR_OUT_OF_HOST_MEMORY;
647 }
648 list_info->capacity = capacity;
649 return VK_SUCCESS;
650 }
651
loader_destroy_generic_list(const struct loader_instance * inst,struct loader_generic_list * list)652 void loader_destroy_generic_list(const struct loader_instance *inst, struct loader_generic_list *list) {
653 loader_instance_heap_free(inst, list->list);
654 list->count = 0;
655 list->capacity = 0;
656 }
657
658 // Append non-duplicate extension properties defined in props to the given ext_list.
659 // Return - Vk_SUCCESS on success
loader_add_to_ext_list(const struct loader_instance * inst,struct loader_extension_list * ext_list,uint32_t prop_list_count,const VkExtensionProperties * props)660 VkResult loader_add_to_ext_list(const struct loader_instance *inst, struct loader_extension_list *ext_list,
661 uint32_t prop_list_count, const VkExtensionProperties *props) {
662 uint32_t i;
663 const VkExtensionProperties *cur_ext;
664
665 if (ext_list->list == NULL || ext_list->capacity == 0) {
666 VkResult res = loader_init_generic_list(inst, (struct loader_generic_list *)ext_list, sizeof(VkExtensionProperties));
667 if (VK_SUCCESS != res) {
668 return res;
669 }
670 }
671
672 for (i = 0; i < prop_list_count; i++) {
673 cur_ext = &props[i];
674
675 // look for duplicates
676 if (has_vk_extension_property(cur_ext, ext_list)) {
677 continue;
678 }
679
680 // add to list at end
681 // check for enough capacity
682 if (ext_list->count * sizeof(VkExtensionProperties) >= ext_list->capacity) {
683 void *new_ptr = loader_instance_heap_realloc(inst, ext_list->list, ext_list->capacity, ext_list->capacity * 2,
684 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
685 if (new_ptr == NULL) {
686 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
687 "loader_add_to_ext_list: Failed to reallocate space for extension list");
688 return VK_ERROR_OUT_OF_HOST_MEMORY;
689 }
690 ext_list->list = new_ptr;
691
692 // double capacity
693 ext_list->capacity *= 2;
694 }
695
696 memcpy(&ext_list->list[ext_list->count], cur_ext, sizeof(VkExtensionProperties));
697 ext_list->count++;
698 }
699 return VK_SUCCESS;
700 }
701
702 // Append one extension property defined in props with entrypoints defined in entries to the given
703 // ext_list. Do not append if a duplicate.
704 // Return - Vk_SUCCESS on success
loader_add_to_dev_ext_list(const struct loader_instance * inst,struct loader_device_extension_list * ext_list,const VkExtensionProperties * props,uint32_t entry_count,char ** entrys)705 VkResult loader_add_to_dev_ext_list(const struct loader_instance *inst, struct loader_device_extension_list *ext_list,
706 const VkExtensionProperties *props, uint32_t entry_count, char **entrys) {
707 uint32_t idx;
708 if (ext_list->list == NULL || ext_list->capacity == 0) {
709 VkResult res = loader_init_generic_list(inst, (struct loader_generic_list *)ext_list, sizeof(struct loader_dev_ext_props));
710 if (VK_SUCCESS != res) {
711 return res;
712 }
713 }
714
715 // look for duplicates
716 if (has_vk_dev_ext_property(props, ext_list)) {
717 return VK_SUCCESS;
718 }
719
720 idx = ext_list->count;
721 // add to list at end
722 // check for enough capacity
723 if (idx * sizeof(struct loader_dev_ext_props) >= ext_list->capacity) {
724 void *new_ptr = loader_instance_heap_realloc(inst, ext_list->list, ext_list->capacity, ext_list->capacity * 2,
725 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
726
727 if (NULL == new_ptr) {
728 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
729 "loader_add_to_dev_ext_list: Failed to reallocate space for device extension list");
730 return VK_ERROR_OUT_OF_HOST_MEMORY;
731 }
732 ext_list->list = new_ptr;
733
734 // double capacity
735 ext_list->capacity *= 2;
736 }
737
738 memcpy(&ext_list->list[idx].props, props, sizeof(*props));
739 ext_list->list[idx].entrypoint_count = entry_count;
740 if (entry_count == 0) {
741 ext_list->list[idx].entrypoints = NULL;
742 } else {
743 ext_list->list[idx].entrypoints =
744 loader_instance_heap_alloc(inst, sizeof(char *) * entry_count, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
745 if (ext_list->list[idx].entrypoints == NULL) {
746 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
747 "loader_add_to_dev_ext_list: Failed to allocate space for device extension entrypoint list in list %d", idx);
748 ext_list->list[idx].entrypoint_count = 0;
749 return VK_ERROR_OUT_OF_HOST_MEMORY;
750 }
751 for (uint32_t i = 0; i < entry_count; i++) {
752 ext_list->list[idx].entrypoints[i] =
753 loader_instance_heap_alloc(inst, strlen(entrys[i]) + 1, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
754 if (ext_list->list[idx].entrypoints[i] == NULL) {
755 for (uint32_t j = 0; j < i; j++) {
756 loader_instance_heap_free(inst, ext_list->list[idx].entrypoints[j]);
757 }
758 loader_instance_heap_free(inst, ext_list->list[idx].entrypoints);
759 ext_list->list[idx].entrypoint_count = 0;
760 ext_list->list[idx].entrypoints = NULL;
761 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
762 "loader_add_to_dev_ext_list: Failed to allocate space for device extension entrypoint %d name", i);
763 return VK_ERROR_OUT_OF_HOST_MEMORY;
764 }
765 strcpy(ext_list->list[idx].entrypoints[i], entrys[i]);
766 }
767 }
768 ext_list->count++;
769
770 return VK_SUCCESS;
771 }
772
773 // Prototypes needed.
774 bool loader_add_meta_layer(const struct loader_instance *inst, const struct loader_layer_properties *prop,
775 struct loader_layer_list *target_list, struct loader_layer_list *expanded_target_list,
776 const struct loader_layer_list *source_list);
777
778 // Manage lists of VkLayerProperties
loader_init_layer_list(const struct loader_instance * inst,struct loader_layer_list * list)779 static bool loader_init_layer_list(const struct loader_instance *inst, struct loader_layer_list *list) {
780 list->capacity = 32 * sizeof(struct loader_layer_properties);
781 list->list = loader_instance_heap_calloc(inst, list->capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
782 if (list->list == NULL) {
783 return false;
784 }
785 list->count = 0;
786 return true;
787 }
788
789 // Search the given array of layer names for an entry matching the given VkLayerProperties
loader_names_array_has_layer_property(const VkLayerProperties * vk_layer_prop,uint32_t layer_info_count,struct activated_layer_info * layer_info)790 bool loader_names_array_has_layer_property(const VkLayerProperties *vk_layer_prop, uint32_t layer_info_count,
791 struct activated_layer_info *layer_info) {
792 for (uint32_t i = 0; i < layer_info_count; i++) {
793 if (strcmp(vk_layer_prop->layerName, layer_info[i].name) == 0) {
794 return true;
795 }
796 }
797 return false;
798 }
799
loader_destroy_layer_list(const struct loader_instance * inst,struct loader_device * device,struct loader_layer_list * layer_list)800 void loader_destroy_layer_list(const struct loader_instance *inst, struct loader_device *device,
801 struct loader_layer_list *layer_list) {
802 if (device) {
803 loader_device_heap_free(device, layer_list->list);
804 } else {
805 loader_instance_heap_free(inst, layer_list->list);
806 }
807 layer_list->count = 0;
808 layer_list->capacity = 0;
809 layer_list->list = NULL;
810 }
811
812 // Append layer properties defined in prop_list to the given layer_info list
loader_add_layer_properties_to_list(const struct loader_instance * inst,struct loader_layer_list * list,uint32_t prop_list_count,const struct loader_layer_properties * props)813 VkResult loader_add_layer_properties_to_list(const struct loader_instance *inst, struct loader_layer_list *list,
814 uint32_t prop_list_count, const struct loader_layer_properties *props) {
815 uint32_t i;
816 struct loader_layer_properties *layer;
817
818 if (list->list == NULL || list->capacity == 0) {
819 if (!loader_init_layer_list(inst, list)) {
820 return VK_ERROR_OUT_OF_HOST_MEMORY;
821 }
822 }
823
824 if (list->list == NULL) return VK_SUCCESS;
825
826 for (i = 0; i < prop_list_count; i++) {
827 layer = (struct loader_layer_properties *)&props[i];
828
829 // Check for enough capacity
830 if (((list->count + 1) * sizeof(struct loader_layer_properties)) >= list->capacity) {
831 size_t new_capacity = list->capacity * 2;
832 void *new_ptr =
833 loader_instance_heap_realloc(inst, list->list, list->capacity, new_capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
834 if (NULL == new_ptr) {
835 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
836 "loader_add_layer_properties_to_list: Realloc failed for when attempting to add new layer");
837 return VK_ERROR_OUT_OF_HOST_MEMORY;
838 }
839 list->list = new_ptr;
840 list->capacity = new_capacity;
841 }
842
843 memcpy(&list->list[list->count], layer, sizeof(struct loader_layer_properties));
844 list->count++;
845 }
846
847 return VK_SUCCESS;
848 }
849
850 // Search the given search_list for any layers in the props list. Add these to the
851 // output layer_list.
loader_add_layer_names_to_list(const struct loader_instance * inst,struct loader_layer_list * output_list,struct loader_layer_list * expanded_output_list,uint32_t name_count,const char * const * names,const struct loader_layer_list * source_list)852 static VkResult loader_add_layer_names_to_list(const struct loader_instance *inst, struct loader_layer_list *output_list,
853 struct loader_layer_list *expanded_output_list, uint32_t name_count,
854 const char *const *names, const struct loader_layer_list *source_list) {
855 struct loader_layer_properties *layer_prop;
856 VkResult err = VK_SUCCESS;
857
858 for (uint32_t i = 0; i < name_count; i++) {
859 const char *source_name = names[i];
860 layer_prop = loader_find_layer_property(source_name, source_list);
861 if (NULL == layer_prop) {
862 loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_LAYER_BIT, 0,
863 "loader_add_layer_names_to_list: Unable to find layer %s", source_name);
864 err = VK_ERROR_LAYER_NOT_PRESENT;
865 continue;
866 }
867
868 // Make sure the layer isn't already in the output_list, skip adding it if it is.
869 if (loader_find_layer_name_in_list(source_name, output_list)) {
870 continue;
871 }
872
873 // If not a meta-layer, simply add it.
874 if (0 == (layer_prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER)) {
875 loader_add_layer_properties_to_list(inst, output_list, 1, layer_prop);
876 loader_add_layer_properties_to_list(inst, expanded_output_list, 1, layer_prop);
877 } else {
878 loader_add_meta_layer(inst, layer_prop, output_list, expanded_output_list, source_list);
879 }
880 }
881
882 return err;
883 }
884
check_expiration(const struct loader_instance * inst,const struct loader_layer_properties * prop)885 static bool check_expiration(const struct loader_instance *inst, const struct loader_layer_properties *prop) {
886 time_t current = time(NULL);
887 struct tm tm_current = *localtime(¤t);
888
889 struct tm tm_expiration;
890 tm_expiration.tm_sec = 0;
891 tm_expiration.tm_min = prop->expiration.minute;
892 tm_expiration.tm_hour = prop->expiration.hour;
893 tm_expiration.tm_mday = prop->expiration.day;
894 tm_expiration.tm_mon = prop->expiration.month - 1;
895 tm_expiration.tm_year = prop->expiration.year - 1900;
896 tm_expiration.tm_isdst = tm_current.tm_isdst;
897 // wday and yday are ignored by mktime
898 time_t expiration = mktime(&tm_expiration);
899
900 return current < expiration;
901 }
902
903 // Determine if the provided implicit layer should be enabled by querying the appropriate environmental variables.
904 // For an implicit layer, at least a disable environment variable is required.
loader_implicit_layer_is_enabled(const struct loader_instance * inst,const struct loader_layer_properties * prop)905 bool loader_implicit_layer_is_enabled(const struct loader_instance *inst, const struct loader_layer_properties *prop) {
906 bool enable = false;
907 char *env_value = NULL;
908
909 // If no enable_environment variable is specified, this implicit layer is always be enabled by default.
910 if (prop->enable_env_var.name[0] == 0) {
911 enable = true;
912 } else {
913 // Otherwise, only enable this layer if the enable environment variable is defined
914 env_value = loader_getenv(prop->enable_env_var.name, inst);
915 if (env_value && !strcmp(prop->enable_env_var.value, env_value)) {
916 enable = true;
917 }
918 loader_free_getenv(env_value, inst);
919 }
920
921 // The disable_environment has priority over everything else. If it is defined, the layer is always
922 // disabled.
923 env_value = loader_getenv(prop->disable_env_var.name, inst);
924 if (NULL != env_value) {
925 enable = false;
926 }
927 loader_free_getenv(env_value, inst);
928
929 // If this layer has an expiration, check it to determine if this layer has expired.
930 if (prop->has_expiration) {
931 enable = check_expiration(inst, prop);
932 }
933
934 // Enable this layer if it is included in the override layer
935 if (inst != NULL && inst->override_layer_present) {
936 struct loader_layer_properties *override = NULL;
937 for (uint32_t i = 0; i < inst->instance_layer_list.count; ++i) {
938 if (strcmp(inst->instance_layer_list.list[i].info.layerName, VK_OVERRIDE_LAYER_NAME) == 0) {
939 override = &inst->instance_layer_list.list[i];
940 break;
941 }
942 }
943 if (override != NULL) {
944 for (uint32_t i = 0; i < override->num_component_layers; ++i) {
945 if (strcmp(override->component_layer_names[i], prop->info.layerName) == 0) {
946 enable = true;
947 break;
948 }
949 }
950 }
951 }
952
953 return enable;
954 }
955
956 // Check the individual implicit layer for the enable/disable environment variable settings. Only add it after
957 // every check has passed indicating it should be used.
loader_add_implicit_layer(const struct loader_instance * inst,const struct loader_layer_properties * prop,struct loader_layer_list * target_list,struct loader_layer_list * expanded_target_list,const struct loader_layer_list * source_list)958 static void loader_add_implicit_layer(const struct loader_instance *inst, const struct loader_layer_properties *prop,
959 struct loader_layer_list *target_list, struct loader_layer_list *expanded_target_list,
960 const struct loader_layer_list *source_list) {
961 if (loader_implicit_layer_is_enabled(inst, prop)) {
962 if (0 == (prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER)) {
963 loader_add_layer_properties_to_list(inst, target_list, 1, prop);
964 if (NULL != expanded_target_list) {
965 loader_add_layer_properties_to_list(inst, expanded_target_list, 1, prop);
966 }
967 } else {
968 loader_add_meta_layer(inst, prop, target_list, expanded_target_list, source_list);
969 }
970 }
971 }
972
973 // Add the component layers of a meta-layer to the active list of layers
loader_add_meta_layer(const struct loader_instance * inst,const struct loader_layer_properties * prop,struct loader_layer_list * target_list,struct loader_layer_list * expanded_target_list,const struct loader_layer_list * source_list)974 bool loader_add_meta_layer(const struct loader_instance *inst, const struct loader_layer_properties *prop,
975 struct loader_layer_list *target_list, struct loader_layer_list *expanded_target_list,
976 const struct loader_layer_list *source_list) {
977 bool found = true;
978
979 // We need to add all the individual component layers
980 loader_api_version meta_layer_api_version = loader_make_version(prop->info.specVersion);
981 for (uint32_t comp_layer = 0; comp_layer < prop->num_component_layers; comp_layer++) {
982 const struct loader_layer_properties *search_prop =
983 loader_find_layer_property(prop->component_layer_names[comp_layer], source_list);
984 if (search_prop != NULL) {
985 loader_api_version search_prop_version = loader_make_version(prop->info.specVersion);
986 if (!loader_check_version_meets_required(meta_layer_api_version, search_prop_version)) {
987 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
988 "loader_add_meta_layer: Meta-layer API version %u.%u, component layer %s version %u.%u, may have "
989 "incompatibilities (Policy #LLP_LAYER_8)!",
990 meta_layer_api_version.major, meta_layer_api_version.minor, search_prop->info.layerName,
991 search_prop_version.major, search_prop_version.minor);
992 }
993
994 // If the component layer is itself an implicit layer, we need to do the implicit layer enable
995 // checks
996 if (0 == (search_prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER)) {
997 loader_add_implicit_layer(inst, search_prop, target_list, expanded_target_list, source_list);
998 } else {
999 if (0 != (search_prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER)) {
1000 found = loader_add_meta_layer(inst, search_prop, target_list, expanded_target_list, source_list);
1001 } else {
1002 loader_add_layer_properties_to_list(inst, target_list, 1, search_prop);
1003 if (NULL != expanded_target_list) {
1004 loader_add_layer_properties_to_list(inst, expanded_target_list, 1, search_prop);
1005 }
1006 }
1007 }
1008 } else {
1009 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
1010 "loader_add_meta_layer: Failed to find layer name %s component layer %s to activate (Policy #LLP_LAYER_7)",
1011 prop->component_layer_names[comp_layer], prop->component_layer_names[comp_layer]);
1012 found = false;
1013 }
1014 }
1015
1016 // Add this layer to the overall target list (not the expanded one)
1017 if (found) {
1018 loader_add_layer_properties_to_list(inst, target_list, 1, prop);
1019 }
1020
1021 return found;
1022 }
1023
1024 // Search the source_list for any layer with a name that matches the given name and a type
1025 // that matches the given type. Add all matching layers to the target_list.
loader_add_layer_name_to_list(const struct loader_instance * inst,const char * name,const enum layer_type_flags type_flags,const struct loader_layer_list * source_list,struct loader_layer_list * target_list,struct loader_layer_list * expanded_target_list)1026 VkResult loader_add_layer_name_to_list(const struct loader_instance *inst, const char *name, const enum layer_type_flags type_flags,
1027 const struct loader_layer_list *source_list, struct loader_layer_list *target_list,
1028 struct loader_layer_list *expanded_target_list) {
1029 VkResult res = VK_SUCCESS;
1030 bool found = false;
1031 for (uint32_t i = 0; i < source_list->count; i++) {
1032 struct loader_layer_properties *source_prop = &source_list->list[i];
1033 if (0 == strcmp(source_prop->info.layerName, name) && (source_prop->type_flags & type_flags) == type_flags) {
1034 // If not a meta-layer, simply add it.
1035 if (0 == (source_prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER)) {
1036 if (VK_SUCCESS == loader_add_layer_properties_to_list(inst, target_list, 1, source_prop)) {
1037 found = true;
1038 }
1039 if (VK_SUCCESS == loader_add_layer_properties_to_list(inst, expanded_target_list, 1, source_prop)) {
1040 found = true;
1041 }
1042 } else {
1043 found = loader_add_meta_layer(inst, source_prop, target_list, expanded_target_list, source_list);
1044 }
1045 }
1046 }
1047 if (!found) {
1048 if (strcmp(name, "VK_LAYER_LUNARG_standard_validation")) {
1049 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
1050 "loader_add_layer_name_to_list: Failed to find layer name %s to activate", name);
1051 } else {
1052 res = VK_ERROR_LAYER_NOT_PRESENT;
1053 loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_LAYER_BIT, 0,
1054 "Layer VK_LAYER_LUNARG_standard_validation has been changed to VK_LAYER_KHRONOS_validation. Please use the "
1055 "new version of the layer.");
1056 }
1057 }
1058 return res;
1059 }
1060
get_extension_property(const char * name,const struct loader_extension_list * list)1061 static VkExtensionProperties *get_extension_property(const char *name, const struct loader_extension_list *list) {
1062 for (uint32_t i = 0; i < list->count; i++) {
1063 if (strcmp(name, list->list[i].extensionName) == 0) return &list->list[i];
1064 }
1065 return NULL;
1066 }
1067
get_dev_extension_property(const char * name,const struct loader_device_extension_list * list)1068 static VkExtensionProperties *get_dev_extension_property(const char *name, const struct loader_device_extension_list *list) {
1069 for (uint32_t i = 0; i < list->count; i++) {
1070 if (strcmp(name, list->list[i].props.extensionName) == 0) return &list->list[i].props;
1071 }
1072 return NULL;
1073 }
1074
1075 // For Instance extensions implemented within the loader (i.e. DEBUG_REPORT
1076 // the extension must provide two entry points for the loader to use:
1077 // - "trampoline" entry point - this is the address returned by GetProcAddr
1078 // and will always do what's necessary to support a
1079 // global call.
1080 // - "terminator" function - this function will be put at the end of the
1081 // instance chain and will contain the necessary logic
1082 // to call / process the extension for the appropriate
1083 // ICDs that are available.
1084 // There is no generic mechanism for including these functions, the references
1085 // must be placed into the appropriate loader entry points.
1086 // GetInstanceProcAddr: call extension GetInstanceProcAddr to check for GetProcAddr
1087 // requests
1088 // loader_coalesce_extensions(void) - add extension records to the list of global
1089 // extension available to the app.
1090 // instance_disp - add function pointer for terminator function
1091 // to this array.
1092 // The extension itself should be in a separate file that will be linked directly
1093 // with the loader.
loader_get_icd_loader_instance_extensions(const struct loader_instance * inst,struct loader_icd_tramp_list * icd_tramp_list,struct loader_extension_list * inst_exts)1094 VkResult loader_get_icd_loader_instance_extensions(const struct loader_instance *inst, struct loader_icd_tramp_list *icd_tramp_list,
1095 struct loader_extension_list *inst_exts) {
1096 struct loader_extension_list icd_exts;
1097 VkResult res = VK_SUCCESS;
1098 char *env_value;
1099 bool filter_extensions = true;
1100
1101 loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0, "Build ICD instance extension list");
1102
1103 // Check if a user wants to disable the instance extension filtering behavior
1104 env_value = loader_getenv("VK_LOADER_DISABLE_INST_EXT_FILTER", inst);
1105 if (NULL != env_value && atoi(env_value) != 0) {
1106 filter_extensions = false;
1107 }
1108 loader_free_getenv(env_value, inst);
1109
1110 // traverse scanned icd list adding non-duplicate extensions to the list
1111 for (uint32_t i = 0; i < icd_tramp_list->count; i++) {
1112 res = loader_init_generic_list(inst, (struct loader_generic_list *)&icd_exts, sizeof(VkExtensionProperties));
1113 if (VK_SUCCESS != res) {
1114 goto out;
1115 }
1116 res = loader_add_instance_extensions(inst, icd_tramp_list->scanned_list[i].EnumerateInstanceExtensionProperties,
1117 icd_tramp_list->scanned_list[i].lib_name, &icd_exts);
1118 if (VK_SUCCESS == res) {
1119 if (filter_extensions) {
1120 // Remove any extensions not recognized by the loader
1121 for (int32_t j = 0; j < (int32_t)icd_exts.count; j++) {
1122 // See if the extension is in the list of supported extensions
1123 bool found = false;
1124 for (uint32_t k = 0; LOADER_INSTANCE_EXTENSIONS[k] != NULL; k++) {
1125 if (strcmp(icd_exts.list[j].extensionName, LOADER_INSTANCE_EXTENSIONS[k]) == 0) {
1126 found = true;
1127 break;
1128 }
1129 }
1130
1131 // If it isn't in the list, remove it
1132 if (!found) {
1133 for (uint32_t k = j + 1; k < icd_exts.count; k++) {
1134 icd_exts.list[k - 1] = icd_exts.list[k];
1135 }
1136 --icd_exts.count;
1137 --j;
1138 }
1139 }
1140 }
1141
1142 res = loader_add_to_ext_list(inst, inst_exts, icd_exts.count, icd_exts.list);
1143 }
1144 loader_destroy_generic_list(inst, (struct loader_generic_list *)&icd_exts);
1145 if (VK_SUCCESS != res) {
1146 goto out;
1147 }
1148 };
1149
1150 // Traverse loader's extensions, adding non-duplicate extensions to the list
1151 add_debug_extensions_to_ext_list(inst, inst_exts);
1152
1153 static const VkExtensionProperties portability_enumeration_extension_info[] = {
1154 {VK_KHR_PORTABILITY_ENUMERATION_EXTENSION_NAME, VK_KHR_PORTABILITY_ENUMERATION_SPEC_VERSION}};
1155
1156 // Add VK_KHR_portability_subset
1157 loader_add_to_ext_list(inst, inst_exts, sizeof(portability_enumeration_extension_info) / sizeof(VkExtensionProperties),
1158 portability_enumeration_extension_info);
1159
1160 out:
1161 return res;
1162 }
1163
loader_get_icd_and_device(const void * device,struct loader_device ** found_dev,uint32_t * icd_index)1164 struct loader_icd_term *loader_get_icd_and_device(const void *device, struct loader_device **found_dev, uint32_t *icd_index) {
1165 loader_platform_thread_lock_mutex(&loader_global_instance_list_lock);
1166 *found_dev = NULL;
1167 for (struct loader_instance *inst = loader.instances; inst; inst = inst->next) {
1168 uint32_t index = 0;
1169 for (struct loader_icd_term *icd_term = inst->icd_terms; icd_term; icd_term = icd_term->next) {
1170 for (struct loader_device *dev = icd_term->logical_device_list; dev; dev = dev->next)
1171 // Value comparison of device prevents object wrapping by layers
1172 if (loader_get_dispatch(dev->icd_device) == loader_get_dispatch(device) ||
1173 (dev->chain_device != VK_NULL_HANDLE &&
1174 loader_get_dispatch(dev->chain_device) == loader_get_dispatch(device))) {
1175 *found_dev = dev;
1176 if (NULL != icd_index) {
1177 *icd_index = index;
1178 }
1179 loader_platform_thread_unlock_mutex(&loader_global_instance_list_lock);
1180 return icd_term;
1181 }
1182 index++;
1183 }
1184 }
1185 loader_platform_thread_unlock_mutex(&loader_global_instance_list_lock);
1186 return NULL;
1187 }
1188
loader_destroy_logical_device(const struct loader_instance * inst,struct loader_device * dev,const VkAllocationCallbacks * pAllocator)1189 void loader_destroy_logical_device(const struct loader_instance *inst, struct loader_device *dev,
1190 const VkAllocationCallbacks *pAllocator) {
1191 if (pAllocator) {
1192 dev->alloc_callbacks = *pAllocator;
1193 }
1194 loader_destroy_layer_list(inst, dev, &dev->expanded_activated_layer_list);
1195 loader_destroy_layer_list(inst, dev, &dev->app_activated_layer_list);
1196 loader_device_heap_free(dev, dev);
1197 }
1198
loader_create_logical_device(const struct loader_instance * inst,const VkAllocationCallbacks * pAllocator)1199 struct loader_device *loader_create_logical_device(const struct loader_instance *inst, const VkAllocationCallbacks *pAllocator) {
1200 struct loader_device *new_dev;
1201 new_dev = loader_calloc(pAllocator, sizeof(struct loader_device), VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1202
1203 if (!new_dev) {
1204 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_create_logical_device: Failed to alloc struct loader_device");
1205 return NULL;
1206 }
1207
1208 if (pAllocator) {
1209 new_dev->alloc_callbacks = *pAllocator;
1210 }
1211
1212 return new_dev;
1213 }
1214
loader_add_logical_device(const struct loader_instance * inst,struct loader_icd_term * icd_term,struct loader_device * dev)1215 void loader_add_logical_device(const struct loader_instance *inst, struct loader_icd_term *icd_term, struct loader_device *dev) {
1216 dev->next = icd_term->logical_device_list;
1217 icd_term->logical_device_list = dev;
1218 }
1219
loader_remove_logical_device(const struct loader_instance * inst,struct loader_icd_term * icd_term,struct loader_device * found_dev,const VkAllocationCallbacks * pAllocator)1220 void loader_remove_logical_device(const struct loader_instance *inst, struct loader_icd_term *icd_term,
1221 struct loader_device *found_dev, const VkAllocationCallbacks *pAllocator) {
1222 struct loader_device *dev, *prev_dev;
1223
1224 if (!icd_term || !found_dev) return;
1225
1226 prev_dev = NULL;
1227 dev = icd_term->logical_device_list;
1228 while (dev && dev != found_dev) {
1229 prev_dev = dev;
1230 dev = dev->next;
1231 }
1232
1233 if (prev_dev)
1234 prev_dev->next = found_dev->next;
1235 else
1236 icd_term->logical_device_list = found_dev->next;
1237 loader_destroy_logical_device(inst, found_dev, pAllocator);
1238 }
1239
loader_icd_destroy(struct loader_instance * ptr_inst,struct loader_icd_term * icd_term,const VkAllocationCallbacks * pAllocator)1240 void loader_icd_destroy(struct loader_instance *ptr_inst, struct loader_icd_term *icd_term,
1241 const VkAllocationCallbacks *pAllocator) {
1242 ptr_inst->total_icd_count--;
1243 for (struct loader_device *dev = icd_term->logical_device_list; dev;) {
1244 struct loader_device *next_dev = dev->next;
1245 loader_destroy_logical_device(ptr_inst, dev, pAllocator);
1246 dev = next_dev;
1247 }
1248
1249 loader_instance_heap_free(ptr_inst, icd_term);
1250 }
1251
loader_icd_add(struct loader_instance * ptr_inst,const struct loader_scanned_icd * scanned_icd)1252 static struct loader_icd_term *loader_icd_add(struct loader_instance *ptr_inst, const struct loader_scanned_icd *scanned_icd) {
1253 struct loader_icd_term *icd_term;
1254
1255 icd_term = loader_instance_heap_calloc(ptr_inst, sizeof(struct loader_icd_term), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
1256 if (!icd_term) {
1257 return NULL;
1258 }
1259
1260 icd_term->scanned_icd = scanned_icd;
1261 icd_term->this_instance = ptr_inst;
1262
1263 // Prepend to the list
1264 icd_term->next = ptr_inst->icd_terms;
1265 ptr_inst->icd_terms = icd_term;
1266 ptr_inst->total_icd_count++;
1267
1268 return icd_term;
1269 }
1270
1271 // Determine the ICD interface version to use.
1272 // @param icd
1273 // @param pVersion Output parameter indicating which version to use or 0 if
1274 // the negotiation API is not supported by the ICD
1275 // @return bool indicating true if the selected interface version is supported
1276 // by the loader, false indicates the version is not supported
loader_get_icd_interface_version(PFN_vkNegotiateLoaderICDInterfaceVersion fp_negotiate_icd_version,uint32_t * pVersion)1277 bool loader_get_icd_interface_version(PFN_vkNegotiateLoaderICDInterfaceVersion fp_negotiate_icd_version, uint32_t *pVersion) {
1278 if (fp_negotiate_icd_version == NULL) {
1279 // ICD does not support the negotiation API, it supports version 0 or 1
1280 // calling code must determine if it is version 0 or 1
1281 *pVersion = 0;
1282 } else {
1283 // ICD supports the negotiation API, so call it with the loader's
1284 // latest version supported
1285 *pVersion = CURRENT_LOADER_ICD_INTERFACE_VERSION;
1286 VkResult result = fp_negotiate_icd_version(pVersion);
1287
1288 if (result == VK_ERROR_INCOMPATIBLE_DRIVER) {
1289 // ICD no longer supports the loader's latest interface version so
1290 // fail loading the ICD
1291 return false;
1292 }
1293 }
1294
1295 #if MIN_SUPPORTED_LOADER_ICD_INTERFACE_VERSION > 0
1296 if (*pVersion < MIN_SUPPORTED_LOADER_ICD_INTERFACE_VERSION) {
1297 // Loader no longer supports the ICD's latest interface version so fail
1298 // loading the ICD
1299 return false;
1300 }
1301 #endif
1302 return true;
1303 }
1304
loader_scanned_icd_clear(const struct loader_instance * inst,struct loader_icd_tramp_list * icd_tramp_list)1305 void loader_scanned_icd_clear(const struct loader_instance *inst, struct loader_icd_tramp_list *icd_tramp_list) {
1306 if (0 != icd_tramp_list->capacity) {
1307 for (uint32_t i = 0; i < icd_tramp_list->count; i++) {
1308 loader_platform_close_library(icd_tramp_list->scanned_list[i].handle);
1309 loader_instance_heap_free(inst, icd_tramp_list->scanned_list[i].lib_name);
1310 }
1311 loader_instance_heap_free(inst, icd_tramp_list->scanned_list);
1312 icd_tramp_list->capacity = 0;
1313 icd_tramp_list->count = 0;
1314 icd_tramp_list->scanned_list = NULL;
1315 }
1316 }
1317
loader_scanned_icd_init(const struct loader_instance * inst,struct loader_icd_tramp_list * icd_tramp_list)1318 static VkResult loader_scanned_icd_init(const struct loader_instance *inst, struct loader_icd_tramp_list *icd_tramp_list) {
1319 VkResult err = VK_SUCCESS;
1320 loader_scanned_icd_clear(inst, icd_tramp_list);
1321 icd_tramp_list->capacity = 8 * sizeof(struct loader_scanned_icd);
1322 icd_tramp_list->scanned_list = loader_instance_heap_alloc(inst, icd_tramp_list->capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
1323 if (NULL == icd_tramp_list->scanned_list) {
1324 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
1325 "loader_scanned_icd_init: Realloc failed for layer list when attempting to add new layer");
1326 err = VK_ERROR_OUT_OF_HOST_MEMORY;
1327 }
1328 return err;
1329 }
1330
loader_scanned_icd_add(const struct loader_instance * inst,struct loader_icd_tramp_list * icd_tramp_list,const char * filename,uint32_t api_version,enum loader_layer_library_status * lib_status)1331 static VkResult loader_scanned_icd_add(const struct loader_instance *inst, struct loader_icd_tramp_list *icd_tramp_list,
1332 const char *filename, uint32_t api_version, enum loader_layer_library_status *lib_status) {
1333 loader_platform_dl_handle handle;
1334 PFN_vkCreateInstance fp_create_inst;
1335 PFN_vkEnumerateInstanceExtensionProperties fp_get_inst_ext_props;
1336 PFN_vkGetInstanceProcAddr fp_get_proc_addr;
1337 PFN_GetPhysicalDeviceProcAddr fp_get_phys_dev_proc_addr = NULL;
1338 PFN_vkNegotiateLoaderICDInterfaceVersion fp_negotiate_icd_version;
1339 #if defined(VK_USE_PLATFORM_WIN32_KHR)
1340 PFN_vk_icdEnumerateAdapterPhysicalDevices fp_enum_dxgi_adapter_phys_devs = NULL;
1341 #endif
1342 struct loader_scanned_icd *new_scanned_icd;
1343 uint32_t interface_vers;
1344 VkResult res = VK_SUCCESS;
1345
1346 // TODO implement smarter opening/closing of libraries. For now this
1347 // function leaves libraries open and the scanned_icd_clear closes them
1348 #if defined(__Fuchsia__)
1349 handle = loader_platform_open_driver(filename);
1350 #else
1351 handle = loader_platform_open_library(filename);
1352 #endif
1353 if (NULL == handle) {
1354 loader_handle_load_library_error(inst, filename, lib_status);
1355 res = VK_ERROR_INCOMPATIBLE_DRIVER;
1356 goto out;
1357 }
1358
1359 // Get and settle on an ICD interface version
1360 fp_negotiate_icd_version = loader_platform_get_proc_address(handle, "vk_icdNegotiateLoaderICDInterfaceVersion");
1361
1362 if (!loader_get_icd_interface_version(fp_negotiate_icd_version, &interface_vers)) {
1363 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
1364 "loader_scanned_icd_add: ICD %s doesn't support interface version compatible with loader, skip this ICD.",
1365 filename);
1366 goto out;
1367 }
1368
1369 fp_get_proc_addr = loader_platform_get_proc_address(handle, "vk_icdGetInstanceProcAddr");
1370 if (NULL == fp_get_proc_addr) {
1371 if (interface_vers != 0) {
1372 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
1373 "loader_scanned_icd_add: ICD %s reports an interface version of %d but doesn't export "
1374 "vk_icdGetInstanceProcAddr, skip "
1375 "this ICD.",
1376 filename, interface_vers);
1377 goto out;
1378 }
1379 // Use deprecated interface from version 0
1380 fp_get_proc_addr = loader_platform_get_proc_address(handle, "vkGetInstanceProcAddr");
1381 if (NULL == fp_get_proc_addr) {
1382 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
1383 "loader_scanned_icd_add: Attempt to retrieve either \'vkGetInstanceProcAddr\' or "
1384 "\'vk_icdGetInstanceProcAddr\' from ICD %s failed.",
1385 filename);
1386 goto out;
1387 } else {
1388 loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
1389 "loader_scanned_icd_add: Using deprecated ICD interface of \'vkGetInstanceProcAddr\' instead of "
1390 "\'vk_icdGetInstanceProcAddr\' for ICD %s",
1391 filename);
1392 }
1393 fp_create_inst = loader_platform_get_proc_address(handle, "vkCreateInstance");
1394 if (NULL == fp_create_inst) {
1395 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
1396 "loader_scanned_icd_add: Failed querying \'vkCreateInstance\' via dlsym/loadlibrary for ICD %s", filename);
1397 goto out;
1398 }
1399 fp_get_inst_ext_props = loader_platform_get_proc_address(handle, "vkEnumerateInstanceExtensionProperties");
1400 if (NULL == fp_get_inst_ext_props) {
1401 loader_log(
1402 inst, VULKAN_LOADER_ERROR_BIT, 0,
1403 "loader_scanned_icd_add: Could not get \'vkEnumerateInstanceExtensionProperties\' via dlsym/loadlibrary for ICD %s",
1404 filename);
1405 goto out;
1406 }
1407 } else {
1408 // Use newer interface version 1 or later
1409 if (interface_vers == 0) {
1410 interface_vers = 1;
1411 }
1412
1413 fp_create_inst = (PFN_vkCreateInstance)fp_get_proc_addr(NULL, "vkCreateInstance");
1414 if (NULL == fp_create_inst) {
1415 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
1416 "loader_scanned_icd_add: Could not get \'vkCreateInstance\' via \'vk_icdGetInstanceProcAddr\' for ICD %s",
1417 filename);
1418 goto out;
1419 }
1420 fp_get_inst_ext_props =
1421 (PFN_vkEnumerateInstanceExtensionProperties)fp_get_proc_addr(NULL, "vkEnumerateInstanceExtensionProperties");
1422 if (NULL == fp_get_inst_ext_props) {
1423 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
1424 "loader_scanned_icd_add: Could not get \'vkEnumerateInstanceExtensionProperties\' via "
1425 "\'vk_icdGetInstanceProcAddr\' for ICD %s",
1426 filename);
1427 goto out;
1428 }
1429 fp_get_phys_dev_proc_addr = loader_platform_get_proc_address(handle, "vk_icdGetPhysicalDeviceProcAddr");
1430 #if defined(VK_USE_PLATFORM_WIN32_KHR)
1431 if (interface_vers >= 6) {
1432 fp_enum_dxgi_adapter_phys_devs = loader_platform_get_proc_address(handle, "vk_icdEnumerateAdapterPhysicalDevices");
1433 }
1434 #endif
1435 }
1436
1437 // check for enough capacity
1438 if ((icd_tramp_list->count * sizeof(struct loader_scanned_icd)) >= icd_tramp_list->capacity) {
1439 void *new_ptr = loader_instance_heap_realloc(inst, icd_tramp_list->scanned_list, icd_tramp_list->capacity,
1440 icd_tramp_list->capacity * 2, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
1441 if (NULL == new_ptr) {
1442 res = VK_ERROR_OUT_OF_HOST_MEMORY;
1443 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_scanned_icd_add: Realloc failed on icd library list for ICD %s",
1444 filename);
1445 goto out;
1446 }
1447 icd_tramp_list->scanned_list = new_ptr;
1448
1449 // double capacity
1450 icd_tramp_list->capacity *= 2;
1451 }
1452
1453 loader_api_version api_version_struct = loader_make_version(api_version);
1454 if (interface_vers <= 4 && loader_check_version_meets_required(LOADER_VERSION_1_1_0, api_version_struct)) {
1455 loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
1456 "loader_scanned_icd_add: Driver %s supports Vulkan %u.%u, but only supports loader interface version %u."
1457 " Interface version 5 or newer required to support this version of Vulkan (Policy #LDP_DRIVER_7)",
1458 filename, api_version_struct.major, api_version_struct.minor, interface_vers);
1459 }
1460
1461 new_scanned_icd = &(icd_tramp_list->scanned_list[icd_tramp_list->count]);
1462 new_scanned_icd->handle = handle;
1463 new_scanned_icd->api_version = api_version;
1464 new_scanned_icd->GetInstanceProcAddr = fp_get_proc_addr;
1465 new_scanned_icd->GetPhysicalDeviceProcAddr = fp_get_phys_dev_proc_addr;
1466 new_scanned_icd->EnumerateInstanceExtensionProperties = fp_get_inst_ext_props;
1467 new_scanned_icd->CreateInstance = fp_create_inst;
1468 #if defined(VK_USE_PLATFORM_WIN32_KHR)
1469 new_scanned_icd->EnumerateAdapterPhysicalDevices = fp_enum_dxgi_adapter_phys_devs;
1470 #endif
1471 new_scanned_icd->interface_version = interface_vers;
1472
1473 new_scanned_icd->lib_name = (char *)loader_instance_heap_alloc(inst, strlen(filename) + 1, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
1474 if (NULL == new_scanned_icd->lib_name) {
1475 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_scanned_icd_add: Out of memory can't add ICD %s", filename);
1476 res = VK_ERROR_OUT_OF_HOST_MEMORY;
1477 goto out;
1478 }
1479 strcpy(new_scanned_icd->lib_name, filename);
1480 icd_tramp_list->count++;
1481
1482 out:
1483
1484 return res;
1485 }
1486
loader_initialize(void)1487 void loader_initialize(void) {
1488 // initialize mutexes
1489 loader_platform_thread_create_mutex(&loader_lock);
1490 loader_platform_thread_create_mutex(&loader_json_lock);
1491 loader_platform_thread_create_mutex(&loader_preload_icd_lock);
1492 loader_platform_thread_create_mutex(&loader_global_instance_list_lock);
1493
1494 // initialize logging
1495 loader_debug_init();
1496 #if defined(_WIN32)
1497 windows_initialization();
1498 #endif
1499
1500 loader_api_version version = loader_make_full_version(VK_HEADER_VERSION_COMPLETE);
1501 loader_log(NULL, VULKAN_LOADER_INFO_BIT, 0, "Vulkan Loader Version %d.%d.%d", version.major, version.minor, version.patch);
1502
1503 #if defined(GIT_BRANCH_NAME) && defined(GIT_TAG_INFO)
1504 loader_log(NULL, VULKAN_LOADER_INFO_BIT, 0, "[Vulkan Loader Git - Tag: " GIT_BRANCH_NAME ", Branch/Commit: " GIT_TAG_INFO "]");
1505 #endif
1506 }
1507
loader_release()1508 void loader_release() {
1509 // Guarantee release of the preloaded ICD libraries. This may have already been called in vkDestroyInstance.
1510 loader_unload_preloaded_icds();
1511
1512 // release mutexes
1513 loader_platform_thread_delete_mutex(&loader_lock);
1514 loader_platform_thread_delete_mutex(&loader_json_lock);
1515 loader_platform_thread_delete_mutex(&loader_preload_icd_lock);
1516 loader_platform_thread_delete_mutex(&loader_global_instance_list_lock);
1517 }
1518
1519 // Preload the ICD libraries that are likely to be needed so we don't repeatedly load/unload them later
loader_preload_icds(void)1520 void loader_preload_icds(void) {
1521 loader_platform_thread_lock_mutex(&loader_preload_icd_lock);
1522
1523 // Already preloaded, skip loading again.
1524 if (scanned_icds.scanned_list != NULL) {
1525 loader_platform_thread_unlock_mutex(&loader_preload_icd_lock);
1526 return;
1527 }
1528
1529 memset(&scanned_icds, 0, sizeof(scanned_icds));
1530 VkResult result = loader_icd_scan(NULL, &scanned_icds, NULL);
1531 if (result != VK_SUCCESS) {
1532 loader_scanned_icd_clear(NULL, &scanned_icds);
1533 }
1534 loader_platform_thread_unlock_mutex(&loader_preload_icd_lock);
1535 }
1536
1537 // Release the ICD libraries that were preloaded
loader_unload_preloaded_icds(void)1538 void loader_unload_preloaded_icds(void) {
1539 loader_platform_thread_lock_mutex(&loader_preload_icd_lock);
1540 loader_scanned_icd_clear(NULL, &scanned_icds);
1541 loader_platform_thread_unlock_mutex(&loader_preload_icd_lock);
1542 }
1543
1544 #if !defined(_WIN32)
loader_init_library()1545 __attribute__((constructor)) void loader_init_library() { loader_initialize(); }
1546
loader_free_library()1547 __attribute__((destructor)) void loader_free_library() { loader_release(); }
1548 #endif
1549
1550 // Get next file or dirname given a string list or registry key path
1551 //
1552 // \returns
1553 // A pointer to first char in the next path.
1554 // The next path (or NULL) in the list is returned in next_path.
1555 // Note: input string is modified in some cases. PASS IN A COPY!
loader_get_next_path(char * path)1556 char *loader_get_next_path(char *path) {
1557 uint32_t len;
1558 char *next;
1559
1560 if (path == NULL) return NULL;
1561 next = strchr(path, PATH_SEPARATOR);
1562 if (next == NULL) {
1563 len = (uint32_t)strlen(path);
1564 next = path + len;
1565 } else {
1566 *next = '\0';
1567 next++;
1568 }
1569
1570 return next;
1571 }
1572
1573 // Given a path which is absolute or relative, expand the path if relative or
1574 // leave the path unmodified if absolute. The base path to prepend to relative
1575 // paths is given in rel_base.
1576 //
1577 // @return - A string in out_fullpath of the full absolute path
loader_expand_path(const char * path,const char * rel_base,size_t out_size,char * out_fullpath)1578 static void loader_expand_path(const char *path, const char *rel_base, size_t out_size, char *out_fullpath) {
1579 if (loader_platform_is_path_absolute(path)) {
1580 // do not prepend a base to an absolute path
1581 rel_base = "";
1582 }
1583
1584 loader_platform_combine_path(out_fullpath, out_size, rel_base, path, NULL);
1585 }
1586
1587 // Given a filename (file) and a list of paths (dir), try to find an existing
1588 // file in the paths. If filename already is a path then no searching in the given paths.
1589 //
1590 // @return - A string in out_fullpath of either the full path or file.
loader_get_fullpath(const char * file,const char * in_dirs,size_t out_size,char * out_fullpath)1591 static void loader_get_fullpath(const char *file, const char *in_dirs, size_t out_size, char *out_fullpath) {
1592 if (!loader_platform_is_path(file) && *in_dirs) {
1593 char *dirs_copy, *dir, *next_dir;
1594
1595 dirs_copy = loader_stack_alloc(strlen(in_dirs) + 1);
1596 strcpy(dirs_copy, in_dirs);
1597
1598 // find if file exists after prepending paths in given list
1599 // for (dir = dirs_copy; *dir && (next_dir = loader_get_next_path(dir)); dir = next_dir) {
1600 dir = dirs_copy;
1601 next_dir = loader_get_next_path(dir);
1602 while (*dir && next_dir) {
1603 loader_platform_combine_path(out_fullpath, out_size, dir, file, NULL);
1604 if (loader_platform_file_exists(out_fullpath)) {
1605 return;
1606 }
1607 dir = next_dir;
1608 next_dir = loader_get_next_path(dir);
1609 }
1610 }
1611
1612 (void)snprintf(out_fullpath, out_size, "%s", file);
1613 }
1614
1615 // Read a JSON file into a buffer.
1616 //
1617 // @return - A pointer to a cJSON object representing the JSON parse tree.
1618 // This returned buffer should be freed by caller.
loader_get_json(const struct loader_instance * inst,const char * filename,cJSON ** json)1619 static VkResult loader_get_json(const struct loader_instance *inst, const char *filename, cJSON **json) {
1620 FILE *file = NULL;
1621 char *json_buf = NULL;
1622 size_t len;
1623 VkResult res = VK_SUCCESS;
1624
1625 assert(json != NULL);
1626
1627 *json = NULL;
1628
1629 #if defined(_WIN32)
1630 int filename_utf16_size = MultiByteToWideChar(CP_UTF8, 0, filename, -1, NULL, 0);
1631 if (filename_utf16_size > 0) {
1632 wchar_t *filename_utf16 = (wchar_t *)loader_stack_alloc(filename_utf16_size * sizeof(wchar_t));
1633 if (MultiByteToWideChar(CP_UTF8, 0, filename, -1, filename_utf16, filename_utf16_size) == filename_utf16_size) {
1634 file = _wfopen(filename_utf16, L"rb");
1635 }
1636 }
1637 #else
1638 file = fopen(filename, "rb");
1639 #endif
1640
1641 if (!file) {
1642 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_get_json: Failed to open JSON file %s", filename);
1643 res = VK_ERROR_INITIALIZATION_FAILED;
1644 goto out;
1645 }
1646 // NOTE: We can't just use fseek(file, 0, SEEK_END) because that isn't guaranteed to be supported on all systems
1647 size_t fread_ret_count = 0;
1648 do {
1649 char buffer[256];
1650 fread_ret_count = fread(buffer, 1, 256, file);
1651 } while (fread_ret_count == 256 && !feof(file));
1652 len = ftell(file);
1653 fseek(file, 0, SEEK_SET);
1654 json_buf = (char *)loader_instance_heap_alloc(inst, len + 1, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
1655 if (json_buf == NULL) {
1656 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
1657 "loader_get_json: Failed to allocate space for JSON file %s buffer of length %d", filename, len);
1658 res = VK_ERROR_OUT_OF_HOST_MEMORY;
1659 goto out;
1660 }
1661 if (fread(json_buf, sizeof(char), len, file) != len) {
1662 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_get_json: Failed to read JSON file %s.", filename);
1663 res = VK_ERROR_INITIALIZATION_FAILED;
1664 goto out;
1665 }
1666 json_buf[len] = '\0';
1667
1668 // Can't be a valid json if the string is of length zero
1669 if (len == 0) {
1670 res = VK_ERROR_INITIALIZATION_FAILED;
1671 goto out;
1672 }
1673 // Parse text from file
1674 *json = cJSON_Parse(inst ? &inst->alloc_callbacks : NULL, json_buf);
1675 if (*json == NULL) {
1676 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
1677 "loader_get_json: Failed to parse JSON file %s, this is usually because something ran out of memory.", filename);
1678 res = VK_ERROR_OUT_OF_HOST_MEMORY;
1679 goto out;
1680 }
1681
1682 out:
1683 loader_instance_heap_free(inst, json_buf);
1684 if (NULL != file) {
1685 fclose(file);
1686 }
1687
1688 return res;
1689 }
1690
1691 // Verify that all component layers in a meta-layer are valid.
verify_meta_layer_component_layers(const struct loader_instance * inst,struct loader_layer_properties * prop,struct loader_layer_list * instance_layers)1692 static bool verify_meta_layer_component_layers(const struct loader_instance *inst, struct loader_layer_properties *prop,
1693 struct loader_layer_list *instance_layers) {
1694 bool success = true;
1695 loader_api_version meta_layer_version = loader_make_version(prop->info.specVersion);
1696
1697 for (uint32_t comp_layer = 0; comp_layer < prop->num_component_layers; comp_layer++) {
1698 struct loader_layer_properties *comp_prop =
1699 loader_find_layer_property(prop->component_layer_names[comp_layer], instance_layers);
1700 if (comp_prop == NULL) {
1701 loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
1702 "verify_meta_layer_component_layers: Meta-layer %s can't find component layer %s at index %d."
1703 " Skipping this layer.",
1704 prop->info.layerName, prop->component_layer_names[comp_layer], comp_layer);
1705
1706 success = false;
1707 break;
1708 }
1709
1710 // Check the version of each layer, they need to be at least MAJOR and MINOR
1711 loader_api_version comp_prop_version = loader_make_version(comp_prop->info.specVersion);
1712 if (!loader_check_version_meets_required(meta_layer_version, comp_prop_version)) {
1713 loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
1714 "verify_meta_layer_component_layers: Meta-layer uses API version %d.%d, but component "
1715 "layer %d has API version %d.%d that is lower. Skipping this layer.",
1716 meta_layer_version.major, meta_layer_version.minor, comp_layer, comp_prop_version.major,
1717 comp_prop_version.minor);
1718
1719 success = false;
1720 break;
1721 }
1722
1723 // Make sure the layer isn't using it's own name
1724 if (!strcmp(prop->info.layerName, prop->component_layer_names[comp_layer])) {
1725 loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
1726 "verify_meta_layer_component_layers: Meta-layer %s lists itself in its component layer "
1727 "list at index %d. Skipping this layer.",
1728 prop->info.layerName, comp_layer);
1729
1730 success = false;
1731 break;
1732 }
1733 if (comp_prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER) {
1734 loader_log(inst, VULKAN_LOADER_INFO_BIT, 0,
1735 "verify_meta_layer_component_layers: Adding meta-layer %s which also contains meta-layer %s",
1736 prop->info.layerName, comp_prop->info.layerName);
1737
1738 // Make sure if the layer is using a meta-layer in its component list that we also verify that.
1739 if (!verify_meta_layer_component_layers(inst, comp_prop, instance_layers)) {
1740 loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
1741 "Meta-layer %s component layer %s can not find all component layers."
1742 " Skipping this layer.",
1743 prop->info.layerName, prop->component_layer_names[comp_layer]);
1744 success = false;
1745 break;
1746 }
1747 }
1748
1749 // Add any instance and device extensions from component layers to this layer
1750 // list, so that anyone querying extensions will only need to look at the meta-layer
1751 for (uint32_t ext = 0; ext < comp_prop->instance_extension_list.count; ext++) {
1752 loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0, "Meta-layer %s component layer %s adding instance extension %s",
1753 prop->info.layerName, prop->component_layer_names[comp_layer],
1754 comp_prop->instance_extension_list.list[ext].extensionName);
1755
1756 if (!has_vk_extension_property(&comp_prop->instance_extension_list.list[ext], &prop->instance_extension_list)) {
1757 loader_add_to_ext_list(inst, &prop->instance_extension_list, 1, &comp_prop->instance_extension_list.list[ext]);
1758 }
1759 }
1760
1761 for (uint32_t ext = 0; ext < comp_prop->device_extension_list.count; ext++) {
1762 loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0, "Meta-layer %s component layer %s adding device extension %s",
1763 prop->info.layerName, prop->component_layer_names[comp_layer],
1764 comp_prop->device_extension_list.list[ext].props.extensionName);
1765
1766 if (!has_vk_dev_ext_property(&comp_prop->device_extension_list.list[ext].props, &prop->device_extension_list)) {
1767 loader_add_to_dev_ext_list(inst, &prop->device_extension_list, &comp_prop->device_extension_list.list[ext].props, 0,
1768 NULL);
1769 }
1770 }
1771 }
1772 if (success) {
1773 loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0,
1774 "Meta-layer %s all %d component layers appear to be valid.", prop->info.layerName, prop->num_component_layers);
1775
1776 // If layer logging is on, list the internals included in the meta-layer
1777 if ((loader_get_debug_level() & VULKAN_LOADER_LAYER_BIT) != 0) {
1778 for (uint32_t comp_layer = 0; comp_layer < prop->num_component_layers; comp_layer++) {
1779 loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " [%d] %s", comp_layer, prop->component_layer_names[comp_layer]);
1780 }
1781 }
1782 }
1783 return success;
1784 }
1785
1786 // Verify that all meta-layers in a layer list are valid.
verify_all_meta_layers(struct loader_instance * inst,struct loader_layer_list * instance_layers,bool * override_layer_present)1787 static void verify_all_meta_layers(struct loader_instance *inst, struct loader_layer_list *instance_layers,
1788 bool *override_layer_present) {
1789 *override_layer_present = false;
1790 for (int32_t i = 0; i < (int32_t)instance_layers->count; i++) {
1791 struct loader_layer_properties *prop = &instance_layers->list[i];
1792
1793 // If this is a meta-layer, make sure it is valid
1794 if ((prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER) &&
1795 !verify_meta_layer_component_layers(inst, prop, instance_layers)) {
1796 loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0,
1797 "Removing meta-layer %s from instance layer list since it appears invalid.", prop->info.layerName);
1798
1799 loader_remove_layer_in_list(inst, instance_layers, i);
1800 i--;
1801
1802 } else if (prop->is_override && loader_implicit_layer_is_enabled(inst, prop)) {
1803 *override_layer_present = true;
1804 }
1805 }
1806 }
1807
1808 // If the current working directory matches any app_key_path of the layers, remove all other override layers.
1809 // Otherwise if no matching app_key was found, remove all but the global override layer, which has no app_key_path.
remove_all_non_valid_override_layers(struct loader_instance * inst,struct loader_layer_list * instance_layers)1810 static void remove_all_non_valid_override_layers(struct loader_instance *inst, struct loader_layer_list *instance_layers) {
1811 if (instance_layers == NULL) {
1812 return;
1813 }
1814
1815 char cur_path[MAX_STRING_SIZE];
1816 char *ret = loader_platform_executable_path(cur_path, sizeof(cur_path));
1817 if (ret == NULL) {
1818 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
1819 "remove_all_non_valid_override_layers: Failed to get executable path and name");
1820 return;
1821 }
1822
1823 // Find out if there is an override layer with same the app_key_path as the path to the current executable.
1824 // If more than one is found, remove it and use the first layer
1825 // Remove any layers which aren't global and do not have the same app_key_path as the path to the current executable.
1826 bool found_active_override_layer = false;
1827 int global_layer_index = -1;
1828 for (uint32_t i = 0; i < instance_layers->count; i++) {
1829 struct loader_layer_properties *props = &instance_layers->list[i];
1830 if (strcmp(props->info.layerName, VK_OVERRIDE_LAYER_NAME) == 0) {
1831 if (props->num_app_key_paths > 0) { // not the global layer
1832 for (uint32_t j = 0; j < props->num_app_key_paths; j++) {
1833 if (strcmp(props->app_key_paths[j], cur_path) == 0) {
1834 if (!found_active_override_layer) {
1835 found_active_override_layer = true;
1836 } else {
1837 loader_log(
1838 inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
1839 "remove_all_non_valid_override_layers: Multiple override layers where the same path in app_keys "
1840 "was found. Using the first layer found");
1841
1842 // Remove duplicate active override layers that have the same app_key_path
1843 loader_remove_layer_in_list(inst, instance_layers, i);
1844 i--;
1845 }
1846 }
1847 }
1848 if (!found_active_override_layer) {
1849 loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0,
1850 "--Override layer found but not used because app \'%s\' is not in \'app_keys\' list!", cur_path);
1851
1852 // Remove non-global override layers that don't have an app_key that matches cur_path
1853 loader_remove_layer_in_list(inst, instance_layers, i);
1854 i--;
1855 }
1856 } else {
1857 if (global_layer_index == -1) {
1858 global_layer_index = i;
1859 } else {
1860 loader_log(
1861 inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
1862 "remove_all_non_valid_override_layers: Multiple global override layers found. Using the first global "
1863 "layer found");
1864 loader_remove_layer_in_list(inst, instance_layers, i);
1865 i--;
1866 }
1867 }
1868 }
1869 }
1870 // Remove global layer if layer with same the app_key_path as the path to the current executable is found
1871 if (found_active_override_layer && global_layer_index >= 0) {
1872 loader_remove_layer_in_list(inst, instance_layers, global_layer_index);
1873 }
1874 // Should be at most 1 override layer in the list now.
1875 if (found_active_override_layer) {
1876 loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Using the override layer for app key %s", cur_path);
1877 } else if (global_layer_index >= 0) {
1878 loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Using the global override layer");
1879 }
1880 }
1881
loader_read_layer_json(const struct loader_instance * inst,struct loader_layer_list * layer_instance_list,cJSON * layer_node,loader_api_version version,cJSON * item,bool is_implicit,char * filename)1882 static VkResult loader_read_layer_json(const struct loader_instance *inst, struct loader_layer_list *layer_instance_list,
1883 cJSON *layer_node, loader_api_version version, cJSON *item, bool is_implicit,
1884 char *filename) {
1885 char *temp;
1886 char *name, *type, *library_path_str, *api_version;
1887 char *implementation_version, *description;
1888 cJSON *ext_item;
1889 cJSON *library_path;
1890 cJSON *component_layers;
1891 cJSON *override_paths;
1892 cJSON *blacklisted_layers;
1893 cJSON *disable_environment = NULL;
1894 VkExtensionProperties ext_prop;
1895 VkResult result = VK_ERROR_INITIALIZATION_FAILED;
1896 struct loader_layer_properties *props = NULL;
1897 uint32_t props_index = 0;
1898 int i, j;
1899
1900 // The following are required in the "layer" object:
1901 // (required) "name"
1902 // (required) "type"
1903 // (required) "library_path"
1904 // (required) "api_version"
1905 // (required) "implementation_version"
1906 // (required) "description"
1907 // (required for implicit layers) "disable_environment"
1908 #define GET_JSON_OBJECT(node, var) \
1909 { \
1910 var = cJSON_GetObjectItem(node, #var); \
1911 if (var == NULL) { \
1912 loader_log(inst, VULKAN_LOADER_WARN_BIT, 0, \
1913 "Didn't find required layer object %s in manifest " \
1914 "JSON file, skipping this layer", \
1915 #var); \
1916 goto out; \
1917 } \
1918 }
1919 #define GET_JSON_ITEM(inst, node, var) \
1920 { \
1921 item = cJSON_GetObjectItem(node, #var); \
1922 if (item == NULL) { \
1923 loader_log(inst, VULKAN_LOADER_WARN_BIT, 0, \
1924 "Didn't find required layer value %s in manifest JSON " \
1925 "file, skipping this layer", \
1926 #var); \
1927 goto out; \
1928 } \
1929 temp = cJSON_Print(item); \
1930 if (temp == NULL) { \
1931 loader_log(inst, VULKAN_LOADER_WARN_BIT, 0, \
1932 "Problem accessing layer value %s in manifest JSON " \
1933 "file, skipping this layer", \
1934 #var); \
1935 result = VK_ERROR_OUT_OF_HOST_MEMORY; \
1936 goto out; \
1937 } \
1938 temp[strlen(temp) - 1] = '\0'; \
1939 var = loader_stack_alloc(strlen(temp) + 1); \
1940 strcpy(var, &temp[1]); \
1941 loader_instance_heap_free(inst, temp); \
1942 }
1943 GET_JSON_ITEM(inst, layer_node, name)
1944 GET_JSON_ITEM(inst, layer_node, type)
1945 GET_JSON_ITEM(inst, layer_node, api_version)
1946 GET_JSON_ITEM(inst, layer_node, implementation_version)
1947 GET_JSON_ITEM(inst, layer_node, description)
1948
1949 // Add list entry
1950 if (!strcmp(type, "DEVICE")) {
1951 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Device layers are deprecated. Skipping this layer");
1952 goto out;
1953 }
1954
1955 // Allow either GLOBAL or INSTANCE type interchangeably to handle
1956 // layers that must work with older loaders
1957 if (!strcmp(type, "INSTANCE") || !strcmp(type, "GLOBAL")) {
1958 if (layer_instance_list == NULL) {
1959 goto out;
1960 }
1961 props = loader_get_next_layer_property_slot(inst, layer_instance_list);
1962 if (NULL == props) {
1963 // Error already triggered in loader_get_next_layer_property_slot.
1964 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1965 goto out;
1966 }
1967 props_index = layer_instance_list->count - 1;
1968 props->type_flags = VK_LAYER_TYPE_FLAG_INSTANCE_LAYER;
1969 if (!is_implicit) {
1970 props->type_flags |= VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER;
1971 }
1972 } else {
1973 goto out;
1974 }
1975
1976 // Expiration date for override layer. Field starte with JSON file 1.1.2 and
1977 // is completely optional. So, no check put in place.
1978 if (!strcmp(name, VK_OVERRIDE_LAYER_NAME)) {
1979 cJSON *expiration;
1980 if (!loader_check_version_meets_required(loader_combine_version(1, 1, 2), version)) {
1981 loader_log(
1982 inst, VULKAN_LOADER_WARN_BIT, 0,
1983 "Override layer expiration date not added until version 1.1.2. Please update JSON file version appropriately.");
1984 }
1985
1986 props->is_override = true;
1987 expiration = cJSON_GetObjectItem(layer_node, "expiration_date");
1988 if (NULL != expiration) {
1989 char date_copy[32];
1990 uint8_t cur_item = 0;
1991
1992 // Get the string for the current item
1993 temp = cJSON_Print(expiration);
1994 if (temp == NULL) {
1995 loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
1996 "Problem accessing layer value 'expiration_date' in manifest JSON file, skipping this layer");
1997 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1998 goto out;
1999 }
2000 temp[strlen(temp) - 1] = '\0';
2001 strcpy(date_copy, &temp[1]);
2002 loader_instance_heap_free(inst, temp);
2003
2004 if (strlen(date_copy) == 16) {
2005 char *cur_start = &date_copy[0];
2006 char *next_dash = strchr(date_copy, '-');
2007 if (NULL != next_dash) {
2008 while (cur_item < 5 && strlen(cur_start)) {
2009 if (next_dash != NULL) {
2010 *next_dash = '\0';
2011 }
2012 switch (cur_item) {
2013 case 0: // Year
2014 props->expiration.year = (uint16_t)atoi(cur_start);
2015 break;
2016 case 1: // Month
2017 props->expiration.month = (uint8_t)atoi(cur_start);
2018 break;
2019 case 2: // Day
2020 props->expiration.day = (uint8_t)atoi(cur_start);
2021 break;
2022 case 3: // Hour
2023 props->expiration.hour = (uint8_t)atoi(cur_start);
2024 break;
2025 case 4: // Minute
2026 props->expiration.minute = (uint8_t)atoi(cur_start);
2027 props->has_expiration = true;
2028 break;
2029 default: // Ignore
2030 break;
2031 }
2032 if (next_dash != NULL) {
2033 cur_start = next_dash + 1;
2034 next_dash = strchr(cur_start, '-');
2035 }
2036 cur_item++;
2037 }
2038 }
2039 }
2040 }
2041 }
2042
2043 // Library path no longer required unless component_layers is also not defined
2044 library_path = cJSON_GetObjectItem(layer_node, "library_path");
2045 component_layers = cJSON_GetObjectItem(layer_node, "component_layers");
2046 if (NULL != library_path) {
2047 if (NULL != component_layers) {
2048 loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2049 "Indicating meta-layer-specific component_layers, but also defining layer library path. Both are not "
2050 "compatible, so skipping this layer");
2051 goto out;
2052 }
2053 props->num_component_layers = 0;
2054 props->component_layer_names = NULL;
2055
2056 temp = cJSON_Print(library_path);
2057 if (NULL == temp) {
2058 loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2059 "Problem accessing layer value library_path in manifest JSON file, skipping this layer");
2060 result = VK_ERROR_OUT_OF_HOST_MEMORY;
2061 goto out;
2062 }
2063 temp[strlen(temp) - 1] = '\0';
2064 library_path_str = loader_stack_alloc(strlen(temp) + 1);
2065 strcpy(library_path_str, &temp[1]);
2066 loader_instance_heap_free(inst, temp);
2067
2068 strncpy(props->manifest_file_name, filename, MAX_STRING_SIZE);
2069 char *fullpath = props->lib_name;
2070 char *rel_base;
2071 if (NULL != library_path_str) {
2072 if (loader_platform_is_path(library_path_str)) {
2073 // A relative or absolute path
2074 char *name_copy = loader_stack_alloc(strlen(filename) + 1);
2075 strcpy(name_copy, filename);
2076 rel_base = loader_platform_dirname(name_copy);
2077 loader_expand_path(library_path_str, rel_base, MAX_STRING_SIZE, fullpath);
2078 } else {
2079 // A filename which is assumed in a system directory
2080 loader_get_fullpath(library_path_str, "", MAX_STRING_SIZE, fullpath);
2081 }
2082 }
2083 } else if (NULL != component_layers) {
2084 if (!loader_check_version_meets_required(LOADER_VERSION_1_1_0, version)) {
2085 loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2086 "Indicating meta-layer-specific component_layers, but using older JSON file version.");
2087 }
2088 int count = cJSON_GetArraySize(component_layers);
2089 props->num_component_layers = count;
2090
2091 // Allocate buffer for layer names
2092 props->component_layer_names =
2093 loader_instance_heap_alloc(inst, sizeof(char[MAX_STRING_SIZE]) * count, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
2094 if (NULL == props->component_layer_names && count > 0) {
2095 result = VK_ERROR_OUT_OF_HOST_MEMORY;
2096 goto out;
2097 }
2098
2099 // Copy the component layers into the array
2100 for (i = 0; i < count; i++) {
2101 cJSON *comp_layer = cJSON_GetArrayItem(component_layers, i);
2102 if (NULL != comp_layer) {
2103 temp = cJSON_Print(comp_layer);
2104 if (NULL == temp) {
2105 result = VK_ERROR_OUT_OF_HOST_MEMORY;
2106 goto out;
2107 }
2108 temp[strlen(temp) - 1] = '\0';
2109 strncpy(props->component_layer_names[i], temp + 1, MAX_STRING_SIZE - 1);
2110 props->component_layer_names[i][MAX_STRING_SIZE - 1] = '\0';
2111 loader_instance_heap_free(inst, temp);
2112 }
2113 }
2114
2115 // This is now, officially, a meta-layer
2116 props->type_flags |= VK_LAYER_TYPE_FLAG_META_LAYER;
2117 loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Encountered meta-layer %s", name);
2118
2119 // Make sure we set up other things so we head down the correct branches below
2120 library_path_str = NULL;
2121 } else {
2122 loader_log(
2123 inst, VULKAN_LOADER_WARN_BIT, 0,
2124 "Layer missing both library_path and component_layers fields. One or the other MUST be defined. Skipping this layer");
2125 goto out;
2126 }
2127
2128 props->num_blacklist_layers = 0;
2129 props->blacklist_layer_names = NULL;
2130 blacklisted_layers = cJSON_GetObjectItem(layer_node, "blacklisted_layers");
2131 if (blacklisted_layers != NULL) {
2132 if (strcmp(name, VK_OVERRIDE_LAYER_NAME)) {
2133 loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2134 "Layer %s contains a blacklist, but a blacklist can only be provided by the override metalayer. This "
2135 "blacklist will be ignored.",
2136 name);
2137 } else {
2138 props->num_blacklist_layers = cJSON_GetArraySize(blacklisted_layers);
2139 if (props->num_blacklist_layers > 0) {
2140 // Allocate the blacklist array
2141 props->blacklist_layer_names = loader_instance_heap_alloc(
2142 inst, sizeof(char[MAX_STRING_SIZE]) * props->num_blacklist_layers, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
2143 if (props->blacklist_layer_names == NULL && props->num_blacklist_layers > 0) {
2144 result = VK_ERROR_OUT_OF_HOST_MEMORY;
2145 goto out;
2146 }
2147
2148 // Copy the blacklisted layers into the array
2149 for (i = 0; i < (int)props->num_blacklist_layers; ++i) {
2150 cJSON *black_layer = cJSON_GetArrayItem(blacklisted_layers, i);
2151 if (black_layer == NULL) {
2152 continue;
2153 }
2154 temp = cJSON_Print(black_layer);
2155 if (temp == NULL) {
2156 result = VK_ERROR_OUT_OF_HOST_MEMORY;
2157 goto out;
2158 }
2159 temp[strlen(temp) - 1] = '\0';
2160 strncpy(props->blacklist_layer_names[i], temp + 1, MAX_STRING_SIZE - 1);
2161 props->blacklist_layer_names[i][MAX_STRING_SIZE - 1] = '\0';
2162 loader_instance_heap_free(inst, temp);
2163 }
2164 }
2165 }
2166 }
2167
2168 override_paths = cJSON_GetObjectItem(layer_node, "override_paths");
2169 if (NULL != override_paths) {
2170 if (!loader_check_version_meets_required(loader_combine_version(1, 1, 0), version)) {
2171 loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2172 "Indicating meta-layer-specific override paths, but using older JSON file version.");
2173 }
2174 int count = cJSON_GetArraySize(override_paths);
2175 props->num_override_paths = count;
2176 if (count > 0) {
2177 // Allocate buffer for override paths
2178 props->override_paths =
2179 loader_instance_heap_alloc(inst, sizeof(char[MAX_STRING_SIZE]) * count, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
2180 if (NULL == props->override_paths && count > 0) {
2181 result = VK_ERROR_OUT_OF_HOST_MEMORY;
2182 goto out;
2183 }
2184
2185 // Copy the override paths into the array
2186 for (i = 0; i < count; i++) {
2187 cJSON *override_path = cJSON_GetArrayItem(override_paths, i);
2188 if (NULL != override_path) {
2189 temp = cJSON_Print(override_path);
2190 if (NULL == temp) {
2191 result = VK_ERROR_OUT_OF_HOST_MEMORY;
2192 goto out;
2193 }
2194 temp[strlen(temp) - 1] = '\0';
2195 strncpy(props->override_paths[i], temp + 1, MAX_STRING_SIZE - 1);
2196 props->override_paths[i][MAX_STRING_SIZE - 1] = '\0';
2197 loader_instance_heap_free(inst, temp);
2198 }
2199 }
2200 }
2201 }
2202
2203 if (is_implicit) {
2204 GET_JSON_OBJECT(layer_node, disable_environment)
2205 }
2206 #undef GET_JSON_ITEM
2207 #undef GET_JSON_OBJECT
2208
2209 strncpy(props->info.layerName, name, sizeof(props->info.layerName));
2210 props->info.layerName[sizeof(props->info.layerName) - 1] = '\0';
2211 if (0 != strncmp(props->info.layerName, "VK_LAYER_", 9)) {
2212 loader_log(inst, VULKAN_LOADER_WARN_BIT, 0, "Layer name %s does not conform to naming standard (Policy #LLP_LAYER_3)",
2213 props->info.layerName);
2214 }
2215 props->info.specVersion = loader_parse_version_string(api_version);
2216 props->info.implementationVersion = atoi(implementation_version);
2217 strncpy((char *)props->info.description, description, sizeof(props->info.description));
2218 props->info.description[sizeof(props->info.description) - 1] = '\0';
2219 if (is_implicit) {
2220 if (!disable_environment || !disable_environment->child) {
2221 loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2222 "Didn't find required layer child value disable_environment in manifest JSON file, skipping this layer "
2223 "(Policy #LLP_LAYER_9)");
2224 goto out;
2225 }
2226 strncpy(props->disable_env_var.name, disable_environment->child->string, sizeof(props->disable_env_var.name));
2227 props->disable_env_var.name[sizeof(props->disable_env_var.name) - 1] = '\0';
2228 strncpy(props->disable_env_var.value, disable_environment->child->valuestring, sizeof(props->disable_env_var.value));
2229 props->disable_env_var.value[sizeof(props->disable_env_var.value) - 1] = '\0';
2230 }
2231
2232 // Make sure the layer's manifest doesn't contain a non zero variant value
2233 if (VK_API_VERSION_VARIANT(props->info.specVersion) != 0) {
2234 loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0,
2235 "Layer %s has an \'api_version\' field which contains a non-zero variant value of %d. "
2236 " Skipping Layer.",
2237 props->info.layerName, VK_API_VERSION_VARIANT(props->info.specVersion));
2238 goto out;
2239 }
2240
2241 // Now get all optional items and objects and put in list:
2242 // functions
2243 // instance_extensions
2244 // device_extensions
2245 // enable_environment (implicit layers only)
2246 // library_arch
2247 #define GET_JSON_OBJECT(node, var) \
2248 { var = cJSON_GetObjectItem(node, #var); }
2249 #define GET_JSON_ITEM(inst, node, var) \
2250 { \
2251 item = cJSON_GetObjectItem(node, #var); \
2252 if (item != NULL) { \
2253 temp = cJSON_Print(item); \
2254 if (temp != NULL) { \
2255 temp[strlen(temp) - 1] = '\0'; \
2256 var = loader_stack_alloc(strlen(temp) + 1); \
2257 strcpy(var, &temp[1]); \
2258 loader_instance_heap_free(inst, temp); \
2259 } else { \
2260 result = VK_ERROR_OUT_OF_HOST_MEMORY; \
2261 goto out; \
2262 } \
2263 } \
2264 }
2265
2266 cJSON *instance_extensions, *device_extensions, *functions, *enable_environment;
2267 cJSON *entrypoints = NULL;
2268 char *vkGetInstanceProcAddr = NULL;
2269 char *vkGetDeviceProcAddr = NULL;
2270 char *vkNegotiateLoaderLayerInterfaceVersion = NULL;
2271 char *spec_version = NULL;
2272 char **entry_array = NULL;
2273 char *library_arch = NULL;
2274 cJSON *app_keys = NULL;
2275
2276 // Layer interface functions
2277 // vkGetInstanceProcAddr
2278 // vkGetDeviceProcAddr
2279 // vkNegotiateLoaderLayerInterfaceVersion (starting with JSON file 1.1.0)
2280 GET_JSON_OBJECT(layer_node, functions)
2281 if (functions != NULL) {
2282 if (loader_check_version_meets_required(loader_combine_version(1, 1, 0), version)) {
2283 GET_JSON_ITEM(inst, functions, vkNegotiateLoaderLayerInterfaceVersion)
2284 if (vkNegotiateLoaderLayerInterfaceVersion != NULL)
2285 strncpy(props->functions.str_negotiate_interface, vkNegotiateLoaderLayerInterfaceVersion,
2286 sizeof(props->functions.str_negotiate_interface));
2287 props->functions.str_negotiate_interface[sizeof(props->functions.str_negotiate_interface) - 1] = '\0';
2288 } else {
2289 props->functions.str_negotiate_interface[0] = '\0';
2290 }
2291 GET_JSON_ITEM(inst, functions, vkGetInstanceProcAddr)
2292 GET_JSON_ITEM(inst, functions, vkGetDeviceProcAddr)
2293 if (vkGetInstanceProcAddr != NULL) {
2294 strncpy(props->functions.str_gipa, vkGetInstanceProcAddr, sizeof(props->functions.str_gipa));
2295 if (loader_check_version_meets_required(loader_combine_version(1, 1, 0), version)) {
2296 loader_log(inst, VULKAN_LOADER_INFO_BIT, 0,
2297 "Layer \"%s\" using deprecated \'vkGetInstanceProcAddr\' tag which was deprecated starting with JSON "
2298 "file version 1.1.0. The new vkNegotiateLoaderLayerInterfaceVersion function is preferred, though for "
2299 "compatibility reasons it may be desirable to continue using the deprecated tag.",
2300 name);
2301 }
2302 }
2303 props->functions.str_gipa[sizeof(props->functions.str_gipa) - 1] = '\0';
2304 if (vkGetDeviceProcAddr != NULL) {
2305 strncpy(props->functions.str_gdpa, vkGetDeviceProcAddr, sizeof(props->functions.str_gdpa));
2306 if (loader_check_version_meets_required(loader_combine_version(1, 1, 0), version)) {
2307 loader_log(inst, VULKAN_LOADER_INFO_BIT, 0,
2308 "Layer \"%s\" using deprecated \'vkGetDeviceProcAddr\' tag which was deprecated starting with JSON "
2309 "file version 1.1.0. The new vkNegotiateLoaderLayerInterfaceVersion function is preferred, though for "
2310 "compatibility reasons it may be desirable to continue using the deprecated tag.",
2311 name);
2312 }
2313 }
2314 props->functions.str_gdpa[sizeof(props->functions.str_gdpa) - 1] = '\0';
2315 }
2316
2317 // instance_extensions
2318 // array of {
2319 // name
2320 // spec_version
2321 // }
2322 GET_JSON_OBJECT(layer_node, instance_extensions)
2323 if (instance_extensions != NULL) {
2324 int count = cJSON_GetArraySize(instance_extensions);
2325 for (i = 0; i < count; i++) {
2326 ext_item = cJSON_GetArrayItem(instance_extensions, i);
2327 GET_JSON_ITEM(inst, ext_item, name)
2328 if (name != NULL) {
2329 strncpy(ext_prop.extensionName, name, sizeof(ext_prop.extensionName));
2330 ext_prop.extensionName[sizeof(ext_prop.extensionName) - 1] = '\0';
2331 }
2332 GET_JSON_ITEM(inst, ext_item, spec_version)
2333 if (NULL != spec_version) {
2334 ext_prop.specVersion = atoi(spec_version);
2335 } else {
2336 ext_prop.specVersion = 0;
2337 }
2338 bool ext_unsupported = wsi_unsupported_instance_extension(&ext_prop);
2339 if (!ext_unsupported) {
2340 loader_add_to_ext_list(inst, &props->instance_extension_list, 1, &ext_prop);
2341 }
2342 }
2343 }
2344
2345 // device_extensions
2346 // array of {
2347 // name
2348 // spec_version
2349 // entrypoints
2350 // }
2351 GET_JSON_OBJECT(layer_node, device_extensions)
2352 if (device_extensions != NULL) {
2353 int count = cJSON_GetArraySize(device_extensions);
2354 for (i = 0; i < count; i++) {
2355 ext_item = cJSON_GetArrayItem(device_extensions, i);
2356 GET_JSON_ITEM(inst, ext_item, name)
2357 GET_JSON_ITEM(inst, ext_item, spec_version)
2358 if (name != NULL) {
2359 strncpy(ext_prop.extensionName, name, sizeof(ext_prop.extensionName));
2360 ext_prop.extensionName[sizeof(ext_prop.extensionName) - 1] = '\0';
2361 }
2362 if (NULL != spec_version) {
2363 ext_prop.specVersion = atoi(spec_version);
2364 } else {
2365 ext_prop.specVersion = 0;
2366 }
2367 // entrypoints = cJSON_GetObjectItem(ext_item, "entrypoints");
2368 GET_JSON_OBJECT(ext_item, entrypoints)
2369 int entry_count;
2370 if (entrypoints == NULL) {
2371 loader_add_to_dev_ext_list(inst, &props->device_extension_list, &ext_prop, 0, NULL);
2372 continue;
2373 }
2374 entry_count = cJSON_GetArraySize(entrypoints);
2375 if (entry_count) {
2376 entry_array = (char **)loader_stack_alloc(sizeof(char *) * entry_count);
2377 }
2378 for (j = 0; j < entry_count; j++) {
2379 ext_item = cJSON_GetArrayItem(entrypoints, j);
2380 if (ext_item != NULL) {
2381 temp = cJSON_Print(ext_item);
2382 if (NULL == temp) {
2383 entry_array[j] = NULL;
2384 result = VK_ERROR_OUT_OF_HOST_MEMORY;
2385 goto out;
2386 }
2387 temp[strlen(temp) - 1] = '\0';
2388 entry_array[j] = loader_stack_alloc(strlen(temp) + 1);
2389 strcpy(entry_array[j], &temp[1]);
2390 loader_instance_heap_free(inst, temp);
2391 }
2392 }
2393 loader_add_to_dev_ext_list(inst, &props->device_extension_list, &ext_prop, entry_count, entry_array);
2394 }
2395 }
2396 if (is_implicit) {
2397 GET_JSON_OBJECT(layer_node, enable_environment)
2398
2399 // enable_environment is optional
2400 if (enable_environment) {
2401 strncpy(props->enable_env_var.name, enable_environment->child->string, sizeof(props->enable_env_var.name));
2402 props->enable_env_var.name[sizeof(props->enable_env_var.name) - 1] = '\0';
2403 strncpy(props->enable_env_var.value, enable_environment->child->valuestring, sizeof(props->enable_env_var.value));
2404 props->enable_env_var.value[sizeof(props->enable_env_var.value) - 1] = '\0';
2405 }
2406 }
2407
2408 // Read in the pre-instance stuff
2409 cJSON *pre_instance = cJSON_GetObjectItem(layer_node, "pre_instance_functions");
2410 if (NULL != pre_instance) {
2411 // Supported versions started in 1.1.2, so anything newer
2412 if (!loader_check_version_meets_required(loader_combine_version(1, 1, 2), version)) {
2413 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
2414 "Found pre_instance_functions section in layer from \"%s\". This section is only valid in manifest version "
2415 "1.1.2 or later. The section will be ignored",
2416 filename);
2417 } else if (!is_implicit) {
2418 loader_log(inst, VULKAN_LOADER_WARN_BIT, 0,
2419 "Found pre_instance_functions section in explicit layer from \"%s\". This section is only valid in implicit "
2420 "layers. The section will be ignored",
2421 filename);
2422 } else {
2423 cJSON *inst_ext_json = cJSON_GetObjectItem(pre_instance, "vkEnumerateInstanceExtensionProperties");
2424 if (NULL != inst_ext_json) {
2425 char *inst_ext_name = cJSON_Print(inst_ext_json);
2426 if (NULL == inst_ext_name) {
2427 result = VK_ERROR_OUT_OF_HOST_MEMORY;
2428 goto out;
2429 }
2430 size_t len = strlen(inst_ext_name) >= MAX_STRING_SIZE ? MAX_STRING_SIZE - 3 : strlen(inst_ext_name) - 2;
2431 strncpy(props->pre_instance_functions.enumerate_instance_extension_properties, inst_ext_name + 1, len);
2432 props->pre_instance_functions.enumerate_instance_extension_properties[len] = '\0';
2433 loader_instance_heap_free(inst, inst_ext_name);
2434 }
2435
2436 cJSON *inst_layer_json = cJSON_GetObjectItem(pre_instance, "vkEnumerateInstanceLayerProperties");
2437 if (NULL != inst_layer_json) {
2438 char *inst_layer_name = cJSON_Print(inst_layer_json);
2439 if (NULL == inst_layer_name) {
2440 result = VK_ERROR_OUT_OF_HOST_MEMORY;
2441 goto out;
2442 }
2443 size_t len = strlen(inst_layer_name) >= MAX_STRING_SIZE ? MAX_STRING_SIZE - 3 : strlen(inst_layer_name) - 2;
2444 strncpy(props->pre_instance_functions.enumerate_instance_layer_properties, inst_layer_name + 1, len);
2445 props->pre_instance_functions.enumerate_instance_layer_properties[len] = '\0';
2446 loader_instance_heap_free(inst, inst_layer_name);
2447 }
2448
2449 cJSON *inst_version_json = cJSON_GetObjectItem(pre_instance, "vkEnumerateInstanceVersion");
2450 if (NULL != inst_version_json) {
2451 char *inst_version_name = cJSON_Print(inst_version_json);
2452 if (NULL == inst_version_name) {
2453 result = VK_ERROR_OUT_OF_HOST_MEMORY;
2454 goto out;
2455 }
2456 size_t len = strlen(inst_version_name) >= MAX_STRING_SIZE ? MAX_STRING_SIZE - 3 : strlen(inst_version_name) - 2;
2457 strncpy(props->pre_instance_functions.enumerate_instance_version, inst_version_name + 1, len);
2458 props->pre_instance_functions.enumerate_instance_version[len] = '\0';
2459 loader_instance_heap_free(inst, inst_version_name);
2460 }
2461 }
2462 }
2463
2464 props->num_app_key_paths = 0;
2465 props->app_key_paths = NULL;
2466 app_keys = cJSON_GetObjectItem(layer_node, "app_keys");
2467 if (app_keys != NULL) {
2468 if (strcmp(name, VK_OVERRIDE_LAYER_NAME)) {
2469 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
2470 "Layer %s contains app_keys, but any app_keys can only be provided by the override metalayer. "
2471 "These will be ignored.",
2472 name);
2473 } else {
2474 props->num_app_key_paths = cJSON_GetArraySize(app_keys);
2475
2476 // Allocate the blacklist array
2477 props->app_key_paths = loader_instance_heap_alloc(inst, sizeof(char[MAX_STRING_SIZE]) * props->num_app_key_paths,
2478 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
2479 if (props->app_key_paths == NULL) {
2480 result = VK_ERROR_OUT_OF_HOST_MEMORY;
2481 goto out;
2482 }
2483
2484 // Copy the app_key_paths into the array
2485 for (i = 0; i < (int)props->num_app_key_paths; ++i) {
2486 cJSON *app_key_path = cJSON_GetArrayItem(app_keys, i);
2487 if (app_key_path == NULL) {
2488 continue;
2489 }
2490 temp = cJSON_Print(app_key_path);
2491 if (temp == NULL) {
2492 result = VK_ERROR_OUT_OF_HOST_MEMORY;
2493 goto out;
2494 }
2495 temp[strlen(temp) - 1] = '\0';
2496 strncpy(props->app_key_paths[i], temp + 1, MAX_STRING_SIZE - 1);
2497 props->app_key_paths[i][MAX_STRING_SIZE - 1] = '\0';
2498 loader_instance_heap_free(inst, temp);
2499 }
2500 }
2501 }
2502
2503 GET_JSON_ITEM(inst, layer_node, library_arch)
2504 if (library_arch != NULL) {
2505 if ((strncmp(library_arch, "32", 2) == 0 && sizeof(void *) != 4) ||
2506 (strncmp(library_arch, "64", 2) == 0 && sizeof(void *) != 8)) {
2507 loader_log(inst, VULKAN_LOADER_INFO_BIT, 0,
2508 "Layer library architecture doesn't match the current running architecture, skipping this layer");
2509 goto out;
2510 }
2511 }
2512
2513 result = VK_SUCCESS;
2514
2515 out:
2516 #undef GET_JSON_ITEM
2517 #undef GET_JSON_OBJECT
2518
2519 if (VK_SUCCESS != result && NULL != props) {
2520 // Make sure to free anything that was allocated
2521 loader_remove_layer_in_list(inst, layer_instance_list, props_index);
2522 }
2523
2524 return result;
2525 }
2526
is_valid_layer_json_version(const loader_api_version * layer_json)2527 static inline bool is_valid_layer_json_version(const loader_api_version *layer_json) {
2528 // Supported versions are: 1.0.0, 1.0.1, 1.1.0 - 1.1.2, and 1.2.0 - 1.2.1.
2529 if ((layer_json->major == 1 && layer_json->minor == 2 && layer_json->patch < 2) ||
2530 (layer_json->major == 1 && layer_json->minor == 1 && layer_json->patch < 3) ||
2531 (layer_json->major == 1 && layer_json->minor == 0 && layer_json->patch < 2)) {
2532 return true;
2533 }
2534 return false;
2535 }
2536
2537 // Given a cJSON struct (json) of the top level JSON object from layer manifest
2538 // file, add entry to the layer_list. Fill out the layer_properties in this list
2539 // entry from the input cJSON object.
2540 //
2541 // \returns
2542 // void
2543 // layer_list has a new entry and initialized accordingly.
2544 // If the json input object does not have all the required fields no entry
2545 // is added to the list.
loader_add_layer_properties(const struct loader_instance * inst,struct loader_layer_list * layer_instance_list,cJSON * json,bool is_implicit,char * filename)2546 static VkResult loader_add_layer_properties(const struct loader_instance *inst, struct loader_layer_list *layer_instance_list,
2547 cJSON *json, bool is_implicit, char *filename) {
2548 // The following Fields in layer manifest file that are required:
2549 // - "file_format_version"
2550 // - If more than one "layer" object are used, then the "layers" array is
2551 // required
2552 VkResult result = VK_ERROR_INITIALIZATION_FAILED;
2553 cJSON *item, *layers_node, *layer_node;
2554 loader_api_version json_version = {0, 0, 0};
2555 char *file_vers = NULL;
2556 // Make sure sure the top level json value is an object
2557 if (!json || json->type != 6) {
2558 goto out;
2559 }
2560 item = cJSON_GetObjectItem(json, "file_format_version");
2561 if (item == NULL) {
2562 goto out;
2563 }
2564 file_vers = cJSON_PrintUnformatted(item);
2565 if (NULL == file_vers) {
2566 goto out;
2567 }
2568 loader_log(inst, VULKAN_LOADER_INFO_BIT, 0, "Found manifest file %s (file version %s)", filename, file_vers);
2569 // Get the major/minor/and patch as integers for easier comparison
2570 json_version = loader_make_full_version(loader_parse_version_string(file_vers));
2571
2572 if (!is_valid_layer_json_version(&json_version)) {
2573 loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0,
2574 "loader_add_layer_properties: %s has unknown layer manifest file version %d.%d.%d. May cause errors.", filename,
2575 json_version.major, json_version.minor, json_version.patch);
2576 }
2577
2578 // If "layers" is present, read in the array of layer objects
2579 layers_node = cJSON_GetObjectItem(json, "layers");
2580 if (layers_node != NULL) {
2581 int numItems = cJSON_GetArraySize(layers_node);
2582 // Supported versions started in 1.0.1, so anything newer
2583 if (!loader_check_version_meets_required(loader_combine_version(1, 0, 1), json_version)) {
2584 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
2585 "loader_add_layer_properties: \'layers\' tag not supported until file version 1.0.1, but %s is reporting "
2586 "version %s",
2587 filename, file_vers);
2588 }
2589 for (int curLayer = 0; curLayer < numItems; curLayer++) {
2590 layer_node = cJSON_GetArrayItem(layers_node, curLayer);
2591 if (layer_node == NULL) {
2592 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
2593 "loader_add_layer_properties: Can not find 'layers' array element %d object in manifest JSON file %s. "
2594 "Skipping this file",
2595 curLayer, filename);
2596 goto out;
2597 }
2598 result = loader_read_layer_json(inst, layer_instance_list, layer_node, json_version, item, is_implicit, filename);
2599 }
2600 } else {
2601 // Otherwise, try to read in individual layers
2602 layer_node = cJSON_GetObjectItem(json, "layer");
2603 if (layer_node == NULL) {
2604 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
2605 "loader_add_layer_properties: Can not find 'layer' object in manifest JSON file %s. Skipping this file.",
2606 filename);
2607 goto out;
2608 }
2609 // Loop through all "layer" objects in the file to get a count of them
2610 // first.
2611 uint16_t layer_count = 0;
2612 cJSON *tempNode = layer_node;
2613 do {
2614 tempNode = tempNode->next;
2615 layer_count++;
2616 } while (tempNode != NULL);
2617
2618 // Throw a warning if we encounter multiple "layer" objects in file
2619 // versions newer than 1.0.0. Having multiple objects with the same
2620 // name at the same level is actually a JSON standard violation.
2621 if (layer_count > 1 && loader_check_version_meets_required(loader_combine_version(1, 0, 1), json_version)) {
2622 loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_LAYER_BIT, 0,
2623 "loader_add_layer_properties: Multiple 'layer' nodes are deprecated starting in file version \"1.0.1\". "
2624 "Please use 'layers' : [] array instead in %s.",
2625 filename);
2626 } else {
2627 do {
2628 result = loader_read_layer_json(inst, layer_instance_list, layer_node, json_version, item, is_implicit, filename);
2629 layer_node = layer_node->next;
2630 } while (layer_node != NULL);
2631 }
2632 }
2633
2634 out:
2635 loader_instance_heap_free(inst, file_vers);
2636
2637 return result;
2638 }
2639
determine_data_file_path_size(const char * cur_path,size_t relative_path_size)2640 static inline size_t determine_data_file_path_size(const char *cur_path, size_t relative_path_size) {
2641 size_t path_size = 0;
2642
2643 if (NULL != cur_path) {
2644 // For each folder in cur_path, (detected by finding additional
2645 // path separators in the string) we need to add the relative path on
2646 // the end. Plus, leave an additional two slots on the end to add an
2647 // additional directory slash and path separator if needed
2648 path_size += strlen(cur_path) + relative_path_size + 2;
2649 for (const char *x = cur_path; *x; ++x) {
2650 if (*x == PATH_SEPARATOR) {
2651 path_size += relative_path_size + 2;
2652 }
2653 }
2654 }
2655
2656 return path_size;
2657 }
2658
copy_data_file_info(const char * cur_path,const char * relative_path,size_t relative_path_size,char ** output_path)2659 static inline void copy_data_file_info(const char *cur_path, const char *relative_path, size_t relative_path_size,
2660 char **output_path) {
2661 if (NULL != cur_path) {
2662 uint32_t start = 0;
2663 uint32_t stop = 0;
2664 char *cur_write = *output_path;
2665
2666 while (cur_path[start] != '\0') {
2667 while (cur_path[start] == PATH_SEPARATOR) {
2668 start++;
2669 }
2670 stop = start;
2671 while (cur_path[stop] != PATH_SEPARATOR && cur_path[stop] != '\0') {
2672 stop++;
2673 }
2674 const size_t s = stop - start;
2675 if (s) {
2676 memcpy(cur_write, &cur_path[start], s);
2677 cur_write += s;
2678
2679 // If this is a specific JSON file, just add it and don't add any
2680 // relative path or directory symbol to it.
2681 if (!is_json(cur_write - 5, s)) {
2682 // Add the relative directory if present.
2683 if (relative_path_size > 0) {
2684 // If last symbol written was not a directory symbol, add it.
2685 if (*(cur_write - 1) != DIRECTORY_SYMBOL) {
2686 *cur_write++ = DIRECTORY_SYMBOL;
2687 }
2688 memcpy(cur_write, relative_path, relative_path_size);
2689 cur_write += relative_path_size;
2690 }
2691 }
2692
2693 *cur_write++ = PATH_SEPARATOR;
2694 start = stop;
2695 }
2696 }
2697 *output_path = cur_write;
2698 }
2699 }
2700
2701 // Check to see if there's enough space in the data file list. If not, add some.
check_and_adjust_data_file_list(const struct loader_instance * inst,struct loader_data_files * out_files)2702 static inline VkResult check_and_adjust_data_file_list(const struct loader_instance *inst, struct loader_data_files *out_files) {
2703 if (out_files->count == 0) {
2704 out_files->filename_list = loader_instance_heap_alloc(inst, 64 * sizeof(char *), VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
2705 if (NULL == out_files->filename_list) {
2706 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
2707 "check_and_adjust_data_file_list: Failed to allocate space for manifest file name list");
2708 return VK_ERROR_OUT_OF_HOST_MEMORY;
2709 }
2710 out_files->alloc_count = 64;
2711 } else if (out_files->count == out_files->alloc_count) {
2712 size_t new_size = out_files->alloc_count * sizeof(char *) * 2;
2713 void *new_ptr = loader_instance_heap_realloc(inst, out_files->filename_list, out_files->alloc_count * sizeof(char *),
2714 new_size, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
2715 if (NULL == new_ptr) {
2716 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
2717 "check_and_adjust_data_file_list: Failed to reallocate space for manifest file name list");
2718 return VK_ERROR_OUT_OF_HOST_MEMORY;
2719 }
2720 out_files->filename_list = new_ptr;
2721 out_files->alloc_count *= 2;
2722 }
2723
2724 return VK_SUCCESS;
2725 }
2726
2727 // add file_name to the out_files manifest list. Assumes its a valid manifest file name
add_manifest_file(const struct loader_instance * inst,const char * file_name,struct loader_data_files * out_files)2728 static VkResult add_manifest_file(const struct loader_instance *inst, const char *file_name, struct loader_data_files *out_files) {
2729 VkResult vk_result = VK_SUCCESS;
2730
2731 // Check and allocate space in the manifest list if necessary
2732 vk_result = check_and_adjust_data_file_list(inst, out_files);
2733 if (VK_SUCCESS != vk_result) {
2734 goto out;
2735 }
2736
2737 out_files->filename_list[out_files->count] =
2738 loader_instance_heap_alloc(inst, strlen(file_name) + 1, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
2739 if (out_files->filename_list[out_files->count] == NULL) {
2740 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "add_manifest_file: Failed to allocate space for manifest file %d list",
2741 out_files->count);
2742 vk_result = VK_ERROR_OUT_OF_HOST_MEMORY;
2743 goto out;
2744 }
2745
2746 strcpy(out_files->filename_list[out_files->count++], file_name);
2747
2748 out:
2749 return vk_result;
2750 }
2751
2752 // If the file found is a manifest file name, add it to the out_files manifest list.
add_if_manifest_file(const struct loader_instance * inst,const char * file_name,struct loader_data_files * out_files)2753 static VkResult add_if_manifest_file(const struct loader_instance *inst, const char *file_name,
2754 struct loader_data_files *out_files) {
2755 VkResult vk_result = VK_SUCCESS;
2756
2757 assert(NULL != file_name && "add_if_manifest_file: Received NULL pointer for file_name");
2758 assert(NULL != out_files && "add_if_manifest_file: Received NULL pointer for out_files");
2759
2760 // Look for files ending with ".json" suffix
2761 size_t name_len = strlen(file_name);
2762 const char *name_suffix = file_name + name_len - 5;
2763 if (!is_json(name_suffix, name_len)) {
2764 // Use incomplete to indicate invalid name, but to keep going.
2765 vk_result = VK_INCOMPLETE;
2766 goto out;
2767 }
2768
2769 vk_result = add_manifest_file(inst, file_name, out_files);
2770
2771 out:
2772
2773 return vk_result;
2774 }
2775
2776 // Add any files found in the search_path. If any path in the search path points to a specific JSON, attempt to
2777 // only open that one JSON. Otherwise, if the path is a folder, search the folder for JSON files.
add_data_files(const struct loader_instance * inst,char * search_path,struct loader_data_files * out_files,bool use_first_found_manifest)2778 VkResult add_data_files(const struct loader_instance *inst, char *search_path, struct loader_data_files *out_files,
2779 bool use_first_found_manifest) {
2780 VkResult vk_result = VK_SUCCESS;
2781 DIR *dir_stream = NULL;
2782 struct dirent *dir_entry;
2783 char *cur_file;
2784 char *next_file;
2785 char *name;
2786 char full_path[2048];
2787 #ifndef _WIN32
2788 char temp_path[2048];
2789 #endif
2790
2791 // Now, parse the paths
2792 next_file = search_path;
2793 while (NULL != next_file && *next_file != '\0') {
2794 name = NULL;
2795 cur_file = next_file;
2796 next_file = loader_get_next_path(cur_file);
2797
2798 // Is this a JSON file, then try to open it.
2799 size_t len = strlen(cur_file);
2800 if (is_json(cur_file + len - 5, len)) {
2801 #ifdef _WIN32
2802 name = cur_file;
2803 #else
2804 // Only Linux has relative paths, make a copy of location so it isn't modified
2805 size_t str_len;
2806 if (NULL != next_file) {
2807 str_len = next_file - cur_file + 1;
2808 } else {
2809 str_len = strlen(cur_file) + 1;
2810 }
2811 if (str_len > sizeof(temp_path)) {
2812 loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0, "add_data_files: Path to %s too long\n", cur_file);
2813 continue;
2814 }
2815 strcpy(temp_path, cur_file);
2816 name = temp_path;
2817 #endif
2818 loader_get_fullpath(cur_file, name, sizeof(full_path), full_path);
2819 name = full_path;
2820
2821 VkResult local_res;
2822 local_res = add_if_manifest_file(inst, name, out_files);
2823
2824 // Incomplete means this was not a valid data file.
2825 if (local_res == VK_INCOMPLETE) {
2826 continue;
2827 } else if (local_res != VK_SUCCESS) {
2828 vk_result = local_res;
2829 break;
2830 }
2831 } else { // Otherwise, treat it as a directory
2832 dir_stream = loader_opendir(inst, cur_file);
2833 if (NULL == dir_stream) {
2834 continue;
2835 }
2836 while (1) {
2837 dir_entry = readdir(dir_stream);
2838 if (NULL == dir_entry) {
2839 break;
2840 }
2841
2842 name = &(dir_entry->d_name[0]);
2843 loader_get_fullpath(name, cur_file, sizeof(full_path), full_path);
2844 name = full_path;
2845
2846 VkResult local_res;
2847 local_res = add_if_manifest_file(inst, name, out_files);
2848
2849 // Incomplete means this was not a valid data file.
2850 if (local_res == VK_INCOMPLETE) {
2851 continue;
2852 } else if (local_res != VK_SUCCESS) {
2853 vk_result = local_res;
2854 break;
2855 }
2856 }
2857 loader_closedir(inst, dir_stream);
2858 if (vk_result != VK_SUCCESS) {
2859 goto out;
2860 }
2861 }
2862 if (use_first_found_manifest && out_files->count > 0) {
2863 break;
2864 }
2865 }
2866
2867 out:
2868
2869 return vk_result;
2870 }
2871
2872 // Look for data files in the provided paths, but first check the environment override to determine if we should use that
2873 // instead.
read_data_files_in_search_paths(const struct loader_instance * inst,enum loader_data_files_type manifest_type,const char * path_override,bool * override_active,struct loader_data_files * out_files)2874 static VkResult read_data_files_in_search_paths(const struct loader_instance *inst, enum loader_data_files_type manifest_type,
2875 const char *path_override, bool *override_active,
2876 struct loader_data_files *out_files) {
2877 VkResult vk_result = VK_SUCCESS;
2878 char *override_env = NULL;
2879 const char *override_path = NULL;
2880 char *relative_location = NULL;
2881 char *additional_env = NULL;
2882 size_t search_path_size = 0;
2883 char *search_path = NULL;
2884 char *cur_path_ptr = NULL;
2885 bool use_first_found_manifest = false;
2886 #ifndef _WIN32
2887 size_t rel_size = 0; // unused in windows, dont declare so no compiler warnings are generated
2888 #endif
2889
2890 #if defined(_WIN32)
2891 char *package_path = NULL;
2892 #else
2893 // Determine how much space is needed to generate the full search path
2894 // for the current manifest files.
2895 char *xdg_config_home = loader_secure_getenv("XDG_CONFIG_HOME", inst);
2896 char *xdg_config_dirs = loader_secure_getenv("XDG_CONFIG_DIRS", inst);
2897
2898 #if !defined(__Fuchsia__) && !defined(__QNXNTO__)
2899 if (NULL == xdg_config_dirs || '\0' == xdg_config_dirs[0]) {
2900 xdg_config_dirs = FALLBACK_CONFIG_DIRS;
2901 }
2902 #endif
2903
2904 char *xdg_data_home = loader_secure_getenv("XDG_DATA_HOME", inst);
2905 char *xdg_data_dirs = loader_secure_getenv("XDG_DATA_DIRS", inst);
2906
2907 #if !defined(__Fuchsia__) && !defined(__QNXNTO__)
2908 if (NULL == xdg_data_dirs || '\0' == xdg_data_dirs[0]) {
2909 xdg_data_dirs = FALLBACK_DATA_DIRS;
2910 }
2911 #endif
2912
2913 char *home = NULL;
2914 char *default_data_home = NULL;
2915 char *default_config_home = NULL;
2916 char *home_data_dir = NULL;
2917 char *home_config_dir = NULL;
2918
2919 // Only use HOME if XDG_DATA_HOME is not present on the system
2920 home = loader_secure_getenv("HOME", inst);
2921 if (home != NULL) {
2922 if (NULL == xdg_config_home || '\0' == xdg_config_home[0]) {
2923 const char config_suffix[] = "/.config";
2924 default_config_home =
2925 loader_instance_heap_alloc(inst, strlen(home) + strlen(config_suffix) + 1, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
2926 if (default_config_home == NULL) {
2927 vk_result = VK_ERROR_OUT_OF_HOST_MEMORY;
2928 goto out;
2929 }
2930 strcpy(default_config_home, home);
2931 strcat(default_config_home, config_suffix);
2932 }
2933 if (NULL == xdg_data_home || '\0' == xdg_data_home[0]) {
2934 const char data_suffix[] = "/.local/share";
2935 default_data_home =
2936 loader_instance_heap_alloc(inst, strlen(home) + strlen(data_suffix) + 1, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
2937 if (default_data_home == NULL) {
2938 vk_result = VK_ERROR_OUT_OF_HOST_MEMORY;
2939 goto out;
2940 }
2941 strcpy(default_data_home, home);
2942 strcat(default_data_home, data_suffix);
2943 }
2944 }
2945
2946 if (NULL != default_config_home) {
2947 home_config_dir = default_config_home;
2948 } else {
2949 home_config_dir = xdg_config_home;
2950 }
2951 if (NULL != default_data_home) {
2952 home_data_dir = default_data_home;
2953 } else {
2954 home_data_dir = xdg_data_home;
2955 }
2956 #endif // !_WIN32
2957
2958 switch (manifest_type) {
2959 case LOADER_DATA_FILE_MANIFEST_DRIVER:
2960 override_env = loader_secure_getenv(VK_DRIVER_FILES_ENV_VAR, inst);
2961 if (NULL == override_env) {
2962 // Not there, so fall back to the old name
2963 override_env = loader_secure_getenv(VK_ICD_FILENAMES_ENV_VAR, inst);
2964 }
2965 additional_env = loader_secure_getenv(VK_ADDITIONAL_DRIVER_FILES_ENV_VAR, inst);
2966 relative_location = VK_DRIVERS_INFO_RELATIVE_DIR;
2967 #if defined(_WIN32)
2968 package_path = windows_get_app_package_manifest_path(inst);
2969 #endif
2970 break;
2971 case LOADER_DATA_FILE_MANIFEST_IMPLICIT_LAYER:
2972 relative_location = VK_ILAYERS_INFO_RELATIVE_DIR;
2973 break;
2974 case LOADER_DATA_FILE_MANIFEST_EXPLICIT_LAYER:
2975 override_env = loader_secure_getenv(VK_LAYER_PATH_ENV_VAR, inst);
2976 additional_env = loader_secure_getenv(VK_ADDITIONAL_LAYER_PATH_ENV_VAR, inst);
2977 relative_location = VK_ELAYERS_INFO_RELATIVE_DIR;
2978 break;
2979 default:
2980 assert(false && "Shouldn't get here!");
2981 break;
2982 }
2983
2984 if (path_override != NULL) {
2985 override_path = path_override;
2986 } else if (override_env != NULL) {
2987 override_path = override_env;
2988 }
2989
2990 // Add two by default for NULL terminator and one path separator on end (just in case)
2991 search_path_size = 2;
2992
2993 // If there's an override, use that (and the local folder if required) and nothing else
2994 if (NULL != override_path) {
2995 // Local folder and null terminator
2996 search_path_size += strlen(override_path) + 2;
2997 } else {
2998 // Add the size of any additional search paths defined in the additive environment variable
2999 if (NULL != additional_env) {
3000 search_path_size += determine_data_file_path_size(additional_env, 0) + 2;
3001 #if defined(_WIN32)
3002 }
3003 if (NULL != package_path) {
3004 search_path_size += determine_data_file_path_size(package_path, 0) + 2;
3005 }
3006 if (search_path_size == 2) {
3007 goto out;
3008 }
3009 #else // !_WIN32
3010 }
3011
3012 // Add the general search folders (with the appropriate relative folder added)
3013 rel_size = strlen(relative_location);
3014 if (rel_size > 0) {
3015 #if defined(__APPLE__)
3016 search_path_size += MAXPATHLEN;
3017 #endif
3018 // Only add the home folders if defined
3019 if (NULL != home_config_dir) {
3020 search_path_size += determine_data_file_path_size(home_config_dir, rel_size);
3021 }
3022 search_path_size += determine_data_file_path_size(xdg_config_dirs, rel_size);
3023 search_path_size += determine_data_file_path_size(SYSCONFDIR, rel_size);
3024 #if defined(EXTRASYSCONFDIR)
3025 search_path_size += determine_data_file_path_size(EXTRASYSCONFDIR, rel_size);
3026 #endif
3027 // Only add the home folders if defined
3028 if (NULL != home_data_dir) {
3029 search_path_size += determine_data_file_path_size(home_data_dir, rel_size);
3030 }
3031 search_path_size += determine_data_file_path_size(xdg_data_dirs, rel_size);
3032 }
3033 #endif // !_WIN32
3034 }
3035
3036 // Allocate the required space
3037 search_path = loader_instance_heap_calloc(inst, search_path_size, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
3038 if (NULL == search_path) {
3039 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
3040 "read_data_files_in_search_paths: Failed to allocate space for search path of length %d",
3041 (uint32_t)search_path_size);
3042 vk_result = VK_ERROR_OUT_OF_HOST_MEMORY;
3043 goto out;
3044 }
3045
3046 cur_path_ptr = search_path;
3047
3048 // Add the remaining paths to the list
3049 if (NULL != override_path) {
3050 strcpy(cur_path_ptr, override_path);
3051 cur_path_ptr += strlen(override_path);
3052 } else {
3053 // Add any additional search paths defined in the additive environment variable
3054 if (NULL != additional_env) {
3055 copy_data_file_info(additional_env, NULL, 0, &cur_path_ptr);
3056 }
3057
3058 #if defined(_WIN32)
3059 if (NULL != package_path) {
3060 copy_data_file_info(package_path, NULL, 0, &cur_path_ptr);
3061 }
3062 #else
3063 if (rel_size > 0) {
3064 #if defined(__APPLE__)
3065 // Add the bundle's Resources dir to the beginning of the search path.
3066 // Looks for manifests in the bundle first, before any system directories.
3067 CFBundleRef main_bundle = CFBundleGetMainBundle();
3068 if (NULL != main_bundle) {
3069 CFURLRef ref = CFBundleCopyResourcesDirectoryURL(main_bundle);
3070 if (NULL != ref) {
3071 if (CFURLGetFileSystemRepresentation(ref, TRUE, (UInt8 *)cur_path_ptr, search_path_size)) {
3072 cur_path_ptr += strlen(cur_path_ptr);
3073 *cur_path_ptr++ = DIRECTORY_SYMBOL;
3074 memcpy(cur_path_ptr, relative_location, rel_size);
3075 cur_path_ptr += rel_size;
3076 *cur_path_ptr++ = PATH_SEPARATOR;
3077 // only for ICD manifests
3078 if (override_env != NULL && manifest_type == LOADER_DATA_FILE_MANIFEST_DRIVER) {
3079 use_first_found_manifest = true;
3080 }
3081 }
3082 CFRelease(ref);
3083 }
3084 }
3085 #endif // __APPLE__
3086
3087 // Only add the home folders if not NULL
3088 if (NULL != home_config_dir) {
3089 copy_data_file_info(home_config_dir, relative_location, rel_size, &cur_path_ptr);
3090 }
3091 copy_data_file_info(xdg_config_dirs, relative_location, rel_size, &cur_path_ptr);
3092 copy_data_file_info(SYSCONFDIR, relative_location, rel_size, &cur_path_ptr);
3093 #if defined(EXTRASYSCONFDIR)
3094 copy_data_file_info(EXTRASYSCONFDIR, relative_location, rel_size, &cur_path_ptr);
3095 #endif
3096
3097 // Only add the home folders if not NULL
3098 if (NULL != home_data_dir) {
3099 copy_data_file_info(home_data_dir, relative_location, rel_size, &cur_path_ptr);
3100 }
3101 copy_data_file_info(xdg_data_dirs, relative_location, rel_size, &cur_path_ptr);
3102 }
3103
3104 // Remove the last path separator
3105 --cur_path_ptr;
3106
3107 assert(cur_path_ptr - search_path < (ptrdiff_t)search_path_size);
3108 *cur_path_ptr = '\0';
3109 #endif // !_WIN32
3110 }
3111
3112 // Remove duplicate paths, or it would result in duplicate extensions, duplicate devices, etc.
3113 // This uses minimal memory, but is O(N^2) on the number of paths. Expect only a few paths.
3114 char path_sep_str[2] = {PATH_SEPARATOR, '\0'};
3115 size_t search_path_updated_size = strlen(search_path);
3116 for (size_t first = 0; first < search_path_updated_size;) {
3117 // If this is an empty path, erase it
3118 if (search_path[first] == PATH_SEPARATOR) {
3119 memmove(&search_path[first], &search_path[first + 1], search_path_updated_size - first + 1);
3120 search_path_updated_size -= 1;
3121 continue;
3122 }
3123
3124 size_t first_end = first + 1;
3125 first_end += strcspn(&search_path[first_end], path_sep_str);
3126 for (size_t second = first_end + 1; second < search_path_updated_size;) {
3127 size_t second_end = second + 1;
3128 second_end += strcspn(&search_path[second_end], path_sep_str);
3129 if (first_end - first == second_end - second &&
3130 !strncmp(&search_path[first], &search_path[second], second_end - second)) {
3131 // Found duplicate. Include PATH_SEPARATOR in second_end, then erase it from search_path.
3132 if (search_path[second_end] == PATH_SEPARATOR) {
3133 second_end++;
3134 }
3135 memmove(&search_path[second], &search_path[second_end], search_path_updated_size - second_end + 1);
3136 search_path_updated_size -= second_end - second;
3137 } else {
3138 second = second_end + 1;
3139 }
3140 }
3141 first = first_end + 1;
3142 }
3143 search_path_size = search_path_updated_size;
3144
3145 // Print out the paths being searched if debugging is enabled
3146 uint32_t log_flags = 0;
3147 if (search_path_size > 0) {
3148 char *tmp_search_path = loader_instance_heap_alloc(inst, search_path_size + 1, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
3149 if (NULL != tmp_search_path) {
3150 strncpy(tmp_search_path, search_path, search_path_size);
3151 tmp_search_path[search_path_size] = '\0';
3152 if (manifest_type == LOADER_DATA_FILE_MANIFEST_DRIVER) {
3153 log_flags = VULKAN_LOADER_DRIVER_BIT;
3154 loader_log(inst, VULKAN_LOADER_DRIVER_BIT, 0, "Searching for driver manifest files");
3155 } else {
3156 log_flags = VULKAN_LOADER_LAYER_BIT;
3157 loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "Searching for layer manifest files");
3158 }
3159 loader_log(inst, log_flags, 0, " In following folders:");
3160 char *cur_file;
3161 char *next_file = tmp_search_path;
3162 while (NULL != next_file && *next_file != '\0') {
3163 cur_file = next_file;
3164 next_file = loader_get_next_path(cur_file);
3165 loader_log(inst, log_flags, 0, " %s", cur_file);
3166 }
3167 loader_instance_heap_free(inst, tmp_search_path);
3168 }
3169 }
3170
3171 // Now, parse the paths and add any manifest files found in them.
3172 vk_result = add_data_files(inst, search_path, out_files, use_first_found_manifest);
3173
3174 if (log_flags != 0 && out_files->count > 0) {
3175 loader_log(inst, log_flags, 0, " Found the following files:");
3176 for (uint32_t cur_file = 0; cur_file < out_files->count; ++cur_file) {
3177 loader_log(inst, log_flags, 0, " %s", out_files->filename_list[cur_file]);
3178 }
3179 } else {
3180 loader_log(inst, log_flags, 0, " Found no files");
3181 }
3182
3183 if (NULL != override_path) {
3184 *override_active = true;
3185 } else {
3186 *override_active = false;
3187 }
3188
3189 out:
3190
3191 loader_free_getenv(additional_env, inst);
3192 loader_free_getenv(override_env, inst);
3193 #if defined(_WIN32)
3194 loader_instance_heap_free(inst, package_path);
3195 #else
3196 loader_free_getenv(xdg_config_home, inst);
3197 loader_free_getenv(xdg_config_dirs, inst);
3198 loader_free_getenv(xdg_data_home, inst);
3199 loader_free_getenv(xdg_data_dirs, inst);
3200 loader_free_getenv(xdg_data_home, inst);
3201 loader_free_getenv(home, inst);
3202 loader_instance_heap_free(inst, default_data_home);
3203 loader_instance_heap_free(inst, default_config_home);
3204 #endif
3205
3206 loader_instance_heap_free(inst, search_path);
3207
3208 return vk_result;
3209 }
3210
3211 // Find the Vulkan library manifest files.
3212 //
3213 // This function scans the appropriate locations for a list of JSON manifest files based on the
3214 // "manifest_type". The location is interpreted as Registry path on Windows and a directory path(s)
3215 // on Linux.
3216 // "home_location" is an additional directory in the users home directory to look at. It is
3217 // expanded into the dir path $XDG_DATA_HOME/home_location or $HOME/.local/share/home_location
3218 // depending on environment variables. This "home_location" is only used on Linux.
3219 //
3220 // \returns
3221 // VKResult
3222 // A string list of manifest files to be opened in out_files param.
3223 // List has a pointer to string for each manifest filename.
3224 // When done using the list in out_files, pointers should be freed.
3225 // Location or override string lists can be either files or directories as
3226 // follows:
3227 // | location | override
3228 // --------------------------------
3229 // Win ICD | files | files
3230 // Win Layer | files | dirs
3231 // Linux ICD | dirs | files
3232 // Linux Layer| dirs | dirs
3233
loader_get_data_files(const struct loader_instance * inst,enum loader_data_files_type manifest_type,const char * path_override,struct loader_data_files * out_files)3234 VkResult loader_get_data_files(const struct loader_instance *inst, enum loader_data_files_type manifest_type,
3235 const char *path_override, struct loader_data_files *out_files) {
3236 VkResult res = VK_SUCCESS;
3237 bool override_active = false;
3238
3239 // Free and init the out_files information so there's no false data left from uninitialized variables.
3240 if (out_files->filename_list != NULL) {
3241 for (uint32_t i = 0; i < out_files->count; i++) {
3242 if (NULL != out_files->filename_list[i]) {
3243 loader_instance_heap_free(inst, out_files->filename_list[i]);
3244 out_files->filename_list[i] = NULL;
3245 }
3246 }
3247 loader_instance_heap_free(inst, out_files->filename_list);
3248 }
3249 out_files->count = 0;
3250 out_files->alloc_count = 0;
3251 out_files->filename_list = NULL;
3252
3253 res = read_data_files_in_search_paths(inst, manifest_type, path_override, &override_active, out_files);
3254 if (VK_SUCCESS != res) {
3255 goto out;
3256 }
3257
3258 #ifdef _WIN32
3259 // Read the registry if the override wasn't active.
3260 if (!override_active) {
3261 bool warn_if_not_present = false;
3262 char *registry_location = NULL;
3263
3264 switch (manifest_type) {
3265 default:
3266 goto out;
3267 case LOADER_DATA_FILE_MANIFEST_DRIVER:
3268 warn_if_not_present = true;
3269 registry_location = VK_DRIVERS_INFO_REGISTRY_LOC;
3270 break;
3271 case LOADER_DATA_FILE_MANIFEST_IMPLICIT_LAYER:
3272 registry_location = VK_ILAYERS_INFO_REGISTRY_LOC;
3273 break;
3274 case LOADER_DATA_FILE_MANIFEST_EXPLICIT_LAYER:
3275 warn_if_not_present = true;
3276 registry_location = VK_ELAYERS_INFO_REGISTRY_LOC;
3277 break;
3278 }
3279 VkResult tmp_res =
3280 windows_read_data_files_in_registry(inst, manifest_type, warn_if_not_present, registry_location, out_files);
3281 // Only return an error if there was an error this time, and no manifest files from before.
3282 if (VK_SUCCESS != tmp_res && out_files->count == 0) {
3283 res = tmp_res;
3284 goto out;
3285 }
3286 }
3287 #endif
3288
3289 out:
3290
3291 if (VK_SUCCESS != res && NULL != out_files->filename_list) {
3292 for (uint32_t remove = 0; remove < out_files->count; remove++) {
3293 loader_instance_heap_free(inst, out_files->filename_list[remove]);
3294 }
3295 loader_instance_heap_free(inst, out_files->filename_list);
3296 out_files->count = 0;
3297 out_files->alloc_count = 0;
3298 out_files->filename_list = NULL;
3299 }
3300
3301 return res;
3302 }
3303
3304 struct ICDManifestInfo {
3305 char full_library_path[MAX_STRING_SIZE];
3306 uint32_t version;
3307 };
3308
loader_parse_icd_manifest(const struct loader_instance * inst,char * file_str,struct ICDManifestInfo * icd,bool * skipped_portability_drivers)3309 VkResult loader_parse_icd_manifest(const struct loader_instance *inst, char *file_str, struct ICDManifestInfo *icd,
3310 bool *skipped_portability_drivers) {
3311 VkResult res = VK_SUCCESS;
3312 cJSON *json = NULL;
3313 cJSON *item = NULL, *itemICD = NULL;
3314
3315 char *file_vers_str = NULL;
3316 char *library_path = NULL;
3317 char *library_arch_str = NULL;
3318 char *version_str = NULL;
3319
3320 if (file_str == NULL) {
3321 goto out;
3322 }
3323
3324 res = loader_get_json(inst, file_str, &json);
3325 if (res != VK_SUCCESS || NULL == json) {
3326 goto out;
3327 }
3328
3329 item = cJSON_GetObjectItem(json, "file_format_version");
3330 if (item == NULL) {
3331 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3332 "loader_parse_icd_manifest: ICD JSON %s does not have a \'file_format_version\' field. Skipping ICD JSON.",
3333 file_str);
3334 res = VK_ERROR_INCOMPATIBLE_DRIVER;
3335 goto out;
3336 }
3337
3338 file_vers_str = cJSON_Print(item);
3339 if (NULL == file_vers_str) {
3340 // Only reason the print can fail is if there was an allocation issue
3341 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3342 "loader_parse_icd_manifest: Failed retrieving ICD JSON %s \'file_format_version\' field. Skipping ICD JSON",
3343 file_str);
3344 res = VK_ERROR_OUT_OF_HOST_MEMORY;
3345 goto out;
3346 }
3347 loader_log(inst, VULKAN_LOADER_DRIVER_BIT, 0, "Found ICD manifest file %s, version %s", file_str, file_vers_str);
3348
3349 // Get the version of the driver manifest
3350 loader_api_version json_file_version = loader_make_full_version(loader_parse_version_string(file_vers_str));
3351
3352 // Loader only knows versions 1.0.0 and 1.0.1, anything above it is unknown
3353 if (loader_check_version_meets_required(loader_combine_version(1, 0, 2), json_file_version)) {
3354 loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3355 "loader_parse_icd_manifest: %s has unknown icd manifest file version %d.%d.%d. May cause errors.", file_str,
3356 json_file_version.major, json_file_version.minor, json_file_version.patch);
3357 }
3358
3359 itemICD = cJSON_GetObjectItem(json, "ICD");
3360 if (itemICD == NULL) {
3361 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3362 "loader_parse_icd_manifest: Can not find \'ICD\' object in ICD JSON file %s. Skipping ICD JSON", file_str);
3363 res = VK_ERROR_INCOMPATIBLE_DRIVER;
3364 goto out;
3365 }
3366
3367 item = cJSON_GetObjectItem(itemICD, "library_path");
3368 if (item == NULL) {
3369 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3370 "loader_parse_icd_manifest: Failed to find \'library_path\' object in ICD JSON file %s. Skipping ICD JSON.",
3371 file_str);
3372 res = VK_ERROR_INCOMPATIBLE_DRIVER;
3373 goto out;
3374 }
3375 library_path = cJSON_Print(item);
3376 if (!library_path || strlen(library_path) == 0) {
3377 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3378 "loader_parse_icd_manifest: Failed retrieving ICD JSON %s \'library_path\' field. Skipping ICD JSON.", file_str);
3379 res = VK_ERROR_OUT_OF_HOST_MEMORY;
3380 goto out;
3381 }
3382 size_t library_path_len = strlen(library_path);
3383 // cJSON prints everything with quotes so they need to be removed.
3384 // move every char forward one, so the leading quote is replaced.
3385 memmove(library_path, &library_path[1], library_path_len - 2);
3386 // replace end quote with null terminator
3387 library_path[library_path_len - 2] = '\0';
3388
3389 if (strlen(library_path) == 0) {
3390 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3391 "loader_parse_icd_manifest: ICD JSON %s \'library_path\' field is empty. Skipping ICD JSON.", file_str);
3392 res = VK_ERROR_INCOMPATIBLE_DRIVER;
3393 goto out;
3394 }
3395
3396 // Print out the paths being searched if debugging is enabled
3397 loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0, "Searching for ICD drivers named %s", library_path);
3398 if (loader_platform_is_path(library_path)) {
3399 // a relative or absolute path
3400 char *name_copy = loader_stack_alloc(strlen(file_str) + 1);
3401 char *rel_base;
3402 strcpy(name_copy, file_str);
3403 rel_base = loader_platform_dirname(name_copy);
3404 loader_expand_path(library_path, rel_base, MAX_STRING_SIZE, &icd->full_library_path[0]);
3405 } else {
3406 // a filename which is assumed in a system directory
3407 #if defined(DEFAULT_VK_DRIVERS_PATH)
3408 loader_get_fullpath(library_path, DEFAULT_VK_DRIVERS_PATH, MAX_STRING_SIZE, &icd->full_library_path[0]);
3409 #else
3410 loader_get_fullpath(library_path, "", MAX_STRING_SIZE, &icd->full_library_path[0]);
3411 #endif
3412 }
3413
3414 item = cJSON_GetObjectItem(itemICD, "api_version");
3415 if (item == NULL) {
3416 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3417 "loader_parse_icd_manifest: ICD JSON %s does not have an \'api_version\' field. Skipping ICD JSON.", file_str);
3418 res = VK_ERROR_INCOMPATIBLE_DRIVER;
3419 goto out;
3420 }
3421 version_str = cJSON_Print(item);
3422 if (NULL == version_str) {
3423 // Only reason the print can fail is if there was an allocation issue
3424 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3425 "loader_parse_icd_manifest: Failed retrieving ICD JSON %s \'api_version\' field. Skipping ICD JSON.", file_str);
3426
3427 res = VK_ERROR_OUT_OF_HOST_MEMORY;
3428 goto out;
3429 }
3430 icd->version = loader_parse_version_string(version_str);
3431
3432 if (VK_API_VERSION_VARIANT(icd->version) != 0) {
3433 loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3434 "loader_parse_icd_manifest: Driver's ICD JSON %s \'api_version\' field contains a non-zero variant value of %d. "
3435 " Skipping ICD JSON.",
3436 file_str, VK_API_VERSION_VARIANT(icd->version));
3437 res = VK_ERROR_INCOMPATIBLE_DRIVER;
3438 goto out;
3439 }
3440
3441 // Skip over ICD's which contain a true "is_portability_driver" value whenever the application doesn't enable
3442 // portability enumeration.
3443 item = cJSON_GetObjectItem(itemICD, "is_portability_driver");
3444 if (item != NULL && item->type == cJSON_True && inst && !inst->portability_enumeration_enabled) {
3445 if (skipped_portability_drivers) {
3446 *skipped_portability_drivers = true;
3447 }
3448 res = VK_ERROR_INCOMPATIBLE_DRIVER;
3449 goto out;
3450 }
3451
3452 item = cJSON_GetObjectItem(itemICD, "library_arch");
3453 if (item != NULL) {
3454 library_arch_str = cJSON_Print(item);
3455 if (NULL != library_arch_str) {
3456 // cJSON includes the quotes by default, so we need to look for those here
3457 if ((strncmp(library_arch_str, "\"32\"", 4) == 0 && sizeof(void *) != 4) ||
3458 (strncmp(library_arch_str, "\"64\"", 4) == 0 && sizeof(void *) != 8)) {
3459 loader_log(inst, VULKAN_LOADER_INFO_BIT, 0,
3460 "loader_parse_icd_manifest: Driver library architecture doesn't match the current running "
3461 "architecture, skipping this driver");
3462 res = VK_ERROR_INCOMPATIBLE_DRIVER;
3463 goto out;
3464 }
3465 }
3466 }
3467 out:
3468 cJSON_Delete(json);
3469 loader_instance_heap_free(inst, file_vers_str);
3470 loader_instance_heap_free(inst, library_path);
3471 loader_instance_heap_free(inst, version_str);
3472 loader_instance_heap_free(inst, library_arch_str);
3473 return res;
3474 }
3475
3476 // Try to find the Vulkan ICD driver(s).
3477 //
3478 // This function scans the default system loader path(s) or path specified by either the
3479 // VK_DRIVER_FILES or VK_ICD_FILENAMES environment variable in order to find loadable
3480 // VK ICDs manifest files.
3481 // From these manifest files it finds the ICD libraries.
3482 //
3483 // skipped_portability_drivers is used to report whether the loader found drivers which report
3484 // portability but the application didn't enable the bit to enumerate them
3485 // Can be NULL
3486 //
3487 // \returns
3488 // Vulkan result
3489 // (on result == VK_SUCCESS) a list of icds that were discovered
loader_icd_scan(const struct loader_instance * inst,struct loader_icd_tramp_list * icd_tramp_list,bool * skipped_portability_drivers)3490 VkResult loader_icd_scan(const struct loader_instance *inst, struct loader_icd_tramp_list *icd_tramp_list,
3491 bool *skipped_portability_drivers) {
3492 struct loader_data_files manifest_files;
3493 VkResult res = VK_SUCCESS;
3494 bool lockedMutex = false;
3495
3496 memset(&manifest_files, 0, sizeof(struct loader_data_files));
3497
3498 res = loader_scanned_icd_init(inst, icd_tramp_list);
3499 if (VK_SUCCESS != res) {
3500 goto out;
3501 }
3502
3503 // Get a list of manifest files for ICDs
3504 res = loader_get_data_files(inst, LOADER_DATA_FILE_MANIFEST_DRIVER, NULL, &manifest_files);
3505 if (VK_SUCCESS != res || manifest_files.count == 0) {
3506 goto out;
3507 }
3508
3509 loader_platform_thread_lock_mutex(&loader_json_lock);
3510 lockedMutex = true;
3511 for (uint32_t i = 0; i < manifest_files.count; i++) {
3512 VkResult icd_res = VK_SUCCESS;
3513 struct ICDManifestInfo icd;
3514 memset(&icd, 0, sizeof(struct ICDManifestInfo));
3515 icd_res = loader_parse_icd_manifest(inst, manifest_files.filename_list[i], &icd, skipped_portability_drivers);
3516 if (VK_ERROR_OUT_OF_HOST_MEMORY == icd_res) {
3517 res = icd_res;
3518 goto out;
3519 } else if (VK_ERROR_INCOMPATIBLE_DRIVER == icd_res) {
3520 continue;
3521 }
3522
3523 enum loader_layer_library_status lib_status;
3524 icd_res = loader_scanned_icd_add(inst, icd_tramp_list, icd.full_library_path, icd.version, &lib_status);
3525 if (VK_ERROR_OUT_OF_HOST_MEMORY == icd_res) {
3526 res = icd_res;
3527 goto out;
3528 } else if (VK_ERROR_INCOMPATIBLE_DRIVER == icd_res) {
3529 switch (lib_status) {
3530 case LOADER_LAYER_LIB_NOT_LOADED:
3531 case LOADER_LAYER_LIB_ERROR_FAILED_TO_LOAD:
3532 loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3533 "loader_icd_scan: Failed loading library associated with ICD JSON %s. Ignoring this JSON",
3534 icd.full_library_path);
3535 break;
3536 case LOADER_LAYER_LIB_ERROR_WRONG_BIT_TYPE: {
3537 loader_log(inst, VULKAN_LOADER_DRIVER_BIT, 0, "Requested layer %s was wrong bit-type. Ignoring this JSON",
3538 icd.full_library_path);
3539 break;
3540 }
3541 case LOADER_LAYER_LIB_SUCCESS_LOADED:
3542 // Shouldn't be able to reach this but if it is, best to report a debug
3543 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
3544 "Shouldn't reach this. A valid version of requested ICD %s was loaded but something bad "
3545 "happened afterwards.",
3546 icd.full_library_path);
3547 break;
3548 }
3549 }
3550 }
3551
3552 out:
3553 if (NULL != manifest_files.filename_list) {
3554 for (uint32_t i = 0; i < manifest_files.count; i++) {
3555 loader_instance_heap_free(inst, manifest_files.filename_list[i]);
3556 }
3557 loader_instance_heap_free(inst, manifest_files.filename_list);
3558 }
3559 if (lockedMutex) {
3560 loader_platform_thread_unlock_mutex(&loader_json_lock);
3561 }
3562
3563 return res;
3564 }
3565
loader_scan_for_layers(struct loader_instance * inst,struct loader_layer_list * instance_layers)3566 void loader_scan_for_layers(struct loader_instance *inst, struct loader_layer_list *instance_layers) {
3567 char *file_str;
3568 struct loader_data_files manifest_files;
3569 cJSON *json;
3570 bool override_layer_valid = false;
3571 char *override_paths = NULL;
3572 uint32_t total_count = 0;
3573
3574 memset(&manifest_files, 0, sizeof(struct loader_data_files));
3575
3576 // Cleanup any previously scanned libraries
3577 loader_delete_layer_list_and_properties(inst, instance_layers);
3578
3579 loader_platform_thread_lock_mutex(&loader_json_lock);
3580
3581 // Get a list of manifest files for any implicit layers
3582 if (VK_SUCCESS != loader_get_data_files(inst, LOADER_DATA_FILE_MANIFEST_IMPLICIT_LAYER, NULL, &manifest_files)) {
3583 goto out;
3584 }
3585
3586 if (manifest_files.count != 0) {
3587 total_count += manifest_files.count;
3588 for (uint32_t i = 0; i < manifest_files.count; i++) {
3589 file_str = manifest_files.filename_list[i];
3590 if (file_str == NULL) {
3591 continue;
3592 }
3593
3594 // Parse file into JSON struct
3595 VkResult res = loader_get_json(inst, file_str, &json);
3596 if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
3597 goto out;
3598 } else if (VK_SUCCESS != res || NULL == json) {
3599 continue;
3600 }
3601
3602 VkResult local_res = loader_add_layer_properties(inst, instance_layers, json, true, file_str);
3603 cJSON_Delete(json);
3604
3605 // If the error is anything other than out of memory we still want to try to load the other layers
3606 if (VK_ERROR_OUT_OF_HOST_MEMORY == local_res) {
3607 goto out;
3608 }
3609 }
3610 }
3611
3612 // Remove any extraneous override layers.
3613 remove_all_non_valid_override_layers(inst, instance_layers);
3614
3615 // Check to see if the override layer is present, and use it's override paths.
3616 for (int32_t i = 0; i < (int32_t)instance_layers->count; i++) {
3617 struct loader_layer_properties *prop = &instance_layers->list[i];
3618 if (prop->is_override && loader_implicit_layer_is_enabled(inst, prop) && prop->num_override_paths > 0) {
3619 char *cur_write_ptr = NULL;
3620 size_t override_path_size = 0;
3621 for (uint32_t j = 0; j < prop->num_override_paths; j++) {
3622 override_path_size += determine_data_file_path_size(prop->override_paths[j], 0);
3623 }
3624 override_paths = loader_instance_heap_alloc(inst, override_path_size, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
3625 if (override_paths == NULL) {
3626 goto out;
3627 }
3628 cur_write_ptr = &override_paths[0];
3629 for (uint32_t j = 0; j < prop->num_override_paths; j++) {
3630 copy_data_file_info(prop->override_paths[j], NULL, 0, &cur_write_ptr);
3631 }
3632 // Remove the last path separator
3633 --cur_write_ptr;
3634 assert(cur_write_ptr - override_paths < (ptrdiff_t)override_path_size);
3635 *cur_write_ptr = '\0';
3636 loader_log(NULL, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
3637 "loader_scan_for_layers: Override layer has override paths set to %s", override_paths);
3638 }
3639 }
3640
3641 // Get a list of manifest files for explicit layers
3642 if (VK_SUCCESS != loader_get_data_files(inst, LOADER_DATA_FILE_MANIFEST_EXPLICIT_LAYER, override_paths, &manifest_files)) {
3643 goto out;
3644 }
3645
3646 // Make sure we have at least one layer, if not, go ahead and return
3647 if (manifest_files.count == 0 && total_count == 0) {
3648 goto out;
3649 } else {
3650 for (uint32_t i = 0; i < manifest_files.count; i++) {
3651 file_str = manifest_files.filename_list[i];
3652 if (file_str == NULL) {
3653 continue;
3654 }
3655
3656 // Parse file into JSON struct
3657 VkResult res = loader_get_json(inst, file_str, &json);
3658 if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
3659 goto out;
3660 } else if (VK_SUCCESS != res || NULL == json) {
3661 continue;
3662 }
3663
3664 VkResult local_res = loader_add_layer_properties(inst, instance_layers, json, false, file_str);
3665 cJSON_Delete(json);
3666
3667 // If the error is anything other than out of memory we still want to try to load the other layers
3668 if (VK_ERROR_OUT_OF_HOST_MEMORY == local_res) {
3669 goto out;
3670 }
3671 }
3672 }
3673
3674 // Verify any meta-layers in the list are valid and all the component layers are
3675 // actually present in the available layer list
3676 verify_all_meta_layers(inst, instance_layers, &override_layer_valid);
3677
3678 if (override_layer_valid) {
3679 loader_remove_layers_in_blacklist(inst, instance_layers);
3680 if (NULL != inst) {
3681 inst->override_layer_present = true;
3682 }
3683 }
3684
3685 out:
3686
3687 loader_instance_heap_free(inst, override_paths);
3688 if (NULL != manifest_files.filename_list) {
3689 for (uint32_t i = 0; i < manifest_files.count; i++) {
3690 loader_instance_heap_free(inst, manifest_files.filename_list[i]);
3691 }
3692 loader_instance_heap_free(inst, manifest_files.filename_list);
3693 }
3694 loader_platform_thread_unlock_mutex(&loader_json_lock);
3695 }
3696
loader_scan_for_implicit_layers(struct loader_instance * inst,struct loader_layer_list * instance_layers)3697 void loader_scan_for_implicit_layers(struct loader_instance *inst, struct loader_layer_list *instance_layers) {
3698 char *file_str;
3699 struct loader_data_files manifest_files;
3700 cJSON *json;
3701 bool override_layer_valid = false;
3702 char *override_paths = NULL;
3703 bool implicit_metalayer_present = false;
3704 bool have_json_lock = false;
3705
3706 // Before we begin anything, init manifest_files to avoid a delete of garbage memory if
3707 // a failure occurs before allocating the manifest filename_list.
3708 memset(&manifest_files, 0, sizeof(struct loader_data_files));
3709
3710 VkResult res = loader_get_data_files(inst, LOADER_DATA_FILE_MANIFEST_IMPLICIT_LAYER, NULL, &manifest_files);
3711 if (VK_SUCCESS != res || manifest_files.count == 0) {
3712 goto out;
3713 }
3714
3715 // Cleanup any previously scanned libraries
3716 loader_delete_layer_list_and_properties(inst, instance_layers);
3717
3718 loader_platform_thread_lock_mutex(&loader_json_lock);
3719 have_json_lock = true;
3720
3721 for (uint32_t i = 0; i < manifest_files.count; i++) {
3722 file_str = manifest_files.filename_list[i];
3723 if (file_str == NULL) {
3724 continue;
3725 }
3726
3727 // parse file into JSON struct
3728 res = loader_get_json(inst, file_str, &json);
3729 if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
3730 goto out;
3731 } else if (VK_SUCCESS != res || NULL == json) {
3732 continue;
3733 }
3734
3735 res = loader_add_layer_properties(inst, instance_layers, json, true, file_str);
3736
3737 loader_instance_heap_free(inst, file_str);
3738 manifest_files.filename_list[i] = NULL;
3739 cJSON_Delete(json);
3740
3741 if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
3742 goto out;
3743 }
3744 }
3745
3746 // Remove any extraneous override layers.
3747 remove_all_non_valid_override_layers(inst, instance_layers);
3748
3749 // Check to see if either the override layer is present, or another implicit meta-layer.
3750 // Each of these may require explicit layers to be enabled at this time.
3751 for (int32_t i = 0; i < (int32_t)instance_layers->count; i++) {
3752 struct loader_layer_properties *prop = &instance_layers->list[i];
3753 if (prop->is_override && loader_implicit_layer_is_enabled(inst, prop)) {
3754 override_layer_valid = true;
3755 if (prop->num_override_paths > 0) {
3756 char *cur_write_ptr = NULL;
3757 size_t override_path_size = 0;
3758 for (uint32_t j = 0; j < prop->num_override_paths; j++) {
3759 override_path_size += determine_data_file_path_size(prop->override_paths[j], 0);
3760 }
3761 override_paths = loader_instance_heap_alloc(inst, override_path_size, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
3762 if (override_paths == NULL) {
3763 goto out;
3764 }
3765 cur_write_ptr = &override_paths[0];
3766 for (uint32_t j = 0; j < prop->num_override_paths; j++) {
3767 copy_data_file_info(prop->override_paths[j], NULL, 0, &cur_write_ptr);
3768 }
3769 // Remove the last path separator
3770 --cur_write_ptr;
3771 assert(cur_write_ptr - override_paths < (ptrdiff_t)override_path_size);
3772 *cur_write_ptr = '\0';
3773 loader_log(NULL, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
3774 "loader_scan_for_implicit_layers: Override layer has override paths set to %s", override_paths);
3775 }
3776 } else if (!prop->is_override && prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER) {
3777 implicit_metalayer_present = true;
3778 }
3779 }
3780
3781 // If either the override layer or an implicit meta-layer are present, we need to add
3782 // explicit layer info as well. Not to worry, though, all explicit layers not included
3783 // in the override layer will be removed below in loader_remove_layers_in_blacklist().
3784 if (override_layer_valid || implicit_metalayer_present) {
3785 if (VK_SUCCESS != loader_get_data_files(inst, LOADER_DATA_FILE_MANIFEST_EXPLICIT_LAYER, override_paths, &manifest_files)) {
3786 goto out;
3787 }
3788
3789 for (uint32_t i = 0; i < manifest_files.count; i++) {
3790 file_str = manifest_files.filename_list[i];
3791 if (file_str == NULL) {
3792 continue;
3793 }
3794
3795 // parse file into JSON struct
3796 res = loader_get_json(inst, file_str, &json);
3797 if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
3798 goto out;
3799 } else if (VK_SUCCESS != res || NULL == json) {
3800 continue;
3801 }
3802
3803 res = loader_add_layer_properties(inst, instance_layers, json, false, file_str);
3804
3805 loader_instance_heap_free(inst, file_str);
3806 manifest_files.filename_list[i] = NULL;
3807 cJSON_Delete(json);
3808
3809 if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
3810 goto out;
3811 }
3812 }
3813 }
3814
3815 // Verify any meta-layers in the list are valid and all the component layers are
3816 // actually present in the available layer list
3817 verify_all_meta_layers(inst, instance_layers, &override_layer_valid);
3818
3819 if (override_layer_valid || implicit_metalayer_present) {
3820 loader_remove_layers_not_in_implicit_meta_layers(inst, instance_layers);
3821 if (override_layer_valid && inst != NULL) {
3822 inst->override_layer_present = true;
3823 }
3824 }
3825
3826 out:
3827
3828 loader_instance_heap_free(inst, override_paths);
3829 for (uint32_t i = 0; i < manifest_files.count; i++) {
3830 loader_instance_heap_free(inst, manifest_files.filename_list[i]);
3831 }
3832 loader_instance_heap_free(inst, manifest_files.filename_list);
3833
3834 if (have_json_lock) {
3835 loader_platform_thread_unlock_mutex(&loader_json_lock);
3836 }
3837 }
3838
loader_gpdpa_instance_terminator(VkInstance inst,const char * pName)3839 static VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL loader_gpdpa_instance_terminator(VkInstance inst, const char *pName) {
3840 // inst is not wrapped
3841 if (inst == VK_NULL_HANDLE) {
3842 return NULL;
3843 }
3844 VkLayerInstanceDispatchTable *disp_table = *(VkLayerInstanceDispatchTable **)inst;
3845 void *addr;
3846
3847 if (disp_table == NULL) return NULL;
3848
3849 bool found_name;
3850 addr = loader_lookup_instance_dispatch_table(disp_table, pName, &found_name);
3851 if (found_name) {
3852 return addr;
3853 }
3854
3855 // Check if any drivers support the function, and if so, add it to the unknown function list
3856 addr = loader_phys_dev_ext_gpa_term(loader_get_instance(inst), pName);
3857 if (NULL != addr) return addr;
3858
3859 // Don't call down the chain, this would be an infinite loop
3860 loader_log(NULL, VULKAN_LOADER_DEBUG_BIT, 0, "loader_gpdpa_instance_terminator() unrecognized name %s", pName);
3861 return NULL;
3862 }
3863
loader_gpa_instance_terminator(VkInstance inst,const char * pName)3864 static VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL loader_gpa_instance_terminator(VkInstance inst, const char *pName) {
3865 if (!strcmp(pName, "vkGetInstanceProcAddr")) {
3866 return (PFN_vkVoidFunction)loader_gpa_instance_terminator;
3867 }
3868 if (!strcmp(pName, "vk_layerGetPhysicalDeviceProcAddr")) {
3869 return (PFN_vkVoidFunction)loader_gpdpa_instance_terminator;
3870 }
3871 if (!strcmp(pName, "vkCreateInstance")) {
3872 return (PFN_vkVoidFunction)terminator_CreateInstance;
3873 }
3874 if (!strcmp(pName, "vkCreateDevice")) {
3875 return (PFN_vkVoidFunction)terminator_CreateDevice;
3876 }
3877
3878 // The VK_EXT_debug_utils functions need a special case here so the terminators can still be found from vkGetInstanceProcAddr
3879 if (!strcmp(pName, "vkSetDebugUtilsObjectNameEXT")) {
3880 return (PFN_vkVoidFunction)terminator_SetDebugUtilsObjectNameEXT;
3881 }
3882 if (!strcmp(pName, "vkSetDebugUtilsObjectTagEXT")) {
3883 return (PFN_vkVoidFunction)terminator_SetDebugUtilsObjectTagEXT;
3884 }
3885 if (!strcmp(pName, "vkQueueBeginDebugUtilsLabelEXT")) {
3886 return (PFN_vkVoidFunction)terminator_QueueBeginDebugUtilsLabelEXT;
3887 }
3888 if (!strcmp(pName, "vkQueueEndDebugUtilsLabelEXT")) {
3889 return (PFN_vkVoidFunction)terminator_QueueEndDebugUtilsLabelEXT;
3890 }
3891 if (!strcmp(pName, "vkQueueInsertDebugUtilsLabelEXT")) {
3892 return (PFN_vkVoidFunction)terminator_QueueInsertDebugUtilsLabelEXT;
3893 }
3894 if (!strcmp(pName, "vkCmdBeginDebugUtilsLabelEXT")) {
3895 return (PFN_vkVoidFunction)terminator_CmdBeginDebugUtilsLabelEXT;
3896 }
3897 if (!strcmp(pName, "vkCmdEndDebugUtilsLabelEXT")) {
3898 return (PFN_vkVoidFunction)terminator_CmdEndDebugUtilsLabelEXT;
3899 }
3900 if (!strcmp(pName, "vkCmdInsertDebugUtilsLabelEXT")) {
3901 return (PFN_vkVoidFunction)terminator_CmdInsertDebugUtilsLabelEXT;
3902 }
3903
3904 // inst is not wrapped
3905 if (inst == VK_NULL_HANDLE) {
3906 return NULL;
3907 }
3908 VkLayerInstanceDispatchTable *disp_table = *(VkLayerInstanceDispatchTable **)inst;
3909 void *addr;
3910
3911 if (disp_table == NULL) return NULL;
3912
3913 bool found_name;
3914 addr = loader_lookup_instance_dispatch_table(disp_table, pName, &found_name);
3915 if (found_name) {
3916 return addr;
3917 }
3918
3919 // Check if it is an unknown physical device function, to see if any drivers support it.
3920 addr = loader_phys_dev_ext_gpa_term(loader_get_instance(inst), pName);
3921 if (addr) {
3922 return addr;
3923 }
3924
3925 // Assume it is an unknown device function, check to see if any drivers support it.
3926 addr = loader_dev_ext_gpa_term(loader_get_instance(inst), pName);
3927 if (addr) {
3928 return addr;
3929 }
3930
3931 // Don't call down the chain, this would be an infinite loop
3932 loader_log(NULL, VULKAN_LOADER_DEBUG_BIT, 0, "loader_gpa_instance_terminator() unrecognized name %s", pName);
3933 return NULL;
3934 }
3935
loader_gpa_device_terminator(VkDevice device,const char * pName)3936 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL loader_gpa_device_terminator(VkDevice device, const char *pName) {
3937 struct loader_device *dev;
3938 struct loader_icd_term *icd_term = loader_get_icd_and_device(device, &dev, NULL);
3939
3940 // Return this function if a layer above here is asking for the vkGetDeviceProcAddr.
3941 // This is so we can properly intercept any device commands needing a terminator.
3942 if (!strcmp(pName, "vkGetDeviceProcAddr")) {
3943 return (PFN_vkVoidFunction)loader_gpa_device_terminator;
3944 }
3945
3946 // NOTE: Device Funcs needing Trampoline/Terminator.
3947 // Overrides for device functions needing a trampoline and
3948 // a terminator because certain device entry-points still need to go
3949 // through a terminator before hitting the ICD. This could be for
3950 // several reasons, but the main one is currently unwrapping an
3951 // object before passing the appropriate info along to the ICD.
3952 // This is why we also have to override the direct ICD call to
3953 // vkGetDeviceProcAddr to intercept those calls.
3954 PFN_vkVoidFunction addr = get_extension_device_proc_terminator(dev, pName);
3955 if (NULL != addr) {
3956 return addr;
3957 }
3958
3959 return icd_term->dispatch.GetDeviceProcAddr(device, pName);
3960 }
3961
loader_get_instance(const VkInstance instance)3962 struct loader_instance *loader_get_instance(const VkInstance instance) {
3963 // look up the loader_instance in our list by comparing dispatch tables, as
3964 // there is no guarantee the instance is still a loader_instance* after any
3965 // layers which wrap the instance object.
3966 const VkLayerInstanceDispatchTable *disp;
3967 struct loader_instance *ptr_instance = (struct loader_instance *)instance;
3968 if (VK_NULL_HANDLE == instance || LOADER_MAGIC_NUMBER != ptr_instance->magic) {
3969 return NULL;
3970 } else {
3971 disp = loader_get_instance_layer_dispatch(instance);
3972 loader_platform_thread_lock_mutex(&loader_global_instance_list_lock);
3973 for (struct loader_instance *inst = loader.instances; inst; inst = inst->next) {
3974 if (&inst->disp->layer_inst_disp == disp) {
3975 ptr_instance = inst;
3976 break;
3977 }
3978 }
3979 loader_platform_thread_unlock_mutex(&loader_global_instance_list_lock);
3980 }
3981 return ptr_instance;
3982 }
3983
loader_open_layer_file(const struct loader_instance * inst,struct loader_layer_properties * prop)3984 static loader_platform_dl_handle loader_open_layer_file(const struct loader_instance *inst, struct loader_layer_properties *prop) {
3985 if ((prop->lib_handle = loader_platform_open_library(prop->lib_name)) == NULL) {
3986 loader_handle_load_library_error(inst, prop->lib_name, &prop->lib_status);
3987 } else {
3988 prop->lib_status = LOADER_LAYER_LIB_SUCCESS_LOADED;
3989 loader_log(inst, VULKAN_LOADER_DEBUG_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Loading layer library %s", prop->lib_name);
3990 }
3991
3992 return prop->lib_handle;
3993 }
3994
loader_close_layer_file(const struct loader_instance * inst,struct loader_layer_properties * prop)3995 static void loader_close_layer_file(const struct loader_instance *inst, struct loader_layer_properties *prop) {
3996 if (prop->lib_handle) {
3997 loader_platform_close_library(prop->lib_handle);
3998 loader_log(inst, VULKAN_LOADER_DEBUG_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Unloading layer library %s", prop->lib_name);
3999 prop->lib_handle = NULL;
4000 }
4001 }
4002
loader_deactivate_layers(const struct loader_instance * instance,struct loader_device * device,struct loader_layer_list * list)4003 void loader_deactivate_layers(const struct loader_instance *instance, struct loader_device *device,
4004 struct loader_layer_list *list) {
4005 // Delete instance list of enabled layers and close any layer libraries
4006 for (uint32_t i = 0; i < list->count; i++) {
4007 struct loader_layer_properties *layer_prop = &list->list[i];
4008
4009 loader_close_layer_file(instance, layer_prop);
4010 }
4011 loader_destroy_layer_list(instance, device, list);
4012 }
4013
4014 // Go through the search_list and find any layers which match type. If layer
4015 // type match is found in then add it to ext_list.
loader_add_implicit_layers(const struct loader_instance * inst,struct loader_layer_list * target_list,struct loader_layer_list * expanded_target_list,const struct loader_layer_list * source_list)4016 static void loader_add_implicit_layers(const struct loader_instance *inst, struct loader_layer_list *target_list,
4017 struct loader_layer_list *expanded_target_list,
4018 const struct loader_layer_list *source_list) {
4019 for (uint32_t src_layer = 0; src_layer < source_list->count; src_layer++) {
4020 const struct loader_layer_properties *prop = &source_list->list[src_layer];
4021 if (0 == (prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER)) {
4022 loader_add_implicit_layer(inst, prop, target_list, expanded_target_list, source_list);
4023 }
4024 }
4025 }
4026
4027 // Get the layer name(s) from the env_name environment variable. If layer is found in
4028 // search_list then add it to layer_list. But only add it to layer_list if type_flags matches.
loader_add_environment_layers(struct loader_instance * inst,const enum layer_type_flags type_flags,const char * env_name,struct loader_layer_list * target_list,struct loader_layer_list * expanded_target_list,const struct loader_layer_list * source_list)4029 static VkResult loader_add_environment_layers(struct loader_instance *inst, const enum layer_type_flags type_flags,
4030 const char *env_name, struct loader_layer_list *target_list,
4031 struct loader_layer_list *expanded_target_list,
4032 const struct loader_layer_list *source_list) {
4033 VkResult res = VK_SUCCESS;
4034 char *next, *name;
4035 char *layer_env = loader_getenv(env_name, inst);
4036 if (layer_env == NULL) {
4037 goto out;
4038 }
4039 name = loader_stack_alloc(strlen(layer_env) + 1);
4040 if (name == NULL) {
4041 goto out;
4042 }
4043 strcpy(name, layer_env);
4044
4045 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
4046 "loader_add_environment_layers: Env Var %s defined and adding layers %s", env_name, name);
4047
4048 while (name && *name) {
4049 next = loader_get_next_path(name);
4050 res = loader_add_layer_name_to_list(inst, name, type_flags, source_list, target_list, expanded_target_list);
4051 if (res != VK_SUCCESS) {
4052 goto out;
4053 }
4054 name = next;
4055 }
4056
4057 out:
4058
4059 if (layer_env != NULL) {
4060 loader_free_getenv(layer_env, inst);
4061 }
4062
4063 return res;
4064 }
4065
loader_enable_instance_layers(struct loader_instance * inst,const VkInstanceCreateInfo * pCreateInfo,const struct loader_layer_list * instance_layers)4066 VkResult loader_enable_instance_layers(struct loader_instance *inst, const VkInstanceCreateInfo *pCreateInfo,
4067 const struct loader_layer_list *instance_layers) {
4068 assert(inst && "Cannot have null instance");
4069
4070 if (!loader_init_layer_list(inst, &inst->app_activated_layer_list)) {
4071 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
4072 "loader_enable_instance_layers: Failed to initialize application version of the layer list");
4073 return VK_ERROR_OUT_OF_HOST_MEMORY;
4074 }
4075
4076 if (!loader_init_layer_list(inst, &inst->expanded_activated_layer_list)) {
4077 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
4078 "loader_enable_instance_layers: Failed to initialize expanded version of the layer list");
4079 return VK_ERROR_OUT_OF_HOST_MEMORY;
4080 }
4081
4082 // Add any implicit layers first
4083 loader_add_implicit_layers(inst, &inst->app_activated_layer_list, &inst->expanded_activated_layer_list, instance_layers);
4084
4085 // Add any layers specified via environment variable next
4086 VkResult err =
4087 loader_add_environment_layers(inst, VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER, "VK_INSTANCE_LAYERS",
4088 &inst->app_activated_layer_list, &inst->expanded_activated_layer_list, instance_layers);
4089 if (err != VK_SUCCESS) {
4090 return err;
4091 }
4092
4093 // Add layers specified by the application
4094 err = loader_add_layer_names_to_list(inst, &inst->app_activated_layer_list, &inst->expanded_activated_layer_list,
4095 pCreateInfo->enabledLayerCount, pCreateInfo->ppEnabledLayerNames, instance_layers);
4096
4097 for (uint32_t i = 0; i < inst->expanded_activated_layer_list.count; i++) {
4098 // Verify that the layer api version is at least that of the application's request, if not, throw a warning since
4099 // undefined behavior could occur.
4100 struct loader_layer_properties *prop = inst->expanded_activated_layer_list.list + i;
4101 loader_api_version prop_spec_version = loader_make_version(prop->info.specVersion);
4102 if (!loader_check_version_meets_required(inst->app_api_version, prop_spec_version)) {
4103 loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
4104 "Layer %s uses API version %u.%u which is older than the application specified "
4105 "API version of %u.%u. May cause issues.",
4106 prop->info.layerName, prop_spec_version.major, prop_spec_version.minor, inst->app_api_version.major,
4107 inst->app_api_version.minor);
4108 }
4109 }
4110
4111 return err;
4112 }
4113
4114 // Determine the layer interface version to use.
loader_get_layer_interface_version(PFN_vkNegotiateLoaderLayerInterfaceVersion fp_negotiate_layer_version,VkNegotiateLayerInterface * interface_struct)4115 bool loader_get_layer_interface_version(PFN_vkNegotiateLoaderLayerInterfaceVersion fp_negotiate_layer_version,
4116 VkNegotiateLayerInterface *interface_struct) {
4117 memset(interface_struct, 0, sizeof(VkNegotiateLayerInterface));
4118 interface_struct->sType = LAYER_NEGOTIATE_INTERFACE_STRUCT;
4119 interface_struct->loaderLayerInterfaceVersion = 1;
4120 interface_struct->pNext = NULL;
4121
4122 if (fp_negotiate_layer_version != NULL) {
4123 // Layer supports the negotiation API, so call it with the loader's
4124 // latest version supported
4125 interface_struct->loaderLayerInterfaceVersion = CURRENT_LOADER_LAYER_INTERFACE_VERSION;
4126 VkResult result = fp_negotiate_layer_version(interface_struct);
4127
4128 if (result != VK_SUCCESS) {
4129 // Layer no longer supports the loader's latest interface version so
4130 // fail loading the Layer
4131 return false;
4132 }
4133 }
4134
4135 if (interface_struct->loaderLayerInterfaceVersion < MIN_SUPPORTED_LOADER_LAYER_INTERFACE_VERSION) {
4136 // Loader no longer supports the layer's latest interface version so
4137 // fail loading the layer
4138 return false;
4139 }
4140
4141 return true;
4142 }
4143
loader_layer_create_device(VkInstance instance,VkPhysicalDevice physicalDevice,const VkDeviceCreateInfo * pCreateInfo,const VkAllocationCallbacks * pAllocator,VkDevice * pDevice,PFN_vkGetInstanceProcAddr layerGIPA,PFN_vkGetDeviceProcAddr * nextGDPA)4144 VKAPI_ATTR VkResult VKAPI_CALL loader_layer_create_device(VkInstance instance, VkPhysicalDevice physicalDevice,
4145 const VkDeviceCreateInfo *pCreateInfo,
4146 const VkAllocationCallbacks *pAllocator, VkDevice *pDevice,
4147 PFN_vkGetInstanceProcAddr layerGIPA, PFN_vkGetDeviceProcAddr *nextGDPA) {
4148 VkResult res;
4149 VkPhysicalDevice internal_device = VK_NULL_HANDLE;
4150 struct loader_device *dev = NULL;
4151 struct loader_instance *inst = NULL;
4152
4153 if (instance != VK_NULL_HANDLE) {
4154 inst = loader_get_instance(instance);
4155 internal_device = physicalDevice;
4156 } else {
4157 struct loader_physical_device_tramp *phys_dev = (struct loader_physical_device_tramp *)physicalDevice;
4158 internal_device = phys_dev->phys_dev;
4159 inst = (struct loader_instance *)phys_dev->this_instance;
4160 }
4161
4162 // Get the physical device (ICD) extensions
4163 struct loader_extension_list icd_exts;
4164 icd_exts.list = NULL;
4165 res = loader_init_generic_list(inst, (struct loader_generic_list *)&icd_exts, sizeof(VkExtensionProperties));
4166 if (VK_SUCCESS != res) {
4167 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "vkCreateDevice: Failed to create ICD extension list");
4168 goto out;
4169 }
4170
4171 PFN_vkEnumerateDeviceExtensionProperties enumDeviceExtensionProperties = NULL;
4172 if (layerGIPA != NULL) {
4173 enumDeviceExtensionProperties =
4174 (PFN_vkEnumerateDeviceExtensionProperties)layerGIPA(instance, "vkEnumerateDeviceExtensionProperties");
4175 } else {
4176 enumDeviceExtensionProperties = inst->disp->layer_inst_disp.EnumerateDeviceExtensionProperties;
4177 }
4178 res = loader_add_device_extensions(inst, enumDeviceExtensionProperties, internal_device, "Unknown", &icd_exts);
4179 if (res != VK_SUCCESS) {
4180 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "vkCreateDevice: Failed to add extensions to list");
4181 goto out;
4182 }
4183
4184 // Make sure requested extensions to be enabled are supported
4185 res = loader_validate_device_extensions(inst, &inst->expanded_activated_layer_list, &icd_exts, pCreateInfo);
4186 if (res != VK_SUCCESS) {
4187 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "vkCreateDevice: Failed to validate extensions in list");
4188 goto out;
4189 }
4190
4191 dev = loader_create_logical_device(inst, pAllocator);
4192 if (dev == NULL) {
4193 res = VK_ERROR_OUT_OF_HOST_MEMORY;
4194 goto out;
4195 }
4196
4197 // Copy the application enabled instance layer list into the device
4198 if (NULL != inst->app_activated_layer_list.list) {
4199 dev->app_activated_layer_list.capacity = inst->app_activated_layer_list.capacity;
4200 dev->app_activated_layer_list.count = inst->app_activated_layer_list.count;
4201 dev->app_activated_layer_list.list =
4202 loader_device_heap_alloc(dev, inst->app_activated_layer_list.capacity, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
4203 if (dev->app_activated_layer_list.list == NULL) {
4204 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
4205 "vkCreateDevice: Failed to allocate application activated layer list of size %d.",
4206 inst->app_activated_layer_list.capacity);
4207 res = VK_ERROR_OUT_OF_HOST_MEMORY;
4208 goto out;
4209 }
4210 memcpy(dev->app_activated_layer_list.list, inst->app_activated_layer_list.list,
4211 sizeof(*dev->app_activated_layer_list.list) * dev->app_activated_layer_list.count);
4212 } else {
4213 dev->app_activated_layer_list.capacity = 0;
4214 dev->app_activated_layer_list.count = 0;
4215 dev->app_activated_layer_list.list = NULL;
4216 }
4217
4218 // Copy the expanded enabled instance layer list into the device
4219 if (NULL != inst->expanded_activated_layer_list.list) {
4220 dev->expanded_activated_layer_list.capacity = inst->expanded_activated_layer_list.capacity;
4221 dev->expanded_activated_layer_list.count = inst->expanded_activated_layer_list.count;
4222 dev->expanded_activated_layer_list.list =
4223 loader_device_heap_alloc(dev, inst->expanded_activated_layer_list.capacity, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
4224 if (dev->expanded_activated_layer_list.list == NULL) {
4225 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
4226 "vkCreateDevice: Failed to allocate expanded activated layer list of size %d.",
4227 inst->expanded_activated_layer_list.capacity);
4228 res = VK_ERROR_OUT_OF_HOST_MEMORY;
4229 goto out;
4230 }
4231 memcpy(dev->expanded_activated_layer_list.list, inst->expanded_activated_layer_list.list,
4232 sizeof(*dev->expanded_activated_layer_list.list) * dev->expanded_activated_layer_list.count);
4233 } else {
4234 dev->expanded_activated_layer_list.capacity = 0;
4235 dev->expanded_activated_layer_list.count = 0;
4236 dev->expanded_activated_layer_list.list = NULL;
4237 }
4238
4239 res = loader_create_device_chain(internal_device, pCreateInfo, pAllocator, inst, dev, layerGIPA, nextGDPA);
4240 if (res != VK_SUCCESS) {
4241 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "vkCreateDevice: Failed to create device chain.");
4242 goto out;
4243 }
4244
4245 *pDevice = dev->chain_device;
4246
4247 // Initialize any device extension dispatch entry's from the instance list
4248 loader_init_dispatch_dev_ext(inst, dev);
4249
4250 // Initialize WSI device extensions as part of core dispatch since loader
4251 // has dedicated trampoline code for these
4252 loader_init_device_extension_dispatch_table(&dev->loader_dispatch, inst->disp->layer_inst_disp.GetInstanceProcAddr,
4253 dev->loader_dispatch.core_dispatch.GetDeviceProcAddr, inst->instance, *pDevice);
4254
4255 out:
4256
4257 // Failure cleanup
4258 if (VK_SUCCESS != res) {
4259 if (NULL != dev) {
4260 // Find the icd_term this device belongs to then remove it from that icd_term.
4261 // Need to iterate the linked lists and remove the device from it. Don't delete
4262 // the device here since it may not have been added to the icd_term and there
4263 // are other allocations attached to it.
4264 struct loader_icd_term *icd_term = inst->icd_terms;
4265 bool found = false;
4266 while (!found && NULL != icd_term) {
4267 struct loader_device *cur_dev = icd_term->logical_device_list;
4268 struct loader_device *prev_dev = NULL;
4269 while (NULL != cur_dev) {
4270 if (cur_dev == dev) {
4271 if (cur_dev == icd_term->logical_device_list) {
4272 icd_term->logical_device_list = cur_dev->next;
4273 } else if (prev_dev) {
4274 prev_dev->next = cur_dev->next;
4275 }
4276
4277 found = true;
4278 break;
4279 }
4280 prev_dev = cur_dev;
4281 cur_dev = cur_dev->next;
4282 }
4283 icd_term = icd_term->next;
4284 }
4285 // Now destroy the device and the allocations associated with it.
4286 loader_destroy_logical_device(inst, dev, pAllocator);
4287 }
4288 }
4289
4290 if (NULL != icd_exts.list) {
4291 loader_destroy_generic_list(inst, (struct loader_generic_list *)&icd_exts);
4292 }
4293 return res;
4294 }
4295
loader_layer_destroy_device(VkDevice device,const VkAllocationCallbacks * pAllocator,PFN_vkDestroyDevice destroyFunction)4296 VKAPI_ATTR void VKAPI_CALL loader_layer_destroy_device(VkDevice device, const VkAllocationCallbacks *pAllocator,
4297 PFN_vkDestroyDevice destroyFunction) {
4298 struct loader_device *dev;
4299
4300 if (device == VK_NULL_HANDLE) {
4301 return;
4302 }
4303
4304 struct loader_icd_term *icd_term = loader_get_icd_and_device(device, &dev, NULL);
4305 const struct loader_instance *inst = icd_term->this_instance;
4306
4307 destroyFunction(device, pAllocator);
4308 dev->chain_device = NULL;
4309 dev->icd_device = NULL;
4310 loader_remove_logical_device(inst, icd_term, dev, pAllocator);
4311 }
4312
4313 // Given the list of layers to activate in the loader_instance
4314 // structure. This function will add a VkLayerInstanceCreateInfo
4315 // structure to the VkInstanceCreateInfo.pNext pointer.
4316 // Each activated layer will have it's own VkLayerInstanceLink
4317 // structure that tells the layer what Get*ProcAddr to call to
4318 // get function pointers to the next layer down.
4319 // Once the chain info has been created this function will
4320 // execute the CreateInstance call chain. Each layer will
4321 // then have an opportunity in it's CreateInstance function
4322 // to setup it's dispatch table when the lower layer returns
4323 // successfully.
4324 // Each layer can wrap or not-wrap the returned VkInstance object
4325 // as it sees fit.
4326 // The instance chain is terminated by a loader function
4327 // that will call CreateInstance on all available ICD's and
4328 // cache those VkInstance objects for future use.
loader_create_instance_chain(const VkInstanceCreateInfo * pCreateInfo,const VkAllocationCallbacks * pAllocator,struct loader_instance * inst,VkInstance * created_instance)4329 VkResult loader_create_instance_chain(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
4330 struct loader_instance *inst, VkInstance *created_instance) {
4331 uint32_t num_activated_layers = 0;
4332 struct activated_layer_info *activated_layers = NULL;
4333 VkLayerInstanceCreateInfo chain_info;
4334 VkLayerInstanceLink *layer_instance_link_info = NULL;
4335 VkInstanceCreateInfo loader_create_info;
4336 VkResult res;
4337
4338 PFN_vkGetInstanceProcAddr next_gipa = loader_gpa_instance_terminator;
4339 PFN_vkGetInstanceProcAddr cur_gipa = loader_gpa_instance_terminator;
4340 PFN_vkGetDeviceProcAddr cur_gdpa = loader_gpa_device_terminator;
4341 PFN_GetPhysicalDeviceProcAddr next_gpdpa = loader_gpdpa_instance_terminator;
4342 PFN_GetPhysicalDeviceProcAddr cur_gpdpa = loader_gpdpa_instance_terminator;
4343
4344 memcpy(&loader_create_info, pCreateInfo, sizeof(VkInstanceCreateInfo));
4345
4346 if (inst->expanded_activated_layer_list.count > 0) {
4347 chain_info.u.pLayerInfo = NULL;
4348 chain_info.pNext = pCreateInfo->pNext;
4349 chain_info.sType = VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO;
4350 chain_info.function = VK_LAYER_LINK_INFO;
4351 loader_create_info.pNext = &chain_info;
4352
4353 layer_instance_link_info = loader_stack_alloc(sizeof(VkLayerInstanceLink) * inst->expanded_activated_layer_list.count);
4354 if (!layer_instance_link_info) {
4355 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
4356 "loader_create_instance_chain: Failed to alloc Instance objects for layer");
4357 return VK_ERROR_OUT_OF_HOST_MEMORY;
4358 }
4359
4360 activated_layers = loader_stack_alloc(sizeof(struct activated_layer_info) * inst->expanded_activated_layer_list.count);
4361 if (!activated_layers) {
4362 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
4363 "loader_create_instance_chain: Failed to alloc activated layer storage array");
4364 return VK_ERROR_OUT_OF_HOST_MEMORY;
4365 }
4366
4367 // Create instance chain of enabled layers
4368 for (int32_t i = inst->expanded_activated_layer_list.count - 1; i >= 0; i--) {
4369 struct loader_layer_properties *layer_prop = &inst->expanded_activated_layer_list.list[i];
4370 loader_platform_dl_handle lib_handle;
4371
4372 // Skip it if a Layer with the same name has been already successfully activated
4373 if (loader_names_array_has_layer_property(&layer_prop->info, num_activated_layers, activated_layers)) {
4374 continue;
4375 }
4376
4377 lib_handle = loader_open_layer_file(inst, layer_prop);
4378 if (!lib_handle) {
4379 continue;
4380 }
4381
4382 if (NULL == layer_prop->functions.negotiate_layer_interface) {
4383 PFN_vkNegotiateLoaderLayerInterfaceVersion negotiate_interface = NULL;
4384 bool functions_in_interface = false;
4385 if (strlen(layer_prop->functions.str_negotiate_interface) == 0) {
4386 negotiate_interface = (PFN_vkNegotiateLoaderLayerInterfaceVersion)loader_platform_get_proc_address(
4387 lib_handle, "vkNegotiateLoaderLayerInterfaceVersion");
4388 } else {
4389 negotiate_interface = (PFN_vkNegotiateLoaderLayerInterfaceVersion)loader_platform_get_proc_address(
4390 lib_handle, layer_prop->functions.str_negotiate_interface);
4391 }
4392
4393 // If we can negotiate an interface version, then we can also
4394 // get everything we need from the one function call, so try
4395 // that first, and see if we can get all the function pointers
4396 // necessary from that one call.
4397 if (NULL != negotiate_interface) {
4398 layer_prop->functions.negotiate_layer_interface = negotiate_interface;
4399
4400 VkNegotiateLayerInterface interface_struct;
4401
4402 if (loader_get_layer_interface_version(negotiate_interface, &interface_struct)) {
4403 // Go ahead and set the properties version to the
4404 // correct value.
4405 layer_prop->interface_version = interface_struct.loaderLayerInterfaceVersion;
4406
4407 // If the interface is 2 or newer, we have access to the
4408 // new GetPhysicalDeviceProcAddr function, so grab it,
4409 // and the other necessary functions, from the
4410 // structure.
4411 if (interface_struct.loaderLayerInterfaceVersion > 1) {
4412 cur_gipa = interface_struct.pfnGetInstanceProcAddr;
4413 cur_gdpa = interface_struct.pfnGetDeviceProcAddr;
4414 cur_gpdpa = interface_struct.pfnGetPhysicalDeviceProcAddr;
4415 if (cur_gipa != NULL) {
4416 // We've set the functions, so make sure we
4417 // don't do the unnecessary calls later.
4418 functions_in_interface = true;
4419 }
4420 }
4421 }
4422 }
4423
4424 if (!functions_in_interface) {
4425 if ((cur_gipa = layer_prop->functions.get_instance_proc_addr) == NULL) {
4426 if (strlen(layer_prop->functions.str_gipa) == 0) {
4427 cur_gipa =
4428 (PFN_vkGetInstanceProcAddr)loader_platform_get_proc_address(lib_handle, "vkGetInstanceProcAddr");
4429 layer_prop->functions.get_instance_proc_addr = cur_gipa;
4430
4431 if (NULL == cur_gipa) {
4432 loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_LAYER_BIT, 0,
4433 "loader_create_instance_chain: Failed to find \'vkGetInstanceProcAddr\' in layer %s",
4434 layer_prop->lib_name);
4435 continue;
4436 }
4437 } else {
4438 cur_gipa = (PFN_vkGetInstanceProcAddr)loader_platform_get_proc_address(lib_handle,
4439 layer_prop->functions.str_gipa);
4440
4441 if (NULL == cur_gipa) {
4442 loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_LAYER_BIT, 0,
4443 "loader_create_instance_chain: Failed to find \'%s\' in layer %s",
4444 layer_prop->functions.str_gipa, layer_prop->lib_name);
4445 continue;
4446 }
4447 }
4448 }
4449 }
4450 }
4451
4452 layer_instance_link_info[num_activated_layers].pNext = chain_info.u.pLayerInfo;
4453 layer_instance_link_info[num_activated_layers].pfnNextGetInstanceProcAddr = next_gipa;
4454 layer_instance_link_info[num_activated_layers].pfnNextGetPhysicalDeviceProcAddr = next_gpdpa;
4455 next_gipa = cur_gipa;
4456 if (layer_prop->interface_version > 1 && cur_gpdpa != NULL) {
4457 layer_prop->functions.get_physical_device_proc_addr = cur_gpdpa;
4458 next_gpdpa = cur_gpdpa;
4459 }
4460 if (layer_prop->interface_version > 1 && cur_gipa != NULL) {
4461 layer_prop->functions.get_instance_proc_addr = cur_gipa;
4462 }
4463 if (layer_prop->interface_version > 1 && cur_gdpa != NULL) {
4464 layer_prop->functions.get_device_proc_addr = cur_gdpa;
4465 }
4466
4467 chain_info.u.pLayerInfo = &layer_instance_link_info[num_activated_layers];
4468
4469 activated_layers[num_activated_layers].name = layer_prop->info.layerName;
4470 activated_layers[num_activated_layers].manifest = layer_prop->manifest_file_name;
4471 activated_layers[num_activated_layers].library = layer_prop->lib_name;
4472 activated_layers[num_activated_layers].is_implicit = !(layer_prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER);
4473 if (activated_layers[num_activated_layers].is_implicit) {
4474 activated_layers[num_activated_layers].disable_env = layer_prop->disable_env_var.name;
4475 }
4476
4477 loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Insert instance layer %s (%s)",
4478 layer_prop->info.layerName, layer_prop->lib_name);
4479
4480 num_activated_layers++;
4481 }
4482 }
4483
4484 // Make sure each layer requested by the application was actually loaded
4485 for (uint32_t exp = 0; exp < inst->expanded_activated_layer_list.count; ++exp) {
4486 struct loader_layer_properties *exp_layer_prop = &inst->expanded_activated_layer_list.list[exp];
4487 bool found = false;
4488 for (uint32_t act = 0; act < num_activated_layers; ++act) {
4489 if (!strcmp(activated_layers[act].name, exp_layer_prop->info.layerName)) {
4490 found = true;
4491 break;
4492 }
4493 }
4494 // If it wasn't found, we want to at least log an error. However, if it was enabled by the application directly,
4495 // we want to return a bad layer error.
4496 if (!found) {
4497 bool app_requested = false;
4498 for (uint32_t act = 0; act < pCreateInfo->enabledLayerCount; ++act) {
4499 if (!strcmp(pCreateInfo->ppEnabledLayerNames[act], exp_layer_prop->info.layerName)) {
4500 app_requested = true;
4501 break;
4502 }
4503 }
4504 VkFlags log_flag = VULKAN_LOADER_LAYER_BIT;
4505 char ending = '.';
4506 if (app_requested) {
4507 log_flag |= VULKAN_LOADER_ERROR_BIT;
4508 ending = '!';
4509 } else {
4510 log_flag |= VULKAN_LOADER_INFO_BIT;
4511 }
4512 switch (exp_layer_prop->lib_status) {
4513 case LOADER_LAYER_LIB_NOT_LOADED:
4514 loader_log(inst, log_flag, 0, "Requested layer %s was not loaded%c", exp_layer_prop->info.layerName, ending);
4515 break;
4516 case LOADER_LAYER_LIB_ERROR_WRONG_BIT_TYPE: {
4517 loader_log(inst, log_flag, 0, "Requested layer %s was wrong bit-type%c", exp_layer_prop->info.layerName,
4518 ending);
4519 break;
4520 }
4521 case LOADER_LAYER_LIB_ERROR_FAILED_TO_LOAD:
4522 loader_log(inst, log_flag, 0, "Requested layer %s failed to load%c", exp_layer_prop->info.layerName, ending);
4523 break;
4524 case LOADER_LAYER_LIB_SUCCESS_LOADED:
4525 // Shouldn't be able to reach this but if it is, best to report a debug
4526 loader_log(inst, log_flag, 0,
4527 "Shouldn't reach this. A valid version of requested layer %s was loaded but was not found in the "
4528 "list of activated layers%c",
4529 exp_layer_prop->info.layerName, ending);
4530 break;
4531 }
4532 if (app_requested) {
4533 return VK_ERROR_LAYER_NOT_PRESENT;
4534 }
4535 }
4536 }
4537
4538 VkLoaderFeatureFlags feature_flags = 0;
4539 #if defined(_WIN32)
4540 feature_flags = windows_initialize_dxgi();
4541 #endif
4542
4543 PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)next_gipa(*created_instance, "vkCreateInstance");
4544 if (fpCreateInstance) {
4545 VkLayerInstanceCreateInfo instance_dispatch;
4546 instance_dispatch.sType = VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO;
4547 instance_dispatch.pNext = loader_create_info.pNext;
4548 instance_dispatch.function = VK_LOADER_DATA_CALLBACK;
4549 instance_dispatch.u.pfnSetInstanceLoaderData = vkSetInstanceDispatch;
4550
4551 VkLayerInstanceCreateInfo device_callback;
4552 device_callback.sType = VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO;
4553 device_callback.pNext = &instance_dispatch;
4554 device_callback.function = VK_LOADER_LAYER_CREATE_DEVICE_CALLBACK;
4555 device_callback.u.layerDevice.pfnLayerCreateDevice = loader_layer_create_device;
4556 device_callback.u.layerDevice.pfnLayerDestroyDevice = loader_layer_destroy_device;
4557
4558 VkLayerInstanceCreateInfo loader_features;
4559 loader_features.sType = VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO;
4560 loader_features.pNext = &device_callback;
4561 loader_features.function = VK_LOADER_FEATURES;
4562 loader_features.u.loaderFeatures = feature_flags;
4563
4564 loader_create_info.pNext = &loader_features;
4565
4566 // If layer debugging is enabled, let's print out the full callstack with layers in their
4567 // defined order.
4568 if ((loader_get_debug_level() & VULKAN_LOADER_LAYER_BIT) != 0) {
4569 loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, "vkCreateInstance layer callstack setup to:");
4570 loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " <Application>");
4571 loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " ||");
4572 loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " <Loader>");
4573 loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " ||");
4574 for (uint32_t cur_layer = 0; cur_layer < num_activated_layers; ++cur_layer) {
4575 uint32_t index = num_activated_layers - cur_layer - 1;
4576 loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " %s", activated_layers[index].name);
4577 loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " Type: %s",
4578 activated_layers[index].is_implicit ? "Implicit" : "Explicit");
4579 if (activated_layers[index].is_implicit) {
4580 loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " Disable Env Var: %s",
4581 activated_layers[index].disable_env);
4582 }
4583 loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " Manifest: %s", activated_layers[index].manifest);
4584 loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " Library: %s", activated_layers[index].library);
4585 loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " ||");
4586 }
4587 loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " <Drivers>\n");
4588 }
4589
4590 res = fpCreateInstance(&loader_create_info, pAllocator, created_instance);
4591 } else {
4592 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_create_instance_chain: Failed to find \'vkCreateInstance\'");
4593 // Couldn't find CreateInstance function!
4594 res = VK_ERROR_INITIALIZATION_FAILED;
4595 }
4596
4597 if (res == VK_SUCCESS) {
4598 loader_init_instance_core_dispatch_table(&inst->disp->layer_inst_disp, next_gipa, *created_instance);
4599 inst->instance = *created_instance;
4600 }
4601
4602 return res;
4603 }
4604
loader_activate_instance_layer_extensions(struct loader_instance * inst,VkInstance created_inst)4605 void loader_activate_instance_layer_extensions(struct loader_instance *inst, VkInstance created_inst) {
4606 loader_init_instance_extension_dispatch_table(&inst->disp->layer_inst_disp, inst->disp->layer_inst_disp.GetInstanceProcAddr,
4607 created_inst);
4608 }
4609
loader_create_device_chain(const VkPhysicalDevice pd,const VkDeviceCreateInfo * pCreateInfo,const VkAllocationCallbacks * pAllocator,const struct loader_instance * inst,struct loader_device * dev,PFN_vkGetInstanceProcAddr callingLayer,PFN_vkGetDeviceProcAddr * layerNextGDPA)4610 VkResult loader_create_device_chain(const VkPhysicalDevice pd, const VkDeviceCreateInfo *pCreateInfo,
4611 const VkAllocationCallbacks *pAllocator, const struct loader_instance *inst,
4612 struct loader_device *dev, PFN_vkGetInstanceProcAddr callingLayer,
4613 PFN_vkGetDeviceProcAddr *layerNextGDPA) {
4614 uint32_t num_activated_layers = 0;
4615 struct activated_layer_info *activated_layers = NULL;
4616 VkLayerDeviceLink *layer_device_link_info;
4617 VkLayerDeviceCreateInfo chain_info;
4618 VkDeviceCreateInfo loader_create_info;
4619 VkDeviceGroupDeviceCreateInfoKHR *original_device_group_create_info_struct = NULL;
4620 VkResult res;
4621
4622 PFN_vkGetDeviceProcAddr fpGDPA = NULL, nextGDPA = loader_gpa_device_terminator;
4623 PFN_vkGetInstanceProcAddr fpGIPA = NULL, nextGIPA = loader_gpa_instance_terminator;
4624
4625 memcpy(&loader_create_info, pCreateInfo, sizeof(VkDeviceCreateInfo));
4626
4627 // Before we continue, we need to find out if the KHR_device_group extension is in the enabled list. If it is, we then
4628 // need to look for the corresponding VkDeviceGroupDeviceCreateInfoKHR struct in the device list. This is because we
4629 // need to replace all the incoming physical device values (which are really loader trampoline physical device values)
4630 // with the layer/ICD version.
4631 {
4632 VkBaseOutStructure *pNext = (VkBaseOutStructure *)loader_create_info.pNext;
4633 VkBaseOutStructure *pPrev = (VkBaseOutStructure *)&loader_create_info;
4634 while (NULL != pNext) {
4635 if (VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO == pNext->sType) {
4636 VkDeviceGroupDeviceCreateInfoKHR *cur_struct = (VkDeviceGroupDeviceCreateInfoKHR *)pNext;
4637 if (0 < cur_struct->physicalDeviceCount && NULL != cur_struct->pPhysicalDevices) {
4638 VkDeviceGroupDeviceCreateInfoKHR *temp_struct = loader_stack_alloc(sizeof(VkDeviceGroupDeviceCreateInfoKHR));
4639 VkPhysicalDevice *phys_dev_array = NULL;
4640 if (NULL == temp_struct) {
4641 return VK_ERROR_OUT_OF_HOST_MEMORY;
4642 }
4643 memcpy(temp_struct, cur_struct, sizeof(VkDeviceGroupDeviceCreateInfoKHR));
4644 phys_dev_array = loader_stack_alloc(sizeof(VkPhysicalDevice) * cur_struct->physicalDeviceCount);
4645 if (NULL == phys_dev_array) {
4646 return VK_ERROR_OUT_OF_HOST_MEMORY;
4647 }
4648
4649 // Before calling down, replace the incoming physical device values (which are really loader trampoline
4650 // physical devices) with the next layer (or possibly even the terminator) physical device values.
4651 struct loader_physical_device_tramp *cur_tramp;
4652 for (uint32_t phys_dev = 0; phys_dev < cur_struct->physicalDeviceCount; phys_dev++) {
4653 cur_tramp = (struct loader_physical_device_tramp *)cur_struct->pPhysicalDevices[phys_dev];
4654 phys_dev_array[phys_dev] = cur_tramp->phys_dev;
4655 }
4656 temp_struct->pPhysicalDevices = phys_dev_array;
4657
4658 original_device_group_create_info_struct = (VkDeviceGroupDeviceCreateInfoKHR *)pPrev->pNext;
4659
4660 // Replace the old struct in the pNext chain with this one.
4661 pPrev->pNext = (VkBaseOutStructure *)temp_struct;
4662 }
4663 break;
4664 }
4665
4666 pPrev = pNext;
4667 pNext = pNext->pNext;
4668 }
4669 }
4670 if (dev->expanded_activated_layer_list.count > 0) {
4671 layer_device_link_info = loader_stack_alloc(sizeof(VkLayerDeviceLink) * dev->expanded_activated_layer_list.count);
4672 if (!layer_device_link_info) {
4673 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
4674 "loader_create_device_chain: Failed to alloc Device objects for layer. Skipping Layer.");
4675 return VK_ERROR_OUT_OF_HOST_MEMORY;
4676 }
4677
4678 activated_layers = loader_stack_alloc(sizeof(struct activated_layer_info) * inst->expanded_activated_layer_list.count);
4679 if (!activated_layers) {
4680 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
4681 "loader_create_device_chain: Failed to alloc activated layer storage array");
4682 return VK_ERROR_OUT_OF_HOST_MEMORY;
4683 }
4684
4685 chain_info.sType = VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO;
4686 chain_info.function = VK_LAYER_LINK_INFO;
4687 chain_info.u.pLayerInfo = NULL;
4688 chain_info.pNext = loader_create_info.pNext;
4689 loader_create_info.pNext = &chain_info;
4690
4691 // Create instance chain of enabled layers
4692 for (int32_t i = dev->expanded_activated_layer_list.count - 1; i >= 0; i--) {
4693 struct loader_layer_properties *layer_prop = &dev->expanded_activated_layer_list.list[i];
4694 loader_platform_dl_handle lib_handle = layer_prop->lib_handle;
4695
4696 // Skip it if a Layer with the same name has been already successfully activated
4697 if (loader_names_array_has_layer_property(&layer_prop->info, num_activated_layers, activated_layers)) {
4698 continue;
4699 }
4700
4701 // Skip the layer if the handle is NULL - this is likely because the library failed to load but wasn't removed from the
4702 // list.
4703 if (!lib_handle) {
4704 continue;
4705 }
4706
4707 // The Get*ProcAddr pointers will already be filled in if they were received from either the json file or the
4708 // version negotiation
4709 if ((fpGIPA = layer_prop->functions.get_instance_proc_addr) == NULL) {
4710 if (strlen(layer_prop->functions.str_gipa) == 0) {
4711 fpGIPA = (PFN_vkGetInstanceProcAddr)loader_platform_get_proc_address(lib_handle, "vkGetInstanceProcAddr");
4712 layer_prop->functions.get_instance_proc_addr = fpGIPA;
4713 } else
4714 fpGIPA =
4715 (PFN_vkGetInstanceProcAddr)loader_platform_get_proc_address(lib_handle, layer_prop->functions.str_gipa);
4716 if (!fpGIPA) {
4717 loader_log(inst, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_LAYER_BIT, 0,
4718 "loader_create_device_chain: Failed to find \'vkGetInstanceProcAddr\' in layer %s. Skipping layer.",
4719 layer_prop->lib_name);
4720 continue;
4721 }
4722 }
4723
4724 if (fpGIPA == callingLayer) {
4725 if (layerNextGDPA != NULL) {
4726 *layerNextGDPA = nextGDPA;
4727 }
4728 // Break here because if fpGIPA is the same as callingLayer, that means a layer is trying to create a device, and
4729 // once we don't want to continue any further as the next layer will be the calling layer
4730 break;
4731 }
4732
4733 if ((fpGDPA = layer_prop->functions.get_device_proc_addr) == NULL) {
4734 if (strlen(layer_prop->functions.str_gdpa) == 0) {
4735 fpGDPA = (PFN_vkGetDeviceProcAddr)loader_platform_get_proc_address(lib_handle, "vkGetDeviceProcAddr");
4736 layer_prop->functions.get_device_proc_addr = fpGDPA;
4737 } else
4738 fpGDPA = (PFN_vkGetDeviceProcAddr)loader_platform_get_proc_address(lib_handle, layer_prop->functions.str_gdpa);
4739 if (!fpGDPA) {
4740 loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0,
4741 "Failed to find vkGetDeviceProcAddr in layer %s", layer_prop->lib_name);
4742 continue;
4743 }
4744 }
4745
4746 layer_device_link_info[num_activated_layers].pNext = chain_info.u.pLayerInfo;
4747 layer_device_link_info[num_activated_layers].pfnNextGetInstanceProcAddr = nextGIPA;
4748 layer_device_link_info[num_activated_layers].pfnNextGetDeviceProcAddr = nextGDPA;
4749 chain_info.u.pLayerInfo = &layer_device_link_info[num_activated_layers];
4750 nextGIPA = fpGIPA;
4751 nextGDPA = fpGDPA;
4752
4753 activated_layers[num_activated_layers].name = layer_prop->info.layerName;
4754 activated_layers[num_activated_layers].manifest = layer_prop->manifest_file_name;
4755 activated_layers[num_activated_layers].library = layer_prop->lib_name;
4756 activated_layers[num_activated_layers].is_implicit = !(layer_prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER);
4757 if (activated_layers[num_activated_layers].is_implicit) {
4758 activated_layers[num_activated_layers].disable_env = layer_prop->disable_env_var.name;
4759 }
4760
4761 loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_LAYER_BIT, 0, "Inserted device layer %s (%s)",
4762 layer_prop->info.layerName, layer_prop->lib_name);
4763
4764 num_activated_layers++;
4765 }
4766 }
4767
4768 VkDevice created_device = (VkDevice)dev;
4769 PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)nextGIPA(inst->instance, "vkCreateDevice");
4770 if (fpCreateDevice) {
4771 VkLayerDeviceCreateInfo create_info_disp;
4772
4773 create_info_disp.sType = VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO;
4774 create_info_disp.function = VK_LOADER_DATA_CALLBACK;
4775
4776 create_info_disp.u.pfnSetDeviceLoaderData = vkSetDeviceDispatch;
4777
4778 // If layer debugging is enabled, let's print out the full callstack with layers in their
4779 // defined order.
4780 uint32_t layer_driver_bits = VULKAN_LOADER_LAYER_BIT | VULKAN_LOADER_DRIVER_BIT;
4781 if ((loader_get_debug_level() & layer_driver_bits) != 0) {
4782 loader_log(inst, layer_driver_bits, 0, "vkCreateDevice layer callstack setup to:");
4783 loader_log(inst, layer_driver_bits, 0, " <Application>");
4784 loader_log(inst, layer_driver_bits, 0, " ||");
4785 loader_log(inst, layer_driver_bits, 0, " <Loader>");
4786 loader_log(inst, layer_driver_bits, 0, " ||");
4787 if ((loader_get_debug_level() & VULKAN_LOADER_LAYER_BIT) != 0) {
4788 for (uint32_t cur_layer = 0; cur_layer < num_activated_layers; ++cur_layer) {
4789 uint32_t index = num_activated_layers - cur_layer - 1;
4790 loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " %s", activated_layers[index].name);
4791 loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " Type: %s",
4792 activated_layers[index].is_implicit ? "Implicit" : "Explicit");
4793 if (activated_layers[index].is_implicit) {
4794 loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " Disable Env Var: %s",
4795 activated_layers[index].disable_env);
4796 }
4797 loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " Manifest: %s", activated_layers[index].manifest);
4798 loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " Library: %s", activated_layers[index].library);
4799 loader_log(inst, VULKAN_LOADER_LAYER_BIT, 0, " ||");
4800 }
4801 }
4802 loader_log(inst, layer_driver_bits, 0, " <Device>");
4803 }
4804 create_info_disp.pNext = loader_create_info.pNext;
4805 loader_create_info.pNext = &create_info_disp;
4806 res = fpCreateDevice(pd, &loader_create_info, pAllocator, &created_device);
4807 if (res != VK_SUCCESS) {
4808 return res;
4809 }
4810 dev->chain_device = created_device;
4811
4812 // Because we changed the pNext chain to use our own VkDeviceGroupDeviceCreateInfoKHR, we need to fixup the chain to point
4813 // back at the original VkDeviceGroupDeviceCreateInfoKHR.
4814 VkBaseOutStructure *pNext = (VkBaseOutStructure *)loader_create_info.pNext;
4815 VkBaseOutStructure *pPrev = (VkBaseOutStructure *)&loader_create_info;
4816 while (NULL != pNext) {
4817 if (VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO == pNext->sType) {
4818 VkDeviceGroupDeviceCreateInfoKHR *cur_struct = (VkDeviceGroupDeviceCreateInfoKHR *)pNext;
4819 if (0 < cur_struct->physicalDeviceCount && NULL != cur_struct->pPhysicalDevices) {
4820 pPrev->pNext = (VkBaseOutStructure *)original_device_group_create_info_struct;
4821 }
4822 break;
4823 }
4824
4825 pPrev = pNext;
4826 pNext = pNext->pNext;
4827 }
4828
4829 } else {
4830 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
4831 "loader_create_device_chain: Failed to find \'vkCreateDevice\' in layers or ICD");
4832 // Couldn't find CreateDevice function!
4833 return VK_ERROR_INITIALIZATION_FAILED;
4834 }
4835
4836 // Initialize device dispatch table
4837 loader_init_device_dispatch_table(&dev->loader_dispatch, nextGDPA, dev->chain_device);
4838
4839 return res;
4840 }
4841
loader_validate_layers(const struct loader_instance * inst,const uint32_t layer_count,const char * const * ppEnabledLayerNames,const struct loader_layer_list * list)4842 VkResult loader_validate_layers(const struct loader_instance *inst, const uint32_t layer_count,
4843 const char *const *ppEnabledLayerNames, const struct loader_layer_list *list) {
4844 struct loader_layer_properties *prop;
4845
4846 if (layer_count > 0 && ppEnabledLayerNames == NULL) {
4847 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
4848 "loader_validate_instance_layers: ppEnabledLayerNames is NULL but enabledLayerCount is greater than zero");
4849 return VK_ERROR_LAYER_NOT_PRESENT;
4850 }
4851
4852 for (uint32_t i = 0; i < layer_count; i++) {
4853 VkStringErrorFlags result = vk_string_validate(MaxLoaderStringLength, ppEnabledLayerNames[i]);
4854 if (result != VK_STRING_ERROR_NONE) {
4855 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
4856 "loader_validate_layers: ppEnabledLayerNames contains string that is too long or is badly formed");
4857 return VK_ERROR_LAYER_NOT_PRESENT;
4858 }
4859
4860 prop = loader_find_layer_property(ppEnabledLayerNames[i], list);
4861 if (NULL == prop) {
4862 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
4863 "loader_validate_layers: Layer %d does not exist in the list of available layers", i);
4864 return VK_ERROR_LAYER_NOT_PRESENT;
4865 }
4866 }
4867 return VK_SUCCESS;
4868 }
4869
loader_validate_instance_extensions(struct loader_instance * inst,const struct loader_extension_list * icd_exts,const struct loader_layer_list * instance_layers,const VkInstanceCreateInfo * pCreateInfo)4870 VkResult loader_validate_instance_extensions(struct loader_instance *inst, const struct loader_extension_list *icd_exts,
4871 const struct loader_layer_list *instance_layers,
4872 const VkInstanceCreateInfo *pCreateInfo) {
4873 VkExtensionProperties *extension_prop;
4874 char *env_value;
4875 bool check_if_known = true;
4876 VkResult res = VK_SUCCESS;
4877
4878 struct loader_layer_list active_layers;
4879 struct loader_layer_list expanded_layers;
4880 memset(&active_layers, 0, sizeof(active_layers));
4881 memset(&expanded_layers, 0, sizeof(expanded_layers));
4882
4883 if (pCreateInfo->enabledExtensionCount > 0 && pCreateInfo->ppEnabledExtensionNames == NULL) {
4884 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
4885 "loader_validate_instance_extensions: Instance ppEnabledExtensionNames is NULL but enabledExtensionCount is "
4886 "greater than zero");
4887 return VK_ERROR_EXTENSION_NOT_PRESENT;
4888 }
4889 if (!loader_init_layer_list(inst, &active_layers)) {
4890 res = VK_ERROR_OUT_OF_HOST_MEMORY;
4891 goto out;
4892 }
4893 if (!loader_init_layer_list(inst, &expanded_layers)) {
4894 res = VK_ERROR_OUT_OF_HOST_MEMORY;
4895 goto out;
4896 }
4897
4898 // Build the lists of active layers (including metalayers) and expanded layers (with metalayers resolved to their
4899 // components)
4900 loader_add_implicit_layers(inst, &active_layers, &expanded_layers, instance_layers);
4901 res = loader_add_environment_layers(inst, VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER, ENABLED_LAYERS_ENV, &active_layers,
4902 &expanded_layers, instance_layers);
4903 if (res != VK_SUCCESS) {
4904 goto out;
4905 }
4906 res = loader_add_layer_names_to_list(inst, &active_layers, &expanded_layers, pCreateInfo->enabledLayerCount,
4907 pCreateInfo->ppEnabledLayerNames, instance_layers);
4908 if (VK_SUCCESS != res) {
4909 goto out;
4910 }
4911
4912 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
4913 VkStringErrorFlags result = vk_string_validate(MaxLoaderStringLength, pCreateInfo->ppEnabledExtensionNames[i]);
4914 if (result != VK_STRING_ERROR_NONE) {
4915 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
4916 "loader_validate_instance_extensions: Instance ppEnabledExtensionNames contains "
4917 "string that is too long or is badly formed");
4918 res = VK_ERROR_EXTENSION_NOT_PRESENT;
4919 goto out;
4920 }
4921
4922 // Check if a user wants to disable the instance extension filtering behavior
4923 env_value = loader_getenv("VK_LOADER_DISABLE_INST_EXT_FILTER", inst);
4924 if (NULL != env_value && atoi(env_value) != 0) {
4925 check_if_known = false;
4926 }
4927 loader_free_getenv(env_value, inst);
4928
4929 if (check_if_known) {
4930 // See if the extension is in the list of supported extensions
4931 bool found = false;
4932 for (uint32_t j = 0; LOADER_INSTANCE_EXTENSIONS[j] != NULL; j++) {
4933 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], LOADER_INSTANCE_EXTENSIONS[j]) == 0) {
4934 found = true;
4935 break;
4936 }
4937 }
4938
4939 // If it isn't in the list, return an error
4940 if (!found) {
4941 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
4942 "loader_validate_instance_extensions: Extension %s not found in list of known instance extensions.",
4943 pCreateInfo->ppEnabledExtensionNames[i]);
4944 res = VK_ERROR_EXTENSION_NOT_PRESENT;
4945 goto out;
4946 }
4947 }
4948
4949 extension_prop = get_extension_property(pCreateInfo->ppEnabledExtensionNames[i], icd_exts);
4950
4951 if (extension_prop) {
4952 continue;
4953 }
4954
4955 extension_prop = NULL;
4956
4957 // Not in global list, search layer extension lists
4958 struct loader_layer_properties *layer_prop = NULL;
4959 for (uint32_t j = 0; NULL == extension_prop && j < expanded_layers.count; ++j) {
4960 extension_prop =
4961 get_extension_property(pCreateInfo->ppEnabledExtensionNames[i], &expanded_layers.list[j].instance_extension_list);
4962 if (extension_prop) {
4963 // Found the extension in one of the layers enabled by the app.
4964 break;
4965 }
4966
4967 layer_prop = loader_find_layer_property(expanded_layers.list[j].info.layerName, instance_layers);
4968 if (NULL == layer_prop) {
4969 // Should NOT get here, loader_validate_layers should have already filtered this case out.
4970 continue;
4971 }
4972 }
4973
4974 if (!extension_prop) {
4975 // Didn't find extension name in any of the global layers, error out
4976 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
4977 "loader_validate_instance_extensions: Instance extension %s not supported by available ICDs or enabled "
4978 "layers.",
4979 pCreateInfo->ppEnabledExtensionNames[i]);
4980 res = VK_ERROR_EXTENSION_NOT_PRESENT;
4981 goto out;
4982 }
4983 }
4984
4985 out:
4986 loader_destroy_layer_list(inst, NULL, &active_layers);
4987 loader_destroy_layer_list(inst, NULL, &expanded_layers);
4988 return res;
4989 }
4990
loader_validate_device_extensions(struct loader_instance * this_instance,const struct loader_layer_list * activated_device_layers,const struct loader_extension_list * icd_exts,const VkDeviceCreateInfo * pCreateInfo)4991 VkResult loader_validate_device_extensions(struct loader_instance *this_instance,
4992 const struct loader_layer_list *activated_device_layers,
4993 const struct loader_extension_list *icd_exts, const VkDeviceCreateInfo *pCreateInfo) {
4994 VkExtensionProperties *extension_prop;
4995 struct loader_layer_properties *layer_prop;
4996
4997 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
4998 VkStringErrorFlags result = vk_string_validate(MaxLoaderStringLength, pCreateInfo->ppEnabledExtensionNames[i]);
4999 if (result != VK_STRING_ERROR_NONE) {
5000 loader_log(this_instance, VULKAN_LOADER_ERROR_BIT, 0,
5001 "loader_validate_device_extensions: Device ppEnabledExtensionNames contains "
5002 "string that is too long or is badly formed");
5003 return VK_ERROR_EXTENSION_NOT_PRESENT;
5004 }
5005
5006 const char *extension_name = pCreateInfo->ppEnabledExtensionNames[i];
5007 extension_prop = get_extension_property(extension_name, icd_exts);
5008
5009 if (extension_prop) {
5010 continue;
5011 }
5012
5013 // Not in global list, search activated layer extension lists
5014 for (uint32_t j = 0; j < activated_device_layers->count; j++) {
5015 layer_prop = &activated_device_layers->list[j];
5016
5017 extension_prop = get_dev_extension_property(extension_name, &layer_prop->device_extension_list);
5018 if (extension_prop) {
5019 // Found the extension in one of the layers enabled by the app.
5020 break;
5021 }
5022 }
5023
5024 if (!extension_prop) {
5025 // Didn't find extension name in any of the device layers, error out
5026 loader_log(this_instance, VULKAN_LOADER_ERROR_BIT, 0,
5027 "loader_validate_device_extensions: Device extension %s not supported by selected physical device "
5028 "or enabled layers.",
5029 pCreateInfo->ppEnabledExtensionNames[i]);
5030 return VK_ERROR_EXTENSION_NOT_PRESENT;
5031 }
5032 }
5033 return VK_SUCCESS;
5034 }
5035
5036 // Terminator functions for the Instance chain
5037 // All named terminator_<Vulkan API name>
terminator_CreateInstance(const VkInstanceCreateInfo * pCreateInfo,const VkAllocationCallbacks * pAllocator,VkInstance * pInstance)5038 VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
5039 const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
5040 struct loader_icd_term *icd_term;
5041 VkExtensionProperties *prop;
5042 char **filtered_extension_names = NULL;
5043 VkInstanceCreateInfo icd_create_info;
5044 VkResult res = VK_SUCCESS;
5045 bool one_icd_successful = false;
5046
5047 struct loader_instance *ptr_instance = (struct loader_instance *)*pInstance;
5048 if (NULL == ptr_instance) {
5049 loader_log(ptr_instance, VULKAN_LOADER_WARN_BIT, 0,
5050 "terminator_CreateInstance: Loader instance pointer null encountered. Possibly set by active layer. (Policy "
5051 "#LLP_LAYER_21)");
5052 } else if (LOADER_MAGIC_NUMBER != ptr_instance->magic) {
5053 loader_log(ptr_instance, VULKAN_LOADER_WARN_BIT, 0,
5054 "terminator_CreateInstance: Instance pointer (%p) has invalid MAGIC value 0x%08x. Instance value possibly "
5055 "corrupted by active layer (Policy #LLP_LAYER_21). ",
5056 ptr_instance->magic);
5057 }
5058
5059 memcpy(&icd_create_info, pCreateInfo, sizeof(icd_create_info));
5060
5061 icd_create_info.enabledLayerCount = 0;
5062 icd_create_info.ppEnabledLayerNames = NULL;
5063
5064 // NOTE: Need to filter the extensions to only those supported by the ICD.
5065 // No ICD will advertise support for layers. An ICD library could
5066 // support a layer, but it would be independent of the actual ICD,
5067 // just in the same library.
5068 uint32_t extension_count = pCreateInfo->enabledExtensionCount;
5069 #ifdef LOADER_ENABLE_LINUX_SORT
5070 extension_count += 1;
5071 #endif // LOADER_ENABLE_LINUX_SORT
5072 filtered_extension_names = loader_stack_alloc(extension_count * sizeof(char *));
5073 if (!filtered_extension_names) {
5074 loader_log(ptr_instance, VULKAN_LOADER_ERROR_BIT, 0,
5075 "terminator_CreateInstance: Failed create extension name array for %d extensions", extension_count);
5076 res = VK_ERROR_OUT_OF_HOST_MEMORY;
5077 goto out;
5078 }
5079 icd_create_info.ppEnabledExtensionNames = (const char *const *)filtered_extension_names;
5080
5081 // Determine if Get Physical Device Properties 2 is available to this Instance
5082 if (pCreateInfo->pApplicationInfo && pCreateInfo->pApplicationInfo->apiVersion >= VK_API_VERSION_1_1) {
5083 ptr_instance->supports_get_dev_prop_2 = true;
5084 } else {
5085 for (uint32_t j = 0; j < pCreateInfo->enabledExtensionCount; j++) {
5086 if (!strcmp(pCreateInfo->ppEnabledExtensionNames[j], VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
5087 ptr_instance->supports_get_dev_prop_2 = true;
5088 break;
5089 }
5090 }
5091 }
5092
5093 for (uint32_t i = 0; i < ptr_instance->icd_tramp_list.count; i++) {
5094 icd_term = loader_icd_add(ptr_instance, &ptr_instance->icd_tramp_list.scanned_list[i]);
5095 if (NULL == icd_term) {
5096 loader_log(ptr_instance, VULKAN_LOADER_ERROR_BIT, 0,
5097 "terminator_CreateInstance: Failed to add ICD %d to ICD trampoline list.", i);
5098 res = VK_ERROR_OUT_OF_HOST_MEMORY;
5099 goto out;
5100 }
5101
5102 // If any error happens after here, we need to remove the ICD from the list,
5103 // because we've already added it, but haven't validated it
5104
5105 // Make sure that we reset the pApplicationInfo so we don't get an old pointer
5106 icd_create_info.pApplicationInfo = pCreateInfo->pApplicationInfo;
5107 icd_create_info.enabledExtensionCount = 0;
5108 struct loader_extension_list icd_exts;
5109
5110 loader_log(ptr_instance, VULKAN_LOADER_DEBUG_BIT, 0, "Build ICD instance extension list");
5111 // traverse scanned icd list adding non-duplicate extensions to the list
5112 res = loader_init_generic_list(ptr_instance, (struct loader_generic_list *)&icd_exts, sizeof(VkExtensionProperties));
5113 if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
5114 // If out of memory, bail immediately.
5115 goto out;
5116 } else if (VK_SUCCESS != res) {
5117 // Something bad happened with this ICD, so free it and try the
5118 // next.
5119 ptr_instance->icd_terms = icd_term->next;
5120 icd_term->next = NULL;
5121 loader_icd_destroy(ptr_instance, icd_term, pAllocator);
5122 continue;
5123 }
5124
5125 res = loader_add_instance_extensions(ptr_instance, icd_term->scanned_icd->EnumerateInstanceExtensionProperties,
5126 icd_term->scanned_icd->lib_name, &icd_exts);
5127 if (VK_SUCCESS != res) {
5128 loader_destroy_generic_list(ptr_instance, (struct loader_generic_list *)&icd_exts);
5129 if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
5130 // If out of memory, bail immediately.
5131 goto out;
5132 } else {
5133 // Something bad happened with this ICD, so free it and try the next.
5134 ptr_instance->icd_terms = icd_term->next;
5135 icd_term->next = NULL;
5136 loader_icd_destroy(ptr_instance, icd_term, pAllocator);
5137 continue;
5138 }
5139 }
5140
5141 for (uint32_t j = 0; j < pCreateInfo->enabledExtensionCount; j++) {
5142 prop = get_extension_property(pCreateInfo->ppEnabledExtensionNames[j], &icd_exts);
5143 if (prop) {
5144 filtered_extension_names[icd_create_info.enabledExtensionCount] = (char *)pCreateInfo->ppEnabledExtensionNames[j];
5145 icd_create_info.enabledExtensionCount++;
5146 }
5147 }
5148 #ifdef LOADER_ENABLE_LINUX_SORT
5149 // Force on "VK_KHR_get_physical_device_properties2" for Linux as we use it for GPU sorting. This
5150 // should be done if the API version of either the application or the driver does not natively support
5151 // the core version of vkGetPhysicalDeviceProperties2 entrypoint.
5152 if ((ptr_instance->app_api_version.major == 1 && ptr_instance->app_api_version.minor == 0) ||
5153 (VK_API_VERSION_MAJOR(icd_term->scanned_icd->api_version) == 1 &&
5154 VK_API_VERSION_MINOR(icd_term->scanned_icd->api_version) == 0)) {
5155 prop = get_extension_property(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, &icd_exts);
5156 if (prop) {
5157 filtered_extension_names[icd_create_info.enabledExtensionCount] =
5158 (char *)VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME;
5159 icd_create_info.enabledExtensionCount++;
5160
5161 // At least one ICD supports this, so the instance should be able to support it
5162 ptr_instance->supports_get_dev_prop_2 = true;
5163 }
5164 }
5165 #endif // LOADER_ENABLE_LINUX_SORT
5166
5167 // Determine if vkGetPhysicalDeviceProperties2 is available to this Instance
5168 if (icd_term->scanned_icd->api_version >= VK_API_VERSION_1_1) {
5169 icd_term->supports_get_dev_prop_2 = true;
5170 } else {
5171 for (uint32_t j = 0; j < icd_create_info.enabledExtensionCount; j++) {
5172 if (!strcmp(filtered_extension_names[j], VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
5173 icd_term->supports_get_dev_prop_2 = true;
5174 break;
5175 }
5176 }
5177 }
5178
5179 loader_destroy_generic_list(ptr_instance, (struct loader_generic_list *)&icd_exts);
5180
5181 // Get the driver version from vkEnumerateInstanceVersion
5182 uint32_t icd_version = VK_API_VERSION_1_0;
5183 VkResult icd_result = VK_SUCCESS;
5184 if (icd_term->scanned_icd->api_version >= VK_API_VERSION_1_1) {
5185 PFN_vkEnumerateInstanceVersion icd_enumerate_instance_version =
5186 (PFN_vkEnumerateInstanceVersion)icd_term->scanned_icd->GetInstanceProcAddr(NULL, "vkEnumerateInstanceVersion");
5187 if (icd_enumerate_instance_version != NULL) {
5188 icd_result = icd_enumerate_instance_version(&icd_version);
5189 if (icd_result != VK_SUCCESS) {
5190 icd_version = VK_API_VERSION_1_0;
5191 loader_log(ptr_instance, VULKAN_LOADER_DEBUG_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
5192 "terminator_CreateInstance: ICD \"%s\" vkEnumerateInstanceVersion returned error. The ICD will be "
5193 "treated as a 1.0 ICD",
5194 icd_term->scanned_icd->lib_name);
5195 }
5196 }
5197 }
5198
5199 // Remove the portability enumeration flag bit if the ICD doesn't support the extension
5200 if ((pCreateInfo->flags & VK_INSTANCE_CREATE_ENUMERATE_PORTABILITY_BIT_KHR) == 1) {
5201 bool supports_portability_enumeration = false;
5202 for (uint32_t j = 0; j < icd_create_info.enabledExtensionCount; j++) {
5203 if (strcmp(filtered_extension_names[j], VK_KHR_PORTABILITY_ENUMERATION_EXTENSION_NAME) == 0) {
5204 supports_portability_enumeration = true;
5205 break;
5206 }
5207 }
5208 // If the icd supports the extension, use the flags as given, otherwise remove the portability bit
5209 icd_create_info.flags = supports_portability_enumeration
5210 ? pCreateInfo->flags
5211 : pCreateInfo->flags & (~VK_INSTANCE_CREATE_ENUMERATE_PORTABILITY_BIT_KHR);
5212 }
5213
5214 // Create an instance, substituting the version to 1.0 if necessary
5215 VkApplicationInfo icd_app_info;
5216 uint32_t icd_version_nopatch =
5217 VK_MAKE_API_VERSION(0, VK_API_VERSION_MAJOR(icd_version), VK_API_VERSION_MINOR(icd_version), 0);
5218 uint32_t requested_version = (pCreateInfo == NULL || pCreateInfo->pApplicationInfo == NULL)
5219 ? VK_API_VERSION_1_0
5220 : pCreateInfo->pApplicationInfo->apiVersion;
5221 if ((requested_version != 0) && (icd_version_nopatch == VK_API_VERSION_1_0)) {
5222 if (icd_create_info.pApplicationInfo == NULL) {
5223 memset(&icd_app_info, 0, sizeof(icd_app_info));
5224 } else {
5225 memmove(&icd_app_info, icd_create_info.pApplicationInfo, sizeof(icd_app_info));
5226 }
5227 icd_app_info.apiVersion = icd_version;
5228 icd_create_info.pApplicationInfo = &icd_app_info;
5229 }
5230 icd_result =
5231 ptr_instance->icd_tramp_list.scanned_list[i].CreateInstance(&icd_create_info, pAllocator, &(icd_term->instance));
5232 if (VK_ERROR_OUT_OF_HOST_MEMORY == icd_result) {
5233 // If out of memory, bail immediately.
5234 res = VK_ERROR_OUT_OF_HOST_MEMORY;
5235 goto out;
5236 } else if (VK_SUCCESS != icd_result) {
5237 loader_log(ptr_instance, VULKAN_LOADER_WARN_BIT, 0,
5238 "terminator_CreateInstance: Failed to CreateInstance in ICD %d. Skipping ICD.", i);
5239 ptr_instance->icd_terms = icd_term->next;
5240 icd_term->next = NULL;
5241 loader_icd_destroy(ptr_instance, icd_term, pAllocator);
5242 continue;
5243 }
5244
5245 if (!loader_icd_init_entries(icd_term, icd_term->instance,
5246 ptr_instance->icd_tramp_list.scanned_list[i].GetInstanceProcAddr)) {
5247 loader_log(ptr_instance, VULKAN_LOADER_WARN_BIT, 0,
5248 "terminator_CreateInstance: Failed to CreateInstance and find entrypoints with ICD. Skipping ICD.");
5249 ptr_instance->icd_terms = icd_term->next;
5250 icd_term->next = NULL;
5251 loader_icd_destroy(ptr_instance, icd_term, pAllocator);
5252 continue;
5253 }
5254
5255 if (ptr_instance->icd_tramp_list.scanned_list[i].interface_version < 3 &&
5256 (
5257 #ifdef VK_USE_PLATFORM_XLIB_KHR
5258 NULL != icd_term->dispatch.CreateXlibSurfaceKHR ||
5259 #endif // VK_USE_PLATFORM_XLIB_KHR
5260 #ifdef VK_USE_PLATFORM_XCB_KHR
5261 NULL != icd_term->dispatch.CreateXcbSurfaceKHR ||
5262 #endif // VK_USE_PLATFORM_XCB_KHR
5263 #ifdef VK_USE_PLATFORM_WAYLAND_KHR
5264 NULL != icd_term->dispatch.CreateWaylandSurfaceKHR ||
5265 #endif // VK_USE_PLATFORM_WAYLAND_KHR
5266 #ifdef VK_USE_PLATFORM_ANDROID_KHR
5267 NULL != icd_term->dispatch.CreateAndroidSurfaceKHR ||
5268 #endif // VK_USE_PLATFORM_ANDROID_KHR
5269 #ifdef VK_USE_PLATFORM_OHOS
5270 NULL != icd_term->dispatch.CreateSurfaceOHOS ||
5271 #endif // VK_USE_PLATFORM_OHOS
5272 #ifdef VK_USE_PLATFORM_WIN32_KHR
5273 NULL != icd_term->dispatch.CreateWin32SurfaceKHR ||
5274 #endif // VK_USE_PLATFORM_WIN32_KHR
5275 NULL != icd_term->dispatch.DestroySurfaceKHR)) {
5276 loader_log(ptr_instance, VULKAN_LOADER_WARN_BIT, 0,
5277 "terminator_CreateInstance: Driver %s supports interface version %u but still exposes VkSurfaceKHR"
5278 " create/destroy entrypoints (Policy #LDP_DRIVER_8)",
5279 ptr_instance->icd_tramp_list.scanned_list[i].lib_name,
5280 ptr_instance->icd_tramp_list.scanned_list[i].interface_version);
5281 }
5282
5283 // If we made it this far, at least one ICD was successful
5284 one_icd_successful = true;
5285 }
5286
5287 // For vkGetPhysicalDeviceProperties2, at least one ICD needs to support the extension for the
5288 // instance to have it
5289 if (ptr_instance->supports_get_dev_prop_2) {
5290 bool at_least_one_supports = false;
5291 icd_term = ptr_instance->icd_terms;
5292 while (icd_term != NULL) {
5293 if (icd_term->supports_get_dev_prop_2) {
5294 at_least_one_supports = true;
5295 break;
5296 }
5297 icd_term = icd_term->next;
5298 }
5299 if (!at_least_one_supports) {
5300 ptr_instance->supports_get_dev_prop_2 = false;
5301 }
5302 }
5303
5304 // If no ICDs were added to instance list and res is unchanged from it's initial value, the loader was unable to
5305 // find a suitable ICD.
5306 if (VK_SUCCESS == res && (ptr_instance->icd_terms == NULL || !one_icd_successful)) {
5307 loader_log(ptr_instance, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
5308 "terminator_CreateInstance: Found no drivers!");
5309 res = VK_ERROR_INCOMPATIBLE_DRIVER;
5310 }
5311
5312 out:
5313
5314 ptr_instance->create_terminator_invalid_extension = false;
5315
5316 if (VK_SUCCESS != res) {
5317 if (VK_ERROR_EXTENSION_NOT_PRESENT == res) {
5318 ptr_instance->create_terminator_invalid_extension = true;
5319 }
5320
5321 while (NULL != ptr_instance->icd_terms) {
5322 icd_term = ptr_instance->icd_terms;
5323 ptr_instance->icd_terms = icd_term->next;
5324 if (NULL != icd_term->instance) {
5325 icd_term->dispatch.DestroyInstance(icd_term->instance, pAllocator);
5326 }
5327 loader_icd_destroy(ptr_instance, icd_term, pAllocator);
5328 }
5329 } else {
5330 // Check for enabled extensions here to setup the loader structures so the loader knows what extensions
5331 // it needs to worry about.
5332 // We do it here and again above the layers in the trampoline function since the trampoline function
5333 // may think different extensions are enabled than what's down here.
5334 // This is why we don't clear inside of these function calls.
5335 // The clearing should actually be handled by the overall memset of the pInstance structure in the
5336 // trampoline.
5337 wsi_create_instance(ptr_instance, pCreateInfo);
5338 check_for_enabled_debug_extensions(ptr_instance, pCreateInfo);
5339 extensions_create_instance(ptr_instance, pCreateInfo);
5340 }
5341
5342 return res;
5343 }
5344
terminator_DestroyInstance(VkInstance instance,const VkAllocationCallbacks * pAllocator)5345 VKAPI_ATTR void VKAPI_CALL terminator_DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
5346 struct loader_instance *ptr_instance = loader_get_instance(instance);
5347 if (NULL == ptr_instance) {
5348 return;
5349 }
5350 struct loader_icd_term *icd_terms = ptr_instance->icd_terms;
5351 struct loader_icd_term *next_icd_term;
5352
5353 // Remove this instance from the list of instances:
5354 struct loader_instance *prev = NULL;
5355 loader_platform_thread_lock_mutex(&loader_global_instance_list_lock);
5356 struct loader_instance *next = loader.instances;
5357 while (next != NULL) {
5358 if (next == ptr_instance) {
5359 // Remove this instance from the list:
5360 if (prev)
5361 prev->next = next->next;
5362 else
5363 loader.instances = next->next;
5364 break;
5365 }
5366 prev = next;
5367 next = next->next;
5368 }
5369 loader_platform_thread_unlock_mutex(&loader_global_instance_list_lock);
5370
5371 while (NULL != icd_terms) {
5372 if (icd_terms->instance) {
5373 icd_terms->dispatch.DestroyInstance(icd_terms->instance, pAllocator);
5374 }
5375 next_icd_term = icd_terms->next;
5376 icd_terms->instance = VK_NULL_HANDLE;
5377 loader_icd_destroy(ptr_instance, icd_terms, pAllocator);
5378
5379 icd_terms = next_icd_term;
5380 }
5381
5382 loader_delete_layer_list_and_properties(ptr_instance, &ptr_instance->instance_layer_list);
5383 loader_scanned_icd_clear(ptr_instance, &ptr_instance->icd_tramp_list);
5384 loader_destroy_generic_list(ptr_instance, (struct loader_generic_list *)&ptr_instance->ext_list);
5385 if (NULL != ptr_instance->phys_devs_term) {
5386 for (uint32_t i = 0; i < ptr_instance->phys_dev_count_term; i++) {
5387 for (uint32_t j = i + 1; j < ptr_instance->phys_dev_count_term; j++) {
5388 if (ptr_instance->phys_devs_term[i] == ptr_instance->phys_devs_term[j]) {
5389 ptr_instance->phys_devs_term[j] = NULL;
5390 }
5391 }
5392 }
5393 for (uint32_t i = 0; i < ptr_instance->phys_dev_count_term; i++) {
5394 loader_instance_heap_free(ptr_instance, ptr_instance->phys_devs_term[i]);
5395 }
5396 loader_instance_heap_free(ptr_instance, ptr_instance->phys_devs_term);
5397 }
5398 if (NULL != ptr_instance->phys_dev_groups_term) {
5399 for (uint32_t i = 0; i < ptr_instance->phys_dev_group_count_term; i++) {
5400 loader_instance_heap_free(ptr_instance, ptr_instance->phys_dev_groups_term[i]);
5401 }
5402 loader_instance_heap_free(ptr_instance, ptr_instance->phys_dev_groups_term);
5403 }
5404 loader_free_dev_ext_table(ptr_instance);
5405 loader_free_phys_dev_ext_table(ptr_instance);
5406 }
5407
terminator_CreateDevice(VkPhysicalDevice physicalDevice,const VkDeviceCreateInfo * pCreateInfo,const VkAllocationCallbacks * pAllocator,VkDevice * pDevice)5408 VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateDevice(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo *pCreateInfo,
5409 const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
5410 VkResult res = VK_SUCCESS;
5411 struct loader_physical_device_term *phys_dev_term;
5412 phys_dev_term = (struct loader_physical_device_term *)physicalDevice;
5413 struct loader_icd_term *icd_term = phys_dev_term->this_icd_term;
5414
5415 struct loader_device *dev = (struct loader_device *)*pDevice;
5416 PFN_vkCreateDevice fpCreateDevice = icd_term->dispatch.CreateDevice;
5417 struct loader_extension_list icd_exts;
5418
5419 VkBaseOutStructure *caller_dgci_container = NULL;
5420 VkDeviceGroupDeviceCreateInfoKHR *caller_dgci = NULL;
5421
5422 dev->phys_dev_term = phys_dev_term;
5423
5424 icd_exts.list = NULL;
5425
5426 if (fpCreateDevice == NULL) {
5427 loader_log(icd_term->this_instance, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
5428 "terminator_CreateDevice: No vkCreateDevice command exposed by ICD %s", icd_term->scanned_icd->lib_name);
5429 res = VK_ERROR_INITIALIZATION_FAILED;
5430 goto out;
5431 }
5432
5433 VkDeviceCreateInfo localCreateInfo;
5434 memcpy(&localCreateInfo, pCreateInfo, sizeof(localCreateInfo));
5435
5436 // NOTE: Need to filter the extensions to only those supported by the ICD.
5437 // No ICD will advertise support for layers. An ICD library could support a layer,
5438 // but it would be independent of the actual ICD, just in the same library.
5439 char **filtered_extension_names = NULL;
5440 if (0 < pCreateInfo->enabledExtensionCount) {
5441 filtered_extension_names = loader_stack_alloc(pCreateInfo->enabledExtensionCount * sizeof(char *));
5442 if (NULL == filtered_extension_names) {
5443 loader_log(icd_term->this_instance, VULKAN_LOADER_ERROR_BIT, 0,
5444 "terminator_CreateDevice: Failed to create extension name storage for %d extensions",
5445 pCreateInfo->enabledExtensionCount);
5446 return VK_ERROR_OUT_OF_HOST_MEMORY;
5447 }
5448 }
5449
5450 localCreateInfo.enabledLayerCount = 0;
5451 localCreateInfo.ppEnabledLayerNames = NULL;
5452
5453 localCreateInfo.enabledExtensionCount = 0;
5454 localCreateInfo.ppEnabledExtensionNames = (const char *const *)filtered_extension_names;
5455
5456 // Get the physical device (ICD) extensions
5457 res = loader_init_generic_list(icd_term->this_instance, (struct loader_generic_list *)&icd_exts, sizeof(VkExtensionProperties));
5458 if (VK_SUCCESS != res) {
5459 goto out;
5460 }
5461
5462 res = loader_add_device_extensions(icd_term->this_instance, icd_term->dispatch.EnumerateDeviceExtensionProperties,
5463 phys_dev_term->phys_dev, icd_term->scanned_icd->lib_name, &icd_exts);
5464 if (res != VK_SUCCESS) {
5465 goto out;
5466 }
5467
5468 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
5469 const char *extension_name = pCreateInfo->ppEnabledExtensionNames[i];
5470 VkExtensionProperties *prop = get_extension_property(extension_name, &icd_exts);
5471 if (prop) {
5472 filtered_extension_names[localCreateInfo.enabledExtensionCount] = (char *)extension_name;
5473 localCreateInfo.enabledExtensionCount++;
5474 } else {
5475 loader_log(icd_term->this_instance, VULKAN_LOADER_DEBUG_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
5476 "vkCreateDevice extension %s not available for devices associated with ICD %s", extension_name,
5477 icd_term->scanned_icd->lib_name);
5478 }
5479 }
5480
5481 // Before we continue, If KHX_device_group is the list of enabled and viable extensions, then we then need to look for the
5482 // corresponding VkDeviceGroupDeviceCreateInfo struct in the device list and replace all the physical device values (which
5483 // are really loader physical device terminator values) with the ICD versions.
5484 // if (icd_term->this_instance->enabled_known_extensions.khr_device_group_creation == 1) {
5485 {
5486 VkBaseOutStructure *pNext = (VkBaseOutStructure *)localCreateInfo.pNext;
5487 VkBaseOutStructure *pPrev = (VkBaseOutStructure *)&localCreateInfo;
5488 while (NULL != pNext) {
5489 if (VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO == pNext->sType) {
5490 VkDeviceGroupDeviceCreateInfo *cur_struct = (VkDeviceGroupDeviceCreateInfo *)pNext;
5491 if (0 < cur_struct->physicalDeviceCount && NULL != cur_struct->pPhysicalDevices) {
5492 VkDeviceGroupDeviceCreateInfo *temp_struct = loader_stack_alloc(sizeof(VkDeviceGroupDeviceCreateInfo));
5493 VkPhysicalDevice *phys_dev_array = NULL;
5494 if (NULL == temp_struct) {
5495 return VK_ERROR_OUT_OF_HOST_MEMORY;
5496 }
5497 memcpy(temp_struct, cur_struct, sizeof(VkDeviceGroupDeviceCreateInfo));
5498 phys_dev_array = loader_stack_alloc(sizeof(VkPhysicalDevice) * cur_struct->physicalDeviceCount);
5499 if (NULL == phys_dev_array) {
5500 return VK_ERROR_OUT_OF_HOST_MEMORY;
5501 }
5502
5503 // Before calling down, replace the incoming physical device values (which are really loader terminator
5504 // physical devices) with the ICDs physical device values.
5505 struct loader_physical_device_term *cur_term;
5506 for (uint32_t phys_dev = 0; phys_dev < cur_struct->physicalDeviceCount; phys_dev++) {
5507 cur_term = (struct loader_physical_device_term *)cur_struct->pPhysicalDevices[phys_dev];
5508 phys_dev_array[phys_dev] = cur_term->phys_dev;
5509 }
5510 temp_struct->pPhysicalDevices = phys_dev_array;
5511
5512 // Keep track of pointers to restore pNext chain before returning
5513 caller_dgci_container = pPrev;
5514 caller_dgci = cur_struct;
5515
5516 // Replace the old struct in the pNext chain with this one.
5517 pPrev->pNext = (VkBaseOutStructure *)temp_struct;
5518 }
5519 break;
5520 }
5521
5522 pPrev = pNext;
5523 pNext = pNext->pNext;
5524 }
5525 }
5526
5527 // Handle loader emulation for structs that are not supported by the ICD:
5528 // Presently, the emulation leaves the pNext chain alone. This means that the ICD will receive items in the chain which
5529 // are not recognized by the ICD. If this causes the ICD to fail, then the items would have to be removed here. The current
5530 // implementation does not remove them because copying the pNext chain would be impossible if the loader does not recognize
5531 // the any of the struct types, as the loader would not know the size to allocate and copy.
5532 // if (icd_term->dispatch.GetPhysicalDeviceFeatures2 == NULL && icd_term->dispatch.GetPhysicalDeviceFeatures2KHR == NULL) {
5533 {
5534 const void *pNext = localCreateInfo.pNext;
5535 while (pNext != NULL) {
5536 switch (*(VkStructureType *)pNext) {
5537 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2: {
5538 const VkPhysicalDeviceFeatures2KHR *features = pNext;
5539
5540 if (icd_term->dispatch.GetPhysicalDeviceFeatures2 == NULL &&
5541 icd_term->dispatch.GetPhysicalDeviceFeatures2KHR == NULL) {
5542 loader_log(icd_term->this_instance, VULKAN_LOADER_INFO_BIT, 0,
5543 "vkCreateDevice: Emulating handling of VkPhysicalDeviceFeatures2 in pNext chain for ICD \"%s\"",
5544 icd_term->scanned_icd->lib_name);
5545
5546 // Verify that VK_KHR_get_physical_device_properties2 is enabled
5547 if (icd_term->this_instance->enabled_known_extensions.khr_get_physical_device_properties2) {
5548 localCreateInfo.pEnabledFeatures = &features->features;
5549 }
5550 }
5551
5552 // Leave this item in the pNext chain for now
5553
5554 pNext = features->pNext;
5555 break;
5556 }
5557
5558 case VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO: {
5559 const VkDeviceGroupDeviceCreateInfoKHR *group_info = pNext;
5560
5561 if (icd_term->dispatch.EnumeratePhysicalDeviceGroups == NULL &&
5562 icd_term->dispatch.EnumeratePhysicalDeviceGroupsKHR == NULL) {
5563 loader_log(icd_term->this_instance, VULKAN_LOADER_INFO_BIT, 0,
5564 "vkCreateDevice: Emulating handling of VkPhysicalDeviceGroupProperties in pNext chain for "
5565 "ICD \"%s\"",
5566 icd_term->scanned_icd->lib_name);
5567
5568 // The group must contain only this one device, since physical device groups aren't actually supported
5569 if (group_info->physicalDeviceCount != 1) {
5570 loader_log(icd_term->this_instance, VULKAN_LOADER_ERROR_BIT, 0,
5571 "vkCreateDevice: Emulation failed to create device from device group info");
5572 res = VK_ERROR_INITIALIZATION_FAILED;
5573 goto out;
5574 }
5575 }
5576
5577 // Nothing needs to be done here because we're leaving the item in the pNext chain and because the spec
5578 // states that the physicalDevice argument must be included in the device group, and we've already checked
5579 // that it is
5580
5581 pNext = group_info->pNext;
5582 break;
5583 }
5584
5585 // Multiview properties are also allowed, but since VK_KHX_multiview is a device extension, we'll just let the
5586 // ICD handle that error when the user enables the extension here
5587 default: {
5588 const VkBaseInStructure *header = pNext;
5589 pNext = header->pNext;
5590 break;
5591 }
5592 }
5593 }
5594 }
5595
5596 // Every extension that has a loader-defined terminator needs to be marked as enabled or disabled so that we know whether or
5597 // not to return that terminator when vkGetDeviceProcAddr is called
5598 for (uint32_t i = 0; i < localCreateInfo.enabledExtensionCount; ++i) {
5599 if (!strcmp(localCreateInfo.ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME)) {
5600 dev->extensions.khr_swapchain_enabled = true;
5601 } else if (!strcmp(localCreateInfo.ppEnabledExtensionNames[i], VK_KHR_DISPLAY_SWAPCHAIN_EXTENSION_NAME)) {
5602 dev->extensions.khr_display_swapchain_enabled = true;
5603 } else if (!strcmp(localCreateInfo.ppEnabledExtensionNames[i], VK_KHR_DEVICE_GROUP_EXTENSION_NAME)) {
5604 dev->extensions.khr_device_group_enabled = true;
5605 } else if (!strcmp(localCreateInfo.ppEnabledExtensionNames[i], VK_EXT_DEBUG_MARKER_EXTENSION_NAME)) {
5606 dev->extensions.ext_debug_marker_enabled = true;
5607 } else if (!strcmp(localCreateInfo.ppEnabledExtensionNames[i], "VK_EXT_full_screen_exclusive")) {
5608 dev->extensions.ext_full_screen_exclusive_enabled = true;
5609 }
5610 }
5611 dev->extensions.ext_debug_utils_enabled = icd_term->this_instance->enabled_known_extensions.ext_debug_utils;
5612
5613 VkPhysicalDeviceProperties properties;
5614 icd_term->dispatch.GetPhysicalDeviceProperties(phys_dev_term->phys_dev, &properties);
5615 if (!dev->extensions.khr_device_group_enabled) {
5616 if (properties.apiVersion >= VK_API_VERSION_1_1) {
5617 dev->extensions.khr_device_group_enabled = true;
5618 }
5619 }
5620
5621 loader_log(icd_term->this_instance, VULKAN_LOADER_LAYER_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
5622 " Using \"%s\" with driver: \"%s\"\n", properties.deviceName, icd_term->scanned_icd->lib_name);
5623
5624 res = fpCreateDevice(phys_dev_term->phys_dev, &localCreateInfo, pAllocator, &dev->icd_device);
5625 if (res != VK_SUCCESS) {
5626 loader_log(icd_term->this_instance, VULKAN_LOADER_ERROR_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
5627 "terminator_CreateDevice: Failed in ICD %s vkCreateDevice call", icd_term->scanned_icd->lib_name);
5628 goto out;
5629 }
5630
5631 *pDevice = dev->icd_device;
5632 loader_add_logical_device(icd_term->this_instance, icd_term, dev);
5633
5634 // Init dispatch pointer in new device object
5635 loader_init_dispatch(*pDevice, &dev->loader_dispatch);
5636
5637 out:
5638 if (NULL != icd_exts.list) {
5639 loader_destroy_generic_list(icd_term->this_instance, (struct loader_generic_list *)&icd_exts);
5640 }
5641
5642 // Restore pNext pointer to old VkDeviceGroupDeviceCreateInfoKHX
5643 // in the chain to maintain consistency for the caller.
5644 if (caller_dgci_container != NULL) {
5645 caller_dgci_container->pNext = (VkBaseOutStructure *)caller_dgci;
5646 }
5647
5648 return res;
5649 }
5650
5651 // Update the trampoline physical devices with the wrapped version.
5652 // We always want to re-use previous physical device pointers since they may be used by an application
5653 // after returning previously.
setup_loader_tramp_phys_devs(struct loader_instance * inst,uint32_t phys_dev_count,VkPhysicalDevice * phys_devs)5654 VkResult setup_loader_tramp_phys_devs(struct loader_instance *inst, uint32_t phys_dev_count, VkPhysicalDevice *phys_devs) {
5655 VkResult res = VK_SUCCESS;
5656 uint32_t found_count = 0;
5657 uint32_t old_count = inst->phys_dev_count_tramp;
5658 uint32_t new_count = inst->total_gpu_count;
5659 struct loader_physical_device_tramp **new_phys_devs = NULL;
5660
5661 if (0 == phys_dev_count) {
5662 return VK_SUCCESS;
5663 }
5664 if (phys_dev_count > new_count) {
5665 new_count = phys_dev_count;
5666 }
5667
5668 // We want an old to new index array and a new to old index array
5669 int32_t *old_to_new_index = (int32_t *)loader_stack_alloc(sizeof(int32_t) * old_count);
5670 int32_t *new_to_old_index = (int32_t *)loader_stack_alloc(sizeof(int32_t) * new_count);
5671 if (NULL == old_to_new_index || NULL == new_to_old_index) {
5672 return VK_ERROR_OUT_OF_HOST_MEMORY;
5673 }
5674
5675 // Initialize both
5676 for (uint32_t cur_idx = 0; cur_idx < old_count; ++cur_idx) {
5677 old_to_new_index[cur_idx] = -1;
5678 }
5679 for (uint32_t cur_idx = 0; cur_idx < new_count; ++cur_idx) {
5680 new_to_old_index[cur_idx] = -1;
5681 }
5682
5683 // Figure out the old->new and new->old indices
5684 for (uint32_t cur_idx = 0; cur_idx < old_count; ++cur_idx) {
5685 for (uint32_t new_idx = 0; new_idx < phys_dev_count; ++new_idx) {
5686 if (inst->phys_devs_tramp[cur_idx]->phys_dev == phys_devs[new_idx]) {
5687 old_to_new_index[cur_idx] = (int32_t)new_idx;
5688 new_to_old_index[new_idx] = (int32_t)cur_idx;
5689 found_count++;
5690 break;
5691 }
5692 }
5693 }
5694
5695 // If we found exactly the number of items we were looking for as we had before. Then everything
5696 // we already have is good enough and we just need to update the array that was passed in with
5697 // the loader values.
5698 if (found_count == phys_dev_count && 0 != old_count && old_count == new_count) {
5699 for (uint32_t new_idx = 0; new_idx < phys_dev_count; ++new_idx) {
5700 for (uint32_t cur_idx = 0; cur_idx < old_count; ++cur_idx) {
5701 if (old_to_new_index[cur_idx] == (int32_t)new_idx) {
5702 phys_devs[new_idx] = (VkPhysicalDevice)inst->phys_devs_tramp[cur_idx];
5703 break;
5704 }
5705 }
5706 }
5707 // Nothing else to do for this path
5708 res = VK_SUCCESS;
5709 } else {
5710 // Something is different, so do the full path of checking every device and creating a new array to use.
5711 // This can happen if a device was added, or removed, or we hadn't previously queried all the data and we
5712 // have more to store.
5713 new_phys_devs = loader_instance_heap_calloc(inst, sizeof(struct loader_physical_device_tramp *) * new_count,
5714 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
5715 if (NULL == new_phys_devs) {
5716 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
5717 "setup_loader_tramp_phys_devs: Failed to allocate new physical device array of size %d", new_count);
5718 res = VK_ERROR_OUT_OF_HOST_MEMORY;
5719 goto out;
5720 }
5721
5722 if (new_count > phys_dev_count) {
5723 found_count = phys_dev_count;
5724 } else {
5725 found_count = new_count;
5726 }
5727
5728 // First try to see if an old item exists that matches the new item. If so, just copy it over.
5729 for (uint32_t new_idx = 0; new_idx < found_count; ++new_idx) {
5730 bool old_item_found = false;
5731 for (uint32_t cur_idx = 0; cur_idx < old_count; ++cur_idx) {
5732 if (old_to_new_index[cur_idx] == (int32_t)new_idx) {
5733 // Copy over old item to correct spot in the new array
5734 new_phys_devs[new_idx] = inst->phys_devs_tramp[cur_idx];
5735 old_item_found = true;
5736 break;
5737 }
5738 }
5739 // Something wasn't found, so it's new so add it to the new list
5740 if (!old_item_found) {
5741 new_phys_devs[new_idx] = loader_instance_heap_alloc(inst, sizeof(struct loader_physical_device_tramp),
5742 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
5743 if (NULL == new_phys_devs[new_idx]) {
5744 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
5745 "setup_loader_tramp_phys_devs: Failed to allocate new trampoline physical device");
5746 res = VK_ERROR_OUT_OF_HOST_MEMORY;
5747 goto out;
5748 }
5749
5750 // Initialize the new physicalDevice object
5751 loader_set_dispatch((void *)new_phys_devs[new_idx], inst->disp);
5752 new_phys_devs[new_idx]->this_instance = inst;
5753 new_phys_devs[new_idx]->phys_dev = phys_devs[new_idx];
5754 new_phys_devs[new_idx]->magic = PHYS_TRAMP_MAGIC_NUMBER;
5755 }
5756
5757 phys_devs[new_idx] = (VkPhysicalDevice)new_phys_devs[new_idx];
5758 }
5759
5760 // We usually get here if the user array is smaller than the total number of devices, so copy the
5761 // remaining devices we have over to the new array.
5762 uint32_t start = found_count;
5763 for (uint32_t new_idx = start; new_idx < new_count; ++new_idx) {
5764 for (uint32_t cur_idx = 0; cur_idx < old_count; ++cur_idx) {
5765 if (old_to_new_index[cur_idx] == -1) {
5766 new_phys_devs[new_idx] = inst->phys_devs_tramp[cur_idx];
5767 old_to_new_index[cur_idx] = new_idx;
5768 found_count++;
5769 break;
5770 }
5771 }
5772 }
5773 }
5774
5775 out:
5776
5777 if (NULL != new_phys_devs) {
5778 if (VK_SUCCESS != res) {
5779 for (uint32_t new_idx = 0; new_idx < found_count; ++new_idx) {
5780 // If an OOM occurred inside the copying of the new physical devices into the existing array
5781 // will leave some of the old physical devices in the array which may have been copied into
5782 // the new array, leading to them being freed twice. To avoid this we just make sure to not
5783 // delete physical devices which were copied.
5784 bool found = false;
5785 for (uint32_t cur_idx = 0; cur_idx < inst->phys_dev_count_tramp; cur_idx++) {
5786 if (new_phys_devs[new_idx] == inst->phys_devs_tramp[cur_idx]) {
5787 found = true;
5788 break;
5789 }
5790 }
5791 if (!found) {
5792 loader_instance_heap_free(inst, new_phys_devs[new_idx]);
5793 }
5794 }
5795 loader_instance_heap_free(inst, new_phys_devs);
5796 } else {
5797 if (new_count > inst->total_gpu_count) {
5798 inst->total_gpu_count = new_count;
5799 }
5800 // Free everything in the old array that was not copied into the new array
5801 // here. We can't attempt to do that before here since the previous loop
5802 // looking before the "out:" label may hit an out of memory condition resulting
5803 // in memory leaking.
5804 if (NULL != inst->phys_devs_tramp) {
5805 for (uint32_t i = 0; i < inst->phys_dev_count_tramp; i++) {
5806 bool found = false;
5807 for (uint32_t j = 0; j < inst->total_gpu_count; j++) {
5808 if (inst->phys_devs_tramp[i] == new_phys_devs[j]) {
5809 found = true;
5810 break;
5811 }
5812 }
5813 if (!found) {
5814 loader_instance_heap_free(inst, inst->phys_devs_tramp[i]);
5815 }
5816 }
5817 loader_instance_heap_free(inst, inst->phys_devs_tramp);
5818 }
5819 inst->phys_devs_tramp = new_phys_devs;
5820 inst->phys_dev_count_tramp = found_count;
5821 }
5822 }
5823 if (VK_SUCCESS != res) {
5824 inst->total_gpu_count = 0;
5825 }
5826
5827 return res;
5828 }
5829
5830 #ifdef LOADER_ENABLE_LINUX_SORT
is_linux_sort_enabled(struct loader_instance * inst)5831 bool is_linux_sort_enabled(struct loader_instance *inst) {
5832 bool sort_items = inst->supports_get_dev_prop_2;
5833 char *env_value = loader_getenv("VK_LOADER_DISABLE_SELECT", inst);
5834 if (NULL != env_value) {
5835 int32_t int_env_val = atoi(env_value);
5836 loader_free_getenv(env_value, inst);
5837 if (int_env_val != 0) {
5838 sort_items = false;
5839 }
5840 }
5841 return sort_items;
5842 }
5843 #endif // LOADER_ENABLE_LINUX_SORT
5844
5845 // Check if this physical device is already in the old buffer
check_if_phys_dev_already_present(struct loader_instance * inst,VkPhysicalDevice physical_device,uint32_t idx,struct loader_physical_device_term ** new_phys_devs)5846 void check_if_phys_dev_already_present(struct loader_instance *inst, VkPhysicalDevice physical_device, uint32_t idx,
5847 struct loader_physical_device_term **new_phys_devs) {
5848 if (NULL != inst->phys_devs_term) {
5849 for (uint32_t old_idx = 0; old_idx < inst->phys_dev_count_term; old_idx++) {
5850 if (physical_device == inst->phys_devs_term[old_idx]->phys_dev) {
5851 new_phys_devs[idx] = inst->phys_devs_term[old_idx];
5852 break;
5853 }
5854 }
5855 }
5856 }
5857
allocate_new_phys_dev_at_idx(struct loader_instance * inst,VkPhysicalDevice physical_device,struct loader_phys_dev_per_icd * dev_array,uint32_t idx,struct loader_physical_device_term ** new_phys_devs)5858 VkResult allocate_new_phys_dev_at_idx(struct loader_instance *inst, VkPhysicalDevice physical_device,
5859 struct loader_phys_dev_per_icd *dev_array, uint32_t idx,
5860 struct loader_physical_device_term **new_phys_devs) {
5861 if (NULL == new_phys_devs[idx]) {
5862 new_phys_devs[idx] =
5863 loader_instance_heap_alloc(inst, sizeof(struct loader_physical_device_term), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
5864 if (NULL == new_phys_devs[idx]) {
5865 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
5866 "allocate_new_phys_dev_at_idx: Failed to allocate physical device terminator object %d", idx);
5867 return VK_ERROR_OUT_OF_HOST_MEMORY;
5868 }
5869
5870 loader_set_dispatch((void *)new_phys_devs[idx], inst->disp);
5871 new_phys_devs[idx]->this_icd_term = dev_array->icd_term;
5872 new_phys_devs[idx]->icd_index = (uint8_t)(dev_array->icd_index);
5873 new_phys_devs[idx]->phys_dev = physical_device;
5874 }
5875 return VK_SUCCESS;
5876 }
5877
5878 /* Enumerate all physical devices from ICDs and add them to inst->phys_devs_term
5879 *
5880 * There are two methods to find VkPhysicalDevices - vkEnumeratePhysicalDevices and vkEnumerateAdapterPhysicalDevices
5881 * The latter is supported on windows only and on devices supporting ICD Interface Version 6 and greater.
5882 *
5883 * Once all physical devices are acquired, they need to be pulled into a single list of `loader_physical_device_term`'s.
5884 * They also need to be setup - the icd_term, icd_index, phys_dev, and disp (dispatch table) all need the correct data.
5885 * Additionally, we need to keep using already setup physical devices as they may be in use, thus anything enumerated
5886 * that is already in inst->phys_devs_term will be carried over.
5887 */
5888
setup_loader_term_phys_devs(struct loader_instance * inst)5889 VkResult setup_loader_term_phys_devs(struct loader_instance *inst) {
5890 VkResult res = VK_SUCCESS;
5891 struct loader_icd_term *icd_term;
5892 uint32_t icd_idx = 0;
5893 uint32_t windows_sorted_devices_count = 0;
5894 struct loader_phys_dev_per_icd *windows_sorted_devices_array = NULL;
5895 uint32_t icd_count = 0;
5896 struct loader_phys_dev_per_icd *icd_phys_dev_array = NULL;
5897 uint32_t new_phys_devs_count = 0;
5898 struct loader_physical_device_term **new_phys_devs = NULL;
5899
5900 #if defined(_WIN32)
5901 // Get the physical devices supported by platform sorting mechanism into a separate list
5902 res = windows_read_sorted_physical_devices(inst, &windows_sorted_devices_count, &windows_sorted_devices_array);
5903 if (VK_SUCCESS != res) {
5904 goto out;
5905 }
5906 #endif
5907
5908 icd_count = inst->total_icd_count;
5909
5910 // Allocate something to store the physical device characteristics that we read from each ICD.
5911 icd_phys_dev_array = (struct loader_phys_dev_per_icd *)loader_stack_alloc(sizeof(struct loader_phys_dev_per_icd) * icd_count);
5912 if (NULL == icd_phys_dev_array) {
5913 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
5914 "setup_loader_term_phys_devs: Failed to allocate temporary ICD Physical device info array of size %d",
5915 icd_count);
5916 res = VK_ERROR_OUT_OF_HOST_MEMORY;
5917 goto out;
5918 }
5919 memset(icd_phys_dev_array, 0, sizeof(struct loader_phys_dev_per_icd) * icd_count);
5920
5921 // For each ICD, query the number of physical devices, and then get an
5922 // internal value for those physical devices.
5923 icd_term = inst->icd_terms;
5924 while (NULL != icd_term) {
5925 // This is the legacy behavior which should be skipped if EnumerateAdapterPhysicalDevices is available
5926 // and we successfully enumerated sorted adapters using windows_read_sorted_physical_devices.
5927 #if defined(VK_USE_PLATFORM_WIN32_KHR)
5928 if (icd_term->scanned_icd->EnumerateAdapterPhysicalDevices != NULL) {
5929 icd_term = icd_term->next;
5930 ++icd_idx;
5931 continue;
5932 }
5933 #endif
5934
5935 res = icd_term->dispatch.EnumeratePhysicalDevices(icd_term->instance, &icd_phys_dev_array[icd_idx].device_count, NULL);
5936 if (VK_SUCCESS != res) {
5937 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
5938 "setup_loader_term_phys_devs: Call to ICD %d's \'vkEnumeratePhysicalDevices\' failed with error 0x%08x",
5939 icd_idx, res);
5940 goto out;
5941 }
5942
5943 icd_phys_dev_array[icd_idx].physical_devices =
5944 (VkPhysicalDevice *)loader_stack_alloc(icd_phys_dev_array[icd_idx].device_count * sizeof(VkPhysicalDevice));
5945 if (NULL == icd_phys_dev_array[icd_idx].physical_devices) {
5946 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
5947 "setup_loader_term_phys_devs: Failed to allocate temporary ICD Physical device array for ICD %d of size %d",
5948 icd_idx, icd_phys_dev_array[icd_idx].device_count);
5949 res = VK_ERROR_OUT_OF_HOST_MEMORY;
5950 goto out;
5951 }
5952
5953 res = icd_term->dispatch.EnumeratePhysicalDevices(icd_term->instance, &(icd_phys_dev_array[icd_idx].device_count),
5954 icd_phys_dev_array[icd_idx].physical_devices);
5955 if (VK_SUCCESS != res) {
5956 goto out;
5957 }
5958 icd_phys_dev_array[icd_idx].icd_term = icd_term;
5959 icd_phys_dev_array[icd_idx].icd_index = icd_idx;
5960 icd_term = icd_term->next;
5961 ++icd_idx;
5962 }
5963
5964 // Add up both the windows sorted and non windows found physical device counts
5965 for (uint32_t i = 0; i < windows_sorted_devices_count; ++i) {
5966 new_phys_devs_count += windows_sorted_devices_array[i].device_count;
5967 }
5968 for (uint32_t i = 0; i < icd_count; ++i) {
5969 new_phys_devs_count += icd_phys_dev_array[i].device_count;
5970 }
5971
5972 // Bail out if there are no physical devices reported
5973 if (0 == new_phys_devs_count) {
5974 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
5975 "setup_loader_term_phys_devs: Failed to detect any valid GPUs in the current config");
5976 res = VK_ERROR_INITIALIZATION_FAILED;
5977 goto out;
5978 }
5979
5980 // Create an allocation large enough to hold both the windows sorting enumeration and non-windows physical device enumeration
5981 new_phys_devs = loader_instance_heap_calloc(inst, sizeof(struct loader_physical_device_term *) * new_phys_devs_count,
5982 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
5983 if (NULL == new_phys_devs) {
5984 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
5985 "setup_loader_term_phys_devs: Failed to allocate new physical device array of size %d", new_phys_devs_count);
5986 res = VK_ERROR_OUT_OF_HOST_MEMORY;
5987 goto out;
5988 }
5989
5990 // Current index into the new_phys_devs array - increment whenever we've written in.
5991 uint32_t idx = 0;
5992
5993 // Copy over everything found through sorted enumeration
5994 for (uint32_t i = 0; i < windows_sorted_devices_count; ++i) {
5995 for (uint32_t j = 0; j < windows_sorted_devices_array[i].device_count; ++j) {
5996 check_if_phys_dev_already_present(inst, windows_sorted_devices_array[i].physical_devices[j], idx, new_phys_devs);
5997
5998 res = allocate_new_phys_dev_at_idx(inst, windows_sorted_devices_array[i].physical_devices[j],
5999 &windows_sorted_devices_array[i], idx, new_phys_devs);
6000 if (res == VK_ERROR_OUT_OF_HOST_MEMORY) {
6001 goto out;
6002 }
6003 // Increment the count of new physical devices
6004 idx++;
6005 }
6006 }
6007
6008 // Now go through the rest of the physical devices and add them to new_phys_devs
6009 #ifdef LOADER_ENABLE_LINUX_SORT
6010 if (is_linux_sort_enabled(inst)) {
6011 for (uint32_t dev = idx; dev < new_phys_devs_count; ++dev) {
6012 new_phys_devs[dev] =
6013 loader_instance_heap_alloc(inst, sizeof(struct loader_physical_device_term), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
6014 if (NULL == new_phys_devs[dev]) {
6015 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6016 "setup_loader_term_phys_devs: Failed to allocate physical device terminator object %d", dev);
6017 res = VK_ERROR_OUT_OF_HOST_MEMORY;
6018 goto out;
6019 }
6020 }
6021
6022 // Get the physical devices supported by platform sorting mechanism into a separate list
6023 // Pass in a sublist to the function so it only operates on the correct elements. This means passing in a pointer to the
6024 // current next element in new_phys_devs and passing in a `count` of currently unwritten elements
6025 res =
6026 linux_read_sorted_physical_devices(inst, icd_count, icd_phys_dev_array, new_phys_devs_count - idx, &new_phys_devs[idx]);
6027 if (res == VK_ERROR_OUT_OF_HOST_MEMORY) {
6028 goto out;
6029 }
6030 // Keep previously allocated physical device info since apps may already be using that!
6031 for (uint32_t new_idx = idx; new_idx < new_phys_devs_count; new_idx++) {
6032 for (uint32_t old_idx = 0; old_idx < inst->phys_dev_count_term; old_idx++) {
6033 if (new_phys_devs[new_idx]->phys_dev == inst->phys_devs_term[old_idx]->phys_dev) {
6034 loader_log(inst, VULKAN_LOADER_DEBUG_BIT | VULKAN_LOADER_DRIVER_BIT, 0,
6035 "Copying old device %u into new device %u", old_idx, new_idx);
6036 // Free the old new_phys_devs info since we're not using it before we assign the new info
6037 loader_instance_heap_free(inst, new_phys_devs[new_idx]);
6038 new_phys_devs[new_idx] = inst->phys_devs_term[old_idx];
6039 break;
6040 }
6041 }
6042 }
6043 // We want the following code to run if either linux sorting is disabled at compile time or runtime
6044 } else {
6045 #endif // LOADER_ENABLE_LINUX_SORT
6046
6047 // Copy over everything found through the non-sorted means.
6048 for (uint32_t i = 0; i < icd_count; ++i) {
6049 for (uint32_t j = 0; j < icd_phys_dev_array[i].device_count; ++j) {
6050 check_if_phys_dev_already_present(inst, icd_phys_dev_array[i].physical_devices[j], idx, new_phys_devs);
6051
6052 // If this physical device isn't in the old buffer, then we need to create it.
6053 res = allocate_new_phys_dev_at_idx(inst, icd_phys_dev_array[i].physical_devices[j], &icd_phys_dev_array[i], idx,
6054 new_phys_devs);
6055 if (res == VK_ERROR_OUT_OF_HOST_MEMORY) {
6056 goto out;
6057 }
6058 // Increment the count of new physical devices
6059 idx++;
6060 }
6061 }
6062 #ifdef LOADER_ENABLE_LINUX_SORT
6063 }
6064 #endif // LOADER_ENABLE_LINUX_SORT
6065 out:
6066
6067 if (VK_SUCCESS != res) {
6068 if (NULL != new_phys_devs) {
6069 // We've encountered an error, so we should free the new buffers.
6070 for (uint32_t i = 0; i < new_phys_devs_count; i++) {
6071 // If an OOM occurred inside the copying of the new physical devices into the existing array
6072 // will leave some of the old physical devices in the array which may have been copied into
6073 // the new array, leading to them being freed twice. To avoid this we just make sure to not
6074 // delete physical devices which were copied.
6075 bool found = false;
6076 if (NULL != inst->phys_devs_term) {
6077 for (uint32_t old_idx = 0; old_idx < inst->phys_dev_count_term; old_idx++) {
6078 if (new_phys_devs[i] == inst->phys_devs_term[old_idx]) {
6079 found = true;
6080 break;
6081 }
6082 }
6083 }
6084 if (!found) {
6085 loader_instance_heap_free(inst, new_phys_devs[i]);
6086 }
6087 }
6088 loader_instance_heap_free(inst, new_phys_devs);
6089 }
6090 inst->total_gpu_count = 0;
6091 } else {
6092 if (NULL != inst->phys_devs_term) {
6093 // Free everything in the old array that was not copied into the new array
6094 // here. We can't attempt to do that before here since the previous loop
6095 // looking before the "out:" label may hit an out of memory condition resulting
6096 // in memory leaking.
6097 for (uint32_t i = 0; i < inst->phys_dev_count_term; i++) {
6098 bool found = false;
6099 for (uint32_t j = 0; j < new_phys_devs_count; j++) {
6100 if (new_phys_devs != NULL && inst->phys_devs_term[i] == new_phys_devs[j]) {
6101 found = true;
6102 break;
6103 }
6104 }
6105 if (!found) {
6106 loader_instance_heap_free(inst, inst->phys_devs_term[i]);
6107 }
6108 }
6109 loader_instance_heap_free(inst, inst->phys_devs_term);
6110 }
6111
6112 // Swap out old and new devices list
6113 inst->phys_dev_count_term = new_phys_devs_count;
6114 inst->phys_devs_term = new_phys_devs;
6115 inst->total_gpu_count = new_phys_devs_count;
6116 }
6117
6118 if (windows_sorted_devices_array != NULL) {
6119 for (uint32_t i = 0; i < windows_sorted_devices_count; ++i) {
6120 if (windows_sorted_devices_array[i].device_count > 0 && windows_sorted_devices_array[i].physical_devices != NULL) {
6121 loader_instance_heap_free(inst, windows_sorted_devices_array[i].physical_devices);
6122 }
6123 }
6124 loader_instance_heap_free(inst, windows_sorted_devices_array);
6125 }
6126
6127 return res;
6128 }
6129
setup_loader_tramp_phys_dev_groups(struct loader_instance * inst,uint32_t group_count,VkPhysicalDeviceGroupProperties * groups)6130 VkResult setup_loader_tramp_phys_dev_groups(struct loader_instance *inst, uint32_t group_count,
6131 VkPhysicalDeviceGroupProperties *groups) {
6132 VkResult res = VK_SUCCESS;
6133 uint32_t cur_idx;
6134 uint32_t dev_idx;
6135
6136 if (0 == group_count) {
6137 return VK_SUCCESS;
6138 }
6139
6140 // Generate a list of all the devices and convert them to the loader ID
6141 uint32_t phys_dev_count = 0;
6142 for (cur_idx = 0; cur_idx < group_count; ++cur_idx) {
6143 phys_dev_count += groups[cur_idx].physicalDeviceCount;
6144 }
6145 VkPhysicalDevice *devices = (VkPhysicalDevice *)loader_stack_alloc(sizeof(VkPhysicalDevice) * phys_dev_count);
6146 if (NULL == devices) {
6147 return VK_ERROR_OUT_OF_HOST_MEMORY;
6148 }
6149
6150 uint32_t cur_device = 0;
6151 for (cur_idx = 0; cur_idx < group_count; ++cur_idx) {
6152 for (dev_idx = 0; dev_idx < groups[cur_idx].physicalDeviceCount; ++dev_idx) {
6153 devices[cur_device++] = groups[cur_idx].physicalDevices[dev_idx];
6154 }
6155 }
6156
6157 // Update the devices based on the loader physical device values.
6158 res = setup_loader_tramp_phys_devs(inst, phys_dev_count, devices);
6159 if (VK_SUCCESS != res) {
6160 return res;
6161 }
6162
6163 // Update the devices in the group structures now
6164 cur_device = 0;
6165 for (cur_idx = 0; cur_idx < group_count; ++cur_idx) {
6166 for (dev_idx = 0; dev_idx < groups[cur_idx].physicalDeviceCount; ++dev_idx) {
6167 groups[cur_idx].physicalDevices[dev_idx] = devices[cur_device++];
6168 }
6169 }
6170
6171 return res;
6172 }
6173
terminator_EnumeratePhysicalDevices(VkInstance instance,uint32_t * pPhysicalDeviceCount,VkPhysicalDevice * pPhysicalDevices)6174 VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount,
6175 VkPhysicalDevice *pPhysicalDevices) {
6176 struct loader_instance *inst = (struct loader_instance *)instance;
6177 VkResult res = VK_SUCCESS;
6178
6179 // Always call the setup loader terminator physical devices because they may
6180 // have changed at any point.
6181 res = setup_loader_term_phys_devs(inst);
6182 if (VK_SUCCESS != res) {
6183 goto out;
6184 }
6185
6186 uint32_t copy_count = inst->phys_dev_count_term;
6187 if (NULL != pPhysicalDevices) {
6188 if (copy_count > *pPhysicalDeviceCount) {
6189 copy_count = *pPhysicalDeviceCount;
6190 loader_log(inst, VULKAN_LOADER_INFO_BIT, 0,
6191 "terminator_EnumeratePhysicalDevices : Trimming device count from %d to %d.", inst->phys_dev_count_term,
6192 copy_count);
6193 res = VK_INCOMPLETE;
6194 }
6195
6196 for (uint32_t i = 0; i < copy_count; i++) {
6197 pPhysicalDevices[i] = (VkPhysicalDevice)inst->phys_devs_term[i];
6198 }
6199 }
6200
6201 *pPhysicalDeviceCount = copy_count;
6202
6203 out:
6204
6205 return res;
6206 }
6207
terminator_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,const char * pLayerName,uint32_t * pPropertyCount,VkExtensionProperties * pProperties)6208 VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
6209 const char *pLayerName, uint32_t *pPropertyCount,
6210 VkExtensionProperties *pProperties) {
6211 struct loader_physical_device_term *phys_dev_term;
6212
6213 struct loader_layer_list implicit_layer_list = {0};
6214 struct loader_extension_list all_exts = {0};
6215 struct loader_extension_list icd_exts = {0};
6216
6217 // Any layer or trampoline wrapping should be removed at this point in time can just cast to the expected
6218 // type for VkPhysicalDevice.
6219 phys_dev_term = (struct loader_physical_device_term *)physicalDevice;
6220
6221 // if we got here with a non-empty pLayerName, look up the extensions
6222 // from the json
6223 if (pLayerName != NULL && strlen(pLayerName) > 0) {
6224 uint32_t count;
6225 uint32_t copy_size;
6226 const struct loader_instance *inst = phys_dev_term->this_icd_term->this_instance;
6227 struct loader_device_extension_list *dev_ext_list = NULL;
6228 struct loader_device_extension_list local_ext_list;
6229 memset(&local_ext_list, 0, sizeof(local_ext_list));
6230 if (vk_string_validate(MaxLoaderStringLength, pLayerName) == VK_STRING_ERROR_NONE) {
6231 for (uint32_t i = 0; i < inst->instance_layer_list.count; i++) {
6232 struct loader_layer_properties *props = &inst->instance_layer_list.list[i];
6233 if (strcmp(props->info.layerName, pLayerName) == 0) {
6234 dev_ext_list = &props->device_extension_list;
6235 }
6236 }
6237
6238 count = (dev_ext_list == NULL) ? 0 : dev_ext_list->count;
6239 if (pProperties == NULL) {
6240 *pPropertyCount = count;
6241 loader_destroy_generic_list(inst, (struct loader_generic_list *)&local_ext_list);
6242 return VK_SUCCESS;
6243 }
6244
6245 copy_size = *pPropertyCount < count ? *pPropertyCount : count;
6246 for (uint32_t i = 0; i < copy_size; i++) {
6247 memcpy(&pProperties[i], &dev_ext_list->list[i].props, sizeof(VkExtensionProperties));
6248 }
6249 *pPropertyCount = copy_size;
6250
6251 loader_destroy_generic_list(inst, (struct loader_generic_list *)&local_ext_list);
6252 if (copy_size < count) {
6253 return VK_INCOMPLETE;
6254 }
6255 } else {
6256 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6257 "vkEnumerateDeviceExtensionProperties: pLayerName is too long or is badly formed");
6258 return VK_ERROR_EXTENSION_NOT_PRESENT;
6259 }
6260
6261 return VK_SUCCESS;
6262 }
6263
6264 // This case is during the call down the instance chain with pLayerName == NULL
6265 struct loader_icd_term *icd_term = phys_dev_term->this_icd_term;
6266 uint32_t icd_ext_count = *pPropertyCount;
6267 VkExtensionProperties *icd_props_list = pProperties;
6268 VkResult res;
6269
6270 if (NULL == icd_props_list) {
6271 // We need to find the count without duplicates. This requires querying the driver for the names of the extensions.
6272 // A small amount of storage is then needed to facilitate the de-duplication.
6273 res = icd_term->dispatch.EnumerateDeviceExtensionProperties(phys_dev_term->phys_dev, NULL, &icd_ext_count, NULL);
6274 if (res != VK_SUCCESS) {
6275 goto out;
6276 }
6277 if (icd_ext_count > 0) {
6278 icd_props_list = loader_instance_heap_alloc(icd_term->this_instance, sizeof(VkExtensionProperties) * icd_ext_count,
6279 VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
6280 if (NULL == icd_props_list) {
6281 res = VK_ERROR_OUT_OF_HOST_MEMORY;
6282 goto out;
6283 }
6284 }
6285 }
6286
6287 // Get the available device extension count, and if pProperties is not NULL, the extensions as well
6288 res = icd_term->dispatch.EnumerateDeviceExtensionProperties(phys_dev_term->phys_dev, NULL, &icd_ext_count, icd_props_list);
6289 if (res != VK_SUCCESS) {
6290 goto out;
6291 }
6292
6293 if (!loader_init_layer_list(icd_term->this_instance, &implicit_layer_list)) {
6294 res = VK_ERROR_OUT_OF_HOST_MEMORY;
6295 goto out;
6296 }
6297
6298 loader_add_implicit_layers(icd_term->this_instance, &implicit_layer_list, NULL, &icd_term->this_instance->instance_layer_list);
6299
6300 // Initialize dev_extension list within the physicalDevice object
6301 res = loader_init_device_extensions(icd_term->this_instance, phys_dev_term, icd_ext_count, icd_props_list, &icd_exts);
6302 if (res != VK_SUCCESS) {
6303 goto out;
6304 }
6305
6306 // We need to determine which implicit layers are active, and then add their extensions. This can't be cached as
6307 // it depends on results of environment variables (which can change).
6308 res = loader_add_to_ext_list(icd_term->this_instance, &all_exts, icd_exts.count, icd_exts.list);
6309 if (res != VK_SUCCESS) {
6310 goto out;
6311 }
6312
6313 loader_add_implicit_layers(icd_term->this_instance, &implicit_layer_list, NULL, &icd_term->this_instance->instance_layer_list);
6314
6315 for (uint32_t i = 0; i < implicit_layer_list.count; i++) {
6316 for (uint32_t j = 0; j < implicit_layer_list.list[i].device_extension_list.count; j++) {
6317 res = loader_add_to_ext_list(icd_term->this_instance, &all_exts, 1,
6318 &implicit_layer_list.list[i].device_extension_list.list[j].props);
6319 if (res != VK_SUCCESS) {
6320 goto out;
6321 }
6322 }
6323 }
6324 uint32_t capacity = *pPropertyCount;
6325 VkExtensionProperties *props = pProperties;
6326
6327 res = VK_SUCCESS;
6328 if (NULL != pProperties) {
6329 for (uint32_t i = 0; i < all_exts.count && i < capacity; i++) {
6330 props[i] = all_exts.list[i];
6331 }
6332
6333 // Wasn't enough space for the extensions, we did partial copy now return VK_INCOMPLETE
6334 if (capacity < all_exts.count) {
6335 res = VK_INCOMPLETE;
6336 } else {
6337 *pPropertyCount = all_exts.count;
6338 }
6339 } else {
6340 *pPropertyCount = all_exts.count;
6341 }
6342
6343 out:
6344
6345 if (NULL != implicit_layer_list.list) {
6346 loader_destroy_generic_list(icd_term->this_instance, (struct loader_generic_list *)&implicit_layer_list);
6347 }
6348 if (NULL != all_exts.list) {
6349 loader_destroy_generic_list(icd_term->this_instance, (struct loader_generic_list *)&all_exts);
6350 }
6351 if (NULL != icd_exts.list) {
6352 loader_destroy_generic_list(icd_term->this_instance, (struct loader_generic_list *)&icd_exts);
6353 }
6354 if (NULL == pProperties && NULL != icd_props_list) {
6355 loader_instance_heap_free(icd_term->this_instance, icd_props_list);
6356 }
6357 return res;
6358 }
6359
vk_string_validate(const int max_length,const char * utf8)6360 VkStringErrorFlags vk_string_validate(const int max_length, const char *utf8) {
6361 VkStringErrorFlags result = VK_STRING_ERROR_NONE;
6362 int num_char_bytes = 0;
6363 int i, j;
6364
6365 if (utf8 == NULL) {
6366 return VK_STRING_ERROR_NULL_PTR;
6367 }
6368
6369 for (i = 0; i <= max_length; i++) {
6370 if (utf8[i] == 0) {
6371 break;
6372 } else if (i == max_length) {
6373 result |= VK_STRING_ERROR_LENGTH;
6374 break;
6375 } else if ((utf8[i] >= 0x20) && (utf8[i] < 0x7f)) {
6376 num_char_bytes = 0;
6377 } else if ((utf8[i] & UTF8_ONE_BYTE_MASK) == UTF8_ONE_BYTE_CODE) {
6378 num_char_bytes = 1;
6379 } else if ((utf8[i] & UTF8_TWO_BYTE_MASK) == UTF8_TWO_BYTE_CODE) {
6380 num_char_bytes = 2;
6381 } else if ((utf8[i] & UTF8_THREE_BYTE_MASK) == UTF8_THREE_BYTE_CODE) {
6382 num_char_bytes = 3;
6383 } else {
6384 result = VK_STRING_ERROR_BAD_DATA;
6385 }
6386
6387 // Validate the following num_char_bytes of data
6388 for (j = 0; (j < num_char_bytes) && (i < max_length); j++) {
6389 if (++i == max_length) {
6390 result |= VK_STRING_ERROR_LENGTH;
6391 break;
6392 }
6393 if ((utf8[i] & UTF8_DATA_BYTE_MASK) != UTF8_DATA_BYTE_CODE) {
6394 result |= VK_STRING_ERROR_BAD_DATA;
6395 }
6396 }
6397 }
6398 return result;
6399 }
6400
terminator_EnumerateInstanceVersion(const VkEnumerateInstanceVersionChain * chain,uint32_t * pApiVersion)6401 VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumerateInstanceVersion(const VkEnumerateInstanceVersionChain *chain,
6402 uint32_t *pApiVersion) {
6403 // NOTE: The Vulkan WG doesn't want us checking pApiVersion for NULL, but instead
6404 // prefers us crashing.
6405 *pApiVersion = VK_HEADER_VERSION_COMPLETE;
6406 return VK_SUCCESS;
6407 }
6408
6409 VKAPI_ATTR VkResult VKAPI_CALL
terminator_EnumerateInstanceExtensionProperties(const VkEnumerateInstanceExtensionPropertiesChain * chain,const char * pLayerName,uint32_t * pPropertyCount,VkExtensionProperties * pProperties)6410 terminator_EnumerateInstanceExtensionProperties(const VkEnumerateInstanceExtensionPropertiesChain *chain, const char *pLayerName,
6411 uint32_t *pPropertyCount, VkExtensionProperties *pProperties) {
6412 struct loader_extension_list *global_ext_list = NULL;
6413 struct loader_layer_list instance_layers;
6414 struct loader_extension_list local_ext_list;
6415 struct loader_icd_tramp_list icd_tramp_list;
6416 uint32_t copy_size;
6417 VkResult res = VK_SUCCESS;
6418
6419 memset(&local_ext_list, 0, sizeof(local_ext_list));
6420 memset(&instance_layers, 0, sizeof(instance_layers));
6421 memset(&icd_tramp_list, 0, sizeof(icd_tramp_list));
6422
6423 // Get layer libraries if needed
6424 if (pLayerName && strlen(pLayerName) != 0) {
6425 if (vk_string_validate(MaxLoaderStringLength, pLayerName) != VK_STRING_ERROR_NONE) {
6426 assert(VK_FALSE && "vkEnumerateInstanceExtensionProperties: pLayerName is too long or is badly formed");
6427 res = VK_ERROR_EXTENSION_NOT_PRESENT;
6428 goto out;
6429 }
6430
6431 loader_scan_for_layers(NULL, &instance_layers);
6432 for (uint32_t i = 0; i < instance_layers.count; i++) {
6433 struct loader_layer_properties *props = &instance_layers.list[i];
6434 if (strcmp(props->info.layerName, pLayerName) == 0) {
6435 global_ext_list = &props->instance_extension_list;
6436 break;
6437 }
6438 }
6439 } else {
6440 // Preload ICD libraries so subsequent calls to EnumerateInstanceExtensionProperties don't have to load them
6441 loader_preload_icds();
6442
6443 // Scan/discover all ICD libraries
6444 res = loader_icd_scan(NULL, &icd_tramp_list, NULL);
6445 // EnumerateInstanceExtensionProperties can't return anything other than OOM or VK_ERROR_LAYER_NOT_PRESENT
6446 if ((VK_SUCCESS != res && icd_tramp_list.count > 0) || res == VK_ERROR_OUT_OF_HOST_MEMORY) {
6447 goto out;
6448 }
6449 // Get extensions from all ICD's, merge so no duplicates
6450 res = loader_get_icd_loader_instance_extensions(NULL, &icd_tramp_list, &local_ext_list);
6451 if (VK_SUCCESS != res) {
6452 goto out;
6453 }
6454 loader_scanned_icd_clear(NULL, &icd_tramp_list);
6455
6456 // Append enabled implicit layers.
6457 loader_scan_for_implicit_layers(NULL, &instance_layers);
6458 for (uint32_t i = 0; i < instance_layers.count; i++) {
6459 if (!loader_implicit_layer_is_enabled(NULL, &instance_layers.list[i])) {
6460 continue;
6461 }
6462 struct loader_extension_list *ext_list = &instance_layers.list[i].instance_extension_list;
6463 loader_add_to_ext_list(NULL, &local_ext_list, ext_list->count, ext_list->list);
6464 }
6465
6466 global_ext_list = &local_ext_list;
6467 }
6468
6469 if (global_ext_list == NULL) {
6470 res = VK_ERROR_LAYER_NOT_PRESENT;
6471 goto out;
6472 }
6473
6474 if (pProperties == NULL) {
6475 *pPropertyCount = global_ext_list->count;
6476 goto out;
6477 }
6478
6479 copy_size = *pPropertyCount < global_ext_list->count ? *pPropertyCount : global_ext_list->count;
6480 for (uint32_t i = 0; i < copy_size; i++) {
6481 memcpy(&pProperties[i], &global_ext_list->list[i], sizeof(VkExtensionProperties));
6482 }
6483 *pPropertyCount = copy_size;
6484
6485 if (copy_size < global_ext_list->count) {
6486 res = VK_INCOMPLETE;
6487 goto out;
6488 }
6489
6490 out:
6491 loader_destroy_generic_list(NULL, (struct loader_generic_list *)&icd_tramp_list);
6492 loader_destroy_generic_list(NULL, (struct loader_generic_list *)&local_ext_list);
6493 loader_delete_layer_list_and_properties(NULL, &instance_layers);
6494 return res;
6495 }
6496
terminator_EnumerateInstanceLayerProperties(const VkEnumerateInstanceLayerPropertiesChain * chain,uint32_t * pPropertyCount,VkLayerProperties * pProperties)6497 VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumerateInstanceLayerProperties(const VkEnumerateInstanceLayerPropertiesChain *chain,
6498 uint32_t *pPropertyCount,
6499 VkLayerProperties *pProperties) {
6500 VkResult result = VK_SUCCESS;
6501 struct loader_layer_list instance_layer_list;
6502
6503 LOADER_PLATFORM_THREAD_ONCE(&once_init, loader_initialize);
6504
6505 uint32_t copy_size;
6506
6507 // Get layer libraries
6508 memset(&instance_layer_list, 0, sizeof(instance_layer_list));
6509 loader_scan_for_layers(NULL, &instance_layer_list);
6510
6511 if (pProperties == NULL) {
6512 *pPropertyCount = instance_layer_list.count;
6513 goto out;
6514 }
6515
6516 copy_size = (*pPropertyCount < instance_layer_list.count) ? *pPropertyCount : instance_layer_list.count;
6517 for (uint32_t i = 0; i < copy_size; i++) {
6518 memcpy(&pProperties[i], &instance_layer_list.list[i].info, sizeof(VkLayerProperties));
6519 }
6520
6521 *pPropertyCount = copy_size;
6522
6523 if (copy_size < instance_layer_list.count) {
6524 result = VK_INCOMPLETE;
6525 goto out;
6526 }
6527
6528 out:
6529
6530 loader_delete_layer_list_and_properties(NULL, &instance_layer_list);
6531 return result;
6532 }
6533
6534 // ---- Vulkan Core 1.1 terminators
6535
terminator_EnumeratePhysicalDeviceGroups(VkInstance instance,uint32_t * pPhysicalDeviceGroupCount,VkPhysicalDeviceGroupProperties * pPhysicalDeviceGroupProperties)6536 VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumeratePhysicalDeviceGroups(
6537 VkInstance instance, uint32_t *pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties) {
6538 struct loader_instance *inst = (struct loader_instance *)instance;
6539
6540 VkResult res = VK_SUCCESS;
6541 struct loader_icd_term *icd_term;
6542 uint32_t total_count = 0;
6543 uint32_t cur_icd_group_count = 0;
6544 VkPhysicalDeviceGroupPropertiesKHR **new_phys_dev_groups = NULL;
6545 struct loader_physical_device_group_term *local_phys_dev_groups = NULL;
6546 PFN_vkEnumeratePhysicalDeviceGroups fpEnumeratePhysicalDeviceGroups = NULL;
6547 struct loader_phys_dev_per_icd *sorted_phys_dev_array = NULL;
6548 uint32_t sorted_count = 0;
6549
6550 // For each ICD, query the number of physical device groups, and then get an
6551 // internal value for those physical devices.
6552 icd_term = inst->icd_terms;
6553 for (uint32_t icd_idx = 0; NULL != icd_term; icd_term = icd_term->next, icd_idx++) {
6554 // Get the function pointer to use to call into the ICD. This could be the core or KHR version
6555 if (inst->enabled_known_extensions.khr_device_group_creation) {
6556 fpEnumeratePhysicalDeviceGroups = icd_term->dispatch.EnumeratePhysicalDeviceGroupsKHR;
6557 } else {
6558 fpEnumeratePhysicalDeviceGroups = icd_term->dispatch.EnumeratePhysicalDeviceGroups;
6559 }
6560
6561 cur_icd_group_count = 0;
6562 if (NULL == fpEnumeratePhysicalDeviceGroups) {
6563 // Treat each ICD's GPU as it's own group if the extension isn't supported
6564 res = icd_term->dispatch.EnumeratePhysicalDevices(icd_term->instance, &cur_icd_group_count, NULL);
6565 if (res != VK_SUCCESS) {
6566 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6567 "terminator_EnumeratePhysicalDeviceGroups: Failed during dispatch call of \'EnumeratePhysicalDevices\' "
6568 "to ICD %d to get plain phys dev count.",
6569 icd_idx);
6570 continue;
6571 }
6572 } else {
6573 // Query the actual group info
6574 res = fpEnumeratePhysicalDeviceGroups(icd_term->instance, &cur_icd_group_count, NULL);
6575 if (res != VK_SUCCESS) {
6576 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6577 "terminator_EnumeratePhysicalDeviceGroups: Failed during dispatch call of "
6578 "\'EnumeratePhysicalDeviceGroups\' to ICD %d to get count.",
6579 icd_idx);
6580 continue;
6581 }
6582 }
6583 total_count += cur_icd_group_count;
6584 }
6585
6586 // If GPUs not sorted yet, look through them and generate list of all available GPUs
6587 if (0 == total_count || 0 == inst->total_gpu_count) {
6588 res = setup_loader_term_phys_devs(inst);
6589 if (VK_SUCCESS != res) {
6590 goto out;
6591 }
6592 }
6593
6594 if (NULL != pPhysicalDeviceGroupProperties) {
6595 // Create an array for the new physical device groups, which will be stored
6596 // in the instance for the Terminator code.
6597 new_phys_dev_groups = (VkPhysicalDeviceGroupProperties **)loader_instance_heap_calloc(
6598 inst, total_count * sizeof(VkPhysicalDeviceGroupProperties *), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
6599 if (NULL == new_phys_dev_groups) {
6600 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6601 "terminator_EnumeratePhysicalDeviceGroups: Failed to allocate new physical device group array of size %d",
6602 total_count);
6603 res = VK_ERROR_OUT_OF_HOST_MEMORY;
6604 goto out;
6605 }
6606
6607 // Create a temporary array (on the stack) to keep track of the
6608 // returned VkPhysicalDevice values.
6609 local_phys_dev_groups = loader_stack_alloc(sizeof(struct loader_physical_device_group_term) * total_count);
6610 // Initialize the memory to something valid
6611 memset(local_phys_dev_groups, 0, sizeof(struct loader_physical_device_group_term) * total_count);
6612
6613 #if defined(_WIN32)
6614 // Get the physical devices supported by platform sorting mechanism into a separate list
6615 res = windows_read_sorted_physical_devices(inst, &sorted_count, &sorted_phys_dev_array);
6616 if (VK_SUCCESS != res) {
6617 goto out;
6618 }
6619 #endif
6620
6621 cur_icd_group_count = 0;
6622 icd_term = inst->icd_terms;
6623 for (uint8_t icd_idx = 0; NULL != icd_term; icd_term = icd_term->next, icd_idx++) {
6624 uint32_t count_this_time = total_count - cur_icd_group_count;
6625
6626 // Get the function pointer to use to call into the ICD. This could be the core or KHR version
6627 if (inst->enabled_known_extensions.khr_device_group_creation) {
6628 fpEnumeratePhysicalDeviceGroups = icd_term->dispatch.EnumeratePhysicalDeviceGroupsKHR;
6629 } else {
6630 fpEnumeratePhysicalDeviceGroups = icd_term->dispatch.EnumeratePhysicalDeviceGroups;
6631 }
6632
6633 if (NULL == fpEnumeratePhysicalDeviceGroups) {
6634 icd_term->dispatch.EnumeratePhysicalDevices(icd_term->instance, &count_this_time, NULL);
6635
6636 VkPhysicalDevice *phys_dev_array = loader_stack_alloc(sizeof(VkPhysicalDevice) * count_this_time);
6637 if (NULL == phys_dev_array) {
6638 loader_log(
6639 inst, VULKAN_LOADER_ERROR_BIT, 0,
6640 "terminator_EnumeratePhysicalDeviceGroups: Failed to allocate local physical device array of size %d",
6641 count_this_time);
6642 res = VK_ERROR_OUT_OF_HOST_MEMORY;
6643 goto out;
6644 }
6645
6646 res = icd_term->dispatch.EnumeratePhysicalDevices(icd_term->instance, &count_this_time, phys_dev_array);
6647 if (res != VK_SUCCESS) {
6648 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6649 "terminator_EnumeratePhysicalDeviceGroups: Failed during dispatch call of "
6650 "\'EnumeratePhysicalDevices\' to ICD %d to get plain phys dev count.",
6651 icd_idx);
6652 goto out;
6653 }
6654
6655 // Add each GPU as it's own group
6656 for (uint32_t indiv_gpu = 0; indiv_gpu < count_this_time; indiv_gpu++) {
6657 uint32_t cur_index = indiv_gpu + cur_icd_group_count;
6658 local_phys_dev_groups[cur_index].this_icd_term = icd_term;
6659 local_phys_dev_groups[cur_index].icd_index = icd_idx;
6660 local_phys_dev_groups[cur_index].group_props.physicalDeviceCount = 1;
6661 local_phys_dev_groups[cur_index].group_props.physicalDevices[0] = phys_dev_array[indiv_gpu];
6662 }
6663
6664 } else {
6665 res = fpEnumeratePhysicalDeviceGroups(icd_term->instance, &count_this_time, NULL);
6666 if (res != VK_SUCCESS) {
6667 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6668 "terminator_EnumeratePhysicalDeviceGroups: Failed during dispatch call of "
6669 "\'EnumeratePhysicalDeviceGroups\' to ICD %d to get group count.",
6670 icd_idx);
6671 goto out;
6672 }
6673 if (cur_icd_group_count + count_this_time < *pPhysicalDeviceGroupCount) {
6674 // The total amount is still less than the amount of physical device group data passed in
6675 // by the callee. Therefore, we don't have to allocate any temporary structures and we
6676 // can just use the data that was passed in.
6677 res = fpEnumeratePhysicalDeviceGroups(icd_term->instance, &count_this_time,
6678 &pPhysicalDeviceGroupProperties[cur_icd_group_count]);
6679 if (res != VK_SUCCESS) {
6680 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6681 "terminator_EnumeratePhysicalDeviceGroups: Failed during dispatch call of "
6682 "\'EnumeratePhysicalDeviceGroups\' to ICD %d to get group information.",
6683 icd_idx);
6684 goto out;
6685 }
6686 for (uint32_t group = 0; group < count_this_time; ++group) {
6687 uint32_t cur_index = group + cur_icd_group_count;
6688 local_phys_dev_groups[cur_index].group_props = pPhysicalDeviceGroupProperties[cur_index];
6689 local_phys_dev_groups[cur_index].this_icd_term = icd_term;
6690 local_phys_dev_groups[cur_index].icd_index = icd_idx;
6691 }
6692 } else {
6693 // There's not enough space in the callee's allocated pPhysicalDeviceGroupProperties structs,
6694 // so we have to allocate temporary versions to collect all the data. However, we need to make
6695 // sure that at least the ones we do query utilize any pNext data in the callee's version.
6696 VkPhysicalDeviceGroupProperties *tmp_group_props =
6697 loader_stack_alloc(count_this_time * sizeof(VkPhysicalDeviceGroupProperties));
6698 for (uint32_t group = 0; group < count_this_time; group++) {
6699 tmp_group_props[group].sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES_KHR;
6700 uint32_t cur_index = group + cur_icd_group_count;
6701 if (*pPhysicalDeviceGroupCount > cur_index) {
6702 tmp_group_props[group].pNext = pPhysicalDeviceGroupProperties[cur_index].pNext;
6703 } else {
6704 tmp_group_props[group].pNext = NULL;
6705 }
6706 tmp_group_props[group].subsetAllocation = false;
6707 }
6708
6709 res = fpEnumeratePhysicalDeviceGroups(icd_term->instance, &count_this_time, tmp_group_props);
6710 if (res != VK_SUCCESS) {
6711 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6712 "terminator_EnumeratePhysicalDeviceGroups: Failed during dispatch call of "
6713 "\'EnumeratePhysicalDeviceGroups\' to ICD %d to get group information for temp data.",
6714 icd_idx);
6715 goto out;
6716 }
6717 for (uint32_t group = 0; group < count_this_time; ++group) {
6718 uint32_t cur_index = group + cur_icd_group_count;
6719 local_phys_dev_groups[cur_index].group_props = tmp_group_props[group];
6720 local_phys_dev_groups[cur_index].this_icd_term = icd_term;
6721 local_phys_dev_groups[cur_index].icd_index = icd_idx;
6722 }
6723 }
6724 if (VK_SUCCESS != res) {
6725 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6726 "terminator_EnumeratePhysicalDeviceGroups: Failed during dispatch call of "
6727 "\'EnumeratePhysicalDeviceGroups\' to ICD %d to get content.",
6728 icd_idx);
6729 goto out;
6730 }
6731 }
6732
6733 cur_icd_group_count += count_this_time;
6734 }
6735
6736 #ifdef LOADER_ENABLE_LINUX_SORT
6737 if (is_linux_sort_enabled(inst)) {
6738 // Get the physical devices supported by platform sorting mechanism into a separate list
6739 res = linux_sort_physical_device_groups(inst, total_count, local_phys_dev_groups);
6740 }
6741 #elif defined(_WIN32)
6742 // The Windows sorting information is only on physical devices. We need to take that and convert it to the group
6743 // information if it's present.
6744 if (sorted_count > 0) {
6745 res =
6746 windows_sort_physical_device_groups(inst, total_count, local_phys_dev_groups, sorted_count, sorted_phys_dev_array);
6747 }
6748 #endif // LOADER_ENABLE_LINUX_SORT
6749
6750 // Just to be safe, make sure we successfully completed setup_loader_term_phys_devs above
6751 // before attempting to do the following. By verifying that setup_loader_term_phys_devs ran
6752 // first, it guarantees that each physical device will have a loader-specific handle.
6753 if (NULL != inst->phys_devs_term) {
6754 for (uint32_t group = 0; group < total_count; group++) {
6755 for (uint32_t group_gpu = 0; group_gpu < local_phys_dev_groups[group].group_props.physicalDeviceCount;
6756 group_gpu++) {
6757 bool found = false;
6758 for (uint32_t term_gpu = 0; term_gpu < inst->phys_dev_count_term; term_gpu++) {
6759 if (local_phys_dev_groups[group].group_props.physicalDevices[group_gpu] ==
6760 inst->phys_devs_term[term_gpu]->phys_dev) {
6761 local_phys_dev_groups[group].group_props.physicalDevices[group_gpu] =
6762 (VkPhysicalDevice)inst->phys_devs_term[term_gpu];
6763 found = true;
6764 break;
6765 }
6766 }
6767 if (!found) {
6768 loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
6769 "terminator_EnumeratePhysicalDeviceGroups: Failed to find GPU %d in group %d returned by "
6770 "\'EnumeratePhysicalDeviceGroups\' in list returned by \'EnumeratePhysicalDevices\'",
6771 group_gpu, group);
6772 res = VK_ERROR_INITIALIZATION_FAILED;
6773 goto out;
6774 }
6775 }
6776 }
6777 }
6778
6779 uint32_t idx = 0;
6780
6781 // Copy or create everything to fill the new array of physical device groups
6782 for (uint32_t group = 0; group < total_count; group++) {
6783 // Skip groups which have been included through sorting
6784 if (local_phys_dev_groups[group].group_props.physicalDeviceCount == 0) {
6785 continue;
6786 }
6787
6788 // Find the VkPhysicalDeviceGroupProperties object in local_phys_dev_groups
6789 VkPhysicalDeviceGroupProperties *group_properties = &local_phys_dev_groups[group].group_props;
6790
6791 // Check if this physical device group with the same contents is already in the old buffer
6792 for (uint32_t old_idx = 0; old_idx < inst->phys_dev_group_count_term; old_idx++) {
6793 if (NULL != group_properties && NULL != inst->phys_dev_groups_term[old_idx] &&
6794 group_properties->physicalDeviceCount == inst->phys_dev_groups_term[old_idx]->physicalDeviceCount) {
6795 bool found_all_gpus = true;
6796 for (uint32_t old_gpu = 0; old_gpu < inst->phys_dev_groups_term[old_idx]->physicalDeviceCount; old_gpu++) {
6797 bool found_gpu = false;
6798 for (uint32_t new_gpu = 0; new_gpu < group_properties->physicalDeviceCount; new_gpu++) {
6799 if (group_properties->physicalDevices[new_gpu] ==
6800 inst->phys_dev_groups_term[old_idx]->physicalDevices[old_gpu]) {
6801 found_gpu = true;
6802 break;
6803 }
6804 }
6805
6806 if (!found_gpu) {
6807 found_all_gpus = false;
6808 break;
6809 }
6810 }
6811 if (!found_all_gpus) {
6812 continue;
6813 } else {
6814 new_phys_dev_groups[idx] = inst->phys_dev_groups_term[old_idx];
6815 break;
6816 }
6817 }
6818 }
6819 // If this physical device group isn't in the old buffer, create it
6820 if (group_properties != NULL && NULL == new_phys_dev_groups[idx]) {
6821 new_phys_dev_groups[idx] = (VkPhysicalDeviceGroupPropertiesKHR *)loader_instance_heap_alloc(
6822 inst, sizeof(VkPhysicalDeviceGroupPropertiesKHR), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
6823 if (NULL == new_phys_dev_groups[idx]) {
6824 loader_log(
6825 inst, VULKAN_LOADER_ERROR_BIT, 0,
6826 "terminator_EnumeratePhysicalDeviceGroups: Failed to allocate physical device group Terminator object %d",
6827 idx);
6828 total_count = idx;
6829 res = VK_ERROR_OUT_OF_HOST_MEMORY;
6830 goto out;
6831 }
6832 memcpy(new_phys_dev_groups[idx], group_properties, sizeof(VkPhysicalDeviceGroupPropertiesKHR));
6833 }
6834
6835 ++idx;
6836 }
6837 }
6838
6839 out:
6840
6841 if (NULL != pPhysicalDeviceGroupProperties) {
6842 if (VK_SUCCESS != res) {
6843 if (NULL != new_phys_dev_groups) {
6844 // We've encountered an error, so we should free the new buffers.
6845 for (uint32_t i = 0; i < total_count; i++) {
6846 // If an OOM occurred inside the copying of the new physical device groups into the existing array will leave
6847 // some of the old physical device groups in the array which may have been copied into the new array, leading to
6848 // them being freed twice. To avoid this we just make sure to not delete physical device groups which were
6849 // copied.
6850 bool found = false;
6851 if (NULL != inst->phys_devs_term) {
6852 for (uint32_t old_idx = 0; old_idx < inst->phys_dev_group_count_term; old_idx++) {
6853 if (new_phys_dev_groups[i] == inst->phys_dev_groups_term[old_idx]) {
6854 found = true;
6855 break;
6856 }
6857 }
6858 }
6859 if (!found) {
6860 loader_instance_heap_free(inst, new_phys_dev_groups[i]);
6861 }
6862 }
6863 loader_instance_heap_free(inst, new_phys_dev_groups);
6864 }
6865 } else {
6866 if (NULL != inst->phys_dev_groups_term) {
6867 // Free everything in the old array that was not copied into the new array
6868 // here. We can't attempt to do that before here since the previous loop
6869 // looking before the "out:" label may hit an out of memory condition resulting
6870 // in memory leaking.
6871 for (uint32_t i = 0; i < inst->phys_dev_group_count_term; i++) {
6872 bool found = false;
6873 for (uint32_t j = 0; j < total_count; j++) {
6874 if (inst->phys_dev_groups_term[i] == new_phys_dev_groups[j]) {
6875 found = true;
6876 break;
6877 }
6878 }
6879 if (!found) {
6880 loader_instance_heap_free(inst, inst->phys_dev_groups_term[i]);
6881 }
6882 }
6883 loader_instance_heap_free(inst, inst->phys_dev_groups_term);
6884 }
6885
6886 // Swap in the new physical device group list
6887 inst->phys_dev_group_count_term = total_count;
6888 inst->phys_dev_groups_term = new_phys_dev_groups;
6889 }
6890
6891 if (sorted_phys_dev_array != NULL) {
6892 for (uint32_t i = 0; i < sorted_count; ++i) {
6893 if (sorted_phys_dev_array[i].device_count > 0 && sorted_phys_dev_array[i].physical_devices != NULL) {
6894 loader_instance_heap_free(inst, sorted_phys_dev_array[i].physical_devices);
6895 }
6896 }
6897 loader_instance_heap_free(inst, sorted_phys_dev_array);
6898 }
6899
6900 uint32_t copy_count = inst->phys_dev_group_count_term;
6901 if (NULL != pPhysicalDeviceGroupProperties) {
6902 if (copy_count > *pPhysicalDeviceGroupCount) {
6903 copy_count = *pPhysicalDeviceGroupCount;
6904 loader_log(inst, VULKAN_LOADER_INFO_BIT, 0,
6905 "terminator_EnumeratePhysicalDeviceGroups : Trimming device count from %d to %d.",
6906 inst->phys_dev_group_count_term, copy_count);
6907 res = VK_INCOMPLETE;
6908 }
6909
6910 for (uint32_t i = 0; i < copy_count; i++) {
6911 memcpy(&pPhysicalDeviceGroupProperties[i], inst->phys_dev_groups_term[i], sizeof(VkPhysicalDeviceGroupProperties));
6912 }
6913 }
6914
6915 *pPhysicalDeviceGroupCount = copy_count;
6916
6917 } else {
6918 *pPhysicalDeviceGroupCount = total_count;
6919 }
6920 return res;
6921 }
6922