1 /*
2 *
3 * Copyright (c) 2014-2016 The Khronos Group Inc.
4 * Copyright (c) 2014-2016 Valve Corporation
5 * Copyright (c) 2014-2016 LunarG, Inc.
6 * Copyright (C) 2015 Google Inc.
7 *
8 * Licensed under the Apache License, Version 2.0 (the "License");
9 * you may not use this file except in compliance with the License.
10 * You may obtain a copy of the License at
11 *
12 * http://www.apache.org/licenses/LICENSE-2.0
13 *
14 * Unless required by applicable law or agreed to in writing, software
15 * distributed under the License is distributed on an "AS IS" BASIS,
16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 * See the License for the specific language governing permissions and
18 * limitations under the License.
19
20 *
21 * Author: Jon Ashburn <jon@lunarg.com>
22 * Author: Courtney Goeltzenleuchter <courtney@LunarG.com>
23 *
24 */
25
26 #define _GNU_SOURCE
27 #include <stdio.h>
28 #include <stdlib.h>
29 #include <stdarg.h>
30 #include <stdbool.h>
31 #include <string.h>
32
33 #include <sys/types.h>
34 #if defined(_WIN32)
35 #include "dirent_on_windows.h"
36 #else // _WIN32
37 #include <dirent.h>
38 #endif // _WIN32
39 #include "vk_loader_platform.h"
40 #include "loader.h"
41 #include "gpa_helper.h"
42 #include "table_ops.h"
43 #include "debug_report.h"
44 #include "wsi.h"
45 #include "extensions.h"
46 #include "vulkan/vk_icd.h"
47 #include "cJSON.h"
48 #include "murmurhash.h"
49
50 #if defined(__GNUC__)
51 #if (__GLIBC__ < 2) || ((__GLIBC__ == 2) && (__GLIBC_MINOR__ < 17))
52 #define secure_getenv __secure_getenv
53 #endif
54 #endif
55
56 struct loader_struct loader = {0};
57 // TLS for instance for alloc/free callbacks
58 THREAD_LOCAL_DECL struct loader_instance *tls_instance;
59
60 static size_t loader_platform_combine_path(char *dest, size_t len, ...);
61
62 struct loader_phys_dev_per_icd {
63 uint32_t count;
64 VkPhysicalDevice *phys_devs;
65 struct loader_icd *this_icd;
66 };
67
68 enum loader_debug {
69 LOADER_INFO_BIT = 0x01,
70 LOADER_WARN_BIT = 0x02,
71 LOADER_PERF_BIT = 0x04,
72 LOADER_ERROR_BIT = 0x08,
73 LOADER_DEBUG_BIT = 0x10,
74 };
75
76 uint32_t g_loader_debug = 0;
77 uint32_t g_loader_log_msgs = 0;
78
79 // thread safety lock for accessing global data structures such as "loader"
80 // all entrypoints on the instance chain need to be locked except GPA
81 // additionally CreateDevice and DestroyDevice needs to be locked
82 loader_platform_thread_mutex loader_lock;
83 loader_platform_thread_mutex loader_json_lock;
84
85 const char *std_validation_str = "VK_LAYER_LUNARG_standard_validation";
86
87 // This table contains the loader's instance dispatch table, which contains
88 // default functions if no instance layers are activated. This contains
89 // pointers to "terminator functions".
90 const VkLayerInstanceDispatchTable instance_disp = {
91 .GetInstanceProcAddr = vkGetInstanceProcAddr,
92 .DestroyInstance = terminator_DestroyInstance,
93 .EnumeratePhysicalDevices = terminator_EnumeratePhysicalDevices,
94 .GetPhysicalDeviceFeatures = terminator_GetPhysicalDeviceFeatures,
95 .GetPhysicalDeviceFormatProperties =
96 terminator_GetPhysicalDeviceFormatProperties,
97 .GetPhysicalDeviceImageFormatProperties =
98 terminator_GetPhysicalDeviceImageFormatProperties,
99 .GetPhysicalDeviceProperties = terminator_GetPhysicalDeviceProperties,
100 .GetPhysicalDeviceQueueFamilyProperties =
101 terminator_GetPhysicalDeviceQueueFamilyProperties,
102 .GetPhysicalDeviceMemoryProperties =
103 terminator_GetPhysicalDeviceMemoryProperties,
104 .EnumerateDeviceExtensionProperties =
105 terminator_EnumerateDeviceExtensionProperties,
106 .EnumerateDeviceLayerProperties = terminator_EnumerateDeviceLayerProperties,
107 .GetPhysicalDeviceSparseImageFormatProperties =
108 terminator_GetPhysicalDeviceSparseImageFormatProperties,
109 .DestroySurfaceKHR = terminator_DestroySurfaceKHR,
110 .GetPhysicalDeviceSurfaceSupportKHR =
111 terminator_GetPhysicalDeviceSurfaceSupportKHR,
112 .GetPhysicalDeviceSurfaceCapabilitiesKHR =
113 terminator_GetPhysicalDeviceSurfaceCapabilitiesKHR,
114 .GetPhysicalDeviceSurfaceFormatsKHR =
115 terminator_GetPhysicalDeviceSurfaceFormatsKHR,
116 .GetPhysicalDeviceSurfacePresentModesKHR =
117 terminator_GetPhysicalDeviceSurfacePresentModesKHR,
118 .CreateDebugReportCallbackEXT = terminator_CreateDebugReportCallback,
119 .DestroyDebugReportCallbackEXT = terminator_DestroyDebugReportCallback,
120 .DebugReportMessageEXT = terminator_DebugReportMessage,
121 .GetPhysicalDeviceExternalImageFormatPropertiesNV =
122 terminator_GetPhysicalDeviceExternalImageFormatPropertiesNV,
123 #ifdef VK_USE_PLATFORM_MIR_KHR
124 .CreateMirSurfaceKHR = terminator_CreateMirSurfaceKHR,
125 .GetPhysicalDeviceMirPresentationSupportKHR =
126 terminator_GetPhysicalDeviceMirPresentationSupportKHR,
127 #endif
128 #ifdef VK_USE_PLATFORM_WAYLAND_KHR
129 .CreateWaylandSurfaceKHR = terminator_CreateWaylandSurfaceKHR,
130 .GetPhysicalDeviceWaylandPresentationSupportKHR =
131 terminator_GetPhysicalDeviceWaylandPresentationSupportKHR,
132 #endif
133 #ifdef VK_USE_PLATFORM_WIN32_KHR
134 .CreateWin32SurfaceKHR = terminator_CreateWin32SurfaceKHR,
135 .GetPhysicalDeviceWin32PresentationSupportKHR =
136 terminator_GetPhysicalDeviceWin32PresentationSupportKHR,
137 #endif
138 #ifdef VK_USE_PLATFORM_XCB_KHR
139 .CreateXcbSurfaceKHR = terminator_CreateXcbSurfaceKHR,
140 .GetPhysicalDeviceXcbPresentationSupportKHR =
141 terminator_GetPhysicalDeviceXcbPresentationSupportKHR,
142 #endif
143 #ifdef VK_USE_PLATFORM_XLIB_KHR
144 .CreateXlibSurfaceKHR = terminator_CreateXlibSurfaceKHR,
145 .GetPhysicalDeviceXlibPresentationSupportKHR =
146 terminator_GetPhysicalDeviceXlibPresentationSupportKHR,
147 #endif
148 #ifdef VK_USE_PLATFORM_ANDROID_KHR
149 .CreateAndroidSurfaceKHR = terminator_CreateAndroidSurfaceKHR,
150 #endif
151 .GetPhysicalDeviceDisplayPropertiesKHR =
152 terminator_GetPhysicalDeviceDisplayPropertiesKHR,
153 .GetPhysicalDeviceDisplayPlanePropertiesKHR =
154 terminator_GetPhysicalDeviceDisplayPlanePropertiesKHR,
155 .GetDisplayPlaneSupportedDisplaysKHR =
156 terminator_GetDisplayPlaneSupportedDisplaysKHR,
157 .GetDisplayModePropertiesKHR = terminator_GetDisplayModePropertiesKHR,
158 .CreateDisplayModeKHR = terminator_CreateDisplayModeKHR,
159 .GetDisplayPlaneCapabilitiesKHR = terminator_GetDisplayPlaneCapabilitiesKHR,
160 .CreateDisplayPlaneSurfaceKHR = terminator_CreateDisplayPlaneSurfaceKHR,
161 };
162
163 LOADER_PLATFORM_THREAD_ONCE_DECLARATION(once_init);
164
loader_instance_heap_alloc(const struct loader_instance * instance,size_t size,VkSystemAllocationScope alloc_scope)165 void *loader_instance_heap_alloc(const struct loader_instance *instance,
166 size_t size,
167 VkSystemAllocationScope alloc_scope) {
168 void *pMemory = NULL;
169 #if (DEBUG_DISABLE_APP_ALLOCATORS == 1)
170 {
171 #else
172 if (instance && instance->alloc_callbacks.pfnAllocation) {
173 /* These are internal structures, so it's best to align everything to
174 * the largest unit size which is the size of a uint64_t.
175 */
176 pMemory = instance->alloc_callbacks.pfnAllocation(
177 instance->alloc_callbacks.pUserData, size, sizeof(uint64_t),
178 alloc_scope);
179 } else {
180 #endif
181 pMemory = malloc(size);
182 }
183 return pMemory;
184 }
185
186 void loader_instance_heap_free(const struct loader_instance *instance,
187 void *pMemory) {
188 if (pMemory != NULL) {
189 #if (DEBUG_DISABLE_APP_ALLOCATORS == 1)
190 {
191 #else
192 if (instance && instance->alloc_callbacks.pfnFree) {
193 instance->alloc_callbacks.pfnFree(
194 instance->alloc_callbacks.pUserData, pMemory);
195 } else {
196 #endif
197 free(pMemory);
198 }
199 }
200 }
201
202 void *loader_instance_heap_realloc(const struct loader_instance *instance,
203 void *pMemory, size_t orig_size, size_t size,
204 VkSystemAllocationScope alloc_scope) {
205 void *pNewMem = NULL;
206 if (pMemory == NULL || orig_size == 0) {
207 pNewMem = loader_instance_heap_alloc(instance, size, alloc_scope);
208 } else if (size == 0) {
209 loader_instance_heap_free(instance, pMemory);
210 #if (DEBUG_DISABLE_APP_ALLOCATORS == 1)
211 #else
212 } else if (instance && instance->alloc_callbacks.pfnReallocation) {
213 /* These are internal structures, so it's best to align everything to
214 * the largest unit size which is the size of a uint64_t.
215 */
216 pNewMem = instance->alloc_callbacks.pfnReallocation(
217 instance->alloc_callbacks.pUserData, pMemory, size,
218 sizeof(uint64_t), alloc_scope);
219 #endif
220 } else {
221 pNewMem = realloc(pMemory, size);
222 }
223 return pNewMem;
224 }
225
226 void *loader_instance_tls_heap_alloc(size_t size) {
227 return loader_instance_heap_alloc(tls_instance, size,
228 VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
229 }
230
231 void loader_instance_tls_heap_free(void *pMemory) {
232 loader_instance_heap_free(tls_instance, pMemory);
233 }
234
235 void *loader_device_heap_alloc(const struct loader_device *device, size_t size,
236 VkSystemAllocationScope alloc_scope) {
237 void *pMemory = NULL;
238 #if (DEBUG_DISABLE_APP_ALLOCATORS == 1)
239 {
240 #else
241 if (device && device->alloc_callbacks.pfnAllocation) {
242 /* These are internal structures, so it's best to align everything to
243 * the largest unit size which is the size of a uint64_t.
244 */
245 pMemory = device->alloc_callbacks.pfnAllocation(
246 device->alloc_callbacks.pUserData, size, sizeof(uint64_t),
247 alloc_scope);
248 } else {
249 #endif
250 pMemory = malloc(size);
251 }
252 return pMemory;
253 }
254
255 void loader_device_heap_free(const struct loader_device *device,
256 void *pMemory) {
257 if (pMemory != NULL) {
258 #if (DEBUG_DISABLE_APP_ALLOCATORS == 1)
259 {
260 #else
261 if (device && device->alloc_callbacks.pfnFree) {
262 device->alloc_callbacks.pfnFree(device->alloc_callbacks.pUserData,
263 pMemory);
264 } else {
265 #endif
266 free(pMemory);
267 }
268 }
269 }
270
271 void *loader_device_heap_realloc(const struct loader_device *device,
272 void *pMemory, size_t orig_size, size_t size,
273 VkSystemAllocationScope alloc_scope) {
274 void *pNewMem = NULL;
275 if (pMemory == NULL || orig_size == 0) {
276 pNewMem = loader_device_heap_alloc(device, size, alloc_scope);
277 } else if (size == 0) {
278 loader_device_heap_free(device, pMemory);
279 #if (DEBUG_DISABLE_APP_ALLOCATORS == 1)
280 #else
281 } else if (device && device->alloc_callbacks.pfnReallocation) {
282 /* These are internal structures, so it's best to align everything to
283 * the largest unit size which is the size of a uint64_t.
284 */
285 pNewMem = device->alloc_callbacks.pfnReallocation(
286 device->alloc_callbacks.pUserData, pMemory, size, sizeof(uint64_t),
287 alloc_scope);
288 #endif
289 } else {
290 pNewMem = realloc(pMemory, size);
291 }
292 return pNewMem;
293 }
294
295 // Environment variables
296 #if defined(__linux__)
297
298 static inline char *loader_getenv(const char *name,
299 const struct loader_instance *inst) {
300 // No allocation of memory necessary for Linux, but we should at least touch
301 // the inst pointer to get rid of compiler warnings.
302 (void)inst;
303 return getenv(name);
304 }
305 static inline void loader_free_getenv(const char *val,
306 const struct loader_instance *inst) {
307 // No freeing of memory necessary for Linux, but we should at least touch
308 // the val and inst pointers to get rid of compiler warnings.
309 (void)val;
310 (void)inst;
311 }
312
313 #elif defined(WIN32)
314
315 static inline char *loader_getenv(const char *name,
316 const struct loader_instance *inst) {
317 char *retVal;
318 DWORD valSize;
319
320 valSize = GetEnvironmentVariableA(name, NULL, 0);
321
322 // valSize DOES include the null terminator, so for any set variable
323 // will always be at least 1. If it's 0, the variable wasn't set.
324 if (valSize == 0)
325 return NULL;
326
327 // Allocate the space necessary for the registry entry
328 if (NULL != inst && NULL != inst->alloc_callbacks.pfnAllocation) {
329 retVal = (char *)inst->alloc_callbacks.pfnAllocation(
330 inst->alloc_callbacks.pUserData, valSize, sizeof(char *),
331 VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
332 } else {
333 retVal = (char *)malloc(valSize);
334 }
335
336 if (NULL != retVal) {
337 GetEnvironmentVariableA(name, retVal, valSize);
338 }
339
340 return retVal;
341 }
342
343 static inline void loader_free_getenv(char *val,
344 const struct loader_instance *inst) {
345 if (NULL != inst && NULL != inst->alloc_callbacks.pfnFree) {
346 inst->alloc_callbacks.pfnFree(inst->alloc_callbacks.pUserData, val);
347 } else {
348 free((void *)val);
349 }
350 }
351
352 #else
353
354 static inline char *loader_getenv(const char *name,
355 const struct loader_instance *inst) {
356 // stub func
357 (void)inst;
358 (void)name;
359 return NULL;
360 }
361 static inline void loader_free_getenv(const char *val,
362 const struct loader_instance *inst) {
363 // stub func
364 (void)val;
365 (void)inst;
366 }
367
368 #endif
369
370 void loader_log(const struct loader_instance *inst, VkFlags msg_type,
371 int32_t msg_code, const char *format, ...) {
372 char msg[512];
373 va_list ap;
374 int ret;
375
376 va_start(ap, format);
377 ret = vsnprintf(msg, sizeof(msg), format, ap);
378 if ((ret >= (int)sizeof(msg)) || ret < 0) {
379 msg[sizeof(msg) - 1] = '\0';
380 }
381 va_end(ap);
382
383 if (inst) {
384 util_DebugReportMessage(inst, msg_type,
385 VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
386 (uint64_t)inst, 0, msg_code, "loader", msg);
387 }
388
389 if (!(msg_type & g_loader_log_msgs)) {
390 return;
391 }
392
393 #if defined(WIN32)
394 OutputDebugString(msg);
395 OutputDebugString("\n");
396 #endif
397 fputs(msg, stderr);
398 fputc('\n', stderr);
399 }
400
401 VKAPI_ATTR VkResult VKAPI_CALL
402 vkSetInstanceDispatch(VkInstance instance, void *object) {
403
404 struct loader_instance *inst = loader_get_instance(instance);
405 if (!inst) {
406 return VK_ERROR_INITIALIZATION_FAILED;
407 }
408 loader_set_dispatch(object, inst->disp);
409 return VK_SUCCESS;
410 }
411
412 VKAPI_ATTR VkResult VKAPI_CALL
413 vkSetDeviceDispatch(VkDevice device, void *object) {
414 struct loader_device *dev;
415 struct loader_icd *icd = loader_get_icd_and_device(device, &dev, NULL);
416
417 if (!icd) {
418 return VK_ERROR_INITIALIZATION_FAILED;
419 }
420 loader_set_dispatch(object, &dev->loader_dispatch);
421 return VK_SUCCESS;
422 }
423
424 #if defined(WIN32)
425 static char *loader_get_next_path(char *path);
426 /**
427 * Find the list of registry files (names within a key) in key "location".
428 *
429 * This function looks in the registry (hive = DEFAULT_VK_REGISTRY_HIVE) key as
430 *given in "location"
431 * for a list or name/values which are added to a returned list (function return
432 *value).
433 * The DWORD values within the key must be 0 or they are skipped.
434 * Function return is a string with a ';' separated list of filenames.
435 * Function return is NULL if no valid name/value pairs are found in the key,
436 * or the key is not found.
437 *
438 * \returns
439 * A string list of filenames as pointer.
440 * When done using the returned string list, pointer should be freed.
441 */
442 static char *loader_get_registry_files(const struct loader_instance *inst,
443 char *location) {
444 LONG rtn_value;
445 HKEY hive, key;
446 DWORD access_flags;
447 char name[2048];
448 char *out = NULL;
449 char *loc = location;
450 char *next;
451 DWORD idx = 0;
452 DWORD name_size = sizeof(name);
453 DWORD value;
454 DWORD total_size = 4096;
455 DWORD value_size = sizeof(value);
456
457 while (*loc) {
458 next = loader_get_next_path(loc);
459 hive = DEFAULT_VK_REGISTRY_HIVE;
460 access_flags = KEY_QUERY_VALUE;
461 rtn_value = RegOpenKeyEx(hive, loc, 0, access_flags, &key);
462 if (rtn_value != ERROR_SUCCESS) {
463 // We still couldn't find the key, so give up:
464 loc = next;
465 continue;
466 }
467
468 while ((rtn_value = RegEnumValue(key, idx++, name, &name_size, NULL,
469 NULL, (LPBYTE)&value, &value_size)) ==
470 ERROR_SUCCESS) {
471 if (value_size == sizeof(value) && value == 0) {
472 if (out == NULL) {
473 out = loader_instance_heap_alloc(
474 inst, total_size, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
475 if (NULL == out) {
476 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
477 "Out of memory can't alloc space for registry data");
478 return NULL;
479 }
480 out[0] = '\0';
481 } else if (strlen(out) + name_size + 1 > total_size) {
482 out = loader_instance_heap_realloc(
483 inst, out, total_size, total_size * 2,
484 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
485 if (NULL == out) {
486 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
487 "Out of memory can't realloc space for registry data");
488 return NULL;
489 }
490 total_size *= 2;
491 }
492 if (strlen(out) == 0)
493 snprintf(out, name_size + 1, "%s", name);
494 else
495 snprintf(out + strlen(out), name_size + 2, "%c%s",
496 PATH_SEPERATOR, name);
497 }
498 name_size = 2048;
499 }
500 loc = next;
501 }
502
503 return out;
504 }
505
506 #endif // WIN32
507
508 /**
509 * Combine path elements, separating each element with the platform-specific
510 * directory separator, and save the combined string to a destination buffer,
511 * not exceeding the given length. Path elements are given as variadic args,
512 * with a NULL element terminating the list.
513 *
514 * \returns the total length of the combined string, not including an ASCII
515 * NUL termination character. This length may exceed the available storage:
516 * in this case, the written string will be truncated to avoid a buffer
517 * overrun, and the return value will greater than or equal to the storage
518 * size. A NULL argument may be provided as the destination buffer in order
519 * to determine the required string length without actually writing a string.
520 */
521
522 static size_t loader_platform_combine_path(char *dest, size_t len, ...) {
523 size_t required_len = 0;
524 va_list ap;
525 const char *component;
526
527 va_start(ap, len);
528
529 while ((component = va_arg(ap, const char *))) {
530 if (required_len > 0) {
531 // This path element is not the first non-empty element; prepend
532 // a directory separator if space allows
533 if (dest && required_len + 1 < len) {
534 snprintf(dest + required_len, len - required_len, "%c",
535 DIRECTORY_SYMBOL);
536 }
537 required_len++;
538 }
539
540 if (dest && required_len < len) {
541 strncpy(dest + required_len, component, len - required_len);
542 }
543 required_len += strlen(component);
544 }
545
546 va_end(ap);
547
548 // strncpy(3) won't add a NUL terminating byte in the event of truncation.
549 if (dest && required_len >= len) {
550 dest[len - 1] = '\0';
551 }
552
553 return required_len;
554 }
555
556 /**
557 * Given string of three part form "maj.min.pat" convert to a vulkan version
558 * number.
559 */
560 static uint32_t loader_make_version(char *vers_str) {
561 uint32_t vers = 0, major = 0, minor = 0, patch = 0;
562 char *vers_tok;
563
564 if (!vers_str) {
565 return vers;
566 }
567
568 vers_tok = strtok(vers_str, ".\"\n\r");
569 if (NULL != vers_tok) {
570 major = (uint16_t)atoi(vers_tok);
571 vers_tok = strtok(NULL, ".\"\n\r");
572 if (NULL != vers_tok) {
573 minor = (uint16_t)atoi(vers_tok);
574 vers_tok = strtok(NULL, ".\"\n\r");
575 if (NULL != vers_tok) {
576 patch = (uint16_t)atoi(vers_tok);
577 }
578 }
579 }
580
581 return VK_MAKE_VERSION(major, minor, patch);
582 }
583
584 bool compare_vk_extension_properties(const VkExtensionProperties *op1,
585 const VkExtensionProperties *op2) {
586 return strcmp(op1->extensionName, op2->extensionName) == 0 ? true : false;
587 }
588
589 /**
590 * Search the given ext_array for an extension
591 * matching the given vk_ext_prop
592 */
593 bool has_vk_extension_property_array(const VkExtensionProperties *vk_ext_prop,
594 const uint32_t count,
595 const VkExtensionProperties *ext_array) {
596 for (uint32_t i = 0; i < count; i++) {
597 if (compare_vk_extension_properties(vk_ext_prop, &ext_array[i]))
598 return true;
599 }
600 return false;
601 }
602
603 /**
604 * Search the given ext_list for an extension
605 * matching the given vk_ext_prop
606 */
607 bool has_vk_extension_property(const VkExtensionProperties *vk_ext_prop,
608 const struct loader_extension_list *ext_list) {
609 for (uint32_t i = 0; i < ext_list->count; i++) {
610 if (compare_vk_extension_properties(&ext_list->list[i], vk_ext_prop))
611 return true;
612 }
613 return false;
614 }
615
616 /**
617 * Search the given ext_list for a device extension matching the given ext_prop
618 */
619 bool has_vk_dev_ext_property(
620 const VkExtensionProperties *ext_prop,
621 const struct loader_device_extension_list *ext_list) {
622 for (uint32_t i = 0; i < ext_list->count; i++) {
623 if (compare_vk_extension_properties(&ext_list->list[i].props, ext_prop))
624 return true;
625 }
626 return false;
627 }
628
629 /*
630 * Search the given layer list for a layer matching the given layer name
631 */
632 static struct loader_layer_properties *
633 loader_get_layer_property(const char *name,
634 const struct loader_layer_list *layer_list) {
635 for (uint32_t i = 0; i < layer_list->count; i++) {
636 const VkLayerProperties *item = &layer_list->list[i].info;
637 if (strcmp(name, item->layerName) == 0)
638 return &layer_list->list[i];
639 }
640 return NULL;
641 }
642
643 /**
644 * Get the next unused layer property in the list. Init the property to zero.
645 */
646 static struct loader_layer_properties *
647 loader_get_next_layer_property(const struct loader_instance *inst,
648 struct loader_layer_list *layer_list) {
649 if (layer_list->capacity == 0) {
650 layer_list->list =
651 loader_instance_heap_alloc(
652 inst, sizeof(struct loader_layer_properties) * 64,
653 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
654 if (layer_list->list == NULL) {
655 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
656 "Out of memory can't add any layer properties to list");
657 return NULL;
658 }
659 memset(layer_list->list, 0,
660 sizeof(struct loader_layer_properties) * 64);
661 layer_list->capacity = sizeof(struct loader_layer_properties) * 64;
662 }
663
664 // ensure enough room to add an entry
665 if ((layer_list->count + 1) * sizeof(struct loader_layer_properties) >
666 layer_list->capacity) {
667 layer_list->list = loader_instance_heap_realloc(
668 inst, layer_list->list, layer_list->capacity,
669 layer_list->capacity * 2, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
670 if (layer_list->list == NULL) {
671 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
672 "realloc failed for layer list");
673 return NULL;
674 }
675 layer_list->capacity *= 2;
676 }
677
678 layer_list->count++;
679 return &(layer_list->list[layer_list->count - 1]);
680 }
681
682 /**
683 * Remove all layer properties entrys from the list
684 */
685 void loader_delete_layer_properties(const struct loader_instance *inst,
686 struct loader_layer_list *layer_list) {
687 uint32_t i, j;
688 struct loader_device_extension_list *dev_ext_list;
689 if (!layer_list)
690 return;
691
692 for (i = 0; i < layer_list->count; i++) {
693 loader_destroy_generic_list(
694 inst, (struct loader_generic_list *)&layer_list->list[i]
695 .instance_extension_list);
696 dev_ext_list = &layer_list->list[i].device_extension_list;
697 if (dev_ext_list->capacity > 0 &&
698 NULL != dev_ext_list->list &&
699 dev_ext_list->list->entrypoint_count > 0) {
700 for (j = 0; j < dev_ext_list->list->entrypoint_count; j++) {
701 loader_instance_heap_free(inst, dev_ext_list->list->entrypoints[j]);
702 }
703 loader_instance_heap_free(inst, dev_ext_list->list->entrypoints);
704 }
705 loader_destroy_generic_list(inst,
706 (struct loader_generic_list *)dev_ext_list);
707 }
708 layer_list->count = 0;
709
710 if (layer_list->capacity > 0) {
711 layer_list->capacity = 0;
712 loader_instance_heap_free(inst, layer_list->list);
713 }
714 }
715
716 static VkResult loader_add_instance_extensions(
717 const struct loader_instance *inst,
718 const PFN_vkEnumerateInstanceExtensionProperties fp_get_props,
719 const char *lib_name, struct loader_extension_list *ext_list) {
720 uint32_t i, count = 0;
721 VkExtensionProperties *ext_props;
722 VkResult res = VK_SUCCESS;
723
724 if (!fp_get_props) {
725 /* No EnumerateInstanceExtensionProperties defined */
726 goto out;
727 }
728
729 res = fp_get_props(NULL, &count, NULL);
730 if (res != VK_SUCCESS) {
731 loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
732 "Error getting Instance extension count from %s", lib_name);
733 goto out;
734 }
735
736 if (count == 0) {
737 /* No ExtensionProperties to report */
738 goto out;
739 }
740
741 ext_props = loader_stack_alloc(count * sizeof(VkExtensionProperties));
742
743 res = fp_get_props(NULL, &count, ext_props);
744 if (res != VK_SUCCESS) {
745 loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
746 "Error getting Instance extensions from %s", lib_name);
747 goto out;
748 }
749
750 for (i = 0; i < count; i++) {
751 char spec_version[64];
752
753 bool ext_unsupported =
754 wsi_unsupported_instance_extension(&ext_props[i]);
755 if (!ext_unsupported) {
756 snprintf(spec_version, sizeof(spec_version), "%d.%d.%d",
757 VK_MAJOR(ext_props[i].specVersion),
758 VK_MINOR(ext_props[i].specVersion),
759 VK_PATCH(ext_props[i].specVersion));
760 loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0,
761 "Instance Extension: %s (%s) version %s",
762 ext_props[i].extensionName, lib_name, spec_version);
763 res = loader_add_to_ext_list(inst, ext_list, 1, &ext_props[i]);
764 if (res != VK_SUCCESS) {
765 loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
766 "Failed to add %s to Instance extension list",
767 lib_name);
768 goto out;
769 }
770 }
771 }
772 out:
773 return res;
774 }
775
776 /*
777 * Initialize ext_list with the physical device extensions.
778 * The extension properties are passed as inputs in count and ext_props.
779 */
780 static VkResult
781 loader_init_device_extensions(const struct loader_instance *inst,
782 struct loader_physical_device *phys_dev,
783 uint32_t count, VkExtensionProperties *ext_props,
784 struct loader_extension_list *ext_list) {
785 VkResult res;
786 uint32_t i;
787
788 res = loader_init_generic_list(inst, (struct loader_generic_list *)ext_list,
789 sizeof(VkExtensionProperties));
790 if (VK_SUCCESS != res) {
791 return res;
792 }
793
794 for (i = 0; i < count; i++) {
795 char spec_version[64];
796
797 snprintf(spec_version, sizeof(spec_version), "%d.%d.%d",
798 VK_MAJOR(ext_props[i].specVersion),
799 VK_MINOR(ext_props[i].specVersion),
800 VK_PATCH(ext_props[i].specVersion));
801 loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0,
802 "Device Extension: %s (%s) version %s",
803 ext_props[i].extensionName,
804 phys_dev->this_icd->this_icd_lib->lib_name, spec_version);
805 res = loader_add_to_ext_list(inst, ext_list, 1, &ext_props[i]);
806 if (res != VK_SUCCESS)
807 return res;
808 }
809
810 return VK_SUCCESS;
811 }
812
813 VkResult loader_add_device_extensions(const struct loader_instance *inst,
814 PFN_vkEnumerateDeviceExtensionProperties
815 fpEnumerateDeviceExtensionProperties,
816 VkPhysicalDevice physical_device,
817 const char *lib_name,
818 struct loader_extension_list *ext_list) {
819 uint32_t i, count;
820 VkResult res;
821 VkExtensionProperties *ext_props;
822
823 res = fpEnumerateDeviceExtensionProperties(physical_device, NULL, &count,
824 NULL);
825 if (res == VK_SUCCESS && count > 0) {
826 ext_props = loader_stack_alloc(count * sizeof(VkExtensionProperties));
827 if (!ext_props) {
828 return VK_ERROR_OUT_OF_HOST_MEMORY;
829 }
830 res = fpEnumerateDeviceExtensionProperties(physical_device, NULL,
831 &count, ext_props);
832 if (res != VK_SUCCESS) {
833 return res;
834 }
835 for (i = 0; i < count; i++) {
836 char spec_version[64];
837
838 snprintf(spec_version, sizeof(spec_version), "%d.%d.%d",
839 VK_MAJOR(ext_props[i].specVersion),
840 VK_MINOR(ext_props[i].specVersion),
841 VK_PATCH(ext_props[i].specVersion));
842 loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0,
843 "Device Extension: %s (%s) version %s",
844 ext_props[i].extensionName, lib_name, spec_version);
845 res = loader_add_to_ext_list(inst, ext_list, 1, &ext_props[i]);
846 if (res != VK_SUCCESS)
847 return res;
848 }
849 } else {
850 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
851 "Error getting physical device extension info count from "
852 "library %s",
853 lib_name);
854 return res;
855 }
856
857 return VK_SUCCESS;
858 }
859
860 VkResult loader_init_generic_list(const struct loader_instance *inst,
861 struct loader_generic_list *list_info,
862 size_t element_size) {
863 size_t capacity = 32 * element_size;
864 list_info->count = 0;
865 list_info->capacity = 0;
866 list_info->list = loader_instance_heap_alloc(
867 inst, capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
868 if (list_info->list == NULL) {
869 return VK_ERROR_OUT_OF_HOST_MEMORY;
870 }
871 memset(list_info->list, 0, capacity);
872 list_info->capacity = capacity;
873 return VK_SUCCESS;
874 }
875
876 void loader_destroy_generic_list(const struct loader_instance *inst,
877 struct loader_generic_list *list) {
878 loader_instance_heap_free(inst, list->list);
879 list->count = 0;
880 list->capacity = 0;
881 }
882
883 /*
884 * Append non-duplicate extension properties defined in props
885 * to the given ext_list.
886 * Return
887 * Vk_SUCCESS on success
888 */
889 VkResult loader_add_to_ext_list(const struct loader_instance *inst,
890 struct loader_extension_list *ext_list,
891 uint32_t prop_list_count,
892 const VkExtensionProperties *props) {
893 uint32_t i;
894 const VkExtensionProperties *cur_ext;
895
896 if (ext_list->list == NULL || ext_list->capacity == 0) {
897 VkResult res = loader_init_generic_list(inst, (struct loader_generic_list *)ext_list,
898 sizeof(VkExtensionProperties));
899 if (VK_SUCCESS != res) {
900 return res;
901 }
902 }
903
904 for (i = 0; i < prop_list_count; i++) {
905 cur_ext = &props[i];
906
907 // look for duplicates
908 if (has_vk_extension_property(cur_ext, ext_list)) {
909 continue;
910 }
911
912 // add to list at end
913 // check for enough capacity
914 if (ext_list->count * sizeof(VkExtensionProperties) >=
915 ext_list->capacity) {
916
917 ext_list->list = loader_instance_heap_realloc(
918 inst, ext_list->list, ext_list->capacity,
919 ext_list->capacity * 2, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
920
921 if (ext_list->list == NULL)
922 return VK_ERROR_OUT_OF_HOST_MEMORY;
923
924 // double capacity
925 ext_list->capacity *= 2;
926 }
927
928 memcpy(&ext_list->list[ext_list->count], cur_ext,
929 sizeof(VkExtensionProperties));
930 ext_list->count++;
931 }
932 return VK_SUCCESS;
933 }
934
935 /*
936 * Append one extension property defined in props with entrypoints
937 * defined in entrys to the given ext_list. Do not append if a duplicate
938 * Return
939 * Vk_SUCCESS on success
940 */
941 VkResult
942 loader_add_to_dev_ext_list(const struct loader_instance *inst,
943 struct loader_device_extension_list *ext_list,
944 const VkExtensionProperties *props,
945 uint32_t entry_count, char **entrys) {
946 uint32_t idx;
947 if (ext_list->list == NULL || ext_list->capacity == 0) {
948 VkResult res = loader_init_generic_list(
949 inst, (struct loader_generic_list *)ext_list,
950 sizeof(struct loader_dev_ext_props));
951 if (VK_SUCCESS != res) {
952 return res;
953 }
954 }
955
956 // look for duplicates
957 if (has_vk_dev_ext_property(props, ext_list)) {
958 return VK_SUCCESS;
959 }
960
961 idx = ext_list->count;
962 // add to list at end
963 // check for enough capacity
964 if (idx * sizeof(struct loader_dev_ext_props) >= ext_list->capacity) {
965
966 ext_list->list = loader_instance_heap_realloc(
967 inst, ext_list->list, ext_list->capacity, ext_list->capacity * 2,
968 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
969
970 if (ext_list->list == NULL)
971 return VK_ERROR_OUT_OF_HOST_MEMORY;
972
973 // double capacity
974 ext_list->capacity *= 2;
975 }
976
977 memcpy(&ext_list->list[idx].props, props,
978 sizeof(struct loader_dev_ext_props));
979 ext_list->list[idx].entrypoint_count = entry_count;
980 ext_list->list[idx].entrypoints =
981 loader_instance_heap_alloc(inst, sizeof(char *) * entry_count,
982 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
983 if (ext_list->list[idx].entrypoints == NULL) {
984 ext_list->list[idx].entrypoint_count = 0;
985 return VK_ERROR_OUT_OF_HOST_MEMORY;
986 }
987 for (uint32_t i = 0; i < entry_count; i++) {
988 ext_list->list[idx].entrypoints[i] = loader_instance_heap_alloc(
989 inst, strlen(entrys[i]) + 1, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
990 if (ext_list->list[idx].entrypoints[i] == NULL) {
991 for (uint32_t j = 0; j < i; j++) {
992 loader_instance_heap_free(inst,
993 ext_list->list[idx].entrypoints[j]);
994 }
995 loader_instance_heap_free(inst, ext_list->list[idx].entrypoints);
996 ext_list->list[idx].entrypoint_count = 0;
997 ext_list->list[idx].entrypoints = NULL;
998 return VK_ERROR_OUT_OF_HOST_MEMORY;
999 }
1000 strcpy(ext_list->list[idx].entrypoints[i], entrys[i]);
1001 }
1002 ext_list->count++;
1003
1004 return VK_SUCCESS;
1005 }
1006
1007 /**
1008 * Search the given search_list for any layers in the props list.
1009 * Add these to the output layer_list. Don't add duplicates to the output
1010 * layer_list.
1011 */
1012 static VkResult
1013 loader_add_layer_names_to_list(const struct loader_instance *inst,
1014 struct loader_layer_list *output_list,
1015 uint32_t name_count, const char *const *names,
1016 const struct loader_layer_list *search_list) {
1017 struct loader_layer_properties *layer_prop;
1018 VkResult err = VK_SUCCESS;
1019
1020 for (uint32_t i = 0; i < name_count; i++) {
1021 const char *search_target = names[i];
1022 layer_prop = loader_get_layer_property(search_target, search_list);
1023 if (!layer_prop) {
1024 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
1025 "Unable to find layer %s", search_target);
1026 err = VK_ERROR_LAYER_NOT_PRESENT;
1027 continue;
1028 }
1029
1030 err = loader_add_to_layer_list(inst, output_list, 1, layer_prop);
1031 }
1032
1033 return err;
1034 }
1035
1036 /*
1037 * Manage lists of VkLayerProperties
1038 */
1039 static bool loader_init_layer_list(const struct loader_instance *inst,
1040 struct loader_layer_list *list) {
1041 list->capacity = 32 * sizeof(struct loader_layer_properties);
1042 list->list = loader_instance_heap_alloc(
1043 inst, list->capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
1044 if (list->list == NULL) {
1045 return false;
1046 }
1047 memset(list->list, 0, list->capacity);
1048 list->count = 0;
1049 return true;
1050 }
1051
1052 void loader_destroy_layer_list(const struct loader_instance *inst,
1053 struct loader_device *device,
1054 struct loader_layer_list *layer_list) {
1055 if (device) {
1056 loader_device_heap_free(device, layer_list->list);
1057 } else {
1058 loader_instance_heap_free(inst, layer_list->list);
1059 }
1060 layer_list->count = 0;
1061 layer_list->capacity = 0;
1062 }
1063
1064 /*
1065 * Search the given layer list for a list
1066 * matching the given VkLayerProperties
1067 */
1068 bool has_vk_layer_property(const VkLayerProperties *vk_layer_prop,
1069 const struct loader_layer_list *list) {
1070 for (uint32_t i = 0; i < list->count; i++) {
1071 if (strcmp(vk_layer_prop->layerName, list->list[i].info.layerName) == 0)
1072 return true;
1073 }
1074 return false;
1075 }
1076
1077 /*
1078 * Search the given layer list for a layer
1079 * matching the given name
1080 */
1081 bool has_layer_name(const char *name, const struct loader_layer_list *list) {
1082 for (uint32_t i = 0; i < list->count; i++) {
1083 if (strcmp(name, list->list[i].info.layerName) == 0)
1084 return true;
1085 }
1086 return false;
1087 }
1088
1089 /*
1090 * Append non-duplicate layer properties defined in prop_list
1091 * to the given layer_info list
1092 */
1093 VkResult loader_add_to_layer_list(const struct loader_instance *inst,
1094 struct loader_layer_list *list,
1095 uint32_t prop_list_count,
1096 const struct loader_layer_properties *props) {
1097 uint32_t i;
1098 struct loader_layer_properties *layer;
1099
1100 if (list->list == NULL || list->capacity == 0) {
1101 loader_init_layer_list(inst, list);
1102 }
1103
1104 if (list->list == NULL)
1105 return VK_SUCCESS;
1106
1107 for (i = 0; i < prop_list_count; i++) {
1108 layer = (struct loader_layer_properties *)&props[i];
1109
1110 // look for duplicates
1111 if (has_vk_layer_property(&layer->info, list)) {
1112 continue;
1113 }
1114
1115 // add to list at end
1116 // check for enough capacity
1117 if (list->count * sizeof(struct loader_layer_properties) >=
1118 list->capacity) {
1119
1120 list->list = loader_instance_heap_realloc(
1121 inst, list->list, list->capacity, list->capacity * 2,
1122 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
1123 if (NULL == list->list) {
1124 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
1125 "realloc failed for layer list when attempting to "
1126 "add new layer");
1127 return VK_ERROR_OUT_OF_HOST_MEMORY;
1128 }
1129 // double capacity
1130 list->capacity *= 2;
1131 }
1132
1133 memcpy(&list->list[list->count], layer,
1134 sizeof(struct loader_layer_properties));
1135 list->count++;
1136 }
1137
1138 return VK_SUCCESS;
1139 }
1140
1141 /**
1142 * Search the search_list for any layer with a name
1143 * that matches the given name and a type that matches the given type
1144 * Add all matching layers to the found_list
1145 * Do not add if found loader_layer_properties is already
1146 * on the found_list.
1147 */
1148 void loader_find_layer_name_add_list(
1149 const struct loader_instance *inst, const char *name,
1150 const enum layer_type type, const struct loader_layer_list *search_list,
1151 struct loader_layer_list *found_list) {
1152 bool found = false;
1153 for (uint32_t i = 0; i < search_list->count; i++) {
1154 struct loader_layer_properties *layer_prop = &search_list->list[i];
1155 if (0 == strcmp(layer_prop->info.layerName, name) &&
1156 (layer_prop->type & type)) {
1157 /* Found a layer with the same name, add to found_list */
1158 if (VK_SUCCESS == loader_add_to_layer_list(inst, found_list, 1, layer_prop)) {
1159 found = true;
1160 }
1161 }
1162 }
1163 if (!found) {
1164 loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
1165 "Warning, couldn't find layer name %s to activate", name);
1166 }
1167 }
1168
1169 static VkExtensionProperties *
1170 get_extension_property(const char *name,
1171 const struct loader_extension_list *list) {
1172 for (uint32_t i = 0; i < list->count; i++) {
1173 if (strcmp(name, list->list[i].extensionName) == 0)
1174 return &list->list[i];
1175 }
1176 return NULL;
1177 }
1178
1179 static VkExtensionProperties *
1180 get_dev_extension_property(const char *name,
1181 const struct loader_device_extension_list *list) {
1182 for (uint32_t i = 0; i < list->count; i++) {
1183 if (strcmp(name, list->list[i].props.extensionName) == 0)
1184 return &list->list[i].props;
1185 }
1186 return NULL;
1187 }
1188
1189 /*
1190 * For Instance extensions implemented within the loader (i.e. DEBUG_REPORT
1191 * the extension must provide two entry points for the loader to use:
1192 * - "trampoline" entry point - this is the address returned by GetProcAddr
1193 * and will always do what's necessary to support a global call.
1194 * - "terminator" function - this function will be put at the end of the
1195 * instance chain and will contain the necessary logic to call / process
1196 * the extension for the appropriate ICDs that are available.
1197 * There is no generic mechanism for including these functions, the references
1198 * must be placed into the appropriate loader entry points.
1199 * GetInstanceProcAddr: call extension GetInstanceProcAddr to check for
1200 * GetProcAddr requests
1201 * loader_coalesce_extensions(void) - add extension records to the list of
1202 * global
1203 * extension available to the app.
1204 * instance_disp - add function pointer for terminator function to this array.
1205 * The extension itself should be in a separate file that will be
1206 * linked directly with the loader.
1207 */
1208
1209 VkResult loader_get_icd_loader_instance_extensions(
1210 const struct loader_instance *inst, struct loader_icd_libs *icd_libs,
1211 struct loader_extension_list *inst_exts) {
1212 struct loader_extension_list icd_exts;
1213 VkResult res = VK_SUCCESS;
1214
1215 loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0,
1216 "Build ICD instance extension list");
1217
1218 // traverse scanned icd list adding non-duplicate extensions to the list
1219 for (uint32_t i = 0; i < icd_libs->count; i++) {
1220 res = loader_init_generic_list(inst,
1221 (struct loader_generic_list *)&icd_exts,
1222 sizeof(VkExtensionProperties));
1223 if (VK_SUCCESS != res) {
1224 goto out;
1225 }
1226 res = loader_add_instance_extensions(
1227 inst, icd_libs->list[i].EnumerateInstanceExtensionProperties,
1228 icd_libs->list[i].lib_name, &icd_exts);
1229 if (VK_SUCCESS == res) {
1230 res = loader_add_to_ext_list(inst, inst_exts, icd_exts.count,
1231 icd_exts.list);
1232 }
1233 loader_destroy_generic_list(inst,
1234 (struct loader_generic_list *)&icd_exts);
1235 if (VK_SUCCESS != res) {
1236 goto out;
1237 }
1238 };
1239
1240 // Traverse loader's extensions, adding non-duplicate extensions to the list
1241 debug_report_add_instance_extensions(inst, inst_exts);
1242
1243 out:
1244 return res;
1245 }
1246
1247 struct loader_icd *loader_get_icd_and_device(const VkDevice device,
1248 struct loader_device **found_dev,
1249 uint32_t *icd_index) {
1250 *found_dev = NULL;
1251 uint32_t index = 0;
1252 for (struct loader_instance *inst = loader.instances; inst;
1253 inst = inst->next) {
1254 for (struct loader_icd *icd = inst->icds; icd; icd = icd->next) {
1255 for (struct loader_device *dev = icd->logical_device_list; dev;
1256 dev = dev->next)
1257 /* Value comparison of device prevents object wrapping by layers
1258 */
1259 if (loader_get_dispatch(dev->device) ==
1260 loader_get_dispatch(device)) {
1261 *found_dev = dev;
1262 if (NULL != icd_index) {
1263 *icd_index = index;
1264 }
1265 return icd;
1266 }
1267 index++;
1268 }
1269 }
1270 return NULL;
1271 }
1272
1273 void loader_destroy_logical_device(const struct loader_instance *inst,
1274 struct loader_device *dev,
1275 const VkAllocationCallbacks *pAllocator) {
1276 if (pAllocator) {
1277 dev->alloc_callbacks = *pAllocator;
1278 }
1279 if (NULL != dev->activated_layer_list.list) {
1280 loader_deactivate_layers(inst, dev, &dev->activated_layer_list);
1281 }
1282 loader_device_heap_free(dev, dev);
1283 }
1284
1285 struct loader_device *
1286 loader_create_logical_device(const struct loader_instance *inst,
1287 const VkAllocationCallbacks *pAllocator) {
1288 struct loader_device *new_dev;
1289 #if (DEBUG_DISABLE_APP_ALLOCATORS == 1)
1290 {
1291 #else
1292 if (pAllocator) {
1293 new_dev = (struct loader_device *)pAllocator->pfnAllocation(
1294 pAllocator->pUserData, sizeof(struct loader_device), sizeof(int *),
1295 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1296 } else {
1297 #endif
1298 new_dev = (struct loader_device *)malloc(sizeof(struct loader_device));
1299 }
1300
1301 if (!new_dev) {
1302 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
1303 "Failed to alloc struct loader-device");
1304 return NULL;
1305 }
1306
1307 memset(new_dev, 0, sizeof(struct loader_device));
1308 if (pAllocator) {
1309 new_dev->alloc_callbacks = *pAllocator;
1310 }
1311
1312 return new_dev;
1313 }
1314
1315 void loader_add_logical_device(const struct loader_instance *inst,
1316 struct loader_icd *icd,
1317 struct loader_device *dev) {
1318 dev->next = icd->logical_device_list;
1319 icd->logical_device_list = dev;
1320 }
1321
1322 void loader_remove_logical_device(const struct loader_instance *inst,
1323 struct loader_icd *icd,
1324 struct loader_device *found_dev,
1325 const VkAllocationCallbacks *pAllocator) {
1326 struct loader_device *dev, *prev_dev;
1327
1328 if (!icd || !found_dev)
1329 return;
1330
1331 prev_dev = NULL;
1332 dev = icd->logical_device_list;
1333 while (dev && dev != found_dev) {
1334 prev_dev = dev;
1335 dev = dev->next;
1336 }
1337
1338 if (prev_dev)
1339 prev_dev->next = found_dev->next;
1340 else
1341 icd->logical_device_list = found_dev->next;
1342 loader_destroy_logical_device(inst, found_dev, pAllocator);
1343 }
1344
1345 static void loader_icd_destroy(struct loader_instance *ptr_inst,
1346 struct loader_icd *icd,
1347 const VkAllocationCallbacks *pAllocator) {
1348 ptr_inst->total_icd_count--;
1349 for (struct loader_device *dev = icd->logical_device_list; dev;) {
1350 struct loader_device *next_dev = dev->next;
1351 loader_destroy_logical_device(ptr_inst, dev, pAllocator);
1352 dev = next_dev;
1353 }
1354
1355 loader_instance_heap_free(ptr_inst, icd);
1356 }
1357
1358 static struct loader_icd *
1359 loader_icd_create(const struct loader_instance *inst) {
1360 struct loader_icd *icd;
1361
1362 icd = loader_instance_heap_alloc(inst, sizeof(struct loader_icd),
1363 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
1364 if (!icd) {
1365 return NULL;
1366 }
1367
1368 memset(icd, 0, sizeof(struct loader_icd));
1369
1370 return icd;
1371 }
1372
1373 static struct loader_icd *
1374 loader_icd_add(struct loader_instance *ptr_inst,
1375 const struct loader_scanned_icds *icd_lib) {
1376 struct loader_icd *icd;
1377
1378 icd = loader_icd_create(ptr_inst);
1379 if (!icd) {
1380 return NULL;
1381 }
1382
1383 icd->this_icd_lib = icd_lib;
1384 icd->this_instance = ptr_inst;
1385
1386 /* prepend to the list */
1387 icd->next = ptr_inst->icds;
1388 ptr_inst->icds = icd;
1389 ptr_inst->total_icd_count++;
1390
1391 return icd;
1392 }
1393 /**
1394 * Determine the ICD interface version to use.
1395 * @param icd
1396 * @param pVersion Output parameter indicating which version to use or 0 if
1397 * the negotiation API is not supported by the ICD
1398 * @return bool indicating true if the selected interface version is supported
1399 * by the loader, false indicates the version is not supported
1400 * version 0 doesn't support vk_icdGetInstanceProcAddr nor
1401 * vk_icdNegotiateLoaderICDInterfaceVersion
1402 * version 1 supports vk_icdGetInstanceProcAddr
1403 * version 2 supports vk_icdNegotiateLoaderICDInterfaceVersion
1404 */
1405 bool loader_get_icd_interface_version(
1406 PFN_vkNegotiateLoaderICDInterfaceVersion fp_negotiate_icd_version,
1407 uint32_t *pVersion) {
1408
1409 if (fp_negotiate_icd_version == NULL) {
1410 // ICD does not support the negotiation API, it supports version 0 or 1
1411 // calling code must determine if it is version 0 or 1
1412 *pVersion = 0;
1413 } else {
1414 // ICD supports the negotiation API, so call it with the loader's
1415 // latest version supported
1416 *pVersion = CURRENT_LOADER_ICD_INTERFACE_VERSION;
1417 VkResult result = fp_negotiate_icd_version(pVersion);
1418
1419 if (result == VK_ERROR_INCOMPATIBLE_DRIVER) {
1420 // ICD no longer supports the loader's latest interface version so
1421 // fail loading the ICD
1422 return false;
1423 }
1424 }
1425
1426 #if MIN_SUPPORTED_LOADER_ICD_INTERFACE_VERSION > 0
1427 if (*pVersion < MIN_SUPPORTED_LOADER_ICD_INTERFACE_VERSION) {
1428 // Loader no longer supports the ICD's latest interface version so fail
1429 // loading the ICD
1430 return false;
1431 }
1432 #endif
1433 return true;
1434 }
1435
1436 void loader_scanned_icd_clear(const struct loader_instance *inst,
1437 struct loader_icd_libs *icd_libs) {
1438 if (icd_libs->capacity == 0)
1439 return;
1440 for (uint32_t i = 0; i < icd_libs->count; i++) {
1441 loader_platform_close_library(icd_libs->list[i].handle);
1442 loader_instance_heap_free(inst, icd_libs->list[i].lib_name);
1443 }
1444 loader_instance_heap_free(inst, icd_libs->list);
1445 icd_libs->capacity = 0;
1446 icd_libs->count = 0;
1447 icd_libs->list = NULL;
1448 }
1449
1450 static VkResult loader_scanned_icd_init(const struct loader_instance *inst,
1451 struct loader_icd_libs *icd_libs) {
1452 VkResult err = VK_SUCCESS;
1453 loader_scanned_icd_clear(inst, icd_libs);
1454 icd_libs->capacity = 8 * sizeof(struct loader_scanned_icds);
1455 icd_libs->list = loader_instance_heap_alloc(
1456 inst, icd_libs->capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
1457 if (NULL == icd_libs->list) {
1458 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
1459 "realloc failed for layer list when attempting to add new layer");
1460 err = VK_ERROR_OUT_OF_HOST_MEMORY;
1461 }
1462 return err;
1463 }
1464
1465 static VkResult loader_scanned_icd_add(const struct loader_instance *inst,
1466 struct loader_icd_libs *icd_libs,
1467 const char *filename,
1468 uint32_t api_version) {
1469 loader_platform_dl_handle handle;
1470 PFN_vkCreateInstance fp_create_inst;
1471 PFN_vkEnumerateInstanceExtensionProperties fp_get_inst_ext_props;
1472 PFN_vkGetInstanceProcAddr fp_get_proc_addr;
1473 PFN_vkNegotiateLoaderICDInterfaceVersion fp_negotiate_icd_version;
1474 struct loader_scanned_icds *new_node;
1475 uint32_t interface_vers;
1476 VkResult res = VK_SUCCESS;
1477
1478 /* TODO implement smarter opening/closing of libraries. For now this
1479 * function leaves libraries open and the scanned_icd_clear closes them */
1480 handle = loader_platform_open_library(filename);
1481 if (!handle) {
1482 loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
1483 loader_platform_open_library_error(filename));
1484 goto out;
1485 }
1486
1487 // Get and settle on an ICD interface version
1488 fp_negotiate_icd_version = loader_platform_get_proc_address(
1489 handle, "vk_icdNegotiateLoaderICDInterfaceVersion");
1490
1491 if (!loader_get_icd_interface_version(fp_negotiate_icd_version,
1492 &interface_vers)) {
1493 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
1494 "ICD (%s) doesn't support interface version compatible"
1495 "with loader, skip this ICD %s",
1496 filename);
1497 goto out;
1498 }
1499
1500 fp_get_proc_addr =
1501 loader_platform_get_proc_address(handle, "vk_icdGetInstanceProcAddr");
1502 if (!fp_get_proc_addr) {
1503 assert(interface_vers == 0);
1504 // Use deprecated interface from version 0
1505 fp_get_proc_addr =
1506 loader_platform_get_proc_address(handle, "vkGetInstanceProcAddr");
1507 if (!fp_get_proc_addr) {
1508 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
1509 loader_platform_get_proc_address_error(
1510 "vk_icdGetInstanceProcAddr"));
1511 goto out;
1512 } else {
1513 loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
1514 "Using deprecated ICD interface of "
1515 "vkGetInstanceProcAddr instead of "
1516 "vk_icdGetInstanceProcAddr for ICD %s",
1517 filename);
1518 }
1519 fp_create_inst =
1520 loader_platform_get_proc_address(handle, "vkCreateInstance");
1521 if (!fp_create_inst) {
1522 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
1523 "Couldn't get vkCreateInstance via dlsym/loadlibrary "
1524 "for ICD %s",
1525 filename);
1526 goto out;
1527 }
1528 fp_get_inst_ext_props = loader_platform_get_proc_address(
1529 handle, "vkEnumerateInstanceExtensionProperties");
1530 if (!fp_get_inst_ext_props) {
1531 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
1532 "Couldn't get vkEnumerateInstanceExtensionProperties "
1533 "via dlsym/loadlibrary for ICD %s",
1534 filename);
1535 goto out;
1536 }
1537 } else {
1538 // Use newer interface version 1 or later
1539 if (interface_vers == 0)
1540 interface_vers = 1;
1541
1542 fp_create_inst =
1543 (PFN_vkCreateInstance)fp_get_proc_addr(NULL, "vkCreateInstance");
1544 if (!fp_create_inst) {
1545 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
1546 "Couldn't get vkCreateInstance via "
1547 "vk_icdGetInstanceProcAddr for ICD %s",
1548 filename);
1549 goto out;
1550 }
1551 fp_get_inst_ext_props =
1552 (PFN_vkEnumerateInstanceExtensionProperties)fp_get_proc_addr(
1553 NULL, "vkEnumerateInstanceExtensionProperties");
1554 if (!fp_get_inst_ext_props) {
1555 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
1556 "Couldn't get vkEnumerateInstanceExtensionProperties "
1557 "via vk_icdGetInstanceProcAddr for ICD %s",
1558 filename);
1559 goto out;
1560 }
1561 }
1562
1563 // check for enough capacity
1564 if ((icd_libs->count * sizeof(struct loader_scanned_icds)) >=
1565 icd_libs->capacity) {
1566
1567 icd_libs->list = loader_instance_heap_realloc(
1568 inst, icd_libs->list, icd_libs->capacity, icd_libs->capacity * 2,
1569 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
1570 if (NULL == icd_libs->list) {
1571 res = VK_ERROR_OUT_OF_HOST_MEMORY;
1572 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
1573 "realloc failed on icd library list");
1574 goto out;
1575 }
1576 // double capacity
1577 icd_libs->capacity *= 2;
1578 }
1579 new_node = &(icd_libs->list[icd_libs->count]);
1580
1581 new_node->handle = handle;
1582 new_node->api_version = api_version;
1583 new_node->GetInstanceProcAddr = fp_get_proc_addr;
1584 new_node->EnumerateInstanceExtensionProperties = fp_get_inst_ext_props;
1585 new_node->CreateInstance = fp_create_inst;
1586 new_node->interface_version = interface_vers;
1587
1588 new_node->lib_name = (char *)loader_instance_heap_alloc(
1589 inst, strlen(filename) + 1, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
1590 if (NULL == new_node->lib_name) {
1591 res = VK_ERROR_OUT_OF_HOST_MEMORY;
1592 loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
1593 "Out of memory can't add icd");
1594 goto out;
1595 }
1596 strcpy(new_node->lib_name, filename);
1597 icd_libs->count++;
1598
1599 out:
1600
1601 return res;
1602 }
1603
1604 static bool loader_icd_init_entrys(struct loader_icd *icd, VkInstance inst,
1605 const PFN_vkGetInstanceProcAddr fp_gipa) {
1606 /* initialize entrypoint function pointers */
1607
1608 #define LOOKUP_GIPA(func, required) \
1609 do { \
1610 icd->func = (PFN_vk##func)fp_gipa(inst, "vk" #func); \
1611 if (!icd->func && required) { \
1612 loader_log((struct loader_instance *)inst, \
1613 VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, \
1614 loader_platform_get_proc_address_error("vk" #func)); \
1615 return false; \
1616 } \
1617 } while (0)
1618
1619 LOOKUP_GIPA(GetDeviceProcAddr, true);
1620 LOOKUP_GIPA(DestroyInstance, true);
1621 LOOKUP_GIPA(EnumeratePhysicalDevices, true);
1622 LOOKUP_GIPA(GetPhysicalDeviceFeatures, true);
1623 LOOKUP_GIPA(GetPhysicalDeviceFormatProperties, true);
1624 LOOKUP_GIPA(GetPhysicalDeviceImageFormatProperties, true);
1625 LOOKUP_GIPA(CreateDevice, true);
1626 LOOKUP_GIPA(GetPhysicalDeviceProperties, true);
1627 LOOKUP_GIPA(GetPhysicalDeviceMemoryProperties, true);
1628 LOOKUP_GIPA(GetPhysicalDeviceQueueFamilyProperties, true);
1629 LOOKUP_GIPA(EnumerateDeviceExtensionProperties, true);
1630 LOOKUP_GIPA(GetPhysicalDeviceSparseImageFormatProperties, true);
1631 LOOKUP_GIPA(CreateDebugReportCallbackEXT, false);
1632 LOOKUP_GIPA(DestroyDebugReportCallbackEXT, false);
1633 LOOKUP_GIPA(GetPhysicalDeviceSurfaceSupportKHR, false);
1634 LOOKUP_GIPA(GetPhysicalDeviceSurfaceCapabilitiesKHR, false);
1635 LOOKUP_GIPA(GetPhysicalDeviceSurfaceFormatsKHR, false);
1636 LOOKUP_GIPA(GetPhysicalDeviceSurfacePresentModesKHR, false);
1637 LOOKUP_GIPA(GetPhysicalDeviceDisplayPropertiesKHR, false);
1638 LOOKUP_GIPA(GetDisplayModePropertiesKHR, false);
1639 LOOKUP_GIPA(CreateDisplayPlaneSurfaceKHR, false);
1640 LOOKUP_GIPA(GetPhysicalDeviceDisplayPlanePropertiesKHR, false);
1641 LOOKUP_GIPA(GetDisplayPlaneSupportedDisplaysKHR, false);
1642 LOOKUP_GIPA(CreateDisplayModeKHR, false);
1643 LOOKUP_GIPA(GetDisplayPlaneCapabilitiesKHR, false);
1644 LOOKUP_GIPA(DestroySurfaceKHR, false);
1645 LOOKUP_GIPA(CreateSwapchainKHR, false);
1646 #ifdef VK_USE_PLATFORM_WIN32_KHR
1647 LOOKUP_GIPA(CreateWin32SurfaceKHR, false);
1648 LOOKUP_GIPA(GetPhysicalDeviceWin32PresentationSupportKHR, false);
1649 #endif
1650 #ifdef VK_USE_PLATFORM_XCB_KHR
1651 LOOKUP_GIPA(CreateXcbSurfaceKHR, false);
1652 LOOKUP_GIPA(GetPhysicalDeviceXcbPresentationSupportKHR, false);
1653 #endif
1654 #ifdef VK_USE_PLATFORM_XLIB_KHR
1655 LOOKUP_GIPA(CreateXlibSurfaceKHR, false);
1656 LOOKUP_GIPA(GetPhysicalDeviceXlibPresentationSupportKHR, false);
1657 #endif
1658 #ifdef VK_USE_PLATFORM_MIR_KHR
1659 LOOKUP_GIPA(CreateMirSurfaceKHR, false);
1660 LOOKUP_GIPA(GetPhysicalDeviceMirPresentationSupportKHR, false);
1661 #endif
1662 #ifdef VK_USE_PLATFORM_WAYLAND_KHR
1663 LOOKUP_GIPA(CreateWaylandSurfaceKHR, false);
1664 LOOKUP_GIPA(GetPhysicalDeviceWaylandPresentationSupportKHR, false);
1665 #endif
1666 LOOKUP_GIPA(GetPhysicalDeviceExternalImageFormatPropertiesNV, false);
1667
1668 #undef LOOKUP_GIPA
1669
1670 return true;
1671 }
1672
1673 static void loader_debug_init(void) {
1674 char *env, *orig;
1675
1676 if (g_loader_debug > 0)
1677 return;
1678
1679 g_loader_debug = 0;
1680
1681 /* parse comma-separated debug options */
1682 orig = env = loader_getenv("VK_LOADER_DEBUG", NULL);
1683 while (env) {
1684 char *p = strchr(env, ',');
1685 size_t len;
1686
1687 if (p)
1688 len = p - env;
1689 else
1690 len = strlen(env);
1691
1692 if (len > 0) {
1693 if (strncmp(env, "all", len) == 0) {
1694 g_loader_debug = ~0u;
1695 g_loader_log_msgs = ~0u;
1696 } else if (strncmp(env, "warn", len) == 0) {
1697 g_loader_debug |= LOADER_WARN_BIT;
1698 g_loader_log_msgs |= VK_DEBUG_REPORT_WARNING_BIT_EXT;
1699 } else if (strncmp(env, "info", len) == 0) {
1700 g_loader_debug |= LOADER_INFO_BIT;
1701 g_loader_log_msgs |= VK_DEBUG_REPORT_INFORMATION_BIT_EXT;
1702 } else if (strncmp(env, "perf", len) == 0) {
1703 g_loader_debug |= LOADER_PERF_BIT;
1704 g_loader_log_msgs |=
1705 VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT;
1706 } else if (strncmp(env, "error", len) == 0) {
1707 g_loader_debug |= LOADER_ERROR_BIT;
1708 g_loader_log_msgs |= VK_DEBUG_REPORT_ERROR_BIT_EXT;
1709 } else if (strncmp(env, "debug", len) == 0) {
1710 g_loader_debug |= LOADER_DEBUG_BIT;
1711 g_loader_log_msgs |= VK_DEBUG_REPORT_DEBUG_BIT_EXT;
1712 }
1713 }
1714
1715 if (!p)
1716 break;
1717
1718 env = p + 1;
1719 }
1720
1721 loader_free_getenv(orig, NULL);
1722 }
1723
1724 void loader_initialize(void) {
    // initialize mutexes
1726 loader_platform_thread_create_mutex(&loader_lock);
1727 loader_platform_thread_create_mutex(&loader_json_lock);
1728
1729 // initialize logging
1730 loader_debug_init();
1731
    // initialize cJSON to use the loader's allocation callbacks
1733 cJSON_Hooks alloc_fns = {
1734 .malloc_fn = loader_instance_tls_heap_alloc,
1735 .free_fn = loader_instance_tls_heap_free,
1736 };
1737 cJSON_InitHooks(&alloc_fns);
1738 }
1739
1740 struct loader_manifest_files {
1741 uint32_t count;
1742 char **filename_list;
1743 };
1744
/**
 * Get the next file or dirname given a string list or registry key path.
 *
 * \returns
 * A pointer to the first char of the next path in the list, or a pointer
 * to the terminating NUL if there is no next path.
 * Note: input string is modified in some cases. PASS IN A COPY!
 */
1753 static char *loader_get_next_path(char *path) {
1754 uint32_t len;
1755 char *next;
1756
1757 if (path == NULL)
1758 return NULL;
1759 next = strchr(path, PATH_SEPERATOR);
1760 if (next == NULL) {
1761 len = (uint32_t)strlen(path);
1762 next = path + len;
1763 } else {
1764 *next = '\0';
1765 next++;
1766 }
1767
1768 return next;
1769 }
1770
1771 /**
1772 * Given a path which is absolute or relative, expand the path if relative or
1773 * leave the path unmodified if absolute. The base path to prepend to relative
1774 * paths is given in rel_base.
1775 *
1776 * \returns
1777 * A string in out_fullpath of the full absolute path
1778 */
1779 static void loader_expand_path(const char *path, const char *rel_base,
1780 size_t out_size, char *out_fullpath) {
1781 if (loader_platform_is_path_absolute(path)) {
1782 // do not prepend a base to an absolute path
1783 rel_base = "";
1784 }
1785
1786 loader_platform_combine_path(out_fullpath, out_size, rel_base, path, NULL);
1787 }
1788
/**
 * Given a filename (file) and a list of paths (dirs), try to find an existing
 * file in those paths. If the filename is already a path, the given paths are
 * not searched.
 *
 * \returns
 * A string in out_fullpath containing either the full path found or the
 * original file name.
 */
1797 static void loader_get_fullpath(const char *file, const char *dirs,
1798 size_t out_size, char *out_fullpath) {
1799 if (!loader_platform_is_path(file) && *dirs) {
1800 char *dirs_copy, *dir, *next_dir;
1801
1802 dirs_copy = loader_stack_alloc(strlen(dirs) + 1);
1803 strcpy(dirs_copy, dirs);
1804
1805 // find if file exists after prepending paths in given list
1806 for (dir = dirs_copy; *dir && (next_dir = loader_get_next_path(dir));
1807 dir = next_dir) {
1808 loader_platform_combine_path(out_fullpath, out_size, dir, file,
1809 NULL);
1810 if (loader_platform_file_exists(out_fullpath)) {
1811 return;
1812 }
1813 }
1814 }
1815
1816 snprintf(out_fullpath, out_size, "%s", file);
1817 }
1818
/**
 * Read a JSON file and parse it into a cJSON tree.
 *
 * \returns
 * A VkResult indicating success or failure. On success, *json points to a
 * cJSON object representing the parse tree, which the caller must free with
 * cJSON_Delete.
 */
1826 static VkResult loader_get_json(const struct loader_instance *inst,
1827 const char *filename, cJSON **json) {
1828 FILE *file = NULL;
1829 char *json_buf;
1830 size_t len;
1831 VkResult res = VK_SUCCESS;
1832
1833 if (NULL == json) {
1834 res = VK_ERROR_INITIALIZATION_FAILED;
1835 goto out;
1836 }
1837
1838 *json = NULL;
1839
1840 file = fopen(filename, "rb");
1841 if (!file) {
1842 res = VK_ERROR_INITIALIZATION_FAILED;
1843 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
1844 "Couldn't open JSON file %s", filename);
1845 goto out;
1846 }
1847 fseek(file, 0, SEEK_END);
1848 len = ftell(file);
1849 fseek(file, 0, SEEK_SET);
1850 json_buf = (char *)loader_stack_alloc(len + 1);
1851 if (json_buf == NULL) {
1852 res = VK_ERROR_OUT_OF_HOST_MEMORY;
1853 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
1854 "Out of memory can't get JSON file");
1855 goto out;
1856 }
1857 if (fread(json_buf, sizeof(char), len, file) != len) {
1858 res = VK_ERROR_INITIALIZATION_FAILED;
1859 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
1860 "fread failed can't get JSON file");
1861 goto out;
1862 }
1863 json_buf[len] = '\0';
1864
1865 // parse text from file
1866 *json = cJSON_Parse(json_buf);
1867 if (*json == NULL) {
1868 res = VK_ERROR_OUT_OF_HOST_MEMORY;
1869 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
1870 "Can't parse JSON file %s", filename);
1871 goto out;
1872 }
1873
1874 out:
1875 if (NULL != file) {
1876 fclose(file);
1877 }
1878
1879 return res;
1880 }
1881
1882 /**
1883 * Do a deep copy of the loader_layer_properties structure.
1884 */
1885 VkResult loader_copy_layer_properties(const struct loader_instance *inst,
1886 struct loader_layer_properties *dst,
1887 struct loader_layer_properties *src) {
1888 uint32_t cnt, i;
1889 memcpy(dst, src, sizeof(*src));
1890 dst->instance_extension_list.list =
1891 loader_instance_heap_alloc(inst, sizeof(VkExtensionProperties) *
1892 src->instance_extension_list.count,
1893 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
1894 if (NULL == dst->instance_extension_list.list) {
1895 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
1896 "alloc failed for instance extension list");
1897 return VK_ERROR_OUT_OF_HOST_MEMORY;
1898 }
1899 dst->instance_extension_list.capacity =
1900 sizeof(VkExtensionProperties) * src->instance_extension_list.count;
1901 memcpy(dst->instance_extension_list.list, src->instance_extension_list.list,
1902 dst->instance_extension_list.capacity);
1903 dst->device_extension_list.list =
1904 loader_instance_heap_alloc(inst, sizeof(struct loader_dev_ext_props) *
1905 src->device_extension_list.count,
1906 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
1907 if (NULL == dst->device_extension_list.list) {
1908 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
1909 "alloc failed for device extension list");
1910 return VK_ERROR_OUT_OF_HOST_MEMORY;
1911 }
1912 memset(dst->device_extension_list.list, 0, sizeof(struct loader_dev_ext_props) *
1913 src->device_extension_list.count);
1914
1915 dst->device_extension_list.capacity =
1916 sizeof(struct loader_dev_ext_props) * src->device_extension_list.count;
1917 memcpy(dst->device_extension_list.list, src->device_extension_list.list,
1918 dst->device_extension_list.capacity);
1919 if (src->device_extension_list.count > 0 &&
1920 src->device_extension_list.list->entrypoint_count > 0) {
1921 cnt = src->device_extension_list.list->entrypoint_count;
1922 dst->device_extension_list.list->entrypoints =
1923 loader_instance_heap_alloc(inst, sizeof(char *) * cnt,
1924 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
1925 if (NULL == dst->device_extension_list.list->entrypoints) {
1926 loader_log(
1927 inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
1928 "alloc failed for device extension list entrypoint array");
1929 return VK_ERROR_OUT_OF_HOST_MEMORY;
1930 }
1931 memset(dst->device_extension_list.list->entrypoints, 0, sizeof(char *) * cnt);
1932
1933 for (i = 0; i < cnt; i++) {
1934 dst->device_extension_list.list->entrypoints[i] =
1935 loader_instance_heap_alloc(
1936 inst,
1937 strlen(src->device_extension_list.list->entrypoints[i]) + 1,
1938 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
1939 if (NULL == dst->device_extension_list.list->entrypoints[i]) {
1940 loader_log(
1941 inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
1942 "alloc failed for device extension list entrypoint %d", i);
1943 return VK_ERROR_OUT_OF_HOST_MEMORY;
1944 }
1945 strcpy(dst->device_extension_list.list->entrypoints[i],
1946 src->device_extension_list.list->entrypoints[i]);
1947 }
1948 }
1949
1950 return VK_SUCCESS;
1951 }
1952
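// Returns true if the given layer name appears in the given loader_layer_list.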
1953 static bool
1954 loader_find_layer_name_list(const char *name,
1955 const struct loader_layer_list *layer_list) {
1956 if (!layer_list)
1957 return false;
1958 for (uint32_t j = 0; j < layer_list->count; j++)
1959 if (!strcmp(name, layer_list->list[j].info.layerName))
1960 return true;
1961 return false;
1962 }
1963
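// Returns true if the given layer name appears in an array of layer name
// pointers (such as an application's ppEnabledLayerNames).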
1964 static bool loader_find_layer_name(const char *name, uint32_t layer_count,
1965 const char **layer_list) {
1966 if (!layer_list)
1967 return false;
1968 for (uint32_t j = 0; j < layer_count; j++)
1969 if (!strcmp(name, layer_list[j]))
1970 return true;
1971 return false;
1972 }
1973
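// Returns true if the given layer name appears in a fixed-size array of layer
// names (VK_MAX_EXTENSION_NAME_SIZE bytes per entry).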
1974 bool loader_find_layer_name_array(
1975 const char *name, uint32_t layer_count,
1976 const char layer_list[][VK_MAX_EXTENSION_NAME_SIZE]) {
1977 if (!layer_list)
1978 return false;
1979 for (uint32_t j = 0; j < layer_count; j++)
1980 if (!strcmp(name, layer_list[j]))
1981 return true;
1982 return false;
1983 }
1984
/**
 * Searches through an array of layer names (ppp_layer_names) looking for a
 * layer key_name.
 * If key_name is not found, the function simply returns without updating
 * anything.
 * Otherwise, the expand_count entries of expand_names are inserted into the
 * layer name list in place of key_name. Any expand_names that already appear
 * in the list are removed so they are not duplicated; the order of the
 * remaining layers is preserved.
 * @param inst
 * @param layer_count
 * @param ppp_layer_names
 */
1997 VkResult loader_expand_layer_names(
1998 struct loader_instance *inst, const char *key_name, uint32_t expand_count,
1999 const char expand_names[][VK_MAX_EXTENSION_NAME_SIZE],
2000 uint32_t *layer_count, char const *const **ppp_layer_names) {
2001
2002 char const *const *pp_src_layers = *ppp_layer_names;
2003
2004 if (!loader_find_layer_name(key_name, *layer_count,
2005 (char const **)pp_src_layers)) {
2006 inst->activated_layers_are_std_val = false;
2007 return VK_SUCCESS; // didn't find the key_name in the list.
2008 }
2009
2010 loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0,
2011 "Found meta layer %s, replacing with actual layer group",
2012 key_name);
2013
2014 inst->activated_layers_are_std_val = true;
2015 char const **pp_dst_layers = loader_instance_heap_alloc(
2016 inst, (expand_count + *layer_count - 1) * sizeof(char const *),
2017 VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
2018 if (NULL == pp_dst_layers) {
2019 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
2020 "alloc failed for dst layer array");
2021 return VK_ERROR_OUT_OF_HOST_MEMORY;
2022 }
2023
2024 // copy layers from src to dst, stripping key_name and anything in
2025 // expand_names.
2026 uint32_t src_index, dst_index = 0;
2027 for (src_index = 0; src_index < *layer_count; src_index++) {
2028 if (loader_find_layer_name_array(pp_src_layers[src_index], expand_count,
2029 expand_names)) {
2030 continue;
2031 }
2032
2033 if (!strcmp(pp_src_layers[src_index], key_name)) {
2034 // insert all expand_names in place of key_name
2035 uint32_t expand_index;
2036 for (expand_index = 0; expand_index < expand_count;
2037 expand_index++) {
2038 pp_dst_layers[dst_index++] = expand_names[expand_index];
2039 }
2040 continue;
2041 }
2042
2043 pp_dst_layers[dst_index++] = pp_src_layers[src_index];
2044 }
2045
2046 *ppp_layer_names = pp_dst_layers;
2047 *layer_count = dst_index;
2048
2049 return VK_SUCCESS;
2050 }
2051
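// Free the shadow copy of the enabled-layer-name array if
// loader_expand_layer_names had to allocate one (i.e. it differs from the
// application's original array).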
2052 void loader_delete_shadow_inst_layer_names(const struct loader_instance *inst,
2053 const VkInstanceCreateInfo *orig,
2054 VkInstanceCreateInfo *ours) {
2055 /* Free the layer names array iff we had to reallocate it */
2056 if (orig->ppEnabledLayerNames != ours->ppEnabledLayerNames) {
2057 loader_instance_heap_free(inst, (void *)ours->ppEnabledLayerNames);
2058 }
2059 }
2060
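// Fill out the layer properties describing the standard validation meta layer
// (VK_LAYER_LUNARG_standard_validation).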
2061 void loader_init_std_validation_props(struct loader_layer_properties *props) {
2062 memset(props, 0, sizeof(struct loader_layer_properties));
2063 props->type = VK_LAYER_TYPE_META_EXPLICT;
2064 strncpy(props->info.description, "LunarG Standard Validation Layer",
2065 sizeof (props->info.description));
2066 props->info.implementationVersion = 1;
2067 strncpy(props->info.layerName, std_validation_str,
2068 sizeof (props->info.layerName));
2069 // TODO what about specVersion? for now insert loader's built version
2070 props->info.specVersion = VK_MAKE_VERSION(1, 0, VK_HEADER_VERSION);
2071 }
2072
2073 /**
2074 * Searches through the existing instance layer lists looking for
2075 * the set of required layer names. If found then it adds a meta property to the
2076 * layer list.
2077 * Assumes the required layers are the same for both instance and device lists.
2078 * @param inst
2079 * @param layer_count number of layers in layer_names
2080 * @param layer_names array of required layer names
2081 * @param layer_instance_list
2082 */
2083 static void loader_add_layer_property_meta(
2084 const struct loader_instance *inst, uint32_t layer_count,
2085 const char layer_names[][VK_MAX_EXTENSION_NAME_SIZE],
2086 struct loader_layer_list *layer_instance_list) {
2087 uint32_t i;
2088 bool found;
2089 struct loader_layer_list *layer_list;
2090
2091 if (0 == layer_count || (!layer_instance_list))
2092 return;
2093 if (layer_instance_list && (layer_count > layer_instance_list->count))
2094 return;
2095
2096
2097 layer_list = layer_instance_list;
2098
2099 found = true;
2100 if (layer_list == NULL)
2101 return;
2102 for (i = 0; i < layer_count; i++) {
2103 if (loader_find_layer_name_list(layer_names[i], layer_list))
2104 continue;
2105 found = false;
2106 break;
2107 }
2108
2109 struct loader_layer_properties *props;
2110 if (found) {
2111 props = loader_get_next_layer_property(inst, layer_list);
2112 if (NULL == props) {
2113 // Error already triggered in loader_get_next_layer_property.
2114 return;
2115 }
2116 loader_init_std_validation_props(props);
2117
2118 }
2119
2120 }
2121
2122 static void loader_read_json_layer(
2123 const struct loader_instance *inst,
2124 struct loader_layer_list *layer_instance_list, cJSON *layer_node,
2125 cJSON *item, cJSON *disable_environment, bool is_implicit, char *filename) {
2126 char *temp;
2127 char *name, *type, *library_path, *api_version;
2128 char *implementation_version, *description;
2129 cJSON *ext_item;
2130 VkExtensionProperties ext_prop;
2131
    /*
     * The following are required in the "layer" object:
     * (required) "name"
     * (required) "type"
     * (required) "library_path"
     * (required) "api_version"
     * (required) "implementation_version"
     * (required) "description"
     * (required for implicit layers) "disable_environment"
     */
2142
2143 #define GET_JSON_OBJECT(node, var) \
2144 { \
2145 var = cJSON_GetObjectItem(node, #var); \
2146 if (var == NULL) { \
2147 layer_node = layer_node->next; \
2148 loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, \
2149 "Didn't find required layer object %s in manifest " \
2150 "JSON file, skipping this layer", \
2151 #var); \
2152 return; \
2153 } \
2154 }
2155 #define GET_JSON_ITEM(node, var) \
2156 { \
2157 item = cJSON_GetObjectItem(node, #var); \
2158 if (item == NULL) { \
2159 layer_node = layer_node->next; \
2160 loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, \
2161 "Didn't find required layer value %s in manifest JSON " \
2162 "file, skipping this layer", \
2163 #var); \
2164 return; \
2165 } \
2166 temp = cJSON_Print(item); \
2167 if (temp == NULL) { \
2168 layer_node = layer_node->next; \
2169 loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, \
2170 "Problem accessing layer value %s in manifest JSON " \
2171 "file, skipping this layer", \
2172 #var); \
2173 return; \
2174 } \
2175 temp[strlen(temp) - 1] = '\0'; \
2176 var = loader_stack_alloc(strlen(temp) + 1); \
2177 strcpy(var, &temp[1]); \
2178 cJSON_Free(temp); \
2179 }
2180 GET_JSON_ITEM(layer_node, name)
2181 GET_JSON_ITEM(layer_node, type)
2182 GET_JSON_ITEM(layer_node, library_path)
2183 GET_JSON_ITEM(layer_node, api_version)
2184 GET_JSON_ITEM(layer_node, implementation_version)
2185 GET_JSON_ITEM(layer_node, description)
2186 if (is_implicit) {
2187 GET_JSON_OBJECT(layer_node, disable_environment)
2188 }
2189 #undef GET_JSON_ITEM
2190 #undef GET_JSON_OBJECT
2191
2192 // add list entry
2193 struct loader_layer_properties *props = NULL;
2194 if (!strcmp(type, "DEVICE")) {
2195 loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
                   "Device layers are deprecated, skipping this layer");
2197 layer_node = layer_node->next;
2198 return;
2199 }
2200 // Allow either GLOBAL or INSTANCE type interchangeably to handle
2201 // layers that must work with older loaders
2202 if (!strcmp(type, "INSTANCE") || !strcmp(type, "GLOBAL")) {
2203 if (layer_instance_list == NULL) {
2204 layer_node = layer_node->next;
2205 return;
2206 }
2207 props = loader_get_next_layer_property(inst, layer_instance_list);
2208 if (NULL == props) {
2209 // Error already triggered in loader_get_next_layer_property.
2210 return;
2211 }
2212 props->type = (is_implicit) ? VK_LAYER_TYPE_INSTANCE_IMPLICIT
2213 : VK_LAYER_TYPE_INSTANCE_EXPLICIT;
2214 }
2215
2216 if (props == NULL) {
2217 layer_node = layer_node->next;
2218 return;
2219 }
2220
2221 strncpy(props->info.layerName, name, sizeof(props->info.layerName));
2222 props->info.layerName[sizeof(props->info.layerName) - 1] = '\0';
2223
2224 char *fullpath = props->lib_name;
2225 char *rel_base;
2226 if (loader_platform_is_path(library_path)) {
2227 // a relative or absolute path
2228 char *name_copy = loader_stack_alloc(strlen(filename) + 1);
2229 strcpy(name_copy, filename);
2230 rel_base = loader_platform_dirname(name_copy);
2231 loader_expand_path(library_path, rel_base, MAX_STRING_SIZE, fullpath);
2232 } else {
2233 // a filename which is assumed in a system directory
2234 loader_get_fullpath(library_path, DEFAULT_VK_LAYERS_PATH,
2235 MAX_STRING_SIZE, fullpath);
2236 }
2237 props->info.specVersion = loader_make_version(api_version);
2238 props->info.implementationVersion = atoi(implementation_version);
2239 strncpy((char *)props->info.description, description,
2240 sizeof(props->info.description));
2241 props->info.description[sizeof(props->info.description) - 1] = '\0';
2242 if (is_implicit) {
2243 if (!disable_environment || !disable_environment->child) {
2244 loader_log(
2245 inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
                "Didn't find required layer child value disable_environment "
                "in manifest JSON file, skipping this layer");
2248 layer_node = layer_node->next;
2249 return;
2250 }
2251 strncpy(props->disable_env_var.name, disable_environment->child->string,
2252 sizeof(props->disable_env_var.name));
2253 props->disable_env_var.name[sizeof(props->disable_env_var.name) - 1] =
2254 '\0';
2255 strncpy(props->disable_env_var.value,
2256 disable_environment->child->valuestring,
2257 sizeof(props->disable_env_var.value));
2258 props->disable_env_var.value[sizeof(props->disable_env_var.value) - 1] =
2259 '\0';
2260 }
2261
2262 /**
2263 * Now get all optional items and objects and put in list:
2264 * functions
2265 * instance_extensions
2266 * device_extensions
2267 * enable_environment (implicit layers only)
2268 */
2269 #define GET_JSON_OBJECT(node, var) \
2270 { var = cJSON_GetObjectItem(node, #var); }
2271 #define GET_JSON_ITEM(node, var) \
2272 { \
2273 item = cJSON_GetObjectItem(node, #var); \
2274 if (item != NULL) { \
2275 temp = cJSON_Print(item); \
2276 if (temp != NULL) { \
2277 temp[strlen(temp) - 1] = '\0'; \
2278 var = loader_stack_alloc(strlen(temp) + 1); \
2279 strcpy(var, &temp[1]); \
2280 cJSON_Free(temp); \
2281 } \
2282 } \
2283 }
2284
2285 cJSON *instance_extensions, *device_extensions, *functions,
2286 *enable_environment;
2287 cJSON *entrypoints;
2288 char *vkGetInstanceProcAddr, *vkGetDeviceProcAddr, *spec_version;
2289 char **entry_array;
2290 vkGetInstanceProcAddr = NULL;
2291 vkGetDeviceProcAddr = NULL;
2292 spec_version = NULL;
2293 entrypoints = NULL;
2294 entry_array = NULL;
2295 int i, j;
2296
2297 /**
2298 * functions
2299 * vkGetInstanceProcAddr
2300 * vkGetDeviceProcAddr
2301 */
2302 GET_JSON_OBJECT(layer_node, functions)
2303 if (functions != NULL) {
2304 GET_JSON_ITEM(functions, vkGetInstanceProcAddr)
2305 GET_JSON_ITEM(functions, vkGetDeviceProcAddr)
2306 if (vkGetInstanceProcAddr != NULL)
2307 strncpy(props->functions.str_gipa, vkGetInstanceProcAddr,
2308 sizeof(props->functions.str_gipa));
2309 props->functions.str_gipa[sizeof(props->functions.str_gipa) - 1] = '\0';
2310 if (vkGetDeviceProcAddr != NULL)
2311 strncpy(props->functions.str_gdpa, vkGetDeviceProcAddr,
2312 sizeof(props->functions.str_gdpa));
2313 props->functions.str_gdpa[sizeof(props->functions.str_gdpa) - 1] = '\0';
2314 }
2315 /**
2316 * instance_extensions
2317 * array of
2318 * name
2319 * spec_version
2320 */
2321 GET_JSON_OBJECT(layer_node, instance_extensions)
2322 if (instance_extensions != NULL) {
2323 int count = cJSON_GetArraySize(instance_extensions);
2324 for (i = 0; i < count; i++) {
2325 ext_item = cJSON_GetArrayItem(instance_extensions, i);
2326 GET_JSON_ITEM(ext_item, name)
2327 if (name != NULL) {
2328 strncpy(ext_prop.extensionName, name,
2329 sizeof(ext_prop.extensionName));
2330 ext_prop.extensionName[sizeof(ext_prop.extensionName) - 1] =
2331 '\0';
2332 }
2333 GET_JSON_ITEM(ext_item, spec_version)
2334 if (NULL != spec_version) {
2335 ext_prop.specVersion = atoi(spec_version);
2336 } else {
2337 ext_prop.specVersion = 0;
2338 }
2339 bool ext_unsupported =
2340 wsi_unsupported_instance_extension(&ext_prop);
2341 if (!ext_unsupported) {
2342 loader_add_to_ext_list(inst, &props->instance_extension_list, 1,
2343 &ext_prop);
2344 }
2345 }
2346 }
2347 /**
2348 * device_extensions
2349 * array of
2350 * name
2351 * spec_version
2352 * entrypoints
2353 */
2354 GET_JSON_OBJECT(layer_node, device_extensions)
2355 if (device_extensions != NULL) {
2356 int count = cJSON_GetArraySize(device_extensions);
2357 for (i = 0; i < count; i++) {
2358 ext_item = cJSON_GetArrayItem(device_extensions, i);
2359 GET_JSON_ITEM(ext_item, name)
2360 GET_JSON_ITEM(ext_item, spec_version)
2361 if (name != NULL) {
2362 strncpy(ext_prop.extensionName, name,
2363 sizeof(ext_prop.extensionName));
2364 ext_prop.extensionName[sizeof(ext_prop.extensionName) - 1] =
2365 '\0';
2366 }
2367 if (NULL != spec_version) {
2368 ext_prop.specVersion = atoi(spec_version);
2369 } else {
2370 ext_prop.specVersion = 0;
2371 }
2372 // entrypoints = cJSON_GetObjectItem(ext_item, "entrypoints");
2373 GET_JSON_OBJECT(ext_item, entrypoints)
2374 int entry_count;
2375 if (entrypoints == NULL) {
2376 loader_add_to_dev_ext_list(inst, &props->device_extension_list,
2377 &ext_prop, 0, NULL);
2378 continue;
2379 }
2380 entry_count = cJSON_GetArraySize(entrypoints);
2381 if (entry_count) {
2382 entry_array =
2383 (char **)loader_stack_alloc(sizeof(char *) * entry_count);
2384 }
2385 for (j = 0; j < entry_count; j++) {
2386 ext_item = cJSON_GetArrayItem(entrypoints, j);
2387 if (ext_item != NULL) {
2388 temp = cJSON_Print(ext_item);
2389 if (NULL == temp) {
2390 entry_array[j] = NULL;
2391 continue;
2392 }
2393 temp[strlen(temp) - 1] = '\0';
2394 entry_array[j] = loader_stack_alloc(strlen(temp) + 1);
2395 strcpy(entry_array[j], &temp[1]);
2396 cJSON_Free(temp);
2397 }
2398 }
2399 loader_add_to_dev_ext_list(inst, &props->device_extension_list,
2400 &ext_prop, entry_count, entry_array);
2401 }
2402 }
2403 if (is_implicit) {
2404 GET_JSON_OBJECT(layer_node, enable_environment)
2405
2406 // enable_environment is optional
2407 if (enable_environment) {
2408 strncpy(props->enable_env_var.name,
2409 enable_environment->child->string,
2410 sizeof(props->enable_env_var.name));
2411 props->enable_env_var.name[sizeof(props->enable_env_var.name) - 1] =
2412 '\0';
2413 strncpy(props->enable_env_var.value,
2414 enable_environment->child->valuestring,
2415 sizeof(props->enable_env_var.value));
2416 props->enable_env_var
2417 .value[sizeof(props->enable_env_var.value) - 1] = '\0';
2418 }
2419 }
2420 #undef GET_JSON_ITEM
2421 #undef GET_JSON_OBJECT
2422 }
2423
/**
 * Given a cJSON struct (json) of the top level JSON object from a layer
 * manifest file, add an entry to the layer_list. Fill out the
 * layer_properties in this list entry from the input cJSON object.
 *
 * \returns
 * void
 * layer_list has a new entry and is initialized accordingly.
 * If the json input object does not have all the required fields, no entry
 * is added to the list.
 */
2435 static void
2436 loader_add_layer_properties(const struct loader_instance *inst,
2437 struct loader_layer_list *layer_instance_list,
2438 cJSON *json, bool is_implicit, char *filename) {
    /* Fields in the layer manifest file that are required:
     * (required) "file_format_version"
     *
     * If more than one "layer" object is to be used, use the "layers" array
     * instead.
     *
     * First get all required items; if any are missing, abort.
     */
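    /*
     * For reference, a single-layer manifest that satisfies the required
     * fields parsed below has roughly this shape (an illustrative sketch
     * only; the name, path, and version values are hypothetical):
     *
     *   {
     *       "file_format_version": "1.0.1",
     *       "layer": {
     *           "name": "VK_LAYER_example_name",
     *           "type": "INSTANCE",
     *           "library_path": "path/to/example_layer_library.so",
     *           "api_version": "1.0.21",
     *           "implementation_version": "1",
     *           "description": "Example layer description"
     *       }
     *   }
     */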
2447
2448 cJSON *item, *layers_node, *layer_node;
2449 uint16_t file_major_vers = 0;
2450 uint16_t file_minor_vers = 0;
2451 uint16_t file_patch_vers = 0;
2452 char *vers_tok;
2453 cJSON *disable_environment = NULL;
2454 item = cJSON_GetObjectItem(json, "file_format_version");
2455 if (item == NULL) {
2456 return;
2457 }
2458 char *file_vers = cJSON_PrintUnformatted(item);
2459 if (NULL == file_vers) {
2460 return;
2461 }
2462 loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0,
2463 "Found manifest file %s, version %s", filename, file_vers);
2464 // Get the major/minor/and patch as integers for easier comparison
2465 vers_tok = strtok(file_vers, ".\"\n\r");
2466 if (NULL != vers_tok) {
2467 file_major_vers = (uint16_t)atoi(vers_tok);
2468 vers_tok = strtok(NULL, ".\"\n\r");
2469 if (NULL != vers_tok) {
2470 file_minor_vers = (uint16_t)atoi(vers_tok);
2471 vers_tok = strtok(NULL, ".\"\n\r");
2472 if (NULL != vers_tok) {
2473 file_patch_vers = (uint16_t)atoi(vers_tok);
2474 }
2475 }
2476 }
2477 if (file_major_vers != 1 || file_minor_vers != 0 || file_patch_vers > 1) {
2478 loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
2479 "%s Unexpected manifest file version (expected 1.0.0 or "
2480 "1.0.1), may cause errors",
2481 filename);
2482 }
2483 cJSON_Free(file_vers);
2484 // If "layers" is present, read in the array of layer objects
2485 layers_node = cJSON_GetObjectItem(json, "layers");
2486 if (layers_node != NULL) {
2487 int numItems = cJSON_GetArraySize(layers_node);
2488 if (file_major_vers == 1 && file_minor_vers == 0 &&
2489 file_patch_vers == 0) {
            // file_vers was freed above, so report the parsed version
            // numbers instead of the original string
            loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
                       "\"layers\" tag not officially added until file version "
                       "1.0.1, but %s is reporting version %d.%d.%d",
                       filename, file_major_vers, file_minor_vers,
                       file_patch_vers);
2494 }
2495 for (int curLayer = 0; curLayer < numItems; curLayer++) {
2496 layer_node = cJSON_GetArrayItem(layers_node, curLayer);
2497 if (layer_node == NULL) {
2498 loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
2499 "Can't find \"layers\" array element %d object in "
2500 "manifest JSON file %s, skipping this file",
2501 curLayer, filename);
2502 return;
2503 }
2504 loader_read_json_layer(inst, layer_instance_list, layer_node, item,
2505 disable_environment, is_implicit, filename);
2506 }
2507 } else {
2508 // Otherwise, try to read in individual layers
2509 layer_node = cJSON_GetObjectItem(json, "layer");
2510 if (layer_node == NULL) {
2511 loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
2512 "Can't find \"layer\" object in manifest JSON file %s, "
2513 "skipping this file",
2514 filename);
2515 return;
2516 }
2517 // Loop through all "layer" objects in the file to get a count of them
2518 // first.
2519 uint16_t layer_count = 0;
2520 cJSON *tempNode = layer_node;
2521 do {
2522 tempNode = tempNode->next;
2523 layer_count++;
2524 } while (tempNode != NULL);
2525 /*
2526 * Throw a warning if we encounter multiple "layer" objects in file
2527 * versions newer than 1.0.0. Having multiple objects with the same
2528 * name at the same level is actually a JSON standard violation.
2529 */
2530 if (layer_count > 1 &&
2531 (file_major_vers > 1 ||
2532 !(file_minor_vers == 0 && file_patch_vers == 0))) {
2533 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
2534 "Multiple \"layer\" nodes are deprecated starting in "
2535 "file version \"1.0.1\". Please use \"layers\" : [] "
2536 "array instead in %s.",
2537 filename);
2538 } else {
2539 do {
2540 loader_read_json_layer(inst, layer_instance_list, layer_node,
2541 item, disable_environment, is_implicit,
2542 filename);
2543 layer_node = layer_node->next;
2544 } while (layer_node != NULL);
2545 }
2546 }
2547 return;
2548 }
2549
2550 /**
2551 * Find the Vulkan library manifest files.
2552 *
 * This function scans the "location" or "env_override" directories/files
 * for a list of JSON manifest files. If env_override is non-NULL and has a
 * valid value, then location is ignored; otherwise location is used to look
 * for manifest files. The location is interpreted as a Registry path on
 * Windows and as directory path(s) on Linux. "home_location" is an additional
 * directory in the user's home directory to look at. It is expanded into the
 * dir path $XDG_DATA_HOME/home_location or $HOME/.local/share/home_location
 * depending on environment variables. This "home_location" is only used on
 * Linux.
 *
 * \returns
 * VkResult
 * A string list of manifest files to be opened in the out_files param.
 * The list has a pointer to a string for each manifest filename.
 * When done using the list in out_files, the pointers should be freed.
 * Location or override string lists can be either files or directories as
 * follows:
2570 * | location | override
2571 * --------------------------------
2572 * Win ICD | files | files
2573 * Win Layer | files | dirs
2574 * Linux ICD | dirs | files
2575 * Linux Layer| dirs | dirs
2576 */
2577 static VkResult loader_get_manifest_files(
2578 const struct loader_instance *inst, const char *env_override,
2579 char *source_override, bool is_layer, const char *location,
2580 const char *home_location, struct loader_manifest_files *out_files) {
2581 char * override = NULL;
2582 char *loc, *orig_loc = NULL;
2583 char *reg = NULL;
2584 char *file, *next_file, *name;
2585 size_t alloced_count = 64;
2586 char full_path[2048];
2587 DIR *sysdir = NULL;
2588 bool list_is_dirs = false;
2589 struct dirent *dent;
2590 VkResult res = VK_SUCCESS;
2591
2592 out_files->count = 0;
2593 out_files->filename_list = NULL;
2594
2595 if (source_override != NULL) {
2596 override = source_override;
2597 } else if (env_override != NULL &&
2598 (override = loader_getenv(env_override, inst))) {
2599 #if !defined(_WIN32)
2600 if (geteuid() != getuid() || getegid() != getgid()) {
2601 /* Don't allow setuid apps to use the env var: */
2602 loader_free_getenv(override, inst);
2603 override = NULL;
2604 }
2605 #endif
2606 }
2607
2608 #if !defined(_WIN32)
2609 if (location == NULL && home_location == NULL) {
2610 #else
2611 home_location = NULL;
2612 if (location == NULL) {
2613 #endif
2614 loader_log(
2615 inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
2616 "Can't get manifest files with NULL location, env_override=%s",
2617 env_override);
2618 res = VK_ERROR_INITIALIZATION_FAILED;
2619 goto out;
2620 }
2621
2622 #if defined(_WIN32)
2623 list_is_dirs = (is_layer && override != NULL) ? true : false;
2624 #else
2625 list_is_dirs = (override == NULL || is_layer) ? true : false;
2626 #endif
2627 // Make a copy of the input we are using so it is not modified
2628 // Also handle getting the location(s) from registry on Windows
2629 if (override == NULL) {
2630 loc = loader_stack_alloc(strlen(location) + 1);
2631 if (loc == NULL) {
2632 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
2633 "Out of memory can't get manifest files");
2634 res = VK_ERROR_OUT_OF_HOST_MEMORY;
2635 goto out;
2636 }
2637 strcpy(loc, location);
2638 #if defined(_WIN32)
2639 reg = loader_get_registry_files(inst, loc);
2640 if (reg == NULL) {
2641 if (!is_layer) {
2642 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
2643 "Registry lookup failed can't get ICD manifest "
2644 "files, do you have a Vulkan driver installed");
2645 // This typically only fails when out of memory, which is
2646 // critical
2647 // if this is for the loader.
2648 res = VK_ERROR_OUT_OF_HOST_MEMORY;
2649 } else {
2650 // warning only for layers
2651 loader_log(
2652 inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
2653 "Registry lookup failed can't get layer manifest files");
2654 // Return success for now since it's not critical for layers
2655 res = VK_SUCCESS;
2656 }
2657 goto out;
2658 }
2659 orig_loc = loc;
2660 loc = reg;
2661 #endif
2662 } else {
2663 loc = loader_stack_alloc(strlen(override) + 1);
2664 if (loc == NULL) {
2665 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
2666 "Out of memory can't get manifest files");
2667 res = VK_ERROR_OUT_OF_HOST_MEMORY;
2668 goto out;
2669 }
2670 strcpy(loc, override);
2671 if (source_override == NULL) {
2672 loader_free_getenv(override, inst);
2673 }
2674 }
2675
2676 // Print out the paths being searched if debugging is enabled
2677 loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0,
2678 "Searching the following paths for manifest files: %s\n", loc);
2679
2680 file = loc;
2681 while (*file) {
2682 next_file = loader_get_next_path(file);
2683 if (list_is_dirs) {
2684 sysdir = opendir(file);
2685 name = NULL;
2686 if (sysdir) {
2687 dent = readdir(sysdir);
2688 if (dent == NULL)
2689 break;
2690 name = &(dent->d_name[0]);
2691 loader_get_fullpath(name, file, sizeof(full_path), full_path);
2692 name = full_path;
2693 }
2694 } else {
2695 #if defined(_WIN32)
2696 name = file;
2697 #else
2698 // only Linux has relative paths
2699 char *dir;
2700 // make a copy of location so it isn't modified
2701 dir = loader_stack_alloc(strlen(loc) + 1);
2702 if (dir == NULL) {
2703 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
2704 "Out of memory can't get manifest files");
                res = VK_ERROR_OUT_OF_HOST_MEMORY;
                goto out;
2706 }
2707 strcpy(dir, loc);
2708
2709 loader_get_fullpath(file, dir, sizeof(full_path), full_path);
2710
2711 name = full_path;
2712 #endif
2713 }
2714 while (name) {
2715 /* Look for files ending with ".json" suffix */
            uint32_t nlen = (uint32_t)strlen(name);
            if ((nlen > 5) && !strncmp(name + nlen - 5, ".json", 5)) {
2719 if (out_files->count == 0) {
2720 out_files->filename_list = loader_instance_heap_alloc(
2721 inst, alloced_count * sizeof(char *),
2722 VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
2723 } else if (out_files->count == alloced_count) {
2724 out_files->filename_list = loader_instance_heap_realloc(
2725 inst, out_files->filename_list,
2726 alloced_count * sizeof(char *),
2727 alloced_count * sizeof(char *) * 2,
2728 VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
2729 alloced_count *= 2;
2730 }
2731 if (out_files->filename_list == NULL) {
2732 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
2733 "Out of memory can't alloc manifest file list");
2734 res = VK_ERROR_OUT_OF_HOST_MEMORY;
2735 goto out;
2736 }
2737 out_files->filename_list[out_files->count] =
2738 loader_instance_heap_alloc(
2739 inst, strlen(name) + 1,
2740 VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
2741 if (out_files->filename_list[out_files->count] == NULL) {
2742 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
2743 "Out of memory can't get manifest files");
2744 res = VK_ERROR_OUT_OF_HOST_MEMORY;
2745 goto out;
2746 }
2747 strcpy(out_files->filename_list[out_files->count], name);
2748 out_files->count++;
2749 } else if (!list_is_dirs) {
2750 loader_log(
2751 inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
2752 "Skipping manifest file %s, file name must end in .json",
2753 name);
2754 }
2755 if (list_is_dirs) {
2756 dent = readdir(sysdir);
2757 if (dent == NULL) {
2758 break;
2759 }
2760 name = &(dent->d_name[0]);
2761 loader_get_fullpath(name, file, sizeof(full_path), full_path);
2762 name = full_path;
2763 } else {
2764 break;
2765 }
2766 }
2767 if (sysdir) {
2768 closedir(sysdir);
2769 sysdir = NULL;
2770 }
2771 file = next_file;
2772 #if !defined(_WIN32)
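        // Once the regular search locations are exhausted (and no override
        // was used), fall back to the per-user location:
        // $XDG_DATA_HOME/<home_location> if XDG_DATA_HOME is set, otherwise
        // $HOME/.local/share/<home_location>.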
2773 if (home_location != NULL &&
2774 (next_file == NULL || *next_file == '\0') && override == NULL) {
2775 char *xdgdatahome = secure_getenv("XDG_DATA_HOME");
2776 size_t len;
2777 if (xdgdatahome != NULL) {
2778
2779 char *home_loc = loader_stack_alloc(strlen(xdgdatahome) + 2 +
2780 strlen(home_location));
2781 if (home_loc == NULL) {
2782 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
2783 "Out of memory can't get manifest files");
2784 res = VK_ERROR_OUT_OF_HOST_MEMORY;
2785 goto out;
2786 }
2787 strcpy(home_loc, xdgdatahome);
2788 // Add directory separator if needed
2789 if (home_location[0] != DIRECTORY_SYMBOL) {
2790 len = strlen(home_loc);
2791 home_loc[len] = DIRECTORY_SYMBOL;
2792 home_loc[len + 1] = '\0';
2793 }
2794 strcat(home_loc, home_location);
2795 file = home_loc;
2796 next_file = loader_get_next_path(file);
2797 home_location = NULL;
2798
2799 loader_log(
2800 inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0,
2801 "Searching the following path for manifest files: %s\n",
2802 home_loc);
2803 list_is_dirs = true;
2804
2805 } else {
2806
2807 char *home = secure_getenv("HOME");
2808 if (home != NULL) {
2809 char *home_loc = loader_stack_alloc(strlen(home) + 16 +
2810 strlen(home_location));
2811 if (home_loc == NULL) {
2812 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
2813 "Out of memory can't get manifest files");
2814 res = VK_ERROR_OUT_OF_HOST_MEMORY;
2815 goto out;
2816 }
2817 strcpy(home_loc, home);
2818
                    len = strlen(home);
                    // append a directory separator only if HOME does not
                    // already end in one
                    if (len == 0 || home[len - 1] != DIRECTORY_SYMBOL) {
                        home_loc[len] = DIRECTORY_SYMBOL;
                        home_loc[len + 1] = '\0';
                    }
2824 strcat(home_loc, ".local/share");
2825
2826 if (home_location[0] != DIRECTORY_SYMBOL) {
2827 len = strlen(home_loc);
2828 home_loc[len] = DIRECTORY_SYMBOL;
2829 home_loc[len + 1] = '\0';
2830 }
2831 strcat(home_loc, home_location);
2832 file = home_loc;
2833 next_file = loader_get_next_path(file);
2834 home_location = NULL;
2835
2836 loader_log(
2837 inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0,
2838 "Searching the following path for manifest files: %s\n",
2839 home_loc);
2840 list_is_dirs = true;
2841 } else {
                    // without knowing HOME, we just give up
2843 }
2844 }
2845 }
2846 #endif
2847 }
2848
2849 out:
2850 if (VK_SUCCESS != res && NULL != out_files->filename_list) {
2851 for (uint32_t remove = 0; remove < out_files->count; remove++) {
2852 loader_instance_heap_free(inst, out_files->filename_list[remove]);
2853 }
2854 loader_instance_heap_free(inst, out_files->filename_list);
2855 out_files->count = 0;
2856 out_files->filename_list = NULL;
2857 }
2858
2859 if (NULL != sysdir) {
2860 closedir(sysdir);
2861 }
2862
2863 if (NULL != reg && reg != orig_loc) {
2864 loader_instance_heap_free(inst, reg);
2865 }
2866 return res;
2867 }
2868
2869 void loader_init_icd_lib_list() {}
2870
2871 void loader_destroy_icd_lib_list() {}
2872 /**
2873 * Try to find the Vulkan ICD driver(s).
2874 *
 * This function scans the default system loader path(s) or the path
 * specified by the \c VK_ICD_FILENAMES environment variable in
 * order to find loadable VK ICD manifest files. From these
 * manifest files it finds the ICD libraries.
 *
 * \returns
 * Vulkan result
 * (on result == VK_SUCCESS) a list of ICDs that were discovered
2883 */
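/*
 * For reference, a minimal ICD manifest file has roughly this shape (an
 * illustrative sketch only; the library_path and api_version values are
 * hypothetical):
 *
 *   {
 *       "file_format_version": "1.0.0",
 *       "ICD": {
 *           "library_path": "path/to/vendor_icd_library.so",
 *           "api_version": "1.0.21"
 *       }
 *   }
 */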
2884 VkResult loader_icd_scan(const struct loader_instance *inst,
2885 struct loader_icd_libs *icds) {
2886 char *file_str;
2887 uint16_t file_major_vers = 0;
2888 uint16_t file_minor_vers = 0;
2889 uint16_t file_patch_vers = 0;
2890 char *vers_tok;
2891 struct loader_manifest_files manifest_files;
2892 VkResult res = VK_SUCCESS;
2893 bool lockedMutex = false;
2894 cJSON *json = NULL;
2895 uint32_t num_good_icds = 0;
2896
2897 memset(&manifest_files, 0, sizeof(struct loader_manifest_files));
2898
2899 res = loader_scanned_icd_init(inst, icds);
2900 if (VK_SUCCESS != res) {
2901 goto out;
2902 }
2903
2904 // Get a list of manifest files for ICDs
2905 res = loader_get_manifest_files(inst, "VK_ICD_FILENAMES", NULL, false,
2906 DEFAULT_VK_DRIVERS_INFO,
2907 HOME_VK_DRIVERS_INFO, &manifest_files);
2908 if (VK_SUCCESS != res || manifest_files.count == 0) {
2909 goto out;
2910 }
2911 loader_platform_thread_lock_mutex(&loader_json_lock);
2912 lockedMutex = true;
2913 for (uint32_t i = 0; i < manifest_files.count; i++) {
2914 file_str = manifest_files.filename_list[i];
2915 if (file_str == NULL) {
2916 continue;
2917 }
2918
2919 res = loader_get_json(inst, file_str, &json);
2920 if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
2921 break;
2922 } else if (VK_SUCCESS != res || NULL == json) {
2923 continue;
2924 }
2925
2926 cJSON *item, *itemICD;
2927 item = cJSON_GetObjectItem(json, "file_format_version");
2928 if (item == NULL) {
2929 if (num_good_icds == 0) {
2930 res = VK_ERROR_INITIALIZATION_FAILED;
2931 }
2932 cJSON_Delete(json);
2933 json = NULL;
2934 continue;
2935 }
2936 char *file_vers = cJSON_Print(item);
2937 if (NULL == file_vers) {
2938 // Only reason the print can fail is if there was an allocation
2939 // issue
2940 if (num_good_icds == 0) {
2941 res = VK_ERROR_OUT_OF_HOST_MEMORY;
2942 }
2943 cJSON_Delete(json);
2944 json = NULL;
2945 continue;
2946 }
2947 loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0,
2948 "Found manifest file %s, version %s", file_str, file_vers);
2949 // Get the major/minor/and patch as integers for easier comparison
2950 vers_tok = strtok(file_vers, ".\"\n\r");
2951 if (NULL != vers_tok) {
2952 file_major_vers = (uint16_t)atoi(vers_tok);
2953 vers_tok = strtok(NULL, ".\"\n\r");
2954 if (NULL != vers_tok) {
2955 file_minor_vers = (uint16_t)atoi(vers_tok);
2956 vers_tok = strtok(NULL, ".\"\n\r");
2957 if (NULL != vers_tok) {
2958 file_patch_vers = (uint16_t)atoi(vers_tok);
2959 }
2960 }
2961 }
2962 if (file_major_vers != 1 || file_minor_vers != 0 || file_patch_vers > 1)
2963 loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
2964 "Unexpected manifest file version (expected 1.0.0 or "
2965 "1.0.1), may "
2966 "cause errors");
2967 cJSON_Free(file_vers);
2968 itemICD = cJSON_GetObjectItem(json, "ICD");
2969 if (itemICD != NULL) {
2970 item = cJSON_GetObjectItem(itemICD, "library_path");
2971 if (item != NULL) {
2972 char *temp = cJSON_Print(item);
2973 if (!temp || strlen(temp) == 0) {
2974 loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
2975 "Can't find \"library_path\" in ICD JSON file "
2976 "%s, skipping",
2977 file_str);
2978 if (num_good_icds == 0) {
2979 res = VK_ERROR_OUT_OF_HOST_MEMORY;
2980 }
2981 cJSON_Free(temp);
2982 cJSON_Delete(json);
2983 json = NULL;
2984 continue;
2985 }
2986 // strip out extra quotes
2987 temp[strlen(temp) - 1] = '\0';
2988 char *library_path = loader_stack_alloc(strlen(temp) + 1);
2989 if (NULL == library_path) {
2990 loader_log(
2991 inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
2992 "Can't allocate space for \"library_path\" in ICD "
2993 "JSON file %s, skipping",
2994 file_str);
2995 res = VK_ERROR_OUT_OF_HOST_MEMORY;
2996 cJSON_Free(temp);
2997 cJSON_Delete(json);
2998 json = NULL;
2999 goto out;
3000 }
3001 strcpy(library_path, &temp[1]);
3002 cJSON_Free(temp);
3003 if (strlen(library_path) == 0) {
3004 loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
3005 "Can't find \"library_path\" in ICD JSON file "
3006 "%s, skipping",
3007 file_str);
3008 cJSON_Delete(json);
3009 json = NULL;
3010 continue;
3011 }
3012 char fullpath[MAX_STRING_SIZE];
3013 // Print out the paths being searched if debugging is enabled
3014 loader_log(
3015 inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0,
3016 "Searching for ICD drivers named %s default dir %s\n",
3017 library_path, DEFAULT_VK_DRIVERS_PATH);
3018 if (loader_platform_is_path(library_path)) {
3019 // a relative or absolute path
3020 char *name_copy = loader_stack_alloc(strlen(file_str) + 1);
3021 char *rel_base;
3022 strcpy(name_copy, file_str);
3023 rel_base = loader_platform_dirname(name_copy);
3024 loader_expand_path(library_path, rel_base, sizeof(fullpath),
3025 fullpath);
3026 } else {
3027 // a filename which is assumed in a system directory
3028 loader_get_fullpath(library_path, DEFAULT_VK_DRIVERS_PATH,
3029 sizeof(fullpath), fullpath);
3030 }
3031
3032 uint32_t vers = 0;
3033 item = cJSON_GetObjectItem(itemICD, "api_version");
3034 if (item != NULL) {
3035 temp = cJSON_Print(item);
3036 if (NULL == temp) {
3037 // Only reason the print can fail is if there was an
3038 // allocation issue
3039 res = VK_ERROR_OUT_OF_HOST_MEMORY;
3040 goto out;
3041 }
3042 vers = loader_make_version(temp);
3043 cJSON_Free(temp);
3044 }
3045 res = loader_scanned_icd_add(inst, icds, fullpath, vers);
3046 if (VK_SUCCESS != res) {
3047 goto out;
3048 }
3049 num_good_icds++;
3050 } else {
3051 loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
3052 "Can't find \"library_path\" object in ICD JSON "
3053 "file %s, skipping",
3054 file_str);
3055 }
3056 } else {
3057 loader_log(
3058 inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
3059 "Can't find \"ICD\" object in ICD JSON file %s, skipping",
3060 file_str);
3061 }
3062
3063 cJSON_Delete(json);
3064 json = NULL;
3065 }
3066
3067 out:
3068 if (NULL != json) {
3069 cJSON_Delete(json);
3070 }
3071 if (NULL != manifest_files.filename_list) {
3072 for (uint32_t i = 0; i < manifest_files.count; i++) {
3073 if (NULL != manifest_files.filename_list[i]) {
3074 loader_instance_heap_free(inst,
3075 manifest_files.filename_list[i]);
3076 }
3077 }
3078 loader_instance_heap_free(inst, manifest_files.filename_list);
3079 }
3080 if (lockedMutex) {
3081 loader_platform_thread_unlock_mutex(&loader_json_lock);
3082 }
3083 return res;
3084 }
3085
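/**
 * Find and parse all explicit and implicit layer manifest files and add the
 * discovered layers to instance_layers. If every component of the standard
 * validation layer set is present, a meta layer entry is added as well.
 */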
3086 void loader_layer_scan(const struct loader_instance *inst,
3087 struct loader_layer_list *instance_layers) {
3088 char *file_str;
3089 struct loader_manifest_files
3090 manifest_files[2]; // [0] = explicit, [1] = implicit
3091 cJSON *json;
3092 uint32_t implicit;
3093 bool lockedMutex = false;
3094
3095 memset(manifest_files, 0, sizeof(struct loader_manifest_files) * 2);
3096
3097 // Get a list of manifest files for explicit layers
3098 if (VK_SUCCESS !=
3099 loader_get_manifest_files(inst, LAYERS_PATH_ENV, LAYERS_SOURCE_PATH,
3100 true, DEFAULT_VK_ELAYERS_INFO,
3101 HOME_VK_ELAYERS_INFO, &manifest_files[0])) {
3102 goto out;
3103 }
3104
3105 // Get a list of manifest files for any implicit layers
3106 // Pass NULL for environment variable override - implicit layers are not
3107 // overridden by LAYERS_PATH_ENV
3108 if (VK_SUCCESS != loader_get_manifest_files(
3109 inst, NULL, NULL, true, DEFAULT_VK_ILAYERS_INFO,
3110 HOME_VK_ILAYERS_INFO, &manifest_files[1])) {
3111 goto out;
3112 }
3113
3114 // Make sure we have at least one layer, if not, go ahead and return
3115 if (manifest_files[0].count == 0 && manifest_files[1].count == 0) {
3116 goto out;
3117 }
3118
3119 // cleanup any previously scanned libraries
3120 loader_delete_layer_properties(inst, instance_layers);
3121
3122 loader_platform_thread_lock_mutex(&loader_json_lock);
3123 lockedMutex = true;
3124 for (implicit = 0; implicit < 2; implicit++) {
3125 for (uint32_t i = 0; i < manifest_files[implicit].count; i++) {
3126 file_str = manifest_files[implicit].filename_list[i];
3127 if (file_str == NULL)
3128 continue;
3129
3130 // parse file into JSON struct
3131 VkResult res = loader_get_json(inst, file_str, &json);
3132 if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
3133 break;
3134 } else if (VK_SUCCESS != res || NULL == json) {
3135 continue;
3136 }
3137
3138 loader_add_layer_properties(inst, instance_layers, json,
3139 (implicit == 1), file_str);
3140 cJSON_Delete(json);
3141 }
3142 }
3143
3144 // add a meta layer for validation if the validation layers are all present
3145 loader_add_layer_property_meta(inst, sizeof(std_validation_names) /
3146 sizeof(std_validation_names[0]),
3147 std_validation_names, instance_layers);
3148
3149 out:
3150
3151 for (uint32_t manFile = 0; manFile < 2; manFile++) {
3152 if (NULL != manifest_files[manFile].filename_list) {
3153 for (uint32_t i = 0; i < manifest_files[manFile].count; i++) {
3154 if (NULL != manifest_files[manFile].filename_list[i]) {
3155 loader_instance_heap_free(
3156 inst, manifest_files[manFile].filename_list[i]);
3157 }
3158 }
3159 loader_instance_heap_free(inst,
3160 manifest_files[manFile].filename_list);
3161 }
3162 }
3163 if (lockedMutex) {
3164 loader_platform_thread_unlock_mutex(&loader_json_lock);
3165 }
3166 }
3167
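/**
 * Find and parse only the implicit layer manifest files and add the
 * discovered layers to instance_layers. The standard validation meta layer
 * is added if all of its component layers are present.
 */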
3168 void loader_implicit_layer_scan(const struct loader_instance *inst,
3169 struct loader_layer_list *instance_layers) {
3170 char *file_str;
3171 struct loader_manifest_files manifest_files;
3172 cJSON *json;
3173 uint32_t i;
3174
3175 // Pass NULL for environment variable override - implicit layers are not
3176 // overridden by LAYERS_PATH_ENV
3177 VkResult res = loader_get_manifest_files(
3178 inst, NULL, NULL, true, DEFAULT_VK_ILAYERS_INFO, HOME_VK_ILAYERS_INFO,
3179 &manifest_files);
3180 if (VK_SUCCESS != res || manifest_files.count == 0) {
3181 return;
3182 }
3183
3184 /* cleanup any previously scanned libraries */
3185 loader_delete_layer_properties(inst, instance_layers);
3186
3187 loader_platform_thread_lock_mutex(&loader_json_lock);
3188
3189 for (i = 0; i < manifest_files.count; i++) {
3190 file_str = manifest_files.filename_list[i];
3191 if (file_str == NULL) {
3192 continue;
3193 }
3194
3195 // parse file into JSON struct
3196 res = loader_get_json(inst, file_str, &json);
3197 if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
3198 break;
3199 } else if (VK_SUCCESS != res || NULL == json) {
3200 continue;
3201 }
3202
3203 loader_add_layer_properties(inst, instance_layers, json, true,
3204 file_str);
3205
3206 loader_instance_heap_free(inst, file_str);
3207 cJSON_Delete(json);
3208 }
3209 loader_instance_heap_free(inst, manifest_files.filename_list);
3210
3211 // add a meta layer for validation if the validation layers are all present
3212 loader_add_layer_property_meta(inst, sizeof(std_validation_names) /
3213 sizeof(std_validation_names[0]),
3214 std_validation_names, instance_layers);
3215
3216 loader_platform_thread_unlock_mutex(&loader_json_lock);
3217 }
3218
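// Internal GetInstanceProcAddr used at the bottom of the instance call chain:
// it returns itself for vkGetInstanceProcAddr, the loader terminators for
// vkCreateInstance and vkCreateDevice, and otherwise looks the name up in the
// instance dispatch table.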
3219 static VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
3220 loader_gpa_instance_internal(VkInstance inst, const char *pName) {
3221 if (!strcmp(pName, "vkGetInstanceProcAddr"))
3222 return (void *)loader_gpa_instance_internal;
3223 if (!strcmp(pName, "vkCreateInstance"))
3224 return (void *)terminator_CreateInstance;
3225 if (!strcmp(pName, "vkCreateDevice"))
3226 return (void *)terminator_CreateDevice;
3227
3228 // inst is not wrapped
3229 if (inst == VK_NULL_HANDLE) {
3230 return NULL;
3231 }
3232 VkLayerInstanceDispatchTable *disp_table =
3233 *(VkLayerInstanceDispatchTable **)inst;
3234 void *addr;
3235
3236 if (disp_table == NULL)
3237 return NULL;
3238
3239 bool found_name;
3240 addr =
3241 loader_lookup_instance_dispatch_table(disp_table, pName, &found_name);
3242 if (found_name) {
3243 return addr;
3244 }
3245
3246 // Don't call down the chain, this would be an infinite loop
3247 loader_log(NULL, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
3248 "loader_gpa_instance_internal() unrecognized name %s", pName);
3249 return NULL;
3250 }
3251
3252 void loader_override_terminating_device_proc(
3253 VkDevice device, struct loader_dev_dispatch_table *disp_table) {
3254 struct loader_device *dev;
3255 struct loader_icd *icd = loader_get_icd_and_device(device, &dev, NULL);
3256
3257 // Certain device entry-points still need to go through a terminator before
3258 // hitting the ICD. This could be for several reasons, but the main one
3259 // is currently unwrapping an object before passing the appropriate info
3260 // along to the ICD.
3261 if ((PFN_vkVoidFunction)disp_table->core_dispatch.CreateSwapchainKHR ==
3262 (PFN_vkVoidFunction)icd->GetDeviceProcAddr(device,
3263 "vkCreateSwapchainKHR")) {
3264 disp_table->core_dispatch.CreateSwapchainKHR =
3265 terminator_vkCreateSwapchainKHR;
3266 }
3267 }
3268
3269 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
3270 loader_gpa_device_internal(VkDevice device, const char *pName) {
3271 struct loader_device *dev;
3272 struct loader_icd *icd = loader_get_icd_and_device(device, &dev, NULL);
3273
3274 // Certain device entry-points still need to go through a terminator before
3275 // hitting the ICD. This could be for several reasons, but the main one
3276 // is currently unwrapping an object before passing the appropriate info
3277 // along to the ICD.
3278 if (!strcmp(pName, "vkCreateSwapchainKHR")) {
3279 return (PFN_vkVoidFunction)terminator_vkCreateSwapchainKHR;
3280 }
3281
3282 return icd->GetDeviceProcAddr(device, pName);
3283 }
3284
3285 /**
3286 * Initialize device_ext dispatch table entry as follows:
 * If dev == NULL, find all logical devices created within this instance and
 * init the entry (given by idx) in the ext dispatch table.
 * If dev != NULL, only initialize the entry in the given dev's dispatch table.
 * The initialization value is obtained by calling down the device chain with
 * GDPA.
 * If GDPA returns NULL, then don't initialize the dispatch table entry.
 */
3294 static void loader_init_dispatch_dev_ext_entry(struct loader_instance *inst,
3295 struct loader_device *dev,
3296 uint32_t idx,
3297 const char *funcName)
3298
3299 {
3300 void *gdpa_value;
3301 if (dev != NULL) {
3302 gdpa_value = dev->loader_dispatch.core_dispatch.GetDeviceProcAddr(
3303 dev->device, funcName);
3304 if (gdpa_value != NULL)
3305 dev->loader_dispatch.ext_dispatch.dev_ext[idx] =
3306 (PFN_vkDevExt)gdpa_value;
3307 } else {
3308 for (uint32_t i = 0; i < inst->total_icd_count; i++) {
3309 struct loader_icd *icd = &inst->icds[i];
3310 struct loader_device *ldev = icd->logical_device_list;
3311 while (ldev) {
3312 gdpa_value =
3313 ldev->loader_dispatch.core_dispatch.GetDeviceProcAddr(
3314 ldev->device, funcName);
3315 if (gdpa_value != NULL)
3316 ldev->loader_dispatch.ext_dispatch.dev_ext[idx] =
3317 (PFN_vkDevExt)gdpa_value;
3318 ldev = ldev->next;
3319 }
3320 }
3321 }
3322 }
3323
/**
 * Find all device extensions in the hash table and initialize the dispatch
 * table for dev for each of those extension entrypoints found in the hash
 * table.
 */
3329 void loader_init_dispatch_dev_ext(struct loader_instance *inst,
3330 struct loader_device *dev) {
3331 for (uint32_t i = 0; i < MAX_NUM_DEV_EXTS; i++) {
3332 if (inst->disp_hash[i].func_name != NULL)
3333 loader_init_dispatch_dev_ext_entry(inst, dev, i,
3334 inst->disp_hash[i].func_name);
3335 }
3336 }
3337
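// Returns true if any ICD in this instance reports a non-NULL
// GetInstanceProcAddr for funcName.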
3338 static bool loader_check_icds_for_address(struct loader_instance *inst,
3339 const char *funcName) {
3340 struct loader_icd *icd;
3341 icd = inst->icds;
3342 while (icd) {
3343 if (icd->this_icd_lib->GetInstanceProcAddr(icd->instance, funcName))
3344 // this icd supports funcName
3345 return true;
3346 icd = icd->next;
3347 }
3348
3349 return false;
3350 }
3351
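// Returns true if any layer in the given list advertises funcName as a device
// extension entrypoint in its manifest.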
3352 static bool loader_check_layer_list_for_address(
3353 const struct loader_layer_list *const layers, const char *funcName) {
3354 // Iterate over the layers.
3355 for (uint32_t layer = 0; layer < layers->count; ++layer) {
3356 // Iterate over the extensions.
3357 const struct loader_device_extension_list *const extensions =
3358 &(layers->list[layer].device_extension_list);
3359 for (uint32_t extension = 0; extension < extensions->count;
3360 ++extension) {
3361 // Iterate over the entry points.
3362 const struct loader_dev_ext_props *const property =
3363 &(extensions->list[extension]);
3364 for (uint32_t entry = 0; entry < property->entrypoint_count;
3365 ++entry) {
3366 if (strcmp(property->entrypoints[entry], funcName) == 0) {
3367 return true;
3368 }
3369 }
3370 }
3371 }
3372
3373 return false;
3374 }
3375
3376 static void loader_free_dev_ext_table(struct loader_instance *inst) {
3377 for (uint32_t i = 0; i < MAX_NUM_DEV_EXTS; i++) {
3378 loader_instance_heap_free(inst, inst->disp_hash[i].func_name);
3379 loader_instance_heap_free(inst, inst->disp_hash[i].list.index);
3380 }
3381 memset(inst->disp_hash, 0, sizeof(inst->disp_hash));
3382 }
3383
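// Insert funcName into the device-extension dispatch hash table. The slot at
// *ptr_idx is used directly if it is empty; otherwise linear probing finds a
// free slot, that index is recorded in the original slot's collision list,
// and *ptr_idx is updated to the new index. Returns false if an allocation
// fails or the table is full.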
3384 static bool loader_add_dev_ext_table(struct loader_instance *inst,
3385 uint32_t *ptr_idx, const char *funcName) {
3386 uint32_t i;
3387 uint32_t idx = *ptr_idx;
3388 struct loader_dispatch_hash_list *list = &inst->disp_hash[idx].list;
3389
3390 if (!inst->disp_hash[idx].func_name) {
3391 // no entry here at this idx, so use it
3392 assert(list->capacity == 0);
3393 inst->disp_hash[idx].func_name = (char *)loader_instance_heap_alloc(
3394 inst, strlen(funcName) + 1, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
3395 if (inst->disp_hash[idx].func_name == NULL) {
3396 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
3397 "loader_add_dev_ext_table() can't allocate memory for "
3398 "func_name");
3399 return false;
3400 }
3401 strncpy(inst->disp_hash[idx].func_name, funcName, strlen(funcName) + 1);
3402 return true;
3403 }
3404
3405 // check for enough capacity
3406 if (list->capacity == 0) {
3407 list->index = loader_instance_heap_alloc(inst, 8 * sizeof(*(list->index)),
3408 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
3409 if (list->index == NULL) {
3410 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
3411 "loader_add_dev_ext_table() can't allocate list memory");
3412 return false;
3413 }
3414 list->capacity = 8 * sizeof(*(list->index));
3415 } else if (list->capacity < (list->count + 1) * sizeof(*(list->index))) {
3416 list->index = loader_instance_heap_realloc(inst, list->index, list->capacity,
3417 list->capacity * 2,
3418 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
3419 if (list->index == NULL) {
3420 loader_log(
3421 inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
3422 "loader_add_dev_ext_table() can't reallocate list memory");
3423 return false;
3424 }
3425 list->capacity *= 2;
3426 }
3427
3428 // find an unused index in the hash table and use it
3429 i = (idx + 1) % MAX_NUM_DEV_EXTS;
3430 do {
3431 if (!inst->disp_hash[i].func_name) {
3432 assert(inst->disp_hash[i].list.capacity == 0);
3433 inst->disp_hash[i].func_name =
3434 (char *)loader_instance_heap_alloc(inst, strlen(funcName) + 1,
3435 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
3436 if (inst->disp_hash[i].func_name == NULL) {
3437 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
                           "loader_add_dev_ext_table() can't allocate "
                           "func_name memory");
3440 return false;
3441 }
3442 strncpy(inst->disp_hash[i].func_name, funcName,
3443 strlen(funcName) + 1);
3444 list->index[list->count] = i;
3445 list->count++;
3446 *ptr_idx = i;
3447 return true;
3448 }
3449 i = (i + 1) % MAX_NUM_DEV_EXTS;
3450 } while (i != idx);
3451
3452 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
3453 "loader_add_dev_ext_table() couldn't insert into hash table; is "
3454 "it full?");
3455 return false;
3456 }
3457
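/* Look up funcName in the dispatch hash table: check the primary slot *idx
 * first, then that slot's list of secondary (collision) indices. On a match,
 * *idx is updated to the slot that holds funcName. */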
3458 static bool loader_name_in_dev_ext_table(struct loader_instance *inst,
3459 uint32_t *idx, const char *funcName) {
3460 uint32_t alt_idx;
3461 if (inst->disp_hash[*idx].func_name &&
3462 !strcmp(inst->disp_hash[*idx].func_name, funcName))
3463 return true;
3464
3465 // funcName wasn't at the primary spot in the hash table
3466 // search the list of secondary locations (shallow search, not deep search)
3467 for (uint32_t i = 0; i < inst->disp_hash[*idx].list.count; i++) {
3468 alt_idx = inst->disp_hash[*idx].list.index[i];
        if (!strcmp(inst->disp_hash[alt_idx].func_name, funcName)) {
3470 *idx = alt_idx;
3471 return true;
3472 }
3473 }
3474
3475 return false;
3476 }
3477
/**
 * This function returns a generic trampoline code address for unknown entry
 * points. Presumably, these unknown entry points (as given by funcName) are
 * device extension entrypoints. A hash table is used to keep a list of
 * unknown entry points and their mapping to the device extension dispatch
 * table (struct loader_dev_ext_dispatch_table).
 * \returns
 * For a given entry point string (funcName), if an existing mapping is found,
 * the trampoline address for that mapping is returned. Otherwise, this
 * unknown entry point has not been seen yet; check whether a layer or ICD
 * supports it. If so, a new entry in the hash table is initialized and the
 * trampoline address for the new entry is returned. NULL is returned if the
 * hash table is full or if no discovered layer or ICD returns a non-NULL
 * GetProcAddr for it.
 */
3496 void *loader_dev_ext_gpa(struct loader_instance *inst, const char *funcName) {
3497 uint32_t idx;
3498 uint32_t seed = 0;
3499
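    // Hash the entrypoint name into the fixed-size dispatch hash table;
    // collisions are resolved via each slot's secondary index list.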
3500 idx = murmurhash(funcName, strlen(funcName), seed) % MAX_NUM_DEV_EXTS;
3501
3502 if (loader_name_in_dev_ext_table(inst, &idx, funcName))
3503 // found funcName already in hash
3504 return loader_get_dev_ext_trampoline(idx);
3505
3506 // Check if funcName is supported in either ICDs or a layer library
3507 if (!loader_check_icds_for_address(inst, funcName) &&
3508 !loader_check_layer_list_for_address(&inst->instance_layer_list, funcName)) {
        // not supported by any ICD or any layer, so nothing to add here
3510 return NULL;
3511 }
3512
3513 if (loader_add_dev_ext_table(inst, &idx, funcName)) {
3514 // successfully added new table entry
        // init any dev dispatch table entries as needed
3516 loader_init_dispatch_dev_ext_entry(inst, NULL, idx, funcName);
3517 return loader_get_dev_ext_trampoline(idx);
3518 }
3519
3520 return NULL;
3521 }
3522
3523 struct loader_instance *loader_get_instance(const VkInstance instance) {
3524 /* look up the loader_instance in our list by comparing dispatch tables, as
3525 * there is no guarantee the instance is still a loader_instance* after any
3526 * layers which wrap the instance object.
3527 */
3528 const VkLayerInstanceDispatchTable *disp;
3529 struct loader_instance *ptr_instance = NULL;
3530 disp = loader_get_instance_dispatch(instance);
3531 for (struct loader_instance *inst = loader.instances; inst;
3532 inst = inst->next) {
3533 if (inst->disp == disp) {
3534 ptr_instance = inst;
3535 break;
3536 }
3537 }
3538 return ptr_instance;
3539 }
3540
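/* Open the layer library named by prop->lib_name, cache the handle in
 * prop->lib_handle, and return it (NULL on failure, with an error logged). */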
3541 static loader_platform_dl_handle
3542 loader_open_layer_lib(const struct loader_instance *inst, const char *chain_type,
3543 struct loader_layer_properties *prop) {
3544
3545 if ((prop->lib_handle = loader_platform_open_library(prop->lib_name)) ==
3546 NULL) {
3547 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
3548 loader_platform_open_library_error(prop->lib_name));
3549 } else {
3550 loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0,
3551 "Chain: %s: Loading layer library %s", chain_type,
3552 prop->lib_name);
3553 }
3554
3555 return prop->lib_handle;
3556 }
3557
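/* Close the layer library handle cached in prop, if any, and clear it. */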
3558 static void
3559 loader_close_layer_lib(const struct loader_instance *inst,
3560 struct loader_layer_properties *prop) {
3561
3562 if (prop->lib_handle) {
3563 loader_platform_close_library(prop->lib_handle);
3564 loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0,
3565 "Unloading layer library %s", prop->lib_name);
3566 prop->lib_handle = NULL;
3567 }
3568 }
3569
3570 void loader_deactivate_layers(const struct loader_instance *instance,
3571 struct loader_device *device,
3572 struct loader_layer_list *list) {
3573 /* delete instance list of enabled layers and close any layer libraries */
3574 for (uint32_t i = 0; i < list->count; i++) {
3575 struct loader_layer_properties *layer_prop = &list->list[i];
3576
3577 loader_close_layer_lib(instance, layer_prop);
3578 }
3579 loader_destroy_layer_list(instance, device, list);
3580 }
3581
/**
 * Go through search_list and find any layers which match type. If a layer's
 * type matches, add it to list.
 */
3586 static void
3587 loader_add_layer_implicit(const struct loader_instance *inst,
3588 const enum layer_type type,
3589 struct loader_layer_list *list,
3590 const struct loader_layer_list *search_list) {
3591 bool enable;
3592 char *env_value;
3593 uint32_t i;
3594 for (i = 0; i < search_list->count; i++) {
3595 const struct loader_layer_properties *prop = &search_list->list[i];
3596 if (prop->type & type) {
3597 /* Found an implicit layer, see if it should be enabled */
3598 enable = false;
3599
            // If no enable_environment variable is specified, this implicit
            // layer should always be enabled. Otherwise check if the variable
            // is set
3603 if (prop->enable_env_var.name[0] == 0) {
3604 enable = true;
3605 } else {
3606 env_value = loader_getenv(prop->enable_env_var.name, inst);
3607 if (env_value && !strcmp(prop->enable_env_var.value, env_value))
3608 enable = true;
3609 loader_free_getenv(env_value, inst);
3610 }
3611
            // disable_environment has priority: if both the enable and
            // disable environment variables are set, the layer is disabled.
            // Implicit layers are required to have a disable_environment
            // variable
3616 env_value = loader_getenv(prop->disable_env_var.name, inst);
3617 if (env_value) {
3618 enable = false;
3619 }
3620 loader_free_getenv(env_value, inst);
3621
3622 if (enable) {
3623 loader_add_to_layer_list(inst, list, 1, prop);
3624 }
3625 }
3626 }
3627 }
3628
/**
 * Get the layer name(s) from the env_name environment variable. If a layer is
 * found in search_list then add it to layer_list, but only if its type
 * matches.
 */
3634 static void loader_add_layer_env(struct loader_instance *inst,
3635 const enum layer_type type,
3636 const char *env_name,
3637 struct loader_layer_list *layer_list,
3638 const struct loader_layer_list *search_list) {
3639 char *layerEnv;
3640 char *next, *name;
3641
3642 layerEnv = loader_getenv(env_name, inst);
3643 if (layerEnv == NULL) {
3644 return;
3645 }
3646 name = loader_stack_alloc(strlen(layerEnv) + 1);
    if (name == NULL) {
        loader_free_getenv(layerEnv, inst);
        return;
    }
3650 strcpy(name, layerEnv);
3651
3652 loader_free_getenv(layerEnv, inst);
3653
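    /* Walk the list of layer names in the environment variable value
     * (separated by the platform's path separator), expanding the standard
     * validation meta-layer into its component layers when it is named. */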
3654 while (name && *name) {
3655 next = loader_get_next_path(name);
3656 if (!strcmp(std_validation_str, name)) {
            /* Add the meta layer's list of layers; don't attempt to remove
             * duplicate layers already added by the app or an env var.
             */
3661 loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0,
3662 "Expanding meta layer %s found in environment variable",
3663 std_validation_str);
3664 if (type == VK_LAYER_TYPE_INSTANCE_EXPLICIT)
3665 inst->activated_layers_are_std_val = true;
3666 for (uint32_t i = 0; i < sizeof(std_validation_names) /
3667 sizeof(std_validation_names[0]);
3668 i++) {
3669 loader_find_layer_name_add_list(inst, std_validation_names[i],
3670 type, search_list, layer_list);
3671 }
3672 } else {
3673 loader_find_layer_name_add_list(inst, name, type, search_list,
3674 layer_list);
3675 }
3676 name = next;
3677 }
3678
3679 return;
3680 }
3681
3682 VkResult
3683 loader_enable_instance_layers(struct loader_instance *inst,
3684 const VkInstanceCreateInfo *pCreateInfo,
3685 const struct loader_layer_list *instance_layers) {
3686 VkResult err;
3687
3688 assert(inst && "Cannot have null instance");
3689
3690 if (!loader_init_layer_list(inst, &inst->activated_layer_list)) {
3691 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
3692 "Failed to alloc Instance activated layer list");
3693 return VK_ERROR_OUT_OF_HOST_MEMORY;
3694 }
3695
3696 /* Add any implicit layers first */
3697 loader_add_layer_implicit(inst, VK_LAYER_TYPE_INSTANCE_IMPLICIT,
3698 &inst->activated_layer_list, instance_layers);
3699
3700 /* Add any layers specified via environment variable next */
3701 loader_add_layer_env(inst, VK_LAYER_TYPE_INSTANCE_EXPLICIT,
3702 "VK_INSTANCE_LAYERS", &inst->activated_layer_list,
3703 instance_layers);
3704
3705 /* Add layers specified by the application */
3706 err = loader_add_layer_names_to_list(
3707 inst, &inst->activated_layer_list, pCreateInfo->enabledLayerCount,
3708 pCreateInfo->ppEnabledLayerNames, instance_layers);
3709
3710 return err;
3711 }
3712
/*
 * Given the list of layers to activate in the loader_instance
 * structure, this function adds a VkLayerInstanceCreateInfo
 * structure to the VkInstanceCreateInfo.pNext pointer.
 * Each activated layer will have its own VkLayerInstanceLink
 * structure that tells the layer what Get*ProcAddr to call to
 * get function pointers to the next layer down.
 * Once the chain info has been created this function will
 * execute the CreateInstance call chain. Each layer will
 * then have an opportunity in its CreateInstance function
 * to set up its dispatch table when the lower layer returns
 * successfully.
 * Each layer can wrap or not-wrap the returned VkInstance object
 * as it sees fit.
 * The instance chain is terminated by a loader function
 * that will call CreateInstance on all available ICDs and
 * cache those VkInstance objects for future use.
 */
3731 VkResult loader_create_instance_chain(const VkInstanceCreateInfo *pCreateInfo,
3732 const VkAllocationCallbacks *pAllocator,
3733 struct loader_instance *inst,
3734 VkInstance *created_instance) {
3735 uint32_t activated_layers = 0;
3736 VkLayerInstanceCreateInfo chain_info;
3737 VkLayerInstanceLink *layer_instance_link_info = NULL;
3738 VkInstanceCreateInfo loader_create_info;
3739 VkResult res;
3740
3741 PFN_vkGetInstanceProcAddr nextGIPA = loader_gpa_instance_internal;
3742 PFN_vkGetInstanceProcAddr fpGIPA = loader_gpa_instance_internal;
3743
3744 memcpy(&loader_create_info, pCreateInfo, sizeof(VkInstanceCreateInfo));
3745
3746 if (inst->activated_layer_list.count > 0) {
3747
3748 chain_info.u.pLayerInfo = NULL;
3749 chain_info.pNext = pCreateInfo->pNext;
3750 chain_info.sType = VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO;
3751 chain_info.function = VK_LAYER_LINK_INFO;
3752 loader_create_info.pNext = &chain_info;
3753
3754 layer_instance_link_info = loader_stack_alloc(
3755 sizeof(VkLayerInstanceLink) * inst->activated_layer_list.count);
3756 if (!layer_instance_link_info) {
3757 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
3758 "Failed to alloc Instance objects for layer");
3759 return VK_ERROR_OUT_OF_HOST_MEMORY;
3760 }
3761
3762 /* Create instance chain of enabled layers */
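        /* Layers are walked from the end of the activated list to the
         * beginning so that the first layer the application enabled ends up
         * closest to the application. E.g. with layers {A, B} enabled in
         * that order, the resulting call chain is app -> A -> B -> loader
         * terminator. */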
3763 for (int32_t i = inst->activated_layer_list.count - 1; i >= 0; i--) {
3764 struct loader_layer_properties *layer_prop =
3765 &inst->activated_layer_list.list[i];
3766 loader_platform_dl_handle lib_handle;
3767
3768 lib_handle = loader_open_layer_lib(inst, "instance", layer_prop);
3769 if (!lib_handle)
3770 continue;
3771 if ((fpGIPA = layer_prop->functions.get_instance_proc_addr) ==
3772 NULL) {
3773 if (layer_prop->functions.str_gipa == NULL ||
3774 strlen(layer_prop->functions.str_gipa) == 0) {
3775 fpGIPA = (PFN_vkGetInstanceProcAddr)
3776 loader_platform_get_proc_address(
3777 lib_handle, "vkGetInstanceProcAddr");
3778 layer_prop->functions.get_instance_proc_addr = fpGIPA;
3779 } else
3780 fpGIPA = (PFN_vkGetInstanceProcAddr)
3781 loader_platform_get_proc_address(
3782 lib_handle, layer_prop->functions.str_gipa);
3783 if (!fpGIPA) {
3784 loader_log(
3785 inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
3786 "Failed to find vkGetInstanceProcAddr in layer %s",
3787 layer_prop->lib_name);
3788 continue;
3789 }
3790 }
3791
3792 layer_instance_link_info[activated_layers].pNext =
3793 chain_info.u.pLayerInfo;
3794 layer_instance_link_info[activated_layers]
3795 .pfnNextGetInstanceProcAddr = nextGIPA;
3796 chain_info.u.pLayerInfo =
3797 &layer_instance_link_info[activated_layers];
3798 nextGIPA = fpGIPA;
3799
3800 loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0,
3801 "Insert instance layer %s (%s)",
3802 layer_prop->info.layerName, layer_prop->lib_name);
3803
3804 activated_layers++;
3805 }
3806 }
3807
3808 PFN_vkCreateInstance fpCreateInstance =
3809 (PFN_vkCreateInstance)nextGIPA(*created_instance, "vkCreateInstance");
3810 if (fpCreateInstance) {
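        /* Also chain a VK_LOADER_DATA_CALLBACK create info so layers that
         * create dispatchable objects can ask the loader to set their loader
         * data via vkSetInstanceDispatch. */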
3811 VkLayerInstanceCreateInfo create_info_disp;
3812
3813 create_info_disp.sType = VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO;
3814 create_info_disp.function = VK_LOADER_DATA_CALLBACK;
3815
3816 create_info_disp.u.pfnSetInstanceLoaderData = vkSetInstanceDispatch;
3817
3818 create_info_disp.pNext = loader_create_info.pNext;
3819 loader_create_info.pNext = &create_info_disp;
3820 res =
3821 fpCreateInstance(&loader_create_info, pAllocator, created_instance);
3822 } else {
3823 // Couldn't find CreateInstance function!
3824 res = VK_ERROR_INITIALIZATION_FAILED;
3825 }
3826
3827 if (res != VK_SUCCESS) {
3828 // TODO: Need to clean up here
3829 } else {
3830 loader_init_instance_core_dispatch_table(inst->disp, nextGIPA,
3831 *created_instance);
3832 inst->instance = *created_instance;
3833 }
3834
3835 return res;
3836 }
3837
3838 void loader_activate_instance_layer_extensions(struct loader_instance *inst,
3839 VkInstance created_inst) {
3840
3841 loader_init_instance_extension_dispatch_table(
3842 inst->disp, inst->disp->GetInstanceProcAddr, created_inst);
3843 }
3844
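/*
 * Given the list of layers to activate in dev->activated_layer_list, build
 * the device call chain much like loader_create_instance_chain builds the
 * instance chain: each activated layer gets a VkLayerDeviceLink describing
 * the next layer's GetInstanceProcAddr and GetDeviceProcAddr, the link info
 * is chained onto VkDeviceCreateInfo.pNext, vkCreateDevice is called through
 * the top of the chain, and the device dispatch table is then built from the
 * resulting GetDeviceProcAddr chain.
 */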
3845 VkResult
3846 loader_create_device_chain(const struct loader_physical_device_tramp *pd,
3847 const VkDeviceCreateInfo *pCreateInfo,
3848 const VkAllocationCallbacks *pAllocator,
3849 const struct loader_instance *inst,
3850 struct loader_device *dev) {
3851 uint32_t activated_layers = 0;
3852 VkLayerDeviceLink *layer_device_link_info;
3853 VkLayerDeviceCreateInfo chain_info;
3854 VkDeviceCreateInfo loader_create_info;
3855 VkResult res;
3856
3857 PFN_vkGetDeviceProcAddr fpGDPA, nextGDPA = loader_gpa_device_internal;
3858 PFN_vkGetInstanceProcAddr fpGIPA, nextGIPA = loader_gpa_instance_internal;
3859
3860 memcpy(&loader_create_info, pCreateInfo, sizeof(VkDeviceCreateInfo));
3861
3862 layer_device_link_info = loader_stack_alloc(
3863 sizeof(VkLayerDeviceLink) * dev->activated_layer_list.count);
3864 if (!layer_device_link_info) {
3865 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
3866 "Failed to alloc Device objects for layer");
3867 return VK_ERROR_OUT_OF_HOST_MEMORY;
3868 }
3869
3870 if (dev->activated_layer_list.count > 0) {
3871 chain_info.sType = VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO;
3872 chain_info.function = VK_LAYER_LINK_INFO;
3873 chain_info.u.pLayerInfo = NULL;
3874 chain_info.pNext = pCreateInfo->pNext;
3875 loader_create_info.pNext = &chain_info;
3876
        /* Create device chain of enabled layers */
3878 for (int32_t i = dev->activated_layer_list.count - 1; i >= 0; i--) {
3879 struct loader_layer_properties *layer_prop =
3880 &dev->activated_layer_list.list[i];
3881 loader_platform_dl_handle lib_handle;
3882
3883 lib_handle = loader_open_layer_lib(inst, "device", layer_prop);
3884 if (!lib_handle)
3885 continue;
3886 if ((fpGIPA = layer_prop->functions.get_instance_proc_addr) ==
3887 NULL) {
3888 if (layer_prop->functions.str_gipa == NULL ||
3889 strlen(layer_prop->functions.str_gipa) == 0) {
3890 fpGIPA = (PFN_vkGetInstanceProcAddr)
3891 loader_platform_get_proc_address(
3892 lib_handle, "vkGetInstanceProcAddr");
3893 layer_prop->functions.get_instance_proc_addr = fpGIPA;
3894 } else
3895 fpGIPA = (PFN_vkGetInstanceProcAddr)
3896 loader_platform_get_proc_address(
3897 lib_handle, layer_prop->functions.str_gipa);
3898 if (!fpGIPA) {
3899 loader_log(
3900 inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
3901 "Failed to find vkGetInstanceProcAddr in layer %s",
3902 layer_prop->lib_name);
3903 continue;
3904 }
3905 }
3906 if ((fpGDPA = layer_prop->functions.get_device_proc_addr) == NULL) {
3907 if (layer_prop->functions.str_gdpa == NULL ||
3908 strlen(layer_prop->functions.str_gdpa) == 0) {
3909 fpGDPA = (PFN_vkGetDeviceProcAddr)
3910 loader_platform_get_proc_address(lib_handle,
3911 "vkGetDeviceProcAddr");
3912 layer_prop->functions.get_device_proc_addr = fpGDPA;
3913 } else
3914 fpGDPA = (PFN_vkGetDeviceProcAddr)
3915 loader_platform_get_proc_address(
3916 lib_handle, layer_prop->functions.str_gdpa);
3917 if (!fpGDPA) {
3918 loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0,
3919 "Failed to find vkGetDeviceProcAddr in layer %s",
3920 layer_prop->lib_name);
3921 continue;
3922 }
3923 }
3924
3925 layer_device_link_info[activated_layers].pNext =
3926 chain_info.u.pLayerInfo;
3927 layer_device_link_info[activated_layers]
3928 .pfnNextGetInstanceProcAddr = nextGIPA;
3929 layer_device_link_info[activated_layers].pfnNextGetDeviceProcAddr =
3930 nextGDPA;
3931 chain_info.u.pLayerInfo = &layer_device_link_info[activated_layers];
3932 nextGIPA = fpGIPA;
3933 nextGDPA = fpGDPA;
3934
3935 loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0,
3936 "Insert device layer %s (%s)",
3937 layer_prop->info.layerName, layer_prop->lib_name);
3938
3939 activated_layers++;
3940 }
3941 }
3942
3943 VkDevice created_device = (VkDevice)dev;
3944 PFN_vkCreateDevice fpCreateDevice =
3945 (PFN_vkCreateDevice)nextGIPA(inst->instance, "vkCreateDevice");
3946 if (fpCreateDevice) {
3947 VkLayerDeviceCreateInfo create_info_disp;
3948
3949 create_info_disp.sType = VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO;
3950 create_info_disp.function = VK_LOADER_DATA_CALLBACK;
3951
3952 create_info_disp.u.pfnSetDeviceLoaderData = vkSetDeviceDispatch;
3953
3954 create_info_disp.pNext = loader_create_info.pNext;
3955 loader_create_info.pNext = &create_info_disp;
3956 res = fpCreateDevice(pd->phys_dev, &loader_create_info, pAllocator,
3957 &created_device);
3958 if (res != VK_SUCCESS) {
3959 return res;
3960 }
3961 dev->device = created_device;
3962 } else {
3963 // Couldn't find CreateDevice function!
3964 return VK_ERROR_INITIALIZATION_FAILED;
3965 }
3966
3967 /* Initialize device dispatch table */
3968 loader_init_device_dispatch_table(&dev->loader_dispatch, nextGDPA,
3969 dev->device);
3970
3971 return res;
3972 }
3973
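/* Verify that every layer name the application requested is a well-formed
 * string and is present in the given layer list; otherwise return
 * VK_ERROR_LAYER_NOT_PRESENT. */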
3974 VkResult loader_validate_layers(const struct loader_instance *inst,
3975 const uint32_t layer_count,
3976 const char *const *ppEnabledLayerNames,
3977 const struct loader_layer_list *list) {
3978 struct loader_layer_properties *prop;
3979
3980 for (uint32_t i = 0; i < layer_count; i++) {
3981 VkStringErrorFlags result =
3982 vk_string_validate(MaxLoaderStringLength, ppEnabledLayerNames[i]);
3983 if (result != VK_STRING_ERROR_NONE) {
3984 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
3985 "Loader: Device ppEnabledLayerNames contains string "
3986 "that is too long or is badly formed");
3987 return VK_ERROR_LAYER_NOT_PRESENT;
3988 }
3989
3990 prop = loader_get_layer_property(ppEnabledLayerNames[i], list);
3991 if (!prop) {
3992 return VK_ERROR_LAYER_NOT_PRESENT;
3993 }
3994 }
3995 return VK_SUCCESS;
3996 }
3997
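/* Verify that each requested instance extension is provided either by an ICD
 * or by one of the layers the application enabled; otherwise return
 * VK_ERROR_EXTENSION_NOT_PRESENT. */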
3998 VkResult loader_validate_instance_extensions(
3999 const struct loader_instance *inst,
4000 const struct loader_extension_list *icd_exts,
4001 const struct loader_layer_list *instance_layer,
4002 const VkInstanceCreateInfo *pCreateInfo) {
4003
4004 VkExtensionProperties *extension_prop;
4005 struct loader_layer_properties *layer_prop;
4006
4007 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
4008 VkStringErrorFlags result = vk_string_validate(
4009 MaxLoaderStringLength, pCreateInfo->ppEnabledExtensionNames[i]);
4010 if (result != VK_STRING_ERROR_NONE) {
4011 loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
4012 "Loader: Instance ppEnabledExtensionNames contains "
4013 "string that is too long or is badly formed");
4014 return VK_ERROR_EXTENSION_NOT_PRESENT;
4015 }
4016
4017 extension_prop = get_extension_property(
4018 pCreateInfo->ppEnabledExtensionNames[i], icd_exts);
4019
4020 if (extension_prop) {
4021 continue;
4022 }
4023
4024 extension_prop = NULL;
4025
4026 /* Not in global list, search layer extension lists */
4027 for (uint32_t j = 0; j < pCreateInfo->enabledLayerCount; j++) {
4028 layer_prop = loader_get_layer_property(
4029 pCreateInfo->ppEnabledLayerNames[j], instance_layer);
4030 if (!layer_prop) {
4031 /* Should NOT get here, loader_validate_layers
4032 * should have already filtered this case out.
4033 */
4034 continue;
4035 }
4036
4037 extension_prop =
4038 get_extension_property(pCreateInfo->ppEnabledExtensionNames[i],
4039 &layer_prop->instance_extension_list);
4040 if (extension_prop) {
4041 /* Found the extension in one of the layers enabled by the app.
4042 */
4043 break;
4044 }
4045 }
4046
4047 if (!extension_prop) {
4048 /* Didn't find extension name in any of the global layers, error out
4049 */
4050 return VK_ERROR_EXTENSION_NOT_PRESENT;
4051 }
4052 }
4053 return VK_SUCCESS;
4054 }
4055
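/* Verify that each requested device extension is provided either by the ICD
 * or by one of the activated device layers; otherwise return
 * VK_ERROR_EXTENSION_NOT_PRESENT. */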
4056 VkResult loader_validate_device_extensions(
4057 struct loader_physical_device_tramp *phys_dev,
4058 const struct loader_layer_list *activated_device_layers,
4059 const struct loader_extension_list *icd_exts,
4060 const VkDeviceCreateInfo *pCreateInfo) {
4061 VkExtensionProperties *extension_prop;
4062 struct loader_layer_properties *layer_prop;
4063
4064 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
4065
4066 VkStringErrorFlags result = vk_string_validate(
4067 MaxLoaderStringLength, pCreateInfo->ppEnabledExtensionNames[i]);
4068 if (result != VK_STRING_ERROR_NONE) {
4069 loader_log(phys_dev->this_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4070 0, "Loader: Device ppEnabledExtensionNames contains "
4071 "string that is too long or is badly formed");
4072 return VK_ERROR_EXTENSION_NOT_PRESENT;
4073 }
4074
4075 const char *extension_name = pCreateInfo->ppEnabledExtensionNames[i];
4076 extension_prop = get_extension_property(extension_name, icd_exts);
4077
4078 if (extension_prop) {
4079 continue;
4080 }
4081
4082 /* Not in global list, search activated layer extension lists */
4083 for (uint32_t j = 0; j < activated_device_layers->count; j++) {
4084 layer_prop = &activated_device_layers->list[j];
4085
4086 extension_prop = get_dev_extension_property(
4087 extension_name, &layer_prop->device_extension_list);
4088 if (extension_prop) {
4089 /* Found the extension in one of the layers enabled by the app.
4090 */
4091 break;
4092 }
4093 }
4094
4095 if (!extension_prop) {
4096 /* Didn't find extension name in any of the device layers, error out
4097 */
4098 return VK_ERROR_EXTENSION_NOT_PRESENT;
4099 }
4100 }
4101 return VK_SUCCESS;
4102 }
4103
/**
 * Terminator functions for the Instance chain.
 * All are named terminator_<Vulkan API name>.
 */
4108 VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateInstance(
4109 const VkInstanceCreateInfo *pCreateInfo,
4110 const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
4111 struct loader_icd *icd;
4112 VkExtensionProperties *prop;
4113 char **filtered_extension_names = NULL;
4114 VkInstanceCreateInfo icd_create_info;
4115 VkResult res = VK_SUCCESS;
4116
4117 struct loader_instance *ptr_instance = (struct loader_instance *)*pInstance;
4118 memcpy(&icd_create_info, pCreateInfo, sizeof(icd_create_info));
4119
4120 icd_create_info.enabledLayerCount = 0;
4121 icd_create_info.ppEnabledLayerNames = NULL;
4122
4123 /*
4124 * NOTE: Need to filter the extensions to only those
4125 * supported by the ICD.
4126 * No ICD will advertise support for layers. An ICD
4127 * library could support a layer, but it would be
4128 * independent of the actual ICD, just in the same library.
4129 */
4130 filtered_extension_names =
4131 loader_stack_alloc(pCreateInfo->enabledExtensionCount * sizeof(char *));
4132 if (!filtered_extension_names) {
4133 res = VK_ERROR_OUT_OF_HOST_MEMORY;
4134 goto out;
4135 }
4136 icd_create_info.ppEnabledExtensionNames =
4137 (const char *const *)filtered_extension_names;
4138
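    /* For each scanned ICD: create a loader_icd for it, build the ICD's
     * instance extension list, filter the application's requested extensions
     * down to those the ICD supports, and call its vkCreateInstance. ICDs
     * that fail are skipped rather than failing the whole call. */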
4139 for (uint32_t i = 0; i < ptr_instance->icd_libs.count; i++) {
4140 icd = loader_icd_add(ptr_instance, &ptr_instance->icd_libs.list[i]);
4141 if (NULL == icd) {
4142 res = VK_ERROR_OUT_OF_HOST_MEMORY;
4143 goto out;
4144 }
4145 icd_create_info.enabledExtensionCount = 0;
4146 struct loader_extension_list icd_exts;
4147
4148 loader_log(ptr_instance, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0,
4149 "Build ICD instance extension list");
4150 // traverse scanned icd list adding non-duplicate extensions to the
4151 // list
4152 res = loader_init_generic_list(ptr_instance,
4153 (struct loader_generic_list *)&icd_exts,
4154 sizeof(VkExtensionProperties));
4155 if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
4156 // If out of memory, bail immediately.
4157 goto out;
4158 } else if (VK_SUCCESS != res) {
4159 // Something bad happened with this ICD, so free it and try the
4160 // next.
4161 ptr_instance->icds = icd->next;
4162 icd->next = NULL;
4163 loader_icd_destroy(ptr_instance, icd, pAllocator);
4164 continue;
4165 }
4166
4167 res = loader_add_instance_extensions(
4168 ptr_instance,
4169 icd->this_icd_lib->EnumerateInstanceExtensionProperties,
4170 icd->this_icd_lib->lib_name, &icd_exts);
4171 if (VK_SUCCESS != res) {
4172 loader_destroy_generic_list(ptr_instance,
4173 (struct loader_generic_list *)&icd_exts);
4174 if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
4175 // If out of memory, bail immediately.
4176 goto out;
4177 } else {
4178 // Something bad happened with this ICD, so free it and try
4179 // the next.
4180 ptr_instance->icds = icd->next;
4181 icd->next = NULL;
4182 loader_icd_destroy(ptr_instance, icd, pAllocator);
4183 continue;
4184 }
4185 }
4186
4187 for (uint32_t j = 0; j < pCreateInfo->enabledExtensionCount; j++) {
4188 prop = get_extension_property(
4189 pCreateInfo->ppEnabledExtensionNames[j], &icd_exts);
4190 if (prop) {
4191 filtered_extension_names[icd_create_info
4192 .enabledExtensionCount] =
4193 (char *)pCreateInfo->ppEnabledExtensionNames[j];
4194 icd_create_info.enabledExtensionCount++;
4195 }
4196 }
4197
4198 loader_destroy_generic_list(ptr_instance,
4199 (struct loader_generic_list *)&icd_exts);
4200
4201 res = ptr_instance->icd_libs.list[i].CreateInstance(
4202 &icd_create_info, pAllocator, &(icd->instance));
4203 if (VK_ERROR_OUT_OF_HOST_MEMORY == res) {
4204 // If out of memory, bail immediately.
4205 goto out;
4206 } else if (VK_SUCCESS != res) {
4207 loader_log(ptr_instance, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
4208 "ICD ignored: failed to CreateInstance in ICD %d", i);
4209 ptr_instance->icds = icd->next;
4210 icd->next = NULL;
4211 loader_icd_destroy(ptr_instance, icd, pAllocator);
4212 continue;
4213 }
4214
4215 if (!loader_icd_init_entrys(
4216 icd, icd->instance,
4217 ptr_instance->icd_libs.list[i].GetInstanceProcAddr)) {
4218 loader_log(ptr_instance, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
4219 "ICD ignored: failed to CreateInstance and find "
4220 "entrypoints with ICD");
4221 continue;
4222 }
4223 }
4224
    /*
     * If no ICDs were added to the instance list and res is unchanged
     * from its initial value, the loader was unable to find
     * a suitable ICD.
     */
4230 if (VK_SUCCESS == res && ptr_instance->icds == NULL) {
4231 res = VK_ERROR_INCOMPATIBLE_DRIVER;
4232 }
4233
4234 out:
4235
4236 if (VK_SUCCESS != res) {
4237 while (NULL != ptr_instance->icds) {
4238 icd = ptr_instance->icds;
4239 ptr_instance->icds = icd->next;
4240 if (NULL != icd->instance) {
4241 icd->DestroyInstance(icd->instance, pAllocator);
4242 }
4243 loader_icd_destroy(ptr_instance, icd, pAllocator);
4244 }
4245 }
4246
4247 return res;
4248 }
4249
4250 VKAPI_ATTR void VKAPI_CALL terminator_DestroyInstance(
4251 VkInstance instance, const VkAllocationCallbacks *pAllocator) {
4252 struct loader_instance *ptr_instance = loader_instance(instance);
4253 struct loader_icd *icds = ptr_instance->icds;
4254 struct loader_icd *next_icd;
4255
4256 // Remove this instance from the list of instances:
4257 struct loader_instance *prev = NULL;
4258 struct loader_instance *next = loader.instances;
4259 while (next != NULL) {
4260 if (next == ptr_instance) {
4261 // Remove this instance from the list:
4262 if (prev)
4263 prev->next = next->next;
4264 else
4265 loader.instances = next->next;
4266 break;
4267 }
4268 prev = next;
4269 next = next->next;
4270 }
4271
4272 while (icds) {
4273 if (icds->instance) {
4274 icds->DestroyInstance(icds->instance, pAllocator);
4275 }
4276 next_icd = icds->next;
4277 icds->instance = VK_NULL_HANDLE;
4278 loader_icd_destroy(ptr_instance, icds, pAllocator);
4279
4280 icds = next_icd;
4281 }
4282
4283 loader_delete_layer_properties(ptr_instance,
4284 &ptr_instance->instance_layer_list);
4285 loader_scanned_icd_clear(ptr_instance, &ptr_instance->icd_libs);
4286 loader_destroy_generic_list(
4287 ptr_instance, (struct loader_generic_list *)&ptr_instance->ext_list);
4288 if (ptr_instance->phys_devs_term)
4289 loader_instance_heap_free(ptr_instance, ptr_instance->phys_devs_term);
4290 loader_free_dev_ext_table(ptr_instance);
4291 }
4292
4293 VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateDevice(
4294 VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo *pCreateInfo,
4295 const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
4296 VkResult res = VK_SUCCESS;
4297 struct loader_physical_device *phys_dev;
4298 phys_dev = (struct loader_physical_device *)physicalDevice;
4299
4300 struct loader_device *dev = (struct loader_device *)*pDevice;
4301 PFN_vkCreateDevice fpCreateDevice = phys_dev->this_icd->CreateDevice;
4302 struct loader_extension_list icd_exts;
4303
4304 icd_exts.list = NULL;
4305
4306 if (fpCreateDevice == NULL) {
4307 loader_log(phys_dev->this_icd->this_instance,
4308 VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
4309 "No vkCreateDevice command exposed by ICD %s",
4310 phys_dev->this_icd->this_icd_lib->lib_name);
4311 res = VK_ERROR_INITIALIZATION_FAILED;
4312 goto out;
4313 }
4314
4315 VkDeviceCreateInfo localCreateInfo;
4316 memcpy(&localCreateInfo, pCreateInfo, sizeof(localCreateInfo));
4317
4318 /*
4319 * NOTE: Need to filter the extensions to only those
4320 * supported by the ICD.
4321 * No ICD will advertise support for layers. An ICD
4322 * library could support a layer, but it would be
4323 * independent of the actual ICD, just in the same library.
4324 */
4325 char **filtered_extension_names = NULL;
4326 filtered_extension_names =
4327 loader_stack_alloc(pCreateInfo->enabledExtensionCount * sizeof(char *));
4328 if (!filtered_extension_names) {
4329 return VK_ERROR_OUT_OF_HOST_MEMORY;
4330 }
4331
4332 localCreateInfo.enabledLayerCount = 0;
4333 localCreateInfo.ppEnabledLayerNames = NULL;
4334
4335 localCreateInfo.enabledExtensionCount = 0;
4336 localCreateInfo.ppEnabledExtensionNames =
4337 (const char *const *)filtered_extension_names;
4338
4339 /* Get the physical device (ICD) extensions */
4340 res = loader_init_generic_list(phys_dev->this_icd->this_instance,
4341 (struct loader_generic_list *)&icd_exts,
4342 sizeof(VkExtensionProperties));
4343 if (VK_SUCCESS != res) {
4344 goto out;
4345 }
4346
4347 res = loader_add_device_extensions(
4348 phys_dev->this_icd->this_instance,
4349 phys_dev->this_icd->EnumerateDeviceExtensionProperties,
4350 phys_dev->phys_dev, phys_dev->this_icd->this_icd_lib->lib_name,
4351 &icd_exts);
4352 if (res != VK_SUCCESS) {
4353 goto out;
4354 }
4355
4356 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
4357 const char *extension_name = pCreateInfo->ppEnabledExtensionNames[i];
4358 VkExtensionProperties *prop =
4359 get_extension_property(extension_name, &icd_exts);
4360 if (prop) {
4361 filtered_extension_names[localCreateInfo.enabledExtensionCount] =
4362 (char *)extension_name;
4363 localCreateInfo.enabledExtensionCount++;
4364 } else {
4365 loader_log(phys_dev->this_icd->this_instance,
4366 VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
4367 "vkCreateDevice extension %s not available for "
4368 "devices associated with ICD %s",
4369 extension_name,
4370 phys_dev->this_icd->this_icd_lib->lib_name);
4371 }
4372 }
4373
4374 res = fpCreateDevice(phys_dev->phys_dev, &localCreateInfo, pAllocator,
4375 &dev->device);
4376 if (res != VK_SUCCESS) {
4377 loader_log(phys_dev->this_icd->this_instance,
4378 VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
4379 "vkCreateDevice call failed in ICD %s",
4380 phys_dev->this_icd->this_icd_lib->lib_name);
4381 goto out;
4382 }
4383
4384 *pDevice = dev->device;
4385 loader_add_logical_device(phys_dev->this_icd->this_instance,
4386 phys_dev->this_icd, dev);
4387
4388 /* Init dispatch pointer in new device object */
4389 loader_init_dispatch(*pDevice, &dev->loader_dispatch);
4390
4391 out:
4392 if (NULL != icd_exts.list) {
4393 loader_destroy_generic_list(phys_dev->this_icd->this_instance,
4394 (struct loader_generic_list *)&icd_exts);
4395 }
4396
4397 return res;
4398 }
4399
4400 VKAPI_ATTR VkResult VKAPI_CALL
4401 terminator_EnumeratePhysicalDevices(VkInstance instance,
4402 uint32_t *pPhysicalDeviceCount,
4403 VkPhysicalDevice *pPhysicalDevices) {
4404 uint32_t i;
4405 struct loader_instance *inst = (struct loader_instance *)instance;
4406 VkResult res = VK_SUCCESS;
4407
4408 struct loader_icd *icd;
4409 struct loader_phys_dev_per_icd *phys_devs;
4410
4411 inst->total_gpu_count = 0;
4412 phys_devs = (struct loader_phys_dev_per_icd *)loader_stack_alloc(
4413 sizeof(struct loader_phys_dev_per_icd) * inst->total_icd_count);
4414 if (!phys_devs)
4415 return VK_ERROR_OUT_OF_HOST_MEMORY;
4416
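    /* First pass: ask each ICD how many physical devices it exposes. */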
4417 icd = inst->icds;
4418 for (i = 0; i < inst->total_icd_count; i++) {
4419 assert(icd);
4420 res = icd->EnumeratePhysicalDevices(icd->instance, &phys_devs[i].count,
4421 NULL);
4422 if (res != VK_SUCCESS)
4423 return res;
4424 icd = icd->next;
4425 }
4426
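    /* Second pass: fetch each ICD's physical device handles into per-ICD
     * arrays. */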
4427 icd = inst->icds;
4428 for (i = 0; i < inst->total_icd_count; i++) {
4429 assert(icd);
4430 phys_devs[i].phys_devs = (VkPhysicalDevice *)loader_stack_alloc(
4431 phys_devs[i].count * sizeof(VkPhysicalDevice));
4432 if (!phys_devs[i].phys_devs) {
4433 return VK_ERROR_OUT_OF_HOST_MEMORY;
4434 }
4435 res = icd->EnumeratePhysicalDevices(
4436 icd->instance, &(phys_devs[i].count), phys_devs[i].phys_devs);
        if (res == VK_SUCCESS) {
4438 inst->total_gpu_count += phys_devs[i].count;
4439 } else {
4440 return res;
4441 }
4442 phys_devs[i].this_icd = icd;
4443 icd = icd->next;
4444 }
4445
4446 uint32_t copy_count = inst->total_gpu_count;
4447
4448 if (NULL != pPhysicalDevices) {
4449 // Initialize the output pPhysicalDevices with wrapped loader
4450 // terminator physicalDevice objects; save this list of
4451 // wrapped objects in instance struct for later cleanup and
4452 // use by trampoline code
4453 uint32_t j, idx = 0;
4454
4455 if (copy_count > *pPhysicalDeviceCount) {
4456 copy_count = *pPhysicalDeviceCount;
4457 }
4458
4459 if (inst->phys_devs_term) {
4460 loader_instance_heap_free(inst, inst->phys_devs_term);
4461 inst->phys_devs_term = NULL;
4462 }
4463
4464 if (inst->total_gpu_count > 0) {
4465 inst->phys_devs_term = loader_instance_heap_alloc(
4466 inst, sizeof(struct loader_physical_device) * inst->total_gpu_count,
4467 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
4468 if (!inst->phys_devs_term) {
4469 return VK_ERROR_OUT_OF_HOST_MEMORY;
4470 }
4471 }
4472
4473 for (i = 0; idx < inst->total_gpu_count && i < inst->total_icd_count; i++) {
4474 for (j = 0; j < phys_devs[i].count && idx < inst->total_gpu_count; j++) {
4475 loader_set_dispatch((void *)&inst->phys_devs_term[idx],
4476 inst->disp);
4477 inst->phys_devs_term[idx].this_icd = phys_devs[i].this_icd;
4478 inst->phys_devs_term[idx].icd_index = (uint8_t)(i);
4479 inst->phys_devs_term[idx].phys_dev = phys_devs[i].phys_devs[j];
4480 if (idx < copy_count) {
4481 pPhysicalDevices[idx] =
4482 (VkPhysicalDevice)&inst->phys_devs_term[idx];
4483 }
4484 idx++;
4485 }
4486 }
4487
4488 if (copy_count < inst->total_gpu_count) {
4489 res = VK_INCOMPLETE;
4490 }
4491 }
4492
4493 *pPhysicalDeviceCount = copy_count;
4494
4495 return res;
4496 }
4497
4498 VKAPI_ATTR void VKAPI_CALL terminator_GetPhysicalDeviceProperties(
4499 VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties *pProperties) {
4500 struct loader_physical_device *phys_dev =
4501 (struct loader_physical_device *)physicalDevice;
4502 struct loader_icd *icd = phys_dev->this_icd;
4503
4504 if (icd->GetPhysicalDeviceProperties)
4505 icd->GetPhysicalDeviceProperties(phys_dev->phys_dev, pProperties);
4506 }
4507
4508 VKAPI_ATTR void VKAPI_CALL terminator_GetPhysicalDeviceQueueFamilyProperties(
4509 VkPhysicalDevice physicalDevice, uint32_t *pQueueFamilyPropertyCount,
4510 VkQueueFamilyProperties *pProperties) {
4511 struct loader_physical_device *phys_dev =
4512 (struct loader_physical_device *)physicalDevice;
4513 struct loader_icd *icd = phys_dev->this_icd;
4514
4515 if (icd->GetPhysicalDeviceQueueFamilyProperties)
4516 icd->GetPhysicalDeviceQueueFamilyProperties(
4517 phys_dev->phys_dev, pQueueFamilyPropertyCount, pProperties);
4518 }
4519
4520 VKAPI_ATTR void VKAPI_CALL terminator_GetPhysicalDeviceMemoryProperties(
4521 VkPhysicalDevice physicalDevice,
4522 VkPhysicalDeviceMemoryProperties *pProperties) {
4523 struct loader_physical_device *phys_dev =
4524 (struct loader_physical_device *)physicalDevice;
4525 struct loader_icd *icd = phys_dev->this_icd;
4526
4527 if (icd->GetPhysicalDeviceMemoryProperties)
4528 icd->GetPhysicalDeviceMemoryProperties(phys_dev->phys_dev, pProperties);
4529 }
4530
4531 VKAPI_ATTR void VKAPI_CALL
4532 terminator_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice,
4533 VkPhysicalDeviceFeatures *pFeatures) {
4534 struct loader_physical_device *phys_dev =
4535 (struct loader_physical_device *)physicalDevice;
4536 struct loader_icd *icd = phys_dev->this_icd;
4537
4538 if (icd->GetPhysicalDeviceFeatures)
4539 icd->GetPhysicalDeviceFeatures(phys_dev->phys_dev, pFeatures);
4540 }
4541
4542 VKAPI_ATTR void VKAPI_CALL
4543 terminator_GetPhysicalDeviceFormatProperties(VkPhysicalDevice physicalDevice,
4544 VkFormat format,
4545 VkFormatProperties *pFormatInfo) {
4546 struct loader_physical_device *phys_dev =
4547 (struct loader_physical_device *)physicalDevice;
4548 struct loader_icd *icd = phys_dev->this_icd;
4549
4550 if (icd->GetPhysicalDeviceFormatProperties)
4551 icd->GetPhysicalDeviceFormatProperties(phys_dev->phys_dev, format,
4552 pFormatInfo);
4553 }
4554
4555 VKAPI_ATTR VkResult VKAPI_CALL
4556 terminator_GetPhysicalDeviceImageFormatProperties(
4557 VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type,
4558 VkImageTiling tiling, VkImageUsageFlags usage, VkImageCreateFlags flags,
4559 VkImageFormatProperties *pImageFormatProperties) {
4560 struct loader_physical_device *phys_dev =
4561 (struct loader_physical_device *)physicalDevice;
4562 struct loader_icd *icd = phys_dev->this_icd;
4563
4564 if (!icd->GetPhysicalDeviceImageFormatProperties)
4565 return VK_ERROR_INITIALIZATION_FAILED;
4566
4567 return icd->GetPhysicalDeviceImageFormatProperties(
4568 phys_dev->phys_dev, format, type, tiling, usage, flags,
4569 pImageFormatProperties);
4570 }
4571
4572 VKAPI_ATTR void VKAPI_CALL
4573 terminator_GetPhysicalDeviceSparseImageFormatProperties(
4574 VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type,
4575 VkSampleCountFlagBits samples, VkImageUsageFlags usage,
4576 VkImageTiling tiling, uint32_t *pNumProperties,
4577 VkSparseImageFormatProperties *pProperties) {
4578 struct loader_physical_device *phys_dev =
4579 (struct loader_physical_device *)physicalDevice;
4580 struct loader_icd *icd = phys_dev->this_icd;
4581
4582 if (icd->GetPhysicalDeviceSparseImageFormatProperties)
4583 icd->GetPhysicalDeviceSparseImageFormatProperties(
4584 phys_dev->phys_dev, format, type, samples, usage, tiling,
4585 pNumProperties, pProperties);
4586 }
4587
4588 VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumerateDeviceExtensionProperties(
4589 VkPhysicalDevice physicalDevice, const char *pLayerName,
4590 uint32_t *pPropertyCount, VkExtensionProperties *pProperties) {
4591 struct loader_physical_device *phys_dev;
4592
4593 struct loader_layer_list implicit_layer_list = {0};
4594 struct loader_extension_list all_exts = {0};
4595 struct loader_extension_list icd_exts = {0};
4596
4597 assert(pLayerName == NULL || strlen(pLayerName) == 0);
4598
    /* Any layer or trampoline wrapping should be removed at this point in
     * time, so we can just cast to the expected type for VkPhysicalDevice. */
4601 phys_dev = (struct loader_physical_device *)physicalDevice;
4602
    /* This case is during the call down the instance chain with pLayerName
     * == NULL. */
4605 struct loader_icd *icd = phys_dev->this_icd;
4606 uint32_t icd_ext_count = *pPropertyCount;
4607 VkResult res;
4608
4609 /* get device extensions */
4610 res = icd->EnumerateDeviceExtensionProperties(phys_dev->phys_dev, NULL,
4611 &icd_ext_count, pProperties);
4612 if (res != VK_SUCCESS) {
4613 goto out;
4614 }
4615
4616 if (!loader_init_layer_list(icd->this_instance, &implicit_layer_list)) {
4617 res = VK_ERROR_OUT_OF_HOST_MEMORY;
4618 goto out;
4619 }
4620
4621 loader_add_layer_implicit(
4622 icd->this_instance, VK_LAYER_TYPE_INSTANCE_IMPLICIT,
4623 &implicit_layer_list, &icd->this_instance->instance_layer_list);
4624 /* we need to determine which implicit layers are active,
4625 * and then add their extensions. This can't be cached as
4626 * it depends on results of environment variables (which can change).
4627 */
4628 if (pProperties != NULL) {
4629 /* initialize dev_extension list within the physicalDevice object */
4630 res = loader_init_device_extensions(icd->this_instance, phys_dev,
4631 icd_ext_count, pProperties,
4632 &icd_exts);
4633 if (res != VK_SUCCESS) {
4634 goto out;
4635 }
4636
4642 res = loader_add_to_ext_list(icd->this_instance, &all_exts,
4643 icd_exts.count, icd_exts.list);
4644 if (res != VK_SUCCESS) {
4645 goto out;
4646 }
4647
4648 loader_add_layer_implicit(
4649 icd->this_instance, VK_LAYER_TYPE_INSTANCE_IMPLICIT,
4650 &implicit_layer_list, &icd->this_instance->instance_layer_list);
4651
4652 for (uint32_t i = 0; i < implicit_layer_list.count; i++) {
4653 for (uint32_t j = 0;
4654 j < implicit_layer_list.list[i].device_extension_list.count;
4655 j++) {
4656 res = loader_add_to_ext_list(icd->this_instance, &all_exts, 1,
4657 &implicit_layer_list.list[i]
4658 .device_extension_list.list[j]
4659 .props);
4660 if (res != VK_SUCCESS) {
4661 goto out;
4662 }
4663 }
4664 }
4665 uint32_t capacity = *pPropertyCount;
4666 VkExtensionProperties *props = pProperties;
4667
4668 for (uint32_t i = 0; i < all_exts.count && i < capacity; i++) {
4669 props[i] = all_exts.list[i];
4670 }
        /* If there wasn't enough space for the extensions we did a partial
         * copy; return VK_INCOMPLETE */
4673 if (capacity < all_exts.count) {
4674 res = VK_INCOMPLETE;
4675 } else {
4676 *pPropertyCount = all_exts.count;
4677 }
4678 } else {
        /* Just return the count; we need to add in the count of implicit
         * layer extensions. Don't worry about duplicates being added to the
         * count */
4682 *pPropertyCount = icd_ext_count;
4683
4684 for (uint32_t i = 0; i < implicit_layer_list.count; i++) {
4685 *pPropertyCount +=
4686 implicit_layer_list.list[i].device_extension_list.count;
4687 }
4688 res = VK_SUCCESS;
4689 }
4690
4691 out:
4692
4693 if (NULL != implicit_layer_list.list) {
4694 loader_destroy_generic_list(
4695 icd->this_instance,
4696 (struct loader_generic_list *)&implicit_layer_list);
4697 }
4698 if (NULL != all_exts.list) {
4699 loader_destroy_generic_list(icd->this_instance,
4700 (struct loader_generic_list *)&all_exts);
4701 }
4702 if (NULL != icd_exts.list) {
4703 loader_destroy_generic_list(icd->this_instance,
4704 (struct loader_generic_list *)&icd_exts);
4705 }
4706
4707 return res;
4708 }
4709
4710 VKAPI_ATTR VkResult VKAPI_CALL
4711 terminator_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice,
4712 uint32_t *pPropertyCount,
4713 VkLayerProperties *pProperties) {
4714
    // Should never get here; this call isn't dispatched down the chain
4716 return VK_ERROR_INITIALIZATION_FAILED;
4717 }
4718
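/* Scan at most max_length bytes of utf8 (stopping at a NUL) and return a
 * bitmask of VK_STRING_ERROR_* flags describing any invalid bytes or
 * truncated multi-byte sequences; VK_STRING_ERROR_NONE means the string is
 * valid. */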
4719 VkStringErrorFlags vk_string_validate(const int max_length, const char *utf8) {
4720 VkStringErrorFlags result = VK_STRING_ERROR_NONE;
4721 int num_char_bytes = 0;
4722 int i, j;
4723
4724 for (i = 0; i < max_length; i++) {
4725 if (utf8[i] == 0) {
4726 break;
4727 } else if ((utf8[i] >= 0x20) && (utf8[i] < 0x7f)) {
4728 num_char_bytes = 0;
4729 } else if ((utf8[i] & UTF8_ONE_BYTE_MASK) == UTF8_ONE_BYTE_CODE) {
4730 num_char_bytes = 1;
4731 } else if ((utf8[i] & UTF8_TWO_BYTE_MASK) == UTF8_TWO_BYTE_CODE) {
4732 num_char_bytes = 2;
4733 } else if ((utf8[i] & UTF8_THREE_BYTE_MASK) == UTF8_THREE_BYTE_CODE) {
4734 num_char_bytes = 3;
4735 } else {
4736 result = VK_STRING_ERROR_BAD_DATA;
4737 }
4738
4739 // Validate the following num_char_bytes of data
4740 for (j = 0; (j < num_char_bytes) && (i < max_length); j++) {
4741 if (++i == max_length) {
4742 result |= VK_STRING_ERROR_LENGTH;
4743 break;
4744 }
4745 if ((utf8[i] & UTF8_DATA_BYTE_MASK) != UTF8_DATA_BYTE_CODE) {
4746 result |= VK_STRING_ERROR_BAD_DATA;
4747 }
4748 }
4749 }
4750 return result;
4751 }
4752