/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/vk/VulkanInterface.h"
#include "tools/gpu/vk/VkTestMemoryAllocator.h"
#include "tools/gpu/vk/VkTestUtils.h"

#ifdef SK_VULKAN

#ifndef SK_GPU_TOOLS_VK_LIBRARY_NAME
    #if defined _WIN32
        #define SK_GPU_TOOLS_VK_LIBRARY_NAME vulkan-1.dll
    #elif defined SK_BUILD_FOR_MAC
        #define SK_GPU_TOOLS_VK_LIBRARY_NAME libvk_swiftshader.dylib
    #else
        #define SK_GPU_TOOLS_VK_LIBRARY_NAME        libvulkan.so
        #define SK_GPU_TOOLS_VK_LIBRARY_NAME_BACKUP libvulkan.so.1
    #endif
#endif

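// Note: two-level stringification. STRINGIFY2 alone would stringize its argument literally
// ("SK_GPU_TOOLS_VK_LIBRARY_NAME"); the extra STRINGIFY layer forces the macro argument to be
// expanded first, so STRINGIFY(SK_GPU_TOOLS_VK_LIBRARY_NAME) yields e.g. "vulkan-1.dll" on Windows.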
#define STRINGIFY2(S) #S
#define STRINGIFY(S) STRINGIFY2(S)

#include <algorithm>

#if defined(__GLIBC__)
#include <execinfo.h>
#endif
#include "include/gpu/vk/VulkanBackendContext.h"
#include "include/gpu/vk/VulkanExtensions.h"
#include "src/base/SkAutoMalloc.h"
#include "src/ports/SkOSLibrary.h"

#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
#include <sanitizer/lsan_interface.h>
#endif

using namespace skia_private;

namespace sk_gpu_test {

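// Loads the Vulkan loader library once (falling back to the backup name when one is defined),
// caches the handle and the vkGetInstanceProcAddr entry point in function-local statics, and
// hands the entry point back to the caller. Returns false if the library or symbol is missing.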
bool LoadVkLibraryAndGetProcAddrFuncs(PFN_vkGetInstanceProcAddr* instProc) {
    static void* vkLib = nullptr;
    static PFN_vkGetInstanceProcAddr localInstProc = nullptr;
    if (!vkLib) {
        vkLib = SkLoadDynamicLibrary(STRINGIFY(SK_GPU_TOOLS_VK_LIBRARY_NAME));
        if (!vkLib) {
            // vulkaninfo tries to load the library from two places, so we do as well
            // https://github.com/KhronosGroup/Vulkan-Tools/blob/078d44e4664b7efa0b6c96ebced1995c4425d57a/vulkaninfo/vulkaninfo.h#L249
#ifdef SK_GPU_TOOLS_VK_LIBRARY_NAME_BACKUP
            vkLib = SkLoadDynamicLibrary(STRINGIFY(SK_GPU_TOOLS_VK_LIBRARY_NAME_BACKUP));
            if (!vkLib) {
                return false;
            }
#else
            return false;
#endif
        }
        localInstProc = (PFN_vkGetInstanceProcAddr) SkGetProcedureAddress(vkLib,
                                                                          "vkGetInstanceProcAddr");
    }
    if (!localInstProc) {
        return false;
    }
    *instProc = localInstProc;
    return true;
}

////////////////////////////////////////////////////////////////////////////////
// Helper code to set up Vulkan context objects

#ifdef SK_ENABLE_VK_LAYERS
const char* kDebugLayerNames[] = {
    // single merged layer
    "VK_LAYER_KHRONOS_validation",
    // not included in standard_validation
    //"VK_LAYER_LUNARG_api_dump",
    //"VK_LAYER_LUNARG_vktrace",
    //"VK_LAYER_LUNARG_screenshot",
};

static uint32_t remove_patch_version(uint32_t specVersion) {
    return (specVersion >> 12) << 12;
}

// Returns the index into the layers array for the layer we want. Returns -1 if not supported.
static int should_include_debug_layer(const char* layerName,
                                       uint32_t layerCount, VkLayerProperties* layers,
                                       uint32_t version) {
    for (uint32_t i = 0; i < layerCount; ++i) {
        if (!strcmp(layerName, layers[i].layerName)) {
            // Since the layers intercept the Vulkan calls and forward them on, we need to make
            // sure the layer was written against a version that isn't older than the version of
            // Vulkan we're using, so that it has all the API entry points.
            if (version <= remove_patch_version(layers[i].specVersion)) {
                return i;
            }
            return -1;
        }
    }
    return -1;
}

static void print_backtrace() {
#if defined(__GLIBC__)
    void* stack[64];
    int count = backtrace(stack, std::size(stack));
    backtrace_symbols_fd(stack, count, 2);
#else
    // Please add implementations for other platforms.
#endif
}

VKAPI_ATTR VkBool32 VKAPI_CALL DebugReportCallback(
    VkDebugReportFlagsEXT       flags,
    VkDebugReportObjectTypeEXT  objectType,
    uint64_t                    object,
    size_t                      location,
    int32_t                     messageCode,
    const char*                 pLayerPrefix,
    const char*                 pMessage,
    void*                       pUserData) {
    if (flags & VK_DEBUG_REPORT_ERROR_BIT_EXT) {
        // See https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/1887
        if (strstr(pMessage, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-01521") ||
            strstr(pMessage, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-01522")) {
            return VK_FALSE;
        }
        // See https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/2171
        if (strstr(pMessage, "VUID-vkCmdDraw-None-02686") ||
            strstr(pMessage, "VUID-vkCmdDrawIndexed-None-02686")) {
            return VK_FALSE;
        }
        SkDebugf("Vulkan error [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
        print_backtrace();
        SkDEBUGFAIL("Vulkan debug layer error");
        return VK_TRUE; // skip further layers
    } else if (flags & VK_DEBUG_REPORT_WARNING_BIT_EXT) {
        SkDebugf("Vulkan warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
        print_backtrace();
    } else if (flags & VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT) {
        SkDebugf("Vulkan perf warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
        print_backtrace();
    } else {
        SkDebugf("Vulkan info/debug [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
    }
    return VK_FALSE;
}
#endif

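// Helper for looking up instance-level entry points (with VK_NULL_HANDLE or a real instance).
// Declares a local PFN_vk<name> named grVk<name>; the do/while(0) wrapper lets the macro be used
// like a statement, and the check returns false from the enclosing function if the proc is missing.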
#define ACQUIRE_VK_INST_PROC_LOCAL(name, instance)                                 \
    PFN_vk##name grVk##name =                                                      \
        reinterpret_cast<PFN_vk##name>(getInstProc(instance, "vk" #name));         \
    do {                                                                           \
        if (grVk##name == nullptr) {                                               \
            SkDebugf("Function ptr for vk%s could not be acquired\n", #name);      \
            return false;                                                          \
        }                                                                          \
    } while (0)

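// Collects the instance layers and extensions we will enable: with SK_ENABLE_VK_LAYERS, any of
// kDebugLayerNames the loader reports (version-checked against the spec version we target), then
// the extensions advertised by the implementation/implicit layers, plus those advertised by each
// layer we just picked.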
static bool init_instance_extensions_and_layers(PFN_vkGetInstanceProcAddr getInstProc,
                                                uint32_t specVersion,
                                                TArray<VkExtensionProperties>* instanceExtensions,
                                                TArray<VkLayerProperties>* instanceLayers) {
    if (getInstProc == nullptr) {
        return false;
    }

    ACQUIRE_VK_INST_PROC_LOCAL(EnumerateInstanceExtensionProperties, VK_NULL_HANDLE);
    ACQUIRE_VK_INST_PROC_LOCAL(EnumerateInstanceLayerProperties, VK_NULL_HANDLE);

    VkResult res;
    uint32_t layerCount = 0;
#ifdef SK_ENABLE_VK_LAYERS
    // instance layers
    res = grVkEnumerateInstanceLayerProperties(&layerCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }
    VkLayerProperties* layers = new VkLayerProperties[layerCount];
    res = grVkEnumerateInstanceLayerProperties(&layerCount, layers);
    if (VK_SUCCESS != res) {
        delete[] layers;
        return false;
    }

    uint32_t nonPatchVersion = remove_patch_version(specVersion);
    for (size_t i = 0; i < std::size(kDebugLayerNames); ++i) {
        int idx = should_include_debug_layer(kDebugLayerNames[i], layerCount, layers,
                                             nonPatchVersion);
        if (idx != -1) {
            instanceLayers->push_back() = layers[idx];
        }
    }
    delete[] layers;
#endif

    // instance extensions
    // via Vulkan implementation and implicitly enabled layers
    {
        uint32_t extensionCount = 0;
        res = grVkEnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
        if (VK_SUCCESS != res) {
            return false;
        }
        VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
        res = grVkEnumerateInstanceExtensionProperties(nullptr, &extensionCount, extensions);
        if (VK_SUCCESS != res) {
            delete[] extensions;
            return false;
        }
        for (uint32_t i = 0; i < extensionCount; ++i) {
            instanceExtensions->push_back() = extensions[i];
        }
        delete[] extensions;
    }

    // via explicitly enabled layers
    layerCount = instanceLayers->size();
    for (uint32_t layerIndex = 0; layerIndex < layerCount; ++layerIndex) {
        uint32_t extensionCount = 0;
        res = grVkEnumerateInstanceExtensionProperties((*instanceLayers)[layerIndex].layerName,
                                                       &extensionCount, nullptr);
        if (VK_SUCCESS != res) {
            return false;
        }
        VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
        res = grVkEnumerateInstanceExtensionProperties((*instanceLayers)[layerIndex].layerName,
                                                       &extensionCount, extensions);
        if (VK_SUCCESS != res) {
            delete[] extensions;
            return false;
        }
        for (uint32_t i = 0; i < extensionCount; ++i) {
            instanceExtensions->push_back() = extensions[i];
        }
        delete[] extensions;
    }

    return true;
}

#define GET_PROC_LOCAL(F, inst, device) PFN_vk ## F F = (PFN_vk ## F) getProc("vk" #F, inst, device)

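// Device-level counterpart of init_instance_extensions_and_layers(): gathers debug layers (when
// enabled) and the device extensions exposed by the implementation and by each enabled layer,
// using the instance-scoped getProc to look up the enumeration entry points.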
static bool init_device_extensions_and_layers(const skgpu::VulkanGetProc& getProc,
                                              uint32_t specVersion, VkInstance inst,
                                              VkPhysicalDevice physDev,
                                              TArray<VkExtensionProperties>* deviceExtensions,
                                              TArray<VkLayerProperties>* deviceLayers) {
    if (getProc == nullptr) {
        return false;
    }

    GET_PROC_LOCAL(EnumerateDeviceExtensionProperties, inst, VK_NULL_HANDLE);
    GET_PROC_LOCAL(EnumerateDeviceLayerProperties, inst, VK_NULL_HANDLE);

    if (!EnumerateDeviceExtensionProperties ||
        !EnumerateDeviceLayerProperties) {
        return false;
    }

    VkResult res;
    // device layers
    uint32_t layerCount = 0;
#ifdef SK_ENABLE_VK_LAYERS
    res = EnumerateDeviceLayerProperties(physDev, &layerCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }
    VkLayerProperties* layers = new VkLayerProperties[layerCount];
    res = EnumerateDeviceLayerProperties(physDev, &layerCount, layers);
    if (VK_SUCCESS != res) {
        delete[] layers;
        return false;
    }

    uint32_t nonPatchVersion = remove_patch_version(specVersion);
    for (size_t i = 0; i < std::size(kDebugLayerNames); ++i) {
        int idx = should_include_debug_layer(kDebugLayerNames[i], layerCount, layers,
                                             nonPatchVersion);
        if (idx != -1) {
            deviceLayers->push_back() = layers[idx];
        }
    }
    delete[] layers;
#endif

    // device extensions
    // via Vulkan implementation and implicitly enabled layers
    {
        uint32_t extensionCount = 0;
        res = EnumerateDeviceExtensionProperties(physDev, nullptr, &extensionCount, nullptr);
        if (VK_SUCCESS != res) {
            return false;
        }
        VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
        res = EnumerateDeviceExtensionProperties(physDev, nullptr, &extensionCount, extensions);
        if (VK_SUCCESS != res) {
            delete[] extensions;
            return false;
        }
        for (uint32_t i = 0; i < extensionCount; ++i) {
            deviceExtensions->push_back() = extensions[i];
        }
        delete[] extensions;
    }

    // via explicitly enabled layers
    layerCount = deviceLayers->size();
    for (uint32_t layerIndex = 0; layerIndex < layerCount; ++layerIndex) {
        uint32_t extensionCount = 0;
        res = EnumerateDeviceExtensionProperties(physDev,
            (*deviceLayers)[layerIndex].layerName,
            &extensionCount, nullptr);
        if (VK_SUCCESS != res) {
            return false;
        }
        VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
        res = EnumerateDeviceExtensionProperties(physDev,
            (*deviceLayers)[layerIndex].layerName,
            &extensionCount, extensions);
        if (VK_SUCCESS != res) {
            delete[] extensions;
            return false;
        }
        for (uint32_t i = 0; i < extensionCount; ++i) {
            deviceExtensions->push_back() = extensions[i];
        }
        delete[] extensions;
    }

    return true;
}

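// Proc-lookup macros used by CreateVkBackendContext below. The _NOCHECK variants only declare the
// pointer (callers check for null themselves); ACQUIRE_VK_INST_PROC and ACQUIRE_VK_PROC also log,
// tear down any partially constructed instance via destroy_instance(), and return false on
// failure, while the _LOCAL variant just logs and returns false.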
#define ACQUIRE_VK_INST_PROC_NOCHECK(name, instance) \
    PFN_vk##name grVk##name = reinterpret_cast<PFN_vk##name>(getInstProc(instance, "vk" #name))

#define ACQUIRE_VK_INST_PROC(name, instance) \
    PFN_vk##name grVk##name =                                                          \
        reinterpret_cast<PFN_vk##name>(getInstProc(instance, "vk" #name));             \
    do {                                                                               \
        if (grVk##name == nullptr) {                                                   \
            SkDebugf("Function ptr for vk%s could not be acquired\n", #name);          \
            if (inst != VK_NULL_HANDLE) {                                              \
                destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension); \
            }                                                                          \
            return false;                                                              \
        }                                                                              \
    } while (0)

#define ACQUIRE_VK_PROC_NOCHECK(name, instance, device) \
    PFN_vk##name grVk##name = reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device))

#define ACQUIRE_VK_PROC(name, instance, device)                                        \
    PFN_vk##name grVk##name =                                                          \
            reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device));     \
    do {                                                                               \
        if (grVk##name == nullptr) {                                                   \
            SkDebugf("Function ptr for vk%s could not be acquired\n", #name);          \
            if (inst != VK_NULL_HANDLE) {                                              \
                destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension); \
            }                                                                          \
            return false;                                                              \
        }                                                                              \
    } while (0)

#define ACQUIRE_VK_PROC_LOCAL(name, instance, device)                              \
    PFN_vk##name grVk##name =                                                      \
            reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device)); \
    do {                                                                           \
        if (grVk##name == nullptr) {                                               \
            SkDebugf("Function ptr for vk%s could not be acquired\n", #name);      \
            return false;                                                          \
        }                                                                          \
    } while (0)

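// Unregisters the debug-report callback (if one was installed) and destroys the VkInstance.
// Every failure path in CreateVkBackendContext funnels through this so a half-built instance is
// never leaked.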
static bool destroy_instance(PFN_vkGetInstanceProcAddr getInstProc, VkInstance inst,
                             VkDebugReportCallbackEXT* debugCallback,
                             bool hasDebugExtension) {
    if (hasDebugExtension && *debugCallback != VK_NULL_HANDLE) {
        ACQUIRE_VK_INST_PROC_LOCAL(DestroyDebugReportCallbackEXT, inst);
        grVkDestroyDebugReportCallbackEXT(inst, *debugCallback, nullptr);
        *debugCallback = VK_NULL_HANDLE;
    }
    ACQUIRE_VK_INST_PROC_LOCAL(DestroyInstance, inst);
    grVkDestroyInstance(inst, nullptr);
    return true;
}

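// Builds the pNext chain hanging off |features| with the extension feature structs we may want
// (protected memory, advanced blend, YCbCr conversion), each allocated with sk_malloc_throw, then
// queries them via vkGetPhysicalDeviceFeatures2(KHR). tailPNext always points at the pNext slot of
// the last struct added. The chain is released later with FreeVulkanFeaturesStructs().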
static bool setup_features(const skgpu::VulkanGetProc& getProc, VkInstance inst,
                           VkPhysicalDevice physDev, uint32_t physDeviceVersion,
                           skgpu::VulkanExtensions* extensions, VkPhysicalDeviceFeatures2* features,
                           bool isProtected) {
    SkASSERT(physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
             extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, 1));

    // Setup all extension feature structs we may want to use.
    void** tailPNext = &features->pNext;

    // If |isProtected| is given, attach that first
    VkPhysicalDeviceProtectedMemoryFeatures* protectedMemoryFeatures = nullptr;
    if (isProtected) {
        SkASSERT(physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0));
        protectedMemoryFeatures =
          (VkPhysicalDeviceProtectedMemoryFeatures*)sk_malloc_throw(
              sizeof(VkPhysicalDeviceProtectedMemoryFeatures));
        protectedMemoryFeatures->sType =
          VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES;
        protectedMemoryFeatures->pNext = nullptr;
        *tailPNext = protectedMemoryFeatures;
        tailPNext = &protectedMemoryFeatures->pNext;
    }

    VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* blend = nullptr;
    if (extensions->hasExtension(VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME, 2)) {
        blend = (VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*) sk_malloc_throw(
                sizeof(VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT));
        blend->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT;
        blend->pNext = nullptr;
        *tailPNext = blend;
        tailPNext = &blend->pNext;
    }

    VkPhysicalDeviceSamplerYcbcrConversionFeatures* ycbcrFeature = nullptr;
    if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
        extensions->hasExtension(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME, 1)) {
        ycbcrFeature = (VkPhysicalDeviceSamplerYcbcrConversionFeatures*) sk_malloc_throw(
                sizeof(VkPhysicalDeviceSamplerYcbcrConversionFeatures));
        ycbcrFeature->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES;
        ycbcrFeature->pNext = nullptr;
        ycbcrFeature->samplerYcbcrConversion = VK_TRUE;
        *tailPNext = ycbcrFeature;
        tailPNext = &ycbcrFeature->pNext;
    }

    if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0)) {
        ACQUIRE_VK_PROC_LOCAL(GetPhysicalDeviceFeatures2, inst, VK_NULL_HANDLE);
        grVkGetPhysicalDeviceFeatures2(physDev, features);
    } else {
        SkASSERT(extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
                                          1));
        ACQUIRE_VK_PROC_LOCAL(GetPhysicalDeviceFeatures2KHR, inst, VK_NULL_HANDLE);
        grVkGetPhysicalDeviceFeatures2KHR(physDev, features);
    }

    if (isProtected) {
        if (!protectedMemoryFeatures->protectedMemory) {
            return false;
        }
    }
    return true;
    // If we want to disable any extension features do so here.
}

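// Creates a full Vulkan test context: instance (with validation layers and a debug callback when
// SK_ENABLE_VK_LAYERS is set), the first enumerated physical device, graphics and optional
// present queues, and the logical device, then fills in |ctx|. On any failure the instance is
// torn down with destroy_instance(). The feature structs chained onto |features| are heap
// allocated; callers can release them with FreeVulkanFeaturesStructs() below.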
bool CreateVkBackendContext(PFN_vkGetInstanceProcAddr getInstProc,
                            skgpu::VulkanBackendContext* ctx,
                            skgpu::VulkanExtensions* extensions,
                            VkPhysicalDeviceFeatures2* features,
                            VkDebugReportCallbackEXT* debugCallback,
                            uint32_t* presentQueueIndexPtr,
                            const CanPresentFn& canPresent,
                            bool isProtected) {
    VkResult err;

    ACQUIRE_VK_INST_PROC_NOCHECK(EnumerateInstanceVersion, VK_NULL_HANDLE);
    uint32_t instanceVersion = 0;
    if (!grVkEnumerateInstanceVersion) {
        instanceVersion = VK_MAKE_VERSION(1, 0, 0);
    } else {
        err = grVkEnumerateInstanceVersion(&instanceVersion);
        if (err) {
            SkDebugf("failed to enumerate instance version. Err: %d\n", err);
            return false;
        }
    }
    SkASSERT(instanceVersion >= VK_MAKE_VERSION(1, 0, 0));
    if (isProtected && instanceVersion < VK_MAKE_VERSION(1, 1, 0)) {
        SkDebugf("protected requires vk instance version 1.1\n");
        return false;
    }

    uint32_t apiVersion = VK_MAKE_VERSION(1, 0, 0);
    if (instanceVersion >= VK_MAKE_VERSION(1, 1, 0)) {
        // If the instance version is 1.0 we must have the apiVersion also be 1.0. However, if the
        // instance version is 1.1 or higher, we can set the apiVersion to be whatever the highest
        // API we may use in Skia (technically it can be arbitrary). So for now we set it to 1.1
        // since that is the highest Vulkan version we make use of.
        apiVersion = VK_MAKE_VERSION(1, 1, 0);
    }

    instanceVersion = std::min(instanceVersion, apiVersion);

    STArray<2, VkPhysicalDevice> physDevs;
    VkDevice device;
    VkInstance inst = VK_NULL_HANDLE;

    const VkApplicationInfo app_info = {
        VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType
        nullptr,                            // pNext
        "vktest",                           // pApplicationName
        0,                                  // applicationVersion
        "vktest",                           // pEngineName
        0,                                  // engineVersion
        apiVersion,                         // apiVersion
    };

    TArray<VkLayerProperties> instanceLayers;
    TArray<VkExtensionProperties> instanceExtensions;

    if (!init_instance_extensions_and_layers(getInstProc, instanceVersion,
                                             &instanceExtensions,
                                             &instanceLayers)) {
        return false;
    }

    TArray<const char*> instanceLayerNames;
    TArray<const char*> instanceExtensionNames;
    for (int i = 0; i < instanceLayers.size(); ++i) {
        instanceLayerNames.push_back(instanceLayers[i].layerName);
    }
    for (int i = 0; i < instanceExtensions.size(); ++i) {
        if (strncmp(instanceExtensions[i].extensionName, "VK_KHX", 6) != 0) {
            instanceExtensionNames.push_back(instanceExtensions[i].extensionName);
        }
    }

    const VkInstanceCreateInfo instance_create = {
        VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,    // sType
        nullptr,                                   // pNext
        0,                                         // flags
        &app_info,                                 // pApplicationInfo
        (uint32_t) instanceLayerNames.size(),      // enabledLayerCount
        instanceLayerNames.begin(),                // ppEnabledLayerNames
        (uint32_t) instanceExtensionNames.size(),  // enabledExtensionCount
        instanceExtensionNames.begin(),            // ppEnabledExtensionNames
    };

    bool hasDebugExtension = false;

    ACQUIRE_VK_INST_PROC(CreateInstance, VK_NULL_HANDLE);
    err = grVkCreateInstance(&instance_create, nullptr, &inst);
    if (err < 0) {
        SkDebugf("vkCreateInstance failed: %d\n", err);
        return false;
    }

    ACQUIRE_VK_INST_PROC(GetDeviceProcAddr, inst);
    auto getProc = [getInstProc, grVkGetDeviceProcAddr](const char* proc_name,
                                                        VkInstance instance, VkDevice device) {
        if (device != VK_NULL_HANDLE) {
            return grVkGetDeviceProcAddr(device, proc_name);
        }
        return getInstProc(instance, proc_name);
    };

#ifdef SK_ENABLE_VK_LAYERS
    *debugCallback = VK_NULL_HANDLE;
    for (int i = 0; i < instanceExtensionNames.size() && !hasDebugExtension; ++i) {
        if (!strcmp(instanceExtensionNames[i], VK_EXT_DEBUG_REPORT_EXTENSION_NAME)) {
            hasDebugExtension = true;
        }
    }
    if (hasDebugExtension) {
        // Setup callback creation information
        VkDebugReportCallbackCreateInfoEXT callbackCreateInfo;
        callbackCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT;
        callbackCreateInfo.pNext = nullptr;
        callbackCreateInfo.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT |
                                   VK_DEBUG_REPORT_WARNING_BIT_EXT |
                                   // VK_DEBUG_REPORT_INFORMATION_BIT_EXT |
                                   // VK_DEBUG_REPORT_DEBUG_BIT_EXT |
                                   VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT;
        callbackCreateInfo.pfnCallback = &DebugReportCallback;
        callbackCreateInfo.pUserData = nullptr;

        ACQUIRE_VK_PROC(CreateDebugReportCallbackEXT, inst, VK_NULL_HANDLE);
        // Register the callback
        grVkCreateDebugReportCallbackEXT(inst, &callbackCreateInfo, nullptr, debugCallback);
    }
#endif

    ACQUIRE_VK_PROC(EnumeratePhysicalDevices, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetPhysicalDeviceProperties, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetPhysicalDeviceQueueFamilyProperties, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetPhysicalDeviceFeatures, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(CreateDevice, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetDeviceQueue, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(DeviceWaitIdle, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(DestroyDevice, inst, VK_NULL_HANDLE);

    uint32_t gpuCount;
    err = grVkEnumeratePhysicalDevices(inst, &gpuCount, nullptr);
    if (err) {
        SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
        destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
        return false;
    }
    if (!gpuCount) {
        SkDebugf("vkEnumeratePhysicalDevices returned no supported devices.\n");
        destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
        return false;
    }
    // Allocate enough storage for all available physical devices. We should be able to just ask for
    // the first one, but a bug in RenderDoc (https://github.com/baldurk/renderdoc/issues/2766)
    // will smash the stack if we do that.
    physDevs.resize(gpuCount);
    err = grVkEnumeratePhysicalDevices(inst, &gpuCount, physDevs.data());
    if (err) {
        SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
        destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
        return false;
    }
    // We just use the first physical device.
    // TODO: find best match for our needs
    VkPhysicalDevice physDev = physDevs.front();

    VkPhysicalDeviceProperties physDeviceProperties;
    grVkGetPhysicalDeviceProperties(physDev, &physDeviceProperties);
    uint32_t physDeviceVersion = std::min(physDeviceProperties.apiVersion, apiVersion);

    if (isProtected && physDeviceVersion < VK_MAKE_VERSION(1, 1, 0)) {
        SkDebugf("protected requires vk physical device version 1.1\n");
        destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    // query to get the initial queue props size
    uint32_t queueCount;
    grVkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, nullptr);
    if (!queueCount) {
        SkDebugf("vkGetPhysicalDeviceQueueFamilyProperties returned no queues.\n");
        destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    SkAutoMalloc queuePropsAlloc(queueCount * sizeof(VkQueueFamilyProperties));
    // now get the actual queue props
    VkQueueFamilyProperties* queueProps = (VkQueueFamilyProperties*)queuePropsAlloc.get();

    grVkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, queueProps);

    // iterate to find the graphics queue
    uint32_t graphicsQueueIndex = queueCount;
    for (uint32_t i = 0; i < queueCount; i++) {
        if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            graphicsQueueIndex = i;
            break;
        }
    }
    if (graphicsQueueIndex == queueCount) {
        SkDebugf("Could not find any supported graphics queues.\n");
        destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    // iterate to find the present queue, if needed
    uint32_t presentQueueIndex = queueCount;
    if (presentQueueIndexPtr && canPresent) {
        for (uint32_t i = 0; i < queueCount; i++) {
            if (canPresent(inst, physDev, i)) {
                presentQueueIndex = i;
                break;
            }
        }
        if (presentQueueIndex == queueCount) {
            SkDebugf("Could not find any supported present queues.\n");
            destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
            return false;
        }
        *presentQueueIndexPtr = presentQueueIndex;
    } else {
        // Just setting this so we end up making a single queue for graphics, since there was no
        // request for a present queue.
        presentQueueIndex = graphicsQueueIndex;
    }

    TArray<VkLayerProperties> deviceLayers;
    TArray<VkExtensionProperties> deviceExtensions;
    if (!init_device_extensions_and_layers(getProc, physDeviceVersion,
                                           inst, physDev,
                                           &deviceExtensions,
                                           &deviceLayers)) {
        destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    TArray<const char*> deviceLayerNames;
    TArray<const char*> deviceExtensionNames;
    for (int i = 0; i < deviceLayers.size(); ++i) {
        deviceLayerNames.push_back(deviceLayers[i].layerName);
    }

    // We can't have both VK_KHR_buffer_device_address and VK_EXT_buffer_device_address as
    // extensions. So see if we have the KHR version and if so don't push back the EXT version in
    // the next loop.
    bool hasKHRBufferDeviceAddress = false;
    for (int i = 0; i < deviceExtensions.size(); ++i) {
        if (!strcmp(deviceExtensions[i].extensionName, "VK_KHR_buffer_device_address")) {
            hasKHRBufferDeviceAddress = true;
            break;
        }
    }

    for (int i = 0; i < deviceExtensions.size(); ++i) {
        // Don't use experimental extensions since they typically don't work with debug layers and
        // often are missing dependency requirements for other extensions. Additionally, these are
        // often left behind in the driver even after they've been promoted to real extensions.
        if (0 != strncmp(deviceExtensions[i].extensionName, "VK_KHX", 6) &&
            0 != strncmp(deviceExtensions[i].extensionName, "VK_NVX", 6)) {

            // There are some extensions that are not supported by the debug layers which result in
            // many warnings even though we don't actually use them. It's easiest to just
            // avoid enabling those.
            if (0 == strcmp(deviceExtensions[i].extensionName, "VK_EXT_provoking_vertex")     ||
                0 == strcmp(deviceExtensions[i].extensionName, "VK_EXT_shader_object")        ||
                0 == strcmp(deviceExtensions[i].extensionName, "VK_KHR_dynamic_rendering")    ||
                0 == strcmp(deviceExtensions[i].extensionName, "VK_NV_acquire_winrt_display") ||
                0 == strcmp(deviceExtensions[i].extensionName, "VK_NV_cuda_kernel_launch")    ||
                0 == strcmp(deviceExtensions[i].extensionName, "VK_NV_low_latency")           ||
                0 == strcmp(deviceExtensions[i].extensionName, "VK_NV_present_barrier")) {
                continue;
            }

            if (!hasKHRBufferDeviceAddress ||
                0 != strcmp(deviceExtensions[i].extensionName, "VK_EXT_buffer_device_address")) {
                deviceExtensionNames.push_back(deviceExtensions[i].extensionName);
            }
        }
    }

    extensions->init(getProc, inst, physDev,
                     (uint32_t) instanceExtensionNames.size(),
                     instanceExtensionNames.begin(),
                     (uint32_t) deviceExtensionNames.size(),
                     deviceExtensionNames.begin());

    memset(features, 0, sizeof(VkPhysicalDeviceFeatures2));
    features->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
    features->pNext = nullptr;

    VkPhysicalDeviceFeatures* deviceFeatures = &features->features;
    void* pointerToFeatures = nullptr;
    if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
        extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, 1)) {
        if (!setup_features(getProc, inst, physDev, physDeviceVersion, extensions, features,
                            isProtected)) {
            destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
            return false;
        }

        // If we set the pNext of the VkDeviceCreateInfo to our VkPhysicalDeviceFeatures2 struct,
        // the device creation will use that instead of the ppEnabledFeatures.
        pointerToFeatures = features;
    } else {
        grVkGetPhysicalDeviceFeatures(physDev, deviceFeatures);
    }

    // This looks like it would slow things down, and we can't depend on it on all platforms.
    deviceFeatures->robustBufferAccess = VK_FALSE;

    VkDeviceQueueCreateFlags flags = isProtected ? VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT : 0;
    float queuePriorities[1] = { 0.0 };
    // Here we assume no need for a swapchain queue.
    // If one is needed, the client will need its own setup code.
    const VkDeviceQueueCreateInfo queueInfo[2] = {
        {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
            nullptr,                                    // pNext
            flags,                                      // VkDeviceQueueCreateFlags
            graphicsQueueIndex,                         // queueFamilyIndex
            1,                                          // queueCount
            queuePriorities,                            // pQueuePriorities
        },
        {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
            nullptr,                                    // pNext
            0,                                          // VkDeviceQueueCreateFlags
            presentQueueIndex,                          // queueFamilyIndex
            1,                                          // queueCount
            queuePriorities,                            // pQueuePriorities
        }
    };
    uint32_t queueInfoCount = (presentQueueIndex != graphicsQueueIndex) ? 2 : 1;

    const VkDeviceCreateInfo deviceInfo = {
        VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,        // sType
        pointerToFeatures,                           // pNext
        0,                                           // VkDeviceCreateFlags
        queueInfoCount,                              // queueCreateInfoCount
        queueInfo,                                   // pQueueCreateInfos
        (uint32_t) deviceLayerNames.size(),          // enabledLayerCount
        deviceLayerNames.begin(),                    // ppEnabledLayerNames
        (uint32_t) deviceExtensionNames.size(),      // enabledExtensionCount
        deviceExtensionNames.begin(),                // ppEnabledExtensionNames
        pointerToFeatures ? nullptr : deviceFeatures // ppEnabledFeatures
    };

    {
#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
        // skia:8712
        __lsan::ScopedDisabler lsanDisabler;
#endif
        err = grVkCreateDevice(physDev, &deviceInfo, nullptr, &device);
    }
    if (err) {
        SkDebugf("CreateDevice failed: %d\n", err);
        destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    VkQueue queue;
    if (isProtected) {
        ACQUIRE_VK_PROC(GetDeviceQueue2, inst, device);
        SkASSERT(grVkGetDeviceQueue2 != nullptr);
        VkDeviceQueueInfo2 queue_info2 = {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,          // sType
            nullptr,                                        // pNext
            VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT,           // flags
            graphicsQueueIndex,                             // queueFamilyIndex
            0                                               // queueIndex
        };
        grVkGetDeviceQueue2(device, &queue_info2, &queue);
    } else {
        grVkGetDeviceQueue(device, graphicsQueueIndex, 0, &queue);
    }

    skgpu::VulkanInterface interface = skgpu::VulkanInterface(
            getProc, inst, device, instanceVersion, physDeviceVersion, extensions);
    SkASSERT(interface.validate(instanceVersion, physDeviceVersion, extensions));

    sk_sp<skgpu::VulkanMemoryAllocator> memoryAllocator = VkTestMemoryAllocator::Make(
            inst, physDev, device, physDeviceVersion, extensions, &interface);

    ctx->fInstance = inst;
    ctx->fPhysicalDevice = physDev;
    ctx->fDevice = device;
    ctx->fQueue = queue;
    ctx->fGraphicsQueueIndex = graphicsQueueIndex;
    ctx->fMaxAPIVersion = apiVersion;
    ctx->fVkExtensions = extensions;
    ctx->fDeviceFeatures2 = features;
    ctx->fGetProc = getProc;
    ctx->fProtectedContext = skgpu::Protected(isProtected);
    ctx->fMemoryAllocator = memoryAllocator;

    return true;
}

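// Frees the feature structs that setup_features() chained onto a VkPhysicalDeviceFeatures2 via
// pNext: walks the chain, casting each node to a minimal {sType, pNext} header to reach the next
// node before sk_free()ing the current one.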
void FreeVulkanFeaturesStructs(const VkPhysicalDeviceFeatures2* features) {
    // All Vulkan structs that could be part of the features chain will start with the
    // structure type followed by the pNext pointer. We cast to the CommonVulkanHeader
    // so we can get access to the pNext for the next struct.
    struct CommonVulkanHeader {
        VkStructureType sType;
        void*           pNext;
    };

    void* pNext = features->pNext;
    while (pNext) {
        void* current = pNext;
        pNext = static_cast<CommonVulkanHeader*>(current)->pNext;
        sk_free(current);
    }
}

}  // namespace sk_gpu_test

#endif