/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "tools/gpu/vk/VkTestUtils.h"

#ifdef SK_VULKAN

#ifndef SK_GPU_TOOLS_VK_LIBRARY_NAME
    #if defined _WIN32
        #define SK_GPU_TOOLS_VK_LIBRARY_NAME "vulkan-1.dll"
    #else
        #define SK_GPU_TOOLS_VK_LIBRARY_NAME "libvulkan.so"
    #endif
#endif

#include "include/gpu/vk/GrVkBackendContext.h"
#include "include/gpu/vk/GrVkExtensions.h"
#include "src/core/SkAutoMalloc.h"
#include "src/ports/SkOSLibrary.h"

#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
#include <sanitizer/lsan_interface.h>
#endif

namespace sk_gpu_test {

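// Loads the Vulkan shared library (once) and returns, via the out-params, the two entry points
// needed to bootstrap everything else: vkGetInstanceProcAddr and vkGetDeviceProcAddr.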
bool LoadVkLibraryAndGetProcAddrFuncs(PFN_vkGetInstanceProcAddr* instProc,
                                      PFN_vkGetDeviceProcAddr* devProc) {
#ifdef SK_MOLTENVK
    // MoltenVK is a statically linked framework, so there is no Vulkan library to load.
    *instProc = &vkGetInstanceProcAddr;
    *devProc = &vkGetDeviceProcAddr;
    return true;
#else
    static void* vkLib = nullptr;
    static PFN_vkGetInstanceProcAddr localInstProc = nullptr;
    static PFN_vkGetDeviceProcAddr localDevProc = nullptr;
    if (!vkLib) {
        vkLib = DynamicLoadLibrary(SK_GPU_TOOLS_VK_LIBRARY_NAME);
        if (!vkLib) {
            return false;
        }
        localInstProc = (PFN_vkGetInstanceProcAddr) GetProcedureAddress(vkLib,
                                                                        "vkGetInstanceProcAddr");
        localDevProc = (PFN_vkGetDeviceProcAddr) GetProcedureAddress(vkLib,
                                                                     "vkGetDeviceProcAddr");
    }
    if (!localInstProc || !localDevProc) {
        return false;
    }
    *instProc = localInstProc;
    *devProc = localDevProc;
    return true;
#endif
}

////////////////////////////////////////////////////////////////////////////////
// Helper code to set up Vulkan context objects

#ifdef SK_ENABLE_VK_LAYERS
const char* kDebugLayerNames[] = {
    // elements of VK_LAYER_LUNARG_standard_validation
    "VK_LAYER_GOOGLE_threading",
    "VK_LAYER_LUNARG_parameter_validation",
    "VK_LAYER_LUNARG_object_tracker",
    "VK_LAYER_LUNARG_core_validation",
    "VK_LAYER_GOOGLE_unique_objects",
    // not included in standard_validation
    //"VK_LAYER_LUNARG_api_dump",
    //"VK_LAYER_LUNARG_vktrace",
    //"VK_LAYER_LUNARG_screenshot",
};

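// Vulkan packs versions as major(10 bits).minor(10 bits).patch(12 bits); zeroing the low 12 bits
// drops the patch number so versions can be compared on major.minor alone.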
static uint32_t remove_patch_version(uint32_t specVersion) {
    return (specVersion >> 12) << 12;
}

// Returns the index into the layers array for the layer we want. Returns -1 if not supported.
static int should_include_debug_layer(const char* layerName,
                                       uint32_t layerCount, VkLayerProperties* layers,
                                       uint32_t version) {
    for (uint32_t i = 0; i < layerCount; ++i) {
        if (!strcmp(layerName, layers[i].layerName)) {
            // Since the layers intercept the Vulkan calls and forward them on, we need to make
            // sure the layer was written against a version that isn't older than the version of
            // Vulkan we're using, so that it has all the API entry points.
            if (version <= remove_patch_version(layers[i].specVersion)) {
                return i;
            }
            return -1;
        }
    }
    return -1;
}

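// Callback registered with VK_EXT_debug_report when the validation layers are enabled. Errors,
// warnings, and perf warnings reported by the layers are routed through here to SkDebugf.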
VKAPI_ATTR VkBool32 VKAPI_CALL DebugReportCallback(
    VkDebugReportFlagsEXT       flags,
    VkDebugReportObjectTypeEXT  objectType,
    uint64_t                    object,
    size_t                      location,
    int32_t                     messageCode,
    const char*                 pLayerPrefix,
    const char*                 pMessage,
    void*                       pUserData) {
    if (flags & VK_DEBUG_REPORT_ERROR_BIT_EXT) {
        SkDebugf("Vulkan error [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
        return VK_TRUE; // skip further layers
    } else if (flags & VK_DEBUG_REPORT_WARNING_BIT_EXT) {
        // There is currently a bug in the spec which doesn't allow
        // VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT as a pNext
        // struct in VkDeviceCreateInfo. So we ignore that warning since it is wrong.
        if (!strstr(pMessage,
                    "pCreateInfo->pNext chain includes a structure with unexpected VkStructureType "
                    "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT")) {
            SkDebugf("Vulkan warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
        }
    } else if (flags & VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT) {
        SkDebugf("Vulkan perf warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
    } else {
        SkDebugf("Vulkan info/debug [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
    }
    return VK_FALSE;
}
#endif

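// Fetches a Vulkan function pointer through the client-supplied GrVkGetProc and binds it to a
// local variable of the matching PFN_vk type.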
#define GET_PROC_LOCAL(F, inst, device) PFN_vk ## F F = (PFN_vk ## F) getProc("vk" #F, inst, device)

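// Collects the instance layers (validation layers only, and only when SK_ENABLE_VK_LAYERS is set)
// plus all instance extensions exposed by the implementation and by the enabled layers.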
static bool init_instance_extensions_and_layers(GrVkGetProc getProc,
                                                uint32_t specVersion,
                                                SkTArray<VkExtensionProperties>* instanceExtensions,
                                                SkTArray<VkLayerProperties>* instanceLayers) {
    if (getProc == nullptr) {
        return false;
    }

    GET_PROC_LOCAL(EnumerateInstanceExtensionProperties, VK_NULL_HANDLE, VK_NULL_HANDLE);
    GET_PROC_LOCAL(EnumerateInstanceLayerProperties, VK_NULL_HANDLE, VK_NULL_HANDLE);

    if (!EnumerateInstanceExtensionProperties ||
        !EnumerateInstanceLayerProperties) {
        return false;
    }

    VkResult res;
    uint32_t layerCount = 0;
#ifdef SK_ENABLE_VK_LAYERS
    // instance layers
    res = EnumerateInstanceLayerProperties(&layerCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }
    VkLayerProperties* layers = new VkLayerProperties[layerCount];
    res = EnumerateInstanceLayerProperties(&layerCount, layers);
    if (VK_SUCCESS != res) {
        delete[] layers;
        return false;
    }

    uint32_t nonPatchVersion = remove_patch_version(specVersion);
    for (size_t i = 0; i < SK_ARRAY_COUNT(kDebugLayerNames); ++i) {
        int idx = should_include_debug_layer(kDebugLayerNames[i], layerCount, layers,
                                             nonPatchVersion);
        if (idx != -1) {
            instanceLayers->push_back() = layers[idx];
        }
    }
    delete[] layers;
#endif

    // instance extensions
    // via Vulkan implementation and implicitly enabled layers
    uint32_t extensionCount = 0;
    res = EnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }
    VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
    res = EnumerateInstanceExtensionProperties(nullptr, &extensionCount, extensions);
    if (VK_SUCCESS != res) {
        delete[] extensions;
        return false;
    }
    for (uint32_t i = 0; i < extensionCount; ++i) {
        instanceExtensions->push_back() = extensions[i];
    }
    delete[] extensions;

    // via explicitly enabled layers
    layerCount = instanceLayers->count();
    for (uint32_t layerIndex = 0; layerIndex < layerCount; ++layerIndex) {
        uint32_t extensionCount = 0;
        res = EnumerateInstanceExtensionProperties((*instanceLayers)[layerIndex].layerName,
                                                   &extensionCount, nullptr);
        if (VK_SUCCESS != res) {
            return false;
        }
        VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
        res = EnumerateInstanceExtensionProperties((*instanceLayers)[layerIndex].layerName,
                                                   &extensionCount, extensions);
        if (VK_SUCCESS != res) {
            delete[] extensions;
            return false;
        }
        for (uint32_t i = 0; i < extensionCount; ++i) {
            instanceExtensions->push_back() = extensions[i];
        }
        delete[] extensions;
    }

    return true;
}

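// Same as above, but for the device-level layers and extensions of the chosen VkPhysicalDevice.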
static bool init_device_extensions_and_layers(GrVkGetProc getProc, uint32_t specVersion,
                                              VkInstance inst, VkPhysicalDevice physDev,
                                              SkTArray<VkExtensionProperties>* deviceExtensions,
                                              SkTArray<VkLayerProperties>* deviceLayers) {
    if (getProc == nullptr) {
        return false;
    }

    GET_PROC_LOCAL(EnumerateDeviceExtensionProperties, inst, VK_NULL_HANDLE);
    GET_PROC_LOCAL(EnumerateDeviceLayerProperties, inst, VK_NULL_HANDLE);

    if (!EnumerateDeviceExtensionProperties ||
        !EnumerateDeviceLayerProperties) {
        return false;
    }

    VkResult res;
    // device layers
    uint32_t layerCount = 0;
#ifdef SK_ENABLE_VK_LAYERS
    res = EnumerateDeviceLayerProperties(physDev, &layerCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }
    VkLayerProperties* layers = new VkLayerProperties[layerCount];
    res = EnumerateDeviceLayerProperties(physDev, &layerCount, layers);
    if (VK_SUCCESS != res) {
        delete[] layers;
        return false;
    }

    uint32_t nonPatchVersion = remove_patch_version(specVersion);
    for (size_t i = 0; i < SK_ARRAY_COUNT(kDebugLayerNames); ++i) {
        int idx = should_include_debug_layer(kDebugLayerNames[i], layerCount, layers,
                                             nonPatchVersion);
        if (idx != -1) {
            deviceLayers->push_back() = layers[idx];
        }
    }
    delete[] layers;
#endif

    // device extensions
    // via Vulkan implementation and implicitly enabled layers
    uint32_t extensionCount = 0;
    res = EnumerateDeviceExtensionProperties(physDev, nullptr, &extensionCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }
    VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
    res = EnumerateDeviceExtensionProperties(physDev, nullptr, &extensionCount, extensions);
    if (VK_SUCCESS != res) {
        delete[] extensions;
        return false;
    }
    for (uint32_t i = 0; i < extensionCount; ++i) {
        deviceExtensions->push_back() = extensions[i];
    }
    delete[] extensions;

    // via explicitly enabled layers
    layerCount = deviceLayers->count();
    for (uint32_t layerIndex = 0; layerIndex < layerCount; ++layerIndex) {
        uint32_t extensionCount = 0;
        res = EnumerateDeviceExtensionProperties(physDev,
            (*deviceLayers)[layerIndex].layerName,
            &extensionCount, nullptr);
        if (VK_SUCCESS != res) {
            return false;
        }
        VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
        res = EnumerateDeviceExtensionProperties(physDev,
            (*deviceLayers)[layerIndex].layerName,
            &extensionCount, extensions);
        if (VK_SUCCESS != res) {
            delete[] extensions;
            return false;
        }
        for (uint32_t i = 0; i < extensionCount; ++i) {
            deviceExtensions->push_back() = extensions[i];
        }
        delete[] extensions;
    }

    return true;
}

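// Helpers for pulling Vulkan entry points through getProc. The NOCHECK variant leaves a null
// pointer for the caller to handle; the other two bail out of the enclosing function on failure.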
#define ACQUIRE_VK_PROC_NOCHECK(name, instance, device) \
    PFN_vk##name grVk##name = reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device))

#define ACQUIRE_VK_PROC(name, instance, device)                                    \
    PFN_vk##name grVk##name =                                                      \
            reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device)); \
    do {                                                                           \
        if (grVk##name == nullptr) {                                               \
            SkDebugf("Function ptr for vk%s could not be acquired\n", #name);      \
            if (device != VK_NULL_HANDLE) {                                        \
                destroy_instance(getProc, inst, debugCallback, hasDebugExtension); \
            }                                                                      \
            return false;                                                          \
        }                                                                          \
    } while (0)

#define ACQUIRE_VK_PROC_LOCAL(name, instance, device)                              \
    PFN_vk##name grVk##name =                                                      \
            reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device)); \
    do {                                                                           \
        if (grVk##name == nullptr) {                                               \
            SkDebugf("Function ptr for vk%s could not be acquired\n", #name);      \
            return false;                                                          \
        }                                                                          \
    } while (0)

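// Tears down the debug report callback (if one was installed) and then the VkInstance itself.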
static bool destroy_instance(GrVkGetProc getProc, VkInstance inst,
                             VkDebugReportCallbackEXT* debugCallback,
                             bool hasDebugExtension) {
    if (hasDebugExtension && *debugCallback != VK_NULL_HANDLE) {
        ACQUIRE_VK_PROC_LOCAL(DestroyDebugReportCallbackEXT, inst, VK_NULL_HANDLE);
        grVkDestroyDebugReportCallbackEXT(inst, *debugCallback, nullptr);
        *debugCallback = VK_NULL_HANDLE;
    }
    ACQUIRE_VK_PROC_LOCAL(DestroyInstance, inst, VK_NULL_HANDLE);
    grVkDestroyInstance(inst, nullptr);
    return true;
}

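// Queries the physical device features, chaining the extension feature structs we care about
// (protected memory, advanced blend, YCbCr conversion) onto features->pNext. Each struct in the
// chain is heap-allocated here and later released by FreeVulkanFeaturesStructs.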
static bool setup_features(GrVkGetProc getProc, VkInstance inst, VkPhysicalDevice physDev,
                           uint32_t physDeviceVersion, GrVkExtensions* extensions,
                           VkPhysicalDeviceFeatures2* features, bool isProtected) {
    SkASSERT(physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
             extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, 1));

    // Setup all extension feature structs we may want to use.
    void** tailPNext = &features->pNext;

    // If |isProtected| is given, attach that first
    VkPhysicalDeviceProtectedMemoryFeatures* protectedMemoryFeatures = nullptr;
    if (isProtected) {
        SkASSERT(physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0));
        protectedMemoryFeatures =
          (VkPhysicalDeviceProtectedMemoryFeatures*)sk_malloc_throw(
              sizeof(VkPhysicalDeviceProtectedMemoryFeatures));
        protectedMemoryFeatures->sType =
          VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES;
        protectedMemoryFeatures->pNext = nullptr;
        *tailPNext = protectedMemoryFeatures;
        tailPNext = &protectedMemoryFeatures->pNext;
    }

    VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* blend = nullptr;
    if (extensions->hasExtension(VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME, 2)) {
        blend = (VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*) sk_malloc_throw(
                sizeof(VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT));
        blend->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT;
        blend->pNext = nullptr;
        *tailPNext = blend;
        tailPNext = &blend->pNext;
    }

    VkPhysicalDeviceSamplerYcbcrConversionFeatures* ycbcrFeature = nullptr;
    if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
        extensions->hasExtension(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME, 1)) {
        ycbcrFeature = (VkPhysicalDeviceSamplerYcbcrConversionFeatures*) sk_malloc_throw(
                sizeof(VkPhysicalDeviceSamplerYcbcrConversionFeatures));
        ycbcrFeature->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES;
        ycbcrFeature->pNext = nullptr;
        ycbcrFeature->samplerYcbcrConversion = VK_TRUE;
        *tailPNext = ycbcrFeature;
        tailPNext = &ycbcrFeature->pNext;
    }

    if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0)) {
        ACQUIRE_VK_PROC_LOCAL(GetPhysicalDeviceFeatures2, inst, VK_NULL_HANDLE);
        grVkGetPhysicalDeviceFeatures2(physDev, features);
    } else {
        SkASSERT(extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
                                          1));
        ACQUIRE_VK_PROC_LOCAL(GetPhysicalDeviceFeatures2KHR, inst, VK_NULL_HANDLE);
        grVkGetPhysicalDeviceFeatures2KHR(physDev, features);
    }

    if (isProtected) {
        if (!protectedMemoryFeatures->protectedMemory) {
            return false;
        }
    }
    return true;
    // If we want to disable any extension features do so here.
}

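// Builds a complete GrVkBackendContext for testing: creates a VkInstance, picks the first
// physical device, finds graphics (and optionally present) queue families, gathers extensions
// and features, and creates the VkDevice and its queue.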
bool CreateVkBackendContext(GrVkGetProc getProc,
                            GrVkBackendContext* ctx,
                            GrVkExtensions* extensions,
                            VkPhysicalDeviceFeatures2* features,
                            VkDebugReportCallbackEXT* debugCallback,
                            uint32_t* presentQueueIndexPtr,
                            CanPresentFn canPresent,
                            bool isProtected) {
    VkResult err;

    ACQUIRE_VK_PROC_NOCHECK(EnumerateInstanceVersion, VK_NULL_HANDLE, VK_NULL_HANDLE);
    uint32_t instanceVersion = 0;
    if (!grVkEnumerateInstanceVersion) {
        instanceVersion = VK_MAKE_VERSION(1, 0, 0);
    } else {
        err = grVkEnumerateInstanceVersion(&instanceVersion);
        if (err) {
            SkDebugf("failed to enumerate instance version. Err: %d\n", err);
            return false;
        }
    }
    SkASSERT(instanceVersion >= VK_MAKE_VERSION(1, 0, 0));
    if (isProtected && instanceVersion < VK_MAKE_VERSION(1, 1, 0)) {
        SkDebugf("protected requires vk instance version 1.1\n");
        return false;
    }

    uint32_t apiVersion = VK_MAKE_VERSION(1, 0, 0);
    if (instanceVersion >= VK_MAKE_VERSION(1, 1, 0)) {
        // If the instance version is 1.0 then the apiVersion must also be 1.0. However, if the
        // instance version is 1.1 or higher, we can set apiVersion to the highest API version
        // Skia may use (technically it can be arbitrary). So for now we set it to 1.1 since that
        // is the highest Vulkan version we use.
        apiVersion = VK_MAKE_VERSION(1, 1, 0);
    }

    instanceVersion = SkTMin(instanceVersion, apiVersion);

    VkPhysicalDevice physDev;
    VkDevice device;
    VkInstance inst;

    const VkApplicationInfo app_info = {
        VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType
        nullptr,                            // pNext
        "vktest",                           // pApplicationName
        0,                                  // applicationVersion
        "vktest",                           // pEngineName
        0,                                  // engineVersion
        apiVersion,                         // apiVersion
    };

    SkTArray<VkLayerProperties> instanceLayers;
    SkTArray<VkExtensionProperties> instanceExtensions;

    if (!init_instance_extensions_and_layers(getProc, instanceVersion,
                                             &instanceExtensions,
                                             &instanceLayers)) {
        return false;
    }

    SkTArray<const char*> instanceLayerNames;
    SkTArray<const char*> instanceExtensionNames;
    for (int i = 0; i < instanceLayers.count(); ++i) {
        instanceLayerNames.push_back(instanceLayers[i].layerName);
    }
    for (int i = 0; i < instanceExtensions.count(); ++i) {
        if (strncmp(instanceExtensions[i].extensionName, "VK_KHX", 6)) {
            instanceExtensionNames.push_back(instanceExtensions[i].extensionName);
        }
    }

    const VkInstanceCreateInfo instance_create = {
        VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,    // sType
        nullptr,                                   // pNext
        0,                                         // flags
        &app_info,                                 // pApplicationInfo
        (uint32_t) instanceLayerNames.count(),     // enabledLayerCount
        instanceLayerNames.begin(),                // ppEnabledLayerNames
        (uint32_t) instanceExtensionNames.count(), // enabledExtensionCount
        instanceExtensionNames.begin(),            // ppEnabledExtensionNames
    };

    bool hasDebugExtension = false;

    ACQUIRE_VK_PROC(CreateInstance, VK_NULL_HANDLE, VK_NULL_HANDLE);
    err = grVkCreateInstance(&instance_create, nullptr, &inst);
    if (err < 0) {
        SkDebugf("vkCreateInstance failed: %d\n", err);
        return false;
    }

#ifdef SK_ENABLE_VK_LAYERS
    *debugCallback = VK_NULL_HANDLE;
    for (int i = 0; i < instanceExtensionNames.count() && !hasDebugExtension; ++i) {
        if (!strcmp(instanceExtensionNames[i], VK_EXT_DEBUG_REPORT_EXTENSION_NAME)) {
            hasDebugExtension = true;
        }
    }
    if (hasDebugExtension) {
        // Setup callback creation information
        VkDebugReportCallbackCreateInfoEXT callbackCreateInfo;
        callbackCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT;
        callbackCreateInfo.pNext = nullptr;
        callbackCreateInfo.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT |
                                   VK_DEBUG_REPORT_WARNING_BIT_EXT |
                                   // VK_DEBUG_REPORT_INFORMATION_BIT_EXT |
                                   // VK_DEBUG_REPORT_DEBUG_BIT_EXT |
                                   VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT;
        callbackCreateInfo.pfnCallback = &DebugReportCallback;
        callbackCreateInfo.pUserData = nullptr;

        ACQUIRE_VK_PROC(CreateDebugReportCallbackEXT, inst, VK_NULL_HANDLE);
        // Register the callback
        grVkCreateDebugReportCallbackEXT(inst, &callbackCreateInfo, nullptr, debugCallback);
    }
#endif

    ACQUIRE_VK_PROC(EnumeratePhysicalDevices, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetPhysicalDeviceProperties, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetPhysicalDeviceQueueFamilyProperties, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetPhysicalDeviceFeatures, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(CreateDevice, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetDeviceQueue, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(DeviceWaitIdle, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(DestroyDevice, inst, VK_NULL_HANDLE);

    uint32_t gpuCount;
    err = grVkEnumeratePhysicalDevices(inst, &gpuCount, nullptr);
    if (err) {
        SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }
    if (!gpuCount) {
        SkDebugf("vkEnumeratePhysicalDevices returned no supported devices.\n");
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }
    // Just returning the first physical device instead of getting the whole array.
    // TODO: find best match for our needs
    gpuCount = 1;
    err = grVkEnumeratePhysicalDevices(inst, &gpuCount, &physDev);
    // VK_INCOMPLETE is returned when the count we provide is less than the total device count.
    if (err && VK_INCOMPLETE != err) {
        SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    VkPhysicalDeviceProperties physDeviceProperties;
    grVkGetPhysicalDeviceProperties(physDev, &physDeviceProperties);
    int physDeviceVersion = SkTMin(physDeviceProperties.apiVersion, apiVersion);

    if (isProtected && physDeviceVersion < VK_MAKE_VERSION(1, 1, 0)) {
        SkDebugf("protected requires vk physical device version 1.1\n");
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    // query to get the initial queue props size
    uint32_t queueCount;
    grVkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, nullptr);
    if (!queueCount) {
        SkDebugf("vkGetPhysicalDeviceQueueFamilyProperties returned no queues.\n");
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    SkAutoMalloc queuePropsAlloc(queueCount * sizeof(VkQueueFamilyProperties));
    // now get the actual queue props
    VkQueueFamilyProperties* queueProps = (VkQueueFamilyProperties*)queuePropsAlloc.get();

    grVkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, queueProps);

    // iterate to find the graphics queue
    uint32_t graphicsQueueIndex = queueCount;
    for (uint32_t i = 0; i < queueCount; i++) {
        if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            graphicsQueueIndex = i;
            break;
        }
    }
    if (graphicsQueueIndex == queueCount) {
        SkDebugf("Could not find any supported graphics queues.\n");
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    // iterate to find the present queue, if needed
    uint32_t presentQueueIndex = queueCount;
    if (presentQueueIndexPtr && canPresent) {
        for (uint32_t i = 0; i < queueCount; i++) {
            if (canPresent(inst, physDev, i)) {
                presentQueueIndex = i;
                break;
            }
        }
        if (presentQueueIndex == queueCount) {
            SkDebugf("Could not find any supported present queues.\n");
            destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
            return false;
        }
        *presentQueueIndexPtr = presentQueueIndex;
    } else {
        // Just setting this so we end up making a single queue for graphics since there was no
        // request for a present queue.
        presentQueueIndex = graphicsQueueIndex;
    }

    SkTArray<VkLayerProperties> deviceLayers;
    SkTArray<VkExtensionProperties> deviceExtensions;
    if (!init_device_extensions_and_layers(getProc, physDeviceVersion,
                                           inst, physDev,
                                           &deviceExtensions,
                                           &deviceLayers)) {
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    SkTArray<const char*> deviceLayerNames;
    SkTArray<const char*> deviceExtensionNames;
    for (int i = 0; i < deviceLayers.count(); ++i) {
        deviceLayerNames.push_back(deviceLayers[i].layerName);
    }
    for (int i = 0; i < deviceExtensions.count(); ++i) {
        // Don't use experimental extensions since they typically don't work with debug layers and
        // are often missing dependency requirements for other extensions. Additionally, these are
        // often left behind in the driver even after they've been promoted to real extensions.
        if (strncmp(deviceExtensions[i].extensionName, "VK_KHX", 6) &&
            strncmp(deviceExtensions[i].extensionName, "VK_NVX", 6)) {
            deviceExtensionNames.push_back(deviceExtensions[i].extensionName);
        }
    }

    extensions->init(getProc, inst, physDev,
                     (uint32_t) instanceExtensionNames.count(),
                     instanceExtensionNames.begin(),
                     (uint32_t) deviceExtensionNames.count(),
                     deviceExtensionNames.begin());

    memset(features, 0, sizeof(VkPhysicalDeviceFeatures2));
    features->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
    features->pNext = nullptr;

    VkPhysicalDeviceFeatures* deviceFeatures = &features->features;
    void* pointerToFeatures = nullptr;
    if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
        extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, 1)) {
        if (!setup_features(getProc, inst, physDev, physDeviceVersion, extensions, features,
                            isProtected)) {
            destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
            return false;
        }

        // If we set the pNext of the VkDeviceCreateInfo to our VkPhysicalDeviceFeatures2 struct,
        // the device creation will use that instead of the ppEnabledFeatures.
        pointerToFeatures = features;
    } else {
        grVkGetPhysicalDeviceFeatures(physDev, deviceFeatures);
    }

    // This looks like it would slow things down, and we can't depend on it on all platforms.
    deviceFeatures->robustBufferAccess = VK_FALSE;

    VkDeviceQueueCreateFlags flags = isProtected ? VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT : 0;
    float queuePriorities[1] = { 0.0 };
    // Here we assume no need for swapchain queue
    // If one is needed, the client will need its own setup code
    const VkDeviceQueueCreateInfo queueInfo[2] = {
        {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
            nullptr,                                    // pNext
            flags,                                      // VkDeviceQueueCreateFlags
            graphicsQueueIndex,                         // queueFamilyIndex
            1,                                          // queueCount
            queuePriorities,                            // pQueuePriorities
        },
        {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
            nullptr,                                    // pNext
            0,                                          // VkDeviceQueueCreateFlags
            presentQueueIndex,                          // queueFamilyIndex
            1,                                          // queueCount
            queuePriorities,                            // pQueuePriorities
        }
    };
    uint32_t queueInfoCount = (presentQueueIndex != graphicsQueueIndex) ? 2 : 1;

    const VkDeviceCreateInfo deviceInfo = {
        VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,        // sType
        pointerToFeatures,                           // pNext
        0,                                           // VkDeviceCreateFlags
        queueInfoCount,                              // queueCreateInfoCount
        queueInfo,                                   // pQueueCreateInfos
        (uint32_t) deviceLayerNames.count(),         // enabledLayerCount
        deviceLayerNames.begin(),                    // ppEnabledLayerNames
        (uint32_t) deviceExtensionNames.count(),     // enabledExtensionCount
        deviceExtensionNames.begin(),                // ppEnabledExtensionNames
        pointerToFeatures ? nullptr : deviceFeatures // pEnabledFeatures
    };

711 
712     {
713 #if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
714         // skia:8712
715         __lsan::ScopedDisabler lsanDisabler;
716 #endif
717         err = grVkCreateDevice(physDev, &deviceInfo, nullptr, &device);
718     }
719     if (err) {
720         SkDebugf("CreateDevice failed: %d\n", err);
721         destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
722         return false;
723     }
724 
725     VkQueue queue;
726     if (isProtected) {
727         ACQUIRE_VK_PROC(GetDeviceQueue2, inst, device);
728         SkASSERT(grVkGetDeviceQueue2 != nullptr);
729         VkDeviceQueueInfo2 queue_info2 = {
730             VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,          // sType
731             nullptr,                                        // pNext
732             VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT,           // flags
733             graphicsQueueIndex,                             // queueFamilyIndex
734             0                                               // queueIndex
735         };
736         grVkGetDeviceQueue2(device, &queue_info2, &queue);
737     } else {
738         grVkGetDeviceQueue(device, graphicsQueueIndex, 0, &queue);
739     }
740 
741     ctx->fInstance = inst;
742     ctx->fPhysicalDevice = physDev;
743     ctx->fDevice = device;
744     ctx->fQueue = queue;
745     ctx->fGraphicsQueueIndex = graphicsQueueIndex;
746     ctx->fMaxAPIVersion = apiVersion;
747     ctx->fVkExtensions = extensions;
748     ctx->fDeviceFeatures2 = features;
749     ctx->fGetProc = getProc;
750     ctx->fOwnsInstanceAndDevice = false;
751     ctx->fProtectedContext = isProtected ? GrProtected::kYes : GrProtected::kNo;
752 
753     return true;
754 }
755 
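// Frees the feature structs that setup_features() chained onto VkPhysicalDeviceFeatures2::pNext.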
void FreeVulkanFeaturesStructs(const VkPhysicalDeviceFeatures2* features) {
    // All Vulkan structs that could be part of the features chain will start with the
    // structure type followed by the pNext pointer. We cast to the CommonVulkanHeader
    // so we can get access to the pNext for the next struct.
    struct CommonVulkanHeader {
        VkStructureType sType;
        void*           pNext;
    };

    void* pNext = features->pNext;
    while (pNext) {
        void* current = pNext;
        pNext = static_cast<CommonVulkanHeader*>(current)->pNext;
        sk_free(current);
    }
}

}  // namespace sk_gpu_test

#endif