/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "tools/gpu/vk/VkTestUtils.h"

#ifdef SK_VULKAN

#ifndef SK_GPU_TOOLS_VK_LIBRARY_NAME
#if defined _WIN32
#define SK_GPU_TOOLS_VK_LIBRARY_NAME vulkan-1.dll
#elif defined SK_BUILD_FOR_MAC
#define SK_GPU_TOOLS_VK_LIBRARY_NAME libvk_swiftshader.dylib
#else
#define SK_GPU_TOOLS_VK_LIBRARY_NAME libvulkan.so
#define SK_GPU_TOOLS_VK_LIBRARY_NAME_BACKUP libvulkan.so.1
#endif
#endif

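// Two-level stringification: STRINGIFY expands its argument first, so
// STRINGIFY(SK_GPU_TOOLS_VK_LIBRARY_NAME) yields e.g. "vulkan-1.dll" rather
// than the literal macro name.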
#define STRINGIFY2(S) #S
#define STRINGIFY(S) STRINGIFY2(S)

#include <algorithm>

#if defined(__GLIBC__)
#include <execinfo.h>
#endif
#include "include/gpu/vk/GrVkBackendContext.h"
#include "include/gpu/vk/VulkanBackendContext.h"
#include "include/gpu/vk/VulkanExtensions.h"
#include "src/base/SkAutoMalloc.h"
#include "src/ports/SkOSLibrary.h"

#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
#include <sanitizer/lsan_interface.h>
#endif

namespace sk_gpu_test {

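// Loads the Vulkan library once (cached in function-local statics) and hands
// back the vkGetInstanceProcAddr entry point used to bootstrap all other
// function lookups.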
bool LoadVkLibraryAndGetProcAddrFuncs(PFN_vkGetInstanceProcAddr* instProc) {
    static void* vkLib = nullptr;
    static PFN_vkGetInstanceProcAddr localInstProc = nullptr;
    if (!vkLib) {
        vkLib = SkLoadDynamicLibrary(STRINGIFY(SK_GPU_TOOLS_VK_LIBRARY_NAME));
        if (!vkLib) {
            // vulkaninfo tries to load the library from two places, so we do as well
            // https://github.com/KhronosGroup/Vulkan-Tools/blob/078d44e4664b7efa0b6c96ebced1995c4425d57a/vulkaninfo/vulkaninfo.h#L249
#ifdef SK_GPU_TOOLS_VK_LIBRARY_NAME_BACKUP
            vkLib = SkLoadDynamicLibrary(STRINGIFY(SK_GPU_TOOLS_VK_LIBRARY_NAME_BACKUP));
            if (!vkLib) {
                return false;
            }
#else
            return false;
#endif
        }
        localInstProc = (PFN_vkGetInstanceProcAddr) SkGetProcedureAddress(vkLib,
                                                                          "vkGetInstanceProcAddr");
    }
    if (!localInstProc) {
        return false;
    }
    *instProc = localInstProc;
    return true;
}

////////////////////////////////////////////////////////////////////////////////
// Helper code to set up Vulkan context objects

#ifdef SK_ENABLE_VK_LAYERS
const char* kDebugLayerNames[] = {
    // single merged layer
    "VK_LAYER_KHRONOS_validation",
    // not included in standard_validation
    //"VK_LAYER_LUNARG_api_dump",
    //"VK_LAYER_LUNARG_vktrace",
    //"VK_LAYER_LUNARG_screenshot",
};

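// Vulkan packs a version as major (bits 31:22), minor (21:12), patch (11:0),
// so clearing the low 12 bits drops the patch number for comparisons.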
static uint32_t remove_patch_version(uint32_t specVersion) {
    return (specVersion >> 12) << 12;
}

// Returns the index into the layers array for the layer we want. Returns -1 if not supported.
static int should_include_debug_layer(const char* layerName,
                                      uint32_t layerCount, VkLayerProperties* layers,
                                      uint32_t version) {
    for (uint32_t i = 0; i < layerCount; ++i) {
        if (!strcmp(layerName, layers[i].layerName)) {
            // Since the layers intercept the vulkan calls and forward them on, we need to make
            // sure the layer was written against a version that isn't older than the version of
            // Vulkan we're using, so that it has all the API entry points.
            if (version <= remove_patch_version(layers[i].specVersion)) {
                return i;
            }
            return -1;
        }
    }
    return -1;
}

static void print_backtrace() {
#if defined(__GLIBC__)
    void* stack[64];
    int count = backtrace(stack, std::size(stack));
    backtrace_symbols_fd(stack, count, 2);
#else
    // Please add implementations for other platforms.
#endif
}

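// Callback registered with VK_EXT_debug_report when the validation layers are
// enabled. A few known-spurious validation errors (see the linked issues) are
// filtered out; everything else is logged, and real errors assert in debug
// builds.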
VKAPI_ATTR VkBool32 VKAPI_CALL DebugReportCallback(
    VkDebugReportFlagsEXT flags,
    VkDebugReportObjectTypeEXT objectType,
    uint64_t object,
    size_t location,
    int32_t messageCode,
    const char* pLayerPrefix,
    const char* pMessage,
    void* pUserData) {
    if (flags & VK_DEBUG_REPORT_ERROR_BIT_EXT) {
        // See https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/1887
        if (strstr(pMessage, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-01521") ||
            strstr(pMessage, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-01522")) {
            return VK_FALSE;
        }
        // See https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/2171
        if (strstr(pMessage, "VUID-vkCmdDraw-None-02686") ||
            strstr(pMessage, "VUID-vkCmdDrawIndexed-None-02686")) {
            return VK_FALSE;
        }
        SkDebugf("Vulkan error [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
        print_backtrace();
        SkDEBUGFAIL("Vulkan debug layer error");
        return VK_TRUE; // skip further layers
    } else if (flags & VK_DEBUG_REPORT_WARNING_BIT_EXT) {
        SkDebugf("Vulkan warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
        print_backtrace();
    } else if (flags & VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT) {
        SkDebugf("Vulkan perf warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
        print_backtrace();
    } else {
        SkDebugf("Vulkan info/debug [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
    }
    return VK_FALSE;
}
#endif

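// Looks up an instance-level proc into a local grVk<name> pointer and returns
// false from the enclosing function if it is missing; the do/while(0) wrapper
// lets the macro behave like a single statement.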
#define ACQUIRE_VK_INST_PROC_LOCAL(name, instance) \
    PFN_vk##name grVk##name = \
            reinterpret_cast<PFN_vk##name>(getInstProc(instance, "vk" #name)); \
    do { \
        if (grVk##name == nullptr) { \
            SkDebugf("Function ptr for vk%s could not be acquired\n", #name); \
            return false; \
        } \
    } while (0)

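// Collects the debug layers we want (when SK_ENABLE_VK_LAYERS is set) plus all
// instance extensions advertised by the implementation and by those layers.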
static bool init_instance_extensions_and_layers(PFN_vkGetInstanceProcAddr getInstProc,
                                                uint32_t specVersion,
                                                SkTArray<VkExtensionProperties>* instanceExtensions,
                                                SkTArray<VkLayerProperties>* instanceLayers) {
    if (getInstProc == nullptr) {
        return false;
    }

    ACQUIRE_VK_INST_PROC_LOCAL(EnumerateInstanceExtensionProperties, VK_NULL_HANDLE);
    ACQUIRE_VK_INST_PROC_LOCAL(EnumerateInstanceLayerProperties, VK_NULL_HANDLE);

    VkResult res;
    uint32_t layerCount = 0;
#ifdef SK_ENABLE_VK_LAYERS
    // instance layers
    res = grVkEnumerateInstanceLayerProperties(&layerCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }
    VkLayerProperties* layers = new VkLayerProperties[layerCount];
    res = grVkEnumerateInstanceLayerProperties(&layerCount, layers);
    if (VK_SUCCESS != res) {
        delete[] layers;
        return false;
    }

    uint32_t nonPatchVersion = remove_patch_version(specVersion);
    for (size_t i = 0; i < std::size(kDebugLayerNames); ++i) {
        int idx = should_include_debug_layer(kDebugLayerNames[i], layerCount, layers,
                                             nonPatchVersion);
        if (idx != -1) {
            instanceLayers->push_back() = layers[idx];
        }
    }
    delete[] layers;
#endif

    // instance extensions
    // via Vulkan implementation and implicitly enabled layers
    {
        uint32_t extensionCount = 0;
        res = grVkEnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
        if (VK_SUCCESS != res) {
            return false;
        }
        VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
        res = grVkEnumerateInstanceExtensionProperties(nullptr, &extensionCount, extensions);
        if (VK_SUCCESS != res) {
            delete[] extensions;
            return false;
        }
        for (uint32_t i = 0; i < extensionCount; ++i) {
            instanceExtensions->push_back() = extensions[i];
        }
        delete[] extensions;
    }

    // via explicitly enabled layers
    layerCount = instanceLayers->size();
    for (uint32_t layerIndex = 0; layerIndex < layerCount; ++layerIndex) {
        uint32_t extensionCount = 0;
        res = grVkEnumerateInstanceExtensionProperties((*instanceLayers)[layerIndex].layerName,
                                                       &extensionCount, nullptr);
        if (VK_SUCCESS != res) {
            return false;
        }
        VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
        res = grVkEnumerateInstanceExtensionProperties((*instanceLayers)[layerIndex].layerName,
                                                       &extensionCount, extensions);
        if (VK_SUCCESS != res) {
            delete[] extensions;
            return false;
        }
        for (uint32_t i = 0; i < extensionCount; ++i) {
            instanceExtensions->push_back() = extensions[i];
        }
        delete[] extensions;
    }

    return true;
}

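// Resolves vk<F> through the supplied skgpu::VulkanGetProc into a local
// variable named F.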
#define GET_PROC_LOCAL(F, inst, device) PFN_vk ## F F = (PFN_vk ## F) getProc("vk" #F, inst, device)

static bool init_device_extensions_and_layers(skgpu::VulkanGetProc getProc, uint32_t specVersion,
                                              VkInstance inst, VkPhysicalDevice physDev,
                                              SkTArray<VkExtensionProperties>* deviceExtensions,
                                              SkTArray<VkLayerProperties>* deviceLayers) {
    if (getProc == nullptr) {
        return false;
    }

    GET_PROC_LOCAL(EnumerateDeviceExtensionProperties, inst, VK_NULL_HANDLE);
    GET_PROC_LOCAL(EnumerateDeviceLayerProperties, inst, VK_NULL_HANDLE);

    if (!EnumerateDeviceExtensionProperties ||
        !EnumerateDeviceLayerProperties) {
        return false;
    }

    VkResult res;
    // device layers
    uint32_t layerCount = 0;
#ifdef SK_ENABLE_VK_LAYERS
    res = EnumerateDeviceLayerProperties(physDev, &layerCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }
    VkLayerProperties* layers = new VkLayerProperties[layerCount];
    res = EnumerateDeviceLayerProperties(physDev, &layerCount, layers);
    if (VK_SUCCESS != res) {
        delete[] layers;
        return false;
    }

    uint32_t nonPatchVersion = remove_patch_version(specVersion);
    for (size_t i = 0; i < std::size(kDebugLayerNames); ++i) {
        int idx = should_include_debug_layer(kDebugLayerNames[i], layerCount, layers,
                                             nonPatchVersion);
        if (idx != -1) {
            deviceLayers->push_back() = layers[idx];
        }
    }
    delete[] layers;
#endif

    // device extensions
    // via Vulkan implementation and implicitly enabled layers
    {
        uint32_t extensionCount = 0;
        res = EnumerateDeviceExtensionProperties(physDev, nullptr, &extensionCount, nullptr);
        if (VK_SUCCESS != res) {
            return false;
        }
        VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
        res = EnumerateDeviceExtensionProperties(physDev, nullptr, &extensionCount, extensions);
        if (VK_SUCCESS != res) {
            delete[] extensions;
            return false;
        }
        for (uint32_t i = 0; i < extensionCount; ++i) {
            deviceExtensions->push_back() = extensions[i];
        }
        delete[] extensions;
    }

    // via explicitly enabled layers
    layerCount = deviceLayers->size();
    for (uint32_t layerIndex = 0; layerIndex < layerCount; ++layerIndex) {
        uint32_t extensionCount = 0;
        res = EnumerateDeviceExtensionProperties(physDev,
                                                 (*deviceLayers)[layerIndex].layerName,
                                                 &extensionCount, nullptr);
        if (VK_SUCCESS != res) {
            return false;
        }
        VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
        res = EnumerateDeviceExtensionProperties(physDev,
                                                 (*deviceLayers)[layerIndex].layerName,
                                                 &extensionCount, extensions);
        if (VK_SUCCESS != res) {
            delete[] extensions;
            return false;
        }
        for (uint32_t i = 0; i < extensionCount; ++i) {
            deviceExtensions->push_back() = extensions[i];
        }
        delete[] extensions;
    }

    return true;
}

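// Proc-acquisition macro variants: the _NOCHECK forms leave a null pointer for
// the caller to test, while the checked forms log, tear down any partially
// constructed instance, and return false from the enclosing function.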
#define ACQUIRE_VK_INST_PROC_NOCHECK(name, instance) \
    PFN_vk##name grVk##name = reinterpret_cast<PFN_vk##name>(getInstProc(instance, "vk" #name))

#define ACQUIRE_VK_INST_PROC(name, instance) \
    PFN_vk##name grVk##name = \
            reinterpret_cast<PFN_vk##name>(getInstProc(instance, "vk" #name)); \
    do { \
        if (grVk##name == nullptr) { \
            SkDebugf("Function ptr for vk%s could not be acquired\n", #name); \
            if (inst != VK_NULL_HANDLE) { \
                destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension); \
            } \
            return false; \
        } \
    } while (0)

#define ACQUIRE_VK_PROC_NOCHECK(name, instance, device) \
    PFN_vk##name grVk##name = reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device))

#define ACQUIRE_VK_PROC(name, instance, device) \
    PFN_vk##name grVk##name = \
            reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device)); \
    do { \
        if (grVk##name == nullptr) { \
            SkDebugf("Function ptr for vk%s could not be acquired\n", #name); \
            if (inst != VK_NULL_HANDLE) { \
                destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension); \
            } \
            return false; \
        } \
    } while (0)

#define ACQUIRE_VK_PROC_LOCAL(name, instance, device) \
    PFN_vk##name grVk##name = \
            reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device)); \
    do { \
        if (grVk##name == nullptr) { \
            SkDebugf("Function ptr for vk%s could not be acquired\n", #name); \
            return false; \
        } \
    } while (0)

static bool destroy_instance(PFN_vkGetInstanceProcAddr getInstProc, VkInstance inst,
                             VkDebugReportCallbackEXT* debugCallback,
                             bool hasDebugExtension) {
    if (hasDebugExtension && *debugCallback != VK_NULL_HANDLE) {
        ACQUIRE_VK_INST_PROC_LOCAL(DestroyDebugReportCallbackEXT, inst);
        grVkDestroyDebugReportCallbackEXT(inst, *debugCallback, nullptr);
        *debugCallback = VK_NULL_HANDLE;
    }
    ACQUIRE_VK_INST_PROC_LOCAL(DestroyInstance, inst);
    grVkDestroyInstance(inst, nullptr);
    return true;
}

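// Builds the pNext chain of extension feature structs hanging off |features|
// and queries the physical device to populate them. Each struct is allocated
// with sk_malloc_throw and freed later by FreeVulkanFeaturesStructs().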
static bool setup_features(skgpu::VulkanGetProc getProc, VkInstance inst, VkPhysicalDevice physDev,
                           uint32_t physDeviceVersion, skgpu::VulkanExtensions* extensions,
                           VkPhysicalDeviceFeatures2* features, bool isProtected) {
    SkASSERT(physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
             extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, 1));

    // Setup all extension feature structs we may want to use.
    void** tailPNext = &features->pNext;

    // If |isProtected| is given, attach that first
    VkPhysicalDeviceProtectedMemoryFeatures* protectedMemoryFeatures = nullptr;
    if (isProtected) {
        SkASSERT(physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0));
        protectedMemoryFeatures =
                (VkPhysicalDeviceProtectedMemoryFeatures*)sk_malloc_throw(
                        sizeof(VkPhysicalDeviceProtectedMemoryFeatures));
        protectedMemoryFeatures->sType =
                VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES;
        protectedMemoryFeatures->pNext = nullptr;
        *tailPNext = protectedMemoryFeatures;
        tailPNext = &protectedMemoryFeatures->pNext;
    }

    VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* blend = nullptr;
    if (extensions->hasExtension(VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME, 2)) {
        blend = (VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*) sk_malloc_throw(
                sizeof(VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT));
        blend->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT;
        blend->pNext = nullptr;
        *tailPNext = blend;
        tailPNext = &blend->pNext;
    }

    VkPhysicalDeviceSamplerYcbcrConversionFeatures* ycbcrFeature = nullptr;
    if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
        extensions->hasExtension(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME, 1)) {
        ycbcrFeature = (VkPhysicalDeviceSamplerYcbcrConversionFeatures*) sk_malloc_throw(
                sizeof(VkPhysicalDeviceSamplerYcbcrConversionFeatures));
        ycbcrFeature->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES;
        ycbcrFeature->pNext = nullptr;
        ycbcrFeature->samplerYcbcrConversion = VK_TRUE;
        *tailPNext = ycbcrFeature;
        tailPNext = &ycbcrFeature->pNext;
    }

    if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0)) {
        ACQUIRE_VK_PROC_LOCAL(GetPhysicalDeviceFeatures2, inst, VK_NULL_HANDLE);
        grVkGetPhysicalDeviceFeatures2(physDev, features);
    } else {
        SkASSERT(extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
                                          1));
        ACQUIRE_VK_PROC_LOCAL(GetPhysicalDeviceFeatures2KHR, inst, VK_NULL_HANDLE);
        grVkGetPhysicalDeviceFeatures2KHR(physDev, features);
    }

    if (isProtected) {
        if (!protectedMemoryFeatures->protectedMemory) {
            return false;
        }
    }
    // If we want to disable any extension features, do so here before returning.
    return true;
}

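// Legacy entry point: fills in a skgpu::VulkanBackendContext via the overload
// below, then copies the results into the GrVkBackendContext.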
bool CreateVkBackendContext(PFN_vkGetInstanceProcAddr getInstProc,
                            GrVkBackendContext* ctx,
                            skgpu::VulkanExtensions* extensions,
                            VkPhysicalDeviceFeatures2* features,
                            VkDebugReportCallbackEXT* debugCallback,
                            uint32_t* presentQueueIndexPtr,
                            CanPresentFn canPresent,
                            bool isProtected) {
    skgpu::VulkanBackendContext skgpuCtx;
    if (!CreateVkBackendContext(getInstProc,
                                &skgpuCtx,
                                extensions,
                                features,
                                debugCallback,
                                presentQueueIndexPtr,
                                canPresent,
                                isProtected)) {
        return false;
    }
    ctx->fInstance = skgpuCtx.fInstance;
    ctx->fPhysicalDevice = skgpuCtx.fPhysicalDevice;
    ctx->fDevice = skgpuCtx.fDevice;
    ctx->fQueue = skgpuCtx.fQueue;
    ctx->fGraphicsQueueIndex = skgpuCtx.fGraphicsQueueIndex;
    ctx->fMaxAPIVersion = skgpuCtx.fMaxAPIVersion;
    ctx->fVkExtensions = skgpuCtx.fVkExtensions;
    ctx->fDeviceFeatures2 = skgpuCtx.fDeviceFeatures2;
    ctx->fGetProc = skgpuCtx.fGetProc;
    ctx->fOwnsInstanceAndDevice = false;
    ctx->fProtectedContext =
            skgpuCtx.fProtectedContext == skgpu::Protected::kYes ? GrProtected::kYes
                                                                 : GrProtected::kNo;
    return true;
}

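// Creates a Vulkan instance and device for testing: picks the first physical
// device, finds a graphics queue (and a present queue when |canPresent| is
// supplied), enables debug layers when built with SK_ENABLE_VK_LAYERS, and
// returns the handles through |ctx|.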
bool CreateVkBackendContext(PFN_vkGetInstanceProcAddr getInstProc,
                            skgpu::VulkanBackendContext* ctx,
                            skgpu::VulkanExtensions* extensions,
                            VkPhysicalDeviceFeatures2* features,
                            VkDebugReportCallbackEXT* debugCallback,
                            uint32_t* presentQueueIndexPtr,
                            CanPresentFn canPresent,
                            bool isProtected) {
    VkResult err;

    ACQUIRE_VK_INST_PROC_NOCHECK(EnumerateInstanceVersion, VK_NULL_HANDLE);
    uint32_t instanceVersion = 0;
    if (!grVkEnumerateInstanceVersion) {
        // vkEnumerateInstanceVersion only exists on 1.1+ loaders; a null pointer means 1.0.
        instanceVersion = VK_MAKE_VERSION(1, 0, 0);
    } else {
        err = grVkEnumerateInstanceVersion(&instanceVersion);
        if (err) {
            SkDebugf("failed to enumerate instance version. Err: %d\n", err);
            return false;
        }
    }
    SkASSERT(instanceVersion >= VK_MAKE_VERSION(1, 0, 0));
    if (isProtected && instanceVersion < VK_MAKE_VERSION(1, 1, 0)) {
        SkDebugf("protected requires vk instance version 1.1\n");
        return false;
    }

    uint32_t apiVersion = VK_MAKE_VERSION(1, 0, 0);
    if (instanceVersion >= VK_MAKE_VERSION(1, 1, 0)) {
        // If the instance version is 1.0 the apiVersion must also be 1.0. However, if the
        // instance version is 1.1 or higher, we can set the apiVersion to the highest version
        // we may use in Skia (technically it can be arbitrary). For now we set it to 1.1 since
        // that is the highest version these tools make use of.
        apiVersion = VK_MAKE_VERSION(1, 1, 0);
    }

    instanceVersion = std::min(instanceVersion, apiVersion);

    SkSTArray<2, VkPhysicalDevice> physDevs;
    VkDevice device;
    VkInstance inst = VK_NULL_HANDLE;

    const VkApplicationInfo app_info = {
        VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType
        nullptr,                            // pNext
        "vktest",                           // pApplicationName
        0,                                  // applicationVersion
        "vktest",                           // pEngineName
        0,                                  // engineVersion
        apiVersion,                         // apiVersion
    };

    SkTArray<VkLayerProperties> instanceLayers;
    SkTArray<VkExtensionProperties> instanceExtensions;

    if (!init_instance_extensions_and_layers(getInstProc, instanceVersion,
                                             &instanceExtensions,
                                             &instanceLayers)) {
        return false;
    }

    SkTArray<const char*> instanceLayerNames;
    SkTArray<const char*> instanceExtensionNames;
    for (int i = 0; i < instanceLayers.size(); ++i) {
        instanceLayerNames.push_back(instanceLayers[i].layerName);
    }
    for (int i = 0; i < instanceExtensions.size(); ++i) {
        if (strncmp(instanceExtensions[i].extensionName, "VK_KHX", 6) != 0) {
            instanceExtensionNames.push_back(instanceExtensions[i].extensionName);
        }
    }

    const VkInstanceCreateInfo instance_create = {
        VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,    // sType
        nullptr,                                   // pNext
        0,                                         // flags
        &app_info,                                 // pApplicationInfo
        (uint32_t) instanceLayerNames.size(),      // enabledLayerCount
        instanceLayerNames.begin(),                // ppEnabledLayerNames
        (uint32_t) instanceExtensionNames.size(),  // enabledExtensionCount
        instanceExtensionNames.begin(),            // ppEnabledExtensionNames
    };

    bool hasDebugExtension = false;

    ACQUIRE_VK_INST_PROC(CreateInstance, VK_NULL_HANDLE);
    err = grVkCreateInstance(&instance_create, nullptr, &inst);
    if (err < 0) {
        SkDebugf("vkCreateInstance failed: %d\n", err);
        return false;
    }

    ACQUIRE_VK_INST_PROC(GetDeviceProcAddr, inst);
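    // Dispatcher handed to Skia: prefer device-level entry points when a
    // device is available (avoiding the loader's per-call dispatch), otherwise
    // fall back to instance-level lookup.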
    auto getProc = [getInstProc, grVkGetDeviceProcAddr](const char* proc_name,
                                                        VkInstance instance, VkDevice device) {
        if (device != VK_NULL_HANDLE) {
            return grVkGetDeviceProcAddr(device, proc_name);
        }
        return getInstProc(instance, proc_name);
    };

#ifdef SK_ENABLE_VK_LAYERS
    *debugCallback = VK_NULL_HANDLE;
    for (int i = 0; i < instanceExtensionNames.size() && !hasDebugExtension; ++i) {
        if (!strcmp(instanceExtensionNames[i], VK_EXT_DEBUG_REPORT_EXTENSION_NAME)) {
            hasDebugExtension = true;
        }
    }
    if (hasDebugExtension) {
        // Setup callback creation information
        VkDebugReportCallbackCreateInfoEXT callbackCreateInfo;
        callbackCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT;
        callbackCreateInfo.pNext = nullptr;
        callbackCreateInfo.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT |
                                   VK_DEBUG_REPORT_WARNING_BIT_EXT |
                                   // VK_DEBUG_REPORT_INFORMATION_BIT_EXT |
                                   // VK_DEBUG_REPORT_DEBUG_BIT_EXT |
                                   VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT;
        callbackCreateInfo.pfnCallback = &DebugReportCallback;
        callbackCreateInfo.pUserData = nullptr;

        ACQUIRE_VK_PROC(CreateDebugReportCallbackEXT, inst, VK_NULL_HANDLE);
        // Register the callback
        grVkCreateDebugReportCallbackEXT(inst, &callbackCreateInfo, nullptr, debugCallback);
    }
#endif

    ACQUIRE_VK_PROC(EnumeratePhysicalDevices, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetPhysicalDeviceProperties, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetPhysicalDeviceQueueFamilyProperties, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetPhysicalDeviceFeatures, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(CreateDevice, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetDeviceQueue, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(DeviceWaitIdle, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(DestroyDevice, inst, VK_NULL_HANDLE);

    uint32_t gpuCount;
    err = grVkEnumeratePhysicalDevices(inst, &gpuCount, nullptr);
    if (err) {
        SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
        destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
        return false;
    }
    if (!gpuCount) {
        SkDebugf("vkEnumeratePhysicalDevices returned no supported devices.\n");
        destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
        return false;
    }
    // Allocate enough storage for all available physical devices. We should be able to just ask
    // for the first one, but a bug in RenderDoc (https://github.com/baldurk/renderdoc/issues/2766)
    // will smash the stack if we do that.
    physDevs.resize(gpuCount);
    err = grVkEnumeratePhysicalDevices(inst, &gpuCount, physDevs.data());
    if (err) {
        SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
        destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
        return false;
    }
    // We just use the first physical device.
    // TODO: find best match for our needs
    VkPhysicalDevice physDev = physDevs.front();

    VkPhysicalDeviceProperties physDeviceProperties;
    grVkGetPhysicalDeviceProperties(physDev, &physDeviceProperties);
    uint32_t physDeviceVersion = std::min(physDeviceProperties.apiVersion, apiVersion);

    if (isProtected && physDeviceVersion < VK_MAKE_VERSION(1, 1, 0)) {
        SkDebugf("protected requires vk physical device version 1.1\n");
        destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    // query to get the initial queue props size
    uint32_t queueCount;
    grVkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, nullptr);
    if (!queueCount) {
        SkDebugf("vkGetPhysicalDeviceQueueFamilyProperties returned no queues.\n");
        destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    SkAutoMalloc queuePropsAlloc(queueCount * sizeof(VkQueueFamilyProperties));
    // now get the actual queue props
    VkQueueFamilyProperties* queueProps = (VkQueueFamilyProperties*)queuePropsAlloc.get();

    grVkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, queueProps);

    // iterate to find the graphics queue
    uint32_t graphicsQueueIndex = queueCount;
    for (uint32_t i = 0; i < queueCount; i++) {
        if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            graphicsQueueIndex = i;
            break;
        }
    }
    if (graphicsQueueIndex == queueCount) {
        SkDebugf("Could not find any supported graphics queues.\n");
        destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    // iterate to find the present queue, if needed
    uint32_t presentQueueIndex = queueCount;
    if (presentQueueIndexPtr && canPresent) {
        for (uint32_t i = 0; i < queueCount; i++) {
            if (canPresent(inst, physDev, i)) {
                presentQueueIndex = i;
                break;
            }
        }
        if (presentQueueIndex == queueCount) {
            SkDebugf("Could not find any supported present queues.\n");
            destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
            return false;
        }
        *presentQueueIndexPtr = presentQueueIndex;
    } else {
        // Set this so we end up making a single queue for graphics, since there was no request
        // for a present queue.
        presentQueueIndex = graphicsQueueIndex;
    }

    SkTArray<VkLayerProperties> deviceLayers;
    SkTArray<VkExtensionProperties> deviceExtensions;
    if (!init_device_extensions_and_layers(getProc, physDeviceVersion,
                                           inst, physDev,
                                           &deviceExtensions,
                                           &deviceLayers)) {
        destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    SkTArray<const char*> deviceLayerNames;
    SkTArray<const char*> deviceExtensionNames;
    for (int i = 0; i < deviceLayers.size(); ++i) {
        deviceLayerNames.push_back(deviceLayers[i].layerName);
    }

    // We can't have both VK_KHR_buffer_device_address and VK_EXT_buffer_device_address as
    // extensions. So see if we have the KHR version and if so don't push back the EXT version in
    // the next loop.
    bool hasKHRBufferDeviceAddress = false;
    for (int i = 0; i < deviceExtensions.size(); ++i) {
        if (!strcmp(deviceExtensions[i].extensionName, "VK_KHR_buffer_device_address")) {
            hasKHRBufferDeviceAddress = true;
            break;
        }
    }

    for (int i = 0; i < deviceExtensions.size(); ++i) {
        // Don't use experimental extensions since they typically don't work with debug layers
        // and often are missing dependency requirements for other extensions. Additionally,
        // these are often left behind in the driver even after they've been promoted to real
        // extensions.
        if (0 != strncmp(deviceExtensions[i].extensionName, "VK_KHX", 6) &&
            0 != strncmp(deviceExtensions[i].extensionName, "VK_NVX", 6)) {

            // These extensions aren't supported by the debug layers, so we get lots of warnings.
            // We don't actually use them, so it is easiest to just not enable them.
            if (0 == strcmp(deviceExtensions[i].extensionName, "VK_NV_low_latency") ||
                0 == strcmp(deviceExtensions[i].extensionName, "VK_NV_acquire_winrt_display") ||
                0 == strcmp(deviceExtensions[i].extensionName, "VK_NV_cuda_kernel_launch") ||
                0 == strcmp(deviceExtensions[i].extensionName, "VK_EXT_provoking_vertex")) {
                continue;
            }

            if (!hasKHRBufferDeviceAddress ||
                0 != strcmp(deviceExtensions[i].extensionName, "VK_EXT_buffer_device_address")) {
                deviceExtensionNames.push_back(deviceExtensions[i].extensionName);
            }
        }
    }

    extensions->init(getProc, inst, physDev,
                     (uint32_t) instanceExtensionNames.size(),
                     instanceExtensionNames.begin(),
                     (uint32_t) deviceExtensionNames.size(),
                     deviceExtensionNames.begin());

    memset(features, 0, sizeof(VkPhysicalDeviceFeatures2));
    features->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
    features->pNext = nullptr;

    VkPhysicalDeviceFeatures* deviceFeatures = &features->features;
    void* pointerToFeatures = nullptr;
    if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
        extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, 1)) {
        if (!setup_features(getProc, inst, physDev, physDeviceVersion, extensions, features,
                            isProtected)) {
            destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
            return false;
        }

        // If we set the pNext of the VkDeviceCreateInfo to our VkPhysicalDeviceFeatures2 struct,
        // the device creation will use that instead of the ppEnabledFeatures.
        pointerToFeatures = features;
    } else {
        grVkGetPhysicalDeviceFeatures(physDev, deviceFeatures);
    }

    // this looks like it would slow things down,
    // and we can't depend on it on all platforms
    deviceFeatures->robustBufferAccess = VK_FALSE;

    VkDeviceQueueCreateFlags flags = isProtected ? VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT : 0;
    float queuePriorities[1] = { 0.0 };
    // Here we assume no need for swapchain queue
    // If one is needed, the client will need its own setup code
    const VkDeviceQueueCreateInfo queueInfo[2] = {
        {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
            nullptr,                                    // pNext
            flags,                                      // VkDeviceQueueCreateFlags
            graphicsQueueIndex,                         // queueFamilyIndex
            1,                                          // queueCount
            queuePriorities,                            // pQueuePriorities
        },
        {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
            nullptr,                                    // pNext
            0,                                          // VkDeviceQueueCreateFlags
            presentQueueIndex,                          // queueFamilyIndex
            1,                                          // queueCount
            queuePriorities,                            // pQueuePriorities
        }
    };
    uint32_t queueInfoCount = (presentQueueIndex != graphicsQueueIndex) ? 2 : 1;

    const VkDeviceCreateInfo deviceInfo = {
        VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,        // sType
        pointerToFeatures,                           // pNext
        0,                                           // VkDeviceCreateFlags
        queueInfoCount,                              // queueCreateInfoCount
        queueInfo,                                   // pQueueCreateInfos
        (uint32_t) deviceLayerNames.size(),          // enabledLayerCount
        deviceLayerNames.begin(),                    // ppEnabledLayerNames
        (uint32_t) deviceExtensionNames.size(),      // enabledExtensionCount
        deviceExtensionNames.begin(),                // ppEnabledExtensionNames
        pointerToFeatures ? nullptr : deviceFeatures // ppEnabledFeatures
    };

    {
#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
        // skia:8712
        __lsan::ScopedDisabler lsanDisabler;
#endif
        err = grVkCreateDevice(physDev, &deviceInfo, nullptr, &device);
    }
    if (err) {
        SkDebugf("CreateDevice failed: %d\n", err);
        destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    VkQueue queue;
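    // Queues created with flags (here VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT)
    // must be retrieved with vkGetDeviceQueue2, which passes the flags
    // explicitly.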
    if (isProtected) {
        ACQUIRE_VK_PROC(GetDeviceQueue2, inst, device);
        SkASSERT(grVkGetDeviceQueue2 != nullptr);
        VkDeviceQueueInfo2 queue_info2 = {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,  // sType
            nullptr,                                // pNext
            VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT,   // flags
            graphicsQueueIndex,                     // queueFamilyIndex
            0                                       // queueIndex
        };
        grVkGetDeviceQueue2(device, &queue_info2, &queue);
    } else {
        grVkGetDeviceQueue(device, graphicsQueueIndex, 0, &queue);
    }

    ctx->fInstance = inst;
    ctx->fPhysicalDevice = physDev;
    ctx->fDevice = device;
    ctx->fQueue = queue;
    ctx->fGraphicsQueueIndex = graphicsQueueIndex;
    ctx->fMaxAPIVersion = apiVersion;
    ctx->fVkExtensions = extensions;
    ctx->fDeviceFeatures2 = features;
    ctx->fGetProc = getProc;
    ctx->fProtectedContext = isProtected ? skgpu::Protected::kYes : skgpu::Protected::kNo;

    return true;
}

void FreeVulkanFeaturesStructs(const VkPhysicalDeviceFeatures2* features) {
    // All Vulkan structs that could be part of the features chain will start with the
    // structure type followed by the pNext pointer. We cast to the CommonVulkanHeader
    // so we can get access to the pNext for the next struct.
    struct CommonVulkanHeader {
        VkStructureType sType;
        void* pNext;
    };

    void* pNext = features->pNext;
    while (pNext) {
        void* current = pNext;
        pNext = static_cast<CommonVulkanHeader*>(current)->pNext;
        sk_free(current);
    }
}

}  // namespace sk_gpu_test

#endif