1 /*
2 * Copyright 2017 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "tools/gpu/vk/VkTestUtils.h"
9
10 #ifdef SK_VULKAN
11
// Name of the Vulkan shared library to load at runtime. A build may override this by
// defining SK_GPU_TOOLS_VK_LIBRARY_NAME itself.
#ifndef SK_GPU_TOOLS_VK_LIBRARY_NAME
#if defined _WIN32
#define SK_GPU_TOOLS_VK_LIBRARY_NAME vulkan-1.dll
#elif defined SK_BUILD_FOR_MAC
// On Mac the default is SwiftShader's Vulkan implementation rather than a system ICD.
#define SK_GPU_TOOLS_VK_LIBRARY_NAME libvk_swiftshader.dylib
#else
#define SK_GPU_TOOLS_VK_LIBRARY_NAME libvulkan.so
#endif
#endif

// Two-step stringification so the library-name macro is expanded before being quoted.
#define STRINGIFY2(S) #S
#define STRINGIFY(S) STRINGIFY2(S)
24
#include <algorithm>
#include <memory>
26
27 #if defined(SK_BUILD_FOR_UNIX)
28 #include <execinfo.h>
29 #endif
30 #include "include/gpu/vk/GrVkBackendContext.h"
31 #include "include/gpu/vk/GrVkExtensions.h"
32 #include "src/core/SkAutoMalloc.h"
33 #include "src/ports/SkOSLibrary.h"
34
35 #if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
36 #include <sanitizer/lsan_interface.h>
37 #endif
38
39 namespace sk_gpu_test {
40
LoadVkLibraryAndGetProcAddrFuncs(PFN_vkGetInstanceProcAddr * instProc)41 bool LoadVkLibraryAndGetProcAddrFuncs(PFN_vkGetInstanceProcAddr* instProc) {
42 static void* vkLib = nullptr;
43 static PFN_vkGetInstanceProcAddr localInstProc = nullptr;
44 if (!vkLib) {
45 vkLib = SkLoadDynamicLibrary(STRINGIFY(SK_GPU_TOOLS_VK_LIBRARY_NAME));
46 if (!vkLib) {
47 return false;
48 }
49 localInstProc = (PFN_vkGetInstanceProcAddr) SkGetProcedureAddress(vkLib,
50 "vkGetInstanceProcAddr");
51 }
52 if (!localInstProc) {
53 return false;
54 }
55 *instProc = localInstProc;
56 return true;
57 }
58
59 ////////////////////////////////////////////////////////////////////////////////
60 // Helper code to set up Vulkan context objects
61
62 #ifdef SK_ENABLE_VK_LAYERS
// Validation layers we try to enable when SK_ENABLE_VK_LAYERS is defined. A name is
// only actually enabled if the layer is installed and new enough; see
// should_include_debug_layer().
const char* kDebugLayerNames[] = {
    // single merged layer
    "VK_LAYER_KHRONOS_validation",
    // not included in standard_validation
    //"VK_LAYER_LUNARG_api_dump",
    //"VK_LAYER_LUNARG_vktrace",
    //"VK_LAYER_LUNARG_screenshot",
};
71
// Clears the low 12 bits of a Vulkan version, i.e. the patch component, leaving the
// major/minor fields intact so versions can be compared ignoring the patch level.
static uint32_t remove_patch_version(uint32_t specVersion) {
    return specVersion & ~uint32_t{0xFFF};
}
75
76 // Returns the index into layers array for the layer we want. Returns -1 if not supported.
should_include_debug_layer(const char * layerName,uint32_t layerCount,VkLayerProperties * layers,uint32_t version)77 static int should_include_debug_layer(const char* layerName,
78 uint32_t layerCount, VkLayerProperties* layers,
79 uint32_t version) {
80 for (uint32_t i = 0; i < layerCount; ++i) {
81 if (!strcmp(layerName, layers[i].layerName)) {
82 // Since the layers intercept the vulkan calls and forward them on, we need to make sure
83 // layer was written against a version that isn't older than the version of Vulkan we're
84 // using so that it has all the api entry points.
85 if (version <= remove_patch_version(layers[i].specVersion)) {
86 return i;
87 }
88 return -1;
89 }
90
91 }
92 return -1;
93 }
94
// Writes the current call stack to stderr (fd 2) to make validation-layer reports
// easier to attribute. Only implemented for Unix-like builds.
static void print_backtrace() {
#if defined(SK_BUILD_FOR_UNIX)
    void* frames[64];
    const int frameCount = backtrace(frames, SK_ARRAY_COUNT(frames));
    backtrace_symbols_fd(frames, frameCount, 2);
#else
    // Please add implementations for other platforms.
#endif
}
104
DebugReportCallback(VkDebugReportFlagsEXT flags,VkDebugReportObjectTypeEXT objectType,uint64_t object,size_t location,int32_t messageCode,const char * pLayerPrefix,const char * pMessage,void * pUserData)105 VKAPI_ATTR VkBool32 VKAPI_CALL DebugReportCallback(
106 VkDebugReportFlagsEXT flags,
107 VkDebugReportObjectTypeEXT objectType,
108 uint64_t object,
109 size_t location,
110 int32_t messageCode,
111 const char* pLayerPrefix,
112 const char* pMessage,
113 void* pUserData) {
114 if (flags & VK_DEBUG_REPORT_ERROR_BIT_EXT) {
115 // See https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/1887
116 if (strstr(pMessage, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-01521") ||
117 strstr(pMessage, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-01522")) {
118 return VK_FALSE;
119 }
120 // See https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/2171
121 if (strstr(pMessage, "VUID-vkCmdDraw-None-02686") ||
122 strstr(pMessage, "VUID-vkCmdDrawIndexed-None-02686")) {
123 return VK_FALSE;
124 }
125 SkDebugf("Vulkan error [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
126 print_backtrace();
127 SkDEBUGFAIL("Vulkan debug layer error");
128 return VK_TRUE; // skip further layers
129 } else if (flags & VK_DEBUG_REPORT_WARNING_BIT_EXT) {
130 SkDebugf("Vulkan warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
131 print_backtrace();
132 } else if (flags & VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT) {
133 SkDebugf("Vulkan perf warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
134 print_backtrace();
135 } else {
136 SkDebugf("Vulkan info/debug [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
137 }
138 return VK_FALSE;
139 }
140 #endif
141
// Resolves vk<name> via a `getInstProc` in scope at the use site into a local
// `grVk<name>`; returns false from the enclosing function if the entry point is missing.
#define ACQUIRE_VK_INST_PROC_LOCAL(name, instance) \
    PFN_vk##name grVk##name = \
        reinterpret_cast<PFN_vk##name>(getInstProc(instance, "vk" #name)); \
    do { \
        if (grVk##name == nullptr) { \
            SkDebugf("Function ptr for vk%s could not be acquired\n", #name); \
            return false; \
        } \
    } while (0)
151
init_instance_extensions_and_layers(PFN_vkGetInstanceProcAddr getInstProc,uint32_t specVersion,SkTArray<VkExtensionProperties> * instanceExtensions,SkTArray<VkLayerProperties> * instanceLayers)152 static bool init_instance_extensions_and_layers(PFN_vkGetInstanceProcAddr getInstProc,
153 uint32_t specVersion,
154 SkTArray<VkExtensionProperties>* instanceExtensions,
155 SkTArray<VkLayerProperties>* instanceLayers) {
156 if (getInstProc == nullptr) {
157 return false;
158 }
159
160 ACQUIRE_VK_INST_PROC_LOCAL(EnumerateInstanceExtensionProperties, VK_NULL_HANDLE);
161 ACQUIRE_VK_INST_PROC_LOCAL(EnumerateInstanceLayerProperties, VK_NULL_HANDLE);
162
163 VkResult res;
164 uint32_t layerCount = 0;
165 #ifdef SK_ENABLE_VK_LAYERS
166 // instance layers
167 res = grVkEnumerateInstanceLayerProperties(&layerCount, nullptr);
168 if (VK_SUCCESS != res) {
169 return false;
170 }
171 VkLayerProperties* layers = new VkLayerProperties[layerCount];
172 res = grVkEnumerateInstanceLayerProperties(&layerCount, layers);
173 if (VK_SUCCESS != res) {
174 delete[] layers;
175 return false;
176 }
177
178 uint32_t nonPatchVersion = remove_patch_version(specVersion);
179 for (size_t i = 0; i < SK_ARRAY_COUNT(kDebugLayerNames); ++i) {
180 int idx = should_include_debug_layer(kDebugLayerNames[i], layerCount, layers,
181 nonPatchVersion);
182 if (idx != -1) {
183 instanceLayers->push_back() = layers[idx];
184 }
185 }
186 delete[] layers;
187 #endif
188
189 // instance extensions
190 // via Vulkan implementation and implicitly enabled layers
191 {
192 uint32_t extensionCount = 0;
193 res = grVkEnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
194 if (VK_SUCCESS != res) {
195 return false;
196 }
197 VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
198 res = grVkEnumerateInstanceExtensionProperties(nullptr, &extensionCount, extensions);
199 if (VK_SUCCESS != res) {
200 delete[] extensions;
201 return false;
202 }
203 for (uint32_t i = 0; i < extensionCount; ++i) {
204 instanceExtensions->push_back() = extensions[i];
205 }
206 delete [] extensions;
207 }
208
209 // via explicitly enabled layers
210 layerCount = instanceLayers->count();
211 for (uint32_t layerIndex = 0; layerIndex < layerCount; ++layerIndex) {
212 uint32_t extensionCount = 0;
213 res = grVkEnumerateInstanceExtensionProperties((*instanceLayers)[layerIndex].layerName,
214 &extensionCount, nullptr);
215 if (VK_SUCCESS != res) {
216 return false;
217 }
218 VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
219 res = grVkEnumerateInstanceExtensionProperties((*instanceLayers)[layerIndex].layerName,
220 &extensionCount, extensions);
221 if (VK_SUCCESS != res) {
222 delete[] extensions;
223 return false;
224 }
225 for (uint32_t i = 0; i < extensionCount; ++i) {
226 instanceExtensions->push_back() = extensions[i];
227 }
228 delete[] extensions;
229 }
230
231 return true;
232 }
233
// Resolves vk<F> through the combined instance/device `getProc` in scope at the use
// site into a local named `F` (no null check; callers verify).
#define GET_PROC_LOCAL(F, inst, device) PFN_vk ## F F = (PFN_vk ## F) getProc("vk" #F, inst, device)
235
init_device_extensions_and_layers(GrVkGetProc getProc,uint32_t specVersion,VkInstance inst,VkPhysicalDevice physDev,SkTArray<VkExtensionProperties> * deviceExtensions,SkTArray<VkLayerProperties> * deviceLayers)236 static bool init_device_extensions_and_layers(GrVkGetProc getProc, uint32_t specVersion,
237 VkInstance inst, VkPhysicalDevice physDev,
238 SkTArray<VkExtensionProperties>* deviceExtensions,
239 SkTArray<VkLayerProperties>* deviceLayers) {
240 if (getProc == nullptr) {
241 return false;
242 }
243
244 GET_PROC_LOCAL(EnumerateDeviceExtensionProperties, inst, VK_NULL_HANDLE);
245 GET_PROC_LOCAL(EnumerateDeviceLayerProperties, inst, VK_NULL_HANDLE);
246
247 if (!EnumerateDeviceExtensionProperties ||
248 !EnumerateDeviceLayerProperties) {
249 return false;
250 }
251
252 VkResult res;
253 // device layers
254 uint32_t layerCount = 0;
255 #ifdef SK_ENABLE_VK_LAYERS
256 res = EnumerateDeviceLayerProperties(physDev, &layerCount, nullptr);
257 if (VK_SUCCESS != res) {
258 return false;
259 }
260 VkLayerProperties* layers = new VkLayerProperties[layerCount];
261 res = EnumerateDeviceLayerProperties(physDev, &layerCount, layers);
262 if (VK_SUCCESS != res) {
263 delete[] layers;
264 return false;
265 }
266
267 uint32_t nonPatchVersion = remove_patch_version(specVersion);
268 for (size_t i = 0; i < SK_ARRAY_COUNT(kDebugLayerNames); ++i) {
269 int idx = should_include_debug_layer(kDebugLayerNames[i], layerCount, layers,
270 nonPatchVersion);
271 if (idx != -1) {
272 deviceLayers->push_back() = layers[idx];
273 }
274 }
275 delete[] layers;
276 #endif
277
278 // device extensions
279 // via Vulkan implementation and implicitly enabled layers
280 {
281 uint32_t extensionCount = 0;
282 res = EnumerateDeviceExtensionProperties(physDev, nullptr, &extensionCount, nullptr);
283 if (VK_SUCCESS != res) {
284 return false;
285 }
286 VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
287 res = EnumerateDeviceExtensionProperties(physDev, nullptr, &extensionCount, extensions);
288 if (VK_SUCCESS != res) {
289 delete[] extensions;
290 return false;
291 }
292 for (uint32_t i = 0; i < extensionCount; ++i) {
293 deviceExtensions->push_back() = extensions[i];
294 }
295 delete[] extensions;
296 }
297
298 // via explicitly enabled layers
299 layerCount = deviceLayers->count();
300 for (uint32_t layerIndex = 0; layerIndex < layerCount; ++layerIndex) {
301 uint32_t extensionCount = 0;
302 res = EnumerateDeviceExtensionProperties(physDev,
303 (*deviceLayers)[layerIndex].layerName,
304 &extensionCount, nullptr);
305 if (VK_SUCCESS != res) {
306 return false;
307 }
308 VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
309 res = EnumerateDeviceExtensionProperties(physDev,
310 (*deviceLayers)[layerIndex].layerName,
311 &extensionCount, extensions);
312 if (VK_SUCCESS != res) {
313 delete[] extensions;
314 return false;
315 }
316 for (uint32_t i = 0; i < extensionCount; ++i) {
317 deviceExtensions->push_back() = extensions[i];
318 }
319 delete[] extensions;
320 }
321
322 return true;
323 }
324
// Like ACQUIRE_VK_INST_PROC but leaves null-checking to the caller.
#define ACQUIRE_VK_INST_PROC_NOCHECK(name, instance) \
    PFN_vk##name grVk##name = reinterpret_cast<PFN_vk##name>(getInstProc(instance, "vk" #name))

// Resolves an instance-level proc into `grVk<name>`; on failure, destroys the instance
// (if one was created) and returns false. Relies on locals `getInstProc`, `inst`,
// `debugCallback`, and `hasDebugExtension` being in scope at the use site.
#define ACQUIRE_VK_INST_PROC(name, instance) \
    PFN_vk##name grVk##name = \
        reinterpret_cast<PFN_vk##name>(getInstProc(instance, "vk" #name)); \
    do { \
        if (grVk##name == nullptr) { \
            SkDebugf("Function ptr for vk%s could not be acquired\n", #name); \
            if (inst != VK_NULL_HANDLE) { \
                destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension); \
            } \
            return false; \
        } \
    } while (0)

// Like ACQUIRE_VK_PROC but leaves null-checking to the caller.
#define ACQUIRE_VK_PROC_NOCHECK(name, instance, device) \
    PFN_vk##name grVk##name = reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device))

// Resolves a proc through the combined `getProc` loader; on failure, destroys the
// instance (if one was created) and returns false. Same use-site locals as
// ACQUIRE_VK_INST_PROC.
#define ACQUIRE_VK_PROC(name, instance, device) \
    PFN_vk##name grVk##name = \
        reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device)); \
    do { \
        if (grVk##name == nullptr) { \
            SkDebugf("Function ptr for vk%s could not be acquired\n", #name); \
            if (inst != VK_NULL_HANDLE) { \
                destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension); \
            } \
            return false; \
        } \
    } while (0)

// Resolves a proc through `getProc`; on failure just returns false (no instance cleanup).
#define ACQUIRE_VK_PROC_LOCAL(name, instance, device) \
    PFN_vk##name grVk##name = \
        reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device)); \
    do { \
        if (grVk##name == nullptr) { \
            SkDebugf("Function ptr for vk%s could not be acquired\n", #name); \
            return false; \
        } \
    } while (0)
366
// Destroys |inst|, first unregistering |*debugCallback| when the debug-report extension
// was enabled and a callback was installed (the handle is reset to VK_NULL_HANDLE).
// Returns false only if a required destruction entry point cannot be resolved (via
// ACQUIRE_VK_INST_PROC_LOCAL), true otherwise.
static bool destroy_instance(PFN_vkGetInstanceProcAddr getInstProc, VkInstance inst,
                             VkDebugReportCallbackEXT* debugCallback,
                             bool hasDebugExtension) {
    if (hasDebugExtension && *debugCallback != VK_NULL_HANDLE) {
        ACQUIRE_VK_INST_PROC_LOCAL(DestroyDebugReportCallbackEXT, inst);
        grVkDestroyDebugReportCallbackEXT(inst, *debugCallback, nullptr);
        *debugCallback = VK_NULL_HANDLE;
    }
    ACQUIRE_VK_INST_PROC_LOCAL(DestroyInstance, inst);
    grVkDestroyInstance(inst, nullptr);
    return true;
}
379
// Queries the device's features into |features|, first chaining extension-feature
// structs (protected memory, advanced blend, YCbCr conversion) onto features->pNext so
// the query fills them in too. Each chained struct is allocated with sk_malloc_throw;
// the chain is released later via FreeVulkanFeaturesStructs(). Returns false if
// |isProtected| was requested but the device lacks protected-memory support.
static bool setup_features(GrVkGetProc getProc, VkInstance inst, VkPhysicalDevice physDev,
                           uint32_t physDeviceVersion, GrVkExtensions* extensions,
                           VkPhysicalDeviceFeatures2* features, bool isProtected) {
    SkASSERT(physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
             extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, 1));

    // Setup all extension feature structs we may want to use. |tailPNext| always points
    // at the pNext slot of the last struct in the chain so we can append in O(1).
    void** tailPNext = &features->pNext;

    // If |isProtected| is given, attach that first
    VkPhysicalDeviceProtectedMemoryFeatures* protectedMemoryFeatures = nullptr;
    if (isProtected) {
        // Protected memory is a core Vulkan 1.1 feature, so 1.1 is required here.
        SkASSERT(physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0));
        protectedMemoryFeatures =
            (VkPhysicalDeviceProtectedMemoryFeatures*)sk_malloc_throw(
                sizeof(VkPhysicalDeviceProtectedMemoryFeatures));
        protectedMemoryFeatures->sType =
            VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES;
        protectedMemoryFeatures->pNext = nullptr;
        *tailPNext = protectedMemoryFeatures;
        tailPNext = &protectedMemoryFeatures->pNext;
    }

    VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* blend = nullptr;
    if (extensions->hasExtension(VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME, 2)) {
        blend = (VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*) sk_malloc_throw(
            sizeof(VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT));
        blend->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT;
        blend->pNext = nullptr;
        *tailPNext = blend;
        tailPNext = &blend->pNext;
    }

    VkPhysicalDeviceSamplerYcbcrConversionFeatures* ycbcrFeature = nullptr;
    if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
        extensions->hasExtension(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME, 1)) {
        ycbcrFeature = (VkPhysicalDeviceSamplerYcbcrConversionFeatures*) sk_malloc_throw(
            sizeof(VkPhysicalDeviceSamplerYcbcrConversionFeatures));
        ycbcrFeature->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES;
        ycbcrFeature->pNext = nullptr;
        // Pre-set to VK_TRUE; the feature query below overwrites this struct with the
        // device's actual support.
        ycbcrFeature->samplerYcbcrConversion = VK_TRUE;
        *tailPNext = ycbcrFeature;
        tailPNext = &ycbcrFeature->pNext;
    }

    if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0)) {
        ACQUIRE_VK_PROC_LOCAL(GetPhysicalDeviceFeatures2, inst, VK_NULL_HANDLE);
        grVkGetPhysicalDeviceFeatures2(physDev, features);
    } else {
        // Pre-1.1 devices must provide the KHR extension equivalent (asserted on entry).
        SkASSERT(extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
                                          1));
        ACQUIRE_VK_PROC_LOCAL(GetPhysicalDeviceFeatures2KHR, inst, VK_NULL_HANDLE);
        grVkGetPhysicalDeviceFeatures2KHR(physDev, features);
    }

    if (isProtected) {
        // Protected mode was requested; fail if the device cannot actually provide it.
        if (!protectedMemoryFeatures->protectedMemory) {
            return false;
        }
    }
    return true;
    // If we want to disable any extension features do so here.
}
443
// Creates a Vulkan instance, selects the first physical device, and creates a logical
// device with a graphics queue (plus a present queue when |canPresent| is supplied),
// filling |ctx| for use by Skia's test tools. On any failure after instance creation
// the partially-built instance is destroyed before returning false.
//
// |getInstProc|          - bootstrap vkGetInstanceProcAddr.
// |ctx|                  - out: populated backend context.
// |extensions|           - out: initialized with the enabled instance/device extensions.
// |features|             - out: device features; may gain a heap-allocated pNext chain
//                          (free with FreeVulkanFeaturesStructs).
// |debugCallback|        - out: debug-report callback handle (validation builds only).
// |presentQueueIndexPtr| - out, optional: queue family index used for present.
// |canPresent|           - optional predicate asking if a queue family can present.
// |isProtected|          - create a protected-capable device/queue (needs Vulkan 1.1).
bool CreateVkBackendContext(PFN_vkGetInstanceProcAddr getInstProc,
                            GrVkBackendContext* ctx,
                            GrVkExtensions* extensions,
                            VkPhysicalDeviceFeatures2* features,
                            VkDebugReportCallbackEXT* debugCallback,
                            uint32_t* presentQueueIndexPtr,
                            CanPresentFn canPresent,
                            bool isProtected) {
    VkResult err;

    // vkEnumerateInstanceVersion only exists on 1.1+ loaders; absence implies 1.0.
    ACQUIRE_VK_INST_PROC_NOCHECK(EnumerateInstanceVersion, VK_NULL_HANDLE);
    uint32_t instanceVersion = 0;
    if (!grVkEnumerateInstanceVersion) {
        instanceVersion = VK_MAKE_VERSION(1, 0, 0);
    } else {
        err = grVkEnumerateInstanceVersion(&instanceVersion);
        if (err) {
            SkDebugf("failed to enumerate instance version. Err: %d\n", err);
            return false;
        }
    }
    SkASSERT(instanceVersion >= VK_MAKE_VERSION(1, 0, 0));
    if (isProtected && instanceVersion < VK_MAKE_VERSION(1, 1, 0)) {
        SkDebugf("protected requires vk instance version 1.1\n");
        return false;
    }

    uint32_t apiVersion = VK_MAKE_VERSION(1, 0, 0);
    if (instanceVersion >= VK_MAKE_VERSION(1, 1, 0)) {
        // If the instance version is 1.0 we must have the apiVersion also be 1.0. However, if the
        // instance version is 1.1 or higher, we can set the apiVersion to be whatever the highest
        // api we may use in skia (technically it can be arbitrary). So for now we set it to 1.1
        // since that is the highest vulkan version.
        apiVersion = VK_MAKE_VERSION(1, 1, 0);
    }

    // Never claim a higher version than we will actually use.
    instanceVersion = std::min(instanceVersion, apiVersion);

    VkPhysicalDevice physDev;
    VkDevice device;
    VkInstance inst = VK_NULL_HANDLE;

    const VkApplicationInfo app_info = {
        VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType
        nullptr,                            // pNext
        "vktest",                           // pApplicationName
        0,                                  // applicationVersion
        "vktest",                           // pEngineName
        0,                                  // engineVerison
        apiVersion,                         // apiVersion
    };

    SkTArray<VkLayerProperties> instanceLayers;
    SkTArray<VkExtensionProperties> instanceExtensions;

    if (!init_instance_extensions_and_layers(getInstProc, instanceVersion,
                                             &instanceExtensions,
                                             &instanceLayers)) {
        return false;
    }

    SkTArray<const char*> instanceLayerNames;
    SkTArray<const char*> instanceExtensionNames;
    for (int i = 0; i < instanceLayers.count(); ++i) {
        instanceLayerNames.push_back(instanceLayers[i].layerName);
    }
    for (int i = 0; i < instanceExtensions.count(); ++i) {
        // Skip experimental VK_KHX extensions.
        if (strncmp(instanceExtensions[i].extensionName, "VK_KHX", 6) != 0) {
            instanceExtensionNames.push_back(instanceExtensions[i].extensionName);
        }
    }

    const VkInstanceCreateInfo instance_create = {
        VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,    // sType
        nullptr,                                   // pNext
        0,                                         // flags
        &app_info,                                 // pApplicationInfo
        (uint32_t) instanceLayerNames.count(),     // enabledLayerNameCount
        instanceLayerNames.begin(),                // ppEnabledLayerNames
        (uint32_t) instanceExtensionNames.count(), // enabledExtensionNameCount
        instanceExtensionNames.begin(),            // ppEnabledExtensionNames
    };

    bool hasDebugExtension = false;

    ACQUIRE_VK_INST_PROC(CreateInstance, VK_NULL_HANDLE);
    err = grVkCreateInstance(&instance_create, nullptr, &inst);
    if (err < 0) {
        SkDebugf("vkCreateInstance failed: %d\n", err);
        return false;
    }

    // Combined loader: device-level entry points come from vkGetDeviceProcAddr,
    // everything else from vkGetInstanceProcAddr.
    ACQUIRE_VK_INST_PROC(GetDeviceProcAddr, inst);
    auto getProc = [getInstProc, grVkGetDeviceProcAddr](const char* proc_name,
                                                        VkInstance instance, VkDevice device) {
        if (device != VK_NULL_HANDLE) {
            return grVkGetDeviceProcAddr(device, proc_name);
        }
        return getInstProc(instance, proc_name);
    };

#ifdef SK_ENABLE_VK_LAYERS
    *debugCallback = VK_NULL_HANDLE;
    for (int i = 0; i < instanceExtensionNames.count() && !hasDebugExtension; ++i) {
        if (!strcmp(instanceExtensionNames[i], VK_EXT_DEBUG_REPORT_EXTENSION_NAME)) {
            hasDebugExtension = true;
        }
    }
    if (hasDebugExtension) {
        // Setup callback creation information
        VkDebugReportCallbackCreateInfoEXT callbackCreateInfo;
        callbackCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT;
        callbackCreateInfo.pNext = nullptr;
        callbackCreateInfo.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT |
                                   VK_DEBUG_REPORT_WARNING_BIT_EXT |
                                   // VK_DEBUG_REPORT_INFORMATION_BIT_EXT |
                                   // VK_DEBUG_REPORT_DEBUG_BIT_EXT |
                                   VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT;
        callbackCreateInfo.pfnCallback = &DebugReportCallback;
        callbackCreateInfo.pUserData = nullptr;

        ACQUIRE_VK_PROC(CreateDebugReportCallbackEXT, inst, VK_NULL_HANDLE);
        // Register the callback
        grVkCreateDebugReportCallbackEXT(inst, &callbackCreateInfo, nullptr, debugCallback);
    }
#endif

    ACQUIRE_VK_PROC(EnumeratePhysicalDevices, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetPhysicalDeviceProperties, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetPhysicalDeviceQueueFamilyProperties, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetPhysicalDeviceFeatures, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(CreateDevice, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetDeviceQueue, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(DeviceWaitIdle, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(DestroyDevice, inst, VK_NULL_HANDLE);

    uint32_t gpuCount;
    err = grVkEnumeratePhysicalDevices(inst, &gpuCount, nullptr);
    if (err) {
        SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
        destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
        return false;
    }
    if (!gpuCount) {
        SkDebugf("vkEnumeratePhysicalDevices returned no supported devices.\n");
        destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
        return false;
    }
    // Just returning the first physical device instead of getting the whole array.
    // TODO: find best match for our needs
    gpuCount = 1;
    err = grVkEnumeratePhysicalDevices(inst, &gpuCount, &physDev);
    // VK_INCOMPLETE is returned when the count we provide is less than the total device count.
    if (err && VK_INCOMPLETE != err) {
        SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
        destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    VkPhysicalDeviceProperties physDeviceProperties;
    grVkGetPhysicalDeviceProperties(physDev, &physDeviceProperties);
    // Cap the device version to the api version we target (see apiVersion above).
    uint32_t physDeviceVersion = std::min(physDeviceProperties.apiVersion, apiVersion);

    if (isProtected && physDeviceVersion < VK_MAKE_VERSION(1, 1, 0)) {
        SkDebugf("protected requires vk physical device version 1.1\n");
        destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    // query to get the initial queue props size
    uint32_t queueCount;
    grVkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, nullptr);
    if (!queueCount) {
        SkDebugf("vkGetPhysicalDeviceQueueFamilyProperties returned no queues.\n");
        destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    SkAutoMalloc queuePropsAlloc(queueCount * sizeof(VkQueueFamilyProperties));
    // now get the actual queue props
    VkQueueFamilyProperties* queueProps = (VkQueueFamilyProperties*)queuePropsAlloc.get();

    grVkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, queueProps);

    // iterate to find the graphics queue
    uint32_t graphicsQueueIndex = queueCount;
    for (uint32_t i = 0; i < queueCount; i++) {
        if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            graphicsQueueIndex = i;
            break;
        }
    }
    if (graphicsQueueIndex == queueCount) {
        SkDebugf("Could not find any supported graphics queues.\n");
        destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    // iterate to find the present queue, if needed
    uint32_t presentQueueIndex = queueCount;
    if (presentQueueIndexPtr && canPresent) {
        for (uint32_t i = 0; i < queueCount; i++) {
            if (canPresent(inst, physDev, i)) {
                presentQueueIndex = i;
                break;
            }
        }
        if (presentQueueIndex == queueCount) {
            SkDebugf("Could not find any supported present queues.\n");
            destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
            return false;
        }
        *presentQueueIndexPtr = presentQueueIndex;
    } else {
        // Just setting this so we end up make a single queue for graphics since there was no
        // request for a present queue.
        presentQueueIndex = graphicsQueueIndex;
    }

    SkTArray<VkLayerProperties> deviceLayers;
    SkTArray<VkExtensionProperties> deviceExtensions;
    if (!init_device_extensions_and_layers(getProc, physDeviceVersion,
                                           inst, physDev,
                                           &deviceExtensions,
                                           &deviceLayers)) {
        destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    SkTArray<const char*> deviceLayerNames;
    SkTArray<const char*> deviceExtensionNames;
    for (int i = 0; i < deviceLayers.count(); ++i) {
        deviceLayerNames.push_back(deviceLayers[i].layerName);
    }

    // We can't have both VK_KHR_buffer_device_address and VK_EXT_buffer_device_address as
    // extensions. So see if we have the KHR version and if so don't push back the EXT version in
    // the next loop.
    bool hasKHRBufferDeviceAddress = false;
    for (int i = 0; i < deviceExtensions.count(); ++i) {
        if (!strcmp(deviceExtensions[i].extensionName, "VK_KHR_buffer_device_address")) {
            hasKHRBufferDeviceAddress = true;
            break;
        }
    }

    for (int i = 0; i < deviceExtensions.count(); ++i) {
        // Don't use experimental extensions since they typically don't work with debug layers and
        // often are missing dependecy requirements for other extensions. Additionally, these are
        // often left behind in the driver even after they've been promoted to real extensions.
        if (0 != strncmp(deviceExtensions[i].extensionName, "VK_KHX", 6) &&
            0 != strncmp(deviceExtensions[i].extensionName, "VK_NVX", 6)) {

            // This is an nvidia extension that isn't supported by the debug layers so we get lots
            // of warnings. We don't actually use it, so it is easiest to just not enable it.
            if (0 == strcmp(deviceExtensions[i].extensionName, "VK_NV_low_latency") ||
                0 == strcmp(deviceExtensions[i].extensionName, "VK_NV_acquire_winrt_display") ||
                0 == strcmp(deviceExtensions[i].extensionName, "VK_NV_cuda_kernel_launch") ||
                0 == strcmp(deviceExtensions[i].extensionName, "VK_EXT_provoking_vertex")) {
                continue;
            }

            if (!hasKHRBufferDeviceAddress ||
                0 != strcmp(deviceExtensions[i].extensionName, "VK_EXT_buffer_device_address")) {
                deviceExtensionNames.push_back(deviceExtensions[i].extensionName);
            }
        }
    }

    extensions->init(getProc, inst, physDev,
                     (uint32_t) instanceExtensionNames.count(),
                     instanceExtensionNames.begin(),
                     (uint32_t) deviceExtensionNames.count(),
                     deviceExtensionNames.begin());

    memset(features, 0, sizeof(VkPhysicalDeviceFeatures2));
    features->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
    features->pNext = nullptr;

    VkPhysicalDeviceFeatures* deviceFeatures = &features->features;
    void* pointerToFeatures = nullptr;
    if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
        extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, 1)) {
        if (!setup_features(getProc, inst, physDev, physDeviceVersion, extensions, features,
                            isProtected)) {
            destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
            return false;
        }

        // If we set the pNext of the VkDeviceCreateInfo to our VkPhysicalDeviceFeatures2 struct,
        // the device creation will use that instead of the ppEnabledFeatures.
        pointerToFeatures = features;
    } else {
        grVkGetPhysicalDeviceFeatures(physDev, deviceFeatures);
    }

    // this looks like it would slow things down,
    // and we can't depend on it on all platforms
    deviceFeatures->robustBufferAccess = VK_FALSE;

    VkDeviceQueueCreateFlags flags = isProtected ? VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT : 0;
    float queuePriorities[1] = { 0.0 };
    // Here we assume no need for swapchain queue
    // If one is needed, the client will need its own setup code
    const VkDeviceQueueCreateInfo queueInfo[2] = {
        {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
            nullptr,                                    // pNext
            flags,                                      // VkDeviceQueueCreateFlags
            graphicsQueueIndex,                         // queueFamilyIndex
            1,                                          // queueCount
            queuePriorities,                            // pQueuePriorities

        },
        {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
            nullptr,                                    // pNext
            0,                                          // VkDeviceQueueCreateFlags
            presentQueueIndex,                          // queueFamilyIndex
            1,                                          // queueCount
            queuePriorities,                            // pQueuePriorities
        }
    };
    // Only request the second queue when present lives in a different family.
    uint32_t queueInfoCount = (presentQueueIndex != graphicsQueueIndex) ? 2 : 1;

    const VkDeviceCreateInfo deviceInfo = {
        VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,        // sType
        pointerToFeatures,                           // pNext
        0,                                           // VkDeviceCreateFlags
        queueInfoCount,                              // queueCreateInfoCount
        queueInfo,                                   // pQueueCreateInfos
        (uint32_t) deviceLayerNames.count(),         // layerCount
        deviceLayerNames.begin(),                    // ppEnabledLayerNames
        (uint32_t) deviceExtensionNames.count(),     // extensionCount
        deviceExtensionNames.begin(),                // ppEnabledExtensionNames
        pointerToFeatures ? nullptr : deviceFeatures // ppEnabledFeatures
    };

    {
#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
        // skia:8712
        __lsan::ScopedDisabler lsanDisabler;
#endif
        err = grVkCreateDevice(physDev, &deviceInfo, nullptr, &device);
    }
    if (err) {
        SkDebugf("CreateDevice failed: %d\n", err);
        destroy_instance(getInstProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    VkQueue queue;
    if (isProtected) {
        // Protected queues must be retrieved with vkGetDeviceQueue2 so the protected
        // flag can be specified.
        ACQUIRE_VK_PROC(GetDeviceQueue2, inst, device);
        SkASSERT(grVkGetDeviceQueue2 != nullptr);
        VkDeviceQueueInfo2 queue_info2 = {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,          // sType
            nullptr,                                        // pNext
            VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT,           // flags
            graphicsQueueIndex,                             // queueFamilyIndex
            0                                               // queueIndex
        };
        grVkGetDeviceQueue2(device, &queue_info2, &queue);
    } else {
        grVkGetDeviceQueue(device, graphicsQueueIndex, 0, &queue);
    }

    ctx->fInstance = inst;
    ctx->fPhysicalDevice = physDev;
    ctx->fDevice = device;
    ctx->fQueue = queue;
    ctx->fGraphicsQueueIndex = graphicsQueueIndex;
    ctx->fMaxAPIVersion = apiVersion;
    ctx->fVkExtensions = extensions;
    ctx->fDeviceFeatures2 = features;
    ctx->fGetProc = getProc;
    // NOTE(review): false appears to mean Skia will not destroy the instance/device;
    // the test harness retains ownership — confirm against GrVkBackendContext docs.
    ctx->fOwnsInstanceAndDevice = false;
    ctx->fProtectedContext = isProtected ? GrProtected::kYes : GrProtected::kNo;

    return true;
}
825
FreeVulkanFeaturesStructs(const VkPhysicalDeviceFeatures2 * features)826 void FreeVulkanFeaturesStructs(const VkPhysicalDeviceFeatures2* features) {
827 // All Vulkan structs that could be part of the features chain will start with the
828 // structure type followed by the pNext pointer. We cast to the CommonVulkanHeader
829 // so we can get access to the pNext for the next struct.
830 struct CommonVulkanHeader {
831 VkStructureType sType;
832 void* pNext;
833 };
834
835 void* pNext = features->pNext;
836 while (pNext) {
837 void* current = pNext;
838 pNext = static_cast<CommonVulkanHeader*>(current)->pNext;
839 sk_free(current);
840 }
841 }
842
843 } // namespace sk_gpu_test
844
845 #endif
846