/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "tools/gpu/vk/VkTestUtils.h"

#ifdef SK_VULKAN

#ifndef SK_GPU_TOOLS_VK_LIBRARY_NAME
#if defined _WIN32
#define SK_GPU_TOOLS_VK_LIBRARY_NAME "vulkan-1.dll"
#else
#define SK_GPU_TOOLS_VK_LIBRARY_NAME "libvulkan.so"
#endif
#endif

#include <algorithm>

#if defined(SK_BUILD_FOR_UNIX)
#include <execinfo.h>
#endif
#include "include/gpu/vk/GrVkBackendContext.h"
#include "include/gpu/vk/GrVkExtensions.h"
#include "src/core/SkAutoMalloc.h"
#include "src/ports/SkOSLibrary.h"

#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
#include <sanitizer/lsan_interface.h>
#endif

namespace sk_gpu_test {

bool LoadVkLibraryAndGetProcAddrFuncs(PFN_vkGetInstanceProcAddr* instProc,
                                      PFN_vkGetDeviceProcAddr* devProc) {
    static void* vkLib = nullptr;
    static PFN_vkGetInstanceProcAddr localInstProc = nullptr;
    static PFN_vkGetDeviceProcAddr localDevProc = nullptr;
    if (!vkLib) {
        vkLib = SkLoadDynamicLibrary(SK_GPU_TOOLS_VK_LIBRARY_NAME);
        if (!vkLib) {
            return false;
        }
        localInstProc = (PFN_vkGetInstanceProcAddr) SkGetProcedureAddress(vkLib,
                                                                          "vkGetInstanceProcAddr");
        localDevProc = (PFN_vkGetDeviceProcAddr) SkGetProcedureAddress(vkLib,
                                                                       "vkGetDeviceProcAddr");
    }
    if (!localInstProc || !localDevProc) {
        return false;
    }
    *instProc = localInstProc;
    *devProc = localDevProc;
    return true;
}
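
// A minimal usage sketch (an assumption about typical test-harness call sites,
// not code in this file):
//
//     PFN_vkGetInstanceProcAddr instProc = nullptr;
//     PFN_vkGetDeviceProcAddr devProc = nullptr;
//     if (!sk_gpu_test::LoadVkLibraryAndGetProcAddrFuncs(&instProc, &devProc)) {
//         return; // no usable Vulkan loader on this machine; skip Vulkan tests
//     }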

////////////////////////////////////////////////////////////////////////////////
// Helper code to set up Vulkan context objects

#ifdef SK_ENABLE_VK_LAYERS
const char* kDebugLayerNames[] = {
    // single merged layer
    "VK_LAYER_KHRONOS_validation",
    // not included in standard_validation
    //"VK_LAYER_LUNARG_api_dump",
    //"VK_LAYER_LUNARG_vktrace",
    //"VK_LAYER_LUNARG_screenshot",
};

static uint32_t remove_patch_version(uint32_t specVersion) {
    return (specVersion >> 12) << 12;
}
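
// Worked example: Vulkan packs versions as (major << 22) | (minor << 12) | patch,
// so clearing the low 12 bits strips only the patch number, e.g.
// remove_patch_version(VK_MAKE_VERSION(1, 1, 97)) == VK_MAKE_VERSION(1, 1, 0).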

// Returns the index into layers array for the layer we want. Returns -1 if not supported.
static int should_include_debug_layer(const char* layerName,
                                      uint32_t layerCount, VkLayerProperties* layers,
                                      uint32_t version) {
    for (uint32_t i = 0; i < layerCount; ++i) {
        if (!strcmp(layerName, layers[i].layerName)) {
            // Since the layers intercept the Vulkan calls and forward them on, we need to make
            // sure the layer was written against a version that isn't older than the version of
            // Vulkan we're using, so that it has all the API entry points.
            if (version <= remove_patch_version(layers[i].specVersion)) {
                return i;
            }
            return -1;
        }
    }
    return -1;
}

static void print_backtrace() {
#if defined(SK_BUILD_FOR_UNIX)
    void* stack[64];
    int count = backtrace(stack, SK_ARRAY_COUNT(stack));
    backtrace_symbols_fd(stack, count, 2);
#else
    // Please add implementations for other platforms.
#endif
}

VKAPI_ATTR VkBool32 VKAPI_CALL DebugReportCallback(
    VkDebugReportFlagsEXT flags,
    VkDebugReportObjectTypeEXT objectType,
    uint64_t object,
    size_t location,
    int32_t messageCode,
    const char* pLayerPrefix,
    const char* pMessage,
    void* pUserData) {
    if (flags & VK_DEBUG_REPORT_ERROR_BIT_EXT) {
        // See https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/1887
        if (strstr(pMessage, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-01521") ||
            strstr(pMessage, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-01522")) {
            return VK_FALSE;
        }
        // See https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/2171
        if (strstr(pMessage, "VUID-vkCmdDraw-None-02686") ||
            strstr(pMessage, "VUID-vkCmdDrawIndexed-None-02686")) {
            return VK_FALSE;
        }
        SkDebugf("Vulkan error [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
        print_backtrace();
        SkDEBUGFAIL("Vulkan debug layer error");
        return VK_TRUE; // skip further layers
    } else if (flags & VK_DEBUG_REPORT_WARNING_BIT_EXT) {
        SkDebugf("Vulkan warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
        print_backtrace();
    } else if (flags & VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT) {
        SkDebugf("Vulkan perf warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
        print_backtrace();
    } else {
        SkDebugf("Vulkan info/debug [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
    }
    return VK_FALSE;
}
#endif

#define GET_PROC_LOCAL(F, inst, device) PFN_vk ## F F = (PFN_vk ## F) getProc("vk" #F, inst, device)
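
// For example, GET_PROC_LOCAL(EnumerateInstanceLayerProperties, VK_NULL_HANDLE, VK_NULL_HANDLE)
// expands to a local variable declaration (getProc must be in scope at the use site):
//
//     PFN_vkEnumerateInstanceLayerProperties EnumerateInstanceLayerProperties =
//             (PFN_vkEnumerateInstanceLayerProperties) getProc("vkEnumerateInstanceLayerProperties",
//                                                              VK_NULL_HANDLE, VK_NULL_HANDLE);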

static bool init_instance_extensions_and_layers(GrVkGetProc getProc,
                                                uint32_t specVersion,
                                                SkTArray<VkExtensionProperties>* instanceExtensions,
                                                SkTArray<VkLayerProperties>* instanceLayers) {
    if (getProc == nullptr) {
        return false;
    }

    GET_PROC_LOCAL(EnumerateInstanceExtensionProperties, VK_NULL_HANDLE, VK_NULL_HANDLE);
    GET_PROC_LOCAL(EnumerateInstanceLayerProperties, VK_NULL_HANDLE, VK_NULL_HANDLE);

    if (!EnumerateInstanceExtensionProperties ||
        !EnumerateInstanceLayerProperties) {
        return false;
    }

    VkResult res;
    uint32_t layerCount = 0;
#ifdef SK_ENABLE_VK_LAYERS
    // instance layers
    res = EnumerateInstanceLayerProperties(&layerCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }
    VkLayerProperties* layers = new VkLayerProperties[layerCount];
    res = EnumerateInstanceLayerProperties(&layerCount, layers);
    if (VK_SUCCESS != res) {
        delete[] layers;
        return false;
    }

    uint32_t nonPatchVersion = remove_patch_version(specVersion);
    for (size_t i = 0; i < SK_ARRAY_COUNT(kDebugLayerNames); ++i) {
        int idx = should_include_debug_layer(kDebugLayerNames[i], layerCount, layers,
                                             nonPatchVersion);
        if (idx != -1) {
            instanceLayers->push_back() = layers[idx];
        }
    }
    delete[] layers;
#endif

    // instance extensions
    // via Vulkan implementation and implicitly enabled layers
    uint32_t extensionCount = 0;
    res = EnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }
    VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
    res = EnumerateInstanceExtensionProperties(nullptr, &extensionCount, extensions);
    if (VK_SUCCESS != res) {
        delete[] extensions;
        return false;
    }
    for (uint32_t i = 0; i < extensionCount; ++i) {
        instanceExtensions->push_back() = extensions[i];
    }
    delete[] extensions;

    // via explicitly enabled layers
    layerCount = instanceLayers->count();
    for (uint32_t layerIndex = 0; layerIndex < layerCount; ++layerIndex) {
        uint32_t extensionCount = 0;
        res = EnumerateInstanceExtensionProperties((*instanceLayers)[layerIndex].layerName,
                                                   &extensionCount, nullptr);
        if (VK_SUCCESS != res) {
            return false;
        }
        VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
        res = EnumerateInstanceExtensionProperties((*instanceLayers)[layerIndex].layerName,
                                                   &extensionCount, extensions);
        if (VK_SUCCESS != res) {
            delete[] extensions;
            return false;
        }
        for (uint32_t i = 0; i < extensionCount; ++i) {
            instanceExtensions->push_back() = extensions[i];
        }
        delete[] extensions;
    }

    return true;
}
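
// Both this helper and the device-level one below use the standard Vulkan two-call
// enumeration idiom: first call with a null array to query the count, then call again
// with storage of that size to fetch the actual properties.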

static bool init_device_extensions_and_layers(GrVkGetProc getProc, uint32_t specVersion,
                                              VkInstance inst, VkPhysicalDevice physDev,
                                              SkTArray<VkExtensionProperties>* deviceExtensions,
                                              SkTArray<VkLayerProperties>* deviceLayers) {
    if (getProc == nullptr) {
        return false;
    }

    GET_PROC_LOCAL(EnumerateDeviceExtensionProperties, inst, VK_NULL_HANDLE);
    GET_PROC_LOCAL(EnumerateDeviceLayerProperties, inst, VK_NULL_HANDLE);

    if (!EnumerateDeviceExtensionProperties ||
        !EnumerateDeviceLayerProperties) {
        return false;
    }

    VkResult res;
    // device layers
    uint32_t layerCount = 0;
#ifdef SK_ENABLE_VK_LAYERS
    res = EnumerateDeviceLayerProperties(physDev, &layerCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }
    VkLayerProperties* layers = new VkLayerProperties[layerCount];
    res = EnumerateDeviceLayerProperties(physDev, &layerCount, layers);
    if (VK_SUCCESS != res) {
        delete[] layers;
        return false;
    }

    uint32_t nonPatchVersion = remove_patch_version(specVersion);
    for (size_t i = 0; i < SK_ARRAY_COUNT(kDebugLayerNames); ++i) {
        int idx = should_include_debug_layer(kDebugLayerNames[i], layerCount, layers,
                                             nonPatchVersion);
        if (idx != -1) {
            deviceLayers->push_back() = layers[idx];
        }
    }
    delete[] layers;
#endif

    // device extensions
    // via Vulkan implementation and implicitly enabled layers
    uint32_t extensionCount = 0;
    res = EnumerateDeviceExtensionProperties(physDev, nullptr, &extensionCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }
    VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
    res = EnumerateDeviceExtensionProperties(physDev, nullptr, &extensionCount, extensions);
    if (VK_SUCCESS != res) {
        delete[] extensions;
        return false;
    }
    for (uint32_t i = 0; i < extensionCount; ++i) {
        deviceExtensions->push_back() = extensions[i];
    }
    delete[] extensions;

    // via explicitly enabled layers
    layerCount = deviceLayers->count();
    for (uint32_t layerIndex = 0; layerIndex < layerCount; ++layerIndex) {
        uint32_t extensionCount = 0;
        res = EnumerateDeviceExtensionProperties(physDev,
                                                 (*deviceLayers)[layerIndex].layerName,
                                                 &extensionCount, nullptr);
        if (VK_SUCCESS != res) {
            return false;
        }
        VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
        res = EnumerateDeviceExtensionProperties(physDev,
                                                 (*deviceLayers)[layerIndex].layerName,
                                                 &extensionCount, extensions);
        if (VK_SUCCESS != res) {
            delete[] extensions;
            return false;
        }
        for (uint32_t i = 0; i < extensionCount; ++i) {
            deviceExtensions->push_back() = extensions[i];
        }
        delete[] extensions;
    }

    return true;
}

#define ACQUIRE_VK_PROC_NOCHECK(name, instance, device) \
    PFN_vk##name grVk##name = reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device))

#define ACQUIRE_VK_PROC(name, instance, device) \
    PFN_vk##name grVk##name = \
            reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device)); \
    do { \
        if (grVk##name == nullptr) { \
            SkDebugf("Function ptr for vk%s could not be acquired\n", #name); \
            if (device != VK_NULL_HANDLE) { \
                destroy_instance(getProc, inst, debugCallback, hasDebugExtension); \
            } \
            return false; \
        } \
    } while (0)

#define ACQUIRE_VK_PROC_LOCAL(name, instance, device) \
    PFN_vk##name grVk##name = \
            reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device)); \
    do { \
        if (grVk##name == nullptr) { \
            SkDebugf("Function ptr for vk%s could not be acquired\n", #name); \
            return false; \
        } \
    } while (0)
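
// Note the difference between the three: ACQUIRE_VK_PROC_NOCHECK tolerates a null
// result (the caller checks it); ACQUIRE_VK_PROC returns false on failure and, when a
// device-level proc was requested, also tears down the instance (it assumes locals
// named inst, debugCallback, and hasDebugExtension are in scope at the use site);
// ACQUIRE_VK_PROC_LOCAL simply bails out with false.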

static bool destroy_instance(GrVkGetProc getProc, VkInstance inst,
                             VkDebugReportCallbackEXT* debugCallback,
                             bool hasDebugExtension) {
    if (hasDebugExtension && *debugCallback != VK_NULL_HANDLE) {
        ACQUIRE_VK_PROC_LOCAL(DestroyDebugReportCallbackEXT, inst, VK_NULL_HANDLE);
        grVkDestroyDebugReportCallbackEXT(inst, *debugCallback, nullptr);
        *debugCallback = VK_NULL_HANDLE;
    }
    ACQUIRE_VK_PROC_LOCAL(DestroyInstance, inst, VK_NULL_HANDLE);
    grVkDestroyInstance(inst, nullptr);
    return true;
}

static bool setup_features(GrVkGetProc getProc, VkInstance inst, VkPhysicalDevice physDev,
                           uint32_t physDeviceVersion, GrVkExtensions* extensions,
                           VkPhysicalDeviceFeatures2* features, bool isProtected) {
    SkASSERT(physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
             extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, 1));

    // Set up all the extension feature structs we may want to use.
    void** tailPNext = &features->pNext;

    // If |isProtected| is given, attach that first.
    VkPhysicalDeviceProtectedMemoryFeatures* protectedMemoryFeatures = nullptr;
    if (isProtected) {
        SkASSERT(physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0));
        protectedMemoryFeatures =
                (VkPhysicalDeviceProtectedMemoryFeatures*)sk_malloc_throw(
                        sizeof(VkPhysicalDeviceProtectedMemoryFeatures));
        protectedMemoryFeatures->sType =
                VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES;
        protectedMemoryFeatures->pNext = nullptr;
        *tailPNext = protectedMemoryFeatures;
        tailPNext = &protectedMemoryFeatures->pNext;
    }

    VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* blend = nullptr;
    if (extensions->hasExtension(VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME, 2)) {
        blend = (VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*) sk_malloc_throw(
                sizeof(VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT));
        blend->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT;
        blend->pNext = nullptr;
        *tailPNext = blend;
        tailPNext = &blend->pNext;
    }

    VkPhysicalDeviceSamplerYcbcrConversionFeatures* ycbcrFeature = nullptr;
    if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
        extensions->hasExtension(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME, 1)) {
        ycbcrFeature = (VkPhysicalDeviceSamplerYcbcrConversionFeatures*) sk_malloc_throw(
                sizeof(VkPhysicalDeviceSamplerYcbcrConversionFeatures));
        ycbcrFeature->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES;
        ycbcrFeature->pNext = nullptr;
        ycbcrFeature->samplerYcbcrConversion = VK_TRUE;
        *tailPNext = ycbcrFeature;
        tailPNext = &ycbcrFeature->pNext;
    }

    if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0)) {
        ACQUIRE_VK_PROC_LOCAL(GetPhysicalDeviceFeatures2, inst, VK_NULL_HANDLE);
        grVkGetPhysicalDeviceFeatures2(physDev, features);
    } else {
        SkASSERT(extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
                                          1));
        ACQUIRE_VK_PROC_LOCAL(GetPhysicalDeviceFeatures2KHR, inst, VK_NULL_HANDLE);
        grVkGetPhysicalDeviceFeatures2KHR(physDev, features);
    }

    if (isProtected) {
        if (!protectedMemoryFeatures->protectedMemory) {
            return false;
        }
    }
    // If we want to disable any extension features, do so here.
    return true;
}
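
// After setup_features() succeeds, |features| heads a pNext chain that, with every
// option enabled, looks like this (each node is heap-allocated with sk_malloc_throw
// and later released by FreeVulkanFeaturesStructs):
//
//     VkPhysicalDeviceFeatures2
//       -> VkPhysicalDeviceProtectedMemoryFeatures            (only if isProtected)
//       -> VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT  (only if the extension is present)
//       -> VkPhysicalDeviceSamplerYcbcrConversionFeatures     (1.1+ or extension)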

bool CreateVkBackendContext(GrVkGetProc getProc,
                            GrVkBackendContext* ctx,
                            GrVkExtensions* extensions,
                            VkPhysicalDeviceFeatures2* features,
                            VkDebugReportCallbackEXT* debugCallback,
                            uint32_t* presentQueueIndexPtr,
                            CanPresentFn canPresent,
                            bool isProtected) {
    VkResult err;

    ACQUIRE_VK_PROC_NOCHECK(EnumerateInstanceVersion, VK_NULL_HANDLE, VK_NULL_HANDLE);
    uint32_t instanceVersion = 0;
    if (!grVkEnumerateInstanceVersion) {
        // vkEnumerateInstanceVersion was added in Vulkan 1.1; a null pointer means a 1.0 loader.
        instanceVersion = VK_MAKE_VERSION(1, 0, 0);
    } else {
        err = grVkEnumerateInstanceVersion(&instanceVersion);
        if (err) {
            SkDebugf("failed to enumerate instance version. Err: %d\n", err);
            return false;
        }
    }
    SkASSERT(instanceVersion >= VK_MAKE_VERSION(1, 0, 0));
    if (isProtected && instanceVersion < VK_MAKE_VERSION(1, 1, 0)) {
        SkDebugf("protected requires vk instance version 1.1\n");
        return false;
    }

    uint32_t apiVersion = VK_MAKE_VERSION(1, 0, 0);
    if (instanceVersion >= VK_MAKE_VERSION(1, 1, 0)) {
        // If the instance version is 1.0 we must have the apiVersion also be 1.0. However, if the
        // instance version is 1.1 or higher, we can set the apiVersion to the highest API version
        // we may use in Skia (technically it can be arbitrary). So for now we set it to 1.1 since
        // that is the highest Vulkan version Skia uses.
        apiVersion = VK_MAKE_VERSION(1, 1, 0);
    }

    instanceVersion = std::min(instanceVersion, apiVersion);

    VkPhysicalDevice physDev;
    VkDevice device;
    VkInstance inst;

    const VkApplicationInfo app_info = {
        VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType
        nullptr,                            // pNext
        "vktest",                           // pApplicationName
        0,                                  // applicationVersion
        "vktest",                           // pEngineName
        0,                                  // engineVersion
        apiVersion,                         // apiVersion
    };

    SkTArray<VkLayerProperties> instanceLayers;
    SkTArray<VkExtensionProperties> instanceExtensions;

    if (!init_instance_extensions_and_layers(getProc, instanceVersion,
                                             &instanceExtensions,
                                             &instanceLayers)) {
        return false;
    }

    SkTArray<const char*> instanceLayerNames;
    SkTArray<const char*> instanceExtensionNames;
    for (int i = 0; i < instanceLayers.count(); ++i) {
        instanceLayerNames.push_back(instanceLayers[i].layerName);
    }
    for (int i = 0; i < instanceExtensions.count(); ++i) {
        // Skip experimental VK_KHX extensions.
        if (strncmp(instanceExtensions[i].extensionName, "VK_KHX", 6) != 0) {
            instanceExtensionNames.push_back(instanceExtensions[i].extensionName);
        }
    }

    const VkInstanceCreateInfo instance_create = {
        VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,    // sType
        nullptr,                                   // pNext
        0,                                         // flags
        &app_info,                                 // pApplicationInfo
        (uint32_t) instanceLayerNames.count(),     // enabledLayerCount
        instanceLayerNames.begin(),                // ppEnabledLayerNames
        (uint32_t) instanceExtensionNames.count(), // enabledExtensionCount
        instanceExtensionNames.begin(),            // ppEnabledExtensionNames
    };

    bool hasDebugExtension = false;

    ACQUIRE_VK_PROC(CreateInstance, VK_NULL_HANDLE, VK_NULL_HANDLE);
    err = grVkCreateInstance(&instance_create, nullptr, &inst);
    if (err < 0) {
        SkDebugf("vkCreateInstance failed: %d\n", err);
        return false;
    }

#ifdef SK_ENABLE_VK_LAYERS
    *debugCallback = VK_NULL_HANDLE;
    for (int i = 0; i < instanceExtensionNames.count() && !hasDebugExtension; ++i) {
        if (!strcmp(instanceExtensionNames[i], VK_EXT_DEBUG_REPORT_EXTENSION_NAME)) {
            hasDebugExtension = true;
        }
    }
    if (hasDebugExtension) {
        // Set up the callback creation information.
        VkDebugReportCallbackCreateInfoEXT callbackCreateInfo;
        callbackCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT;
        callbackCreateInfo.pNext = nullptr;
        callbackCreateInfo.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT |
                                   VK_DEBUG_REPORT_WARNING_BIT_EXT |
                                   // VK_DEBUG_REPORT_INFORMATION_BIT_EXT |
                                   // VK_DEBUG_REPORT_DEBUG_BIT_EXT |
                                   VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT;
        callbackCreateInfo.pfnCallback = &DebugReportCallback;
        callbackCreateInfo.pUserData = nullptr;

        ACQUIRE_VK_PROC(CreateDebugReportCallbackEXT, inst, VK_NULL_HANDLE);
        // Register the callback.
        grVkCreateDebugReportCallbackEXT(inst, &callbackCreateInfo, nullptr, debugCallback);
    }
#endif

    ACQUIRE_VK_PROC(EnumeratePhysicalDevices, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetPhysicalDeviceProperties, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetPhysicalDeviceQueueFamilyProperties, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetPhysicalDeviceFeatures, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(CreateDevice, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetDeviceQueue, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(DeviceWaitIdle, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(DestroyDevice, inst, VK_NULL_HANDLE);

    uint32_t gpuCount;
    err = grVkEnumeratePhysicalDevices(inst, &gpuCount, nullptr);
    if (err) {
        SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }
    if (!gpuCount) {
        SkDebugf("vkEnumeratePhysicalDevices returned no supported devices.\n");
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }
    // Just take the first physical device instead of getting the whole array.
    // TODO: find best match for our needs
    gpuCount = 1;
    err = grVkEnumeratePhysicalDevices(inst, &gpuCount, &physDev);
    // VK_INCOMPLETE is returned when the count we provide is less than the total device count.
    if (err && VK_INCOMPLETE != err) {
        SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    VkPhysicalDeviceProperties physDeviceProperties;
    grVkGetPhysicalDeviceProperties(physDev, &physDeviceProperties);
    uint32_t physDeviceVersion = std::min(physDeviceProperties.apiVersion, apiVersion);

    if (isProtected && physDeviceVersion < VK_MAKE_VERSION(1, 1, 0)) {
        SkDebugf("protected requires vk physical device version 1.1\n");
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    // Query to get the initial queue props size.
    uint32_t queueCount;
    grVkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, nullptr);
    if (!queueCount) {
        SkDebugf("vkGetPhysicalDeviceQueueFamilyProperties returned no queues.\n");
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    SkAutoMalloc queuePropsAlloc(queueCount * sizeof(VkQueueFamilyProperties));
    // Now get the actual queue props.
    VkQueueFamilyProperties* queueProps = (VkQueueFamilyProperties*)queuePropsAlloc.get();

    grVkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, queueProps);

    // Iterate to find the graphics queue.
    uint32_t graphicsQueueIndex = queueCount;
    for (uint32_t i = 0; i < queueCount; i++) {
        if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            graphicsQueueIndex = i;
            break;
        }
    }
    if (graphicsQueueIndex == queueCount) {
        SkDebugf("Could not find any supported graphics queues.\n");
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    // Iterate to find the present queue, if needed.
    uint32_t presentQueueIndex = queueCount;
    if (presentQueueIndexPtr && canPresent) {
        for (uint32_t i = 0; i < queueCount; i++) {
            if (canPresent(inst, physDev, i)) {
                presentQueueIndex = i;
                break;
            }
        }
        if (presentQueueIndex == queueCount) {
            SkDebugf("Could not find any supported present queues.\n");
            destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
            return false;
        }
        *presentQueueIndexPtr = presentQueueIndex;
    } else {
        // Just setting this so we end up making a single queue for graphics since there was no
        // request for a present queue.
        presentQueueIndex = graphicsQueueIndex;
    }

    SkTArray<VkLayerProperties> deviceLayers;
    SkTArray<VkExtensionProperties> deviceExtensions;
    if (!init_device_extensions_and_layers(getProc, physDeviceVersion,
                                           inst, physDev,
                                           &deviceExtensions,
                                           &deviceLayers)) {
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    SkTArray<const char*> deviceLayerNames;
    SkTArray<const char*> deviceExtensionNames;
    for (int i = 0; i < deviceLayers.count(); ++i) {
        deviceLayerNames.push_back(deviceLayers[i].layerName);
    }

    // We can't have both VK_KHR_buffer_device_address and VK_EXT_buffer_device_address as
    // extensions. So see if we have the KHR version and, if so, don't push back the EXT version
    // in the next loop.
    bool hasKHRBufferDeviceAddress = false;
    for (int i = 0; i < deviceExtensions.count(); ++i) {
        if (!strcmp(deviceExtensions[i].extensionName, "VK_KHR_buffer_device_address")) {
            hasKHRBufferDeviceAddress = true;
            break;
        }
    }

    for (int i = 0; i < deviceExtensions.count(); ++i) {
        // Don't use experimental extensions since they typically don't work with debug layers and
        // often are missing dependency requirements for other extensions. Additionally, these are
        // often left behind in the driver even after they've been promoted to real extensions.
        if (0 != strncmp(deviceExtensions[i].extensionName, "VK_KHX", 6) &&
            0 != strncmp(deviceExtensions[i].extensionName, "VK_NVX", 6)) {

            // This is an nvidia extension that isn't supported by the debug layers so we get lots
            // of warnings. We don't actually use it, so it is easiest to just not enable it.
            if (0 == strcmp(deviceExtensions[i].extensionName, "VK_NV_low_latency")) {
                continue;
            }

            if (!hasKHRBufferDeviceAddress ||
                0 != strcmp(deviceExtensions[i].extensionName, "VK_EXT_buffer_device_address")) {
                deviceExtensionNames.push_back(deviceExtensions[i].extensionName);
            }
        }
    }

    extensions->init(getProc, inst, physDev,
                     (uint32_t) instanceExtensionNames.count(),
                     instanceExtensionNames.begin(),
                     (uint32_t) deviceExtensionNames.count(),
                     deviceExtensionNames.begin());

    memset(features, 0, sizeof(VkPhysicalDeviceFeatures2));
    features->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
    features->pNext = nullptr;

    VkPhysicalDeviceFeatures* deviceFeatures = &features->features;
    void* pointerToFeatures = nullptr;
    if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
        extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, 1)) {
        if (!setup_features(getProc, inst, physDev, physDeviceVersion, extensions, features,
                            isProtected)) {
            destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
            return false;
        }

        // If we set the pNext of the VkDeviceCreateInfo to our VkPhysicalDeviceFeatures2 struct,
        // the device creation will use that instead of the ppEnabledFeatures.
        pointerToFeatures = features;
    } else {
        grVkGetPhysicalDeviceFeatures(physDev, deviceFeatures);
    }

    // This looks like it would slow things down, and we can't depend on it on all platforms.
    deviceFeatures->robustBufferAccess = VK_FALSE;

    VkDeviceQueueCreateFlags flags = isProtected ? VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT : 0;
    float queuePriorities[1] = { 0.0 };
    // Here we assume no need for a swapchain queue.
    // If one is needed, the client will need its own setup code.
    const VkDeviceQueueCreateInfo queueInfo[2] = {
        {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
            nullptr,                                    // pNext
            flags,                                      // VkDeviceQueueCreateFlags
            graphicsQueueIndex,                         // queueFamilyIndex
            1,                                          // queueCount
            queuePriorities,                            // pQueuePriorities
        },
        {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
            nullptr,                                    // pNext
            0,                                          // VkDeviceQueueCreateFlags
            presentQueueIndex,                          // queueFamilyIndex
            1,                                          // queueCount
            queuePriorities,                            // pQueuePriorities
        }
    };
    uint32_t queueInfoCount = (presentQueueIndex != graphicsQueueIndex) ? 2 : 1;

    const VkDeviceCreateInfo deviceInfo = {
        VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,        // sType
        pointerToFeatures,                           // pNext
        0,                                           // VkDeviceCreateFlags
        queueInfoCount,                              // queueCreateInfoCount
        queueInfo,                                   // pQueueCreateInfos
        (uint32_t) deviceLayerNames.count(),         // enabledLayerCount
        deviceLayerNames.begin(),                    // ppEnabledLayerNames
        (uint32_t) deviceExtensionNames.count(),     // enabledExtensionCount
        deviceExtensionNames.begin(),                // ppEnabledExtensionNames
        pointerToFeatures ? nullptr : deviceFeatures // ppEnabledFeatures
    };

    {
#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
        // skia:8712
        __lsan::ScopedDisabler lsanDisabler;
#endif
        err = grVkCreateDevice(physDev, &deviceInfo, nullptr, &device);
    }
    if (err) {
        SkDebugf("CreateDevice failed: %d\n", err);
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    VkQueue queue;
    if (isProtected) {
        ACQUIRE_VK_PROC(GetDeviceQueue2, inst, device);
        SkASSERT(grVkGetDeviceQueue2 != nullptr);
        VkDeviceQueueInfo2 queue_info2 = {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2, // sType
            nullptr,                               // pNext
            VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT,  // flags
            graphicsQueueIndex,                    // queueFamilyIndex
            0                                      // queueIndex
        };
        grVkGetDeviceQueue2(device, &queue_info2, &queue);
    } else {
        grVkGetDeviceQueue(device, graphicsQueueIndex, 0, &queue);
    }

    ctx->fInstance = inst;
    ctx->fPhysicalDevice = physDev;
    ctx->fDevice = device;
    ctx->fQueue = queue;
    ctx->fGraphicsQueueIndex = graphicsQueueIndex;
    ctx->fMaxAPIVersion = apiVersion;
    ctx->fVkExtensions = extensions;
    ctx->fDeviceFeatures2 = features;
    ctx->fGetProc = getProc;
    ctx->fOwnsInstanceAndDevice = false;
    ctx->fProtectedContext = isProtected ? GrProtected::kYes : GrProtected::kNo;

    return true;
}
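
// A minimal usage sketch (an assumption about a typical test harness, not code in
// this file; instProc/devProc come from LoadVkLibraryAndGetProcAddrFuncs above, and
// the bridging lambda shown is illustrative):
//
//     GrVkBackendContext ctx;
//     GrVkExtensions extensions;
//     VkPhysicalDeviceFeatures2 features;
//     VkDebugReportCallbackEXT debugCallback = VK_NULL_HANDLE;
//     GrVkGetProc getProc = [instProc, devProc](const char* name, VkInstance i, VkDevice d) {
//         return d != VK_NULL_HANDLE ? devProc(d, name) : instProc(i, name);
//     };
//     if (CreateVkBackendContext(getProc, &ctx, &extensions, &features, &debugCallback,
//                                nullptr, nullptr, false)) {
//         // ... use ctx; at teardown destroy the device/instance and call
//         // FreeVulkanFeaturesStructs(&features).
//     }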

void FreeVulkanFeaturesStructs(const VkPhysicalDeviceFeatures2* features) {
    // All Vulkan structs that could be part of the features chain will start with the
    // structure type followed by the pNext pointer. We cast to the CommonVulkanHeader
    // so we can get access to the pNext for the next struct.
    struct CommonVulkanHeader {
        VkStructureType sType;
        void* pNext;
    };

    void* pNext = features->pNext;
    while (pNext) {
        void* current = pNext;
        pNext = static_cast<CommonVulkanHeader*>(current)->pNext;
        sk_free(current);
    }
}
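
// Note: this frees exactly the sk_malloc_throw allocations made in setup_features();
// the VkPhysicalDeviceFeatures2 struct itself is owned by the caller and not freed here.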

}  // namespace sk_gpu_test

#endif