1 /*
2 * Copyright 2015 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "SkAutoMalloc.h"
9 #include "vk/GrVkBackendContext.h"
10 #include "vk/GrVkExtensions.h"
11 #include "vk/GrVkInterface.h"
12 #include "vk/GrVkUtil.h"
13
14 ////////////////////////////////////////////////////////////////////////////////
15 // Helper code to set up Vulkan context objects
16
#ifdef SK_ENABLE_VK_LAYERS
// Validation layers to enable in debug/layer-enabled builds. Only layers the
// loader actually reports are enabled (see the hasInstanceLayer() checks in
// Create()), so missing entries here are skipped rather than fatal.
const char* kDebugLayerNames[] = {
    // elements of VK_LAYER_LUNARG_standard_validation
    "VK_LAYER_GOOGLE_threading",
    "VK_LAYER_LUNARG_parameter_validation",
    "VK_LAYER_LUNARG_object_tracker",
    "VK_LAYER_LUNARG_image",
    "VK_LAYER_LUNARG_core_validation",
    "VK_LAYER_LUNARG_swapchain",
    "VK_LAYER_GOOGLE_unique_objects",
    // not included in standard_validation
    //"VK_LAYER_LUNARG_api_dump",
    //"VK_LAYER_LUNARG_vktrace",
    //"VK_LAYER_LUNARG_screenshot",
};
#endif
33
// the minimum version of Vulkan supported
// Android devices ship drivers as old as 1.0.3; elsewhere require 1.0.8.
// Also used as VkApplicationInfo::apiVersion when creating the instance.
#ifdef SK_BUILD_FOR_ANDROID
const uint32_t kGrVkMinimumVersion = VK_MAKE_VERSION(1, 0, 3);
#else
const uint32_t kGrVkMinimumVersion = VK_MAKE_VERSION(1, 0, 8);
#endif
40
// Resolves "vk<name>" through the enclosing scope's `getProc` (for the given
// instance/device) into a local `PFN_vk<name>` named `grVk<name>`. On failure
// it logs and returns nullptr from the ENCLOSING function. NOTE(review): it
// performs no cleanup of objects created before the failure point.
#define ACQUIRE_VK_PROC(name, instance, device) \
    PFN_vk##name grVk##name = \
        reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device)); \
    if (grVk##name == nullptr) { \
        SkDebugf("Function ptr for vk%s could not be acquired\n", #name); \
        return nullptr; \
    }
48
49 // Create the base Vulkan objects needed by the GrVkGpu object
Create(uint32_t * presentQueueIndexPtr,CanPresentFn canPresent,GrVkInterface::GetProc getProc)50 const GrVkBackendContext* GrVkBackendContext::Create(uint32_t* presentQueueIndexPtr,
51 CanPresentFn canPresent,
52 GrVkInterface::GetProc getProc) {
53 if (!getProc) {
54 return nullptr;
55 }
56 SkASSERT(getProc);
57
58 VkPhysicalDevice physDev;
59 VkDevice device;
60 VkInstance inst;
61 VkResult err;
62
63 const VkApplicationInfo app_info = {
64 VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType
65 nullptr, // pNext
66 "vktest", // pApplicationName
67 0, // applicationVersion
68 "vktest", // pEngineName
69 0, // engineVerison
70 kGrVkMinimumVersion, // apiVersion
71 };
72
73 GrVkExtensions extensions(getProc);
74 extensions.initInstance(kGrVkMinimumVersion);
75
76 SkTArray<const char*> instanceLayerNames;
77 SkTArray<const char*> instanceExtensionNames;
78 uint32_t extensionFlags = 0;
79 #ifdef SK_ENABLE_VK_LAYERS
80 for (size_t i = 0; i < SK_ARRAY_COUNT(kDebugLayerNames); ++i) {
81 if (extensions.hasInstanceLayer(kDebugLayerNames[i])) {
82 instanceLayerNames.push_back(kDebugLayerNames[i]);
83 }
84 }
85 if (extensions.hasInstanceExtension(VK_EXT_DEBUG_REPORT_EXTENSION_NAME)) {
86 instanceExtensionNames.push_back(VK_EXT_DEBUG_REPORT_EXTENSION_NAME);
87 extensionFlags |= kEXT_debug_report_GrVkExtensionFlag;
88 }
89 #endif
90
91 if (extensions.hasInstanceExtension(VK_KHR_SURFACE_EXTENSION_NAME)) {
92 instanceExtensionNames.push_back(VK_KHR_SURFACE_EXTENSION_NAME);
93 extensionFlags |= kKHR_surface_GrVkExtensionFlag;
94 }
95 if (extensions.hasInstanceExtension(VK_KHR_SWAPCHAIN_EXTENSION_NAME)) {
96 instanceExtensionNames.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME);
97 extensionFlags |= kKHR_swapchain_GrVkExtensionFlag;
98 }
99 #ifdef SK_BUILD_FOR_WIN
100 if (extensions.hasInstanceExtension(VK_KHR_WIN32_SURFACE_EXTENSION_NAME)) {
101 instanceExtensionNames.push_back(VK_KHR_WIN32_SURFACE_EXTENSION_NAME);
102 extensionFlags |= kKHR_win32_surface_GrVkExtensionFlag;
103 }
104 #elif defined(SK_BUILD_FOR_ANDROID)
105 if (extensions.hasInstanceExtension(VK_KHR_ANDROID_SURFACE_EXTENSION_NAME)) {
106 instanceExtensionNames.push_back(VK_KHR_ANDROID_SURFACE_EXTENSION_NAME);
107 extensionFlags |= kKHR_android_surface_GrVkExtensionFlag;
108 }
109 #elif defined(SK_BUILD_FOR_UNIX) && !defined(__Fuchsia__)
110 if (extensions.hasInstanceExtension(VK_KHR_XCB_SURFACE_EXTENSION_NAME)) {
111 instanceExtensionNames.push_back(VK_KHR_XCB_SURFACE_EXTENSION_NAME);
112 extensionFlags |= kKHR_xcb_surface_GrVkExtensionFlag;
113 }
114 #endif
115
116 const VkInstanceCreateInfo instance_create = {
117 VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, // sType
118 nullptr, // pNext
119 0, // flags
120 &app_info, // pApplicationInfo
121 (uint32_t) instanceLayerNames.count(), // enabledLayerNameCount
122 instanceLayerNames.begin(), // ppEnabledLayerNames
123 (uint32_t) instanceExtensionNames.count(), // enabledExtensionNameCount
124 instanceExtensionNames.begin(), // ppEnabledExtensionNames
125 };
126
127 ACQUIRE_VK_PROC(CreateInstance, VK_NULL_HANDLE, VK_NULL_HANDLE);
128 err = grVkCreateInstance(&instance_create, nullptr, &inst);
129 if (err < 0) {
130 SkDebugf("vkCreateInstance failed: %d\n", err);
131 return nullptr;
132 }
133
134 ACQUIRE_VK_PROC(DestroyInstance, inst, VK_NULL_HANDLE);
135 ACQUIRE_VK_PROC(EnumeratePhysicalDevices, inst, VK_NULL_HANDLE);
136 ACQUIRE_VK_PROC(GetPhysicalDeviceQueueFamilyProperties, inst, VK_NULL_HANDLE);
137 ACQUIRE_VK_PROC(GetPhysicalDeviceFeatures, inst, VK_NULL_HANDLE);
138 ACQUIRE_VK_PROC(CreateDevice, inst, VK_NULL_HANDLE);
139 ACQUIRE_VK_PROC(GetDeviceQueue, inst, VK_NULL_HANDLE);
140 ACQUIRE_VK_PROC(DeviceWaitIdle, inst, VK_NULL_HANDLE);
141 ACQUIRE_VK_PROC(DestroyDevice, inst, VK_NULL_HANDLE);
142
143 uint32_t gpuCount;
144 err = grVkEnumeratePhysicalDevices(inst, &gpuCount, nullptr);
145 if (err) {
146 SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
147 grVkDestroyInstance(inst, nullptr);
148 return nullptr;
149 }
150 if (!gpuCount) {
151 SkDebugf("vkEnumeratePhysicalDevices returned no supported devices.\n");
152 grVkDestroyInstance(inst, nullptr);
153 return nullptr;
154 }
155 // Just returning the first physical device instead of getting the whole array.
156 // TODO: find best match for our needs
157 gpuCount = 1;
158 err = grVkEnumeratePhysicalDevices(inst, &gpuCount, &physDev);
159 if (err) {
160 SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
161 grVkDestroyInstance(inst, nullptr);
162 return nullptr;
163 }
164
165 // query to get the initial queue props size
166 uint32_t queueCount;
167 grVkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, nullptr);
168 if (!queueCount) {
169 SkDebugf("vkGetPhysicalDeviceQueueFamilyProperties returned no queues.\n");
170 grVkDestroyInstance(inst, nullptr);
171 return nullptr;
172 }
173
174 SkAutoMalloc queuePropsAlloc(queueCount * sizeof(VkQueueFamilyProperties));
175 // now get the actual queue props
176 VkQueueFamilyProperties* queueProps = (VkQueueFamilyProperties*)queuePropsAlloc.get();
177
178 grVkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, queueProps);
179
180 // iterate to find the graphics queue
181 uint32_t graphicsQueueIndex = queueCount;
182 for (uint32_t i = 0; i < queueCount; i++) {
183 if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
184 graphicsQueueIndex = i;
185 break;
186 }
187 }
188 if (graphicsQueueIndex == queueCount) {
189 SkDebugf("Could not find any supported graphics queues.\n");
190 grVkDestroyInstance(inst, nullptr);
191 return nullptr;
192 }
193
194 // iterate to find the present queue, if needed
195 uint32_t presentQueueIndex = queueCount;
196 if (presentQueueIndexPtr && canPresent) {
197 for (uint32_t i = 0; i < queueCount; i++) {
198 if (canPresent(inst, physDev, i)) {
199 presentQueueIndex = i;
200 break;
201 }
202 }
203 if (presentQueueIndex == queueCount) {
204 SkDebugf("Could not find any supported present queues.\n");
205 grVkDestroyInstance(inst, nullptr);
206 return nullptr;
207 }
208 *presentQueueIndexPtr = presentQueueIndex;
209 } else {
210 // Just setting this so we end up make a single queue for graphics since there was no
211 // request for a present queue.
212 presentQueueIndex = graphicsQueueIndex;
213 }
214
215 extensions.initDevice(kGrVkMinimumVersion, inst, physDev);
216
217 SkTArray<const char*> deviceLayerNames;
218 SkTArray<const char*> deviceExtensionNames;
219 #ifdef SK_ENABLE_VK_LAYERS
220 for (size_t i = 0; i < SK_ARRAY_COUNT(kDebugLayerNames); ++i) {
221 if (extensions.hasDeviceLayer(kDebugLayerNames[i])) {
222 deviceLayerNames.push_back(kDebugLayerNames[i]);
223 }
224 }
225 #endif
226 if (extensions.hasDeviceExtension(VK_KHR_SWAPCHAIN_EXTENSION_NAME)) {
227 deviceExtensionNames.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME);
228 extensionFlags |= kKHR_swapchain_GrVkExtensionFlag;
229 }
230 if (extensions.hasDeviceExtension("VK_NV_glsl_shader")) {
231 deviceExtensionNames.push_back("VK_NV_glsl_shader");
232 extensionFlags |= kNV_glsl_shader_GrVkExtensionFlag;
233 }
234
235 // query to get the physical device properties
236 VkPhysicalDeviceFeatures deviceFeatures;
237 grVkGetPhysicalDeviceFeatures(physDev, &deviceFeatures);
238 // this looks like it would slow things down,
239 // and we can't depend on it on all platforms
240 deviceFeatures.robustBufferAccess = VK_FALSE;
241
242 uint32_t featureFlags = 0;
243 if (deviceFeatures.geometryShader) {
244 featureFlags |= kGeometryShader_GrVkFeatureFlag;
245 }
246 if (deviceFeatures.dualSrcBlend) {
247 featureFlags |= kDualSrcBlend_GrVkFeatureFlag;
248 }
249 if (deviceFeatures.sampleRateShading) {
250 featureFlags |= kSampleRateShading_GrVkFeatureFlag;
251 }
252
253 float queuePriorities[1] = { 0.0 };
254 // Here we assume no need for swapchain queue
255 // If one is needed, the client will need its own setup code
256 const VkDeviceQueueCreateInfo queueInfo[2] = {
257 {
258 VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
259 nullptr, // pNext
260 0, // VkDeviceQueueCreateFlags
261 graphicsQueueIndex, // queueFamilyIndex
262 1, // queueCount
263 queuePriorities, // pQueuePriorities
264 },
265 {
266 VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
267 nullptr, // pNext
268 0, // VkDeviceQueueCreateFlags
269 presentQueueIndex, // queueFamilyIndex
270 1, // queueCount
271 queuePriorities, // pQueuePriorities
272 }
273 };
274 uint32_t queueInfoCount = (presentQueueIndex != graphicsQueueIndex) ? 2 : 1;
275
276 const VkDeviceCreateInfo deviceInfo = {
277 VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, // sType
278 nullptr, // pNext
279 0, // VkDeviceCreateFlags
280 queueInfoCount, // queueCreateInfoCount
281 queueInfo, // pQueueCreateInfos
282 (uint32_t) deviceLayerNames.count(), // layerCount
283 deviceLayerNames.begin(), // ppEnabledLayerNames
284 (uint32_t) deviceExtensionNames.count(), // extensionCount
285 deviceExtensionNames.begin(), // ppEnabledExtensionNames
286 &deviceFeatures // ppEnabledFeatures
287 };
288
289 err = grVkCreateDevice(physDev, &deviceInfo, nullptr, &device);
290 if (err) {
291 SkDebugf("CreateDevice failed: %d\n", err);
292 grVkDestroyInstance(inst, nullptr);
293 return nullptr;
294 }
295
296 auto interface =
297 sk_make_sp<GrVkInterface>(getProc, inst, device, extensionFlags);
298 if (!interface->validate(extensionFlags)) {
299 SkDebugf("Vulkan interface validation failed\n");
300 grVkDeviceWaitIdle(device);
301 grVkDestroyDevice(device, nullptr);
302 grVkDestroyInstance(inst, nullptr);
303 return nullptr;
304 }
305
306 VkQueue queue;
307 grVkGetDeviceQueue(device, graphicsQueueIndex, 0, &queue);
308
309 GrVkBackendContext* ctx = new GrVkBackendContext();
310 ctx->fInstance = inst;
311 ctx->fPhysicalDevice = physDev;
312 ctx->fDevice = device;
313 ctx->fQueue = queue;
314 ctx->fGraphicsQueueIndex = graphicsQueueIndex;
315 ctx->fMinAPIVersion = kGrVkMinimumVersion;
316 ctx->fExtensions = extensionFlags;
317 ctx->fFeatures = featureFlags;
318 ctx->fInterface.reset(interface.release());
319 ctx->fOwnsInstanceAndDevice = true;
320
321 return ctx;
322 }
323
~GrVkBackendContext()324 GrVkBackendContext::~GrVkBackendContext() {
325 if (fInterface == nullptr || !fOwnsInstanceAndDevice) {
326 return;
327 }
328
329 fInterface->fFunctions.fDeviceWaitIdle(fDevice);
330 fInterface->fFunctions.fDestroyDevice(fDevice, nullptr);
331 fDevice = VK_NULL_HANDLE;
332 fInterface->fFunctions.fDestroyInstance(fInstance, nullptr);
333 fInstance = VK_NULL_HANDLE;
334 }
335