/*
 * Copyright 2019 Google LLC
 * SPDX-License-Identifier: MIT
 *
 * based in part on anv and radv which are:
 * Copyright © 2015 Intel Corporation
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 */

#include "vn_instance.h"

#include "util/driconf.h"
#include "venus-protocol/vn_protocol_driver_info.h"
#include "venus-protocol/vn_protocol_driver_instance.h"
#include "venus-protocol/vn_protocol_driver_transport.h"

#include "vn_icd.h"
#include "vn_physical_device.h"
#include "vn_renderer.h"
#include "vn_ring.h"

/*
 * Instance extensions add instance-level or physical-device-level
 * functionalities.  It seems renderer support is either unnecessary or
 * optional.  We should be able to advertise them or lie about them locally.
 */
static const struct vk_instance_extension_table
   vn_instance_supported_extensions = {
      /* promoted to VK_VERSION_1_1 */
      .KHR_device_group_creation = true,
      .KHR_external_fence_capabilities = true,
      .KHR_external_memory_capabilities = true,
      .KHR_external_semaphore_capabilities = true,
      .KHR_get_physical_device_properties2 = true,

#ifdef VN_USE_WSI_PLATFORM
      .KHR_get_surface_capabilities2 = true,
      .KHR_surface = true,
      .KHR_surface_protected_capabilities = true,
#endif
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
      .KHR_wayland_surface = true,
#endif
#ifdef VK_USE_PLATFORM_XCB_KHR
      .KHR_xcb_surface = true,
#endif
#ifdef VK_USE_PLATFORM_XLIB_KHR
      .KHR_xlib_surface = true,
#endif
#ifndef VK_USE_PLATFORM_WIN32_KHR
      .EXT_headless_surface = true,
#endif
   };

static const driOptionDescription vn_dri_options[] = {
   /* clang-format off */
   DRI_CONF_SECTION_PERFORMANCE
      DRI_CONF_VK_X11_ENSURE_MIN_IMAGE_COUNT(false)
      DRI_CONF_VK_X11_OVERRIDE_MIN_IMAGE_COUNT(0)
      DRI_CONF_VK_X11_STRICT_IMAGE_COUNT(false)
      DRI_CONF_VK_XWAYLAND_WAIT_READY(true)
      DRI_CONF_VENUS_IMPLICIT_FENCING(false)
      DRI_CONF_VENUS_WSI_MULTI_PLANE_MODIFIERS(false)
   DRI_CONF_SECTION_END
   DRI_CONF_SECTION_DEBUG
      DRI_CONF_VK_WSI_FORCE_BGRA8_UNORM_FIRST(false)
      DRI_CONF_VK_WSI_FORCE_SWAPCHAIN_TO_CURRENT_EXTENT(false)
   DRI_CONF_SECTION_END
   /* clang-format on */
};

static VkResult
vn_instance_init_renderer_versions(struct vn_instance *instance)
{
   uint32_t instance_version = 0;
   VkResult result = vn_call_vkEnumerateInstanceVersion(instance->ring.ring,
                                                        &instance_version);
   if (result != VK_SUCCESS) {
      if (VN_DEBUG(INIT))
         vn_log(instance, "failed to enumerate renderer instance version");
      return result;
   }

   if (instance_version < VN_MIN_RENDERER_VERSION) {
      if (VN_DEBUG(INIT)) {
         vn_log(instance, "unsupported renderer instance version %d.%d",
                VK_VERSION_MAJOR(instance_version),
                VK_VERSION_MINOR(instance_version));
      }
      return VK_ERROR_INITIALIZATION_FAILED;
   }

   if (VN_DEBUG(INIT)) {
      vn_log(instance, "renderer instance version %d.%d.%d",
             VK_VERSION_MAJOR(instance_version),
             VK_VERSION_MINOR(instance_version),
             VK_VERSION_PATCH(instance_version));
   }

   /* request at least VN_MIN_RENDERER_VERSION internally */
   instance->renderer_api_version =
      MAX2(instance->base.base.app_info.api_version, VN_MIN_RENDERER_VERSION);

   /* instance version for internal use is capped */
   instance_version = MIN3(instance_version, instance->renderer_api_version,
                           instance->renderer->info.vk_xml_version);
   assert(instance_version >= VN_MIN_RENDERER_VERSION);

   instance->renderer_version = instance_version;

   return VK_SUCCESS;
}

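/* Tear down the instance ring: destroy the roundtrip mutex and ring
 * watchdog, free any per-thread TLS rings, and finally destroy the ring
 * itself.
 */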
static inline void
vn_instance_fini_ring(struct vn_instance *instance)
{
   mtx_destroy(&instance->ring.roundtrip_mutex);

   vn_watchdog_fini(&instance->ring.watchdog);

   list_for_each_entry_safe(struct vn_tls_ring, tls_ring,
                            &instance->ring.tls_rings, vk_head)
      vn_tls_destroy_ring(tls_ring);

   vn_ring_destroy(instance->ring.ring);
}

static VkResult
vn_instance_init_ring(struct vn_instance *instance)
{
   /* 32-bit seqno for renderer roundtrips */
   static const size_t extra_size = sizeof(uint32_t);

   /* default instance ring size */
   static const size_t buf_size = 128 * 1024;

   /* order of 4 for performant async cmd enqueue */
   static const uint8_t direct_order = 4;

   struct vn_ring_layout layout;
   vn_ring_get_layout(buf_size, extra_size, &layout);

   instance->ring.ring = vn_ring_create(instance, &layout, direct_order);
   if (!instance->ring.ring)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   list_inithead(&instance->ring.tls_rings);

   vn_watchdog_init(&instance->ring.watchdog);

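   /* roundtrip seqnos are handed out starting from 1, so 0 is never a
    * valid seqno
    */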
   mtx_init(&instance->ring.roundtrip_mutex, mtx_plain);
   instance->ring.roundtrip_next = 1;

   return VK_SUCCESS;
}

static VkResult
vn_instance_init_renderer(struct vn_instance *instance)
{
   const VkAllocationCallbacks *alloc = &instance->base.base.alloc;

   VkResult result = vn_renderer_create(instance, alloc, &instance->renderer);
   if (result != VK_SUCCESS)
      return result;

   struct vn_renderer_info *renderer_info = &instance->renderer->info;
   uint32_t version = vn_info_wire_format_version();
   if (renderer_info->wire_format_version != version) {
      if (VN_DEBUG(INIT)) {
         vn_log(instance, "wire format version %d != %d",
                renderer_info->wire_format_version, version);
      }
      return VK_ERROR_INITIALIZATION_FAILED;
   }

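   /* clamp the renderer-advertised vk.xml version to what the driver's
    * generated protocol supports, and require at least
    * VN_MIN_RENDERER_VERSION
    */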
   version = vn_info_vk_xml_version();
   if (renderer_info->vk_xml_version > version)
      renderer_info->vk_xml_version = version;
   if (renderer_info->vk_xml_version < VN_MIN_RENDERER_VERSION) {
      if (VN_DEBUG(INIT)) {
         vn_log(instance, "vk xml version %d.%d.%d < %d.%d.%d",
                VK_VERSION_MAJOR(renderer_info->vk_xml_version),
                VK_VERSION_MINOR(renderer_info->vk_xml_version),
                VK_VERSION_PATCH(renderer_info->vk_xml_version),
                VK_VERSION_MAJOR(VN_MIN_RENDERER_VERSION),
                VK_VERSION_MINOR(VN_MIN_RENDERER_VERSION),
                VK_VERSION_PATCH(VN_MIN_RENDERER_VERSION));
      }
      return VK_ERROR_INITIALIZATION_FAILED;
   }

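   /* likewise clamp the renderer's extension spec versions to those known
    * to the driver protocol
    */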
   uint32_t spec_version =
      vn_extension_get_spec_version("VK_EXT_command_serialization");
   if (renderer_info->vk_ext_command_serialization_spec_version >
       spec_version) {
      renderer_info->vk_ext_command_serialization_spec_version = spec_version;
   }

   spec_version = vn_extension_get_spec_version("VK_MESA_venus_protocol");
   if (renderer_info->vk_mesa_venus_protocol_spec_version > spec_version)
      renderer_info->vk_mesa_venus_protocol_spec_version = spec_version;

   if (VN_DEBUG(INIT)) {
      vn_log(instance, "connected to renderer");
      vn_log(instance, "wire format version %d",
             renderer_info->wire_format_version);
      vn_log(instance, "vk xml version %d.%d.%d",
             VK_VERSION_MAJOR(renderer_info->vk_xml_version),
             VK_VERSION_MINOR(renderer_info->vk_xml_version),
             VK_VERSION_PATCH(renderer_info->vk_xml_version));
      vn_log(instance, "VK_EXT_command_serialization spec version %d",
             renderer_info->vk_ext_command_serialization_spec_version);
      vn_log(instance, "VK_MESA_venus_protocol spec version %d",
             renderer_info->vk_mesa_venus_protocol_spec_version);
      vn_log(instance, "supports blob id 0: %d",
             renderer_info->supports_blob_id_0);
      vn_log(instance, "allow_vk_wait_syncs: %d",
             renderer_info->allow_vk_wait_syncs);
      vn_log(instance, "supports_multiple_timelines: %d",
             renderer_info->supports_multiple_timelines);
   }

   return VK_SUCCESS;
}

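/* Encode a vkSubmitVirtqueueSeqnoMESA command with the next roundtrip seqno
 * and submit it directly to the renderer, bypassing the instance ring.  The
 * returned seqno can later be waited on with vn_instance_wait_roundtrip.
 */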
VkResult
vn_instance_submit_roundtrip(struct vn_instance *instance,
                             uint64_t *roundtrip_seqno)
{
   const uint64_t ring_id = vn_ring_get_id(instance->ring.ring);
   uint32_t local_data[8];
   struct vn_cs_encoder local_enc =
      VN_CS_ENCODER_INITIALIZER_LOCAL(local_data, sizeof(local_data));

   mtx_lock(&instance->ring.roundtrip_mutex);
   const uint64_t seqno = instance->ring.roundtrip_next++;
   vn_encode_vkSubmitVirtqueueSeqnoMESA(&local_enc, 0, ring_id, seqno);
   VkResult result = vn_renderer_submit_simple(
      instance->renderer, local_data, vn_cs_encoder_get_len(&local_enc));
   mtx_unlock(&instance->ring.roundtrip_mutex);

   *roundtrip_seqno = seqno;
   return result;
}

void
vn_instance_wait_roundtrip(struct vn_instance *instance,
                           uint64_t roundtrip_seqno)
{
   vn_async_vkWaitVirtqueueSeqnoMESA(instance->ring.ring, roundtrip_seqno);
}

/* instance commands */

VkResult
vn_EnumerateInstanceVersion(uint32_t *pApiVersion)
{
   *pApiVersion = VN_MAX_API_VERSION;
   return VK_SUCCESS;
}

VkResult
vn_EnumerateInstanceExtensionProperties(const char *pLayerName,
                                        uint32_t *pPropertyCount,
                                        VkExtensionProperties *pProperties)
{
   if (pLayerName)
      return vn_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);

   return vk_enumerate_instance_extension_properties(
      &vn_instance_supported_extensions, pPropertyCount, pProperties);
}

VkResult
vn_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
                                    VkLayerProperties *pProperties)
{
   *pPropertyCount = 0;
   return VK_SUCCESS;
}

VkResult
vn_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkInstance *pInstance)
{
   vn_trace_init();
   VN_TRACE_FUNC();

   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : vk_default_allocator();
   struct vn_instance *instance;
   VkResult result;

   vn_env_init();

   instance = vk_zalloc(alloc, sizeof(*instance), VN_DEFAULT_ALIGN,
                        VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!instance)
      return vn_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);

   struct vk_instance_dispatch_table dispatch_table;
   vk_instance_dispatch_table_from_entrypoints(
      &dispatch_table, &vn_instance_entrypoints, true);
   vk_instance_dispatch_table_from_entrypoints(
      &dispatch_table, &wsi_instance_entrypoints, false);
   result = vn_instance_base_init(&instance->base,
                                  &vn_instance_supported_extensions,
                                  &dispatch_table, pCreateInfo, alloc);
   if (result != VK_SUCCESS) {
      vk_free(alloc, instance);
      return vn_error(NULL, result);
   }

   /* ring_idx = 0 reserved for CPU timeline */
   instance->ring_idx_used_mask = 0x1;

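   /* mutexes guarding lazy physical device enumeration state and ring_idx
    * allocation
    */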
   mtx_init(&instance->physical_device.mutex, mtx_plain);
   mtx_init(&instance->ring_idx_mutex, mtx_plain);

   if (!vn_icd_supports_api_version(
          instance->base.base.app_info.api_version)) {
      result = VK_ERROR_INCOMPATIBLE_DRIVER;
      goto out_mtx_destroy;
   }

   if (pCreateInfo->enabledLayerCount) {
      result = VK_ERROR_LAYER_NOT_PRESENT;
      goto out_mtx_destroy;
   }

   result = vn_instance_init_renderer(instance);
   if (result != VK_SUCCESS)
      goto out_mtx_destroy;

   vn_cs_renderer_protocol_info_init(instance);

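   /* shmem pools for command stream data and for renderer replies */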
   vn_renderer_shmem_pool_init(instance->renderer, &instance->cs_shmem_pool,
                               8u << 20);

   vn_renderer_shmem_pool_init(instance->renderer,
                               &instance->reply_shmem_pool, 1u << 20);

   result = vn_instance_init_ring(instance);
   if (result != VK_SUCCESS)
      goto out_shmem_pool_fini;

   result = vn_instance_init_renderer_versions(instance);
   if (result != VK_SUCCESS)
      goto out_ring_fini;

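   /* Pass no instance extensions to the renderer: instance-level extensions
    * are implemented or faked locally (see vn_instance_supported_extensions
    * above).
    */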
   VkInstanceCreateInfo local_create_info = *pCreateInfo;
   local_create_info.ppEnabledExtensionNames = NULL;
   local_create_info.enabledExtensionCount = 0;
   pCreateInfo = &local_create_info;

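   /* If the app requested an older API version, raise the renderer-side
    * instance to renderer_api_version so that at least
    * VN_MIN_RENDERER_VERSION is available internally.
    */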
   VkApplicationInfo local_app_info;
   if (instance->base.base.app_info.api_version <
       instance->renderer_api_version) {
      if (pCreateInfo->pApplicationInfo) {
         local_app_info = *pCreateInfo->pApplicationInfo;
         local_app_info.apiVersion = instance->renderer_api_version;
      } else {
         local_app_info = (const VkApplicationInfo){
            .sType = VK_STRUCTURE_TYPE_APPLICATION_INFO,
            .apiVersion = instance->renderer_api_version,
         };
      }
      local_create_info.pApplicationInfo = &local_app_info;
   }

   VkInstance instance_handle = vn_instance_to_handle(instance);
   result = vn_call_vkCreateInstance(instance->ring.ring, pCreateInfo, NULL,
                                     &instance_handle);
   if (result != VK_SUCCESS)
      goto out_ring_fini;

   driParseOptionInfo(&instance->available_dri_options, vn_dri_options,
                      ARRAY_SIZE(vn_dri_options));
   driParseConfigFiles(&instance->dri_options,
                       &instance->available_dri_options, 0, "venus", NULL,
                       NULL, instance->base.base.app_info.app_name,
                       instance->base.base.app_info.app_version,
                       instance->base.base.app_info.engine_name,
                       instance->base.base.app_info.engine_version);

   instance->renderer->info.has_implicit_fencing =
      driQueryOptionb(&instance->dri_options, "venus_implicit_fencing");
   instance->enable_wsi_multi_plane_modifiers = driQueryOptionb(
      &instance->dri_options, "venus_wsi_multi_plane_modifiers");

   if (VN_DEBUG(INIT)) {
      vn_log(instance, "supports multi-plane wsi format modifiers: %s",
             instance->enable_wsi_multi_plane_modifiers ? "yes" : "no");
   }

   const char *engine_name = instance->base.base.app_info.engine_name;
   if (engine_name) {
      instance->engine_is_zink = strcmp(engine_name, "mesa zink") == 0;
   }

   *pInstance = instance_handle;

   return VK_SUCCESS;

out_ring_fini:
   vn_instance_fini_ring(instance);

out_shmem_pool_fini:
   vn_renderer_shmem_pool_fini(instance->renderer,
                               &instance->reply_shmem_pool);
   vn_renderer_shmem_pool_fini(instance->renderer, &instance->cs_shmem_pool);
   vn_renderer_destroy(instance->renderer, alloc);

out_mtx_destroy:
   mtx_destroy(&instance->physical_device.mutex);
   mtx_destroy(&instance->ring_idx_mutex);

   vn_instance_base_fini(&instance->base);
   vk_free(alloc, instance);

   return vn_error(NULL, result);
}

void
vn_DestroyInstance(VkInstance _instance,
                   const VkAllocationCallbacks *pAllocator)
{
   VN_TRACE_FUNC();
   struct vn_instance *instance = vn_instance_from_handle(_instance);
   if (!instance)
      return;

   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &instance->base.base.alloc;

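   /* physical devices and device groups exist only if they were initialized */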
   if (instance->physical_device.initialized) {
      for (uint32_t i = 0; i < instance->physical_device.device_count; i++)
         vn_physical_device_fini(&instance->physical_device.devices[i]);
      vk_free(alloc, instance->physical_device.devices);
      vk_free(alloc, instance->physical_device.groups);
   }
   mtx_destroy(&instance->physical_device.mutex);
   mtx_destroy(&instance->ring_idx_mutex);

   vn_call_vkDestroyInstance(instance->ring.ring, _instance, NULL);

   vn_instance_fini_ring(instance);

   vn_renderer_shmem_pool_fini(instance->renderer,
                               &instance->reply_shmem_pool);

   vn_renderer_shmem_pool_fini(instance->renderer, &instance->cs_shmem_pool);

   vn_renderer_destroy(instance->renderer, alloc);

   driDestroyOptionCache(&instance->dri_options);
   driDestroyOptionInfo(&instance->available_dri_options);

   vn_instance_base_fini(&instance->base);
   vk_free(alloc, instance);
}

PFN_vkVoidFunction
vn_GetInstanceProcAddr(VkInstance _instance, const char *pName)
{
   struct vn_instance *instance = vn_instance_from_handle(_instance);
   return vk_instance_get_proc_addr(&instance->base.base,
                                    &vn_instance_entrypoints, pName);
}
472