• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright 2019 Google LLC
3  * SPDX-License-Identifier: MIT
4  *
5  * based in part on anv and radv which are:
6  * Copyright © 2015 Intel Corporation
7  * Copyright © 2016 Red Hat.
8  * Copyright © 2016 Bas Nieuwenhuizen
9  */
10 
11 #include "vn_instance.h"
12 
13 #include "util/driconf.h"
14 #include "venus-protocol/vn_protocol_driver_info.h"
15 #include "venus-protocol/vn_protocol_driver_instance.h"
16 #include "venus-protocol/vn_protocol_driver_transport.h"
17 
18 #include "vn_icd.h"
19 #include "vn_physical_device.h"
20 #include "vn_renderer.h"
21 
/* ring sizes and the per-size threshold below which a command is small
 * enough to be placed on the ring directly (see
 * vn_instance_submission_can_direct)
 */
#define VN_INSTANCE_LARGE_RING_SIZE (64 * 1024)
#define VN_INSTANCE_LARGE_RING_DIRECT_THRESHOLD                              \
   (VN_INSTANCE_LARGE_RING_SIZE / 16)

/* this must not exceed 2KiB for the ring to fit in a 4K page */
#define VN_INSTANCE_RING_SIZE (2 * 1024)
#define VN_INSTANCE_RING_DIRECT_THRESHOLD (VN_INSTANCE_RING_SIZE / 8)
29 
/*
 * Instance extensions add instance-level or physical-device-level
 * functionalities.  It seems renderer support is either unnecessary or
 * optional.  We should be able to advertise them or lie about them locally.
 */
static const struct vk_instance_extension_table
   vn_instance_supported_extensions = {
      /* promoted to VK_VERSION_1_1 */
      .KHR_device_group_creation = true,
      .KHR_external_fence_capabilities = true,
      .KHR_external_memory_capabilities = true,
      .KHR_external_semaphore_capabilities = true,
      .KHR_get_physical_device_properties2 = true,

      /* WSI extensions are compiled in only when a platform is enabled */
#ifdef VN_USE_WSI_PLATFORM
      .KHR_get_surface_capabilities2 = true,
      .KHR_surface = true,
      .KHR_surface_protected_capabilities = true,
#endif
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
      .KHR_wayland_surface = true,
#endif
#ifdef VK_USE_PLATFORM_XCB_KHR
      .KHR_xcb_surface = true,
#endif
#ifdef VK_USE_PLATFORM_XLIB_KHR
      .KHR_xlib_surface = true,
#endif
   };
59 
/* driconf options recognized by venus; parsed in vn_CreateInstance */
static const driOptionDescription vn_dri_options[] = {
   /* clang-format off */
   DRI_CONF_SECTION_PERFORMANCE
      DRI_CONF_VK_X11_ENSURE_MIN_IMAGE_COUNT(false)
      DRI_CONF_VK_X11_OVERRIDE_MIN_IMAGE_COUNT(0)
      DRI_CONF_VK_X11_STRICT_IMAGE_COUNT(false)
      DRI_CONF_VK_XWAYLAND_WAIT_READY(true)
      DRI_CONF_VENUS_IMPLICIT_FENCING(false)
   DRI_CONF_SECTION_END
   DRI_CONF_SECTION_DEBUG
      DRI_CONF_VK_WSI_FORCE_BGRA8_UNORM_FIRST(false)
   DRI_CONF_SECTION_END
   /* clang-format on */
};
74 
75 static VkResult
vn_instance_init_renderer_versions(struct vn_instance * instance)76 vn_instance_init_renderer_versions(struct vn_instance *instance)
77 {
78    uint32_t instance_version = 0;
79    VkResult result =
80       vn_call_vkEnumerateInstanceVersion(instance, &instance_version);
81    if (result != VK_SUCCESS) {
82       if (VN_DEBUG(INIT))
83          vn_log(instance, "failed to enumerate renderer instance version");
84       return result;
85    }
86 
87    if (instance_version < VN_MIN_RENDERER_VERSION) {
88       if (VN_DEBUG(INIT)) {
89          vn_log(instance, "unsupported renderer instance version %d.%d",
90                 VK_VERSION_MAJOR(instance_version),
91                 VK_VERSION_MINOR(instance_version));
92       }
93       return VK_ERROR_INITIALIZATION_FAILED;
94    }
95 
96    if (VN_DEBUG(INIT)) {
97       vn_log(instance, "renderer instance version %d.%d.%d",
98              VK_VERSION_MAJOR(instance_version),
99              VK_VERSION_MINOR(instance_version),
100              VK_VERSION_PATCH(instance_version));
101    }
102 
103    /* request at least VN_MIN_RENDERER_VERSION internally */
104    instance->renderer_api_version =
105       MAX2(instance->base.base.app_info.api_version, VN_MIN_RENDERER_VERSION);
106 
107    /* instance version for internal use is capped */
108    instance_version = MIN3(instance_version, instance->renderer_api_version,
109                            instance->renderer->info.vk_xml_version);
110    assert(instance_version >= VN_MIN_RENDERER_VERSION);
111 
112    instance->renderer_version = instance_version;
113 
114    return VK_SUCCESS;
115 }
116 
/* Create the command ring: allocate and map the ring shmem, initialize the
 * driver-side ring state, ask the renderer to create its end of the ring
 * via vkCreateRingMESA, and set up the upload encoder and roundtrip state.
 *
 * Returns VK_ERROR_OUT_OF_HOST_MEMORY when the ring shmem cannot be
 * allocated.
 */
static VkResult
vn_instance_init_ring(struct vn_instance *instance)
{
   /* use the large ring only when the renderer advertises the experimental
    * largeRing feature
    */
   const size_t buf_size = instance->experimental.largeRing
                              ? VN_INSTANCE_LARGE_RING_SIZE
                              : VN_INSTANCE_RING_SIZE;
   /* 32-bit seqno for renderer roundtrips */
   const size_t extra_size = sizeof(uint32_t);
   struct vn_ring_layout layout;
   vn_ring_get_layout(buf_size, extra_size, &layout);

   instance->ring.shmem =
      vn_renderer_shmem_create(instance->renderer, layout.shmem_size);
   if (!instance->ring.shmem) {
      if (VN_DEBUG(INIT))
         vn_log(instance, "failed to allocate/map ring shmem");
      return VK_ERROR_OUT_OF_HOST_MEMORY;
   }

   mtx_init(&instance->ring.mutex, mtx_plain);

   struct vn_ring *ring = &instance->ring.ring;
   vn_ring_init(ring, instance->renderer, &layout,
                instance->ring.shmem->mmap_ptr);

   /* the ring pointer doubles as a unique ring id for the renderer */
   instance->ring.id = (uintptr_t)ring;

   const struct VkRingCreateInfoMESA info = {
      .sType = VK_STRUCTURE_TYPE_RING_CREATE_INFO_MESA,
      .resourceId = instance->ring.shmem->res_id,
      .size = layout.shmem_size,
      /* 50ull * 1000 * 1000 — presumably nanoseconds (50ms) before the
       * renderer idles the ring; confirm against the venus protocol
       */
      .idleTimeout = 50ull * 1000 * 1000,
      .headOffset = layout.head_offset,
      .tailOffset = layout.tail_offset,
      .statusOffset = layout.status_offset,
      .bufferOffset = layout.buffer_offset,
      .bufferSize = layout.buffer_size,
      .extraOffset = layout.extra_offset,
      .extraSize = layout.extra_size,
   };

   /* the ring does not exist yet, so submit vkCreateRingMESA directly */
   uint32_t create_ring_data[64];
   struct vn_cs_encoder local_enc = VN_CS_ENCODER_INITIALIZER_LOCAL(
      create_ring_data, sizeof(create_ring_data));
   vn_encode_vkCreateRingMESA(&local_enc, 0, instance->ring.id, &info);
   vn_renderer_submit_simple(instance->renderer, create_ring_data,
                             vn_cs_encoder_get_len(&local_enc));

   /* shmem-backed encoder for uploading large command streams indirectly */
   vn_cs_encoder_init(&instance->ring.upload, instance,
                      VN_CS_ENCODER_STORAGE_SHMEM_ARRAY, 1 * 1024 * 1024);

   mtx_init(&instance->ring.roundtrip_mutex, mtx_plain);
   instance->ring.roundtrip_next = 1;

   return VK_SUCCESS;
}
173 
/* forward declaration: defined further below but needed by
 * vn_instance_init_experimental_features, which runs before the ring exists
 */
static struct vn_renderer_shmem *
vn_instance_get_reply_shmem_locked(struct vn_instance *instance,
                                   size_t size,
                                   void **ptr);
178 
/* Query VkVenusExperimentalFeatures100000MESA from the renderer into
 * instance->experimental.
 *
 * Runs before the ring is created, so both the reply shmem setup and the
 * query go through vn_renderer_submit_simple(_sync) rather than the ring.
 * instance->experimental stays zeroed (instance is vk_zalloc'ed) when the
 * renderer does not speak the experimental protocol.
 */
static VkResult
vn_instance_init_experimental_features(struct vn_instance *instance)
{
   /* the experimental feature struct is tied to spec version 100000 */
   if (instance->renderer->info.vk_mesa_venus_protocol_spec_version !=
       100000) {
      if (VN_DEBUG(INIT))
         vn_log(instance, "renderer supports no experimental features");
      return VK_SUCCESS;
   }

   size_t struct_size = sizeof(instance->experimental);

   /* prepare the reply shmem */
   const size_t reply_size =
      vn_sizeof_vkGetVenusExperimentalFeatureData100000MESA_reply(
         &struct_size, &instance->experimental);
   void *reply_ptr;
   struct vn_renderer_shmem *reply_shmem =
      vn_instance_get_reply_shmem_locked(instance, reply_size, &reply_ptr);
   if (!reply_shmem)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   /* encode the command */
   uint32_t local_data[16];
   struct vn_cs_encoder local_enc =
      VN_CS_ENCODER_INITIALIZER_LOCAL(local_data, sizeof(local_data));
   vn_encode_vkGetVenusExperimentalFeatureData100000MESA(
      &local_enc, VK_COMMAND_GENERATE_REPLY_BIT_EXT, &struct_size,
      &instance->experimental);

   /* synchronous submit: the reply must be complete before decoding below */
   VkResult result = vn_renderer_submit_simple_sync(
      instance->renderer, local_data, vn_cs_encoder_get_len(&local_enc));
   if (result != VK_SUCCESS) {
      vn_renderer_shmem_unref(instance->renderer, reply_shmem);
      return result;
   }

   /* decode the reply into instance->experimental */
   struct vn_cs_decoder reply_dec =
      VN_CS_DECODER_INITIALIZER(reply_ptr, reply_size);
   vn_decode_vkGetVenusExperimentalFeatureData100000MESA_reply(
      &reply_dec, &struct_size, &instance->experimental);
   vn_renderer_shmem_unref(instance->renderer, reply_shmem);

   if (VN_DEBUG(INIT)) {
      vn_log(instance,
             "VkVenusExperimentalFeatures100000MESA is as below:"
             "\n\tmemoryResourceAllocationSize = %u"
             "\n\tglobalFencing = %u"
             "\n\tlargeRing = %u",
             instance->experimental.memoryResourceAllocationSize,
             instance->experimental.globalFencing,
             instance->experimental.largeRing);
   }

   return VK_SUCCESS;
}
235 
/* Connect to the renderer and validate/clamp its reported capabilities:
 * the wire format version must match exactly, while the vk.xml version and
 * the venus extension spec versions are clamped to what this driver build
 * was generated against.
 *
 * Returns VK_ERROR_INITIALIZATION_FAILED on a wire format mismatch or a
 * too-old renderer, or the error from vn_renderer_create.  On failure the
 * renderer (if created) is destroyed by the caller's fail path.
 */
static VkResult
vn_instance_init_renderer(struct vn_instance *instance)
{
   const VkAllocationCallbacks *alloc = &instance->base.base.alloc;

   VkResult result = vn_renderer_create(instance, alloc, &instance->renderer);
   if (result != VK_SUCCESS)
      return result;

   /* the serialized command format must match exactly */
   struct vn_renderer_info *renderer_info = &instance->renderer->info;
   uint32_t version = vn_info_wire_format_version();
   if (renderer_info->wire_format_version != version) {
      if (VN_DEBUG(INIT)) {
         vn_log(instance, "wire format version %d != %d",
                renderer_info->wire_format_version, version);
      }
      return VK_ERROR_INITIALIZATION_FAILED;
   }

   /* clamp the renderer's vk.xml version to ours, then reject renderers
    * older than the minimum we support
    */
   version = vn_info_vk_xml_version();
   if (renderer_info->vk_xml_version > version)
      renderer_info->vk_xml_version = version;
   if (renderer_info->vk_xml_version < VN_MIN_RENDERER_VERSION) {
      if (VN_DEBUG(INIT)) {
         vn_log(instance, "vk xml version %d.%d.%d < %d.%d.%d",
                VK_VERSION_MAJOR(renderer_info->vk_xml_version),
                VK_VERSION_MINOR(renderer_info->vk_xml_version),
                VK_VERSION_PATCH(renderer_info->vk_xml_version),
                VK_VERSION_MAJOR(VN_MIN_RENDERER_VERSION),
                VK_VERSION_MINOR(VN_MIN_RENDERER_VERSION),
                VK_VERSION_PATCH(VN_MIN_RENDERER_VERSION));
      }
      return VK_ERROR_INITIALIZATION_FAILED;
   }

   /* clamp the venus protocol extension spec versions likewise */
   uint32_t spec_version =
      vn_extension_get_spec_version("VK_EXT_command_serialization");
   if (renderer_info->vk_ext_command_serialization_spec_version >
       spec_version) {
      renderer_info->vk_ext_command_serialization_spec_version = spec_version;
   }

   spec_version = vn_extension_get_spec_version("VK_MESA_venus_protocol");
   if (renderer_info->vk_mesa_venus_protocol_spec_version > spec_version)
      renderer_info->vk_mesa_venus_protocol_spec_version = spec_version;

   if (VN_DEBUG(INIT)) {
      vn_log(instance, "connected to renderer");
      vn_log(instance, "wire format version %d",
             renderer_info->wire_format_version);
      vn_log(instance, "vk xml version %d.%d.%d",
             VK_VERSION_MAJOR(renderer_info->vk_xml_version),
             VK_VERSION_MINOR(renderer_info->vk_xml_version),
             VK_VERSION_PATCH(renderer_info->vk_xml_version));
      vn_log(instance, "VK_EXT_command_serialization spec version %d",
             renderer_info->vk_ext_command_serialization_spec_version);
      vn_log(instance, "VK_MESA_venus_protocol spec version %d",
             renderer_info->vk_mesa_venus_protocol_spec_version);
      vn_log(instance, "supports blob id 0: %d",
             renderer_info->supports_blob_id_0);
      vn_log(instance, "allow_vk_wait_syncs: %d",
             renderer_info->allow_vk_wait_syncs);
   }

   return VK_SUCCESS;
}
302 
303 VkResult
vn_instance_submit_roundtrip(struct vn_instance * instance,uint32_t * roundtrip_seqno)304 vn_instance_submit_roundtrip(struct vn_instance *instance,
305                              uint32_t *roundtrip_seqno)
306 {
307    uint32_t write_ring_extra_data[8];
308    struct vn_cs_encoder local_enc = VN_CS_ENCODER_INITIALIZER_LOCAL(
309       write_ring_extra_data, sizeof(write_ring_extra_data));
310 
311    /* submit a vkWriteRingExtraMESA through the renderer */
312    mtx_lock(&instance->ring.roundtrip_mutex);
313    const uint32_t seqno = instance->ring.roundtrip_next++;
314    vn_encode_vkWriteRingExtraMESA(&local_enc, 0, instance->ring.id, 0, seqno);
315    VkResult result =
316       vn_renderer_submit_simple(instance->renderer, write_ring_extra_data,
317                                 vn_cs_encoder_get_len(&local_enc));
318    mtx_unlock(&instance->ring.roundtrip_mutex);
319 
320    *roundtrip_seqno = seqno;
321    return result;
322 }
323 
/* Return true when seqno a is at or after seqno b under 32-bit wrapping
 * arithmetic: the forward distance from b to a must fit in the lower half
 * of the seqno space.
 */
static bool
roundtrip_seqno_ge(uint32_t a, uint32_t b)
{
   const uint32_t delta = a - b;
   return delta <= (uint32_t)INT32_MAX;
}
330 
/* Spin (with vn_relax backoff) until the seqno in the ring's extra region
 * reaches roundtrip_seqno, i.e. until the renderer has processed the
 * matching vkWriteRingExtraMESA submitted by vn_instance_submit_roundtrip.
 */
void
vn_instance_wait_roundtrip(struct vn_instance *instance,
                           uint32_t roundtrip_seqno)
{
   VN_TRACE_FUNC();
   const struct vn_ring *ring = &instance->ring.ring;
   /* the renderer stores the latest roundtrip seqno at extra offset 0 */
   const volatile atomic_uint *ptr = ring->shared.extra;
   uint32_t iter = 0;
   do {
      /* acquire so reads after the wait see the renderer's prior writes */
      const uint32_t cur = atomic_load_explicit(ptr, memory_order_acquire);
      if (roundtrip_seqno_ge(cur, roundtrip_seqno))
         break;
      vn_relax(&iter, "roundtrip");
   } while (true);
}
346 
/* State for one ring submission.  cs points either at the caller's encoder
 * (direct submission) or at indirect.cs, an encoded
 * vkExecuteCommandStreamsMESA referencing the caller's shmem buffers.
 */
struct vn_instance_submission {
   const struct vn_cs_encoder *cs;      /* what actually goes on the ring */
   struct vn_ring_submit *submit;       /* keeps referenced shmems alive */

   struct {
      struct vn_cs_encoder cs;
      struct vn_cs_encoder_buffer buffer;
      /* inline storage; heap-allocated instead when the command is larger */
      uint32_t data[64];
   } indirect;
};
357 
/* Return the cs to place on the ring.  For a direct submission that is the
 * caller's cs itself; otherwise build a vkExecuteCommandStreamsMESA in
 * submit->indirect that references the cs's committed shmem buffers.
 * Returns NULL on allocation failure.
 */
static const struct vn_cs_encoder *
vn_instance_submission_get_cs(struct vn_instance_submission *submit,
                              const struct vn_cs_encoder *cs,
                              bool direct)
{
   if (direct)
      return cs;

   /* one descriptor per committed buffer; spill to the heap when the cs has
    * more buffers than the local array holds
    */
   VkCommandStreamDescriptionMESA local_descs[8];
   VkCommandStreamDescriptionMESA *descs = local_descs;
   if (cs->buffer_count > ARRAY_SIZE(local_descs)) {
      descs =
         malloc(sizeof(VkCommandStreamDescriptionMESA) * cs->buffer_count);
      if (!descs)
         return NULL;
   }

   uint32_t desc_count = 0;
   for (uint32_t i = 0; i < cs->buffer_count; i++) {
      const struct vn_cs_encoder_buffer *buf = &cs->buffers[i];
      /* skip buffers with no committed data */
      if (buf->committed_size) {
         descs[desc_count++] = (VkCommandStreamDescriptionMESA){
            .resourceId = buf->shmem->res_id,
            .offset = buf->offset,
            .size = buf->committed_size,
         };
      }
   }

   /* encode into the inline data array, or a heap buffer if it won't fit;
    * vn_instance_submission_cleanup frees the heap buffer later
    */
   const size_t exec_size = vn_sizeof_vkExecuteCommandStreamsMESA(
      desc_count, descs, NULL, 0, NULL, 0);
   void *exec_data = submit->indirect.data;
   if (exec_size > sizeof(submit->indirect.data)) {
      exec_data = malloc(exec_size);
      if (!exec_data) {
         if (descs != local_descs)
            free(descs);
         return NULL;
      }
   }

   submit->indirect.buffer = VN_CS_ENCODER_BUFFER_INITIALIZER(exec_data);
   submit->indirect.cs =
      VN_CS_ENCODER_INITIALIZER(&submit->indirect.buffer, exec_size);
   vn_encode_vkExecuteCommandStreamsMESA(&submit->indirect.cs, 0, desc_count,
                                         descs, NULL, 0, NULL, 0);
   vn_cs_encoder_commit(&submit->indirect.cs);

   if (descs != local_descs)
      free(descs);

   return &submit->indirect.cs;
}
411 
/* Acquire a vn_ring_submit that holds a reference on every shmem this
 * submission touches: each cs buffer (indirect submissions only) plus the
 * optional extra_shmem (e.g. a reply shmem).  Returns NULL when no submit
 * can be acquired.
 */
static struct vn_ring_submit *
vn_instance_submission_get_ring_submit(struct vn_ring *ring,
                                       const struct vn_cs_encoder *cs,
                                       struct vn_renderer_shmem *extra_shmem,
                                       bool direct)
{
   /* direct submissions copy the data onto the ring, so no cs shmems */
   const uint32_t shmem_count =
      (direct ? 0 : cs->buffer_count) + (extra_shmem ? 1 : 0);
   struct vn_ring_submit *submit = vn_ring_get_submit(ring, shmem_count);
   if (!submit)
      return NULL;

   submit->shmem_count = shmem_count;
   if (!direct) {
      for (uint32_t i = 0; i < cs->buffer_count; i++) {
         submit->shmems[i] =
            vn_renderer_shmem_ref(ring->renderer, cs->buffers[i].shmem);
      }
   }
   if (extra_shmem) {
      /* extra_shmem always occupies the last slot */
      submit->shmems[shmem_count - 1] =
         vn_renderer_shmem_ref(ring->renderer, extra_shmem);
   }

   return submit;
}
438 
439 static void
vn_instance_submission_cleanup(struct vn_instance_submission * submit)440 vn_instance_submission_cleanup(struct vn_instance_submission *submit)
441 {
442    if (submit->cs == &submit->indirect.cs &&
443        submit->indirect.buffer.base != submit->indirect.data)
444       free(submit->indirect.buffer.base);
445 }
446 
/* Prepare *submit for the given cs: pick or build the cs to put on the
 * ring, then acquire the ring submit holding the shmem references.  On
 * failure nothing is leaked and VK_ERROR_OUT_OF_HOST_MEMORY is returned.
 */
static VkResult
vn_instance_submission_prepare(struct vn_instance_submission *submit,
                               const struct vn_cs_encoder *cs,
                               struct vn_ring *ring,
                               struct vn_renderer_shmem *extra_shmem,
                               bool direct)
{
   submit->cs = vn_instance_submission_get_cs(submit, cs, direct);
   if (!submit->cs)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   submit->submit =
      vn_instance_submission_get_ring_submit(ring, cs, extra_shmem, direct);
   if (!submit->submit) {
      /* frees the indirect heap buffer allocated by get_cs, if any */
      vn_instance_submission_cleanup(submit);
      return VK_ERROR_OUT_OF_HOST_MEMORY;
   }

   return VK_SUCCESS;
}
467 
468 static bool
vn_instance_submission_can_direct(const struct vn_instance * instance,const struct vn_cs_encoder * cs)469 vn_instance_submission_can_direct(const struct vn_instance *instance,
470                                   const struct vn_cs_encoder *cs)
471 {
472    const size_t threshold = instance->experimental.largeRing
473                                ? VN_INSTANCE_LARGE_RING_DIRECT_THRESHOLD
474                                : VN_INSTANCE_RING_DIRECT_THRESHOLD;
475    return vn_cs_encoder_get_len(cs) <= threshold;
476 }
477 
/* Copy a pointer-backed (caller-stack) cs into the shared shmem-backed
 * upload encoder so it can be submitted indirectly.  Returns the upload
 * encoder, or NULL on reservation failure.  Caller holds the ring mutex
 * (the upload encoder is shared state).
 */
static struct vn_cs_encoder *
vn_instance_ring_cs_upload_locked(struct vn_instance *instance,
                                  const struct vn_cs_encoder *cs)
{
   VN_TRACE_FUNC();
   assert(cs->storage_type == VN_CS_ENCODER_STORAGE_POINTER &&
          cs->buffer_count == 1);
   const void *cs_data = cs->buffers[0].base;
   const size_t cs_size = cs->total_committed_size;
   assert(cs_size == vn_cs_encoder_get_len(cs));

   struct vn_cs_encoder *upload = &instance->ring.upload;
   vn_cs_encoder_reset(upload);

   if (!vn_cs_encoder_reserve(upload, cs_size))
      return NULL;

   vn_cs_encoder_write(upload, cs_size, cs_data, cs_size);
   vn_cs_encoder_commit(upload);

   /* NOTE(review): without blob id 0 support, wait for the roundtrip tied
    * to the current upload buffer — presumably so the renderer has attached
    * the shmem before it is referenced; confirm against vn_cs_encoder
    */
   if (unlikely(!instance->renderer->info.supports_blob_id_0))
      vn_instance_wait_roundtrip(instance, upload->current_buffer_roundtrip);

   return upload;
}
503 
/* Submit a cs to the ring.  Small commands go on the ring directly; larger
 * pointer-backed ones are first copied into the shmem upload encoder.  When
 * the ring asks for it, the renderer is notified via vkNotifyRingMESA.
 * The ring seqno of the submission is returned through ring_seqno when
 * non-NULL.  Caller holds the ring mutex.
 */
static VkResult
vn_instance_ring_submit_locked(struct vn_instance *instance,
                               const struct vn_cs_encoder *cs,
                               struct vn_renderer_shmem *extra_shmem,
                               uint32_t *ring_seqno)
{
   struct vn_ring *ring = &instance->ring.ring;

   const bool direct = vn_instance_submission_can_direct(instance, cs);
   /* too big for direct and not shmem-backed: upload to shmem first */
   if (!direct && cs->storage_type == VN_CS_ENCODER_STORAGE_POINTER) {
      cs = vn_instance_ring_cs_upload_locked(instance, cs);
      if (!cs)
         return VK_ERROR_OUT_OF_HOST_MEMORY;
      assert(cs->storage_type != VN_CS_ENCODER_STORAGE_POINTER);
   }

   struct vn_instance_submission submit;
   VkResult result =
      vn_instance_submission_prepare(&submit, cs, ring, extra_shmem, direct);
   if (result != VK_SUCCESS)
      return result;

   uint32_t seqno;
   /* vn_ring_submit reports whether the renderer must be woken up */
   const bool notify = vn_ring_submit(ring, submit.submit, submit.cs, &seqno);
   if (notify) {
      uint32_t notify_ring_data[8];
      struct vn_cs_encoder local_enc = VN_CS_ENCODER_INITIALIZER_LOCAL(
         notify_ring_data, sizeof(notify_ring_data));
      vn_encode_vkNotifyRingMESA(&local_enc, 0, instance->ring.id, seqno, 0);
      vn_renderer_submit_simple(instance->renderer, notify_ring_data,
                                vn_cs_encoder_get_len(&local_enc));
   }

   vn_instance_submission_cleanup(&submit);

   if (ring_seqno)
      *ring_seqno = seqno;

   return VK_SUCCESS;
}
544 
545 VkResult
vn_instance_ring_submit(struct vn_instance * instance,const struct vn_cs_encoder * cs)546 vn_instance_ring_submit(struct vn_instance *instance,
547                         const struct vn_cs_encoder *cs)
548 {
549    mtx_lock(&instance->ring.mutex);
550    VkResult result = vn_instance_ring_submit_locked(instance, cs, NULL, NULL);
551    mtx_unlock(&instance->ring.mutex);
552 
553    return result;
554 }
555 
/* Carve size bytes out of the reply shmem pool and point the renderer's
 * reply stream at them: when the pool rolls over to a new shmem, send
 * vkSetReplyCommandStreamMESA; always seek the stream to the allocated
 * offset with vkSeekReplyCommandStreamMESA.  *out_ptr receives the mapped
 * address of the reply region.  Returns NULL on allocation failure.
 * Caller holds the ring mutex (except during pre-ring init).
 */
static struct vn_renderer_shmem *
vn_instance_get_reply_shmem_locked(struct vn_instance *instance,
                                   size_t size,
                                   void **out_ptr)
{
   VN_TRACE_FUNC();
   struct vn_renderer_shmem_pool *pool = &instance->reply_shmem_pool;
   /* remember the current pool shmem to detect rollover below */
   const struct vn_renderer_shmem *saved_pool_shmem = pool->shmem;

   size_t offset;
   struct vn_renderer_shmem *shmem =
      vn_renderer_shmem_pool_alloc(instance->renderer, pool, size, &offset);
   if (!shmem)
      return NULL;

   assert(shmem == pool->shmem);
   *out_ptr = shmem->mmap_ptr + offset;

   /* the pool switched to a new shmem: tell the renderer to use it as the
    * reply command stream
    */
   if (shmem != saved_pool_shmem) {
      uint32_t set_reply_command_stream_data[16];
      struct vn_cs_encoder local_enc = VN_CS_ENCODER_INITIALIZER_LOCAL(
         set_reply_command_stream_data,
         sizeof(set_reply_command_stream_data));
      const struct VkCommandStreamDescriptionMESA stream = {
         .resourceId = shmem->res_id,
         .size = pool->size,
      };
      vn_encode_vkSetReplyCommandStreamMESA(&local_enc, 0, &stream);
      vn_cs_encoder_commit(&local_enc);

      /* vn_instance_init_experimental_features calls this before the ring is
       * created
       */
      if (likely(instance->ring.id)) {
         /* NOTE(review): without blob id 0 support a roundtrip is needed
          * before referencing the new shmem — presumably so the renderer
          * has attached it; confirm
          */
         if (unlikely(!instance->renderer->info.supports_blob_id_0))
            vn_instance_roundtrip(instance);

         vn_instance_ring_submit_locked(instance, &local_enc, NULL, NULL);
      } else {
         vn_renderer_submit_simple(instance->renderer,
                                   set_reply_command_stream_data,
                                   vn_cs_encoder_get_len(&local_enc));
      }
   }

   /* TODO avoid this seek command and go lock-free? */
   uint32_t seek_reply_command_stream_data[8];
   struct vn_cs_encoder local_enc = VN_CS_ENCODER_INITIALIZER_LOCAL(
      seek_reply_command_stream_data, sizeof(seek_reply_command_stream_data));
   vn_encode_vkSeekReplyCommandStreamMESA(&local_enc, 0, offset);
   vn_cs_encoder_commit(&local_enc);

   /* vn_instance_init_experimental_features calls this before the ring is
    * created
    */
   if (likely(instance->ring.id)) {
      vn_instance_ring_submit_locked(instance, &local_enc, NULL, NULL);
   } else {
      vn_renderer_submit_simple(instance->renderer,
                                seek_reply_command_stream_data,
                                vn_cs_encoder_get_len(&local_enc));
   }

   return shmem;
}
621 
/* Submit an encoded command through the ring.  When submit->reply_size is
 * non-zero, a reply shmem is allocated, the submission is waited upon, and
 * submit->reply is initialized for decoding the reply.  On failure the
 * command is dropped and counted in instance->ring.command_dropped.
 */
void
vn_instance_submit_command(struct vn_instance *instance,
                           struct vn_instance_submit_command *submit)
{
   void *reply_ptr = NULL;
   submit->reply_shmem = NULL;

   mtx_lock(&instance->ring.mutex);

   /* an empty encoder means a prior encode step failed */
   if (vn_cs_encoder_is_empty(&submit->command))
      goto fail;
   vn_cs_encoder_commit(&submit->command);

   if (submit->reply_size) {
      submit->reply_shmem = vn_instance_get_reply_shmem_locked(
         instance, submit->reply_size, &reply_ptr);
      if (!submit->reply_shmem)
         goto fail;
   }

   /* the reply shmem rides along as extra_shmem so it stays alive until
    * the ring has processed the command
    */
   uint32_t ring_seqno;
   VkResult result = vn_instance_ring_submit_locked(
      instance, &submit->command, submit->reply_shmem, &ring_seqno);

   mtx_unlock(&instance->ring.mutex);

   submit->reply = VN_CS_DECODER_INITIALIZER(reply_ptr, submit->reply_size);

   /* only wait when the caller expects a reply and the submit succeeded */
   if (submit->reply_size && result == VK_SUCCESS)
      vn_ring_wait(&instance->ring.ring, ring_seqno);

   return;

fail:
   instance->ring.command_dropped++;
   mtx_unlock(&instance->ring.mutex);
}
659 
660 /* instance commands */
661 
/* vkEnumerateInstanceVersion: report the highest API version this driver
 * build supports; the renderer version is negotiated later at instance
 * creation.
 */
VkResult
vn_EnumerateInstanceVersion(uint32_t *pApiVersion)
{
   *pApiVersion = VN_MAX_API_VERSION;
   return VK_SUCCESS;
}
668 
669 VkResult
vn_EnumerateInstanceExtensionProperties(const char * pLayerName,uint32_t * pPropertyCount,VkExtensionProperties * pProperties)670 vn_EnumerateInstanceExtensionProperties(const char *pLayerName,
671                                         uint32_t *pPropertyCount,
672                                         VkExtensionProperties *pProperties)
673 {
674    if (pLayerName)
675       return vn_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
676 
677    return vk_enumerate_instance_extension_properties(
678       &vn_instance_supported_extensions, pPropertyCount, pProperties);
679 }
680 
/* vkEnumerateInstanceLayerProperties: this driver implements no layers */
VkResult
vn_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
                                    VkLayerProperties *pProperties)
{
   *pPropertyCount = 0;
   return VK_SUCCESS;
}
688 
/* vkCreateInstance: allocate the instance, connect to the renderer, bring
 * up the ring and version negotiation, create the renderer-side instance,
 * and parse driconf options.  The fail path tears down whatever subset was
 * initialized, in reverse order.
 */
VkResult
vn_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkInstance *pInstance)
{
   VN_TRACE_FUNC();
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : vk_default_allocator();
   struct vn_instance *instance;
   VkResult result;

   vn_env_init();
   vn_trace_init();

   /* zalloc: all sub-objects start NULL/0, which the fail path relies on */
   instance = vk_zalloc(alloc, sizeof(*instance), VN_DEFAULT_ALIGN,
                        VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!instance)
      return vn_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);

   struct vk_instance_dispatch_table dispatch_table;
   vk_instance_dispatch_table_from_entrypoints(
      &dispatch_table, &vn_instance_entrypoints, true);
   vk_instance_dispatch_table_from_entrypoints(
      &dispatch_table, &wsi_instance_entrypoints, false);
   result = vn_instance_base_init(&instance->base,
                                  &vn_instance_supported_extensions,
                                  &dispatch_table, pCreateInfo, alloc);
   if (result != VK_SUCCESS) {
      vk_free(alloc, instance);
      return vn_error(NULL, result);
   }

   mtx_init(&instance->physical_device.mutex, mtx_plain);
   mtx_init(&instance->cs_shmem.mutex, mtx_plain);

   if (!vn_icd_supports_api_version(
          instance->base.base.app_info.api_version)) {
      result = VK_ERROR_INCOMPATIBLE_DRIVER;
      goto fail;
   }

   /* no layers are implemented; reject any explicitly enabled layer */
   if (pCreateInfo->enabledLayerCount) {
      result = VK_ERROR_LAYER_NOT_PRESENT;
      goto fail;
   }

   result = vn_instance_init_renderer(instance);
   if (result != VK_SUCCESS)
      goto fail;

   vn_cs_renderer_protocol_info_init(instance);

   /* 1 MiB pool for renderer replies */
   vn_renderer_shmem_pool_init(instance->renderer,
                               &instance->reply_shmem_pool, 1u << 20);

   result = vn_instance_init_experimental_features(instance);
   if (result != VK_SUCCESS)
      goto fail;

   result = vn_instance_init_ring(instance);
   if (result != VK_SUCCESS)
      goto fail;

   result = vn_instance_init_renderer_versions(instance);
   if (result != VK_SUCCESS)
      goto fail;

   /* 8 MiB pool for command streams */
   vn_renderer_shmem_pool_init(instance->renderer, &instance->cs_shmem.pool,
                               8u << 20);

   /* the renderer creates its own instance: strip driver-side extensions */
   VkInstanceCreateInfo local_create_info = *pCreateInfo;
   local_create_info.ppEnabledExtensionNames = NULL;
   local_create_info.enabledExtensionCount = 0;
   pCreateInfo = &local_create_info;

   /* raise apiVersion to the negotiated renderer API version if the app
    * asked for less
    */
   VkApplicationInfo local_app_info;
   if (instance->base.base.app_info.api_version <
       instance->renderer_api_version) {
      if (pCreateInfo->pApplicationInfo) {
         local_app_info = *pCreateInfo->pApplicationInfo;
         local_app_info.apiVersion = instance->renderer_api_version;
      } else {
         local_app_info = (const VkApplicationInfo){
            .sType = VK_STRUCTURE_TYPE_APPLICATION_INFO,
            .apiVersion = instance->renderer_api_version,
         };
      }
      local_create_info.pApplicationInfo = &local_app_info;
   }

   VkInstance instance_handle = vn_instance_to_handle(instance);
   result =
      vn_call_vkCreateInstance(instance, pCreateInfo, NULL, &instance_handle);
   if (result != VK_SUCCESS)
      goto fail;

   driParseOptionInfo(&instance->available_dri_options, vn_dri_options,
                      ARRAY_SIZE(vn_dri_options));
   driParseConfigFiles(&instance->dri_options,
                       &instance->available_dri_options, 0, "venus", NULL,
                       NULL, instance->base.base.app_info.app_name,
                       instance->base.base.app_info.app_version,
                       instance->base.base.app_info.engine_name,
                       instance->base.base.app_info.engine_version);

   instance->renderer->info.has_implicit_fencing =
      driQueryOptionb(&instance->dri_options, "venus_implicit_fencing");

   *pInstance = instance_handle;

   return VK_SUCCESS;

fail:
   /* ring.shmem is only set once vn_instance_init_ring succeeded */
   if (instance->ring.shmem) {
      uint32_t destroy_ring_data[4];
      struct vn_cs_encoder local_enc = VN_CS_ENCODER_INITIALIZER_LOCAL(
         destroy_ring_data, sizeof(destroy_ring_data));
      vn_encode_vkDestroyRingMESA(&local_enc, 0, instance->ring.id);
      vn_renderer_submit_simple(instance->renderer, destroy_ring_data,
                                vn_cs_encoder_get_len(&local_enc));

      mtx_destroy(&instance->ring.roundtrip_mutex);
      vn_cs_encoder_fini(&instance->ring.upload);
      vn_renderer_shmem_unref(instance->renderer, instance->ring.shmem);
      vn_ring_fini(&instance->ring.ring);
      mtx_destroy(&instance->ring.mutex);
   }

   /* NOTE(review): reached with instance->renderer == NULL when renderer
    * init failed; presumably pool_fini tolerates an uninitialized pool on a
    * NULL renderer — confirm against vn_renderer_shmem_pool_fini
    */
   vn_renderer_shmem_pool_fini(instance->renderer,
                               &instance->reply_shmem_pool);

   if (instance->renderer)
      vn_renderer_destroy(instance->renderer, alloc);

   mtx_destroy(&instance->physical_device.mutex);
   mtx_destroy(&instance->cs_shmem.mutex);

   vn_instance_base_fini(&instance->base);
   vk_free(alloc, instance);

   return vn_error(NULL, result);
}
831 
832 void
vn_DestroyInstance(VkInstance _instance,const VkAllocationCallbacks * pAllocator)833 vn_DestroyInstance(VkInstance _instance,
834                    const VkAllocationCallbacks *pAllocator)
835 {
836    VN_TRACE_FUNC();
837    struct vn_instance *instance = vn_instance_from_handle(_instance);
838    const VkAllocationCallbacks *alloc =
839       pAllocator ? pAllocator : &instance->base.base.alloc;
840 
841    if (!instance)
842       return;
843 
844    if (instance->physical_device.initialized) {
845       for (uint32_t i = 0; i < instance->physical_device.device_count; i++)
846          vn_physical_device_fini(&instance->physical_device.devices[i]);
847       vk_free(alloc, instance->physical_device.devices);
848       vk_free(alloc, instance->physical_device.groups);
849    }
850    mtx_destroy(&instance->physical_device.mutex);
851 
852    vn_call_vkDestroyInstance(instance, _instance, NULL);
853 
854    vn_renderer_shmem_pool_fini(instance->renderer, &instance->cs_shmem.pool);
855    mtx_destroy(&instance->cs_shmem.mutex);
856 
857    uint32_t destroy_ring_data[4];
858    struct vn_cs_encoder local_enc = VN_CS_ENCODER_INITIALIZER_LOCAL(
859       destroy_ring_data, sizeof(destroy_ring_data));
860    vn_encode_vkDestroyRingMESA(&local_enc, 0, instance->ring.id);
861    vn_renderer_submit_simple(instance->renderer, destroy_ring_data,
862                              vn_cs_encoder_get_len(&local_enc));
863 
864    mtx_destroy(&instance->ring.roundtrip_mutex);
865    vn_cs_encoder_fini(&instance->ring.upload);
866    vn_ring_fini(&instance->ring.ring);
867    mtx_destroy(&instance->ring.mutex);
868    vn_renderer_shmem_unref(instance->renderer, instance->ring.shmem);
869 
870    vn_renderer_shmem_pool_fini(instance->renderer,
871                                &instance->reply_shmem_pool);
872 
873    vn_renderer_destroy(instance->renderer, alloc);
874 
875    driDestroyOptionCache(&instance->dri_options);
876    driDestroyOptionInfo(&instance->available_dri_options);
877 
878    vn_instance_base_fini(&instance->base);
879    vk_free(alloc, instance);
880 }
881 
/* vkGetInstanceProcAddr: resolve entrypoints through the common runtime's
 * dispatch lookup
 */
PFN_vkVoidFunction
vn_GetInstanceProcAddr(VkInstance _instance, const char *pName)
{
   struct vn_instance *instance = vn_instance_from_handle(_instance);
   return vk_instance_get_proc_addr(&instance->base.base,
                                    &vn_instance_entrypoints, pName);
}
889