1 /*
2 * Copyright 2019 Google LLC
3 * SPDX-License-Identifier: MIT
4 *
5 * based in part on anv and radv which are:
6 * Copyright © 2015 Intel Corporation
7 * Copyright © 2016 Red Hat.
8 * Copyright © 2016 Bas Nieuwenhuizen
9 */
10
11 #include "vn_instance.h"
12
13 #include "util/driconf.h"
14 #include "venus-protocol/vn_protocol_driver_info.h"
15 #include "venus-protocol/vn_protocol_driver_instance.h"
16 #include "venus-protocol/vn_protocol_driver_transport.h"
17
18 #include "vn_icd.h"
19 #include "vn_physical_device.h"
20 #include "vn_renderer.h"
21
22 #define VN_INSTANCE_LARGE_RING_SIZE (64 * 1024)
23 #define VN_INSTANCE_LARGE_RING_DIRECT_THRESHOLD \
24 (VN_INSTANCE_LARGE_RING_SIZE / 16)
25
26 /* this must not exceed 2KiB for the ring to fit in a 4K page */
27 #define VN_INSTANCE_RING_SIZE (2 * 1024)
28 #define VN_INSTANCE_RING_DIRECT_THRESHOLD (VN_INSTANCE_RING_SIZE / 8)
29
30 /*
31 * Instance extensions add instance-level or physical-device-level
32 * functionalities. It seems renderer support is either unnecessary or
33 * optional. We should be able to advertise them or lie about them locally.
34 */
/* Instance-level extensions advertised by the driver.  These are all
 * implemented locally (or by the WSI common code); none requires renderer
 * support, per the comment above.
 */
static const struct vk_instance_extension_table
   vn_instance_supported_extensions = {
   /* promoted to VK_VERSION_1_1 */
   .KHR_device_group_creation = true,
   .KHR_external_fence_capabilities = true,
   .KHR_external_memory_capabilities = true,
   .KHR_external_semaphore_capabilities = true,
   .KHR_get_physical_device_properties2 = true,

#ifdef VN_USE_WSI_PLATFORM
   .KHR_get_surface_capabilities2 = true,
   .KHR_surface = true,
   .KHR_surface_protected_capabilities = true,
#endif
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
   .KHR_wayland_surface = true,
#endif
#ifdef VK_USE_PLATFORM_XCB_KHR
   .KHR_xcb_surface = true,
#endif
#ifdef VK_USE_PLATFORM_XLIB_KHR
   .KHR_xlib_surface = true,
#endif
};
59
/* driconf options recognized by this driver; parsed in vn_CreateInstance
 * via driParseOptionInfo()/driParseConfigFiles().
 */
static const driOptionDescription vn_dri_options[] = {
   /* clang-format off */
   DRI_CONF_SECTION_PERFORMANCE
      DRI_CONF_VK_X11_ENSURE_MIN_IMAGE_COUNT(false)
      DRI_CONF_VK_X11_OVERRIDE_MIN_IMAGE_COUNT(0)
      DRI_CONF_VK_X11_STRICT_IMAGE_COUNT(false)
   DRI_CONF_SECTION_END
   DRI_CONF_SECTION_DEBUG
      DRI_CONF_VK_WSI_FORCE_BGRA8_UNORM_FIRST(false)
   DRI_CONF_SECTION_END
   /* clang-format on */
};
72
73 static VkResult
vn_instance_init_renderer_versions(struct vn_instance * instance)74 vn_instance_init_renderer_versions(struct vn_instance *instance)
75 {
76 uint32_t instance_version = 0;
77 VkResult result =
78 vn_call_vkEnumerateInstanceVersion(instance, &instance_version);
79 if (result != VK_SUCCESS) {
80 if (VN_DEBUG(INIT))
81 vn_log(instance, "failed to enumerate renderer instance version");
82 return result;
83 }
84
85 if (instance_version < VN_MIN_RENDERER_VERSION) {
86 if (VN_DEBUG(INIT)) {
87 vn_log(instance, "unsupported renderer instance version %d.%d",
88 VK_VERSION_MAJOR(instance_version),
89 VK_VERSION_MINOR(instance_version));
90 }
91 return VK_ERROR_INITIALIZATION_FAILED;
92 }
93
94 if (VN_DEBUG(INIT)) {
95 vn_log(instance, "renderer instance version %d.%d.%d",
96 VK_VERSION_MAJOR(instance_version),
97 VK_VERSION_MINOR(instance_version),
98 VK_VERSION_PATCH(instance_version));
99 }
100
101 /* request at least VN_MIN_RENDERER_VERSION internally */
102 instance->renderer_api_version =
103 MAX2(instance->base.base.app_info.api_version, VN_MIN_RENDERER_VERSION);
104
105 /* instance version for internal use is capped */
106 instance_version = MIN3(instance_version, instance->renderer_api_version,
107 instance->renderer_info.vk_xml_version);
108 assert(instance_version >= VN_MIN_RENDERER_VERSION);
109
110 instance->renderer_version = instance_version;
111
112 return VK_SUCCESS;
113 }
114
/* Create the shared-memory command ring and register it with the renderer
 * via vkCreateRingMESA.  Also sets up the indirect-upload encoder and the
 * roundtrip bookkeeping.  Must run after the renderer connection exists.
 */
static VkResult
vn_instance_init_ring(struct vn_instance *instance)
{
   /* ring size depends on whether the renderer supports large rings */
   const size_t buf_size = instance->experimental.largeRing
                              ? VN_INSTANCE_LARGE_RING_SIZE
                              : VN_INSTANCE_RING_SIZE;
   /* 32-bit seqno for renderer roundtrips */
   const size_t extra_size = sizeof(uint32_t);
   struct vn_ring_layout layout;
   vn_ring_get_layout(buf_size, extra_size, &layout);

   instance->ring.shmem =
      vn_renderer_shmem_create(instance->renderer, layout.shmem_size);
   if (!instance->ring.shmem) {
      if (VN_DEBUG(INIT))
         vn_log(instance, "failed to allocate/map ring shmem");
      return VK_ERROR_OUT_OF_HOST_MEMORY;
   }

   mtx_init(&instance->ring.mutex, mtx_plain);

   struct vn_ring *ring = &instance->ring.ring;
   vn_ring_init(ring, instance->renderer, &layout,
                instance->ring.shmem->mmap_ptr);

   /* the ring's pointer doubles as its protocol-visible id */
   instance->ring.id = (uintptr_t)ring;

   const struct VkRingCreateInfoMESA info = {
      .sType = VK_STRUCTURE_TYPE_RING_CREATE_INFO_MESA,
      .resourceId = instance->ring.shmem->res_id,
      .size = layout.shmem_size,
      /* idle timeout in ns before the renderer parks the ring thread */
      .idleTimeout = 50ull * 1000 * 1000,
      .headOffset = layout.head_offset,
      .tailOffset = layout.tail_offset,
      .statusOffset = layout.status_offset,
      .bufferOffset = layout.buffer_offset,
      .bufferSize = layout.buffer_size,
      .extraOffset = layout.extra_offset,
      .extraSize = layout.extra_size,
   };

   uint32_t create_ring_data[64];
   struct vn_cs_encoder local_enc = VN_CS_ENCODER_INITIALIZER_LOCAL(
      create_ring_data, sizeof(create_ring_data));
   vn_encode_vkCreateRingMESA(&local_enc, 0, instance->ring.id, &info);
   /* submitted out-of-band; the ring does not exist yet */
   vn_renderer_submit_simple(instance->renderer, create_ring_data,
                             vn_cs_encoder_get_len(&local_enc));

   /* encoder for command streams too large to go through the ring buffer */
   vn_cs_encoder_init_indirect(&instance->ring.upload, instance,
                               1 * 1024 * 1024);

   mtx_init(&instance->ring.roundtrip_mutex, mtx_plain);
   /* seqno 0 is reserved as "no roundtrip yet" */
   instance->ring.roundtrip_next = 1;

   return VK_SUCCESS;
}
171
172 static struct vn_renderer_shmem *
173 vn_instance_get_reply_shmem_locked(struct vn_instance *instance,
174 size_t size,
175 void **ptr);
176
/* Query VkVenusExperimentalFeatures100000MESA from the renderer.  Leaves
 * instance->experimental zeroed (and still returns VK_SUCCESS) when the
 * renderer's VK_MESA_venus_protocol spec version is not exactly 100000.
 * Called before the ring exists, so the command goes through the simple
 * synchronous submit path.
 */
static VkResult
vn_instance_init_experimental_features(struct vn_instance *instance)
{
   if (instance->renderer_info.vk_mesa_venus_protocol_spec_version !=
       100000) {
      if (VN_DEBUG(INIT))
         vn_log(instance, "renderer supports no experimental features");
      return VK_SUCCESS;
   }

   size_t struct_size = sizeof(instance->experimental);

   /* prepare the reply shmem */
   const size_t reply_size =
      vn_sizeof_vkGetVenusExperimentalFeatureData100000MESA_reply(
         &struct_size, &instance->experimental);
   void *reply_ptr;
   struct vn_renderer_shmem *reply_shmem =
      vn_instance_get_reply_shmem_locked(instance, reply_size, &reply_ptr);
   if (!reply_shmem)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   /* encode the command */
   uint32_t local_data[16];
   struct vn_cs_encoder local_enc =
      VN_CS_ENCODER_INITIALIZER_LOCAL(local_data, sizeof(local_data));
   vn_encode_vkGetVenusExperimentalFeatureData100000MESA(
      &local_enc, VK_COMMAND_GENERATE_REPLY_BIT_EXT, &struct_size,
      &instance->experimental);

   /* synchronous submit: the reply is in reply_ptr when this returns */
   VkResult result = vn_renderer_submit_simple_sync(
      instance->renderer, local_data, vn_cs_encoder_get_len(&local_enc));
   if (result != VK_SUCCESS) {
      vn_renderer_shmem_unref(instance->renderer, reply_shmem);
      return result;
   }

   struct vn_cs_decoder reply_dec =
      VN_CS_DECODER_INITIALIZER(reply_ptr, reply_size);
   vn_decode_vkGetVenusExperimentalFeatureData100000MESA_reply(
      &reply_dec, &struct_size, &instance->experimental);
   vn_renderer_shmem_unref(instance->renderer, reply_shmem);

   if (VN_DEBUG(INIT)) {
      vn_log(instance,
             "VkVenusExperimentalFeatures100000MESA is as below:"
             "\n\tmemoryResourceAllocationSize = %u"
             "\n\tglobalFencing = %u"
             "\n\tlargeRing = %u",
             instance->experimental.memoryResourceAllocationSize,
             instance->experimental.globalFencing,
             instance->experimental.largeRing);
   }

   return VK_SUCCESS;
}
233
/* Connect to the renderer and validate/clamp its capabilities.  The wire
 * format version must match exactly; the vk.xml version and venus extension
 * spec versions are clamped down to what this driver build was generated
 * against.  On failure the renderer is left for the caller's fail path to
 * destroy.
 */
static VkResult
vn_instance_init_renderer(struct vn_instance *instance)
{
   const VkAllocationCallbacks *alloc = &instance->base.base.alloc;

   VkResult result = vn_renderer_create(instance, alloc, &instance->renderer);
   if (result != VK_SUCCESS)
      return result;

   vn_renderer_get_info(instance->renderer, &instance->renderer_info);

   /* the serialization format admits no skew: exact match required */
   uint32_t version = vn_info_wire_format_version();
   if (instance->renderer_info.wire_format_version != version) {
      if (VN_DEBUG(INIT)) {
         vn_log(instance, "wire format version %d != %d",
                instance->renderer_info.wire_format_version, version);
      }
      return VK_ERROR_INITIALIZATION_FAILED;
   }

   /* clamp the renderer's vk.xml version to ours, then enforce the floor */
   version = vn_info_vk_xml_version();
   if (instance->renderer_info.vk_xml_version > version)
      instance->renderer_info.vk_xml_version = version;
   if (instance->renderer_info.vk_xml_version < VN_MIN_RENDERER_VERSION) {
      if (VN_DEBUG(INIT)) {
         vn_log(instance, "vk xml version %d.%d.%d < %d.%d.%d",
                VK_VERSION_MAJOR(instance->renderer_info.vk_xml_version),
                VK_VERSION_MINOR(instance->renderer_info.vk_xml_version),
                VK_VERSION_PATCH(instance->renderer_info.vk_xml_version),
                VK_VERSION_MAJOR(VN_MIN_RENDERER_VERSION),
                VK_VERSION_MINOR(VN_MIN_RENDERER_VERSION),
                VK_VERSION_PATCH(VN_MIN_RENDERER_VERSION));
      }
      return VK_ERROR_INITIALIZATION_FAILED;
   }

   /* clamp venus-protocol extension spec versions to what we encode */
   version = vn_info_extension_spec_version("VK_EXT_command_serialization");
   if (instance->renderer_info.vk_ext_command_serialization_spec_version >
       version) {
      instance->renderer_info.vk_ext_command_serialization_spec_version =
         version;
   }

   version = vn_info_extension_spec_version("VK_MESA_venus_protocol");
   if (instance->renderer_info.vk_mesa_venus_protocol_spec_version >
       version) {
      instance->renderer_info.vk_mesa_venus_protocol_spec_version = version;
   }

   if (VN_DEBUG(INIT)) {
      vn_log(instance, "connected to renderer");
      vn_log(instance, "wire format version %d",
             instance->renderer_info.wire_format_version);
      vn_log(instance, "vk xml version %d.%d.%d",
             VK_VERSION_MAJOR(instance->renderer_info.vk_xml_version),
             VK_VERSION_MINOR(instance->renderer_info.vk_xml_version),
             VK_VERSION_PATCH(instance->renderer_info.vk_xml_version));
      vn_log(
         instance, "VK_EXT_command_serialization spec version %d",
         instance->renderer_info.vk_ext_command_serialization_spec_version);
      vn_log(instance, "VK_MESA_venus_protocol spec version %d",
             instance->renderer_info.vk_mesa_venus_protocol_spec_version);
   }

   return VK_SUCCESS;
}
300
301 VkResult
vn_instance_submit_roundtrip(struct vn_instance * instance,uint32_t * roundtrip_seqno)302 vn_instance_submit_roundtrip(struct vn_instance *instance,
303 uint32_t *roundtrip_seqno)
304 {
305 uint32_t write_ring_extra_data[8];
306 struct vn_cs_encoder local_enc = VN_CS_ENCODER_INITIALIZER_LOCAL(
307 write_ring_extra_data, sizeof(write_ring_extra_data));
308
309 /* submit a vkWriteRingExtraMESA through the renderer */
310 mtx_lock(&instance->ring.roundtrip_mutex);
311 const uint32_t seqno = instance->ring.roundtrip_next++;
312 vn_encode_vkWriteRingExtraMESA(&local_enc, 0, instance->ring.id, 0, seqno);
313 VkResult result =
314 vn_renderer_submit_simple(instance->renderer, write_ring_extra_data,
315 vn_cs_encoder_get_len(&local_enc));
316 mtx_unlock(&instance->ring.roundtrip_mutex);
317
318 *roundtrip_seqno = seqno;
319 return result;
320 }
321
/* Busy-wait (with vn_relax backoff) until the renderer has written
 * roundtrip_seqno, or a later one, into the ring's extra region.
 */
void
vn_instance_wait_roundtrip(struct vn_instance *instance,
                           uint32_t roundtrip_seqno)
{
   const struct vn_ring *ring = &instance->ring.ring;
   const volatile atomic_uint *ptr = ring->shared.extra;
   uint32_t iter = 0;
   do {
      const uint32_t cur = atomic_load_explicit(ptr, memory_order_acquire);
      /* the second clause handles 32-bit seqno wraparound: when the
       * renderer's value is "far behind" the target, it has actually
       * wrapped past it
       */
      if (cur >= roundtrip_seqno || roundtrip_seqno - cur >= INT32_MAX)
         break;
      vn_relax(&iter, "roundtrip");
   } while (true);
}
336
/* Per-submission scratch state.  `cs` points either at the caller's encoder
 * (direct path) or at `indirect.cs`, which wraps the caller's buffers in a
 * vkExecuteCommandStreamsMESA command.  `indirect.data` is inline storage
 * for small indirect commands; larger ones are heap-allocated (see
 * vn_instance_submission_cleanup).
 */
struct vn_instance_submission {
   const struct vn_cs_encoder *cs;
   struct vn_ring_submit *submit;

   struct {
      struct vn_cs_encoder cs;
      struct vn_cs_encoder_buffer buffer;
      uint32_t data[64];
   } indirect;
};
347
/* Return the encoder to place on the ring.  For a direct submission the
 * caller's cs is used as-is; otherwise the cs's committed buffers are
 * described as command streams and wrapped in a single
 * vkExecuteCommandStreamsMESA command encoded into submit->indirect.
 * Returns NULL on allocation failure.
 */
static const struct vn_cs_encoder *
vn_instance_submission_get_cs(struct vn_instance_submission *submit,
                              const struct vn_cs_encoder *cs,
                              bool direct)
{
   if (direct)
      return cs;

   /* stack storage for the common case; heap for many buffers */
   VkCommandStreamDescriptionMESA local_descs[8];
   VkCommandStreamDescriptionMESA *descs = local_descs;
   if (cs->buffer_count > ARRAY_SIZE(local_descs)) {
      descs =
         malloc(sizeof(VkCommandStreamDescriptionMESA) * cs->buffer_count);
      if (!descs)
         return NULL;
   }

   /* skip buffers with nothing committed */
   uint32_t desc_count = 0;
   for (uint32_t i = 0; i < cs->buffer_count; i++) {
      const struct vn_cs_encoder_buffer *buf = &cs->buffers[i];
      if (buf->committed_size) {
         descs[desc_count++] = (VkCommandStreamDescriptionMESA){
            .resourceId = buf->shmem->res_id,
            .offset = buf->offset,
            .size = buf->committed_size,
         };
      }
   }

   /* encode into the inline buffer when it fits, else heap-allocate */
   const size_t exec_size = vn_sizeof_vkExecuteCommandStreamsMESA(
      desc_count, descs, NULL, 0, NULL, 0);
   void *exec_data = submit->indirect.data;
   if (exec_size > sizeof(submit->indirect.data)) {
      exec_data = malloc(exec_size);
      if (!exec_data) {
         if (descs != local_descs)
            free(descs);
         return NULL;
      }
   }

   submit->indirect.buffer = VN_CS_ENCODER_BUFFER_INITIALIZER(exec_data);
   submit->indirect.cs =
      VN_CS_ENCODER_INITIALIZER(&submit->indirect.buffer, exec_size);
   vn_encode_vkExecuteCommandStreamsMESA(&submit->indirect.cs, 0, desc_count,
                                         descs, NULL, 0, NULL, 0);
   vn_cs_encoder_commit(&submit->indirect.cs);

   if (descs != local_descs)
      free(descs);

   return &submit->indirect.cs;
}
401
/* Reserve a vn_ring_submit and populate it with references to every shmem
 * the submission keeps alive: the cs buffers (indirect path only) plus the
 * optional extra shmem (e.g. a reply shmem).  Returns NULL when no submit
 * slot is available.
 */
static struct vn_ring_submit *
vn_instance_submission_get_ring_submit(struct vn_ring *ring,
                                       const struct vn_cs_encoder *cs,
                                       struct vn_renderer_shmem *extra_shmem,
                                       bool direct)
{
   const uint32_t shmem_count =
      (direct ? 0 : cs->buffer_count) + (extra_shmem ? 1 : 0);
   struct vn_ring_submit *submit = vn_ring_get_submit(ring, shmem_count);
   if (!submit)
      return NULL;

   submit->shmem_count = shmem_count;
   if (!direct) {
      for (uint32_t i = 0; i < cs->buffer_count; i++) {
         submit->shmems[i] =
            vn_renderer_shmem_ref(ring->renderer, cs->buffers[i].shmem);
      }
   }
   /* the extra shmem, when present, always occupies the last slot */
   if (extra_shmem) {
      submit->shmems[shmem_count - 1] =
         vn_renderer_shmem_ref(ring->renderer, extra_shmem);
   }

   return submit;
}
428
429 static void
vn_instance_submission_cleanup(struct vn_instance_submission * submit)430 vn_instance_submission_cleanup(struct vn_instance_submission *submit)
431 {
432 if (submit->cs == &submit->indirect.cs &&
433 submit->indirect.buffer.base != submit->indirect.data)
434 free(submit->indirect.buffer.base);
435 }
436
437 static VkResult
vn_instance_submission_prepare(struct vn_instance_submission * submit,const struct vn_cs_encoder * cs,struct vn_ring * ring,struct vn_renderer_shmem * extra_shmem,bool direct)438 vn_instance_submission_prepare(struct vn_instance_submission *submit,
439 const struct vn_cs_encoder *cs,
440 struct vn_ring *ring,
441 struct vn_renderer_shmem *extra_shmem,
442 bool direct)
443 {
444 submit->cs = vn_instance_submission_get_cs(submit, cs, direct);
445 if (!submit->cs)
446 return VK_ERROR_OUT_OF_HOST_MEMORY;
447
448 submit->submit =
449 vn_instance_submission_get_ring_submit(ring, cs, extra_shmem, direct);
450 if (!submit->submit) {
451 vn_instance_submission_cleanup(submit);
452 return VK_ERROR_OUT_OF_HOST_MEMORY;
453 }
454
455 return VK_SUCCESS;
456 }
457
458 static bool
vn_instance_submission_can_direct(const struct vn_instance * instance,const struct vn_cs_encoder * cs)459 vn_instance_submission_can_direct(const struct vn_instance *instance,
460 const struct vn_cs_encoder *cs)
461 {
462 const size_t threshold = instance->experimental.largeRing
463 ? VN_INSTANCE_LARGE_RING_DIRECT_THRESHOLD
464 : VN_INSTANCE_RING_DIRECT_THRESHOLD;
465 return vn_cs_encoder_get_len(cs) <= threshold;
466 }
467
/* Copy a too-large direct cs into the instance's shared indirect upload
 * encoder so it can be submitted by reference.  Returns the upload encoder,
 * or NULL on allocation failure.  Caller holds ring.mutex.
 */
static struct vn_cs_encoder *
vn_instance_ring_cs_upload_locked(struct vn_instance *instance,
                                  const struct vn_cs_encoder *cs)
{
   /* only single-buffer, non-indirect encoders can be uploaded wholesale */
   assert(!cs->indirect && cs->buffer_count == 1);
   const void *cs_data = cs->buffers[0].base;
   const size_t cs_size = cs->total_committed_size;
   assert(cs_size == vn_cs_encoder_get_len(cs));

   struct vn_cs_encoder *upload = &instance->ring.upload;
   vn_cs_encoder_reset(upload);

   if (!vn_cs_encoder_reserve(upload, cs_size))
      return NULL;

   vn_cs_encoder_write(upload, cs_size, cs_data, cs_size);
   vn_cs_encoder_commit(upload);
   /* ensure the renderer is done with the previous use of this buffer
    * before it gets resubmitted
    */
   vn_instance_wait_roundtrip(instance, upload->current_buffer_roundtrip);

   return upload;
}
489
/* Submit a cs through the ring.  Small streams go into the ring buffer
 * directly; larger non-indirect ones are first copied to the shared upload
 * encoder.  extra_shmem (optional) is pinned for the submission's lifetime;
 * ring_seqno (optional) receives the ring sequence number.  Caller holds
 * ring.mutex.
 */
static VkResult
vn_instance_ring_submit_locked(struct vn_instance *instance,
                               const struct vn_cs_encoder *cs,
                               struct vn_renderer_shmem *extra_shmem,
                               uint32_t *ring_seqno)
{
   struct vn_ring *ring = &instance->ring.ring;

   const bool direct = vn_instance_submission_can_direct(instance, cs);
   if (!direct && !cs->indirect) {
      /* too large for the ring buffer: stage through the upload encoder */
      cs = vn_instance_ring_cs_upload_locked(instance, cs);
      if (!cs)
         return VK_ERROR_OUT_OF_HOST_MEMORY;
      assert(cs->indirect);
   }

   struct vn_instance_submission submit;
   VkResult result =
      vn_instance_submission_prepare(&submit, cs, ring, extra_shmem, direct);
   if (result != VK_SUCCESS)
      return result;

   uint32_t seqno;
   /* vn_ring_submit tells us whether the renderer needs a wake-up */
   const bool notify = vn_ring_submit(ring, submit.submit, submit.cs, &seqno);
   if (notify) {
      uint32_t notify_ring_data[8];
      struct vn_cs_encoder local_enc = VN_CS_ENCODER_INITIALIZER_LOCAL(
         notify_ring_data, sizeof(notify_ring_data));
      vn_encode_vkNotifyRingMESA(&local_enc, 0, instance->ring.id, seqno, 0);
      vn_renderer_submit_simple(instance->renderer, notify_ring_data,
                                vn_cs_encoder_get_len(&local_enc));
   }

   vn_instance_submission_cleanup(&submit);

   if (ring_seqno)
      *ring_seqno = seqno;

   return VK_SUCCESS;
}
530
531 VkResult
vn_instance_ring_submit(struct vn_instance * instance,const struct vn_cs_encoder * cs)532 vn_instance_ring_submit(struct vn_instance *instance,
533 const struct vn_cs_encoder *cs)
534 {
535 mtx_lock(&instance->ring.mutex);
536 VkResult result = vn_instance_ring_submit_locked(instance, cs, NULL, NULL);
537 mtx_unlock(&instance->ring.mutex);
538
539 return result;
540 }
541
542 static bool
vn_instance_grow_reply_shmem_locked(struct vn_instance * instance,size_t size)543 vn_instance_grow_reply_shmem_locked(struct vn_instance *instance, size_t size)
544 {
545 const size_t min_shmem_size = 1 << 20;
546
547 size_t shmem_size =
548 instance->reply.size ? instance->reply.size : min_shmem_size;
549 while (shmem_size < size) {
550 shmem_size <<= 1;
551 if (!shmem_size)
552 return false;
553 }
554
555 struct vn_renderer_shmem *shmem =
556 vn_renderer_shmem_create(instance->renderer, shmem_size);
557 if (!shmem)
558 return false;
559
560 if (instance->reply.shmem)
561 vn_renderer_shmem_unref(instance->renderer, instance->reply.shmem);
562 instance->reply.shmem = shmem;
563 instance->reply.size = shmem_size;
564 instance->reply.used = 0;
565 instance->reply.ptr = shmem->mmap_ptr;
566
567 return true;
568 }
569
/* Carve `size` bytes out of the reply shmem, growing (and re-registering)
 * it with the renderer when needed, and seek the renderer's reply stream to
 * the returned offset.  *ptr receives the CPU pointer for the reply; the
 * returned shmem reference must be unref'd by the caller.  Works both
 * before the ring exists (simple submit) and after (ring submit).  Caller
 * holds ring.mutex.
 */
static struct vn_renderer_shmem *
vn_instance_get_reply_shmem_locked(struct vn_instance *instance,
                                   size_t size,
                                   void **ptr)
{
   if (unlikely(instance->reply.used + size > instance->reply.size)) {
      if (!vn_instance_grow_reply_shmem_locked(instance, size))
         return NULL;

      /* tell the renderer about the new reply stream */
      uint32_t set_reply_command_stream_data[16];
      struct vn_cs_encoder local_enc = VN_CS_ENCODER_INITIALIZER_LOCAL(
         set_reply_command_stream_data,
         sizeof(set_reply_command_stream_data));
      const struct VkCommandStreamDescriptionMESA stream = {
         .resourceId = instance->reply.shmem->res_id,
         .size = instance->reply.size,
      };
      vn_encode_vkSetReplyCommandStreamMESA(&local_enc, 0, &stream);
      vn_cs_encoder_commit(&local_enc);

      if (likely(instance->ring.id)) {
         /* flush pending out-of-band work before switching streams */
         vn_instance_roundtrip(instance);
         vn_instance_ring_submit_locked(instance, &local_enc, NULL, NULL);
      } else {
         vn_renderer_submit_simple(instance->renderer,
                                   set_reply_command_stream_data,
                                   vn_cs_encoder_get_len(&local_enc));
      }
   }

   /* TODO avoid this seek command and go lock-free? */
   uint32_t seek_reply_command_stream_data[8];
   struct vn_cs_encoder local_enc = VN_CS_ENCODER_INITIALIZER_LOCAL(
      seek_reply_command_stream_data, sizeof(seek_reply_command_stream_data));
   const size_t offset = instance->reply.used;
   vn_encode_vkSeekReplyCommandStreamMESA(&local_enc, 0, offset);
   vn_cs_encoder_commit(&local_enc);

   if (likely(instance->ring.id)) {
      vn_instance_ring_submit_locked(instance, &local_enc, NULL, NULL);
   } else {
      vn_renderer_submit_simple(instance->renderer,
                                seek_reply_command_stream_data,
                                vn_cs_encoder_get_len(&local_enc));
   }

   *ptr = instance->reply.ptr + offset;
   instance->reply.used += size;

   return vn_renderer_shmem_ref(instance->renderer, instance->reply.shmem);
}
621
/* Submit an encoded command through the ring and, when submit->reply_size
 * is set, synchronously wait for the reply.  On failure the submission is
 * dropped (counted in ring.command_dropped) and submit->reply_shmem stays
 * NULL so callers can detect it.
 */
void
vn_instance_submit_command(struct vn_instance *instance,
                           struct vn_instance_submit_command *submit)
{
   void *reply_ptr = NULL;
   submit->reply_shmem = NULL;

   mtx_lock(&instance->ring.mutex);

   /* an empty encoder means a prior encode step failed */
   if (vn_cs_encoder_is_empty(&submit->command))
      goto fail;
   vn_cs_encoder_commit(&submit->command);

   if (submit->reply_size) {
      submit->reply_shmem = vn_instance_get_reply_shmem_locked(
         instance, submit->reply_size, &reply_ptr);
      if (!submit->reply_shmem)
         goto fail;
   }

   uint32_t ring_seqno;
   /* the reply shmem rides along as extra_shmem so it stays alive until
    * the renderer has processed this submission
    */
   VkResult result = vn_instance_ring_submit_locked(
      instance, &submit->command, submit->reply_shmem, &ring_seqno);

   mtx_unlock(&instance->ring.mutex);

   submit->reply = VN_CS_DECODER_INITIALIZER(reply_ptr, submit->reply_size);

   /* wait only when a reply is expected and the submit actually went out */
   if (submit->reply_size && result == VK_SUCCESS)
      vn_ring_wait(&instance->ring.ring, ring_seqno);

   return;

fail:
   instance->ring.command_dropped++;
   mtx_unlock(&instance->ring.mutex);
}
659
660 /* instance commands */
661
/* Loader entrypoint: report the highest Vulkan API version this driver
 * supports.  Answered locally without consulting the renderer.
 */
VkResult
vn_EnumerateInstanceVersion(uint32_t *pApiVersion)
{
   *pApiVersion = VN_MAX_API_VERSION;
   return VK_SUCCESS;
}
668
669 VkResult
vn_EnumerateInstanceExtensionProperties(const char * pLayerName,uint32_t * pPropertyCount,VkExtensionProperties * pProperties)670 vn_EnumerateInstanceExtensionProperties(const char *pLayerName,
671 uint32_t *pPropertyCount,
672 VkExtensionProperties *pProperties)
673 {
674 if (pLayerName)
675 return vn_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
676
677 return vk_enumerate_instance_extension_properties(
678 &vn_instance_supported_extensions, pPropertyCount, pProperties);
679 }
680
/* Loader entrypoint: this driver implements no layers. */
VkResult
vn_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
                                    VkLayerProperties *pProperties)
{
   *pPropertyCount = 0;
   return VK_SUCCESS;
}
688
/* Create the venus instance: connect to the renderer, negotiate versions,
 * set up the command ring, then create the renderer-side VkInstance with a
 * filtered create info (extensions stripped, apiVersion possibly raised).
 * The fail path tears down in reverse order of initialization.
 */
VkResult
vn_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkInstance *pInstance)
{
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : vk_default_allocator();
   struct vn_instance *instance;
   VkResult result;

   vn_debug_init();
   vn_trace_init();

   instance = vk_zalloc(alloc, sizeof(*instance), VN_DEFAULT_ALIGN,
                        VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!instance)
      return vn_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);

   /* driver entrypoints first, WSI entrypoints fill remaining slots */
   struct vk_instance_dispatch_table dispatch_table;
   vk_instance_dispatch_table_from_entrypoints(
      &dispatch_table, &vn_instance_entrypoints, true);
   vk_instance_dispatch_table_from_entrypoints(
      &dispatch_table, &wsi_instance_entrypoints, false);
   result = vn_instance_base_init(&instance->base,
                                  &vn_instance_supported_extensions,
                                  &dispatch_table, pCreateInfo, alloc);
   if (result != VK_SUCCESS) {
      vk_free(alloc, instance);
      return vn_error(NULL, result);
   }

   mtx_init(&instance->physical_device.mutex, mtx_plain);

   if (!vn_icd_supports_api_version(
          instance->base.base.app_info.api_version)) {
      result = VK_ERROR_INCOMPATIBLE_DRIVER;
      goto fail;
   }

   /* no layers are implemented by this driver */
   if (pCreateInfo->enabledLayerCount) {
      result = VK_ERROR_LAYER_NOT_PRESENT;
      goto fail;
   }

   result = vn_instance_init_renderer(instance);
   if (result != VK_SUCCESS)
      goto fail;

   /* must precede ring init: the ring size depends on largeRing */
   result = vn_instance_init_experimental_features(instance);
   if (result != VK_SUCCESS)
      goto fail;

   result = vn_instance_init_ring(instance);
   if (result != VK_SUCCESS)
      goto fail;

   result = vn_instance_init_renderer_versions(instance);
   if (result != VK_SUCCESS)
      goto fail;

   /* strip instance extensions: they are handled locally, not forwarded */
   VkInstanceCreateInfo local_create_info = *pCreateInfo;
   local_create_info.ppEnabledExtensionNames = NULL;
   local_create_info.enabledExtensionCount = 0;
   pCreateInfo = &local_create_info;

   /* raise apiVersion to the internally requested renderer version */
   VkApplicationInfo local_app_info;
   if (instance->base.base.app_info.api_version <
       instance->renderer_api_version) {
      if (pCreateInfo->pApplicationInfo) {
         local_app_info = *pCreateInfo->pApplicationInfo;
         local_app_info.apiVersion = instance->renderer_api_version;
      } else {
         local_app_info = (const VkApplicationInfo){
            .sType = VK_STRUCTURE_TYPE_APPLICATION_INFO,
            .apiVersion = instance->renderer_api_version,
         };
      }
      local_create_info.pApplicationInfo = &local_app_info;
   }

   VkInstance instance_handle = vn_instance_to_handle(instance);
   result =
      vn_call_vkCreateInstance(instance, pCreateInfo, NULL, &instance_handle);
   if (result != VK_SUCCESS)
      goto fail;

   driParseOptionInfo(&instance->available_dri_options, vn_dri_options,
                      ARRAY_SIZE(vn_dri_options));
   driParseConfigFiles(&instance->dri_options,
                       &instance->available_dri_options, 0, "venus", NULL, NULL,
                       instance->base.base.app_info.app_name,
                       instance->base.base.app_info.app_version,
                       instance->base.base.app_info.engine_name,
                       instance->base.base.app_info.engine_version);

   *pInstance = instance_handle;

   return VK_SUCCESS;

fail:
   if (instance->reply.shmem)
      vn_renderer_shmem_unref(instance->renderer, instance->reply.shmem);

   /* ring.shmem non-NULL implies vn_instance_init_ring succeeded */
   if (instance->ring.shmem) {
      uint32_t destroy_ring_data[4];
      struct vn_cs_encoder local_enc = VN_CS_ENCODER_INITIALIZER_LOCAL(
         destroy_ring_data, sizeof(destroy_ring_data));
      vn_encode_vkDestroyRingMESA(&local_enc, 0, instance->ring.id);
      vn_renderer_submit_simple(instance->renderer, destroy_ring_data,
                                vn_cs_encoder_get_len(&local_enc));

      mtx_destroy(&instance->ring.roundtrip_mutex);
      vn_cs_encoder_fini(&instance->ring.upload);
      vn_renderer_shmem_unref(instance->renderer, instance->ring.shmem);
      vn_ring_fini(&instance->ring.ring);
      mtx_destroy(&instance->ring.mutex);
   }

   if (instance->renderer)
      vn_renderer_destroy(instance->renderer, alloc);

   mtx_destroy(&instance->physical_device.mutex);

   vn_instance_base_fini(&instance->base);
   vk_free(alloc, instance);

   return vn_error(NULL, result);
}
817
818 void
vn_DestroyInstance(VkInstance _instance,const VkAllocationCallbacks * pAllocator)819 vn_DestroyInstance(VkInstance _instance,
820 const VkAllocationCallbacks *pAllocator)
821 {
822 struct vn_instance *instance = vn_instance_from_handle(_instance);
823 const VkAllocationCallbacks *alloc =
824 pAllocator ? pAllocator : &instance->base.base.alloc;
825
826 if (!instance)
827 return;
828
829 if (instance->physical_device.initialized) {
830 for (uint32_t i = 0; i < instance->physical_device.device_count; i++)
831 vn_physical_device_fini(&instance->physical_device.devices[i]);
832 vk_free(alloc, instance->physical_device.devices);
833 vk_free(alloc, instance->physical_device.groups);
834 }
835 mtx_destroy(&instance->physical_device.mutex);
836
837 vn_call_vkDestroyInstance(instance, _instance, NULL);
838
839 vn_renderer_shmem_unref(instance->renderer, instance->reply.shmem);
840
841 uint32_t destroy_ring_data[4];
842 struct vn_cs_encoder local_enc = VN_CS_ENCODER_INITIALIZER_LOCAL(
843 destroy_ring_data, sizeof(destroy_ring_data));
844 vn_encode_vkDestroyRingMESA(&local_enc, 0, instance->ring.id);
845 vn_renderer_submit_simple(instance->renderer, destroy_ring_data,
846 vn_cs_encoder_get_len(&local_enc));
847
848 mtx_destroy(&instance->ring.roundtrip_mutex);
849 vn_cs_encoder_fini(&instance->ring.upload);
850 vn_ring_fini(&instance->ring.ring);
851 mtx_destroy(&instance->ring.mutex);
852 vn_renderer_shmem_unref(instance->renderer, instance->ring.shmem);
853
854 vn_renderer_destroy(instance->renderer, alloc);
855
856 driDestroyOptionCache(&instance->dri_options);
857 driDestroyOptionInfo(&instance->available_dri_options);
858
859 vn_instance_base_fini(&instance->base);
860 vk_free(alloc, instance);
861 }
862
863 PFN_vkVoidFunction
vn_GetInstanceProcAddr(VkInstance _instance,const char * pName)864 vn_GetInstanceProcAddr(VkInstance _instance, const char *pName)
865 {
866 struct vn_instance *instance = vn_instance_from_handle(_instance);
867 return vk_instance_get_proc_addr(&instance->base.base,
868 &vn_instance_entrypoints, pName);
869 }
870