/*
 * Copyright 2020 Google LLC
 * SPDX-License-Identifier: MIT
 */

#include "vkr_queue.h"

#include "venus-protocol/vn_protocol_renderer_queue.h"

#include "vkr_physical_device.h"
#include "vkr_queue_gen.h"

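/* Allocates a queue sync, reusing a VkFence from the device's free list when
 * one is available and creating a new fence otherwise.  New fences are made
 * exportable as sync files when the driver supports KHR_external_fence_fd.
 * The free list is locked only when VKR_RENDERER_ASYNC_FENCE_CB is set,
 * because only then can syncs be freed from another thread.
 */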
struct vkr_queue_sync *
vkr_device_alloc_queue_sync(struct vkr_device *dev,
                            uint32_t fence_flags,
                            uint64_t queue_id,
                            void *fence_cookie)
{
   struct vkr_queue_sync *sync;

   if (vkr_renderer_flags & VKR_RENDERER_ASYNC_FENCE_CB)
      mtx_lock(&dev->free_sync_mutex);

   if (LIST_IS_EMPTY(&dev->free_syncs)) {
      if (vkr_renderer_flags & VKR_RENDERER_ASYNC_FENCE_CB)
         mtx_unlock(&dev->free_sync_mutex);

      sync = malloc(sizeof(*sync));
      if (!sync)
         return NULL;

      const VkExportFenceCreateInfo export_info = {
         .sType = VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO,
         .handleTypes = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT,
      };
      const struct VkFenceCreateInfo create_info = {
         .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
         .pNext = dev->physical_device->KHR_external_fence_fd ? &export_info : NULL,
      };
      VkResult result =
         vkCreateFence(dev->base.handle.device, &create_info, NULL, &sync->fence);
      if (result != VK_SUCCESS) {
         free(sync);
         return NULL;
      }
   } else {
      sync = LIST_ENTRY(struct vkr_queue_sync, dev->free_syncs.next, head);
      list_del(&sync->head);

      if (vkr_renderer_flags & VKR_RENDERER_ASYNC_FENCE_CB)
         mtx_unlock(&dev->free_sync_mutex);

      vkResetFences(dev->base.handle.device, 1, &sync->fence);
   }

   sync->flags = fence_flags;
   sync->queue_id = queue_id;
   sync->fence_cookie = fence_cookie;

   return sync;
}

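/* Returns a queue sync, with its VkFence, to the device's free list so that
 * vkr_device_alloc_queue_sync can reuse it.  Locking is needed only when the
 * sync thread can free syncs concurrently via the async fence callback.
 */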
void
vkr_device_free_queue_sync(struct vkr_device *dev, struct vkr_queue_sync *sync)
{
   if (vkr_renderer_flags & VKR_RENDERER_ASYNC_FENCE_CB) {
      mtx_lock(&dev->free_sync_mutex);
      list_addtail(&sync->head, &dev->free_syncs);
      mtx_unlock(&dev->free_sync_mutex);
   } else {
      list_addtail(&sync->head, &dev->free_syncs);
   }
}

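/* Collects the signaled syncs of a queue onto retired_syncs and reports
 * whether any syncs are still pending.  With a sync thread, signaled syncs
 * have already been moved to queue->signaled_syncs; otherwise the pending
 * fences are polled here in submission order.  A sync marked
 * VIRGL_RENDERER_FENCE_FLAG_MERGEABLE is recycled without a separate
 * retirement unless it is the last entry on its list, whose retirement
 * covers the merged ones.
 */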
void
vkr_queue_get_signaled_syncs(struct vkr_queue *queue,
                             struct list_head *retired_syncs,
                             bool *queue_empty)
{
   struct vkr_device *dev = queue->device;
   struct vkr_queue_sync *sync, *tmp;

   assert(!(vkr_renderer_flags & VKR_RENDERER_ASYNC_FENCE_CB));

   list_inithead(retired_syncs);

   if (vkr_renderer_flags & VKR_RENDERER_THREAD_SYNC) {
      mtx_lock(&queue->mutex);

      LIST_FOR_EACH_ENTRY_SAFE (sync, tmp, &queue->signaled_syncs, head) {
         if (sync->head.next == &queue->signaled_syncs ||
             !(sync->flags & VIRGL_RENDERER_FENCE_FLAG_MERGEABLE))
            list_addtail(&sync->head, retired_syncs);
         else
            vkr_device_free_queue_sync(dev, sync);
      }
      list_inithead(&queue->signaled_syncs);

      *queue_empty = LIST_IS_EMPTY(&queue->pending_syncs);

      mtx_unlock(&queue->mutex);
   } else {
      LIST_FOR_EACH_ENTRY_SAFE (sync, tmp, &queue->pending_syncs, head) {
         VkResult result = vkGetFenceStatus(dev->base.handle.device, sync->fence);
         if (result == VK_NOT_READY)
            break;

         bool is_last_sync = sync->head.next == &queue->pending_syncs;

         list_del(&sync->head);
         if (is_last_sync || !(sync->flags & VIRGL_RENDERER_FENCE_FLAG_MERGEABLE))
            list_addtail(&sync->head, retired_syncs);
         else
            vkr_device_free_queue_sync(dev, sync);
      }

      *queue_empty = LIST_IS_EMPTY(&queue->pending_syncs);
   }
}

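/* Retires a single sync.  With the async fence callback the context is
 * notified right away and the sync is recycled; otherwise the VkFence is
 * destroyed and the sync is parked on ctx->signaled_syncs, to be reported to
 * the context and freed at the next retire_fences.
 */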
static void
vkr_queue_sync_retire(struct vkr_context *ctx,
                      struct vkr_device *dev,
                      struct vkr_queue_sync *sync)
{
   if (vkr_renderer_flags & VKR_RENDERER_ASYNC_FENCE_CB) {
      ctx->base.fence_retire(&ctx->base, sync->queue_id, sync->fence_cookie);
      vkr_device_free_queue_sync(dev, sync);
   } else {
      vkDestroyFence(dev->base.handle.device, sync->fence, NULL);
      sync->fence = VK_NULL_HANDLE;

      /* move to the ctx to be retired and freed at the next retire_fences */
      list_addtail(&sync->head, &ctx->signaled_syncs);
   }
}

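/* Retires every sync still tracked by the queue.  When a sync thread is
 * running it is joined first, so the signaled and pending lists can be
 * walked without holding the queue mutex.
 */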
static void
vkr_queue_retire_all_syncs(struct vkr_context *ctx, struct vkr_queue *queue)
{
   struct vkr_queue_sync *sync, *tmp;

   if (vkr_renderer_flags & VKR_RENDERER_THREAD_SYNC) {
      mtx_lock(&queue->mutex);
      queue->join = true;
      mtx_unlock(&queue->mutex);

      cnd_signal(&queue->cond);
      thrd_join(queue->thread, NULL);

      LIST_FOR_EACH_ENTRY_SAFE (sync, tmp, &queue->signaled_syncs, head)
         vkr_queue_sync_retire(ctx, queue->device, sync);
   } else {
      assert(LIST_IS_EMPTY(&queue->signaled_syncs));
   }

   LIST_FOR_EACH_ENTRY_SAFE (sync, tmp, &queue->pending_syncs, head)
      vkr_queue_sync_retire(ctx, queue->device, sync);
}

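/* Destroys a queue object.  The device is expected to be idle, so all
 * remaining syncs can be retired before the queue is freed or removed from
 * the object table.
 */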
void
vkr_queue_destroy(struct vkr_context *ctx, struct vkr_queue *queue)
{
   /* vkDeviceWaitIdle has been called */
   vkr_queue_retire_all_syncs(ctx, queue);

   mtx_destroy(&queue->mutex);
   cnd_destroy(&queue->cond);

   list_del(&queue->busy_head);
   list_del(&queue->base.track_head);

   if (queue->base.id)
      vkr_context_remove_object(ctx, &queue->base);
   else
      free(queue);
}

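/* Entry point of the per-queue sync thread.  The thread waits on the oldest
 * pending fence with a 3-second timeout, dropping the queue mutex during the
 * wait and looping on VK_TIMEOUT so that join requests are observed.  A
 * signaled sync is either retired immediately via the async fence callback
 * or moved to signaled_syncs, with the context's eventfd written to notify
 * whoever polls it.
 */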
static int
vkr_queue_thread(void *arg)
{
   struct vkr_queue *queue = arg;
   struct vkr_context *ctx = queue->context;
   struct vkr_device *dev = queue->device;
   const uint64_t ns_per_sec = 1000000000llu;
   char thread_name[16];

   snprintf(thread_name, ARRAY_SIZE(thread_name), "vkr-queue-%d", ctx->base.ctx_id);
   pipe_thread_setname(thread_name);

   mtx_lock(&queue->mutex);
   while (true) {
      while (LIST_IS_EMPTY(&queue->pending_syncs) && !queue->join)
         cnd_wait(&queue->cond, &queue->mutex);

      if (queue->join)
         break;

      struct vkr_queue_sync *sync =
         LIST_ENTRY(struct vkr_queue_sync, queue->pending_syncs.next, head);

      mtx_unlock(&queue->mutex);

      VkResult result =
         vkWaitForFences(dev->base.handle.device, 1, &sync->fence, false, ns_per_sec * 3);

      mtx_lock(&queue->mutex);

      if (result == VK_TIMEOUT)
         continue;

      list_del(&sync->head);

      if (vkr_renderer_flags & VKR_RENDERER_ASYNC_FENCE_CB) {
         ctx->base.fence_retire(&ctx->base, sync->queue_id, sync->fence_cookie);
         vkr_device_free_queue_sync(queue->device, sync);
      } else {
         list_addtail(&sync->head, &queue->signaled_syncs);
         write_eventfd(queue->eventfd, 1);
      }
   }
   mtx_unlock(&queue->mutex);

   return 0;
}

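/* Creates a vkr_queue wrapping the given VkQueue handle.  The object id is
 * left at 0 until the client looks the queue up and
 * vkr_queue_assign_object_id binds one.  With VKR_RENDERER_THREAD_SYNC, a
 * sync thread is spawned to wait on the queue's pending fences.
 */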
struct vkr_queue *
vkr_queue_create(struct vkr_context *ctx,
                 struct vkr_device *dev,
                 VkDeviceQueueCreateFlags flags,
                 uint32_t family,
                 uint32_t index,
                 VkQueue handle)
{
   struct vkr_queue *queue;
   int ret;

   /* id is set to 0 until vkr_queue_assign_object_id */
   queue = vkr_object_alloc(sizeof(*queue), VK_OBJECT_TYPE_QUEUE, 0);
   if (!queue)
      return NULL;

   queue->base.handle.queue = handle;

   queue->context = ctx;
   queue->device = dev;
   queue->flags = flags;
   queue->family = family;
   queue->index = index;

   list_inithead(&queue->pending_syncs);
   list_inithead(&queue->signaled_syncs);

   ret = mtx_init(&queue->mutex, mtx_plain);
   if (ret != thrd_success) {
      free(queue);
      return NULL;
   }
   ret = cnd_init(&queue->cond);
   if (ret != thrd_success) {
      mtx_destroy(&queue->mutex);
      free(queue);
      return NULL;
   }

   if (vkr_renderer_flags & VKR_RENDERER_THREAD_SYNC) {
      ret = thrd_create(&queue->thread, vkr_queue_thread, queue);
      if (ret != thrd_success) {
         mtx_destroy(&queue->mutex);
         cnd_destroy(&queue->cond);
         free(queue);
         return NULL;
      }
      queue->eventfd = ctx->fence_eventfd;
   }

   list_inithead(&queue->busy_head);
   list_inithead(&queue->base.track_head);

   return queue;
}

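/* Binds the client-chosen object id to a queue on its first lookup.  Looking
 * the same queue up again under a different id is treated as a fatal
 * protocol error.
 */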
static void
vkr_queue_assign_object_id(struct vkr_context *ctx,
                           struct vkr_queue *queue,
                           vkr_object_id id)
{
   if (queue->base.id) {
      if (queue->base.id != id)
         vkr_cs_decoder_set_fatal(&ctx->decoder);
      return;
   }

   if (!vkr_context_validate_object_id(ctx, id))
      return;

   queue->base.id = id;

   vkr_context_add_object(ctx, &queue->base);
}

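/* Returns the queue created with the given flags, family, and index, or NULL
 * when the client asks for a queue combination that was never created.
 */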
static struct vkr_queue *
vkr_device_lookup_queue(struct vkr_device *dev,
                        VkDeviceQueueCreateFlags flags,
                        uint32_t family,
                        uint32_t index)
{
   struct vkr_queue *queue;

   LIST_FOR_EACH_ENTRY (queue, &dev->queues, base.track_head) {
      if (queue->flags == flags && queue->family == family && queue->index == index)
         return queue;
   }

   return NULL;
}

static void
vkr_dispatch_vkGetDeviceQueue(struct vn_dispatch_context *dispatch,
                              struct vn_command_vkGetDeviceQueue *args)
{
   struct vkr_context *ctx = dispatch->data;

   struct vkr_device *dev = vkr_device_from_handle(args->device);

   struct vkr_queue *queue = vkr_device_lookup_queue(
      dev, 0 /* flags */, args->queueFamilyIndex, args->queueIndex);
   if (!queue) {
      vkr_cs_decoder_set_fatal(&ctx->decoder);
      return;
   }

   const vkr_object_id id =
      vkr_cs_handle_load_id((const void **)args->pQueue, VK_OBJECT_TYPE_QUEUE);
   vkr_queue_assign_object_id(ctx, queue, id);
}

static void
vkr_dispatch_vkGetDeviceQueue2(struct vn_dispatch_context *dispatch,
                               struct vn_command_vkGetDeviceQueue2 *args)
{
   struct vkr_context *ctx = dispatch->data;

   struct vkr_device *dev = vkr_device_from_handle(args->device);

   struct vkr_queue *queue = vkr_device_lookup_queue(dev, args->pQueueInfo->flags,
                                                     args->pQueueInfo->queueFamilyIndex,
                                                     args->pQueueInfo->queueIndex);
   if (!queue) {
      vkr_cs_decoder_set_fatal(&ctx->decoder);
      return;
   }

   const vkr_object_id id =
      vkr_cs_handle_load_id((const void **)args->pQueue, VK_OBJECT_TYPE_QUEUE);
   vkr_queue_assign_object_id(ctx, queue, id);
}

static void
vkr_dispatch_vkQueueSubmit(UNUSED struct vn_dispatch_context *dispatch,
                           struct vn_command_vkQueueSubmit *args)
{
   vn_replace_vkQueueSubmit_args_handle(args);
   args->ret = vkQueueSubmit(args->queue, args->submitCount, args->pSubmits, args->fence);
}

static void
vkr_dispatch_vkQueueBindSparse(UNUSED struct vn_dispatch_context *dispatch,
                               struct vn_command_vkQueueBindSparse *args)
{
   vn_replace_vkQueueBindSparse_args_handle(args);
   args->ret =
      vkQueueBindSparse(args->queue, args->bindInfoCount, args->pBindInfo, args->fence);
}

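/* vkQueueWaitIdle can block for an unbounded time, which the single-threaded
 * decoder cannot afford, so it is rejected as a fatal protocol error.  The
 * client is expected to emulate the wait on its side (e.g., with a fence)
 * rather than send this command.
 */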
static void
vkr_dispatch_vkQueueWaitIdle(struct vn_dispatch_context *dispatch,
                             UNUSED struct vn_command_vkQueueWaitIdle *args)
{
   struct vkr_context *ctx = dispatch->data;
   /* no blocking call */
   vkr_cs_decoder_set_fatal(&ctx->decoder);
}

static void
vkr_dispatch_vkCreateFence(struct vn_dispatch_context *dispatch,
                           struct vn_command_vkCreateFence *args)
{
   vkr_fence_create_and_add(dispatch->data, args);
}

static void
vkr_dispatch_vkDestroyFence(struct vn_dispatch_context *dispatch,
                            struct vn_command_vkDestroyFence *args)
{
   vkr_fence_destroy_and_remove(dispatch->data, args);
}

static void
vkr_dispatch_vkResetFences(UNUSED struct vn_dispatch_context *dispatch,
                           struct vn_command_vkResetFences *args)
{
   vn_replace_vkResetFences_args_handle(args);
   args->ret = vkResetFences(args->device, args->fenceCount, args->pFences);
}

static void
vkr_dispatch_vkGetFenceStatus(UNUSED struct vn_dispatch_context *dispatch,
                              struct vn_command_vkGetFenceStatus *args)
{
   vn_replace_vkGetFenceStatus_args_handle(args);
   args->ret = vkGetFenceStatus(args->device, args->fence);
}

static void
vkr_dispatch_vkWaitForFences(struct vn_dispatch_context *dispatch,
                             struct vn_command_vkWaitForFences *args)
{
   struct vkr_context *ctx = dispatch->data;

   /* Being single-threaded, we cannot afford potentially blocking calls.  A
    * wait that never returns would also end in a lost GPU, since it could
    * only be unblocked by a later command (e.g., a vkCmdWaitEvents unblocked
    * by a subsequent vkSetEvent) that the blocked decoder never reaches.
    */
   if (args->timeout) {
      vkr_cs_decoder_set_fatal(&ctx->decoder);
      return;
   }

   vn_replace_vkWaitForFences_args_handle(args);
   args->ret = vkWaitForFences(args->device, args->fenceCount, args->pFences,
                               args->waitAll, args->timeout);
}

static void
vkr_dispatch_vkCreateSemaphore(struct vn_dispatch_context *dispatch,
                               struct vn_command_vkCreateSemaphore *args)
{
   vkr_semaphore_create_and_add(dispatch->data, args);
}

static void
vkr_dispatch_vkDestroySemaphore(struct vn_dispatch_context *dispatch,
                                struct vn_command_vkDestroySemaphore *args)
{
   vkr_semaphore_destroy_and_remove(dispatch->data, args);
}

static void
vkr_dispatch_vkGetSemaphoreCounterValue(UNUSED struct vn_dispatch_context *dispatch,
                                        struct vn_command_vkGetSemaphoreCounterValue *args)
{
   struct vkr_device *dev = vkr_device_from_handle(args->device);

   vn_replace_vkGetSemaphoreCounterValue_args_handle(args);
   args->ret = dev->GetSemaphoreCounterValue(args->device, args->semaphore, args->pValue);
}

static void
vkr_dispatch_vkWaitSemaphores(struct vn_dispatch_context *dispatch,
                              struct vn_command_vkWaitSemaphores *args)
{
   struct vkr_context *ctx = dispatch->data;
   struct vkr_device *dev = vkr_device_from_handle(args->device);

   /* no blocking call */
   if (args->timeout) {
      vkr_cs_decoder_set_fatal(&ctx->decoder);
      return;
   }

   vn_replace_vkWaitSemaphores_args_handle(args);
   args->ret = dev->WaitSemaphores(args->device, args->pWaitInfo, args->timeout);
}

static void
vkr_dispatch_vkSignalSemaphore(UNUSED struct vn_dispatch_context *dispatch,
                               struct vn_command_vkSignalSemaphore *args)
{
   struct vkr_device *dev = vkr_device_from_handle(args->device);

   vn_replace_vkSignalSemaphore_args_handle(args);
   args->ret = dev->SignalSemaphore(args->device, args->pSignalInfo);
}

static void
vkr_dispatch_vkCreateEvent(struct vn_dispatch_context *dispatch,
                           struct vn_command_vkCreateEvent *args)
{
   vkr_event_create_and_add(dispatch->data, args);
}

static void
vkr_dispatch_vkDestroyEvent(struct vn_dispatch_context *dispatch,
                            struct vn_command_vkDestroyEvent *args)
{
   vkr_event_destroy_and_remove(dispatch->data, args);
}

static void
vkr_dispatch_vkGetEventStatus(UNUSED struct vn_dispatch_context *dispatch,
                              struct vn_command_vkGetEventStatus *args)
{
   vn_replace_vkGetEventStatus_args_handle(args);
   args->ret = vkGetEventStatus(args->device, args->event);
}

static void
vkr_dispatch_vkSetEvent(UNUSED struct vn_dispatch_context *dispatch,
                        struct vn_command_vkSetEvent *args)
{
   vn_replace_vkSetEvent_args_handle(args);
   args->ret = vkSetEvent(args->device, args->event);
}

static void
vkr_dispatch_vkResetEvent(UNUSED struct vn_dispatch_context *dispatch,
                          struct vn_command_vkResetEvent *args)
{
   vn_replace_vkResetEvent_args_handle(args);
   args->ret = vkResetEvent(args->device, args->event);
}

void
vkr_context_init_queue_dispatch(struct vkr_context *ctx)
{
   struct vn_dispatch_context *dispatch = &ctx->dispatch;

   dispatch->dispatch_vkGetDeviceQueue = vkr_dispatch_vkGetDeviceQueue;
   dispatch->dispatch_vkGetDeviceQueue2 = vkr_dispatch_vkGetDeviceQueue2;
   dispatch->dispatch_vkQueueSubmit = vkr_dispatch_vkQueueSubmit;
   dispatch->dispatch_vkQueueBindSparse = vkr_dispatch_vkQueueBindSparse;
   dispatch->dispatch_vkQueueWaitIdle = vkr_dispatch_vkQueueWaitIdle;
}

void
vkr_context_init_fence_dispatch(struct vkr_context *ctx)
{
   struct vn_dispatch_context *dispatch = &ctx->dispatch;

   dispatch->dispatch_vkCreateFence = vkr_dispatch_vkCreateFence;
   dispatch->dispatch_vkDestroyFence = vkr_dispatch_vkDestroyFence;
   dispatch->dispatch_vkResetFences = vkr_dispatch_vkResetFences;
   dispatch->dispatch_vkGetFenceStatus = vkr_dispatch_vkGetFenceStatus;
   dispatch->dispatch_vkWaitForFences = vkr_dispatch_vkWaitForFences;
}

void
vkr_context_init_semaphore_dispatch(struct vkr_context *ctx)
{
   struct vn_dispatch_context *dispatch = &ctx->dispatch;

   dispatch->dispatch_vkCreateSemaphore = vkr_dispatch_vkCreateSemaphore;
   dispatch->dispatch_vkDestroySemaphore = vkr_dispatch_vkDestroySemaphore;
   dispatch->dispatch_vkGetSemaphoreCounterValue =
      vkr_dispatch_vkGetSemaphoreCounterValue;
   dispatch->dispatch_vkWaitSemaphores = vkr_dispatch_vkWaitSemaphores;
   dispatch->dispatch_vkSignalSemaphore = vkr_dispatch_vkSignalSemaphore;
}

void
vkr_context_init_event_dispatch(struct vkr_context *ctx)
{
   struct vn_dispatch_context *dispatch = &ctx->dispatch;

   dispatch->dispatch_vkCreateEvent = vkr_dispatch_vkCreateEvent;
   dispatch->dispatch_vkDestroyEvent = vkr_dispatch_vkDestroyEvent;
   dispatch->dispatch_vkGetEventStatus = vkr_dispatch_vkGetEventStatus;
   dispatch->dispatch_vkSetEvent = vkr_dispatch_vkSetEvent;
   dispatch->dispatch_vkResetEvent = vkr_dispatch_vkResetEvent;
}