1 /**************************************************************************
2 *
3 * Copyright (C) 2015 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included
13 * in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 **************************************************************************/
24
25 #ifdef HAVE_CONFIG_H
26 #include "config.h"
27 #endif
28
29 #include <stdlib.h>
30 #include <stdio.h>
31 #include <string.h>
32 #include <time.h>
33 #include <unistd.h>
34 #include <fcntl.h>
35 #include <limits.h>
36
37 #include "virgl_hw.h"
38 #include "virglrenderer.h"
39
40 #include <sys/uio.h>
41 #include <sys/socket.h>
42 #include <sys/mman.h>
43 #ifdef HAVE_EVENTFD_H
44 #include <sys/eventfd.h>
45 #endif
46
47 #include "vtest.h"
48 #include "vtest_shm.h"
49 #include "vtest_protocol.h"
50
51 #include "util.h"
52 #include "util/u_debug.h"
53 #include "util/u_double_list.h"
54 #include "util/u_math.h"
55 #include "util/u_memory.h"
56 #include "util/u_hash_table.h"
57
58 #define VTEST_MAX_SYNC_QUEUE_COUNT 64
59
/* A server-side resource wrapper; recycled through renderer.free_resources. */
struct vtest_resource {
   struct list_head head;

   uint32_t server_res_id; /* id allocated by the server, kept across recycling */
   uint32_t res_id;        /* id used with virglrenderer (client id if non-zero) */

   struct iovec iov;       /* optional shm backing store (iov_base NULL if none) */
};
68
/* A refcounted sync object; recycled through renderer.free_syncs. */
struct vtest_sync {
   struct list_head head;

   int sync_id;    /* allocated by the server, kept across recycling */
   int refcount;

   uint64_t value; /* current timeline value */
};
77
/* An ordered list of pending vtest_sync_queue_submit entries. */
struct vtest_sync_queue {
   struct list_head submits;
};
81
/* One submission on a sync queue: when its fence signals, each syncs[i]
 * is advanced to values[i] (see vtest_write_context_fence).
 */
struct vtest_sync_queue_submit {
   struct list_head head;

   struct vtest_sync_queue *sync_queue; /* owning queue */

   uint32_t count;            /* number of entries in syncs/values */
   struct vtest_sync **syncs; /* referenced syncs; unref'd when freed */
   uint64_t *values;
};
91
/* An outstanding client wait on a set of syncs, signaled through fd. */
struct vtest_sync_wait {
   struct list_head head;

   int fd; /* eventfd-style fd; closed when the wait is freed */

   uint32_t flags;
   uint64_t valid_before; /* deadline for the wait */

   uint32_t count;            /* number of entries in syncs/values */
   struct vtest_sync **syncs; /* entries become NULL once signaled */
   uint64_t *values;

   uint32_t signaled_count; /* how many of syncs[] have signaled so far */
};
106
/* Per-client state; recycled through renderer.free_contexts. */
struct vtest_context {
   struct list_head head;

   int ctx_id; /* virglrenderer context id, allocated once per object */

   struct vtest_input *input; /* command stream source */
   int out_fd;                /* reply stream destination */

   char *debug_name; /* client-supplied name, owned by the context */

   unsigned protocol_version; /* 0 until VCMD_PROTOCOL_VERSION negotiates it */
   unsigned capset_id;        /* 0 until VCMD_CONTEXT_INIT selects one */
   bool context_initialized;  /* virgl_renderer_context_create done */

   struct util_hash_table *resource_table; /* res_id -> vtest_resource */
   struct util_hash_table *sync_table;     /* sync_id -> vtest_sync */

   struct vtest_sync_queue sync_queues[VTEST_MAX_SYNC_QUEUE_COUNT];

   struct list_head sync_waits; /* outstanding vtest_sync_wait entries */
};
128
/* Global server state (single instance: the static 'renderer' below). */
struct vtest_renderer {
   const char *rendernode_name; /* explicit DRM render node, or NULL to search */
   bool multi_clients;          /* if true, clients must speak protocol >= 3 */
   uint32_t ctx_flags;          /* flags passed to virgl_renderer_init */

   uint32_t max_length; /* maximum accepted command payload length */

   /* implicit (context-less) fence timeline; ids are a monotonic counter */
   int implicit_fence_submitted;
   int implicit_fence_completed;

   struct list_head active_contexts;
   struct list_head free_contexts; /* recycled vtest_context objects */
   int next_context_id;            /* ids start at 1; 0 means invalid */

   struct list_head free_resources; /* recycled vtest_resource objects */
   int next_resource_id;

   struct list_head free_syncs; /* recycled vtest_sync objects */
   int next_sync_id;

   struct vtest_context *current_context; /* context serving the current command */
};
151
/*
 * VCMD_RESOURCE_BUSY_WAIT is used to wait for GPU work (VCMD_SUBMIT_CMD) or
 * CPU work (VCMD_TRANSFER_GET2).  A fence is needed only for GPU work.
 */
/* Submit the next fence on the implicit timeline (ids are a monotonic counter). */
static void vtest_create_implicit_fence(struct vtest_renderer *renderer)
{
   virgl_renderer_create_fence(++renderer->implicit_fence_submitted, 0);
}
160
/* virglrenderer callback: record the most recently completed implicit fence. */
static void vtest_write_implicit_fence(UNUSED void *cookie, uint32_t fence_id_in)
{
   struct vtest_renderer *renderer = (struct vtest_renderer*)cookie;
   renderer->implicit_fence_completed = fence_id_in;
}
166
167 static void vtest_signal_sync_queue(struct vtest_sync_queue *queue,
168 struct vtest_sync_queue_submit *to_submit);
169
/* virglrenderer callback: a per-context fence completed.  The fence cookie is
 * the vtest_sync_queue_submit that was attached at submit time; signal it.
 */
static void vtest_write_context_fence(UNUSED void *cookie,
                                      UNUSED uint32_t ctx_id,
                                      UNUSED uint64_t queue_id,
                                      void *fence_cookie)
{
   struct vtest_sync_queue_submit *submit = fence_cookie;
   vtest_signal_sync_queue(submit->sync_queue, submit);
}
178
vtest_get_drm_fd(void * cookie)179 static int vtest_get_drm_fd(void *cookie)
180 {
181 int fd = -1;
182 struct vtest_renderer *renderer = (struct vtest_renderer*)cookie;
183 if (!renderer->rendernode_name)
184 return -1;
185 fd = open(renderer->rendernode_name, O_RDWR | O_CLOEXEC | O_NOCTTY | O_NONBLOCK);
186 if (fd == -1)
187 fprintf(stderr, "Unable to open rendernode '%s' falling back to default search\n",
188 renderer->rendernode_name);
189 return fd;
190 }
191
/* Callback table handed to virgl_renderer_init; cookie is &renderer. */
static struct virgl_renderer_callbacks renderer_cbs = {
   .version = VIRGL_RENDERER_CALLBACKS_VERSION,
   .write_fence = vtest_write_implicit_fence,
   .get_drm_fd = vtest_get_drm_fd,
   .write_context_fence = vtest_write_context_fence,
};
198
199
/* The single global server instance; id counters start at 1 so that 0 can
 * serve as an "invalid/unset" id.
 */
static struct vtest_renderer renderer = {
   .max_length = UINT_MAX,
   .next_context_id = 1,
   .next_resource_id = 1,
   .next_sync_id = 1,
};
206
/* Allocate a resource wrapper, reusing one from the free list when possible.
 * A client_res_id of 0 means "let the server pick the id".
 * Returns NULL on allocation failure.
 */
static struct vtest_resource *vtest_new_resource(uint32_t client_res_id)
{
   struct vtest_resource *res;

   if (LIST_IS_EMPTY(&renderer.free_resources)) {
      res = malloc(sizeof(*res));
      if (!res) {
         return NULL;
      }

      /* server ids are assigned once and survive recycling */
      res->server_res_id = renderer.next_resource_id++;
   } else {
      res = LIST_ENTRY(struct vtest_resource, renderer.free_resources.next, head);
      list_del(&res->head);
   }

   res->res_id = client_res_id ? client_res_id : res->server_res_id;
   res->iov.iov_base = NULL;
   res->iov.iov_len = 0;

   return res;
}
229
/* Release a resource: drop the virglrenderer reference, unmap any shm
 * backing, and return the wrapper to the free list for reuse.
 */
static void vtest_unref_resource(struct vtest_resource *res)
{
   /* virgl_renderer_ctx_detach_resource and virgl_renderer_resource_detach_iov
    * are implied
    */
   virgl_renderer_resource_unref(res->res_id);

   if (res->iov.iov_base)
      munmap(res->iov.iov_base, res->iov.iov_len);

   list_add(&res->head, &renderer.free_resources);
}
242
/* Allocate a sync with refcount 1 and the given initial timeline value,
 * reusing a free-listed object when possible.  Returns NULL on OOM.
 */
static struct vtest_sync *vtest_new_sync(uint64_t value)
{
   struct vtest_sync *sync;

   if (LIST_IS_EMPTY(&renderer.free_syncs)) {
      sync = malloc(sizeof(*sync));
      if (!sync) {
         return NULL;
      }

      /* sync ids are assigned once and survive recycling */
      sync->sync_id = renderer.next_sync_id++;
   } else {
      sync = LIST_ENTRY(struct vtest_sync, renderer.free_syncs.next, head);
      list_del(&sync->head);
   }

   sync->refcount = 1;
   sync->value = value;

   return sync;
}
264
/* Take an additional reference; returns the same sync for call chaining. */
static struct vtest_sync *vtest_ref_sync(struct vtest_sync *sync)
{
   sync->refcount++;
   return sync;
}
270
/* Drop a reference; when the last one goes, the object is recycled onto the
 * free list (not freed — its sync_id is kept for reuse).
 */
static void vtest_unref_sync(struct vtest_sync *sync)
{
   assert(sync->refcount);
   sync->refcount--;
   if (sync->refcount)
      return;

   list_add(&sync->head, &renderer.free_syncs);
}
280
/* Drop the submit's sync references and free it.
 * NOTE(review): only submit itself is freed — assumes syncs/values point into
 * the same allocation as the submit; confirm at the allocation site.
 */
static void vtest_free_sync_queue_submit(struct vtest_sync_queue_submit *submit)
{
   uint32_t i;
   for (i = 0; i < submit->count; i++)
      vtest_unref_sync(submit->syncs[i]);
   free(submit);
}
288
/* Free a pending wait: unref the remaining (unsignaled) syncs — signaled
 * entries are NULL — close its fd, and free the struct.
 * NOTE(review): as with submits, syncs/values are assumed to share the wait
 * allocation; confirm at the allocation site.
 */
static void vtest_free_sync_wait(struct vtest_sync_wait *wait)
{
   uint32_t i;

   for (i = 0; i < wait->count; i++) {
      if (wait->syncs[i])
         vtest_unref_sync(wait->syncs[i]);
   }
   close(wait->fd);
   free(wait);
}
300
/* Hash callback for tables keyed by u32 ids stored directly in the pointer
 * value: the hash is simply the low 32 bits of the key.
 */
static unsigned
u32_hash_func(void *key)
{
   return (unsigned)((uintptr_t)key & 0xffffffff);
}
307
/* Comparison callback for tables keyed by pointer-stuffed u32 ids:
 * returns <0, 0, >0 following the usual strcmp convention.
 */
static int
u32_compare_func(void *key1, void *key2)
{
   return (key1 > key2) - (key1 < key2);
}
319
/* Hash-table value destructor for resource_table entries. */
static void
resource_destroy_func(void *value)
{
   struct vtest_resource *res = value;
   vtest_unref_resource(res);
}
326
/* Hash-table value destructor for sync_table entries. */
static void
sync_destroy_func(void *value)
{
   struct vtest_sync *sync = value;
   vtest_unref_sync(sync);
}
333
/* Write all of buf to fd, retrying across short writes.
 * Returns size on success or -errno on the first write failure.
 */
static int vtest_block_write(int fd, void *buf, int size)
{
   char *cursor = buf;
   int remaining = size;

   do {
      int written = write(fd, cursor, remaining);
      if (written < 0)
         return -errno;

      cursor += written;
      remaining -= written;
   } while (remaining);

   return size;
}
353
/* Read exactly size bytes from an fd-backed input, retrying on short reads.
 * Returns size on success, 0 on EOF, or -errno on a read error.
 *
 * If the VTEST_SAVE environment variable is set, every block read is also
 * written to that file for later replay.  The save fd is a function-static
 * opened on first use, so this is not safe across multiple inputs.
 */
int vtest_block_read(struct vtest_input *input, void *buf, int size)
{
   int fd = input->data.fd;
   char *ptr = buf;
   int left;
   int ret;
   static int savefd = -1;

   left = size;
   do {
      ret = read(fd, ptr, left);
      if (ret <= 0) {
         /* 0 means EOF; map -1 to -errno */
         return ret == -1 ? -errno : 0;
      }

      left -= ret;
      ptr += ret;
   } while (left);

   if (getenv("VTEST_SAVE")) {
      if (savefd == -1) {
         savefd = open(getenv("VTEST_SAVE"),
                       O_CLOEXEC|O_CREAT|O_WRONLY|O_TRUNC|O_DSYNC, S_IRUSR|S_IWUSR);
         if (savefd == -1) {
            perror("error opening save file");
            exit(1);
         }
      }
      if (write(savefd, buf, size) != size) {
         perror("failed to save");
         exit(1);
      }
   }

   return size;
}
390
/* Pass a file descriptor over a UNIX socket using an SCM_RIGHTS control
 * message; a single dummy byte is sent as payload to carry it.
 * Returns 0 on success or -EINVAL if sendmsg fails.
 */
static int vtest_send_fd(int socket_fd, int fd)
{
   struct iovec iovec;
   char buf[CMSG_SPACE(sizeof(int))];
   char c = 0;
   struct msghdr msgh = { 0 };
   memset(buf, 0, sizeof(buf));

   /* one dummy data byte: some platforms refuse a zero-length payload */
   iovec.iov_base = &c;
   iovec.iov_len = sizeof(char);

   msgh.msg_name = NULL;
   msgh.msg_namelen = 0;
   msgh.msg_iov = &iovec;
   msgh.msg_iovlen = 1;
   msgh.msg_control = buf;
   msgh.msg_controllen = sizeof(buf);
   msgh.msg_flags = 0;

   /* place the fd into the (single) control message */
   struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msgh);
   cmsg->cmsg_level = SOL_SOCKET;
   cmsg->cmsg_type = SCM_RIGHTS;
   cmsg->cmsg_len = CMSG_LEN(sizeof(int));

   *((int *) CMSG_DATA(cmsg)) = fd;

   int size = sendmsg(socket_fd, &msgh, 0);
   if (size < 0) {
      return report_failure("Failed to send fd", -EINVAL);
   }

   return 0;
}
424
vtest_buf_read(struct vtest_input * input,void * buf,int size)425 int vtest_buf_read(struct vtest_input *input, void *buf, int size)
426 {
427 struct vtest_buffer *inbuf = input->data.buffer;
428 if (size > inbuf->size) {
429 return 0;
430 }
431
432 memcpy(buf, inbuf->buffer, size);
433 inbuf->buffer += size;
434 inbuf->size -= size;
435
436 return size;
437 }
438
/* One-time server initialization: set up the free lists and initialize
 * virglrenderer.  THREAD_SYNC and USE_EXTERNAL_BLOB are always forced on
 * in addition to the caller's ctx_flags.
 * Returns 0 on success, -1 on renderer init failure.
 */
int vtest_init_renderer(bool multi_clients,
                        int ctx_flags,
                        const char *render_device)
{
   int ret;

   renderer.rendernode_name = render_device;
   list_inithead(&renderer.active_contexts);
   list_inithead(&renderer.free_contexts);
   list_inithead(&renderer.free_resources);
   list_inithead(&renderer.free_syncs);

   ctx_flags |= VIRGL_RENDERER_THREAD_SYNC |
                VIRGL_RENDERER_USE_EXTERNAL_BLOB;
   ret = virgl_renderer_init(&renderer, ctx_flags, &renderer_cbs);
   if (ret) {
      fprintf(stderr, "failed to initialise renderer.\n");
      return -1;
   }

   renderer.multi_clients = multi_clients;
   renderer.ctx_flags = ctx_flags;

   return 0;
}
464
465 static void vtest_free_context(struct vtest_context *ctx, bool cleanup);
466
/* Tear down all server state: destroy live contexts, free all recycled
 * objects, reset the id counters, and shut down virglrenderer.  The
 * "next_*_id > 1" guards skip pools that were never used.
 */
void vtest_cleanup_renderer(void)
{
   if (renderer.next_context_id > 1) {
      struct vtest_context *ctx, *tmp;

      /* destroy moves active contexts to the free list; then free those */
      LIST_FOR_EACH_ENTRY_SAFE(ctx, tmp, &renderer.active_contexts, head) {
         vtest_destroy_context(ctx);
      }
      LIST_FOR_EACH_ENTRY_SAFE(ctx, tmp, &renderer.free_contexts, head) {
         vtest_free_context(ctx, true);
      }
      list_inithead(&renderer.active_contexts);
      list_inithead(&renderer.free_contexts);

      renderer.next_context_id = 1;
      renderer.current_context = NULL;
   }

   if (renderer.next_resource_id > 1) {
      struct vtest_resource *res, *tmp;

      LIST_FOR_EACH_ENTRY_SAFE(res, tmp, &renderer.free_resources, head) {
         free(res);
      }
      list_inithead(&renderer.free_resources);

      renderer.next_resource_id = 1;
   }

   if (renderer.next_sync_id > 1) {
      struct vtest_sync *sync, *tmp;

      LIST_FOR_EACH_ENTRY_SAFE(sync, tmp, &renderer.free_syncs, head) {
         /* free-listed syncs must have no outstanding references */
         assert(!sync->refcount);
         free(sync);
      }
      list_inithead(&renderer.free_syncs);

      renderer.next_sync_id = 1;
   }

   virgl_renderer_cleanup(&renderer);
}
510
/* Allocate a context, reusing one from the free list when possible.  Fresh
 * contexts get their hash tables, sync queues, wait list, and ctx_id set up
 * once; recycled ones keep those (they are cleared by vtest_destroy_context).
 * Returns NULL on allocation failure.
 */
static struct vtest_context *vtest_new_context(struct vtest_input *input,
                                               int out_fd)
{
   struct vtest_context *ctx;

   if (LIST_IS_EMPTY(&renderer.free_contexts)) {
      uint32_t i;

      ctx = malloc(sizeof(*ctx));
      if (!ctx) {
         return NULL;
      }

      ctx->resource_table = util_hash_table_create(u32_hash_func,
                                                   u32_compare_func,
                                                   resource_destroy_func);
      if (!ctx->resource_table) {
         free(ctx);
         return NULL;
      }

      ctx->sync_table = util_hash_table_create(u32_hash_func,
                                               u32_compare_func,
                                               sync_destroy_func);
      if (!ctx->sync_table) {
         util_hash_table_destroy(ctx->resource_table);
         free(ctx);
         return NULL;
      }

      for (i = 0; i < VTEST_MAX_SYNC_QUEUE_COUNT; i++) {
         struct vtest_sync_queue *queue = &ctx->sync_queues[i];
         list_inithead(&queue->submits);
      }

      list_inithead(&ctx->sync_waits);

      /* ctx ids are assigned once and survive recycling */
      ctx->ctx_id = renderer.next_context_id++;
   } else {
      ctx = LIST_ENTRY(struct vtest_context, renderer.free_contexts.next, head);
      list_del(&ctx->head);
   }

   ctx->input = input;
   ctx->out_fd = out_fd;

   ctx->debug_name = NULL;
   /* By default we support version 0 unless VCMD_PROTOCOL_VERSION is sent */
   ctx->protocol_version = 0;
   ctx->capset_id = 0;
   ctx->context_initialized = false;

   return ctx;
}
565
/* Dispose of a context object: with cleanup=true the memory is actually
 * released (server shutdown), otherwise the object is recycled onto the
 * free list with its hash tables intact.
 */
static void vtest_free_context(struct vtest_context *ctx, bool cleanup)
{
   if (cleanup) {
      util_hash_table_destroy(ctx->resource_table);
      util_hash_table_destroy(ctx->sync_table);
      free(ctx);
   } else {
      list_add(&ctx->head, &renderer.free_contexts);
   }
}
576
/* Handle a new client connection: allocate a context and read its
 * length-byte debug name from the input.  The name length is capped at
 * 1 MiB to bound the allocation.  On success the context is appended to
 * the active list and returned through out_ctx.
 * Returns 0 on success, -1 on failure.
 */
int vtest_create_context(struct vtest_input *input, int out_fd,
                         uint32_t length, struct vtest_context **out_ctx)
{
   struct vtest_context *ctx;
   char *vtestname;
   int ret;

   if (length > 1024 * 1024) {
      return -1;
   }

   ctx = vtest_new_context(input, out_fd);
   if (!ctx) {
      return -1;
   }

   /* +1 and calloc guarantee NUL termination */
   vtestname = calloc(1, length + 1);
   if (!vtestname) {
      ret = -1;
      goto err;
   }

   ret = ctx->input->read(ctx->input, vtestname, length);
   if (ret != (int)length) {
      ret = -1;
      goto err;
   }

   ctx->debug_name = vtestname;

   list_addtail(&ctx->head, &renderer.active_contexts);
   *out_ctx = ctx;

   return 0;

err:
   free(vtestname);
   vtest_free_context(ctx, false);
   return ret;
}
617
/* Create the virglrenderer context on first use (no-op if already done).
 * Uses the capset-flavored create when the client selected a capset via
 * VCMD_CONTEXT_INIT.  Multi-client servers require protocol >= 3.
 * Returns 0 on success or a negative error.
 */
int vtest_lazy_init_context(struct vtest_context *ctx)
{
   int ret;

   if (ctx->context_initialized)
      return 0;

   if (renderer.multi_clients && ctx->protocol_version < 3)
      return report_failed_call("protocol version too low", -EINVAL);

   if (ctx->capset_id) {
      ret = virgl_renderer_context_create_with_flags(ctx->ctx_id,
                                                     ctx->capset_id,
                                                     strlen(ctx->debug_name),
                                                     ctx->debug_name);
   } else {
      ret = virgl_renderer_context_create(ctx->ctx_id,
                                          strlen(ctx->debug_name),
                                          ctx->debug_name);
   }
   ctx->context_initialized = (ret == 0);

   return ret;
}
642
/* Tear down a context: drain pending sync-queue submits and waits, free
 * the debug name, destroy the virglrenderer context, release all tracked
 * resources/syncs via the hash-table destructors, and recycle the object.
 */
void vtest_destroy_context(struct vtest_context *ctx)
{
   struct vtest_sync_wait *wait, *wait_tmp;
   uint32_t i;

   if (renderer.current_context == ctx) {
      renderer.current_context = NULL;
   }
   list_del(&ctx->head);

   /* free not-yet-signaled submits on every sync queue */
   for (i = 0; i < VTEST_MAX_SYNC_QUEUE_COUNT; i++) {
      struct vtest_sync_queue *queue = &ctx->sync_queues[i];
      struct vtest_sync_queue_submit *submit, *submit_tmp;

      LIST_FOR_EACH_ENTRY_SAFE(submit, submit_tmp, &queue->submits, head)
         vtest_free_sync_queue_submit(submit);
      list_inithead(&queue->submits);
   }

   LIST_FOR_EACH_ENTRY_SAFE(wait, wait_tmp, &ctx->sync_waits, head) {
      list_del(&wait->head);
      vtest_free_sync_wait(wait);
   }
   list_inithead(&ctx->sync_waits);

   free(ctx->debug_name);
   if (ctx->context_initialized)
      virgl_renderer_context_destroy(ctx->ctx_id);
   /* clear (not destroy) so the tables can be reused after recycling */
   util_hash_table_clear(ctx->resource_table);
   util_hash_table_clear(ctx->sync_table);
   vtest_free_context(ctx, false);
}
675
/* Let virglrenderer process pending events for this context. */
void vtest_poll_context(struct vtest_context *ctx)
{
   virgl_renderer_context_poll(ctx->ctx_id);
}
680
/* Return the fd the caller should poll for context events (-1 if none). */
int vtest_get_context_poll_fd(struct vtest_context *ctx)
{
   return virgl_renderer_context_get_poll_fd(ctx->ctx_id);
}
685
/* Select which context the vtest_* command handlers below operate on. */
void vtest_set_current_context(struct vtest_context *ctx)
{
   renderer.current_context = ctx;
}
690
/* The context selected by vtest_set_current_context (may be NULL). */
static struct vtest_context *vtest_get_current_context(void)
{
   return renderer.current_context;
}
695
/* Reply to a protocol-version ping with a bare header (no payload).
 * Returns 0 on success or the negative write error.
 */
int vtest_ping_protocol_version(UNUSED uint32_t length_dw)
{
   struct vtest_context *ctx = vtest_get_current_context();
   uint32_t hdr_buf[VTEST_HDR_SIZE];
   int ret;

   hdr_buf[VTEST_CMD_LEN] = VCMD_PING_PROTOCOL_VERSION_SIZE;
   hdr_buf[VTEST_CMD_ID] = VCMD_PING_PROTOCOL_VERSION;
   ret = vtest_block_write(ctx->out_fd, hdr_buf, sizeof(hdr_buf));
   if (ret < 0) {
      return ret;
   }

   return 0;
}
711
/* Negotiate the protocol version: take the minimum of the client's and the
 * server's versions, apply the fallbacks below, store the result on the
 * context, and echo it back to the client.
 * Returns 0 on success or a negative error.
 */
int vtest_protocol_version(UNUSED uint32_t length_dw)
{
   struct vtest_context *ctx = vtest_get_current_context();
   uint32_t hdr_buf[VTEST_HDR_SIZE];
   uint32_t version_buf[VCMD_PROTOCOL_VERSION_SIZE];
   unsigned version;
   int ret;

   ret = ctx->input->read(ctx->input, &version_buf, sizeof(version_buf));
   if (ret != sizeof(version_buf))
      return -1;

   version = MIN2(version_buf[VCMD_PROTOCOL_VERSION_VERSION],
                  VTEST_PROTOCOL_VERSION);

   /*
    * We've deprecated protocol version 1.  All of its call sites are being
    * moved to protocol version 2.  If the server supports version 2 and the
    * guest supports version 1, fall back to version 0.
    */
   if (version == 1) {
      printf("Older guest Mesa detected, fallbacking to protocol version 0\n");
      version = 0;
   }

   /* Protocol version 2 requires shm support. */
   if (!vtest_shm_check()) {
      printf("Shared memory not supported, fallbacking to protocol version 0\n");
      version = 0;
   }

   /* a multi-client server cannot serve clients older than version 3 */
   if (renderer.multi_clients && version < 3)
      return report_failed_call("protocol version too low", -EINVAL);

   ctx->protocol_version = version;

   hdr_buf[VTEST_CMD_LEN] = VCMD_PROTOCOL_VERSION_SIZE;
   hdr_buf[VTEST_CMD_ID] = VCMD_PROTOCOL_VERSION;

   version_buf[VCMD_PROTOCOL_VERSION_VERSION] = ctx->protocol_version;

   ret = vtest_block_write(ctx->out_fd, hdr_buf, sizeof(hdr_buf));
   if (ret < 0) {
      return ret;
   }

   ret = vtest_block_write(ctx->out_fd, version_buf, sizeof(version_buf));
   if (ret < 0) {
      return ret;
   }

   return 0;
}
765
/* Answer a parameter query.  The reply payload is two dwords:
 * [0] = whether the parameter is recognized, [1] = its value.
 * Returns 0 on success, -1 on read/write failure.
 */
int vtest_get_param(UNUSED uint32_t length_dw)
{
   struct vtest_context *ctx = vtest_get_current_context();
   uint32_t get_param_buf[VCMD_GET_PARAM_SIZE];
   uint32_t resp_buf[VTEST_HDR_SIZE + 2];
   uint32_t param;
   uint32_t *resp;
   int ret;

   ret = ctx->input->read(ctx->input, get_param_buf, sizeof(get_param_buf));
   if (ret != sizeof(get_param_buf))
      return -1;

   param = get_param_buf[VCMD_GET_PARAM_PARAM];

   resp_buf[VTEST_CMD_LEN] = 2;
   resp_buf[VTEST_CMD_ID] = VCMD_GET_PARAM;
   resp = &resp_buf[VTEST_CMD_DATA_START];
   switch (param) {
   case VCMD_PARAM_MAX_SYNC_QUEUE_COUNT:
      resp[0] = true;
      /* TODO until we have a timerfd */
#ifdef HAVE_EVENTFD_H
      /* a zero queue count tells the client sync queues are unavailable */
      if (!getenv("VIRGL_DISABLE_MT"))
         resp[1] = VTEST_MAX_SYNC_QUEUE_COUNT;
      else
         resp[1] = 0;
#else
      resp[1] = 0;
#endif
      break;
   default:
      resp[0] = false;
      resp[1] = 0;
      break;
   }

   ret = vtest_block_write(ctx->out_fd, resp_buf, sizeof(resp_buf));
   if (ret < 0)
      return -1;

   return 0;
}
809
/* Answer a capset query (protocol v3 flavor).  The reply is a valid flag
 * dword, followed by the caps blob only when the id/version is supported.
 * Returns 0 on success or a negative error.
 */
int vtest_get_capset(UNUSED uint32_t length_dw)
{
   struct vtest_context *ctx = vtest_get_current_context();
   uint32_t get_capset_buf[VCMD_GET_CAPSET_SIZE];
   uint32_t resp_buf[VTEST_HDR_SIZE + 1];
   uint32_t id;
   uint32_t version;
   uint32_t max_version;
   uint32_t max_size;
   void *caps;
   int ret;

   ret = ctx->input->read(ctx->input, get_capset_buf, sizeof(get_capset_buf));
   if (ret != sizeof(get_capset_buf))
      return -1;

   id = get_capset_buf[VCMD_GET_CAPSET_ID];
   version = get_capset_buf[VCMD_GET_CAPSET_VERSION];

   virgl_renderer_get_cap_set(id, &max_version, &max_size);

   /* unsupported id or version */
   if ((!max_version && !max_size) || version > max_version) {
      resp_buf[VTEST_CMD_LEN] = 1;
      resp_buf[VTEST_CMD_ID] = VCMD_GET_CAPSET;
      resp_buf[VTEST_CMD_DATA_START] = false;
      /* NOTE(review): returns the raw write result (bytes or -errno), not 0 */
      return vtest_block_write(ctx->out_fd, resp_buf, sizeof(resp_buf));
   }

   /* the caps blob must be dword-sized to fit the protocol length field */
   if (max_size % 4)
      return -EINVAL;

   caps = malloc(max_size);
   if (!caps)
      return -ENOMEM;

   virgl_renderer_fill_caps(id, version, caps);

   resp_buf[VTEST_CMD_LEN] = 1 + max_size / 4;
   resp_buf[VTEST_CMD_ID] = VCMD_GET_CAPSET;
   resp_buf[VTEST_CMD_DATA_START] = true;
   ret = vtest_block_write(ctx->out_fd, resp_buf, sizeof(resp_buf));
   if (ret >= 0)
      ret = vtest_block_write(ctx->out_fd, caps, max_size);

   free(caps);
   return ret >= 0 ? 0 : ret;
}
858
/* Handle VCMD_CONTEXT_INIT: record the client's capset choice and create
 * the virglrenderer context.  Re-initialization is a no-op when the capset
 * matches and an error otherwise.
 * Returns 0 on success or a negative error.
 */
int vtest_context_init(UNUSED uint32_t length_dw)
{
   struct vtest_context *ctx = vtest_get_current_context();
   uint32_t context_init_buf[VCMD_CONTEXT_INIT_SIZE];
   uint32_t capset_id;
   int ret;

   ret = ctx->input->read(ctx->input, context_init_buf, sizeof(context_init_buf));
   if (ret != sizeof(context_init_buf))
      return -1;

   capset_id = context_init_buf[VCMD_CONTEXT_INIT_CAPSET_ID];
   if (!capset_id)
      return -EINVAL;

   if (ctx->context_initialized) {
      return ctx->capset_id == capset_id ? 0 : -EINVAL;
   }

   ctx->capset_id = capset_id;

   return vtest_lazy_init_context(ctx);
}
882
/* Send the v2 capset to the client: a two-dword header followed by the raw
 * caps blob.  Returns 0 on success or a negative error.
 *
 * Bug fix: the return value of the caps-blob write was previously discarded
 * (the "ret" check after it was dead), so a failed transfer reported success.
 */
int vtest_send_caps2(UNUSED uint32_t length_dw)
{
   struct vtest_context *ctx = vtest_get_current_context();
   uint32_t hdr_buf[2];
   void *caps_buf;
   int ret;
   uint32_t max_ver, max_size;

   virgl_renderer_get_cap_set(2, &max_ver, &max_size);

   if (max_size == 0) {
      return -1;
   }

   caps_buf = malloc(max_size);
   if (!caps_buf) {
      return -1;
   }

   virgl_renderer_fill_caps(2, 1, caps_buf);

   hdr_buf[0] = max_size + 1;
   hdr_buf[1] = 2;
   ret = vtest_block_write(ctx->out_fd, hdr_buf, sizeof(hdr_buf));
   if (ret < 0) {
      goto end;
   }

   ret = vtest_block_write(ctx->out_fd, caps_buf, max_size);

end:
   free(caps_buf);
   return ret < 0 ? ret : 0;
}
920
/* Send the v1 capset to the client: a two-dword header followed by the raw
 * caps blob.  Returns 0 on success or a negative error.
 *
 * Bug fixes: the return value of the caps-blob write was previously
 * discarded (the "ret" check after it was dead), and an unsupported capset
 * (max_size == 0) led to malloc(0) being handed to virgl_renderer_fill_caps;
 * now it fails early, matching vtest_send_caps2.
 */
int vtest_send_caps(UNUSED uint32_t length_dw)
{
   struct vtest_context *ctx = vtest_get_current_context();
   uint32_t max_ver, max_size;
   void *caps_buf;
   uint32_t hdr_buf[2];
   int ret;

   virgl_renderer_get_cap_set(1, &max_ver, &max_size);

   if (max_size == 0) {
      return -1;
   }

   caps_buf = malloc(max_size);
   if (!caps_buf) {
      return -1;
   }

   virgl_renderer_fill_caps(1, 1, caps_buf);

   hdr_buf[0] = max_size + 1;
   hdr_buf[1] = 1;
   ret = vtest_block_write(ctx->out_fd, hdr_buf, sizeof(hdr_buf));
   if (ret < 0) {
      goto end;
   }

   ret = vtest_block_write(ctx->out_fd, caps_buf, max_size);

end:
   free(caps_buf);
   return ret < 0 ? ret : 0;
}
954
/* Decode a v1 VCMD_RESOURCE_CREATE payload into virglrenderer create args.
 * Returns 0 on success, -1 on a short read.
 */
static int vtest_create_resource_decode_args(struct vtest_context *ctx,
                                             struct virgl_renderer_resource_create_args *args)
{
   uint32_t res_create_buf[VCMD_RES_CREATE_SIZE];
   int ret;

   ret = ctx->input->read(ctx->input, &res_create_buf,
                          sizeof(res_create_buf));
   if (ret != sizeof(res_create_buf)) {
      return -1;
   }

   args->handle = res_create_buf[VCMD_RES_CREATE_RES_HANDLE];
   args->target = res_create_buf[VCMD_RES_CREATE_TARGET];
   args->format = res_create_buf[VCMD_RES_CREATE_FORMAT];
   args->bind = res_create_buf[VCMD_RES_CREATE_BIND];

   args->width = res_create_buf[VCMD_RES_CREATE_WIDTH];
   args->height = res_create_buf[VCMD_RES_CREATE_HEIGHT];
   args->depth = res_create_buf[VCMD_RES_CREATE_DEPTH];
   args->array_size = res_create_buf[VCMD_RES_CREATE_ARRAY_SIZE];
   args->last_level = res_create_buf[VCMD_RES_CREATE_LAST_LEVEL];
   args->nr_samples = res_create_buf[VCMD_RES_CREATE_NR_SAMPLES];
   args->flags = 0;

   return 0;
}
982
/* Decode a v2 VCMD_RESOURCE_CREATE2 payload; like the v1 decoder but also
 * extracts the shm data size.  Returns 0 on success, -1 on a short read.
 */
static int vtest_create_resource_decode_args2(struct vtest_context *ctx,
                                              struct virgl_renderer_resource_create_args *args,
                                              size_t *shm_size)
{
   uint32_t res_create_buf[VCMD_RES_CREATE2_SIZE];
   int ret;

   ret = ctx->input->read(ctx->input, &res_create_buf,
                          sizeof(res_create_buf));
   if (ret != sizeof(res_create_buf)) {
      return -1;
   }

   args->handle = res_create_buf[VCMD_RES_CREATE2_RES_HANDLE];
   args->target = res_create_buf[VCMD_RES_CREATE2_TARGET];
   args->format = res_create_buf[VCMD_RES_CREATE2_FORMAT];
   args->bind = res_create_buf[VCMD_RES_CREATE2_BIND];

   args->width = res_create_buf[VCMD_RES_CREATE2_WIDTH];
   args->height = res_create_buf[VCMD_RES_CREATE2_HEIGHT];
   args->depth = res_create_buf[VCMD_RES_CREATE2_DEPTH];
   args->array_size = res_create_buf[VCMD_RES_CREATE2_ARRAY_SIZE];
   args->last_level = res_create_buf[VCMD_RES_CREATE2_LAST_LEVEL];
   args->nr_samples = res_create_buf[VCMD_RES_CREATE2_NR_SAMPLES];
   args->flags = 0;

   *shm_size = res_create_buf[VCMD_RES_CREATE2_DATA_SIZE];

   return 0;
}
1013
/* Create a shared-memory region of the given size, map it, and record the
 * mapping in res->iov.  Returns the shm fd (for passing to the client) on
 * success — ownership of the fd transfers to the caller — or a negative
 * error; on failure nothing is mapped.
 */
static int vtest_create_resource_setup_shm(struct vtest_resource *res,
                                           size_t size)
{
   int fd;
   void *ptr;

   fd = vtest_new_shm(res->res_id, size);
   if (fd < 0)
      return report_failed_call("vtest_new_shm", fd);

   ptr = mmap(NULL, size, PROT_WRITE | PROT_READ, MAP_SHARED, fd, 0);
   if (ptr == MAP_FAILED) {
      close(fd);
      return -1;
   }

   res->iov.iov_base = ptr;
   res->iov.iov_len = size;

   return fd;
}
1035
/* Common path for v1/v2 resource creation: validate the handle, create the
 * virglrenderer resource, attach it to the context, send the v3 reply (the
 * server-chosen id), set up and send the shm backing when requested, and
 * register the resource in the context's table.
 * Returns 0 on success or a negative error; the resource is released on
 * every failure path.
 */
static int vtest_create_resource_internal(struct vtest_context *ctx,
                                          uint32_t cmd_id,
                                          struct virgl_renderer_resource_create_args *args,
                                          size_t shm_size)
{
   struct vtest_resource *res;
   int ret;

   if (ctx->protocol_version >= 3) {
      /* protocol v3: ids are server-allocated, client must pass 0 */
      if (args->handle)
         return -EINVAL;
   } else {
      // Check that the handle doesn't already exist.
      if (util_hash_table_get(ctx->resource_table, intptr_to_pointer(args->handle))) {
         return -EEXIST;
      }
   }

   res = vtest_new_resource(args->handle);
   if (!res)
      return -ENOMEM;
   args->handle = res->res_id;

   ret = virgl_renderer_resource_create(args, NULL, 0);
   if (ret) {
      vtest_unref_resource(res);
      return report_failed_call("virgl_renderer_resource_create", ret);
   }

   virgl_renderer_ctx_attach_resource(ctx->ctx_id, res->res_id);

   /* protocol v3: tell the client which id the server picked */
   if (ctx->protocol_version >= 3) {
      uint32_t resp_buf[VTEST_HDR_SIZE + 1] = {
         [VTEST_CMD_LEN] = 1,
         [VTEST_CMD_ID] = cmd_id,
         [VTEST_CMD_DATA_START] = res->res_id,
      };
      ret = vtest_block_write(ctx->out_fd, resp_buf, sizeof(resp_buf));
      if (ret < 0) {
         vtest_unref_resource(res);
         return ret;
      }
   }

   /* no shm for v1 resources or v2 multi-sample resources */
   if (shm_size) {
      int fd;

      fd = vtest_create_resource_setup_shm(res, shm_size);
      if (fd < 0) {
         vtest_unref_resource(res);
         return -ENOMEM;
      }

      ret = vtest_send_fd(ctx->out_fd, fd);
      if (ret < 0) {
         close(fd);
         vtest_unref_resource(res);
         return report_failed_call("vtest_send_fd", ret);
      }

      /* Closing the file descriptor does not unmap the region. */
      close(fd);

      virgl_renderer_resource_attach_iov(res->res_id, &res->iov, 1);
   }

   util_hash_table_set(ctx->resource_table, intptr_to_pointer(res->res_id), res);

   return 0;
}
1107
/* Handle VCMD_RESOURCE_CREATE (protocol v1: no shm backing). */
int vtest_create_resource(UNUSED uint32_t length_dw)
{
   struct vtest_context *ctx = vtest_get_current_context();
   struct virgl_renderer_resource_create_args args;

   int ret = vtest_create_resource_decode_args(ctx, &args);
   if (ret < 0)
      return ret;

   return vtest_create_resource_internal(ctx, VCMD_RESOURCE_CREATE, &args, 0);
}
1121
/* Handle VCMD_RESOURCE_CREATE2 (protocol v2: includes a shm data size). */
int vtest_create_resource2(UNUSED uint32_t length_dw)
{
   struct vtest_context *ctx = vtest_get_current_context();
   struct virgl_renderer_resource_create_args args;
   size_t shm_size;

   int ret = vtest_create_resource_decode_args2(ctx, &args, &shm_size);
   if (ret < 0)
      return ret;

   return vtest_create_resource_internal(ctx, VCMD_RESOURCE_CREATE2, &args, shm_size);
}
1136
/* Handle VCMD_RESOURCE_CREATE_BLOB: create a blob resource and send a
 * backing fd to the client — a shm fd for guest-backed blob memory, or an
 * exported dma-buf fd for HOST3D blobs.  Replies with the new res_id.
 */
int vtest_resource_create_blob(UNUSED uint32_t length_dw)
{
   struct vtest_context *ctx = vtest_get_current_context();
   uint32_t res_create_blob_buf[VCMD_RES_CREATE_BLOB_SIZE];
   uint32_t resp_buf[VTEST_HDR_SIZE + 1];
   struct virgl_renderer_resource_create_blob_args args;
   struct vtest_resource *res;
   int fd;
   int ret;

   ret = ctx->input->read(ctx->input, res_create_blob_buf,
                          sizeof(res_create_blob_buf));
   if (ret != sizeof(res_create_blob_buf))
      return -1;

   memset(&args, 0, sizeof(args));
   args.blob_mem = res_create_blob_buf[VCMD_RES_CREATE_BLOB_TYPE];
   args.blob_flags = res_create_blob_buf[VCMD_RES_CREATE_BLOB_FLAGS];
   /* 64-bit size and blob id arrive as lo/hi dword pairs */
   args.size = res_create_blob_buf[VCMD_RES_CREATE_BLOB_SIZE_LO];
   args.size |= (uint64_t)res_create_blob_buf[VCMD_RES_CREATE_BLOB_SIZE_HI] << 32;
   args.blob_id = res_create_blob_buf[VCMD_RES_CREATE_BLOB_ID_LO];
   args.blob_id |= (uint64_t)res_create_blob_buf[VCMD_RES_CREATE_BLOB_ID_HI] << 32;

   res = vtest_new_resource(0);
   if (!res)
      return -ENOMEM;

   args.res_handle = res->res_id;
   args.ctx_id = ctx->ctx_id;

   switch (args.blob_mem) {
   case VIRGL_RENDERER_BLOB_MEM_GUEST:
   case VIRGL_RENDERER_BLOB_MEM_HOST3D_GUEST:
      /* guest-backed blobs are backed by a shm region mapped into res->iov */
      fd = vtest_create_resource_setup_shm(res, args.size);
      if (fd < 0) {
         vtest_unref_resource(res);
         return -ENOMEM;
      }

      args.iovecs = &res->iov;
      args.num_iovs = 1;
      break;
   case VIRGL_RENDERER_BLOB_MEM_HOST3D:
      /* the fd comes from the dma-buf export after creation */
      fd = -1;
      break;
   default:
      /* fix: this error path previously leaked res */
      vtest_unref_resource(res);
      return -EINVAL;
   }

   ret = virgl_renderer_resource_create_blob(&args);
   if (ret) {
      if (fd >= 0)
         close(fd);
      vtest_unref_resource(res);
      return report_failed_call("virgl_renderer_resource_create_blob", ret);
   }

   /* need dmabuf */
   if (args.blob_mem == VIRGL_RENDERER_BLOB_MEM_HOST3D) {
      uint32_t fd_type;
      ret = virgl_renderer_resource_export_blob(res->res_id, &fd_type, &fd);
      if (ret) {
         vtest_unref_resource(res);
         return report_failed_call("virgl_renderer_resource_export_blob", ret);
      }
      if (fd_type != VIRGL_RENDERER_BLOB_FD_TYPE_DMABUF) {
         close(fd);
         vtest_unref_resource(res);
         return report_failed_call("virgl_renderer_resource_export_blob", -EINVAL);
      }
   }

   virgl_renderer_ctx_attach_resource(ctx->ctx_id, res->res_id);

   resp_buf[VTEST_CMD_LEN] = 1;
   resp_buf[VTEST_CMD_ID] = VCMD_RESOURCE_CREATE_BLOB;
   resp_buf[VTEST_CMD_DATA_START] = res->res_id;
   ret = vtest_block_write(ctx->out_fd, resp_buf, sizeof(resp_buf));
   if (ret < 0) {
      /* fd is valid here for both paths; guard anyway for robustness */
      if (fd >= 0)
         close(fd);
      vtest_unref_resource(res);
      return ret;
   }

   ret = vtest_send_fd(ctx->out_fd, fd);
   if (ret < 0) {
      if (fd >= 0)
         close(fd);
      vtest_unref_resource(res);
      return report_failed_call("vtest_send_fd", ret);
   }

   /* Closing the file descriptor does not unmap the region. */
   close(fd);

   util_hash_table_set(ctx->resource_table, intptr_to_pointer(res->res_id), res);

   return 0;
}
1235
/* Handle VCMD_RESOURCE_UNREF: drop this context's reference to a resource
 * by removing it from the per-context resource table.
 */
int vtest_resource_unref(UNUSED uint32_t length_dw)
{
   struct vtest_context *ctx = vtest_get_current_context();
   uint32_t buf[VCMD_RES_UNREF_SIZE];
   int n;

   n = ctx->input->read(ctx->input, &buf, sizeof(buf));
   if (n != sizeof(buf))
      return -1;

   util_hash_table_remove(ctx->resource_table,
                          intptr_to_pointer(buf[VCMD_RES_UNREF_RES_HANDLE]));

   return 0;
}
1254
vtest_submit_cmd(uint32_t length_dw)1255 int vtest_submit_cmd(uint32_t length_dw)
1256 {
1257 struct vtest_context *ctx = vtest_get_current_context();
1258 uint32_t *cbuf;
1259 int ret;
1260
1261 if (length_dw > renderer.max_length / 4) {
1262 return -1;
1263 }
1264
1265 cbuf = malloc(length_dw * 4);
1266 if (!cbuf) {
1267 return -1;
1268 }
1269
1270 ret = ctx->input->read(ctx->input, cbuf, length_dw * 4);
1271 if (ret != (int)length_dw * 4) {
1272 free(cbuf);
1273 return -1;
1274 }
1275
1276 ret = virgl_renderer_submit_cmd(cbuf, ctx->ctx_id, length_dw);
1277
1278 free(cbuf);
1279 if (ret)
1280 return -1;
1281
1282 vtest_create_implicit_fence(&renderer);
1283 return 0;
1284 }
1285
/* Decoded arguments shared by all transfer-command handlers (v1 and v2). */
struct vtest_transfer_args {
   uint32_t handle;        /* client resource handle */
   uint32_t level;         /* resource level to transfer */
   uint32_t stride;        /* row stride; 0 for v2 transfers */
   uint32_t layer_stride;  /* layer stride; 0 for v2 transfers */
   struct virgl_box box;   /* region of the resource to transfer */
   uint32_t offset;        /* offset into the backing store; 0 for v1 */
};
1294
vtest_transfer_decode_args(struct vtest_context * ctx,struct vtest_transfer_args * args,uint32_t * data_size)1295 static int vtest_transfer_decode_args(struct vtest_context *ctx,
1296 struct vtest_transfer_args *args,
1297 uint32_t *data_size)
1298 {
1299 uint32_t thdr_buf[VCMD_TRANSFER_HDR_SIZE];
1300 int ret;
1301
1302 ret = ctx->input->read(ctx->input, thdr_buf, sizeof(thdr_buf));
1303 if (ret != sizeof(thdr_buf)) {
1304 return -1;
1305 }
1306
1307 args->handle = thdr_buf[VCMD_TRANSFER_RES_HANDLE];
1308 args->level = thdr_buf[VCMD_TRANSFER_LEVEL];
1309 args->stride = thdr_buf[VCMD_TRANSFER_STRIDE];
1310 args->layer_stride = thdr_buf[VCMD_TRANSFER_LAYER_STRIDE];
1311 args->box.x = thdr_buf[VCMD_TRANSFER_X];
1312 args->box.y = thdr_buf[VCMD_TRANSFER_Y];
1313 args->box.z = thdr_buf[VCMD_TRANSFER_Z];
1314 args->box.w = thdr_buf[VCMD_TRANSFER_WIDTH];
1315 args->box.h = thdr_buf[VCMD_TRANSFER_HEIGHT];
1316 args->box.d = thdr_buf[VCMD_TRANSFER_DEPTH];
1317 args->offset = 0;
1318
1319 *data_size = thdr_buf[VCMD_TRANSFER_DATA_SIZE];
1320
1321 if (*data_size > renderer.max_length) {
1322 return -ENOMEM;
1323 }
1324
1325 return 0;
1326 }
1327
vtest_transfer_decode_args2(struct vtest_context * ctx,struct vtest_transfer_args * args)1328 static int vtest_transfer_decode_args2(struct vtest_context *ctx,
1329 struct vtest_transfer_args *args)
1330 {
1331 uint32_t thdr_buf[VCMD_TRANSFER2_HDR_SIZE];
1332 int ret;
1333
1334 ret = ctx->input->read(ctx->input, thdr_buf, sizeof(thdr_buf));
1335 if (ret != sizeof(thdr_buf)) {
1336 return -1;
1337 }
1338
1339 args->handle = thdr_buf[VCMD_TRANSFER2_RES_HANDLE];
1340 args->level = thdr_buf[VCMD_TRANSFER2_LEVEL];
1341 args->stride = 0;
1342 args->layer_stride = 0;
1343 args->box.x = thdr_buf[VCMD_TRANSFER2_X];
1344 args->box.y = thdr_buf[VCMD_TRANSFER2_Y];
1345 args->box.z = thdr_buf[VCMD_TRANSFER2_Z];
1346 args->box.w = thdr_buf[VCMD_TRANSFER2_WIDTH];
1347 args->box.h = thdr_buf[VCMD_TRANSFER2_HEIGHT];
1348 args->box.d = thdr_buf[VCMD_TRANSFER2_DEPTH];
1349 args->offset = thdr_buf[VCMD_TRANSFER2_OFFSET];
1350
1351 return 0;
1352 }
1353
vtest_transfer_get_internal(struct vtest_context * ctx,struct vtest_transfer_args * args,uint32_t data_size,bool do_transfer)1354 static int vtest_transfer_get_internal(struct vtest_context *ctx,
1355 struct vtest_transfer_args *args,
1356 uint32_t data_size,
1357 bool do_transfer)
1358 {
1359 struct vtest_resource *res;
1360 struct iovec data_iov;
1361 int ret = 0;
1362
1363 res = util_hash_table_get(ctx->resource_table,
1364 intptr_to_pointer(args->handle));
1365 if (!res) {
1366 return report_failed_call("util_hash_table_get", -ESRCH);
1367 }
1368
1369 if (data_size) {
1370 data_iov.iov_len = data_size;
1371 data_iov.iov_base = malloc(data_size);
1372 if (!data_iov.iov_base) {
1373 return -ENOMEM;
1374 }
1375 } else {
1376 if (args->offset >= res->iov.iov_len) {
1377 return report_failure("offset larger then length of backing store", -EFAULT);
1378 }
1379 }
1380
1381 if (do_transfer) {
1382 ret = virgl_renderer_transfer_read_iov(res->res_id,
1383 ctx->ctx_id,
1384 args->level,
1385 args->stride,
1386 args->layer_stride,
1387 &args->box,
1388 args->offset,
1389 data_size ? &data_iov : NULL,
1390 data_size ? 1 : 0);
1391 if (ret) {
1392 report_failed_call("virgl_renderer_transfer_read_iov", ret);
1393 }
1394 } else if (data_size) {
1395 memset(data_iov.iov_base, 0, data_iov.iov_len);
1396 }
1397
1398 if (data_size) {
1399 ret = vtest_block_write(ctx->out_fd, data_iov.iov_base, data_iov.iov_len);
1400 if (ret > 0)
1401 ret = 0;
1402
1403 free(data_iov.iov_base);
1404 }
1405
1406 return ret;
1407 }
1408
vtest_transfer_put_internal(struct vtest_context * ctx,struct vtest_transfer_args * args,uint32_t data_size,bool do_transfer)1409 static int vtest_transfer_put_internal(struct vtest_context *ctx,
1410 struct vtest_transfer_args *args,
1411 uint32_t data_size,
1412 bool do_transfer)
1413 {
1414 struct vtest_resource *res;
1415 struct iovec data_iov;
1416 int ret = 0;
1417
1418 res = util_hash_table_get(ctx->resource_table,
1419 intptr_to_pointer(args->handle));
1420 if (!res) {
1421 return report_failed_call("util_hash_table_get", -ESRCH);
1422 }
1423
1424 if (data_size) {
1425 data_iov.iov_len = data_size;
1426 data_iov.iov_base = malloc(data_size);
1427 if (!data_iov.iov_base) {
1428 return -ENOMEM;
1429 }
1430
1431 ret = ctx->input->read(ctx->input, data_iov.iov_base, data_iov.iov_len);
1432 if (ret < 0) {
1433 return ret;
1434 }
1435 }
1436
1437 if (do_transfer) {
1438 ret = virgl_renderer_transfer_write_iov(res->res_id,
1439 ctx->ctx_id,
1440 args->level,
1441 args->stride,
1442 args->layer_stride,
1443 &args->box,
1444 args->offset,
1445 data_size ? &data_iov : NULL,
1446 data_size ? 1 : 0);
1447 if (ret) {
1448 report_failed_call("virgl_renderer_transfer_write_iov", ret);
1449 }
1450 }
1451
1452 if (data_size) {
1453 free(data_iov.iov_base);
1454 }
1455
1456 return ret;
1457 }
1458
/* Handle VCMD_TRANSFER_GET: decode v1 args and read data back to the client. */
int vtest_transfer_get(UNUSED uint32_t length_dw)
{
   struct vtest_context *ctx = vtest_get_current_context();
   struct vtest_transfer_args args;
   uint32_t data_size;
   int err;

   err = vtest_transfer_decode_args(ctx, &args, &data_size);
   if (err < 0)
      return err;

   return vtest_transfer_get_internal(ctx, &args, data_size, true);
}
1473
/* Handle the nop variant of VCMD_TRANSFER_GET: consume the request and
 * reply with zeroed data without touching the renderer.
 */
int vtest_transfer_get_nop(UNUSED uint32_t length_dw)
{
   struct vtest_context *ctx = vtest_get_current_context();
   struct vtest_transfer_args args;
   uint32_t data_size;
   int err;

   err = vtest_transfer_decode_args(ctx, &args, &data_size);
   if (err < 0)
      return err;

   return vtest_transfer_get_internal(ctx, &args, data_size, false);
}
1488
/* Handle VCMD_TRANSFER_PUT: decode v1 args and write client data into the
 * resource.
 */
int vtest_transfer_put(UNUSED uint32_t length_dw)
{
   struct vtest_context *ctx = vtest_get_current_context();
   struct vtest_transfer_args args;
   uint32_t data_size;
   int err;

   err = vtest_transfer_decode_args(ctx, &args, &data_size);
   if (err < 0)
      return err;

   return vtest_transfer_put_internal(ctx, &args, data_size, true);
}
1503
/* Handle the nop variant of VCMD_TRANSFER_PUT: consume the request and its
 * payload without touching the renderer.
 */
int vtest_transfer_put_nop(UNUSED uint32_t length_dw)
{
   struct vtest_context *ctx = vtest_get_current_context();
   struct vtest_transfer_args args;
   uint32_t data_size;
   int err;

   err = vtest_transfer_decode_args(ctx, &args, &data_size);
   if (err < 0)
      return err;

   return vtest_transfer_put_internal(ctx, &args, data_size, false);
}
1518
/* Handle VCMD_TRANSFER_GET2: v2 read-back into the resource's shm store. */
int vtest_transfer_get2(UNUSED uint32_t length_dw)
{
   struct vtest_context *ctx = vtest_get_current_context();
   struct vtest_transfer_args args;
   int err;

   err = vtest_transfer_decode_args2(ctx, &args);
   if (err < 0)
      return err;

   /* v2 has no inline payload: data_size is always 0 */
   return vtest_transfer_get_internal(ctx, &args, 0, true);
}
1532
/* Handle the nop variant of VCMD_TRANSFER_GET2. */
int vtest_transfer_get2_nop(UNUSED uint32_t length_dw)
{
   struct vtest_context *ctx = vtest_get_current_context();
   struct vtest_transfer_args args;
   int err;

   err = vtest_transfer_decode_args2(ctx, &args);
   if (err < 0)
      return err;

   return vtest_transfer_get_internal(ctx, &args, 0, false);
}
1546
/* Handle VCMD_TRANSFER_PUT2: v2 write sourced from the resource's shm store. */
int vtest_transfer_put2(UNUSED uint32_t length_dw)
{
   struct vtest_context *ctx = vtest_get_current_context();
   struct vtest_transfer_args args;
   int err;

   err = vtest_transfer_decode_args2(ctx, &args);
   if (err < 0)
      return err;

   /* v2 has no inline payload: data_size is always 0 */
   return vtest_transfer_put_internal(ctx, &args, 0, true);
}
1560
/* Handle the nop variant of VCMD_TRANSFER_PUT2. */
int vtest_transfer_put2_nop(UNUSED uint32_t length_dw)
{
   struct vtest_context *ctx = vtest_get_current_context();
   struct vtest_transfer_args args;
   int err;

   err = vtest_transfer_decode_args2(ctx, &args);
   if (err < 0)
      return err;

   return vtest_transfer_put_internal(ctx, &args, 0, false);
}
1574
/* Handle VCMD_RESOURCE_BUSY_WAIT: report whether implicit fences are still
 * pending and, when VCMD_BUSY_WAIT_FLAG_WAIT is set, block until they all
 * complete.  Replies with a one-dword payload: 1 if busy, 0 otherwise.
 */
int vtest_resource_busy_wait(UNUSED uint32_t length_dw)
{
   struct vtest_context *ctx = vtest_get_current_context();
   uint32_t bw_buf[VCMD_BUSY_WAIT_SIZE];
   int ret, fd;
   int flags;
   uint32_t hdr_buf[VTEST_HDR_SIZE];
   uint32_t reply_buf[1];
   bool busy = false;

   ret = ctx->input->read(ctx->input, &bw_buf, sizeof(bw_buf));
   if (ret != sizeof(bw_buf)) {
      return -1;
   }

   /* clients often send VCMD_PING_PROTOCOL_VERSION followed by
    * VCMD_RESOURCE_BUSY_WAIT with handle 0 to figure out if
    * VCMD_PING_PROTOCOL_VERSION is supported. We need to make a special case
    * for that.
    */
   if (!ctx->context_initialized && bw_buf[VCMD_BUSY_WAIT_HANDLE])
      return -1;

   /* handle = bw_buf[VCMD_BUSY_WAIT_HANDLE]; unused as of now */
   flags = bw_buf[VCMD_BUSY_WAIT_FLAGS];

   do {
      /* busy while submitted implicit fences have not all completed */
      busy = renderer.implicit_fence_completed !=
             renderer.implicit_fence_submitted;
      if (!busy || !(flags & VCMD_BUSY_WAIT_FLAG_WAIT))
         break;

      /* TODO this is bad when there are multiple clients */
      fd = virgl_renderer_get_poll_fd();
      if (fd != -1) {
         /* sleep until the renderer has progress to report */
         vtest_wait_for_fd_read(fd);
      }
      virgl_renderer_poll();
   } while (true);

   hdr_buf[VTEST_CMD_LEN] = 1;
   hdr_buf[VTEST_CMD_ID] = VCMD_RESOURCE_BUSY_WAIT;
   reply_buf[0] = busy ? 1 : 0;

   ret = vtest_block_write(ctx->out_fd, hdr_buf, sizeof(hdr_buf));
   if (ret < 0) {
      return ret;
   }

   ret = vtest_block_write(ctx->out_fd, reply_buf, sizeof(reply_buf));
   if (ret < 0) {
      return ret;
   }

   return 0;
}
1631
/* Handle the nop variant of VCMD_RESOURCE_BUSY_WAIT: consume the request
 * and always answer "not busy".
 */
int vtest_resource_busy_wait_nop(UNUSED uint32_t length_dw)
{
   struct vtest_context *ctx = vtest_get_current_context();
   uint32_t bw_buf[VCMD_BUSY_WAIT_SIZE];
   uint32_t resp[VTEST_HDR_SIZE + 1];
   int n;

   n = ctx->input->read(ctx->input, &bw_buf, sizeof(bw_buf));
   if (n != sizeof(bw_buf))
      return -1;

   resp[VTEST_CMD_LEN] = 1;
   resp[VTEST_CMD_ID] = VCMD_RESOURCE_BUSY_WAIT;
   resp[VTEST_CMD_DATA_START] = 0;

   n = vtest_block_write(ctx->out_fd, resp, sizeof(resp));

   return n < 0 ? n : 0;
}
1655
/* Give the renderer a chance to retire implicit fences without blocking;
 * called from the server loop on behalf of busy-wait clients.
 */
void vtest_poll_resource_busy_wait(void)
{
   /* poll the implicit fences */
   virgl_renderer_poll();
}
1661
vtest_gettime(uint32_t offset_ms)1662 static uint64_t vtest_gettime(uint32_t offset_ms)
1663 {
1664 const uint64_t ns_per_ms = 1000000;
1665 const uint64_t ns_per_s = ns_per_ms * 1000;
1666 struct timespec ts;
1667 uint64_t ns;
1668
1669 if (offset_ms > INT32_MAX)
1670 return UINT64_MAX;
1671
1672 clock_gettime(CLOCK_MONOTONIC, &ts);
1673 ns = ns_per_s * ts.tv_sec + ts.tv_nsec;
1674
1675 return ns + ns_per_ms * offset_ms;
1676 }
1677
1678 /* TODO this is slow */
/* Store value into sync and wake any waiter whose wait condition becomes
 * satisfied.  Expired waits are garbage collected during the scan.
 * NOTE(review): a value lower than the current one is stored but does not
 * wake waiters — presumably a deliberate "reset" path; confirm against the
 * vtest protocol.
 */
static void vtest_signal_sync(struct vtest_sync *sync, uint64_t value)
{
   struct vtest_context *ctx;
   uint64_t now;

   /* non-increasing update: store it and return without signaling */
   if (sync->value >= value) {
      sync->value = value;
      return;
   }
   sync->value = value;

   now = vtest_gettime(0);

   /* scan every active context's pending waits for ones this sync unblocks */
   LIST_FOR_EACH_ENTRY(ctx, &renderer.active_contexts, head) {
      struct vtest_sync_wait *wait, *tmp;
      LIST_FOR_EACH_ENTRY_SAFE(wait, tmp, &ctx->sync_waits, head) {
         bool is_ready = false;
         uint32_t i;

         /* garbage collect */
         if (wait->valid_before < now) {
            list_del(&wait->head);
            vtest_free_sync_wait(wait);
            continue;
         }

         for (i = 0; i < wait->count; i++) {
            /* only entries for this sync whose target value is reached */
            if (wait->syncs[i] != sync || wait->values[i] > value)
               continue;

            vtest_unref_sync(wait->syncs[i]);
            wait->syncs[i] = NULL;

            wait->signaled_count++;
            /* ANY-flagged waits complete on the first signaled entry */
            if (wait->signaled_count == wait->count ||
                (wait->flags & VCMD_SYNC_WAIT_FLAG_ANY)) {
               is_ready = true;
               break;
            }
         }

         if (is_ready) {
            const uint64_t val = 1;

            /* wake the client via its eventfd, then drop the wait */
            list_del(&wait->head);
            write(wait->fd, &val, sizeof(val));
            vtest_free_sync_wait(wait);
         }
      }
   }
}
1730
/* Retire queued submits up to and including to_submit: signal and unref
 * each submit's syncs in order, freeing the submits as we go.  Earlier
 * submits on the queue are retired first so signaling stays in submission
 * order.
 */
static void vtest_signal_sync_queue(struct vtest_sync_queue *queue,
                                    struct vtest_sync_queue_submit *to_submit)
{
   struct vtest_sync_queue_submit *submit, *tmp;

   LIST_FOR_EACH_ENTRY_SAFE(submit, tmp, &queue->submits, head) {
      uint32_t i;

      list_del(&submit->head);

      for (i = 0; i < submit->count; i++) {
         vtest_signal_sync(submit->syncs[i], submit->values[i]);
         vtest_unref_sync(submit->syncs[i]);
      }
      free(submit);

      /* stop after the submit whose fence fired */
      if (submit == to_submit)
         break;
   }
}
1751
/* Handle VCMD_SYNC_CREATE: create a sync object with the given initial
 * 64-bit value and reply with its id.
 */
int vtest_sync_create(UNUSED uint32_t length_dw)
{
   struct vtest_context *ctx = vtest_get_current_context();
   uint32_t cmd[VCMD_SYNC_CREATE_SIZE];
   uint32_t resp[VTEST_HDR_SIZE + 1];
   struct vtest_sync *sync;
   uint64_t initial_value;
   int n;

   n = ctx->input->read(ctx->input, cmd, sizeof(cmd));
   if (n != sizeof(cmd))
      return -1;

   /* 64-bit value arrives as lo/hi dwords */
   initial_value = (uint64_t)cmd[VCMD_SYNC_CREATE_VALUE_HI] << 32 |
                   cmd[VCMD_SYNC_CREATE_VALUE_LO];

   sync = vtest_new_sync(initial_value);
   if (!sync)
      return -ENOMEM;

   resp[VTEST_CMD_LEN] = 1;
   resp[VTEST_CMD_ID] = VCMD_SYNC_CREATE;
   resp[VTEST_CMD_DATA_START] = sync->sync_id;

   n = vtest_block_write(ctx->out_fd, resp, sizeof(resp));
   if (n < 0) {
      vtest_unref_sync(sync);
      return n;
   }

   util_hash_table_set(ctx->sync_table, intptr_to_pointer(sync->sync_id), sync);

   return 0;
}
1785
/* Handle VCMD_SYNC_UNREF: drop this context's reference to a sync object
 * by removing it from the per-context sync table.
 */
int vtest_sync_unref(UNUSED uint32_t length_dw)
{
   struct vtest_context *ctx = vtest_get_current_context();
   uint32_t cmd[VCMD_SYNC_UNREF_SIZE];
   int n;

   n = ctx->input->read(ctx->input, &cmd, sizeof(cmd));
   if (n != sizeof(cmd))
      return -1;

   util_hash_table_remove(ctx->sync_table,
                          intptr_to_pointer(cmd[VCMD_SYNC_UNREF_ID]));

   return 0;
}
1804
/* Handle VCMD_SYNC_READ: reply with the sync object's current 64-bit
 * value, low dword first.
 */
int vtest_sync_read(UNUSED uint32_t length_dw)
{
   struct vtest_context *ctx = vtest_get_current_context();
   uint32_t cmd[VCMD_SYNC_READ_SIZE];
   uint32_t resp[VTEST_HDR_SIZE + 2];
   struct vtest_sync *sync;
   int n;

   n = ctx->input->read(ctx->input, &cmd, sizeof(cmd));
   if (n != sizeof(cmd))
      return -1;

   sync = util_hash_table_get(ctx->sync_table,
                              intptr_to_pointer(cmd[VCMD_SYNC_READ_ID]));
   if (!sync)
      return -EEXIST;

   resp[VTEST_CMD_LEN] = 2;
   resp[VTEST_CMD_ID] = VCMD_SYNC_READ;
   resp[VTEST_CMD_DATA_START] = (uint32_t)sync->value;
   resp[VTEST_CMD_DATA_START + 1] = (uint32_t)(sync->value >> 32);

   n = vtest_block_write(ctx->out_fd, resp, sizeof(resp));

   return n < 0 ? n : 0;
}
1837
/* Decode element `index` of an array of (sync id, value lo, value hi)
 * dword triplets: store the 64-bit sync value in *value and return the
 * 32-bit sync id.
 */
static uint32_t vtest_sync_decode_id_and_value(const uint32_t *data,
                                               uint32_t index,
                                               uint64_t *value)
{
   const uint32_t *entry = &data[index * 3];

   *value = ((uint64_t)entry[2] << 32) | entry[1];
   return entry[0];
}
1849
/* Handle VCMD_SYNC_WRITE: set a sync object to a client-supplied 64-bit
 * value, waking any waiters the new value satisfies.
 */
int vtest_sync_write(UNUSED uint32_t length_dw)
{
   struct vtest_context *ctx = vtest_get_current_context();
   uint32_t cmd[VCMD_SYNC_WRITE_SIZE];
   struct vtest_sync *sync;
   uint32_t sync_id;
   uint64_t value;
   int n;

   n = ctx->input->read(ctx->input, &cmd, sizeof(cmd));
   if (n != sizeof(cmd))
      return -1;

   sync_id = vtest_sync_decode_id_and_value(cmd, 0, &value);

   sync = util_hash_table_get(ctx->sync_table, intptr_to_pointer(sync_id));
   if (!sync)
      return -EEXIST;

   vtest_signal_sync(sync, value);

   return 0;
}
1875
/* Initialize *wait from a decoded VCMD_SYNC_WAIT request: create the
 * eventfd the client will poll, compute the expiry deadline, and take a
 * reference on every listed sync that has not yet reached its target
 * value (already-satisfied entries are skipped).  Returns -EEXIST if any
 * sync id is unknown.
 * NOTE(review): on the -EEXIST path this calls vtest_free_sync_wait(wait)
 * while the caller also frees wait on error — confirm vtest_free_sync_wait
 * does not free the wait struct itself, otherwise this is a double free.
 */
static int vtest_sync_wait_init(struct vtest_sync_wait *wait,
                                struct vtest_context *ctx,
                                uint32_t flags,
                                uint32_t timeout,
                                const uint32_t *syncs,
                                uint32_t sync_count)
{
   uint32_t i;

#ifdef HAVE_EVENTFD_H
   wait->fd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
#else
   /* TODO pipe */
   wait->fd = -1;
#endif
   if (wait->fd < 0)
      return -ENODEV;

   wait->flags = flags;
   /* timeout > INT32_MAX maps to UINT64_MAX, i.e. never expires */
   wait->valid_before = vtest_gettime(timeout);

   wait->count = 0;
   wait->signaled_count = 0;
   for (i = 0; i < sync_count; i++) {
      struct vtest_sync *sync;
      uint32_t sync_id;
      uint64_t value;

      sync_id = vtest_sync_decode_id_and_value(syncs, i, &value);

      sync = util_hash_table_get(ctx->sync_table, intptr_to_pointer(sync_id));
      if (!sync)
         break;

      /* skip signaled */
      if (sync->value < value) {
         wait->syncs[wait->count] = vtest_ref_sync(sync);
         wait->values[wait->count] = value;
         wait->count++;
      }
   }

   /* a bad sync id broke the loop early: undo and report */
   if (i < sync_count) {
      vtest_free_sync_wait(wait);
      return -EEXIST;
   }

   return 0;
}
1925
vtest_sync_wait(uint32_t length_dw)1926 int vtest_sync_wait(uint32_t length_dw)
1927 {
1928 struct vtest_context *ctx = vtest_get_current_context();
1929 uint32_t resp_buf[VTEST_HDR_SIZE];
1930 uint32_t sync_count;
1931 uint32_t *sync_wait_buf;
1932 uint32_t flags;
1933 uint32_t timeout;
1934 struct vtest_sync_wait *wait;
1935 bool is_ready;
1936 int ret;
1937
1938 if (length_dw > renderer.max_length / 4)
1939 return -EINVAL;
1940
1941 if ((length_dw - 2) % 3)
1942 return -EINVAL;
1943 sync_count = (length_dw - 2) / 3;
1944
1945 sync_wait_buf = malloc(length_dw * 4);
1946 if (!sync_wait_buf)
1947 return -ENOMEM;
1948
1949 ret = ctx->input->read(ctx->input, sync_wait_buf, length_dw * 4);
1950 if (ret != (int)length_dw * 4) {
1951 free(sync_wait_buf);
1952 return -1;
1953 }
1954
1955 flags = sync_wait_buf[VCMD_SYNC_WAIT_FLAGS];
1956 timeout = sync_wait_buf[VCMD_SYNC_WAIT_TIMEOUT];
1957
1958 wait = malloc(sizeof(*wait) +
1959 sizeof(*wait->syncs) * sync_count +
1960 sizeof(*wait->values) * sync_count);
1961 if (!wait) {
1962 free(sync_wait_buf);
1963 return -ENOMEM;
1964 }
1965 wait->syncs = (void *)&wait[1];
1966 wait->values = (void *)&wait->syncs[sync_count];
1967
1968 ret = vtest_sync_wait_init(wait, ctx, flags, timeout,
1969 sync_wait_buf + 2, sync_count);
1970 free(sync_wait_buf);
1971
1972 if (ret) {
1973 free(wait);
1974 return ret;
1975 }
1976
1977 is_ready = !wait->count;
1978 if ((wait->flags & VCMD_SYNC_WAIT_FLAG_ANY) && wait->count < sync_count)
1979 is_ready = true;
1980
1981 if (is_ready) {
1982 const uint64_t val = 1;
1983 write(wait->fd, &val, sizeof(val));
1984 }
1985
1986 resp_buf[VTEST_CMD_LEN] = 0;
1987 resp_buf[VTEST_CMD_ID] = VCMD_SYNC_WAIT;
1988 ret = vtest_block_write(ctx->out_fd, resp_buf, sizeof(resp_buf));
1989 if (ret >= 0)
1990 ret = vtest_send_fd(ctx->out_fd, wait->fd);
1991
1992 if (ret || is_ready || !timeout)
1993 vtest_free_sync_wait(wait);
1994 else
1995 list_addtail(&wait->head, &ctx->sync_waits);
1996
1997 return ret;
1998 }
1999
/* Submit one VCMD_SUBMIT_CMD2 batch: hand the command stream to the
 * renderer, then either signal the batch's syncs immediately or, for
 * SYNC_QUEUE batches, defer signaling to a renderer fence tied to the
 * batch's sync queue.  Returns -EEXIST if any sync id is unknown.
 */
static int vtest_submit_cmd2_batch(struct vtest_context *ctx,
                                   const struct vcmd_submit_cmd2_batch *batch,
                                   const uint32_t *cmds,
                                   const uint32_t *syncs)
{
   struct vtest_sync_queue_submit *submit = NULL;
   uint32_t i;
   int ret;

   ret = virgl_renderer_submit_cmd((void *)cmds, ctx->ctx_id, batch->cmd_size);
   if (ret)
      return -EINVAL;

   if (!batch->sync_count)
      return 0;

   if (batch->flags & VCMD_SUBMIT_CMD2_FLAG_SYNC_QUEUE) {
      /* sync pointers and values are tacked onto a single allocation */
      submit = malloc(sizeof(*submit) +
                      sizeof(*submit->syncs) * batch->sync_count +
                      sizeof(*submit->values) * batch->sync_count);
      if (!submit)
         return -ENOMEM;

      submit->count = batch->sync_count;
      submit->syncs = (void *)&submit[1];
      submit->values = (void *)&submit->syncs[batch->sync_count];
   }

   for (i = 0; i < batch->sync_count; i++) {
      struct vtest_sync *sync;
      uint32_t sync_id;
      uint64_t value;

      sync_id = vtest_sync_decode_id_and_value(syncs, i, &value);

      sync = util_hash_table_get(ctx->sync_table, intptr_to_pointer(sync_id));
      if (!sync)
         break;

      if (submit) {
         /* deferred: signaled when the queue fence fires */
         submit->syncs[i] = vtest_ref_sync(sync);
         submit->values[i] = value;
      } else {
         /* immediate signaling */
         vtest_signal_sync(sync, value);
      }
   }

   /* a bad sync id broke the loop early: drop the refs taken so far */
   if (i < batch->sync_count) {
      if (submit) {
         submit->count = i;
         vtest_free_sync_queue_submit(submit);
      }
      return -EEXIST;
   }

   if (submit) {
      struct vtest_sync_queue *queue = &ctx->sync_queues[batch->sync_queue_index];

      submit->sync_queue = queue;
      /* the fence callback retires this submit via its sync queue */
      ret = virgl_renderer_context_create_fence(ctx->ctx_id,
                                                VIRGL_RENDERER_FENCE_FLAG_MERGEABLE,
                                                batch->sync_queue_id,
                                                submit);
      if (ret) {
         vtest_free_sync_queue_submit(submit);
         return ret;
      }

      list_addtail(&submit->head, &queue->submits);
   }

   return 0;
}
2073
vtest_submit_cmd2(uint32_t length_dw)2074 int vtest_submit_cmd2(uint32_t length_dw)
2075 {
2076 struct vtest_context *ctx = vtest_get_current_context();
2077 uint32_t *submit_cmd2_buf;
2078 uint32_t batch_count;
2079 uint32_t i;
2080 int ret;
2081
2082 if (length_dw > renderer.max_length / 4)
2083 return -EINVAL;
2084
2085 submit_cmd2_buf = malloc(length_dw * 4);
2086 if (!submit_cmd2_buf)
2087 return -ENOMEM;
2088
2089 ret = ctx->input->read(ctx->input, submit_cmd2_buf, length_dw * 4);
2090 if (ret != (int)length_dw * 4) {
2091 free(submit_cmd2_buf);
2092 return -1;
2093 }
2094
2095 batch_count = submit_cmd2_buf[VCMD_SUBMIT_CMD2_BATCH_COUNT];
2096 if (VCMD_SUBMIT_CMD2_BATCH_COUNT + 8 * batch_count > length_dw) {
2097 free(submit_cmd2_buf);
2098 return -EINVAL;
2099 }
2100
2101 for (i = 0; i < batch_count; i++) {
2102 const struct vcmd_submit_cmd2_batch batch = {
2103 .flags = submit_cmd2_buf[VCMD_SUBMIT_CMD2_BATCH_FLAGS(i)],
2104 .cmd_offset = submit_cmd2_buf[VCMD_SUBMIT_CMD2_BATCH_CMD_OFFSET(i)],
2105 .cmd_size = submit_cmd2_buf[VCMD_SUBMIT_CMD2_BATCH_CMD_SIZE(i)],
2106 .sync_offset = submit_cmd2_buf[VCMD_SUBMIT_CMD2_BATCH_SYNC_OFFSET(i)],
2107 .sync_count = submit_cmd2_buf[VCMD_SUBMIT_CMD2_BATCH_SYNC_COUNT(i)],
2108 .sync_queue_index = submit_cmd2_buf[VCMD_SUBMIT_CMD2_BATCH_SYNC_QUEUE_INDEX(i)],
2109 .sync_queue_id = submit_cmd2_buf[VCMD_SUBMIT_CMD2_BATCH_SYNC_QUEUE_ID_LO(i)] |
2110 (uint64_t)submit_cmd2_buf[VCMD_SUBMIT_CMD2_BATCH_SYNC_QUEUE_ID_HI(i)] << 32,
2111 };
2112 const uint32_t *cmds = &submit_cmd2_buf[batch.cmd_offset];
2113 const uint32_t *syncs = &submit_cmd2_buf[batch.sync_offset];
2114
2115 if (batch.cmd_offset + batch.cmd_size > length_dw ||
2116 batch.sync_offset + batch.sync_count * 3 > length_dw ||
2117 batch.sync_queue_index >= VTEST_MAX_SYNC_QUEUE_COUNT) {
2118 free(submit_cmd2_buf);
2119 return -EINVAL;
2120 }
2121
2122 ret = vtest_submit_cmd2_batch(ctx, &batch, cmds, syncs);
2123 if (ret) {
2124 free(submit_cmd2_buf);
2125 return ret;
2126 }
2127 }
2128
2129 free(submit_cmd2_buf);
2130
2131 return 0;
2132 }
2133
/* Set the per-command payload cap used to bound client-supplied sizes in
 * the command handlers above.
 */
void vtest_set_max_length(uint32_t length)
{
   renderer.max_length = length;
}
2138