/*
 * Copyright 2019 Google LLC
 * SPDX-License-Identifier: MIT
 *
 * based in part on virgl which is:
 * Copyright 2014, 2015 Red Hat.
 */

#include <errno.h>
#include <netinet/in.h>
#include <poll.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/un.h>
#include <unistd.h>

#include "util/os_file.h"
#include "util/sparse_array.h"
#include "util/u_process.h"
#define VIRGL_RENDERER_UNSTABLE_APIS
#include "virtio-gpu/virglrenderer_hw.h"
#include "vtest/vtest_protocol.h"

#include "vn_renderer.h"

#define VTEST_PCI_VENDOR_ID 0x1af4
#define VTEST_PCI_DEVICE_ID 0x1050
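
/* Wire format, per vtest_protocol.h: every message is a two-dword header --
 * VTEST_CMD_LEN, the payload length (in dwords for the commands used below,
 * except VCMD_CREATE_RENDERER, which counts bytes), and VTEST_CMD_ID --
 * followed by the payload.  The protocol is synchronous: a reply, when there
 * is one, is read back on the same socket, so callers serialize each
 * command/reply pair with sock_mutex.  The PCI IDs above advertise a
 * virtio-gpu device (vendor 0x1af4, device 0x1050) even though no real PCI
 * device exists under vtest.
 */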

struct vtest;

struct vtest_shmem {
   struct vn_renderer_shmem base;
};

struct vtest_bo {
   struct vn_renderer_bo base;

   uint32_t blob_flags;
   /* might be closed after mmap */
   int res_fd;
};

struct vtest_sync {
   struct vn_renderer_sync base;
};

struct vtest {
   struct vn_renderer base;

   struct vn_instance *instance;

   mtx_t sock_mutex;
   int sock_fd;

   uint32_t protocol_version;
   uint32_t max_sync_queue_count;

   struct {
      enum virgl_renderer_capset id;
      uint32_t version;
      struct virgl_renderer_capset_venus data;
   } capset;

   struct util_sparse_array shmem_array;
   struct util_sparse_array bo_array;
};

static int
vtest_connect_socket(struct vn_instance *instance, const char *path)
{
   struct sockaddr_un un;
   int sock;

   sock = socket(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0);
   if (sock < 0) {
      vn_log(instance, "failed to create a socket");
      return -1;
   }

   memset(&un, 0, sizeof(un));
   un.sun_family = AF_UNIX;
   /* reject paths that would overflow the fixed-size sun_path buffer */
   if (strlen(path) >= sizeof(un.sun_path)) {
      vn_log(instance, "socket path %s is too long", path);
      close(sock);
      return -1;
   }
   memcpy(un.sun_path, path, strlen(path));

   if (connect(sock, (struct sockaddr *)&un, sizeof(un)) == -1) {
      vn_log(instance, "failed to connect to %s: %s", path, strerror(errno));
      close(sock);
      return -1;
   }

   return sock;
}

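/* Block until exactly size bytes have been read.  Any error -- including
 * EOF, which means the server went away -- is fatal, because the
 * synchronous protocol has no way to resynchronize after a partial reply.
 */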
static void
vtest_read(struct vtest *vtest, void *buf, size_t size)
{
   do {
      const ssize_t ret = read(vtest->sock_fd, buf, size);
      /* treat EOF (ret == 0) as a lost connection too; otherwise this loop
       * would spin forever on a closed socket
       */
      if (unlikely(ret <= 0)) {
         vn_log(vtest->instance,
                "lost connection to rendering server on %zu read %zi %d",
                size, ret, errno);
         abort();
      }

      buf += ret;
      size -= ret;
   } while (size);
}

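/* Receive a single file descriptor passed as SCM_RIGHTS ancillary data.
 * The one-byte iovec payload is required: ancillary data cannot be sent on
 * an otherwise empty stream-socket message.
 */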
static int
vtest_receive_fd(struct vtest *vtest)
{
   char cmsg_buf[CMSG_SPACE(sizeof(int))];
   char dummy;
   struct msghdr msg = {
      .msg_iov =
         &(struct iovec){
            .iov_base = &dummy,
            .iov_len = sizeof(dummy),
         },
      .msg_iovlen = 1,
      .msg_control = cmsg_buf,
      .msg_controllen = sizeof(cmsg_buf),
   };

   if (recvmsg(vtest->sock_fd, &msg, 0) < 0) {
      vn_log(vtest->instance, "recvmsg failed: %s", strerror(errno));
      abort();
   }

   struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
   if (!cmsg || cmsg->cmsg_level != SOL_SOCKET ||
       cmsg->cmsg_type != SCM_RIGHTS) {
      vn_log(vtest->instance, "invalid cmsghdr");
      abort();
   }

   return *((int *)CMSG_DATA(cmsg));
}

static void
vtest_write(struct vtest *vtest, const void *buf, size_t size)
{
   do {
      const ssize_t ret = write(vtest->sock_fd, buf, size);
      if (unlikely(ret < 0)) {
         vn_log(vtest->instance,
                "lost connection to rendering server on %zu write %zi %d",
                size, ret, errno);
         abort();
      }

      buf += ret;
      size -= ret;
   } while (size);
}

static void
vtest_vcmd_create_renderer(struct vtest *vtest, const char *name)
{
   const size_t size = strlen(name) + 1;

   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   vtest_hdr[VTEST_CMD_LEN] = size;
   vtest_hdr[VTEST_CMD_ID] = VCMD_CREATE_RENDERER;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, name, size);
}

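/* Probe whether the server understands VCMD_PING_PROTOCOL_VERSION.  A
 * server that predates the command presumably sends no reply at all, so
 * the ping is paired with a busy-wait on resource 0 -- which every server
 * answers -- ensuring the vtest_read below can never block forever.
 */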
static bool
vtest_vcmd_ping_protocol_version(struct vtest *vtest)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   vtest_hdr[VTEST_CMD_LEN] = VCMD_PING_PROTOCOL_VERSION_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_PING_PROTOCOL_VERSION;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));

   /* send a dummy busy wait to avoid blocking in vtest_read in case ping
    * protocol version is not supported
    */
   uint32_t vcmd_busy_wait[VCMD_BUSY_WAIT_SIZE];
   vtest_hdr[VTEST_CMD_LEN] = VCMD_BUSY_WAIT_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_RESOURCE_BUSY_WAIT;
   vcmd_busy_wait[VCMD_BUSY_WAIT_HANDLE] = 0;
   vcmd_busy_wait[VCMD_BUSY_WAIT_FLAGS] = 0;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_busy_wait, sizeof(vcmd_busy_wait));

   uint32_t dummy;
   vtest_read(vtest, vtest_hdr, sizeof(vtest_hdr));
   if (vtest_hdr[VTEST_CMD_ID] == VCMD_PING_PROTOCOL_VERSION) {
      /* consume the dummy busy wait result */
      vtest_read(vtest, vtest_hdr, sizeof(vtest_hdr));
      assert(vtest_hdr[VTEST_CMD_ID] == VCMD_RESOURCE_BUSY_WAIT);
      vtest_read(vtest, &dummy, sizeof(dummy));
      return true;
   } else {
      /* no ping protocol version support */
      assert(vtest_hdr[VTEST_CMD_ID] == VCMD_RESOURCE_BUSY_WAIT);
      vtest_read(vtest, &dummy, sizeof(dummy));
      return false;
   }
}

static uint32_t
vtest_vcmd_protocol_version(struct vtest *vtest)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   uint32_t vcmd_protocol_version[VCMD_PROTOCOL_VERSION_SIZE];
   vtest_hdr[VTEST_CMD_LEN] = VCMD_PROTOCOL_VERSION_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_PROTOCOL_VERSION;
   vcmd_protocol_version[VCMD_PROTOCOL_VERSION_VERSION] =
      VTEST_PROTOCOL_VERSION;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_protocol_version, sizeof(vcmd_protocol_version));

   vtest_read(vtest, vtest_hdr, sizeof(vtest_hdr));
   assert(vtest_hdr[VTEST_CMD_LEN] == VCMD_PROTOCOL_VERSION_SIZE);
   assert(vtest_hdr[VTEST_CMD_ID] == VCMD_PROTOCOL_VERSION);
   vtest_read(vtest, vcmd_protocol_version, sizeof(vcmd_protocol_version));

   return vcmd_protocol_version[VCMD_PROTOCOL_VERSION_VERSION];
}

static uint32_t
vtest_vcmd_get_param(struct vtest *vtest, enum vcmd_param param)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   uint32_t vcmd_get_param[VCMD_GET_PARAM_SIZE];
   vtest_hdr[VTEST_CMD_LEN] = VCMD_GET_PARAM_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_GET_PARAM;
   vcmd_get_param[VCMD_GET_PARAM_PARAM] = param;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_get_param, sizeof(vcmd_get_param));

   vtest_read(vtest, vtest_hdr, sizeof(vtest_hdr));
   assert(vtest_hdr[VTEST_CMD_LEN] == 2);
   assert(vtest_hdr[VTEST_CMD_ID] == VCMD_GET_PARAM);

   /* the reply is a (valid, value) dword pair */
   uint32_t resp[2];
   vtest_read(vtest, resp, sizeof(resp));

   return resp[0] ? resp[1] : 0;
}

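/* The reply is a header, one "valid" dword, and (VTEST_CMD_LEN - 1) dwords
 * of capset data.  The server's capset may be smaller or larger than ours:
 * the tail is zero-filled in the first case and drained in the second.
 */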
static bool
vtest_vcmd_get_capset(struct vtest *vtest,
                      enum virgl_renderer_capset id,
                      uint32_t version,
                      void *capset,
                      size_t capset_size)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   uint32_t vcmd_get_capset[VCMD_GET_CAPSET_SIZE];
   vtest_hdr[VTEST_CMD_LEN] = VCMD_GET_CAPSET_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_GET_CAPSET;
   vcmd_get_capset[VCMD_GET_CAPSET_ID] = id;
   vcmd_get_capset[VCMD_GET_CAPSET_VERSION] = version;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_get_capset, sizeof(vcmd_get_capset));

   vtest_read(vtest, vtest_hdr, sizeof(vtest_hdr));
   assert(vtest_hdr[VTEST_CMD_ID] == VCMD_GET_CAPSET);

   uint32_t valid;
   vtest_read(vtest, &valid, sizeof(valid));
   if (!valid)
      return false;

   size_t read_size = (vtest_hdr[VTEST_CMD_LEN] - 1) * 4;
   if (capset_size >= read_size) {
      vtest_read(vtest, capset, read_size);
      memset(capset + read_size, 0, capset_size - read_size);
   } else {
      vtest_read(vtest, capset, capset_size);

      char temp[256];
      read_size -= capset_size;
      while (read_size) {
         const size_t temp_size = MIN2(read_size, ARRAY_SIZE(temp));
         vtest_read(vtest, temp, temp_size);
         read_size -= temp_size;
      }
   }

   return true;
}

static void
vtest_vcmd_context_init(struct vtest *vtest,
                        enum virgl_renderer_capset capset_id)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   uint32_t vcmd_context_init[VCMD_CONTEXT_INIT_SIZE];
   vtest_hdr[VTEST_CMD_LEN] = VCMD_CONTEXT_INIT_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_CONTEXT_INIT;
   vcmd_context_init[VCMD_CONTEXT_INIT_CAPSET_ID] = capset_id;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_context_init, sizeof(vcmd_context_init));
}

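/* The vtest analogue of virtio-gpu's RESOURCE_CREATE_BLOB: the 64-bit size
 * and blob id are split into lo/hi dwords, and the reply carries the
 * resource id followed by a file descriptor for the blob passed over
 * SCM_RIGHTS.
 */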
static uint32_t
vtest_vcmd_resource_create_blob(struct vtest *vtest,
                                enum vcmd_blob_type type,
                                uint32_t flags,
                                VkDeviceSize size,
                                vn_object_id blob_id,
                                int *res_fd)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   uint32_t vcmd_res_create_blob[VCMD_RES_CREATE_BLOB_SIZE];

   vtest_hdr[VTEST_CMD_LEN] = VCMD_RES_CREATE_BLOB_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_RESOURCE_CREATE_BLOB;

   vcmd_res_create_blob[VCMD_RES_CREATE_BLOB_TYPE] = type;
   vcmd_res_create_blob[VCMD_RES_CREATE_BLOB_FLAGS] = flags;
   vcmd_res_create_blob[VCMD_RES_CREATE_BLOB_SIZE_LO] = (uint32_t)size;
   vcmd_res_create_blob[VCMD_RES_CREATE_BLOB_SIZE_HI] =
      (uint32_t)(size >> 32);
   vcmd_res_create_blob[VCMD_RES_CREATE_BLOB_ID_LO] = (uint32_t)blob_id;
   vcmd_res_create_blob[VCMD_RES_CREATE_BLOB_ID_HI] =
      (uint32_t)(blob_id >> 32);

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_res_create_blob, sizeof(vcmd_res_create_blob));

   vtest_read(vtest, vtest_hdr, sizeof(vtest_hdr));
   assert(vtest_hdr[VTEST_CMD_LEN] == 1);
   assert(vtest_hdr[VTEST_CMD_ID] == VCMD_RESOURCE_CREATE_BLOB);

   uint32_t res_id;
   vtest_read(vtest, &res_id, sizeof(res_id));

   *res_fd = vtest_receive_fd(vtest);

   return res_id;
}

static void
vtest_vcmd_resource_unref(struct vtest *vtest, uint32_t res_id)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   uint32_t vcmd_res_unref[VCMD_RES_UNREF_SIZE];

   vtest_hdr[VTEST_CMD_LEN] = VCMD_RES_UNREF_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_RESOURCE_UNREF;
   vcmd_res_unref[VCMD_RES_UNREF_RES_HANDLE] = res_id;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_res_unref, sizeof(vcmd_res_unref));
}

static uint32_t
vtest_vcmd_sync_create(struct vtest *vtest, uint64_t initial_val)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   uint32_t vcmd_sync_create[VCMD_SYNC_CREATE_SIZE];

   vtest_hdr[VTEST_CMD_LEN] = VCMD_SYNC_CREATE_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_SYNC_CREATE;

   vcmd_sync_create[VCMD_SYNC_CREATE_VALUE_LO] = (uint32_t)initial_val;
   vcmd_sync_create[VCMD_SYNC_CREATE_VALUE_HI] =
      (uint32_t)(initial_val >> 32);

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_sync_create, sizeof(vcmd_sync_create));

   vtest_read(vtest, vtest_hdr, sizeof(vtest_hdr));
   assert(vtest_hdr[VTEST_CMD_LEN] == 1);
   assert(vtest_hdr[VTEST_CMD_ID] == VCMD_SYNC_CREATE);

   uint32_t sync_id;
   vtest_read(vtest, &sync_id, sizeof(sync_id));

   return sync_id;
}

static void
vtest_vcmd_sync_unref(struct vtest *vtest, uint32_t sync_id)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   uint32_t vcmd_sync_unref[VCMD_SYNC_UNREF_SIZE];

   vtest_hdr[VTEST_CMD_LEN] = VCMD_SYNC_UNREF_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_SYNC_UNREF;
   vcmd_sync_unref[VCMD_SYNC_UNREF_ID] = sync_id;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_sync_unref, sizeof(vcmd_sync_unref));
}

static uint64_t
vtest_vcmd_sync_read(struct vtest *vtest, uint32_t sync_id)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   uint32_t vcmd_sync_read[VCMD_SYNC_READ_SIZE];

   vtest_hdr[VTEST_CMD_LEN] = VCMD_SYNC_READ_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_SYNC_READ;

   vcmd_sync_read[VCMD_SYNC_READ_ID] = sync_id;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_sync_read, sizeof(vcmd_sync_read));

   vtest_read(vtest, vtest_hdr, sizeof(vtest_hdr));
   assert(vtest_hdr[VTEST_CMD_LEN] == 2);
   assert(vtest_hdr[VTEST_CMD_ID] == VCMD_SYNC_READ);

   uint64_t val;
   vtest_read(vtest, &val, sizeof(val));

   return val;
}

static void
vtest_vcmd_sync_write(struct vtest *vtest, uint32_t sync_id, uint64_t val)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   uint32_t vcmd_sync_write[VCMD_SYNC_WRITE_SIZE];

   vtest_hdr[VTEST_CMD_LEN] = VCMD_SYNC_WRITE_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_SYNC_WRITE;

   vcmd_sync_write[VCMD_SYNC_WRITE_ID] = sync_id;
   vcmd_sync_write[VCMD_SYNC_WRITE_VALUE_LO] = (uint32_t)val;
   vcmd_sync_write[VCMD_SYNC_WRITE_VALUE_HI] = (uint32_t)(val >> 32);

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_sync_write, sizeof(vcmd_sync_write));
}

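/* Ask the server to wait on (sync_id, value) pairs.  The reply is not
 * inline data but a pollable file descriptor received over SCM_RIGHTS; the
 * actual waiting then happens client-side in sync_wait_poll(), which keeps
 * the socket free for commands from other threads.
 */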
static int
vtest_vcmd_sync_wait(struct vtest *vtest,
                     uint32_t flags,
                     int poll_timeout,
                     struct vn_renderer_sync *const *syncs,
                     const uint64_t *vals,
                     uint32_t count)
{
   /* a negative (infinite) poll_timeout is presumably encoded as
    * UINT32_MAX on the wire
    */
   const uint32_t timeout = poll_timeout >= 0 && poll_timeout <= INT32_MAX
                               ? poll_timeout
                               : UINT32_MAX;

   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   vtest_hdr[VTEST_CMD_LEN] = VCMD_SYNC_WAIT_SIZE(count);
   vtest_hdr[VTEST_CMD_ID] = VCMD_SYNC_WAIT;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, &flags, sizeof(flags));
   vtest_write(vtest, &timeout, sizeof(timeout));
   for (uint32_t i = 0; i < count; i++) {
      const uint64_t val = vals[i];
      const uint32_t sync[3] = {
         syncs[i]->sync_id,
         (uint32_t)val,
         (uint32_t)(val >> 32),
      };
      vtest_write(vtest, sync, sizeof(sync));
   }

   vtest_read(vtest, vtest_hdr, sizeof(vtest_hdr));
   assert(vtest_hdr[VTEST_CMD_LEN] == 0);
   assert(vtest_hdr[VTEST_CMD_ID] == VCMD_SYNC_WAIT);

   return vtest_receive_fd(vtest);
}

static void
submit_cmd2_sizes(const struct vn_renderer_submit *submit,
                  size_t *header_size,
                  size_t *cs_size,
                  size_t *sync_size)
{
   if (!submit->batch_count) {
      *header_size = 0;
      *cs_size = 0;
      *sync_size = 0;
      return;
   }

   *header_size = sizeof(uint32_t) +
                  sizeof(struct vcmd_submit_cmd2_batch) * submit->batch_count;

   *cs_size = 0;
   *sync_size = 0;
   for (uint32_t i = 0; i < submit->batch_count; i++) {
      const struct vn_renderer_submit_batch *batch = &submit->batches[i];
      assert(batch->cs_size % sizeof(uint32_t) == 0);
      *cs_size += batch->cs_size;
      *sync_size += (sizeof(uint32_t) + sizeof(uint64_t)) * batch->sync_count;
   }

   assert(*header_size % sizeof(uint32_t) == 0);
   assert(*cs_size % sizeof(uint32_t) == 0);
   assert(*sync_size % sizeof(uint32_t) == 0);
}

static void
vtest_vcmd_submit_cmd2(struct vtest *vtest,
                       const struct vn_renderer_submit *submit)
{
   size_t header_size;
   size_t cs_size;
   size_t sync_size;
   submit_cmd2_sizes(submit, &header_size, &cs_size, &sync_size);
   const size_t total_size = header_size + cs_size + sync_size;
   if (!total_size)
      return;

   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   vtest_hdr[VTEST_CMD_LEN] = total_size / sizeof(uint32_t);
   vtest_hdr[VTEST_CMD_ID] = VCMD_SUBMIT_CMD2;
   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));

   /* write batch count and batch headers */
   const uint32_t batch_count = submit->batch_count;
   size_t cs_offset = header_size;
   size_t sync_offset = cs_offset + cs_size;
   vtest_write(vtest, &batch_count, sizeof(batch_count));
   for (uint32_t i = 0; i < submit->batch_count; i++) {
      const struct vn_renderer_submit_batch *batch = &submit->batches[i];
      struct vcmd_submit_cmd2_batch dst = {
         .cmd_offset = cs_offset / sizeof(uint32_t),
         .cmd_size = batch->cs_size / sizeof(uint32_t),
         .sync_offset = sync_offset / sizeof(uint32_t),
         .sync_count = batch->sync_count,
      };
      if (!batch->sync_queue_cpu) {
         dst.flags = VCMD_SUBMIT_CMD2_FLAG_SYNC_QUEUE;
         dst.sync_queue_index = batch->sync_queue_index;
         dst.sync_queue_id = batch->vk_queue_id;
      }
      vtest_write(vtest, &dst, sizeof(dst));

      cs_offset += batch->cs_size;
      sync_offset +=
         (sizeof(uint32_t) + sizeof(uint64_t)) * batch->sync_count;
   }

   /* write cs */
   if (cs_size) {
      for (uint32_t i = 0; i < submit->batch_count; i++) {
         const struct vn_renderer_submit_batch *batch = &submit->batches[i];
         if (batch->cs_size)
            vtest_write(vtest, batch->cs_data, batch->cs_size);
      }
   }

   /* write syncs */
   for (uint32_t i = 0; i < submit->batch_count; i++) {
      const struct vn_renderer_submit_batch *batch = &submit->batches[i];

      for (uint32_t j = 0; j < batch->sync_count; j++) {
         const uint64_t val = batch->sync_values[j];
         const uint32_t sync[3] = {
            batch->syncs[j]->sync_id,
            (uint32_t)val,
            (uint32_t)(val >> 32),
         };
         vtest_write(vtest, sync, sizeof(sync));
      }
   }
}

static VkResult
vtest_sync_write(struct vn_renderer *renderer,
                 struct vn_renderer_sync *_sync,
                 uint64_t val)
{
   struct vtest *vtest = (struct vtest *)renderer;
   struct vtest_sync *sync = (struct vtest_sync *)_sync;

   mtx_lock(&vtest->sock_mutex);
   vtest_vcmd_sync_write(vtest, sync->base.sync_id, val);
   mtx_unlock(&vtest->sock_mutex);

   return VK_SUCCESS;
}

static VkResult
vtest_sync_read(struct vn_renderer *renderer,
                struct vn_renderer_sync *_sync,
                uint64_t *val)
{
   struct vtest *vtest = (struct vtest *)renderer;
   struct vtest_sync *sync = (struct vtest_sync *)_sync;

   mtx_lock(&vtest->sock_mutex);
   *val = vtest_vcmd_sync_read(vtest, sync->base.sync_id);
   mtx_unlock(&vtest->sock_mutex);

   return VK_SUCCESS;
}

static VkResult
vtest_sync_reset(struct vn_renderer *renderer,
                 struct vn_renderer_sync *sync,
                 uint64_t initial_val)
{
   /* same as write */
   return vtest_sync_write(renderer, sync, initial_val);
}

static void
vtest_sync_destroy(struct vn_renderer *renderer,
                   struct vn_renderer_sync *_sync)
{
   struct vtest *vtest = (struct vtest *)renderer;
   struct vtest_sync *sync = (struct vtest_sync *)_sync;

   mtx_lock(&vtest->sock_mutex);
   vtest_vcmd_sync_unref(vtest, sync->base.sync_id);
   mtx_unlock(&vtest->sock_mutex);

   free(sync);
}

static VkResult
vtest_sync_create(struct vn_renderer *renderer,
                  uint64_t initial_val,
                  uint32_t flags,
                  struct vn_renderer_sync **out_sync)
{
   struct vtest *vtest = (struct vtest *)renderer;

   struct vtest_sync *sync = calloc(1, sizeof(*sync));
   if (!sync)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   mtx_lock(&vtest->sock_mutex);
   sync->base.sync_id = vtest_vcmd_sync_create(vtest, initial_val);
   mtx_unlock(&vtest->sock_mutex);

   *out_sync = &sync->base;
   return VK_SUCCESS;
}

static void
vtest_bo_invalidate(struct vn_renderer *renderer,
                    struct vn_renderer_bo *bo,
                    VkDeviceSize offset,
                    VkDeviceSize size)
{
   /* nop */
}

static void
vtest_bo_flush(struct vn_renderer *renderer,
               struct vn_renderer_bo *bo,
               VkDeviceSize offset,
               VkDeviceSize size)
{
   /* nop */
}

static void *
vtest_bo_map(struct vn_renderer *renderer, struct vn_renderer_bo *_bo)
{
   struct vtest *vtest = (struct vtest *)renderer;
   struct vtest_bo *bo = (struct vtest_bo *)_bo;
   const bool mappable = bo->blob_flags & VCMD_BLOB_FLAG_MAPPABLE;
   const bool shareable = bo->blob_flags & VCMD_BLOB_FLAG_SHAREABLE;

   /* not thread-safe but is fine */
   if (!bo->base.mmap_ptr && mappable) {
      /* We wrongly assume that mmap(dma_buf) and vkMapMemory(VkDeviceMemory)
       * are equivalent when the blob type is VCMD_BLOB_TYPE_HOST3D.  While
       * we check for VCMD_PARAM_HOST_COHERENT_DMABUF_BLOB, we know vtest can
       * lie.
       */
      void *ptr = mmap(NULL, bo->base.mmap_size, PROT_READ | PROT_WRITE,
                       MAP_SHARED, bo->res_fd, 0);
      if (ptr == MAP_FAILED) {
         vn_log(vtest->instance, "failed to mmap %d of size %zu rw: %s",
                bo->res_fd, bo->base.mmap_size, strerror(errno));
      } else {
         bo->base.mmap_ptr = ptr;
         /* we don't need the fd anymore */
         if (!shareable) {
            close(bo->res_fd);
            bo->res_fd = -1;
         }
      }
   }

   return bo->base.mmap_ptr;
}

static int
vtest_bo_export_dma_buf(struct vn_renderer *renderer,
                        struct vn_renderer_bo *_bo)
{
   const struct vtest_bo *bo = (struct vtest_bo *)_bo;
   const bool shareable = bo->blob_flags & VCMD_BLOB_FLAG_SHAREABLE;
   return shareable ? os_dupfd_cloexec(bo->res_fd) : -1;
}

static bool
vtest_bo_destroy(struct vn_renderer *renderer, struct vn_renderer_bo *_bo)
{
   struct vtest *vtest = (struct vtest *)renderer;
   struct vtest_bo *bo = (struct vtest_bo *)_bo;

   if (bo->base.mmap_ptr)
      munmap(bo->base.mmap_ptr, bo->base.mmap_size);
   if (bo->res_fd >= 0)
      close(bo->res_fd);

   mtx_lock(&vtest->sock_mutex);
   vtest_vcmd_resource_unref(vtest, bo->base.res_id);
   mtx_unlock(&vtest->sock_mutex);

   return true;
}

static uint32_t
vtest_bo_blob_flags(VkMemoryPropertyFlags flags,
                    VkExternalMemoryHandleTypeFlags external_handles)
{
   uint32_t blob_flags = 0;
   if (flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
      blob_flags |= VCMD_BLOB_FLAG_MAPPABLE;
   if (external_handles)
      blob_flags |= VCMD_BLOB_FLAG_SHAREABLE;
   if (external_handles & VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT)
      blob_flags |= VCMD_BLOB_FLAG_CROSS_DEVICE;

   return blob_flags;
}

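/* Create a host-visible handle for an existing VkDeviceMemory: the Venus
 * object id of the memory is passed as the blob id, which lets the server
 * export that exact allocation as a VCMD_BLOB_TYPE_HOST3D blob.
 */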
static VkResult
vtest_bo_create_from_device_memory(
   struct vn_renderer *renderer,
   VkDeviceSize size,
   vn_object_id mem_id,
   VkMemoryPropertyFlags flags,
   VkExternalMemoryHandleTypeFlags external_handles,
   struct vn_renderer_bo **out_bo)
{
   struct vtest *vtest = (struct vtest *)renderer;
   const uint32_t blob_flags = vtest_bo_blob_flags(flags, external_handles);

   mtx_lock(&vtest->sock_mutex);
   int res_fd;
   uint32_t res_id = vtest_vcmd_resource_create_blob(
      vtest, VCMD_BLOB_TYPE_HOST3D, blob_flags, size, mem_id, &res_fd);
   assert(res_id > 0 && res_fd >= 0);
   mtx_unlock(&vtest->sock_mutex);

   struct vtest_bo *bo = util_sparse_array_get(&vtest->bo_array, res_id);
   *bo = (struct vtest_bo){
      .base = {
         .refcount = VN_REFCOUNT_INIT(1),
         .res_id = res_id,
         .mmap_size = size,
      },
      .blob_flags = blob_flags,
      .res_fd = res_fd,
   };

   *out_bo = &bo->base;

   return VK_SUCCESS;
}

static void
vtest_shmem_destroy(struct vn_renderer *renderer,
                    struct vn_renderer_shmem *_shmem)
{
   struct vtest *vtest = (struct vtest *)renderer;
   struct vtest_shmem *shmem = (struct vtest_shmem *)_shmem;

   munmap(shmem->base.mmap_ptr, shmem->base.mmap_size);

   mtx_lock(&vtest->sock_mutex);
   vtest_vcmd_resource_unref(vtest, shmem->base.res_id);
   mtx_unlock(&vtest->sock_mutex);
}

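/* Shared memory between driver and server is a guest-type blob: it is
 * mapped exactly once for its whole lifetime, so the fd can be closed as
 * soon as the mapping exists.
 */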
static struct vn_renderer_shmem *
vtest_shmem_create(struct vn_renderer *renderer, size_t size)
{
   struct vtest *vtest = (struct vtest *)renderer;

   mtx_lock(&vtest->sock_mutex);
   int res_fd;
   uint32_t res_id = vtest_vcmd_resource_create_blob(
      vtest, VCMD_BLOB_TYPE_GUEST, VCMD_BLOB_FLAG_MAPPABLE, size, 0, &res_fd);
   assert(res_id > 0 && res_fd >= 0);
   mtx_unlock(&vtest->sock_mutex);

   void *ptr =
      mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, res_fd, 0);
   close(res_fd);
   if (ptr == MAP_FAILED) {
      mtx_lock(&vtest->sock_mutex);
      vtest_vcmd_resource_unref(vtest, res_id);
      mtx_unlock(&vtest->sock_mutex);
      return NULL;
   }

   struct vtest_shmem *shmem =
      util_sparse_array_get(&vtest->shmem_array, res_id);
   *shmem = (struct vtest_shmem){
      .base = {
         .refcount = VN_REFCOUNT_INIT(1),
         .res_id = res_id,
         .mmap_size = size,
         .mmap_ptr = ptr,
      },
   };

   return &shmem->base;
}

static VkResult
sync_wait_poll(int fd, int poll_timeout)
{
   struct pollfd pollfd = {
      .fd = fd,
      .events = POLLIN,
   };
   int ret;
   do {
      ret = poll(&pollfd, 1, poll_timeout);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

   if (ret < 0 || (ret > 0 && !(pollfd.revents & POLLIN))) {
      return (ret < 0 && errno == ENOMEM) ? VK_ERROR_OUT_OF_HOST_MEMORY
                                          : VK_ERROR_DEVICE_LOST;
   }

   return ret ? VK_SUCCESS : VK_TIMEOUT;
}

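/* Convert a timeout in nanoseconds to poll()'s millisecond timeout,
 * rounding up.  The "!ms && timeout" test is not dead code: the rounding
 * addition can wrap a uint64_t timeout near UINT64_MAX down to a tiny
 * value, and wrapped (or over-INT_MAX) timeouts fall back to -1, i.e.
 * infinite.
 */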
static int
timeout_to_poll_timeout(uint64_t timeout)
{
   const uint64_t ns_per_ms = 1000000;
   const uint64_t ms = (timeout + ns_per_ms - 1) / ns_per_ms;
   if (!ms && timeout)
      return -1;
   return ms <= INT_MAX ? ms : -1;
}

static VkResult
vtest_wait(struct vn_renderer *renderer, const struct vn_renderer_wait *wait)
{
   struct vtest *vtest = (struct vtest *)renderer;
   const uint32_t flags = wait->wait_any ? VCMD_SYNC_WAIT_FLAG_ANY : 0;
   const int poll_timeout = timeout_to_poll_timeout(wait->timeout);

   /*
    * vtest_vcmd_sync_wait (and some other sync commands) is executed only
    * after all prior commands are dispatched.  That is far from ideal.
    *
    * In virtio-gpu, a drm_syncobj wait ioctl is executed immediately.  It
    * works because it uses virtio-gpu interrupts as a side channel.  vtest
    * needs a side channel to perform well.
    *
    * Whether on virtio-gpu or vtest, we should also set up a 1-byte
    * coherent memory that is set to non-zero by the GPU after the syncs
    * signal.  That would allow us to do a quick check (or spin a bit)
    * before waiting.
    */
   mtx_lock(&vtest->sock_mutex);
   const int fd =
      vtest_vcmd_sync_wait(vtest, flags, poll_timeout, wait->syncs,
                           wait->sync_values, wait->sync_count);
   mtx_unlock(&vtest->sock_mutex);

   VkResult result = sync_wait_poll(fd, poll_timeout);
   close(fd);

   return result;
}

static VkResult
vtest_submit(struct vn_renderer *renderer,
             const struct vn_renderer_submit *submit)
{
   struct vtest *vtest = (struct vtest *)renderer;

   mtx_lock(&vtest->sock_mutex);
   vtest_vcmd_submit_cmd2(vtest, submit);
   mtx_unlock(&vtest->sock_mutex);

   return VK_SUCCESS;
}

static void
vtest_get_info(struct vn_renderer *renderer, struct vn_renderer_info *info)
{
   struct vtest *vtest = (struct vtest *)renderer;

   memset(info, 0, sizeof(*info));

   info->pci.vendor_id = VTEST_PCI_VENDOR_ID;
   info->pci.device_id = VTEST_PCI_DEVICE_ID;

   info->has_dma_buf_import = false;
   info->has_cache_management = false;
   info->has_external_sync = false;
   info->has_implicit_fencing = false;

   info->max_sync_queue_count = vtest->max_sync_queue_count;

   const struct virgl_renderer_capset_venus *capset = &vtest->capset.data;
   info->wire_format_version = capset->wire_format_version;
   info->vk_xml_version = capset->vk_xml_version;
   info->vk_ext_command_serialization_spec_version =
      capset->vk_ext_command_serialization_spec_version;
   info->vk_mesa_venus_protocol_spec_version =
      capset->vk_mesa_venus_protocol_spec_version;
}

static void
vtest_destroy(struct vn_renderer *renderer,
              const VkAllocationCallbacks *alloc)
{
   struct vtest *vtest = (struct vtest *)renderer;

   if (vtest->sock_fd >= 0) {
      shutdown(vtest->sock_fd, SHUT_RDWR);
      close(vtest->sock_fd);
   }

   mtx_destroy(&vtest->sock_mutex);
   util_sparse_array_finish(&vtest->shmem_array);
   util_sparse_array_finish(&vtest->bo_array);

   vk_free(alloc, vtest);
}

static VkResult
vtest_init_capset(struct vtest *vtest)
{
   vtest->capset.id = VIRGL_RENDERER_CAPSET_VENUS;
   vtest->capset.version = 0;

   if (!vtest_vcmd_get_capset(vtest, vtest->capset.id, vtest->capset.version,
                              &vtest->capset.data,
                              sizeof(vtest->capset.data))) {
      vn_log(vtest->instance, "no venus capset");
      return VK_ERROR_INITIALIZATION_FAILED;
   }

   return VK_SUCCESS;
}

static VkResult
vtest_init_params(struct vtest *vtest)
{
   uint32_t val =
      vtest_vcmd_get_param(vtest, VCMD_PARAM_MAX_SYNC_QUEUE_COUNT);
   if (!val) {
      vn_log(vtest->instance, "no sync queue support");
      return VK_ERROR_INITIALIZATION_FAILED;
   }
   vtest->max_sync_queue_count = val;

   return VK_SUCCESS;
}

static VkResult
vtest_init_protocol_version(struct vtest *vtest)
{
   const uint32_t min_protocol_version = 3;

   const uint32_t ver = vtest_vcmd_ping_protocol_version(vtest)
                           ? vtest_vcmd_protocol_version(vtest)
                           : 0;
   if (ver < min_protocol_version) {
      vn_log(vtest->instance, "vtest protocol version (%u) too old", ver);
      return VK_ERROR_INITIALIZATION_FAILED;
   }

   vtest->protocol_version = ver;

   return VK_SUCCESS;
}

static VkResult
vtest_init(struct vtest *vtest)
{
   util_sparse_array_init(&vtest->shmem_array, sizeof(struct vtest_shmem),
                          1024);
   util_sparse_array_init(&vtest->bo_array, sizeof(struct vtest_bo), 1024);

   mtx_init(&vtest->sock_mutex, mtx_plain);
   vtest->sock_fd =
      vtest_connect_socket(vtest->instance, VTEST_DEFAULT_SOCKET_NAME);
   if (vtest->sock_fd < 0)
      return VK_ERROR_INITIALIZATION_FAILED;

   const char *renderer_name = util_get_process_name();
   if (!renderer_name)
      renderer_name = "venus";
   vtest_vcmd_create_renderer(vtest, renderer_name);

   VkResult result = vtest_init_protocol_version(vtest);
   if (result == VK_SUCCESS)
      result = vtest_init_params(vtest);
   if (result == VK_SUCCESS)
      result = vtest_init_capset(vtest);
   if (result != VK_SUCCESS)
      return result;

   vtest_vcmd_context_init(vtest, vtest->capset.id);

   vtest->base.ops.destroy = vtest_destroy;
   vtest->base.ops.get_info = vtest_get_info;
   vtest->base.ops.submit = vtest_submit;
   vtest->base.ops.wait = vtest_wait;

   vtest->base.shmem_ops.create = vtest_shmem_create;
   vtest->base.shmem_ops.destroy = vtest_shmem_destroy;

   vtest->base.bo_ops.create_from_device_memory =
      vtest_bo_create_from_device_memory;
   vtest->base.bo_ops.create_from_dma_buf = NULL;
   vtest->base.bo_ops.destroy = vtest_bo_destroy;
   vtest->base.bo_ops.export_dma_buf = vtest_bo_export_dma_buf;
   vtest->base.bo_ops.map = vtest_bo_map;
   vtest->base.bo_ops.flush = vtest_bo_flush;
   vtest->base.bo_ops.invalidate = vtest_bo_invalidate;

   vtest->base.sync_ops.create = vtest_sync_create;
   vtest->base.sync_ops.create_from_syncobj = NULL;
   vtest->base.sync_ops.destroy = vtest_sync_destroy;
   vtest->base.sync_ops.export_syncobj = NULL;
   vtest->base.sync_ops.reset = vtest_sync_reset;
   vtest->base.sync_ops.read = vtest_sync_read;
   vtest->base.sync_ops.write = vtest_sync_write;

   return VK_SUCCESS;
}

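/* The only public entry point.  sock_fd is initialized to -1 before
 * vtest_init() so that vtest_destroy() can safely clean up a partially
 * initialized renderer on any failure path.
 */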
VkResult
vn_renderer_create_vtest(struct vn_instance *instance,
                         const VkAllocationCallbacks *alloc,
                         struct vn_renderer **renderer)
{
   struct vtest *vtest = vk_zalloc(alloc, sizeof(*vtest), VN_DEFAULT_ALIGN,
                                   VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!vtest)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   vtest->instance = instance;
   vtest->sock_fd = -1;

   VkResult result = vtest_init(vtest);
   if (result != VK_SUCCESS) {
      vtest_destroy(&vtest->base, alloc);
      return result;
   }

   *renderer = &vtest->base;

   return VK_SUCCESS;
}