/*
 * Copyright 2019 Google LLC
 * SPDX-License-Identifier: MIT
 *
 * based in part on virgl which is:
 * Copyright 2014, 2015 Red Hat.
 */

#include <errno.h>
#include <netinet/in.h>
#include <poll.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/un.h>
#include <unistd.h>

#include "util/os_file.h"
#include "util/os_misc.h"
#include "util/sparse_array.h"
#include "util/u_process.h"
#define VIRGL_RENDERER_UNSTABLE_APIS
#include "virtio-gpu/virglrenderer_hw.h"
#include "vtest/vtest_protocol.h"

#include "vn_renderer_internal.h"

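/* Pretend to be a virtio-gpu device: 0x1af4 is the virtio PCI vendor ID and
 * 0x1050 is the virtio-gpu PCI device ID.
 */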
#define VTEST_PCI_VENDOR_ID 0x1af4
#define VTEST_PCI_DEVICE_ID 0x1050

struct vtest;

struct vtest_shmem {
   struct vn_renderer_shmem base;
};

struct vtest_bo {
   struct vn_renderer_bo base;

   uint32_t blob_flags;
   /* might be closed after mmap */
   int res_fd;
};

struct vtest_sync {
   struct vn_renderer_sync base;
};

struct vtest {
   struct vn_renderer base;

   struct vn_instance *instance;

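   /* protects sock_fd; every request/reply exchange holds this lock */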
   mtx_t sock_mutex;
   int sock_fd;

   uint32_t protocol_version;
   uint32_t max_timeline_count;

   struct {
      enum virgl_renderer_capset id;
      uint32_t version;
      struct virgl_renderer_capset_venus data;
   } capset;

   uint32_t shmem_blob_mem;

   struct util_sparse_array shmem_array;
   struct util_sparse_array bo_array;

   struct vn_renderer_shmem_cache shmem_cache;
};

static int
vtest_connect_socket(struct vn_instance *instance, const char *path)
{
   struct sockaddr_un un;
   int sock;

   /* reject paths that would overflow the fixed-size sun_path buffer */
   if (strlen(path) >= sizeof(un.sun_path)) {
      vn_log(instance, "socket path too long: %s", path);
      return -1;
   }

   sock = socket(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0);
   if (sock < 0) {
      vn_log(instance, "failed to create a socket");
      return -1;
   }

   memset(&un, 0, sizeof(un));
   un.sun_family = AF_UNIX;
   memcpy(un.sun_path, path, strlen(path));

   if (connect(sock, (struct sockaddr *)&un, sizeof(un)) == -1) {
      vn_log(instance, "failed to connect to %s: %s", path, strerror(errno));
      close(sock);
      return -1;
   }

   return sock;
}

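/* Read exactly size bytes from the socket.  Any failure is fatal: there is
 * no way to resynchronize the protocol stream, so we log and abort.
 */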
static void
vtest_read(struct vtest *vtest, void *buf, size_t size)
{
   do {
      const ssize_t ret = read(vtest->sock_fd, buf, size);
      /* ret == 0 means the server closed the connection */
      if (unlikely(ret <= 0)) {
         vn_log(vtest->instance,
                "lost connection to rendering server on %zu read %zi %d",
                size, ret, errno);
         abort();
      }

      buf += ret;
      size -= ret;
   } while (size);
}

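/* Receive a file descriptor passed by the server as SCM_RIGHTS ancillary
 * data; used for blob resource fds and sync-wait fds.
 */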
static int
vtest_receive_fd(struct vtest *vtest)
{
   char cmsg_buf[CMSG_SPACE(sizeof(int))];
   char dummy;
   struct msghdr msg = {
      .msg_iov =
         &(struct iovec){
            .iov_base = &dummy,
            .iov_len = sizeof(dummy),
         },
      .msg_iovlen = 1,
      .msg_control = cmsg_buf,
      .msg_controllen = sizeof(cmsg_buf),
   };

   if (recvmsg(vtest->sock_fd, &msg, 0) < 0) {
      vn_log(vtest->instance, "recvmsg failed: %s", strerror(errno));
      abort();
   }

   struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
   if (!cmsg || cmsg->cmsg_level != SOL_SOCKET ||
       cmsg->cmsg_type != SCM_RIGHTS) {
      vn_log(vtest->instance, "invalid cmsghdr");
      abort();
   }

   return *((int *)CMSG_DATA(cmsg));
}

static void
vtest_write(struct vtest *vtest, const void *buf, size_t size)
{
   do {
      const ssize_t ret = write(vtest->sock_fd, buf, size);
      if (unlikely(ret < 0)) {
         vn_log(vtest->instance,
                "lost connection to rendering server on %zu write %zi %d",
                size, ret, errno);
         abort();
      }

      buf += ret;
      size -= ret;
   } while (size);
}

static void
vtest_vcmd_create_renderer(struct vtest *vtest, const char *name)
{
   const size_t size = strlen(name) + 1;

   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   vtest_hdr[VTEST_CMD_LEN] = size;
   vtest_hdr[VTEST_CMD_ID] = VCMD_CREATE_RENDERER;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, name, size);
}

static bool
vtest_vcmd_ping_protocol_version(struct vtest *vtest)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   vtest_hdr[VTEST_CMD_LEN] = VCMD_PING_PROTOCOL_VERSION_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_PING_PROTOCOL_VERSION;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));

   /* send a dummy busy wait to avoid blocking in vtest_read in case ping
    * protocol version is not supported
    */
   uint32_t vcmd_busy_wait[VCMD_BUSY_WAIT_SIZE];
   vtest_hdr[VTEST_CMD_LEN] = VCMD_BUSY_WAIT_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_RESOURCE_BUSY_WAIT;
   vcmd_busy_wait[VCMD_BUSY_WAIT_HANDLE] = 0;
   vcmd_busy_wait[VCMD_BUSY_WAIT_FLAGS] = 0;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_busy_wait, sizeof(vcmd_busy_wait));

   uint32_t dummy;
   vtest_read(vtest, vtest_hdr, sizeof(vtest_hdr));
   if (vtest_hdr[VTEST_CMD_ID] == VCMD_PING_PROTOCOL_VERSION) {
      /* consume the dummy busy wait result */
      vtest_read(vtest, vtest_hdr, sizeof(vtest_hdr));
      assert(vtest_hdr[VTEST_CMD_ID] == VCMD_RESOURCE_BUSY_WAIT);
      vtest_read(vtest, &dummy, sizeof(dummy));
      return true;
   } else {
      /* no ping protocol version support */
      assert(vtest_hdr[VTEST_CMD_ID] == VCMD_RESOURCE_BUSY_WAIT);
      vtest_read(vtest, &dummy, sizeof(dummy));
      return false;
   }
}

static uint32_t
vtest_vcmd_protocol_version(struct vtest *vtest)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   uint32_t vcmd_protocol_version[VCMD_PROTOCOL_VERSION_SIZE];
   vtest_hdr[VTEST_CMD_LEN] = VCMD_PROTOCOL_VERSION_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_PROTOCOL_VERSION;
   vcmd_protocol_version[VCMD_PROTOCOL_VERSION_VERSION] =
      VTEST_PROTOCOL_VERSION;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_protocol_version, sizeof(vcmd_protocol_version));

   vtest_read(vtest, vtest_hdr, sizeof(vtest_hdr));
   assert(vtest_hdr[VTEST_CMD_LEN] == VCMD_PROTOCOL_VERSION_SIZE);
   assert(vtest_hdr[VTEST_CMD_ID] == VCMD_PROTOCOL_VERSION);
   vtest_read(vtest, vcmd_protocol_version, sizeof(vcmd_protocol_version));

   return vcmd_protocol_version[VCMD_PROTOCOL_VERSION_VERSION];
}

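/* Query an optional server parameter.  The reply carries a validity flag
 * followed by the value; an unsupported param reads back as 0.
 */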
static uint32_t
vtest_vcmd_get_param(struct vtest *vtest, enum vcmd_param param)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   uint32_t vcmd_get_param[VCMD_GET_PARAM_SIZE];
   vtest_hdr[VTEST_CMD_LEN] = VCMD_GET_PARAM_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_GET_PARAM;
   vcmd_get_param[VCMD_GET_PARAM_PARAM] = param;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_get_param, sizeof(vcmd_get_param));

   vtest_read(vtest, vtest_hdr, sizeof(vtest_hdr));
   assert(vtest_hdr[VTEST_CMD_LEN] == 2);
   assert(vtest_hdr[VTEST_CMD_ID] == VCMD_GET_PARAM);

   uint32_t resp[2];
   vtest_read(vtest, resp, sizeof(resp));

   return resp[0] ? resp[1] : 0;
}

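/* The reply length comes from the vtest header.  Copy what fits into capset,
 * zero any tail, and drain any excess so the stream stays in sync.
 */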
static bool
vtest_vcmd_get_capset(struct vtest *vtest,
                      enum virgl_renderer_capset id,
                      uint32_t version,
                      void *capset,
                      size_t capset_size)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   uint32_t vcmd_get_capset[VCMD_GET_CAPSET_SIZE];
   vtest_hdr[VTEST_CMD_LEN] = VCMD_GET_CAPSET_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_GET_CAPSET;
   vcmd_get_capset[VCMD_GET_CAPSET_ID] = id;
   vcmd_get_capset[VCMD_GET_CAPSET_VERSION] = version;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_get_capset, sizeof(vcmd_get_capset));

   vtest_read(vtest, vtest_hdr, sizeof(vtest_hdr));
   assert(vtest_hdr[VTEST_CMD_ID] == VCMD_GET_CAPSET);

   uint32_t valid;
   vtest_read(vtest, &valid, sizeof(valid));
   if (!valid)
      return false;

   size_t read_size = (vtest_hdr[VTEST_CMD_LEN] - 1) * 4;
   if (capset_size >= read_size) {
      vtest_read(vtest, capset, read_size);
      memset(capset + read_size, 0, capset_size - read_size);
   } else {
      vtest_read(vtest, capset, capset_size);

      char temp[256];
      read_size -= capset_size;
      while (read_size) {
         const size_t temp_size = MIN2(read_size, ARRAY_SIZE(temp));
         vtest_read(vtest, temp, temp_size);
         read_size -= temp_size;
      }
   }

   return true;
}

static void
vtest_vcmd_context_init(struct vtest *vtest,
                        enum virgl_renderer_capset capset_id)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   uint32_t vcmd_context_init[VCMD_CONTEXT_INIT_SIZE];
   vtest_hdr[VTEST_CMD_LEN] = VCMD_CONTEXT_INIT_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_CONTEXT_INIT;
   vcmd_context_init[VCMD_CONTEXT_INIT_CAPSET_ID] = capset_id;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_context_init, sizeof(vcmd_context_init));
}

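/* Create a blob resource.  On success the server replies with the resource
 * id and passes the blob fd via SCM_RIGHTS (see vtest_receive_fd).
 */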
static uint32_t
vtest_vcmd_resource_create_blob(struct vtest *vtest,
                                enum vcmd_blob_type type,
                                uint32_t flags,
                                VkDeviceSize size,
                                vn_object_id blob_id,
                                int *res_fd)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   uint32_t vcmd_res_create_blob[VCMD_RES_CREATE_BLOB_SIZE];

   vtest_hdr[VTEST_CMD_LEN] = VCMD_RES_CREATE_BLOB_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_RESOURCE_CREATE_BLOB;

   vcmd_res_create_blob[VCMD_RES_CREATE_BLOB_TYPE] = type;
   vcmd_res_create_blob[VCMD_RES_CREATE_BLOB_FLAGS] = flags;
   vcmd_res_create_blob[VCMD_RES_CREATE_BLOB_SIZE_LO] = (uint32_t)size;
   vcmd_res_create_blob[VCMD_RES_CREATE_BLOB_SIZE_HI] =
      (uint32_t)(size >> 32);
   vcmd_res_create_blob[VCMD_RES_CREATE_BLOB_ID_LO] = (uint32_t)blob_id;
   vcmd_res_create_blob[VCMD_RES_CREATE_BLOB_ID_HI] =
      (uint32_t)(blob_id >> 32);

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_res_create_blob, sizeof(vcmd_res_create_blob));

   vtest_read(vtest, vtest_hdr, sizeof(vtest_hdr));
   assert(vtest_hdr[VTEST_CMD_LEN] == 1);
   assert(vtest_hdr[VTEST_CMD_ID] == VCMD_RESOURCE_CREATE_BLOB);

   uint32_t res_id;
   vtest_read(vtest, &res_id, sizeof(res_id));

   *res_fd = vtest_receive_fd(vtest);

   return res_id;
}

static void
vtest_vcmd_resource_unref(struct vtest *vtest, uint32_t res_id)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   uint32_t vcmd_res_unref[VCMD_RES_UNREF_SIZE];

   vtest_hdr[VTEST_CMD_LEN] = VCMD_RES_UNREF_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_RESOURCE_UNREF;
   vcmd_res_unref[VCMD_RES_UNREF_RES_HANDLE] = res_id;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_res_unref, sizeof(vcmd_res_unref));
}

static uint32_t
vtest_vcmd_sync_create(struct vtest *vtest, uint64_t initial_val)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   uint32_t vcmd_sync_create[VCMD_SYNC_CREATE_SIZE];

   vtest_hdr[VTEST_CMD_LEN] = VCMD_SYNC_CREATE_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_SYNC_CREATE;

   vcmd_sync_create[VCMD_SYNC_CREATE_VALUE_LO] = (uint32_t)initial_val;
   vcmd_sync_create[VCMD_SYNC_CREATE_VALUE_HI] =
      (uint32_t)(initial_val >> 32);

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_sync_create, sizeof(vcmd_sync_create));

   vtest_read(vtest, vtest_hdr, sizeof(vtest_hdr));
   assert(vtest_hdr[VTEST_CMD_LEN] == 1);
   assert(vtest_hdr[VTEST_CMD_ID] == VCMD_SYNC_CREATE);

   uint32_t sync_id;
   vtest_read(vtest, &sync_id, sizeof(sync_id));

   return sync_id;
}

static void
vtest_vcmd_sync_unref(struct vtest *vtest, uint32_t sync_id)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   uint32_t vcmd_sync_unref[VCMD_SYNC_UNREF_SIZE];

   vtest_hdr[VTEST_CMD_LEN] = VCMD_SYNC_UNREF_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_SYNC_UNREF;
   vcmd_sync_unref[VCMD_SYNC_UNREF_ID] = sync_id;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_sync_unref, sizeof(vcmd_sync_unref));
}

static uint64_t
vtest_vcmd_sync_read(struct vtest *vtest, uint32_t sync_id)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   uint32_t vcmd_sync_read[VCMD_SYNC_READ_SIZE];

   vtest_hdr[VTEST_CMD_LEN] = VCMD_SYNC_READ_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_SYNC_READ;

   vcmd_sync_read[VCMD_SYNC_READ_ID] = sync_id;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_sync_read, sizeof(vcmd_sync_read));

   vtest_read(vtest, vtest_hdr, sizeof(vtest_hdr));
   assert(vtest_hdr[VTEST_CMD_LEN] == 2);
   assert(vtest_hdr[VTEST_CMD_ID] == VCMD_SYNC_READ);

   uint64_t val;
   vtest_read(vtest, &val, sizeof(val));

   return val;
}

static void
vtest_vcmd_sync_write(struct vtest *vtest, uint32_t sync_id, uint64_t val)
{
   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   uint32_t vcmd_sync_write[VCMD_SYNC_WRITE_SIZE];

   vtest_hdr[VTEST_CMD_LEN] = VCMD_SYNC_WRITE_SIZE;
   vtest_hdr[VTEST_CMD_ID] = VCMD_SYNC_WRITE;

   vcmd_sync_write[VCMD_SYNC_WRITE_ID] = sync_id;
   vcmd_sync_write[VCMD_SYNC_WRITE_VALUE_LO] = (uint32_t)val;
   vcmd_sync_write[VCMD_SYNC_WRITE_VALUE_HI] = (uint32_t)(val >> 32);

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, vcmd_sync_write, sizeof(vcmd_sync_write));
}

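/* Wait on a set of syncs.  The server replies with a fd that becomes
 * readable once the wait condition is met; the caller polls it (see
 * sync_wait_poll).
 */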
static int
vtest_vcmd_sync_wait(struct vtest *vtest,
                     uint32_t flags,
                     int poll_timeout,
                     struct vn_renderer_sync *const *syncs,
                     const uint64_t *vals,
                     uint32_t count)
{
   const uint32_t timeout = poll_timeout >= 0 && poll_timeout <= INT32_MAX
                               ? poll_timeout
                               : UINT32_MAX;

   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   vtest_hdr[VTEST_CMD_LEN] = VCMD_SYNC_WAIT_SIZE(count);
   vtest_hdr[VTEST_CMD_ID] = VCMD_SYNC_WAIT;

   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));
   vtest_write(vtest, &flags, sizeof(flags));
   vtest_write(vtest, &timeout, sizeof(timeout));
   for (uint32_t i = 0; i < count; i++) {
      const uint64_t val = vals[i];
      const uint32_t sync[3] = {
         syncs[i]->sync_id,
         (uint32_t)val,
         (uint32_t)(val >> 32),
      };
      vtest_write(vtest, sync, sizeof(sync));
   }

   vtest_read(vtest, vtest_hdr, sizeof(vtest_hdr));
   assert(vtest_hdr[VTEST_CMD_LEN] == 0);
   assert(vtest_hdr[VTEST_CMD_ID] == VCMD_SYNC_WAIT);

   return vtest_receive_fd(vtest);
}

static void
submit_cmd2_sizes(const struct vn_renderer_submit *submit,
                  size_t *header_size,
                  size_t *cs_size,
                  size_t *sync_size)
{
   if (!submit->batch_count) {
      *header_size = 0;
      *cs_size = 0;
      *sync_size = 0;
      return;
   }

   *header_size = sizeof(uint32_t) +
                  sizeof(struct vcmd_submit_cmd2_batch) * submit->batch_count;

   *cs_size = 0;
   *sync_size = 0;
   for (uint32_t i = 0; i < submit->batch_count; i++) {
      const struct vn_renderer_submit_batch *batch = &submit->batches[i];
      assert(batch->cs_size % sizeof(uint32_t) == 0);
      *cs_size += batch->cs_size;
      *sync_size += (sizeof(uint32_t) + sizeof(uint64_t)) * batch->sync_count;
   }

   assert(*header_size % sizeof(uint32_t) == 0);
   assert(*cs_size % sizeof(uint32_t) == 0);
   assert(*sync_size % sizeof(uint32_t) == 0);
}

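/* VCMD_SUBMIT_CMD2 wire layout, in dwords:
 *
 *   batch_count
 *   struct vcmd_submit_cmd2_batch, batch_count times
 *   cs data of all batches, back to back
 *   (sync id, value lo, value hi) triples of all batches, back to back
 *
 * cmd_offset and sync_offset in each batch header point into the cs and
 * sync regions respectively.
 */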
static void
vtest_vcmd_submit_cmd2(struct vtest *vtest,
                       const struct vn_renderer_submit *submit)
{
   size_t header_size;
   size_t cs_size;
   size_t sync_size;
   submit_cmd2_sizes(submit, &header_size, &cs_size, &sync_size);
   const size_t total_size = header_size + cs_size + sync_size;
   if (!total_size)
      return;

   uint32_t vtest_hdr[VTEST_HDR_SIZE];
   vtest_hdr[VTEST_CMD_LEN] = total_size / sizeof(uint32_t);
   vtest_hdr[VTEST_CMD_ID] = VCMD_SUBMIT_CMD2;
   vtest_write(vtest, vtest_hdr, sizeof(vtest_hdr));

   /* write batch count and batch headers */
   const uint32_t batch_count = submit->batch_count;
   size_t cs_offset = header_size;
   size_t sync_offset = cs_offset + cs_size;
   vtest_write(vtest, &batch_count, sizeof(batch_count));
   for (uint32_t i = 0; i < submit->batch_count; i++) {
      const struct vn_renderer_submit_batch *batch = &submit->batches[i];
      struct vcmd_submit_cmd2_batch dst = {
         .cmd_offset = cs_offset / sizeof(uint32_t),
         .cmd_size = batch->cs_size / sizeof(uint32_t),
         .sync_offset = sync_offset / sizeof(uint32_t),
         .sync_count = batch->sync_count,
      };
      if (vtest->base.info.supports_multiple_timelines) {
         dst.flags = VCMD_SUBMIT_CMD2_FLAG_RING_IDX;
         dst.ring_idx = batch->ring_idx;
      }
      vtest_write(vtest, &dst, sizeof(dst));

      cs_offset += batch->cs_size;
      sync_offset +=
         (sizeof(uint32_t) + sizeof(uint64_t)) * batch->sync_count;
   }

   /* write cs */
   if (cs_size) {
      for (uint32_t i = 0; i < submit->batch_count; i++) {
         const struct vn_renderer_submit_batch *batch = &submit->batches[i];
         if (batch->cs_size)
            vtest_write(vtest, batch->cs_data, batch->cs_size);
      }
   }

   /* write syncs */
   for (uint32_t i = 0; i < submit->batch_count; i++) {
      const struct vn_renderer_submit_batch *batch = &submit->batches[i];

      for (uint32_t j = 0; j < batch->sync_count; j++) {
         const uint64_t val = batch->sync_values[j];
         const uint32_t sync[3] = {
            batch->syncs[j]->sync_id,
            (uint32_t)val,
            (uint32_t)(val >> 32),
         };
         vtest_write(vtest, sync, sizeof(sync));
      }
   }
}

static VkResult
vtest_sync_write(struct vn_renderer *renderer,
                 struct vn_renderer_sync *_sync,
                 uint64_t val)
{
   struct vtest *vtest = (struct vtest *)renderer;
   struct vtest_sync *sync = (struct vtest_sync *)_sync;

   mtx_lock(&vtest->sock_mutex);
   vtest_vcmd_sync_write(vtest, sync->base.sync_id, val);
   mtx_unlock(&vtest->sock_mutex);

   return VK_SUCCESS;
}

static VkResult
vtest_sync_read(struct vn_renderer *renderer,
                struct vn_renderer_sync *_sync,
                uint64_t *val)
{
   struct vtest *vtest = (struct vtest *)renderer;
   struct vtest_sync *sync = (struct vtest_sync *)_sync;

   mtx_lock(&vtest->sock_mutex);
   *val = vtest_vcmd_sync_read(vtest, sync->base.sync_id);
   mtx_unlock(&vtest->sock_mutex);

   return VK_SUCCESS;
}

static VkResult
vtest_sync_reset(struct vn_renderer *renderer,
                 struct vn_renderer_sync *sync,
                 uint64_t initial_val)
{
   /* same as write */
   return vtest_sync_write(renderer, sync, initial_val);
}

static void
vtest_sync_destroy(struct vn_renderer *renderer,
                   struct vn_renderer_sync *_sync)
{
   struct vtest *vtest = (struct vtest *)renderer;
   struct vtest_sync *sync = (struct vtest_sync *)_sync;

   mtx_lock(&vtest->sock_mutex);
   vtest_vcmd_sync_unref(vtest, sync->base.sync_id);
   mtx_unlock(&vtest->sock_mutex);

   free(sync);
}

static VkResult
vtest_sync_create(struct vn_renderer *renderer,
                  uint64_t initial_val,
                  uint32_t flags,
                  struct vn_renderer_sync **out_sync)
{
   struct vtest *vtest = (struct vtest *)renderer;

   struct vtest_sync *sync = calloc(1, sizeof(*sync));
   if (!sync)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   mtx_lock(&vtest->sock_mutex);
   sync->base.sync_id = vtest_vcmd_sync_create(vtest, initial_val);
   mtx_unlock(&vtest->sock_mutex);

   *out_sync = &sync->base;
   return VK_SUCCESS;
}

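/* invalidate and flush are nops because blob memory is assumed to be
 * host-coherent (see the comment in vtest_bo_map)
 */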
static void
vtest_bo_invalidate(struct vn_renderer *renderer,
                    struct vn_renderer_bo *bo,
                    VkDeviceSize offset,
                    VkDeviceSize size)
{
   /* nop */
}

static void
vtest_bo_flush(struct vn_renderer *renderer,
               struct vn_renderer_bo *bo,
               VkDeviceSize offset,
               VkDeviceSize size)
{
   /* nop */
}

static void *
vtest_bo_map(struct vn_renderer *renderer, struct vn_renderer_bo *_bo)
{
   struct vtest *vtest = (struct vtest *)renderer;
   struct vtest_bo *bo = (struct vtest_bo *)_bo;
   const bool mappable = bo->blob_flags & VCMD_BLOB_FLAG_MAPPABLE;
   const bool shareable = bo->blob_flags & VCMD_BLOB_FLAG_SHAREABLE;

   /* not thread-safe but is fine */
   if (!bo->base.mmap_ptr && mappable) {
      /* We wrongly assume that mmap(dma_buf) and vkMapMemory(VkDeviceMemory)
       * are equivalent when the blob type is VCMD_BLOB_TYPE_HOST3D.  While
       * we check for VCMD_PARAM_HOST_COHERENT_DMABUF_BLOB, we know vtest can
       * lie.
       */
      void *ptr = mmap(NULL, bo->base.mmap_size, PROT_READ | PROT_WRITE,
                       MAP_SHARED, bo->res_fd, 0);
      if (ptr == MAP_FAILED) {
         vn_log(vtest->instance, "failed to mmap %d of size %zu rw: %s",
                bo->res_fd, bo->base.mmap_size, strerror(errno));
      } else {
         bo->base.mmap_ptr = ptr;
         /* we don't need the fd anymore */
         if (!shareable) {
            close(bo->res_fd);
            bo->res_fd = -1;
         }
      }
   }

   return bo->base.mmap_ptr;
}

static int
vtest_bo_export_dma_buf(struct vn_renderer *renderer,
                        struct vn_renderer_bo *_bo)
{
   const struct vtest_bo *bo = (struct vtest_bo *)_bo;
   const bool shareable = bo->blob_flags & VCMD_BLOB_FLAG_SHAREABLE;
   return shareable ? os_dupfd_cloexec(bo->res_fd) : -1;
}

static bool
vtest_bo_destroy(struct vn_renderer *renderer, struct vn_renderer_bo *_bo)
{
   struct vtest *vtest = (struct vtest *)renderer;
   struct vtest_bo *bo = (struct vtest_bo *)_bo;

   if (bo->base.mmap_ptr)
      munmap(bo->base.mmap_ptr, bo->base.mmap_size);
   if (bo->res_fd >= 0)
      close(bo->res_fd);

   mtx_lock(&vtest->sock_mutex);
   vtest_vcmd_resource_unref(vtest, bo->base.res_id);
   mtx_unlock(&vtest->sock_mutex);

   return true;
}

static uint32_t
vtest_bo_blob_flags(VkMemoryPropertyFlags flags,
                    VkExternalMemoryHandleTypeFlags external_handles)
{
   uint32_t blob_flags = 0;
   if (flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
      blob_flags |= VCMD_BLOB_FLAG_MAPPABLE;
   if (external_handles)
      blob_flags |= VCMD_BLOB_FLAG_SHAREABLE;
   if (external_handles & VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT)
      blob_flags |= VCMD_BLOB_FLAG_CROSS_DEVICE;

   return blob_flags;
}

static VkResult
vtest_bo_create_from_device_memory(
   struct vn_renderer *renderer,
   VkDeviceSize size,
   vn_object_id mem_id,
   VkMemoryPropertyFlags flags,
   VkExternalMemoryHandleTypeFlags external_handles,
   struct vn_renderer_bo **out_bo)
{
   struct vtest *vtest = (struct vtest *)renderer;
   const uint32_t blob_flags = vtest_bo_blob_flags(flags, external_handles);

   mtx_lock(&vtest->sock_mutex);
   int res_fd;
   uint32_t res_id = vtest_vcmd_resource_create_blob(
      vtest, VCMD_BLOB_TYPE_HOST3D, blob_flags, size, mem_id, &res_fd);
   assert(res_id > 0 && res_fd >= 0);
   mtx_unlock(&vtest->sock_mutex);

   struct vtest_bo *bo = util_sparse_array_get(&vtest->bo_array, res_id);
   *bo = (struct vtest_bo){
      .base = {
         .refcount = VN_REFCOUNT_INIT(1),
         .res_id = res_id,
         .mmap_size = size,
      },
      .res_fd = res_fd,
      .blob_flags = blob_flags,
   };

   *out_bo = &bo->base;

   return VK_SUCCESS;
}

static void
vtest_shmem_destroy_now(struct vn_renderer *renderer,
                        struct vn_renderer_shmem *_shmem)
{
   struct vtest *vtest = (struct vtest *)renderer;
   struct vtest_shmem *shmem = (struct vtest_shmem *)_shmem;

   munmap(shmem->base.mmap_ptr, shmem->base.mmap_size);

   mtx_lock(&vtest->sock_mutex);
   vtest_vcmd_resource_unref(vtest, shmem->base.res_id);
   mtx_unlock(&vtest->sock_mutex);
}

static void
vtest_shmem_destroy(struct vn_renderer *renderer,
                    struct vn_renderer_shmem *shmem)
{
   struct vtest *vtest = (struct vtest *)renderer;

   if (vn_renderer_shmem_cache_add(&vtest->shmem_cache, shmem))
      return;

   vtest_shmem_destroy_now(&vtest->base, shmem);
}

static struct vn_renderer_shmem *
vtest_shmem_create(struct vn_renderer *renderer, size_t size)
{
   struct vtest *vtest = (struct vtest *)renderer;

   struct vn_renderer_shmem *cached_shmem =
      vn_renderer_shmem_cache_get(&vtest->shmem_cache, size);
   if (cached_shmem) {
      cached_shmem->refcount = VN_REFCOUNT_INIT(1);
      return cached_shmem;
   }

   mtx_lock(&vtest->sock_mutex);
   int res_fd;
   uint32_t res_id = vtest_vcmd_resource_create_blob(
      vtest, vtest->shmem_blob_mem, VCMD_BLOB_FLAG_MAPPABLE, size, 0,
      &res_fd);
   assert(res_id > 0 && res_fd >= 0);
   mtx_unlock(&vtest->sock_mutex);

   void *ptr =
      mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, res_fd, 0);
   close(res_fd);
   if (ptr == MAP_FAILED) {
      mtx_lock(&vtest->sock_mutex);
      vtest_vcmd_resource_unref(vtest, res_id);
      mtx_unlock(&vtest->sock_mutex);
      return NULL;
   }

   struct vtest_shmem *shmem =
      util_sparse_array_get(&vtest->shmem_array, res_id);
   *shmem = (struct vtest_shmem){
      .base = {
         .refcount = VN_REFCOUNT_INIT(1),
         .res_id = res_id,
         .mmap_size = size,
         .mmap_ptr = ptr,
      },
   };

   return &shmem->base;
}

static VkResult
sync_wait_poll(int fd, int poll_timeout)
{
   struct pollfd pollfd = {
      .fd = fd,
      .events = POLLIN,
   };
   int ret;
   do {
      ret = poll(&pollfd, 1, poll_timeout);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

   if (ret < 0 || (ret > 0 && !(pollfd.revents & POLLIN))) {
      return (ret < 0 && errno == ENOMEM) ? VK_ERROR_OUT_OF_HOST_MEMORY
                                          : VK_ERROR_DEVICE_LOST;
   }

   return ret ? VK_SUCCESS : VK_TIMEOUT;
}

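/* Convert a timeout in ns to a poll timeout in ms, rounding up.  Values
 * that overflow the rounding addition or do not fit in an int map to -1,
 * which poll() treats as an infinite wait.
 */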
static int
timeout_to_poll_timeout(uint64_t timeout)
{
   const uint64_t ns_per_ms = 1000000;
   const uint64_t ms = (timeout + ns_per_ms - 1) / ns_per_ms;
   if (!ms && timeout)
      return -1;
   return ms <= INT_MAX ? ms : -1;
}

static VkResult
vtest_wait(struct vn_renderer *renderer, const struct vn_renderer_wait *wait)
{
   struct vtest *vtest = (struct vtest *)renderer;
   const uint32_t flags = wait->wait_any ? VCMD_SYNC_WAIT_FLAG_ANY : 0;
   const int poll_timeout = timeout_to_poll_timeout(wait->timeout);

   /*
    * vtest_vcmd_sync_wait (and some other sync commands) is executed after
    * all prior commands are dispatched.  That is far from ideal.
    *
    * In virtio-gpu, a drm_syncobj wait ioctl is executed immediately.  It
    * works because it uses virtio-gpu interrupts as a side channel.  vtest
    * needs a side channel to perform well.
    *
    * Whether on virtio-gpu or vtest, we should also set up a 1-byte coherent
    * memory region that the GPU sets to non-zero after the syncs signal.
    * That would allow us to do a quick check (or spin a bit) before waiting.
    */
   mtx_lock(&vtest->sock_mutex);
   const int fd =
      vtest_vcmd_sync_wait(vtest, flags, poll_timeout, wait->syncs,
                           wait->sync_values, wait->sync_count);
   mtx_unlock(&vtest->sock_mutex);

   VkResult result = sync_wait_poll(fd, poll_timeout);
   close(fd);

   return result;
}

static VkResult
vtest_submit(struct vn_renderer *renderer,
             const struct vn_renderer_submit *submit)
{
   struct vtest *vtest = (struct vtest *)renderer;

   mtx_lock(&vtest->sock_mutex);
   vtest_vcmd_submit_cmd2(vtest, submit);
   mtx_unlock(&vtest->sock_mutex);

   return VK_SUCCESS;
}

static void
vtest_init_renderer_info(struct vtest *vtest)
{
   struct vn_renderer_info *info = &vtest->base.info;

   info->drm.has_primary = false;
   info->drm.primary_major = 0;
   info->drm.primary_minor = 0;
   info->drm.has_render = false;
   info->drm.render_major = 0;
   info->drm.render_minor = 0;

   info->pci.vendor_id = VTEST_PCI_VENDOR_ID;
   info->pci.device_id = VTEST_PCI_DEVICE_ID;

   info->has_dma_buf_import = false;
   info->has_external_sync = false;
   info->has_implicit_fencing = false;

   const struct virgl_renderer_capset_venus *capset = &vtest->capset.data;
   info->wire_format_version = capset->wire_format_version;
   info->vk_xml_version = capset->vk_xml_version;
   info->vk_ext_command_serialization_spec_version =
      capset->vk_ext_command_serialization_spec_version;
   info->vk_mesa_venus_protocol_spec_version =
      capset->vk_mesa_venus_protocol_spec_version;
   info->supports_blob_id_0 = capset->supports_blob_id_0;

   /* ensure vk_extension_mask is large enough to hold all capset masks */
   STATIC_ASSERT(sizeof(info->vk_extension_mask) >=
                 sizeof(capset->vk_extension_mask1));
   memcpy(info->vk_extension_mask, capset->vk_extension_mask1,
          sizeof(capset->vk_extension_mask1));

   info->allow_vk_wait_syncs = capset->allow_vk_wait_syncs;

   info->supports_multiple_timelines = capset->supports_multiple_timelines;
   info->max_timeline_count = vtest->max_timeline_count;
}

static void
vtest_destroy(struct vn_renderer *renderer,
              const VkAllocationCallbacks *alloc)
{
   struct vtest *vtest = (struct vtest *)renderer;

   vn_renderer_shmem_cache_fini(&vtest->shmem_cache);

   if (vtest->sock_fd >= 0) {
      shutdown(vtest->sock_fd, SHUT_RDWR);
      close(vtest->sock_fd);
   }

   mtx_destroy(&vtest->sock_mutex);
   util_sparse_array_finish(&vtest->shmem_array);
   util_sparse_array_finish(&vtest->bo_array);

   vk_free(alloc, vtest);
}

static VkResult
vtest_init_capset(struct vtest *vtest)
{
   vtest->capset.id = VIRGL_RENDERER_CAPSET_VENUS;
   vtest->capset.version = 0;

   if (!vtest_vcmd_get_capset(vtest, vtest->capset.id, vtest->capset.version,
                              &vtest->capset.data,
                              sizeof(vtest->capset.data))) {
      vn_log(vtest->instance, "no venus capset");
      return VK_ERROR_INITIALIZATION_FAILED;
   }

   return VK_SUCCESS;
}

static VkResult
vtest_init_params(struct vtest *vtest)
{
   uint32_t val = vtest_vcmd_get_param(vtest, VCMD_PARAM_MAX_TIMELINE_COUNT);
   if (!val) {
      vn_log(vtest->instance, "no timeline support");
      return VK_ERROR_INITIALIZATION_FAILED;
   }
   vtest->max_timeline_count = val;

   return VK_SUCCESS;
}

static VkResult
vtest_init_protocol_version(struct vtest *vtest)
{
   const uint32_t min_protocol_version = 3;

   const uint32_t ver = vtest_vcmd_ping_protocol_version(vtest)
                           ? vtest_vcmd_protocol_version(vtest)
                           : 0;
   if (ver < min_protocol_version) {
      vn_log(vtest->instance, "vtest protocol version (%u) too old", ver);
      return VK_ERROR_INITIALIZATION_FAILED;
   }

   vtest->protocol_version = ver;

   return VK_SUCCESS;
}

static VkResult
vtest_init(struct vtest *vtest)
{
   const char *socket_name = os_get_option("VTEST_SOCKET_NAME");

   util_sparse_array_init(&vtest->shmem_array, sizeof(struct vtest_shmem),
                          1024);
   util_sparse_array_init(&vtest->bo_array, sizeof(struct vtest_bo), 1024);

   mtx_init(&vtest->sock_mutex, mtx_plain);
   vtest->sock_fd = vtest_connect_socket(
      vtest->instance, socket_name ? socket_name : VTEST_DEFAULT_SOCKET_NAME);
   if (vtest->sock_fd < 0)
      return VK_ERROR_INITIALIZATION_FAILED;

   const char *renderer_name = util_get_process_name();
   if (!renderer_name)
      renderer_name = "venus";
   vtest_vcmd_create_renderer(vtest, renderer_name);

   VkResult result = vtest_init_protocol_version(vtest);
   if (result == VK_SUCCESS)
      result = vtest_init_params(vtest);
   if (result == VK_SUCCESS)
      result = vtest_init_capset(vtest);
   if (result != VK_SUCCESS)
      return result;

   /* see virtgpu_init_shmem_blob_mem */
   assert(vtest->capset.data.supports_blob_id_0);
   vtest->shmem_blob_mem = VCMD_BLOB_TYPE_HOST3D;

   vn_renderer_shmem_cache_init(&vtest->shmem_cache, &vtest->base,
                                vtest_shmem_destroy_now);

   vtest_vcmd_context_init(vtest, vtest->capset.id);

   vtest_init_renderer_info(vtest);

   vtest->base.ops.destroy = vtest_destroy;
   vtest->base.ops.submit = vtest_submit;
   vtest->base.ops.wait = vtest_wait;

   vtest->base.shmem_ops.create = vtest_shmem_create;
   vtest->base.shmem_ops.destroy = vtest_shmem_destroy;

   vtest->base.bo_ops.create_from_device_memory =
      vtest_bo_create_from_device_memory;
   vtest->base.bo_ops.create_from_dma_buf = NULL;
   vtest->base.bo_ops.destroy = vtest_bo_destroy;
   vtest->base.bo_ops.export_dma_buf = vtest_bo_export_dma_buf;
   vtest->base.bo_ops.map = vtest_bo_map;
   vtest->base.bo_ops.flush = vtest_bo_flush;
   vtest->base.bo_ops.invalidate = vtest_bo_invalidate;

   vtest->base.sync_ops.create = vtest_sync_create;
   vtest->base.sync_ops.create_from_syncobj = NULL;
   vtest->base.sync_ops.destroy = vtest_sync_destroy;
   vtest->base.sync_ops.export_syncobj = NULL;
   vtest->base.sync_ops.reset = vtest_sync_reset;
   vtest->base.sync_ops.read = vtest_sync_read;
   vtest->base.sync_ops.write = vtest_sync_write;

   return VK_SUCCESS;
}

VkResult
vn_renderer_create_vtest(struct vn_instance *instance,
                         const VkAllocationCallbacks *alloc,
                         struct vn_renderer **renderer)
{
   struct vtest *vtest = vk_zalloc(alloc, sizeof(*vtest), VN_DEFAULT_ALIGN,
                                   VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!vtest)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   vtest->instance = instance;
   vtest->sock_fd = -1;

   VkResult result = vtest_init(vtest);
   if (result != VK_SUCCESS) {
      vtest_destroy(&vtest->base, alloc);
      return result;
   }

   *renderer = &vtest->base;

   return VK_SUCCESS;
}