/*
 * Copyright 2014, 2015 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/stat.h>

#include "os/os_mman.h"
#include "util/os_file.h"
#include "util/os_time.h"
#include "util/u_memory.h"
#include "util/format/u_format.h"
#include "util/u_hash_table.h"
#include "util/u_inlines.h"
#include "util/u_pointer.h"
#include "frontend/drm_driver.h"
#include "virgl/virgl_screen.h"
#include "virgl/virgl_public.h"
#include "virtio-gpu/virgl_protocol.h"

#include <xf86drm.h>
#include <libsync.h>
#include "drm-uapi/virtgpu_drm.h"

#include "virgl_drm_winsys.h"
#include "virgl_drm_public.h"

/* Delete these local definitions when virglrenderer_hw.h becomes public. */
#define VIRGL_DRM_CAPSET_VIRGL 1
#define VIRGL_DRM_CAPSET_VIRGL2 2

#define VIRGL_DRM_VERSION(major, minor) ((major) << 16 | (minor))
#define VIRGL_DRM_VERSION_FENCE_FD VIRGL_DRM_VERSION(0, 1)
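/* e.g. VIRGL_DRM_VERSION(0, 1) == 1: virgl_drm_get_version() below returns
 * versions in this encoding, so any kernel reporting DRM version 0.1 or
 * newer enables fence FD support (see virgl_drm_winsys_create()). */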

/* Gets a pointer to the virgl_hw_res containing the pointed-to cache entry. */
#define cache_entry_container_res(ptr) \
   (struct virgl_hw_res*)((char*)ptr - offsetof(struct virgl_hw_res, cache_entry))
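
/*
 * A minimal usage sketch, mirroring the cache lookup in
 * virgl_drm_winsys_resource_cache_create() below:
 *
 *    struct virgl_resource_cache_entry *entry =
 *       virgl_resource_cache_remove_compatible(&qdws->cache, params);
 *    if (entry) {
 *       struct virgl_hw_res *res = cache_entry_container_res(entry);
 *       ...
 *    }
 */
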
static inline boolean can_cache_resource(uint32_t bind)
{
   return bind == VIRGL_BIND_CONSTANT_BUFFER ||
          bind == VIRGL_BIND_INDEX_BUFFER ||
          bind == VIRGL_BIND_VERTEX_BUFFER ||
          bind == VIRGL_BIND_CUSTOM ||
          bind == VIRGL_BIND_STAGING ||
          bind == VIRGL_BIND_DEPTH_STENCIL ||
          bind == VIRGL_BIND_RENDER_TARGET ||
          bind == 0;
}

static void virgl_hw_res_destroy(struct virgl_drm_winsys *qdws,
                                 struct virgl_hw_res *res)
{
   struct drm_gem_close args;

   mtx_lock(&qdws->bo_handles_mutex);

   /* We intentionally avoid taking the lock in
    * virgl_drm_resource_reference. Now that the
    * lock is taken, we need to check the refcount
    * again. */
   if (pipe_is_referenced(&res->reference)) {
      mtx_unlock(&qdws->bo_handles_mutex);
      return;
   }

   _mesa_hash_table_remove_key(qdws->bo_handles,
                               (void *)(uintptr_t)res->bo_handle);
   if (res->flink_name)
      _mesa_hash_table_remove_key(qdws->bo_names,
                                  (void *)(uintptr_t)res->flink_name);
   mtx_unlock(&qdws->bo_handles_mutex);
   if (res->ptr)
      os_munmap(res->ptr, res->size);

   memset(&args, 0, sizeof(args));
   args.handle = res->bo_handle;
   drmIoctl(qdws->fd, DRM_IOCTL_GEM_CLOSE, &args);
   FREE(res);
}

static boolean virgl_drm_resource_is_busy(struct virgl_winsys *vws,
                                          struct virgl_hw_res *res)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct drm_virtgpu_3d_wait waitcmd;
   int ret;

   if (!p_atomic_read(&res->maybe_busy) && !p_atomic_read(&res->external))
      return FALSE;

   memset(&waitcmd, 0, sizeof(waitcmd));
   waitcmd.handle = res->bo_handle;
   waitcmd.flags = VIRTGPU_WAIT_NOWAIT;

   ret = drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
   if (ret && errno == EBUSY)
      return TRUE;

   p_atomic_set(&res->maybe_busy, false);

   return FALSE;
}

static void
virgl_drm_winsys_destroy(struct virgl_winsys *qws)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);

   virgl_resource_cache_flush(&qdws->cache);

   _mesa_hash_table_destroy(qdws->bo_handles, NULL);
   _mesa_hash_table_destroy(qdws->bo_names, NULL);
   mtx_destroy(&qdws->bo_handles_mutex);
   mtx_destroy(&qdws->mutex);

   FREE(qdws);
}

static void virgl_drm_resource_reference(struct virgl_winsys *qws,
                                         struct virgl_hw_res **dres,
                                         struct virgl_hw_res *sres)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct virgl_hw_res *old = *dres;

   if (pipe_reference(&(*dres)->reference, &sres->reference)) {

      if (!can_cache_resource(old->bind) ||
          p_atomic_read(&old->external)) {
         virgl_hw_res_destroy(qdws, old);
      } else {
         mtx_lock(&qdws->mutex);
         virgl_resource_cache_add(&qdws->cache, &old->cache_entry);
         mtx_unlock(&qdws->mutex);
      }
   }
   *dres = sres;
}

static struct virgl_hw_res *
virgl_drm_winsys_resource_create_blob(struct virgl_winsys *qws,
                                      enum pipe_texture_target target,
                                      uint32_t format,
                                      uint32_t bind,
                                      uint32_t width,
                                      uint32_t height,
                                      uint32_t depth,
                                      uint32_t array_size,
                                      uint32_t last_level,
                                      uint32_t nr_samples,
                                      uint32_t flags,
                                      uint32_t size)
{
   int ret;
   int32_t blob_id;
   uint32_t cmd[VIRGL_PIPE_RES_CREATE_SIZE + 1] = { 0 };
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_virtgpu_resource_create_blob drm_rc_blob = { 0 };
   struct virgl_hw_res *res;
   struct virgl_resource_params params = { .size = size,
                                           .bind = bind,
                                           .format = format,
                                           .flags = flags,
                                           .nr_samples = nr_samples,
                                           .width = width,
                                           .height = height,
                                           .depth = depth,
                                           .array_size = array_size,
                                           .last_level = last_level,
                                           .target = target };

   res = CALLOC_STRUCT(virgl_hw_res);
   if (!res)
      return NULL;

   /* Make sure the blob is page aligned. */
   if (flags & (VIRGL_RESOURCE_FLAG_MAP_PERSISTENT |
                VIRGL_RESOURCE_FLAG_MAP_COHERENT)) {
      width = ALIGN(width, getpagesize());
      size = ALIGN(size, getpagesize());
   }
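
   /* blob_id ties this guest blob to the host resource created by the
    * VIRGL_CCMD_PIPE_RESOURCE_CREATE command below: the same id is written
    * into the command stream and into drm_rc_blob. */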
   blob_id = p_atomic_inc_return(&qdws->blob_id);
   cmd[0] = VIRGL_CMD0(VIRGL_CCMD_PIPE_RESOURCE_CREATE, 0, VIRGL_PIPE_RES_CREATE_SIZE);
   cmd[VIRGL_PIPE_RES_CREATE_FORMAT] = format;
   cmd[VIRGL_PIPE_RES_CREATE_BIND] = bind;
   cmd[VIRGL_PIPE_RES_CREATE_TARGET] = target;
   cmd[VIRGL_PIPE_RES_CREATE_WIDTH] = width;
   cmd[VIRGL_PIPE_RES_CREATE_HEIGHT] = height;
   cmd[VIRGL_PIPE_RES_CREATE_DEPTH] = depth;
   cmd[VIRGL_PIPE_RES_CREATE_ARRAY_SIZE] = array_size;
   cmd[VIRGL_PIPE_RES_CREATE_LAST_LEVEL] = last_level;
   cmd[VIRGL_PIPE_RES_CREATE_NR_SAMPLES] = nr_samples;
   cmd[VIRGL_PIPE_RES_CREATE_FLAGS] = flags;
   cmd[VIRGL_PIPE_RES_CREATE_BLOB_ID] = blob_id;

   drm_rc_blob.cmd = (unsigned long)(void *)&cmd;
   drm_rc_blob.cmd_size = 4 * (VIRGL_PIPE_RES_CREATE_SIZE + 1);
   drm_rc_blob.size = size;
   drm_rc_blob.blob_mem = VIRTGPU_BLOB_MEM_HOST3D;
   drm_rc_blob.blob_flags = VIRTGPU_BLOB_FLAG_USE_MAPPABLE;
   drm_rc_blob.blob_id = (uint64_t) blob_id;

   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &drm_rc_blob);
   if (ret != 0) {
      FREE(res);
      return NULL;
   }

   res->bind = bind;
   res->res_handle = drm_rc_blob.res_handle;
   res->bo_handle = drm_rc_blob.bo_handle;
   res->size = size;
   res->flags = flags;
   res->maybe_untyped = false;
   pipe_reference_init(&res->reference, 1);
   p_atomic_set(&res->external, false);
   p_atomic_set(&res->num_cs_references, 0);
   virgl_resource_cache_entry_init(&res->cache_entry, params);
   return res;
}

static struct virgl_hw_res *
virgl_drm_winsys_resource_create(struct virgl_winsys *qws,
                                 enum pipe_texture_target target,
                                 uint32_t format,
                                 uint32_t bind,
                                 uint32_t width,
                                 uint32_t height,
                                 uint32_t depth,
                                 uint32_t array_size,
                                 uint32_t last_level,
                                 uint32_t nr_samples,
                                 uint32_t size,
                                 bool for_fencing)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_virtgpu_resource_create createcmd;
   int ret;
   struct virgl_hw_res *res;
   uint32_t stride = width * util_format_get_blocksize(format);
   struct virgl_resource_params params = { .size = size,
                                           .bind = bind,
                                           .format = format,
                                           .flags = 0,
                                           .nr_samples = nr_samples,
                                           .width = width,
                                           .height = height,
                                           .depth = depth,
                                           .array_size = array_size,
                                           .last_level = last_level,
                                           .target = target };

   res = CALLOC_STRUCT(virgl_hw_res);
   if (!res)
      return NULL;

   memset(&createcmd, 0, sizeof(createcmd));
   createcmd.target = target;
   createcmd.format = pipe_to_virgl_format(format);
   createcmd.bind = bind;
   createcmd.width = width;
   createcmd.height = height;
   createcmd.depth = depth;
   createcmd.array_size = array_size;
   createcmd.last_level = last_level;
   createcmd.nr_samples = nr_samples;
   createcmd.stride = stride;
   createcmd.size = size;

   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &createcmd);
   if (ret != 0) {
      FREE(res);
      return NULL;
   }

   res->bind = bind;

   res->res_handle = createcmd.res_handle;
   res->bo_handle = createcmd.bo_handle;
   res->size = size;
   res->target = target;
   res->maybe_untyped = false;
   pipe_reference_init(&res->reference, 1);
   p_atomic_set(&res->external, false);
   p_atomic_set(&res->num_cs_references, 0);

   /* A newly created resource is considered busy by the kernel until the
    * command is retired. But for our purposes, we can consider it idle
    * unless it is used for fencing.
    */
   p_atomic_set(&res->maybe_busy, for_fencing);

   virgl_resource_cache_entry_init(&res->cache_entry, params);

   return res;
}

/*
 * Previously, with DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, all host resources had
 * a guest memory shadow resource with size = stride * bpp. Virglrenderer
 * would guess the stride implicitly when performing transfer operations, if
 * the stride wasn't specified. Interestingly, vtest would specify the stride.
 *
 * Guessing the stride breaks down with YUV images, which may be imported into
 * Mesa as 3R8 images. It also doesn't work if an external allocator
 * (e.g., minigbm) decides to use a stride not equal to width * bpp. With blob
 * resources, the size = stride * bpp restriction no longer holds, so use the
 * explicit strides passed into Mesa.
 */
static inline bool use_explicit_stride(struct virgl_hw_res *res, uint32_t level,
                                       uint32_t depth)
{
   return (params[param_resource_blob].value &&
           res->blob_mem == VIRTGPU_BLOB_MEM_HOST3D_GUEST &&
           res->target == PIPE_TEXTURE_2D &&
           level == 0 && depth == 1);
}

static int
virgl_bo_transfer_put(struct virgl_winsys *vws,
                      struct virgl_hw_res *res,
                      const struct pipe_box *box,
                      uint32_t stride, uint32_t layer_stride,
                      uint32_t buf_offset, uint32_t level)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct drm_virtgpu_3d_transfer_to_host tohostcmd;

   p_atomic_set(&res->maybe_busy, true);

   memset(&tohostcmd, 0, sizeof(tohostcmd));
   tohostcmd.bo_handle = res->bo_handle;
   tohostcmd.box.x = box->x;
   tohostcmd.box.y = box->y;
   tohostcmd.box.z = box->z;
   tohostcmd.box.w = box->width;
   tohostcmd.box.h = box->height;
   tohostcmd.box.d = box->depth;
   tohostcmd.offset = buf_offset;
   tohostcmd.level = level;

   if (use_explicit_stride(res, level, box->depth))
      tohostcmd.stride = stride;

   return drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST, &tohostcmd);
}

static int
virgl_bo_transfer_get(struct virgl_winsys *vws,
                      struct virgl_hw_res *res,
                      const struct pipe_box *box,
                      uint32_t stride, uint32_t layer_stride,
                      uint32_t buf_offset, uint32_t level)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct drm_virtgpu_3d_transfer_from_host fromhostcmd;

   p_atomic_set(&res->maybe_busy, true);

   memset(&fromhostcmd, 0, sizeof(fromhostcmd));
   fromhostcmd.bo_handle = res->bo_handle;
   fromhostcmd.level = level;
   fromhostcmd.offset = buf_offset;
   fromhostcmd.box.x = box->x;
   fromhostcmd.box.y = box->y;
   fromhostcmd.box.z = box->z;
   fromhostcmd.box.w = box->width;
   fromhostcmd.box.h = box->height;
   fromhostcmd.box.d = box->depth;

   if (use_explicit_stride(res, level, box->depth))
      fromhostcmd.stride = stride;

   return drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST, &fromhostcmd);
}

static struct virgl_hw_res *
virgl_drm_winsys_resource_cache_create(struct virgl_winsys *qws,
                                       enum pipe_texture_target target,
                                       const void *map_front_private,
                                       uint32_t format,
                                       uint32_t bind,
                                       uint32_t width,
                                       uint32_t height,
                                       uint32_t depth,
                                       uint32_t array_size,
                                       uint32_t last_level,
                                       uint32_t nr_samples,
                                       uint32_t flags,
                                       uint32_t size)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct virgl_hw_res *res;
   struct virgl_resource_cache_entry *entry;
   struct virgl_resource_params params = { .size = size,
                                           .bind = bind,
                                           .format = format,
                                           .flags = flags,
                                           .nr_samples = nr_samples,
                                           .width = width,
                                           .height = height,
                                           .depth = depth,
                                           .array_size = array_size,
                                           .last_level = last_level,
                                           .target = target };

   if (!can_cache_resource(bind))
      goto alloc;

   mtx_lock(&qdws->mutex);

   entry = virgl_resource_cache_remove_compatible(&qdws->cache, params);
   if (entry) {
      res = cache_entry_container_res(entry);
      mtx_unlock(&qdws->mutex);
      pipe_reference_init(&res->reference, 1);
      return res;
   }

   mtx_unlock(&qdws->mutex);

alloc:
   if (flags & (VIRGL_RESOURCE_FLAG_MAP_PERSISTENT |
                VIRGL_RESOURCE_FLAG_MAP_COHERENT))
      res = virgl_drm_winsys_resource_create_blob(qws, target, format, bind,
                                                  width, height, depth,
                                                  array_size, last_level,
                                                  nr_samples, flags, size);
   else
      res = virgl_drm_winsys_resource_create(qws, target, format, bind, width,
                                             height, depth, array_size,
                                             last_level, nr_samples, size,
                                             true);
   return res;
}

static struct virgl_hw_res *
virgl_drm_winsys_resource_create_handle(struct virgl_winsys *qws,
                                        struct winsys_handle *whandle,
                                        uint32_t *plane,
                                        uint32_t *stride,
                                        uint32_t *plane_offset,
                                        uint64_t *modifier,
                                        uint32_t *blob_mem)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_gem_open open_arg = {};
   struct drm_virtgpu_resource_info info_arg = {};
   struct virgl_hw_res *res = NULL;
   uint32_t handle = whandle->handle;

   if (whandle->plane >= VIRGL_MAX_PLANE_COUNT) {
      return NULL;
   }

   if (whandle->offset != 0 && whandle->type == WINSYS_HANDLE_TYPE_SHARED) {
      _debug_printf("attempt to import unsupported winsys offset %u\n",
                    whandle->offset);
      return NULL;
   } else if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
      *plane = whandle->plane;
      *stride = whandle->stride;
      *plane_offset = whandle->offset;
      *modifier = whandle->modifier;
   }

   mtx_lock(&qdws->bo_handles_mutex);

   /* We must maintain a list of pairs <handle, bo>, so that we always return
    * the same BO for one particular handle. If we didn't do that and created
    * more than one BO for the same handle and then relocated them in a CS,
    * we would hit a deadlock in the kernel.
    *
    * The list of pairs is guarded by a mutex, of course. */
   if (whandle->type == WINSYS_HANDLE_TYPE_SHARED) {
      res = util_hash_table_get(qdws->bo_names, (void*)(uintptr_t)handle);
   } else if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
      int r;
      r = drmPrimeFDToHandle(qdws->fd, whandle->handle, &handle);
      if (r)
         goto done;
      res = util_hash_table_get(qdws->bo_handles, (void*)(uintptr_t)handle);
   } else {
      /* Unknown handle type */
      goto done;
   }

   if (res) {
      /* qdws->bo_{names,handles} hold weak pointers to virgl_hw_res. Because
       * virgl_drm_resource_reference does not take qdws->bo_handles_mutex
       * until it enters virgl_hw_res_destroy, there is a small window where
       * the refcount can drop to zero. Call p_atomic_inc directly instead of
       * virgl_drm_resource_reference to avoid hitting assert failures.
       */
      p_atomic_inc(&res->reference.count);
      goto done;
   }

   res = CALLOC_STRUCT(virgl_hw_res);
   if (!res)
      goto done;

   if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
      res->bo_handle = handle;
   } else {
      memset(&open_arg, 0, sizeof(open_arg));
      open_arg.name = whandle->handle;
      if (drmIoctl(qdws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
         FREE(res);
         res = NULL;
         goto done;
      }
      res->bo_handle = open_arg.handle;
      res->flink_name = whandle->handle;
   }

   memset(&info_arg, 0, sizeof(info_arg));
   info_arg.bo_handle = res->bo_handle;

   if (drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_RESOURCE_INFO, &info_arg)) {
      /* TODO: close the BO handle on error */
      FREE(res);
      res = NULL;
      goto done;
   }

   res->res_handle = info_arg.res_handle;
   res->blob_mem = info_arg.blob_mem;
   *blob_mem = info_arg.blob_mem;

   res->size = info_arg.size;
   res->maybe_untyped = info_arg.blob_mem ? true : false;
   pipe_reference_init(&res->reference, 1);
   p_atomic_set(&res->external, true);
   res->num_cs_references = 0;

   if (res->flink_name)
      _mesa_hash_table_insert(qdws->bo_names, (void *)(uintptr_t)res->flink_name, res);
   _mesa_hash_table_insert(qdws->bo_handles, (void *)(uintptr_t)res->bo_handle, res);

done:
   mtx_unlock(&qdws->bo_handles_mutex);
   return res;
}

static void
virgl_drm_winsys_resource_set_type(struct virgl_winsys *qws,
                                   struct virgl_hw_res *res,
                                   uint32_t format, uint32_t bind,
                                   uint32_t width, uint32_t height,
                                   uint32_t usage, uint64_t modifier,
                                   uint32_t plane_count,
                                   const uint32_t *plane_strides,
                                   const uint32_t *plane_offsets)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   uint32_t cmd[VIRGL_PIPE_RES_SET_TYPE_SIZE(VIRGL_MAX_PLANE_COUNT)];
   struct drm_virtgpu_execbuffer eb;
   int ret;

   mtx_lock(&qdws->bo_handles_mutex);

   if (!res->maybe_untyped) {
      mtx_unlock(&qdws->bo_handles_mutex);
      return;
   }
   res->maybe_untyped = false;

   assert(plane_count && plane_count <= VIRGL_MAX_PLANE_COUNT);

   cmd[0] = VIRGL_CMD0(VIRGL_CCMD_PIPE_RESOURCE_SET_TYPE, 0, VIRGL_PIPE_RES_SET_TYPE_SIZE(plane_count));
   cmd[VIRGL_PIPE_RES_SET_TYPE_RES_HANDLE] = res->res_handle;
   cmd[VIRGL_PIPE_RES_SET_TYPE_FORMAT] = format;
   cmd[VIRGL_PIPE_RES_SET_TYPE_BIND] = bind;
   cmd[VIRGL_PIPE_RES_SET_TYPE_WIDTH] = width;
   cmd[VIRGL_PIPE_RES_SET_TYPE_HEIGHT] = height;
   cmd[VIRGL_PIPE_RES_SET_TYPE_USAGE] = usage;
   cmd[VIRGL_PIPE_RES_SET_TYPE_MODIFIER_LO] = (uint32_t)modifier;
   cmd[VIRGL_PIPE_RES_SET_TYPE_MODIFIER_HI] = (uint32_t)(modifier >> 32);
   for (uint32_t i = 0; i < plane_count; i++) {
      cmd[VIRGL_PIPE_RES_SET_TYPE_PLANE_STRIDE(i)] = plane_strides[i];
      cmd[VIRGL_PIPE_RES_SET_TYPE_PLANE_OFFSET(i)] = plane_offsets[i];
   }

   memset(&eb, 0, sizeof(eb));
   eb.command = (uintptr_t)cmd;
   eb.size = (1 + VIRGL_PIPE_RES_SET_TYPE_SIZE(plane_count)) * 4;
   eb.num_bo_handles = 1;
   eb.bo_handles = (uintptr_t)&res->bo_handle;

   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb);
   if (ret == -1)
      _debug_printf("failed to set resource type: %s\n", strerror(errno));

   mtx_unlock(&qdws->bo_handles_mutex);
}

static boolean virgl_drm_winsys_resource_get_handle(struct virgl_winsys *qws,
                                                    struct virgl_hw_res *res,
                                                    uint32_t stride,
                                                    struct winsys_handle *whandle)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_gem_flink flink;

   if (!res)
      return FALSE;

   if (whandle->type == WINSYS_HANDLE_TYPE_SHARED) {
      if (!res->flink_name) {
         memset(&flink, 0, sizeof(flink));
         flink.handle = res->bo_handle;

         if (drmIoctl(qdws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
            return FALSE;
         }
         res->flink_name = flink.name;

         mtx_lock(&qdws->bo_handles_mutex);
         _mesa_hash_table_insert(qdws->bo_names, (void *)(uintptr_t)res->flink_name, res);
         mtx_unlock(&qdws->bo_handles_mutex);
      }
      whandle->handle = res->flink_name;
   } else if (whandle->type == WINSYS_HANDLE_TYPE_KMS) {
      whandle->handle = res->bo_handle;
   } else if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
      if (drmPrimeHandleToFD(qdws->fd, res->bo_handle, DRM_CLOEXEC, (int*)&whandle->handle))
         return FALSE;
      mtx_lock(&qdws->bo_handles_mutex);
      _mesa_hash_table_insert(qdws->bo_handles, (void *)(uintptr_t)res->bo_handle, res);
      mtx_unlock(&qdws->bo_handles_mutex);
   }

   p_atomic_set(&res->external, true);

   whandle->stride = stride;
   return TRUE;
}

static void *virgl_drm_resource_map(struct virgl_winsys *qws,
                                    struct virgl_hw_res *res)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_virtgpu_map mmap_arg;
   void *ptr;

   if (res->ptr)
      return res->ptr;

   memset(&mmap_arg, 0, sizeof(mmap_arg));
   mmap_arg.handle = res->bo_handle;
   if (drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_MAP, &mmap_arg))
      return NULL;

   ptr = os_mmap(0, res->size, PROT_READ|PROT_WRITE, MAP_SHARED,
                 qdws->fd, mmap_arg.offset);
   if (ptr == MAP_FAILED)
      return NULL;

   res->ptr = ptr;
   return ptr;
}

static void virgl_drm_resource_wait(struct virgl_winsys *qws,
                                    struct virgl_hw_res *res)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_virtgpu_3d_wait waitcmd;
   int ret;

   if (!p_atomic_read(&res->maybe_busy) && !p_atomic_read(&res->external))
      return;

   memset(&waitcmd, 0, sizeof(waitcmd));
   waitcmd.handle = res->bo_handle;

   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
   if (ret)
      _debug_printf("waiting got error %d - slow GPU or hang?\n", errno);

   p_atomic_set(&res->maybe_busy, false);
}

static bool virgl_drm_alloc_res_list(struct virgl_drm_cmd_buf *cbuf,
                                     int initial_size)
{
   cbuf->nres = initial_size;
   cbuf->cres = 0;

   cbuf->res_bo = CALLOC(cbuf->nres, sizeof(struct virgl_hw_buf*));
   if (!cbuf->res_bo)
      return false;

   cbuf->res_hlist = MALLOC(cbuf->nres * sizeof(uint32_t));
   if (!cbuf->res_hlist) {
      FREE(cbuf->res_bo);
      return false;
   }

   return true;
}

static void virgl_drm_free_res_list(struct virgl_drm_cmd_buf *cbuf)
{
   int i;

   for (i = 0; i < cbuf->cres; i++) {
      p_atomic_dec(&cbuf->res_bo[i]->num_cs_references);
      virgl_drm_resource_reference(cbuf->ws, &cbuf->res_bo[i], NULL);
   }
   FREE(cbuf->res_hlist);
   FREE(cbuf->res_bo);
}

static boolean virgl_drm_lookup_res(struct virgl_drm_cmd_buf *cbuf,
                                    struct virgl_hw_res *res)
{
   unsigned hash = res->res_handle & (sizeof(cbuf->is_handle_added)-1);
   int i;
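
   /* Check the single-entry hash cache first; on a miss, fall back to a
    * linear scan of the reloc list and refresh the cached index on a hit. */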
   if (cbuf->is_handle_added[hash]) {
      i = cbuf->reloc_indices_hashlist[hash];
      if (cbuf->res_bo[i] == res)
         return true;

      for (i = 0; i < cbuf->cres; i++) {
         if (cbuf->res_bo[i] == res) {
            cbuf->reloc_indices_hashlist[hash] = i;
            return true;
         }
      }
   }
   return false;
}

static void virgl_drm_add_res(struct virgl_drm_winsys *qdws,
                              struct virgl_drm_cmd_buf *cbuf,
                              struct virgl_hw_res *res)
{
   unsigned hash = res->res_handle & (sizeof(cbuf->is_handle_added)-1);
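
   /* Grow the BO array and the handle list in lock-step, 256 entries at a
    * time. (The hash mask above relies on is_handle_added having a
    * power-of-two size.) */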
   if (cbuf->cres >= cbuf->nres) {
      unsigned new_nres = cbuf->nres + 256;
      void *new_ptr = REALLOC(cbuf->res_bo,
                              cbuf->nres * sizeof(struct virgl_hw_buf*),
                              new_nres * sizeof(struct virgl_hw_buf*));
      if (!new_ptr) {
         _debug_printf("failed to add relocation %d, %d\n", cbuf->cres, new_nres);
         return;
      }
      cbuf->res_bo = new_ptr;

      new_ptr = REALLOC(cbuf->res_hlist,
                        cbuf->nres * sizeof(uint32_t),
                        new_nres * sizeof(uint32_t));
      if (!new_ptr) {
         _debug_printf("failed to add hlist relocation %d, %d\n", cbuf->cres, cbuf->nres);
         return;
      }
      cbuf->res_hlist = new_ptr;
      cbuf->nres = new_nres;
   }

   cbuf->res_bo[cbuf->cres] = NULL;
   virgl_drm_resource_reference(&qdws->base, &cbuf->res_bo[cbuf->cres], res);
   cbuf->res_hlist[cbuf->cres] = res->bo_handle;
   cbuf->is_handle_added[hash] = TRUE;

   cbuf->reloc_indices_hashlist[hash] = cbuf->cres;
   p_atomic_inc(&res->num_cs_references);
   cbuf->cres++;
}

/* This is called after the cbuf is submitted. */
static void virgl_drm_clear_res_list(struct virgl_drm_cmd_buf *cbuf)
{
   int i;

   for (i = 0; i < cbuf->cres; i++) {
      /* Mark all BOs busy after submission. */
      p_atomic_set(&cbuf->res_bo[i]->maybe_busy, true);

      p_atomic_dec(&cbuf->res_bo[i]->num_cs_references);
      virgl_drm_resource_reference(cbuf->ws, &cbuf->res_bo[i], NULL);
   }

   cbuf->cres = 0;

   memset(cbuf->is_handle_added, 0, sizeof(cbuf->is_handle_added));
}

static void virgl_drm_emit_res(struct virgl_winsys *qws,
                               struct virgl_cmd_buf *_cbuf,
                               struct virgl_hw_res *res, boolean write_buf)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
   boolean already_in_list = virgl_drm_lookup_res(cbuf, res);

   if (write_buf)
      cbuf->base.buf[cbuf->base.cdw++] = res->res_handle;

   if (!already_in_list)
      virgl_drm_add_res(qdws, cbuf, res);
}

static boolean virgl_drm_res_is_ref(struct virgl_winsys *qws,
                                    struct virgl_cmd_buf *_cbuf,
                                    struct virgl_hw_res *res)
{
   if (!p_atomic_read(&res->num_cs_references))
      return FALSE;

   return TRUE;
}

static struct virgl_cmd_buf *virgl_drm_cmd_buf_create(struct virgl_winsys *qws,
                                                      uint32_t size)
{
   struct virgl_drm_cmd_buf *cbuf;

   cbuf = CALLOC_STRUCT(virgl_drm_cmd_buf);
   if (!cbuf)
      return NULL;

   cbuf->ws = qws;

   if (!virgl_drm_alloc_res_list(cbuf, 512)) {
      FREE(cbuf);
      return NULL;
   }

   cbuf->buf = CALLOC(size, sizeof(uint32_t));
   if (!cbuf->buf) {
      FREE(cbuf->res_hlist);
      FREE(cbuf->res_bo);
      FREE(cbuf);
      return NULL;
   }

   cbuf->in_fence_fd = -1;
   cbuf->base.buf = cbuf->buf;
   return &cbuf->base;
}

static void virgl_drm_cmd_buf_destroy(struct virgl_cmd_buf *_cbuf)
{
   struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);

   virgl_drm_free_res_list(cbuf);

   FREE(cbuf->buf);
   FREE(cbuf);
}

static struct pipe_fence_handle *
virgl_drm_fence_create(struct virgl_winsys *vws, int fd, bool external)
{
   struct virgl_drm_fence *fence;

   assert(vws->supports_fences);

   if (external) {
      fd = os_dupfd_cloexec(fd);
      if (fd < 0)
         return NULL;
   }

   fence = CALLOC_STRUCT(virgl_drm_fence);
   if (!fence) {
      close(fd);
      return NULL;
   }

   fence->fd = fd;
   fence->external = external;

   pipe_reference_init(&fence->reference, 1);

   return (struct pipe_fence_handle *)fence;
}

static struct pipe_fence_handle *
virgl_drm_fence_create_legacy(struct virgl_winsys *vws)
{
   struct virgl_drm_fence *fence;

   assert(!vws->supports_fences);

   fence = CALLOC_STRUCT(virgl_drm_fence);
   if (!fence)
      return NULL;
   fence->fd = -1;

   /* Resources for fences should not come from the cache, since we base
    * the fence status on the resource's creation busy status.
    */
   fence->hw_res = virgl_drm_winsys_resource_create(vws, PIPE_BUFFER,
         PIPE_FORMAT_R8_UNORM, VIRGL_BIND_CUSTOM, 8, 1, 1, 0, 0, 0, 8, true);
   if (!fence->hw_res) {
      FREE(fence);
      return NULL;
   }

   pipe_reference_init(&fence->reference, 1);

   return (struct pipe_fence_handle *)fence;
}

static int virgl_drm_winsys_submit_cmd(struct virgl_winsys *qws,
                                       struct virgl_cmd_buf *_cbuf,
                                       struct pipe_fence_handle **fence)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
   struct drm_virtgpu_execbuffer eb;
   int ret;

   if (cbuf->base.cdw == 0)
      return 0;
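
   /* Pack the accumulated command dwords and the handles of all referenced
    * BOs into a single execbuffer submission. */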
   memset(&eb, 0, sizeof(struct drm_virtgpu_execbuffer));
   eb.command = (unsigned long)(void*)cbuf->buf;
   eb.size = cbuf->base.cdw * 4;
   eb.num_bo_handles = cbuf->cres;
   eb.bo_handles = (unsigned long)(void *)cbuf->res_hlist;

   eb.fence_fd = -1;
   if (qws->supports_fences) {
      if (cbuf->in_fence_fd >= 0) {
         eb.flags |= VIRTGPU_EXECBUF_FENCE_FD_IN;
         eb.fence_fd = cbuf->in_fence_fd;
      }

      if (fence != NULL)
         eb.flags |= VIRTGPU_EXECBUF_FENCE_FD_OUT;
   } else {
      assert(cbuf->in_fence_fd < 0);
   }

   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb);
   if (ret == -1)
      _debug_printf("got error from kernel - expect bad rendering %d\n", errno);
   cbuf->base.cdw = 0;

   if (qws->supports_fences) {
      if (cbuf->in_fence_fd >= 0) {
         close(cbuf->in_fence_fd);
         cbuf->in_fence_fd = -1;
      }

      if (fence != NULL && ret == 0)
         *fence = virgl_drm_fence_create(qws, eb.fence_fd, false);
   } else {
      if (fence != NULL && ret == 0)
         *fence = virgl_drm_fence_create_legacy(qws);
   }

   virgl_drm_clear_res_list(cbuf);

   return ret;
}

static int virgl_drm_get_caps(struct virgl_winsys *vws,
                              struct virgl_drm_caps *caps)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct drm_virtgpu_get_caps args;
   int ret;

   virgl_ws_fill_new_caps_defaults(caps);

   memset(&args, 0, sizeof(args));
   if (params[param_capset_fix].value) {
      /* If we have the query fix, try to get cap set id 2 first. */
      args.cap_set_id = 2;
      args.size = sizeof(union virgl_caps);
   } else {
      args.cap_set_id = 1;
      args.size = sizeof(struct virgl_caps_v1);
   }
   args.addr = (unsigned long)&caps->caps;

   ret = drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &args);
   if (ret == -1 && errno == EINVAL) {
      /* Fall back to v1. */
      args.cap_set_id = 1;
      args.size = sizeof(struct virgl_caps_v1);
      ret = drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &args);
      if (ret == -1)
         return ret;
   }
   return ret;
}

static struct pipe_fence_handle *
virgl_cs_create_fence(struct virgl_winsys *vws, int fd)
{
   if (!vws->supports_fences)
      return NULL;

   return virgl_drm_fence_create(vws, fd, true);
}

static bool virgl_fence_wait(struct virgl_winsys *vws,
                             struct pipe_fence_handle *_fence,
                             uint64_t timeout)
{
   struct virgl_drm_fence *fence = virgl_drm_fence(_fence);

   if (vws->supports_fences) {
      uint64_t timeout_ms;
      int timeout_poll;

      if (timeout == 0)
         return sync_wait(fence->fd, 0) == 0;
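
      /* timeout is in nanoseconds; sync_wait() takes milliseconds. */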
      timeout_ms = timeout / 1000000;
      /* round up */
      if (timeout_ms * 1000000 < timeout)
         timeout_ms++;

      timeout_poll = timeout_ms <= INT_MAX ? (int) timeout_ms : -1;

      return sync_wait(fence->fd, timeout_poll) == 0;
   }

   if (timeout == 0)
      return !virgl_drm_resource_is_busy(vws, fence->hw_res);

   if (timeout != PIPE_TIMEOUT_INFINITE) {
      int64_t start_time = os_time_get();
      timeout /= 1000;
      while (virgl_drm_resource_is_busy(vws, fence->hw_res)) {
         if (os_time_get() - start_time >= timeout)
            return false;
         os_time_sleep(10);
      }
      return true;
   }
   virgl_drm_resource_wait(vws, fence->hw_res);

   return true;
}

static void virgl_fence_reference(struct virgl_winsys *vws,
                                  struct pipe_fence_handle **dst,
                                  struct pipe_fence_handle *src)
{
   struct virgl_drm_fence *dfence = virgl_drm_fence(*dst);
   struct virgl_drm_fence *sfence = virgl_drm_fence(src);

   if (pipe_reference(&dfence->reference, &sfence->reference)) {
      if (vws->supports_fences) {
         close(dfence->fd);
      } else {
         virgl_drm_resource_reference(vws, &dfence->hw_res, NULL);
      }
      FREE(dfence);
   }

   *dst = src;
}

static void virgl_fence_server_sync(struct virgl_winsys *vws,
                                    struct virgl_cmd_buf *_cbuf,
                                    struct pipe_fence_handle *_fence)
{
   struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
   struct virgl_drm_fence *fence = virgl_drm_fence(_fence);

   if (!vws->supports_fences)
      return;

   /* if not an external fence, then nothing more to do without preemption: */
   if (!fence->external)
      return;

   sync_accumulate("virgl", &cbuf->in_fence_fd, fence->fd);
}

static int virgl_fence_get_fd(struct virgl_winsys *vws,
                              struct pipe_fence_handle *_fence)
{
   struct virgl_drm_fence *fence = virgl_drm_fence(_fence);

   if (!vws->supports_fences)
      return -1;

   return os_dupfd_cloexec(fence->fd);
}

static int virgl_drm_get_version(int fd)
{
   int ret;
   drmVersionPtr version;

   version = drmGetVersion(fd);

   if (!version)
      ret = -EFAULT;
   else if (version->version_major != 0)
      ret = -EINVAL;
   else
      ret = VIRGL_DRM_VERSION(0, version->version_minor);

   drmFreeVersion(version);

   return ret;
}

static bool
virgl_drm_resource_cache_entry_is_busy(struct virgl_resource_cache_entry *entry,
                                       void *user_data)
{
   struct virgl_drm_winsys *qdws = user_data;
   struct virgl_hw_res *res = cache_entry_container_res(entry);

   return virgl_drm_resource_is_busy(&qdws->base, res);
}

static void
virgl_drm_resource_cache_entry_release(struct virgl_resource_cache_entry *entry,
                                       void *user_data)
{
   struct virgl_drm_winsys *qdws = user_data;
   struct virgl_hw_res *res = cache_entry_container_res(entry);

   virgl_hw_res_destroy(qdws, res);
}

static int virgl_init_context(int drmFD)
{
   int ret;
   struct drm_virtgpu_context_init init = { 0 };
   struct drm_virtgpu_context_set_param ctx_set_param = { 0 };
   uint64_t supports_capset_virgl, supports_capset_virgl2;
   supports_capset_virgl = supports_capset_virgl2 = 0;

   supports_capset_virgl = ((1 << VIRGL_DRM_CAPSET_VIRGL) &
                            params[param_supported_capset_ids].value);

   supports_capset_virgl2 = ((1 << VIRGL_DRM_CAPSET_VIRGL2) &
                             params[param_supported_capset_ids].value);

   if (!supports_capset_virgl && !supports_capset_virgl2) {
      _debug_printf("No virgl contexts available on host\n");
      return -EINVAL;
   }

   ctx_set_param.param = VIRTGPU_CONTEXT_PARAM_CAPSET_ID;
   ctx_set_param.value = (supports_capset_virgl2) ?
                         VIRGL_DRM_CAPSET_VIRGL2 :
                         VIRGL_DRM_CAPSET_VIRGL;

   init.ctx_set_params = (unsigned long)(void *)&ctx_set_param;
   init.num_params = 1;

   ret = drmIoctl(drmFD, DRM_IOCTL_VIRTGPU_CONTEXT_INIT, &init);
   /*
    * EEXIST happens when a compositor does DUMB_CREATE before initializing
    * virgl.
    */
   if (ret && errno != EEXIST) {
      _debug_printf("DRM_IOCTL_VIRTGPU_CONTEXT_INIT failed with %s\n",
                    strerror(errno));
      return -1;
   }

   return 0;
}

static struct virgl_winsys *
virgl_drm_winsys_create(int drmFD)
{
   static const unsigned CACHE_TIMEOUT_USEC = 1000000;
   struct virgl_drm_winsys *qdws;
   int drm_version;
   int ret;
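
   /* Probe every known virtgpu param once up front; params the kernel does
    * not recognize simply read back as 0. */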
   for (uint32_t i = 0; i < ARRAY_SIZE(params); i++) {
      struct drm_virtgpu_getparam getparam = { 0 };
      uint64_t value = 0;
      getparam.param = params[i].param;
      getparam.value = (uint64_t)(uintptr_t)&value;
      ret = drmIoctl(drmFD, DRM_IOCTL_VIRTGPU_GETPARAM, &getparam);
      params[i].value = (ret == 0) ? value : 0;
   }

   if (!params[param_3d_features].value)
      return NULL;

   drm_version = virgl_drm_get_version(drmFD);
   if (drm_version < 0)
      return NULL;

   if (params[param_context_init].value) {
      ret = virgl_init_context(drmFD);
      if (ret)
         return NULL;
   }

   qdws = CALLOC_STRUCT(virgl_drm_winsys);
   if (!qdws)
      return NULL;

   qdws->fd = drmFD;
   virgl_resource_cache_init(&qdws->cache, CACHE_TIMEOUT_USEC,
                             virgl_drm_resource_cache_entry_is_busy,
                             virgl_drm_resource_cache_entry_release,
                             qdws);
   (void) mtx_init(&qdws->mutex, mtx_plain);
   (void) mtx_init(&qdws->bo_handles_mutex, mtx_plain);
   p_atomic_set(&qdws->blob_id, 0);

   qdws->bo_handles = util_hash_table_create_ptr_keys();
   qdws->bo_names = util_hash_table_create_ptr_keys();
   qdws->base.destroy = virgl_drm_winsys_destroy;

   qdws->base.transfer_put = virgl_bo_transfer_put;
   qdws->base.transfer_get = virgl_bo_transfer_get;
   qdws->base.resource_create = virgl_drm_winsys_resource_cache_create;
   qdws->base.resource_reference = virgl_drm_resource_reference;
   qdws->base.resource_create_from_handle = virgl_drm_winsys_resource_create_handle;
   qdws->base.resource_set_type = virgl_drm_winsys_resource_set_type;
   qdws->base.resource_get_handle = virgl_drm_winsys_resource_get_handle;
   qdws->base.resource_map = virgl_drm_resource_map;
   qdws->base.resource_wait = virgl_drm_resource_wait;
   qdws->base.resource_is_busy = virgl_drm_resource_is_busy;
   qdws->base.cmd_buf_create = virgl_drm_cmd_buf_create;
   qdws->base.cmd_buf_destroy = virgl_drm_cmd_buf_destroy;
   qdws->base.submit_cmd = virgl_drm_winsys_submit_cmd;
   qdws->base.emit_res = virgl_drm_emit_res;
   qdws->base.res_is_referenced = virgl_drm_res_is_ref;

   qdws->base.cs_create_fence = virgl_cs_create_fence;
   qdws->base.fence_wait = virgl_fence_wait;
   qdws->base.fence_reference = virgl_fence_reference;
   qdws->base.fence_server_sync = virgl_fence_server_sync;
   qdws->base.fence_get_fd = virgl_fence_get_fd;
   qdws->base.get_caps = virgl_drm_get_caps;
   qdws->base.supports_fences = drm_version >= VIRGL_DRM_VERSION_FENCE_FD;
   qdws->base.supports_encoded_transfers = 1;

   qdws->base.supports_coherent = params[param_resource_blob].value &&
                                  params[param_host_visible].value;
   return &qdws->base;
}

static struct hash_table *fd_tab = NULL;
static mtx_t virgl_screen_mutex = _MTX_INITIALIZER_NP;

static void
virgl_drm_screen_destroy(struct pipe_screen *pscreen)
{
   struct virgl_screen *screen = virgl_screen(pscreen);
   boolean destroy;

   mtx_lock(&virgl_screen_mutex);
   destroy = --screen->refcnt == 0;
   if (destroy) {
      int fd = virgl_drm_winsys(screen->vws)->fd;
      _mesa_hash_table_remove_key(fd_tab, intptr_to_pointer(fd));
      close(fd);
   }
   mtx_unlock(&virgl_screen_mutex);

   if (destroy) {
      pscreen->destroy = screen->winsys_priv;
      pscreen->destroy(pscreen);
   }
}

static uint32_t
hash_fd(const void *key)
{
   int fd = pointer_to_intptr(key);

   return _mesa_hash_int(&fd);
}

static bool
equal_fd(const void *key1, const void *key2)
{
   int ret;
   int fd1 = pointer_to_intptr(key1);
   int fd2 = pointer_to_intptr(key2);

   /* Since the scope of a prime handle is limited to the drm_file,
    * a virgl_screen is only shared at the drm_file level,
    * not at the device (/dev/dri/cardX) level.
    */
   ret = os_same_file_description(fd1, fd2);
   if (ret == 0) {
      return true;
   } else if (ret < 0) {
      static bool logged;

      if (!logged) {
         _debug_printf("virgl: os_same_file_description couldn't "
                       "determine if two DRM fds reference the same "
                       "file description.\n"
                       "If they do, bad things may happen!\n");
         logged = true;
      }
   }

   return false;
}

struct pipe_screen *
virgl_drm_screen_create(int fd, const struct pipe_screen_config *config)
{
   struct pipe_screen *pscreen = NULL;

   mtx_lock(&virgl_screen_mutex);
   if (!fd_tab) {
      fd_tab = _mesa_hash_table_create(NULL, hash_fd, equal_fd);
      if (!fd_tab)
         goto unlock;
   }

   pscreen = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
   if (pscreen) {
      virgl_screen(pscreen)->refcnt++;
   } else {
      struct virgl_winsys *vws;
      int dup_fd = os_dupfd_cloexec(fd);

      vws = virgl_drm_winsys_create(dup_fd);
      if (!vws) {
         close(dup_fd);
         goto unlock;
      }

      pscreen = virgl_create_screen(vws, config);
      if (pscreen) {
         _mesa_hash_table_insert(fd_tab, intptr_to_pointer(dup_fd), pscreen);

         /* Bit of a hack: to avoid a circular linkage dependency,
          * i.e. the pipe driver having to call into the winsys, we
          * override the pipe driver's screen->destroy():
          */
         virgl_screen(pscreen)->winsys_priv = pscreen->destroy;
         pscreen->destroy = virgl_drm_screen_destroy;
      }
   }

unlock:
   mtx_unlock(&virgl_screen_mutex);
   return pscreen;
}