/*
 * Copyright 2014, 2015 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/stat.h>

#include "util/os_mman.h"
#include "util/os_file.h"
#include "util/os_time.h"
#include "util/simple_mtx.h"
#include "util/u_memory.h"
#include "util/format/u_format.h"
#include "util/u_hash_table.h"
#include "util/u_inlines.h"
#include "util/u_pointer.h"
#include "frontend/drm_driver.h"
#include "virgl/virgl_screen.h"
#include "virgl/virgl_public.h"
#include "virtio-gpu/virgl_protocol.h"

#include <xf86drm.h>
#include <libsync.h>
#include "drm-uapi/virtgpu_drm.h"

#include "virgl_drm_winsys.h"
#include "virgl_drm_public.h"

// Delete local definitions when virglrenderer_hw.h becomes public
#define VIRGL_DRM_CAPSET_VIRGL  1
#define VIRGL_DRM_CAPSET_VIRGL2 2

#define VIRGL_DRM_VERSION(major, minor) ((major) << 16 | (minor))
#define VIRGL_DRM_VERSION_FENCE_FD      VIRGL_DRM_VERSION(0, 1)
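/* For example, VIRGL_DRM_VERSION(0, 1) == 0x00000001; with the major version
 * in the upper 16 bits, packed versions compare numerically in release order
 * (see the supports_fences check in virgl_drm_winsys_create below). */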

/* Gets a pointer to the virgl_hw_res containing the pointed to cache entry. */
#define cache_entry_container_res(ptr) \
    (struct virgl_hw_res*)((char*)ptr - offsetof(struct virgl_hw_res, cache_entry))
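/* Typical use, given an entry handed back by the resource cache:
 *   struct virgl_hw_res *res = cache_entry_container_res(entry);
 * (the same container-of pattern used by the cache callbacks below). */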

static inline bool can_cache_resource(uint32_t bind)
{
   return bind == VIRGL_BIND_CONSTANT_BUFFER ||
          bind == VIRGL_BIND_INDEX_BUFFER ||
          bind == VIRGL_BIND_VERTEX_BUFFER ||
          bind == VIRGL_BIND_CUSTOM ||
          bind == VIRGL_BIND_STAGING ||
          bind == VIRGL_BIND_DEPTH_STENCIL ||
          bind == VIRGL_BIND_RENDER_TARGET ||
          bind == 0;
}

static void virgl_hw_res_destroy(struct virgl_drm_winsys *qdws,
                                 struct virgl_hw_res *res)
{
   struct drm_gem_close args;

   mtx_lock(&qdws->bo_handles_mutex);

   /* We intentionally avoid taking the lock in
    * virgl_drm_resource_reference. Now that the
    * lock is taken, we need to check the refcount
    * again. */
   if (pipe_is_referenced(&res->reference)) {
      mtx_unlock(&qdws->bo_handles_mutex);
      return;
   }

   _mesa_hash_table_remove_key(qdws->bo_handles,
                               (void *)(uintptr_t)res->bo_handle);
   if (res->flink_name)
      _mesa_hash_table_remove_key(qdws->bo_names,
                                  (void *)(uintptr_t)res->flink_name);
   if (res->ptr)
      os_munmap(res->ptr, res->size);

   memset(&args, 0, sizeof(args));
   args.handle = res->bo_handle;
   drmIoctl(qdws->fd, DRM_IOCTL_GEM_CLOSE, &args);
   /* We need to unlock the access to bo_handles after closing the GEM to
    * avoid a race condition where another thread would not find the
    * bo_handle leading to a call of DRM_IOCTL_GEM_OPEN which will return
    * the same bo_handle as the one we are closing here. */
   mtx_unlock(&qdws->bo_handles_mutex);
   FREE(res);
}

static bool virgl_drm_resource_is_busy(struct virgl_winsys *vws,
                                       struct virgl_hw_res *res)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct drm_virtgpu_3d_wait waitcmd;
   int ret;

   if (!p_atomic_read(&res->maybe_busy) && !p_atomic_read(&res->external))
      return false;

   memset(&waitcmd, 0, sizeof(waitcmd));
   waitcmd.handle = res->bo_handle;
   waitcmd.flags = VIRTGPU_WAIT_NOWAIT;

   ret = drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
   if (ret && errno == EBUSY)
      return true;

   p_atomic_set(&res->maybe_busy, false);

   return false;
}
132 
133 static void
virgl_drm_winsys_destroy(struct virgl_winsys * qws)134 virgl_drm_winsys_destroy(struct virgl_winsys *qws)
135 {
136    struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
137 
138    virgl_resource_cache_flush(&qdws->cache);
139 
140    _mesa_hash_table_destroy(qdws->bo_handles, NULL);
141    _mesa_hash_table_destroy(qdws->bo_names, NULL);
142    mtx_destroy(&qdws->bo_handles_mutex);
143    mtx_destroy(&qdws->mutex);
144 
145    FREE(qdws);
146 }
147 
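/* Releases the reference held in *dres and repoints it at sres. When the old
 * resource's refcount reaches zero, it is returned to the cache if its bind
 * flags are cacheable and it was never exported; otherwise it is destroyed. */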
static void virgl_drm_resource_reference(struct virgl_winsys *qws,
                                         struct virgl_hw_res **dres,
                                         struct virgl_hw_res *sres)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct virgl_hw_res *old = *dres;

   if (pipe_reference(&(*dres)->reference, &sres->reference)) {

      if (!can_cache_resource(old->bind) ||
          p_atomic_read(&old->external)) {
         virgl_hw_res_destroy(qdws, old);
      } else {
         mtx_lock(&qdws->mutex);
         virgl_resource_cache_add(&qdws->cache, &old->cache_entry);
         mtx_unlock(&qdws->mutex);
      }
   }
   *dres = sres;
}

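/* Creates a host-mappable HOST3D blob resource. The gallium-level layout is
 * described to the host through an inline VIRGL_CCMD_PIPE_RESOURCE_CREATE
 * command, tied to the blob by a winsys-wide monotonically increasing
 * blob_id. */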
static struct virgl_hw_res *
virgl_drm_winsys_resource_create_blob(struct virgl_winsys *qws,
                                      enum pipe_texture_target target,
                                      uint32_t format,
                                      uint32_t bind,
                                      uint32_t width,
                                      uint32_t height,
                                      uint32_t depth,
                                      uint32_t array_size,
                                      uint32_t last_level,
                                      uint32_t nr_samples,
                                      uint32_t flags,
                                      uint32_t size)
{
   int ret;
   int32_t blob_id;
   uint32_t cmd[VIRGL_PIPE_RES_CREATE_SIZE + 1] = { 0 };
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_virtgpu_resource_create_blob drm_rc_blob = { 0 };
   struct virgl_hw_res *res;
   struct virgl_resource_params params = { .size = size,
                                           .bind = bind,
                                           .format = format,
                                           .flags = flags,
                                           .nr_samples = nr_samples,
                                           .width = width,
                                           .height = height,
                                           .depth = depth,
                                           .array_size = array_size,
                                           .last_level = last_level,
                                           .target = target };

   res = CALLOC_STRUCT(virgl_hw_res);
   if (!res)
      return NULL;

   /* Make sure blob is page aligned. */
   if (flags & (VIRGL_RESOURCE_FLAG_MAP_PERSISTENT |
                VIRGL_RESOURCE_FLAG_MAP_COHERENT)) {
      width = ALIGN(width, getpagesize());
      size = ALIGN(size, getpagesize());
   }

   blob_id = p_atomic_inc_return(&qdws->blob_id);
   cmd[0] = VIRGL_CMD0(VIRGL_CCMD_PIPE_RESOURCE_CREATE, 0, VIRGL_PIPE_RES_CREATE_SIZE);
   cmd[VIRGL_PIPE_RES_CREATE_FORMAT] = format;
   cmd[VIRGL_PIPE_RES_CREATE_BIND] = bind;
   cmd[VIRGL_PIPE_RES_CREATE_TARGET] = target;
   cmd[VIRGL_PIPE_RES_CREATE_WIDTH] = width;
   cmd[VIRGL_PIPE_RES_CREATE_HEIGHT] = height;
   cmd[VIRGL_PIPE_RES_CREATE_DEPTH] = depth;
   cmd[VIRGL_PIPE_RES_CREATE_ARRAY_SIZE] = array_size;
   cmd[VIRGL_PIPE_RES_CREATE_LAST_LEVEL] = last_level;
   cmd[VIRGL_PIPE_RES_CREATE_NR_SAMPLES] = nr_samples;
   cmd[VIRGL_PIPE_RES_CREATE_FLAGS] = flags;
   cmd[VIRGL_PIPE_RES_CREATE_BLOB_ID] = blob_id;

   drm_rc_blob.cmd = (unsigned long)(void *)&cmd;
   drm_rc_blob.cmd_size = 4 * (VIRGL_PIPE_RES_CREATE_SIZE + 1);
   drm_rc_blob.size = size;
   drm_rc_blob.blob_mem = VIRTGPU_BLOB_MEM_HOST3D;
   drm_rc_blob.blob_flags = VIRTGPU_BLOB_FLAG_USE_MAPPABLE;
   drm_rc_blob.blob_id = (uint64_t) blob_id;

   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &drm_rc_blob);
   if (ret != 0) {
      FREE(res);
      return NULL;
   }

   res->bind = bind;
   res->res_handle = drm_rc_blob.res_handle;
   res->bo_handle = drm_rc_blob.bo_handle;
   res->size = size;
   res->flags = flags;
   res->maybe_untyped = false;
   pipe_reference_init(&res->reference, 1);
   p_atomic_set(&res->external, false);
   p_atomic_set(&res->num_cs_references, 0);
   virgl_resource_cache_entry_init(&res->cache_entry, params);
   return res;
}

static struct virgl_hw_res *
virgl_drm_winsys_resource_create(struct virgl_winsys *qws,
                                 enum pipe_texture_target target,
                                 uint32_t format,
                                 uint32_t bind,
                                 uint32_t width,
                                 uint32_t height,
                                 uint32_t depth,
                                 uint32_t array_size,
                                 uint32_t last_level,
                                 uint32_t nr_samples,
                                 uint32_t size,
                                 bool for_fencing)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_virtgpu_resource_create createcmd;
   int ret;
   struct virgl_hw_res *res;
   uint32_t stride = width * util_format_get_blocksize(format);
   struct virgl_resource_params params = { .size = size,
                                           .bind = bind,
                                           .format = format,
                                           .flags = 0,
                                           .nr_samples = nr_samples,
                                           .width = width,
                                           .height = height,
                                           .depth = depth,
                                           .array_size = array_size,
                                           .last_level = last_level,
                                           .target = target };

   res = CALLOC_STRUCT(virgl_hw_res);
   if (!res)
      return NULL;

   memset(&createcmd, 0, sizeof(createcmd));
   createcmd.target = target;
   createcmd.format = pipe_to_virgl_format(format);
   createcmd.bind = bind;
   createcmd.width = width;
   createcmd.height = height;
   createcmd.depth = depth;
   createcmd.array_size = array_size;
   createcmd.last_level = last_level;
   createcmd.nr_samples = nr_samples;
   createcmd.stride = stride;
   createcmd.size = size;

   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &createcmd);
   if (ret != 0) {
      FREE(res);
      return NULL;
   }

   res->bind = bind;

   res->res_handle = createcmd.res_handle;
   res->bo_handle = createcmd.bo_handle;
   res->size = size;
   res->target = target;
   res->maybe_untyped = false;
   pipe_reference_init(&res->reference, 1);
   p_atomic_set(&res->external, false);
   p_atomic_set(&res->num_cs_references, 0);

   /* A newly created resource is considered busy by the kernel until the
    * command is retired.  But for our purposes, we can consider it idle
    * unless it is used for fencing.
    */
   p_atomic_set(&res->maybe_busy, for_fencing);

   virgl_resource_cache_entry_init(&res->cache_entry, params);

   return res;
}

/*
 * Previously, with DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, all host resources had
 * a guest memory shadow resource with size = stride * bpp.  Virglrenderer
 * would guess the stride implicitly when performing transfer operations, if
 * the stride wasn't specified.  Interestingly, vtest would specify the stride.
 *
 * Guessing the stride breaks down with YUV images, which may be imported into
 * Mesa as 3R8 images. It also doesn't work if an external allocator
 * (e.g., minigbm) decides to use a stride not equal to width * bpp. With blob
 * resources, the size = stride * bpp restriction no longer holds, so use
 * explicit strides passed into Mesa.
 */
static inline bool use_explicit_stride(struct virgl_hw_res *res, uint32_t level,
                                       uint32_t depth)
{
   return (params[param_resource_blob].value &&
           res->blob_mem == VIRTGPU_BLOB_MEM_HOST3D_GUEST &&
           res->target == PIPE_TEXTURE_2D &&
           level == 0 && depth == 1);
}

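/* Uploads a box of guest data to the host copy of the resource via
 * VIRTGPU_TRANSFER_TO_HOST, marking the resource as possibly busy. */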
static int
virgl_bo_transfer_put(struct virgl_winsys *vws,
                      struct virgl_hw_res *res,
                      const struct pipe_box *box,
                      uint32_t stride, uint32_t layer_stride,
                      uint32_t buf_offset, uint32_t level)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct drm_virtgpu_3d_transfer_to_host tohostcmd;

   p_atomic_set(&res->maybe_busy, true);

   memset(&tohostcmd, 0, sizeof(tohostcmd));
   tohostcmd.bo_handle = res->bo_handle;
   tohostcmd.box.x = box->x;
   tohostcmd.box.y = box->y;
   tohostcmd.box.z = box->z;
   tohostcmd.box.w = box->width;
   tohostcmd.box.h = box->height;
   tohostcmd.box.d = box->depth;
   tohostcmd.offset = buf_offset;
   tohostcmd.level = level;

   if (use_explicit_stride(res, level, box->depth))
      tohostcmd.stride = stride;

   return drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST, &tohostcmd);
}

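/* Mirror of virgl_bo_transfer_put: reads a box of the host copy back into
 * guest memory via VIRTGPU_TRANSFER_FROM_HOST. */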
static int
virgl_bo_transfer_get(struct virgl_winsys *vws,
                      struct virgl_hw_res *res,
                      const struct pipe_box *box,
                      uint32_t stride, uint32_t layer_stride,
                      uint32_t buf_offset, uint32_t level)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct drm_virtgpu_3d_transfer_from_host fromhostcmd;

   p_atomic_set(&res->maybe_busy, true);

   memset(&fromhostcmd, 0, sizeof(fromhostcmd));
   fromhostcmd.bo_handle = res->bo_handle;
   fromhostcmd.level = level;
   fromhostcmd.offset = buf_offset;
   fromhostcmd.box.x = box->x;
   fromhostcmd.box.y = box->y;
   fromhostcmd.box.z = box->z;
   fromhostcmd.box.w = box->width;
   fromhostcmd.box.h = box->height;
   fromhostcmd.box.d = box->depth;

   if (use_explicit_stride(res, level, box->depth))
      fromhostcmd.stride = stride;

   return drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST, &fromhostcmd);
}

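/* Main allocation entry point: reuses a compatible idle resource from the
 * cache when the bind flags allow it, and otherwise allocates fresh storage,
 * using the blob path for persistent/coherent mappings. */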
static struct virgl_hw_res *
virgl_drm_winsys_resource_cache_create(struct virgl_winsys *qws,
                                       enum pipe_texture_target target,
                                       const void *map_front_private,
                                       uint32_t format,
                                       uint32_t bind,
                                       uint32_t width,
                                       uint32_t height,
                                       uint32_t depth,
                                       uint32_t array_size,
                                       uint32_t last_level,
                                       uint32_t nr_samples,
                                       uint32_t flags,
                                       uint32_t size)
{
   bool need_sync = false;
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct virgl_hw_res *res;
   struct virgl_resource_cache_entry *entry;
   struct virgl_resource_params params = { .size = size,
                                           .bind = bind,
                                           .format = format,
                                           .flags = flags,
                                           .nr_samples = nr_samples,
                                           .width = width,
                                           .height = height,
                                           .depth = depth,
                                           .array_size = array_size,
                                           .last_level = last_level,
                                           .target = target };

   if (!can_cache_resource(bind))
      goto alloc;

   mtx_lock(&qdws->mutex);

   entry = virgl_resource_cache_remove_compatible(&qdws->cache, params);
   if (entry) {
      res = cache_entry_container_res(entry);
      mtx_unlock(&qdws->mutex);
      pipe_reference_init(&res->reference, 1);
      return res;
   }

   mtx_unlock(&qdws->mutex);

alloc:
   /* A PIPE_BUFFER with the VIRGL_BIND_CUSTOM flag will access data when
    * attaching; to avoid race conditions we need to treat it as busy during
    * creation.
    */
   if (target == PIPE_BUFFER && (bind & VIRGL_BIND_CUSTOM))
      need_sync = true;

   if (flags & (VIRGL_RESOURCE_FLAG_MAP_PERSISTENT |
                VIRGL_RESOURCE_FLAG_MAP_COHERENT))
      res = virgl_drm_winsys_resource_create_blob(qws, target, format, bind,
                                                  width, height, depth,
                                                  array_size, last_level,
                                                  nr_samples, flags, size);
   else
      res = virgl_drm_winsys_resource_create(qws, target, format, bind, width,
                                             height, depth, array_size,
                                             last_level, nr_samples, size,
                                             need_sync);
   return res;
}

static uint32_t
virgl_drm_winsys_resource_get_storage_size(struct virgl_winsys *qws,
                                           struct virgl_hw_res *res)
{
   return res->size;
}

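/* Imports a resource from a winsys handle (GEM flink name or dma-buf fd).
 * Handles are deduplicated through bo_names/bo_handles so a given BO is
 * always wrapped by exactly one virgl_hw_res. */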
static struct virgl_hw_res *
virgl_drm_winsys_resource_create_handle(struct virgl_winsys *qws,
                                        struct winsys_handle *whandle,
                                        UNUSED struct pipe_resource *templ,
                                        uint32_t *plane,
                                        uint32_t *stride,
                                        uint32_t *plane_offset,
                                        uint64_t *modifier,
                                        uint32_t *blob_mem)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_gem_open open_arg = {};
   struct drm_virtgpu_resource_info info_arg = {};
   struct virgl_hw_res *res = NULL;
   uint32_t handle = whandle->handle;

   if (whandle->plane >= VIRGL_MAX_PLANE_COUNT) {
      return NULL;
   }

   if (whandle->offset != 0 && whandle->type == WINSYS_HANDLE_TYPE_SHARED) {
      _debug_printf("attempt to import unsupported winsys offset %u\n",
                    whandle->offset);
      return NULL;
   } else if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
      *plane = whandle->plane;
      *stride = whandle->stride;
      *plane_offset = whandle->offset;
      *modifier = whandle->modifier;
   }

   mtx_lock(&qdws->bo_handles_mutex);

   /* We must maintain a list of pairs <handle, bo>, so that we always return
    * the same BO for one particular handle. If we didn't do that and created
    * more than one BO for the same handle and then relocated them in a CS,
    * we would hit a deadlock in the kernel.
    *
    * The list of pairs is guarded by a mutex, of course. */
   if (whandle->type == WINSYS_HANDLE_TYPE_SHARED) {
      res = util_hash_table_get(qdws->bo_names, (void*)(uintptr_t)handle);
   } else if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
      int r;
      r = drmPrimeFDToHandle(qdws->fd, whandle->handle, &handle);
      if (r)
         goto done;
      res = util_hash_table_get(qdws->bo_handles, (void*)(uintptr_t)handle);
   } else {
      /* Unknown handle type */
      goto done;
   }

   if (res) {
      /* qdws->bo_{names,handles} hold weak pointers to virgl_hw_res. Because
       * virgl_drm_resource_reference does not take qdws->bo_handles_mutex
       * until it enters virgl_hw_res_destroy, there is a small window that
       * the refcount can drop to zero. Call p_atomic_inc directly instead of
       * virgl_drm_resource_reference to avoid hitting assert failures.
       */
      p_atomic_inc(&res->reference.count);
      goto done;
   }

   res = CALLOC_STRUCT(virgl_hw_res);
   if (!res)
      goto done;

   if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
      res->bo_handle = handle;
   } else {
      memset(&open_arg, 0, sizeof(open_arg));
      open_arg.name = whandle->handle;
      if (drmIoctl(qdws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
         FREE(res);
         res = NULL;
         goto done;
      }
      res->bo_handle = open_arg.handle;
      res->flink_name = whandle->handle;
   }

   memset(&info_arg, 0, sizeof(info_arg));
   info_arg.bo_handle = res->bo_handle;

   if (drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_RESOURCE_INFO, &info_arg)) {
      /* close */
      FREE(res);
      res = NULL;
      goto done;
   }

   res->res_handle = info_arg.res_handle;
   res->blob_mem = info_arg.blob_mem;
   *blob_mem = info_arg.blob_mem;

   res->size = info_arg.size;
   res->maybe_untyped = info_arg.blob_mem ? true : false;
   pipe_reference_init(&res->reference, 1);
   p_atomic_set(&res->external, true);
   res->num_cs_references = 0;

   if (res->flink_name)
      _mesa_hash_table_insert(qdws->bo_names, (void *)(uintptr_t)res->flink_name, res);
   _mesa_hash_table_insert(qdws->bo_handles, (void *)(uintptr_t)res->bo_handle, res);

done:
   mtx_unlock(&qdws->bo_handles_mutex);
   return res;
}

static void
virgl_drm_winsys_resource_set_type(struct virgl_winsys *qws,
                                   struct virgl_hw_res *res,
                                   uint32_t format, uint32_t bind,
                                   uint32_t width, uint32_t height,
                                   uint32_t usage, uint64_t modifier,
                                   uint32_t plane_count,
                                   const uint32_t *plane_strides,
                                   const uint32_t *plane_offsets)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   uint32_t cmd[VIRGL_PIPE_RES_SET_TYPE_SIZE(VIRGL_MAX_PLANE_COUNT) + 1];
   struct drm_virtgpu_execbuffer eb;
   int ret;

   mtx_lock(&qdws->bo_handles_mutex);

   if (!res->maybe_untyped) {
      mtx_unlock(&qdws->bo_handles_mutex);
      return;
   }
   res->maybe_untyped = false;

   assert(plane_count && plane_count <= VIRGL_MAX_PLANE_COUNT);

   cmd[0] = VIRGL_CMD0(VIRGL_CCMD_PIPE_RESOURCE_SET_TYPE, 0, VIRGL_PIPE_RES_SET_TYPE_SIZE(plane_count));
   cmd[VIRGL_PIPE_RES_SET_TYPE_RES_HANDLE] = res->res_handle;
   cmd[VIRGL_PIPE_RES_SET_TYPE_FORMAT] = format;
   cmd[VIRGL_PIPE_RES_SET_TYPE_BIND] = bind;
   cmd[VIRGL_PIPE_RES_SET_TYPE_WIDTH] = width;
   cmd[VIRGL_PIPE_RES_SET_TYPE_HEIGHT] = height;
   cmd[VIRGL_PIPE_RES_SET_TYPE_USAGE] = usage;
   cmd[VIRGL_PIPE_RES_SET_TYPE_MODIFIER_LO] = (uint32_t)modifier;
   cmd[VIRGL_PIPE_RES_SET_TYPE_MODIFIER_HI] = (uint32_t)(modifier >> 32);
   for (uint32_t i = 0; i < plane_count; i++) {
      cmd[VIRGL_PIPE_RES_SET_TYPE_PLANE_STRIDE(i)] = plane_strides[i];
      cmd[VIRGL_PIPE_RES_SET_TYPE_PLANE_OFFSET(i)] = plane_offsets[i];
   }

   memset(&eb, 0, sizeof(eb));
   eb.command = (uintptr_t)cmd;
   eb.size = (1 + VIRGL_PIPE_RES_SET_TYPE_SIZE(plane_count)) * 4;
   eb.num_bo_handles = 1;
   eb.bo_handles = (uintptr_t)&res->bo_handle;

   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb);
   if (ret == -1)
      _debug_printf("failed to set resource type: %s", strerror(errno));

   mtx_unlock(&qdws->bo_handles_mutex);
}

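/* Exports a resource as a flink name, KMS handle, or dma-buf fd. Exported
 * resources are registered in the hash tables and flagged external, which
 * keeps them out of the resource cache. */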
static bool virgl_drm_winsys_resource_get_handle(struct virgl_winsys *qws,
                                                 struct virgl_hw_res *res,
                                                 uint32_t stride,
                                                 struct winsys_handle *whandle)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_gem_flink flink;

   if (!res)
      return false;

   if (whandle->type == WINSYS_HANDLE_TYPE_SHARED) {
      if (!res->flink_name) {
         memset(&flink, 0, sizeof(flink));
         flink.handle = res->bo_handle;

         if (drmIoctl(qdws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
            return false;
         }
         res->flink_name = flink.name;

         mtx_lock(&qdws->bo_handles_mutex);
         _mesa_hash_table_insert(qdws->bo_names, (void *)(uintptr_t)res->flink_name, res);
         mtx_unlock(&qdws->bo_handles_mutex);
      }
      whandle->handle = res->flink_name;
   } else if (whandle->type == WINSYS_HANDLE_TYPE_KMS) {
      whandle->handle = res->bo_handle;
   } else if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
      if (drmPrimeHandleToFD(qdws->fd, res->bo_handle, DRM_CLOEXEC, (int*)&whandle->handle))
         return false;
      mtx_lock(&qdws->bo_handles_mutex);
      _mesa_hash_table_insert(qdws->bo_handles, (void *)(uintptr_t)res->bo_handle, res);
      mtx_unlock(&qdws->bo_handles_mutex);
   }

   p_atomic_set(&res->external, true);

   whandle->stride = stride;
   return true;
}

static void *virgl_drm_resource_map(struct virgl_winsys *qws,
                                    struct virgl_hw_res *res)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_virtgpu_map mmap_arg;
   void *ptr;

   if (res->ptr)
      return res->ptr;

   memset(&mmap_arg, 0, sizeof(mmap_arg));
   mmap_arg.handle = res->bo_handle;
   if (drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_MAP, &mmap_arg))
      return NULL;

   ptr = os_mmap(0, res->size, PROT_READ|PROT_WRITE, MAP_SHARED,
                 qdws->fd, mmap_arg.offset);
   if (ptr == MAP_FAILED)
      return NULL;

   res->ptr = ptr;
   return ptr;
}

static void virgl_drm_resource_wait(struct virgl_winsys *qws,
                                    struct virgl_hw_res *res)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_virtgpu_3d_wait waitcmd;
   int ret;

   if (!p_atomic_read(&res->maybe_busy) && !p_atomic_read(&res->external))
      return;

   memset(&waitcmd, 0, sizeof(waitcmd));
   waitcmd.handle = res->bo_handle;

   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
   if (ret)
      _debug_printf("waiting got error - %d, slow gpu or hang?\n", errno);

   p_atomic_set(&res->maybe_busy, false);
}

static bool virgl_drm_alloc_res_list(struct virgl_drm_cmd_buf *cbuf,
                                     int initial_size)
{
   cbuf->nres = initial_size;
   cbuf->cres = 0;

   cbuf->res_bo = CALLOC(cbuf->nres, sizeof(struct virgl_hw_buf*));
   if (!cbuf->res_bo)
      return false;

   cbuf->res_hlist = MALLOC(cbuf->nres * sizeof(uint32_t));
   if (!cbuf->res_hlist) {
      FREE(cbuf->res_bo);
      return false;
   }

   return true;
}

static void virgl_drm_free_res_list(struct virgl_drm_cmd_buf *cbuf)
{
   int i;

   for (i = 0; i < cbuf->cres; i++) {
      p_atomic_dec(&cbuf->res_bo[i]->num_cs_references);
      virgl_drm_resource_reference(cbuf->ws, &cbuf->res_bo[i], NULL);
   }
   FREE(cbuf->res_hlist);
   FREE(cbuf->res_bo);
}

static bool virgl_drm_res_is_added(struct virgl_drm_cmd_buf *cbuf,
                                   struct virgl_hw_res *res)
{
   for (int i = 0; i < cbuf->cres; i++) {
      if (cbuf->res_bo[i] == res)
         return true;
   }

   return false;
}

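/* Adds a resource to the command buffer's tracking list, growing both
 * arrays in chunks of 256 entries. Each tracked resource holds a regular
 * reference plus a num_cs_references count used by res_is_referenced. */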
static void virgl_drm_add_res(struct virgl_drm_winsys *qdws,
                              struct virgl_drm_cmd_buf *cbuf,
                              struct virgl_hw_res *res)
{
   bool already_in_list = virgl_drm_res_is_added(cbuf, res);
   if (unlikely(already_in_list))
      return;

   if (cbuf->cres >= cbuf->nres) {
      unsigned new_nres = cbuf->nres + 256;
      void *new_ptr = REALLOC(cbuf->res_bo,
                              cbuf->nres * sizeof(struct virgl_hw_buf*),
                              new_nres * sizeof(struct virgl_hw_buf*));
      if (!new_ptr) {
         _debug_printf("failure to add relocation %d, %d\n", cbuf->cres, new_nres);
         return;
      }
      cbuf->res_bo = new_ptr;

      new_ptr = REALLOC(cbuf->res_hlist,
                        cbuf->nres * sizeof(uint32_t),
                        new_nres * sizeof(uint32_t));
      if (!new_ptr) {
         _debug_printf("failure to add hlist relocation %d, %d\n", cbuf->cres, cbuf->nres);
         return;
      }
      cbuf->res_hlist = new_ptr;
      cbuf->nres = new_nres;
   }

   cbuf->res_bo[cbuf->cres] = NULL;
   virgl_drm_resource_reference(&qdws->base, &cbuf->res_bo[cbuf->cres], res);
   cbuf->res_hlist[cbuf->cres] = res->bo_handle;
   p_atomic_inc(&res->num_cs_references);
   cbuf->cres++;
}

/* This is called after the cbuf is submitted. */
static void virgl_drm_clear_res_list(struct virgl_drm_cmd_buf *cbuf)
{
   int i;

   for (i = 0; i < cbuf->cres; i++) {
      /* mark all BOs busy after submission */
      p_atomic_set(&cbuf->res_bo[i]->maybe_busy, true);

      p_atomic_dec(&cbuf->res_bo[i]->num_cs_references);
      virgl_drm_resource_reference(cbuf->ws, &cbuf->res_bo[i], NULL);
   }

   cbuf->cres = 0;
}

static void virgl_drm_emit_res(struct virgl_winsys *qws,
                               struct virgl_cmd_buf *_cbuf,
                               struct virgl_hw_res *res, bool write_buf)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);

   if (write_buf)
      cbuf->base.buf[cbuf->base.cdw++] = res->res_handle;

   virgl_drm_add_res(qdws, cbuf, res);
}

static bool virgl_drm_res_is_ref(struct virgl_winsys *qws,
                                 struct virgl_cmd_buf *_cbuf,
                                 struct virgl_hw_res *res)
{
   if (!p_atomic_read(&res->num_cs_references))
      return false;

   return true;
}

static struct virgl_cmd_buf *virgl_drm_cmd_buf_create(struct virgl_winsys *qws,
                                                      uint32_t size)
{
   struct virgl_drm_cmd_buf *cbuf;

   cbuf = CALLOC_STRUCT(virgl_drm_cmd_buf);
   if (!cbuf)
      return NULL;

   cbuf->ws = qws;

   if (!virgl_drm_alloc_res_list(cbuf, 512)) {
      FREE(cbuf);
      return NULL;
   }

   cbuf->buf = CALLOC(size, sizeof(uint32_t));
   if (!cbuf->buf) {
      FREE(cbuf->res_hlist);
      FREE(cbuf->res_bo);
      FREE(cbuf);
      return NULL;
   }

   cbuf->in_fence_fd = -1;
   cbuf->base.buf = cbuf->buf;
   return &cbuf->base;
}

static void virgl_drm_cmd_buf_destroy(struct virgl_cmd_buf *_cbuf)
{
   struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);

   virgl_drm_free_res_list(cbuf);

   FREE(cbuf->buf);
   FREE(cbuf);
}

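/* Wraps a fence fd in a pipe_fence_handle. External fds are duplicated so
 * the caller retains ownership of the fd it passed in. */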
static struct pipe_fence_handle *
virgl_drm_fence_create(struct virgl_winsys *vws, int fd, bool external)
{
   struct virgl_drm_fence *fence;

   assert(vws->supports_fences);

   if (external) {
      fd = os_dupfd_cloexec(fd);
      if (fd < 0)
         return NULL;
   }

   fence = CALLOC_STRUCT(virgl_drm_fence);
   if (!fence) {
      close(fd);
      return NULL;
   }

   fence->fd = fd;
   fence->external = external;

   pipe_reference_init(&fence->reference, 1);

   return (struct pipe_fence_handle *)fence;
}

static struct pipe_fence_handle *
virgl_drm_fence_create_legacy(struct virgl_winsys *vws)
{
   struct virgl_drm_fence *fence;

   assert(!vws->supports_fences);

   fence = CALLOC_STRUCT(virgl_drm_fence);
   if (!fence)
      return NULL;
   fence->fd = -1;

   /* Resources for fences should not be from the cache, since we are basing
    * the fence status on the resource creation busy status.
    */
   fence->hw_res = virgl_drm_winsys_resource_create(vws, PIPE_BUFFER,
         PIPE_FORMAT_R8_UNORM, VIRGL_BIND_CUSTOM, 8, 1, 1, 0, 0, 0, 8, true);
   if (!fence->hw_res) {
      FREE(fence);
      return NULL;
   }

   pipe_reference_init(&fence->reference, 1);

   return (struct pipe_fence_handle *)fence;
}

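/* Submits the accumulated command stream with EXECBUFFER. When the kernel
 * supports fence fds, an in-fence can gate execution and an out-fence is
 * requested if the caller wants one; otherwise a legacy fence backed by a
 * busy dummy resource is created. */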
static int virgl_drm_winsys_submit_cmd(struct virgl_winsys *qws,
                                       struct virgl_cmd_buf *_cbuf,
                                       struct pipe_fence_handle **fence)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
   struct drm_virtgpu_execbuffer eb;
   int ret;

   if (cbuf->base.cdw == 0)
      return 0;

   memset(&eb, 0, sizeof(struct drm_virtgpu_execbuffer));
   eb.command = (unsigned long)(void*)cbuf->buf;
   eb.size = cbuf->base.cdw * 4;
   eb.num_bo_handles = cbuf->cres;
   eb.bo_handles = (unsigned long)(void *)cbuf->res_hlist;

   eb.fence_fd = -1;
   if (qws->supports_fences) {
      if (cbuf->in_fence_fd >= 0) {
         eb.flags |= VIRTGPU_EXECBUF_FENCE_FD_IN;
         eb.fence_fd = cbuf->in_fence_fd;
      }

      if (fence != NULL)
         eb.flags |= VIRTGPU_EXECBUF_FENCE_FD_OUT;
   } else {
      assert(cbuf->in_fence_fd < 0);
   }

   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb);
   if (ret == -1)
      _debug_printf("got error from kernel - expect bad rendering %d\n", errno);
   cbuf->base.cdw = 0;

   if (qws->supports_fences) {
      if (cbuf->in_fence_fd >= 0) {
         close(cbuf->in_fence_fd);
         cbuf->in_fence_fd = -1;
      }

      if (fence != NULL && ret == 0)
         *fence = virgl_drm_fence_create(qws, eb.fence_fd, false);
   } else {
      if (fence != NULL && ret == 0)
         *fence = virgl_drm_fence_create_legacy(qws);
   }

   virgl_drm_clear_res_list(cbuf);

   return ret;
}

static int virgl_drm_get_caps(struct virgl_winsys *vws,
                              struct virgl_drm_caps *caps)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct drm_virtgpu_get_caps args;
   int ret;

   virgl_ws_fill_new_caps_defaults(caps);

   memset(&args, 0, sizeof(args));
   if (params[param_capset_fix].value) {
      /* if we have the query fix - try and get cap set id 2 first */
      args.cap_set_id = 2;
      args.size = sizeof(union virgl_caps);
   } else {
      args.cap_set_id = 1;
      args.size = sizeof(struct virgl_caps_v1);
   }
   args.addr = (unsigned long)&caps->caps;

   ret = drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &args);
   if (ret == -1 && errno == EINVAL) {
      /* Fallback to v1 */
      args.cap_set_id = 1;
      args.size = sizeof(struct virgl_caps_v1);
      ret = drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &args);
      if (ret == -1)
         return ret;
   }
   return ret;
}

static struct pipe_fence_handle *
virgl_cs_create_fence(struct virgl_winsys *vws, int fd)
{
   if (!vws->supports_fences)
      return NULL;

   return virgl_drm_fence_create(vws, fd, true);
}

static bool virgl_fence_wait(struct virgl_winsys *vws,
                             struct pipe_fence_handle *_fence,
                             uint64_t timeout)
{
   struct virgl_drm_fence *fence = virgl_drm_fence(_fence);

   if (vws->supports_fences) {
      uint64_t timeout_ms;
      int timeout_poll;

      if (timeout == 0)
         return sync_wait(fence->fd, 0) == 0;

      timeout_ms = timeout / 1000000;
      /* round up */
      if (timeout_ms * 1000000 < timeout)
         timeout_ms++;

      timeout_poll = timeout_ms <= INT_MAX ? (int) timeout_ms : -1;

      return sync_wait(fence->fd, timeout_poll) == 0;
   }

   if (timeout == 0)
      return !virgl_drm_resource_is_busy(vws, fence->hw_res);

   if (timeout != OS_TIMEOUT_INFINITE) {
      int64_t start_time = os_time_get();
      timeout /= 1000;
      while (virgl_drm_resource_is_busy(vws, fence->hw_res)) {
         if (os_time_get() - start_time >= timeout)
            return false;
         os_time_sleep(10);
      }
      return true;
   }
   virgl_drm_resource_wait(vws, fence->hw_res);

   return true;
}

static void virgl_fence_reference(struct virgl_winsys *vws,
                                  struct pipe_fence_handle **dst,
                                  struct pipe_fence_handle *src)
{
   struct virgl_drm_fence *dfence = virgl_drm_fence(*dst);
   struct virgl_drm_fence *sfence = virgl_drm_fence(src);

   if (pipe_reference(&dfence->reference, &sfence->reference)) {
      if (vws->supports_fences) {
         close(dfence->fd);
      } else {
         virgl_drm_resource_reference(vws, &dfence->hw_res, NULL);
      }
      FREE(dfence);
   }

   *dst = src;
}

static void virgl_fence_server_sync(struct virgl_winsys *vws,
                                    struct virgl_cmd_buf *_cbuf,
                                    struct pipe_fence_handle *_fence)
{
   struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
   struct virgl_drm_fence *fence = virgl_drm_fence(_fence);

   if (!vws->supports_fences)
      return;

   /* if not an external fence, then nothing more to do without preemption: */
   if (!fence->external)
      return;

   sync_accumulate("virgl", &cbuf->in_fence_fd, fence->fd);
}

static int virgl_fence_get_fd(struct virgl_winsys *vws,
                              struct pipe_fence_handle *_fence)
{
   struct virgl_drm_fence *fence = virgl_drm_fence(_fence);

   if (!vws->supports_fences)
      return -1;

   return os_dupfd_cloexec(fence->fd);
}

static int virgl_drm_get_version(int fd)
{
   int ret;
   drmVersionPtr version;

   version = drmGetVersion(fd);

   if (!version)
      ret = -EFAULT;
   else if (version->version_major != 0)
      ret = -EINVAL;
   else
      ret = VIRGL_DRM_VERSION(0, version->version_minor);

   drmFreeVersion(version);

   return ret;
}

static bool
virgl_drm_resource_cache_entry_is_busy(struct virgl_resource_cache_entry *entry,
                                       void *user_data)
{
   struct virgl_drm_winsys *qdws = user_data;
   struct virgl_hw_res *res = cache_entry_container_res(entry);

   return virgl_drm_resource_is_busy(&qdws->base, res);
}

static void
virgl_drm_resource_cache_entry_release(struct virgl_resource_cache_entry *entry,
                                       void *user_data)
{
   struct virgl_drm_winsys *qdws = user_data;
   struct virgl_hw_res *res = cache_entry_container_res(entry);

   virgl_hw_res_destroy(qdws, res);
}

static int virgl_init_context(int drmFD)
{
   int ret;
   struct drm_virtgpu_context_init init = { 0 };
   struct drm_virtgpu_context_set_param ctx_set_param = { 0 };
   uint64_t supports_capset_virgl, supports_capset_virgl2;
   supports_capset_virgl = supports_capset_virgl2 = 0;

   supports_capset_virgl = ((1 << VIRGL_DRM_CAPSET_VIRGL) &
                            params[param_supported_capset_ids].value);

   supports_capset_virgl2 = ((1 << VIRGL_DRM_CAPSET_VIRGL2) &
                             params[param_supported_capset_ids].value);

   if (!supports_capset_virgl && !supports_capset_virgl2) {
      _debug_printf("No virgl contexts available on host\n");
      return -EINVAL;
   }

   ctx_set_param.param = VIRTGPU_CONTEXT_PARAM_CAPSET_ID;
   ctx_set_param.value = (supports_capset_virgl2) ?
                         VIRGL_DRM_CAPSET_VIRGL2 :
                         VIRGL_DRM_CAPSET_VIRGL;

   init.ctx_set_params = (unsigned long)(void *)&ctx_set_param;
   init.num_params = 1;

   ret = drmIoctl(drmFD, DRM_IOCTL_VIRTGPU_CONTEXT_INIT, &init);
   /*
    * EEXIST happens when a compositor does DUMB_CREATE before initializing
    * virgl.
    */
   if (ret && errno != EEXIST) {
      _debug_printf("DRM_IOCTL_VIRTGPU_CONTEXT_INIT failed with %s\n",
                    strerror(errno));
      return -1;
   }

   return 0;
}

static int
virgl_drm_winsys_get_fd(struct virgl_winsys *vws)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);

   return vdws->fd;
}

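/* Creates the winsys: snapshots all virtgpu params up front, requires 3D
 * support, initializes a context when the kernel supports it, and fills in
 * the virgl_winsys vtable plus a resource cache with a 1-second timeout. */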
static struct virgl_winsys *
virgl_drm_winsys_create(int drmFD)
{
   static const unsigned CACHE_TIMEOUT_USEC = 1000000;
   struct virgl_drm_winsys *qdws;
   int drm_version;
   int ret;

   for (uint32_t i = 0; i < ARRAY_SIZE(params); i++) {
      struct drm_virtgpu_getparam getparam = { 0 };
      uint64_t value = 0;
      getparam.param = params[i].param;
      getparam.value = (uint64_t)(uintptr_t)&value;
      ret = drmIoctl(drmFD, DRM_IOCTL_VIRTGPU_GETPARAM, &getparam);
      params[i].value = (ret == 0) ? value : 0;
   }

   if (!params[param_3d_features].value)
      return NULL;

   drm_version = virgl_drm_get_version(drmFD);
   if (drm_version < 0)
      return NULL;

   if (params[param_context_init].value) {
      ret = virgl_init_context(drmFD);
      if (ret)
         return NULL;
   }

   qdws = CALLOC_STRUCT(virgl_drm_winsys);
   if (!qdws)
      return NULL;

   qdws->fd = drmFD;
   virgl_resource_cache_init(&qdws->cache, CACHE_TIMEOUT_USEC,
                             virgl_drm_resource_cache_entry_is_busy,
                             virgl_drm_resource_cache_entry_release,
                             qdws);
   (void) mtx_init(&qdws->mutex, mtx_plain);
   (void) mtx_init(&qdws->bo_handles_mutex, mtx_plain);
   p_atomic_set(&qdws->blob_id, 0);

   qdws->bo_handles = util_hash_table_create_ptr_keys();
   qdws->bo_names = util_hash_table_create_ptr_keys();
   qdws->base.destroy = virgl_drm_winsys_destroy;

   qdws->base.transfer_put = virgl_bo_transfer_put;
   qdws->base.transfer_get = virgl_bo_transfer_get;
   qdws->base.resource_create = virgl_drm_winsys_resource_cache_create;
   qdws->base.resource_reference = virgl_drm_resource_reference;
   qdws->base.resource_create_from_handle = virgl_drm_winsys_resource_create_handle;
   qdws->base.resource_set_type = virgl_drm_winsys_resource_set_type;
   qdws->base.resource_get_handle = virgl_drm_winsys_resource_get_handle;
   qdws->base.resource_get_storage_size = virgl_drm_winsys_resource_get_storage_size;
   qdws->base.resource_map = virgl_drm_resource_map;
   qdws->base.resource_wait = virgl_drm_resource_wait;
   qdws->base.resource_is_busy = virgl_drm_resource_is_busy;
   qdws->base.cmd_buf_create = virgl_drm_cmd_buf_create;
   qdws->base.cmd_buf_destroy = virgl_drm_cmd_buf_destroy;
   qdws->base.submit_cmd = virgl_drm_winsys_submit_cmd;
   qdws->base.emit_res = virgl_drm_emit_res;
   qdws->base.res_is_referenced = virgl_drm_res_is_ref;

   qdws->base.cs_create_fence = virgl_cs_create_fence;
   qdws->base.fence_wait = virgl_fence_wait;
   qdws->base.fence_reference = virgl_fence_reference;
   qdws->base.fence_server_sync = virgl_fence_server_sync;
   qdws->base.fence_get_fd = virgl_fence_get_fd;
   qdws->base.get_caps = virgl_drm_get_caps;
   qdws->base.get_fd = virgl_drm_winsys_get_fd;
   qdws->base.supports_fences = drm_version >= VIRGL_DRM_VERSION_FENCE_FD;
   qdws->base.supports_encoded_transfers = 1;

   qdws->base.supports_coherent = params[param_resource_blob].value &&
                                  params[param_host_visible].value;
   return &qdws->base;
}

static struct hash_table *fd_tab = NULL;
static simple_mtx_t virgl_screen_mutex = SIMPLE_MTX_INITIALIZER;

static void
virgl_drm_screen_destroy(struct pipe_screen *pscreen)
{
   struct virgl_screen *screen = virgl_screen(pscreen);
   bool destroy;

   simple_mtx_lock(&virgl_screen_mutex);
   destroy = --screen->refcnt == 0;
   if (destroy) {
      int fd = virgl_drm_winsys(screen->vws)->fd;
      _mesa_hash_table_remove_key(fd_tab, intptr_to_pointer(fd));
      close(fd);
   }
   simple_mtx_unlock(&virgl_screen_mutex);

   if (destroy) {
      pscreen->destroy = screen->winsys_priv;
      pscreen->destroy(pscreen);
   }
}

static uint32_t
hash_fd(const void *key)
{
   int fd = pointer_to_intptr(key);

   return _mesa_hash_int(&fd);
}

static bool
equal_fd(const void *key1, const void *key2)
{
   int ret;
   int fd1 = pointer_to_intptr(key1);
   int fd2 = pointer_to_intptr(key2);

   /* Since the scope of prime handle is limited to drm_file,
    * virgl_screen is only shared at the drm_file level,
    * not at the device (/dev/dri/cardX) level.
    */
   ret = os_same_file_description(fd1, fd2);
   if (ret == 0) {
      return true;
   } else if (ret < 0) {
      static bool logged;

      if (!logged) {
         _debug_printf("virgl: os_same_file_description couldn't "
                       "determine if two DRM fds reference the same "
                       "file description.\n"
                       "If they do, bad things may happen!\n");
         logged = true;
      }
   }

   return false;
}

struct pipe_screen *
virgl_drm_screen_create(int fd, const struct pipe_screen_config *config)
{
   struct pipe_screen *pscreen = NULL;

   simple_mtx_lock(&virgl_screen_mutex);
   if (!fd_tab) {
      fd_tab = _mesa_hash_table_create(NULL, hash_fd, equal_fd);
      if (!fd_tab)
         goto unlock;
   }

   pscreen = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
   if (pscreen) {
      virgl_screen(pscreen)->refcnt++;
   } else {
      struct virgl_winsys *vws;
      int dup_fd = os_dupfd_cloexec(fd);

      vws = virgl_drm_winsys_create(dup_fd);
      if (!vws) {
         close(dup_fd);
         goto unlock;
      }

      pscreen = virgl_create_screen(vws, config);
      if (pscreen) {
         _mesa_hash_table_insert(fd_tab, intptr_to_pointer(dup_fd), pscreen);

         /* Bit of a hack, to avoid a circular linkage dependency,
          * i.e. the pipe driver having to call in to the winsys, we
          * override the pipe driver's screen->destroy():
          */
         virgl_screen(pscreen)->winsys_priv = pscreen->destroy;
         pscreen->destroy = virgl_drm_screen_destroy;
      }
   }

unlock:
   simple_mtx_unlock(&virgl_screen_mutex);
   return pscreen;
}