/*
 * Copyright 2014, 2015 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/stat.h>

#include "os/os_mman.h"
#include "util/os_time.h"
#include "util/u_memory.h"
#include "util/u_format.h"
#include "util/u_hash_table.h"
#include "util/u_inlines.h"
#include "state_tracker/drm_driver.h"
#include "virgl/virgl_screen.h"
#include "virgl/virgl_public.h"

#include <xf86drm.h>
#include "virtgpu_drm.h"

#include "virgl_drm_winsys.h"
#include "virgl_drm_public.h"

static inline boolean can_cache_resource(struct virgl_hw_res *res)
{
   return res->cacheable == TRUE;
}

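/* Release a hardware resource: drop it from the flink-name and bo-handle
 * hash tables, unmap any CPU mapping, and close the GEM handle.
 */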
static void virgl_hw_res_destroy(struct virgl_drm_winsys *qdws,
                                 struct virgl_hw_res *res)
{
   struct drm_gem_close args;

   if (res->flinked) {
      mtx_lock(&qdws->bo_handles_mutex);
      util_hash_table_remove(qdws->bo_names,
                             (void *)(uintptr_t)res->flink);
      mtx_unlock(&qdws->bo_handles_mutex);
   }

   if (res->bo_handle) {
      mtx_lock(&qdws->bo_handles_mutex);
      util_hash_table_remove(qdws->bo_handles,
                             (void *)(uintptr_t)res->bo_handle);
      mtx_unlock(&qdws->bo_handles_mutex);
   }

   if (res->ptr)
      os_munmap(res->ptr, res->size);

   memset(&args, 0, sizeof(args));
   args.handle = res->bo_handle;
   drmIoctl(qdws->fd, DRM_IOCTL_GEM_CLOSE, &args);
   FREE(res);
}

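/* Query whether the host is still using a resource, using the wait ioctl in
 * non-blocking mode: EBUSY from the kernel means the resource is busy.
 */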
static boolean virgl_drm_resource_is_busy(struct virgl_drm_winsys *qdws,
                                          struct virgl_hw_res *res)
{
   struct drm_virtgpu_3d_wait waitcmd;
   int ret;

   memset(&waitcmd, 0, sizeof(waitcmd));
   waitcmd.handle = res->bo_handle;
   waitcmd.flags = VIRTGPU_WAIT_NOWAIT;

   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
   if (ret && errno == EBUSY)
      return TRUE;
   return FALSE;
}

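/* Drain the delayed-destroy cache, destroying every resource on the list. */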
static void
virgl_cache_flush(struct virgl_drm_winsys *qdws)
{
   struct list_head *curr, *next;
   struct virgl_hw_res *res;

   mtx_lock(&qdws->mutex);
   curr = qdws->delayed.next;
   next = curr->next;

   while (curr != &qdws->delayed) {
      res = LIST_ENTRY(struct virgl_hw_res, curr, head);
      LIST_DEL(&res->head);
      virgl_hw_res_destroy(qdws, res);
      curr = next;
      next = curr->next;
   }
   mtx_unlock(&qdws->mutex);
}

static void
virgl_drm_winsys_destroy(struct virgl_winsys *qws)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);

   virgl_cache_flush(qdws);

   util_hash_table_destroy(qdws->bo_handles);
   util_hash_table_destroy(qdws->bo_names);
   mtx_destroy(&qdws->bo_handles_mutex);
   mtx_destroy(&qdws->mutex);

   FREE(qdws);
}

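/* Free cached resources at the head of the delayed-destroy list whose expiry
 * time has passed; the walk stops at the first entry that has not yet
 * expired.  Callers hold qdws->mutex.
 */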
static void
virgl_cache_list_check_free(struct virgl_drm_winsys *qdws)
{
   struct list_head *curr, *next;
   struct virgl_hw_res *res;
   int64_t now;

   now = os_time_get();
   curr = qdws->delayed.next;
   next = curr->next;
   while (curr != &qdws->delayed) {
      res = LIST_ENTRY(struct virgl_hw_res, curr, head);
      if (!os_time_timeout(res->start, res->end, now))
         break;

      LIST_DEL(&res->head);
      virgl_hw_res_destroy(qdws, res);
      curr = next;
      next = curr->next;
   }
}

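/* Reference-counting helper: when the resource previously held in *dres
 * loses its last reference it is either destroyed immediately or, if
 * cacheable, parked on the delayed-destroy list for later reuse.
 */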
static void virgl_drm_resource_reference(struct virgl_drm_winsys *qdws,
                                       struct virgl_hw_res **dres,
                                       struct virgl_hw_res *sres)
{
   struct virgl_hw_res *old = *dres;
   if (pipe_reference(&(*dres)->reference, &sres->reference)) {

      if (!can_cache_resource(old)) {
         virgl_hw_res_destroy(qdws, old);
      } else {
         mtx_lock(&qdws->mutex);
         virgl_cache_list_check_free(qdws);

         old->start = os_time_get();
         old->end = old->start + qdws->usecs;
         LIST_ADDTAIL(&old->head, &qdws->delayed);
         qdws->num_delayed++;
         mtx_unlock(&qdws->mutex);
      }
   }
   *dres = sres;
}

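/* Allocate a new host resource (and its backing guest object) with the
 * virtio-gpu RESOURCE_CREATE ioctl.
 */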
static struct virgl_hw_res *
virgl_drm_winsys_resource_create(struct virgl_winsys *qws,
                                 enum pipe_texture_target target,
                                 uint32_t format,
                                 uint32_t bind,
                                 uint32_t width,
                                 uint32_t height,
                                 uint32_t depth,
                                 uint32_t array_size,
                                 uint32_t last_level,
                                 uint32_t nr_samples,
                                 uint32_t size)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_virtgpu_resource_create createcmd;
   int ret;
   struct virgl_hw_res *res;
   uint32_t stride = width * util_format_get_blocksize(format);

   res = CALLOC_STRUCT(virgl_hw_res);
   if (!res)
      return NULL;

   memset(&createcmd, 0, sizeof(createcmd));
   createcmd.target = target;
   createcmd.format = format;
   createcmd.bind = bind;
   createcmd.width = width;
   createcmd.height = height;
   createcmd.depth = depth;
   createcmd.array_size = array_size;
   createcmd.last_level = last_level;
   createcmd.nr_samples = nr_samples;
   createcmd.stride = stride;
   createcmd.size = size;

   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &createcmd);
   if (ret != 0) {
      FREE(res);
      return NULL;
   }

   res->bind = bind;
   res->format = format;

   res->res_handle = createcmd.res_handle;
   res->bo_handle = createcmd.bo_handle;
   res->size = size;
   res->stride = stride;
   pipe_reference_init(&res->reference, 1);
   res->num_cs_references = 0;
   return res;
}

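/* Check whether a cached resource can stand in for a requested allocation.
 * Returns 1 if it is compatible and idle, 0 if it is incompatible, and -1 if
 * it is compatible but the host is still using it.
 */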
static inline int virgl_is_res_compat(struct virgl_drm_winsys *qdws,
                                      struct virgl_hw_res *res,
                                      uint32_t size, uint32_t bind,
                                      uint32_t format)
{
   if (res->bind != bind)
      return 0;
   if (res->format != format)
      return 0;
   if (res->size < size)
      return 0;
   if (res->size > size * 2)
      return 0;

   if (virgl_drm_resource_is_busy(qdws, res)) {
      return -1;
   }

   return 1;
}

static int
virgl_bo_transfer_put(struct virgl_winsys *vws,
                      struct virgl_hw_res *res,
                      const struct pipe_box *box,
                      uint32_t stride, uint32_t layer_stride,
                      uint32_t buf_offset, uint32_t level)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct drm_virtgpu_3d_transfer_to_host tohostcmd;

   memset(&tohostcmd, 0, sizeof(tohostcmd));
   tohostcmd.bo_handle = res->bo_handle;
   tohostcmd.box.x = box->x;
   tohostcmd.box.y = box->y;
   tohostcmd.box.z = box->z;
   tohostcmd.box.w = box->width;
   tohostcmd.box.h = box->height;
   tohostcmd.box.d = box->depth;
   tohostcmd.offset = buf_offset;
   tohostcmd.level = level;
  // tohostcmd.stride = stride;
  // tohostcmd.layer_stride = stride;
   return drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST, &tohostcmd);
}

static int
virgl_bo_transfer_get(struct virgl_winsys *vws,
                      struct virgl_hw_res *res,
                      const struct pipe_box *box,
                      uint32_t stride, uint32_t layer_stride,
                      uint32_t buf_offset, uint32_t level)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct drm_virtgpu_3d_transfer_from_host fromhostcmd;

   memset(&fromhostcmd, 0, sizeof(fromhostcmd));
   fromhostcmd.bo_handle = res->bo_handle;
   fromhostcmd.level = level;
   fromhostcmd.offset = buf_offset;
  // fromhostcmd.stride = stride;
  // fromhostcmd.layer_stride = layer_stride;
   fromhostcmd.box.x = box->x;
   fromhostcmd.box.y = box->y;
   fromhostcmd.box.z = box->z;
   fromhostcmd.box.w = box->width;
   fromhostcmd.box.h = box->height;
   fromhostcmd.box.d = box->depth;
   return drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST, &fromhostcmd);
}

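/* Resource allocation with reuse: for the buffer binding points that are
 * cached (constant/index/vertex/custom buffers), first try to pull a
 * compatible, idle resource off the delayed-destroy list before falling back
 * to a fresh allocation.
 */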
static struct virgl_hw_res *
virgl_drm_winsys_resource_cache_create(struct virgl_winsys *qws,
                                       enum pipe_texture_target target,
                                       uint32_t format,
                                       uint32_t bind,
                                       uint32_t width,
                                       uint32_t height,
                                       uint32_t depth,
                                       uint32_t array_size,
                                       uint32_t last_level,
                                       uint32_t nr_samples,
                                       uint32_t size)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct virgl_hw_res *res, *curr_res;
   struct list_head *curr, *next;
   int64_t now;
   int ret = 0;

   /* only store binds for vertex/index/const buffers */
   if (bind != VIRGL_BIND_CONSTANT_BUFFER && bind != VIRGL_BIND_INDEX_BUFFER &&
       bind != VIRGL_BIND_VERTEX_BUFFER && bind != VIRGL_BIND_CUSTOM)
      goto alloc;

   mtx_lock(&qdws->mutex);

   res = NULL;
   curr = qdws->delayed.next;
   next = curr->next;

   now = os_time_get();
   while (curr != &qdws->delayed) {
      curr_res = LIST_ENTRY(struct virgl_hw_res, curr, head);

      if (!res && ((ret = virgl_is_res_compat(qdws, curr_res, size, bind, format)) > 0))
         res = curr_res;
      else if (os_time_timeout(curr_res->start, curr_res->end, now)) {
         LIST_DEL(&curr_res->head);
         virgl_hw_res_destroy(qdws, curr_res);
      } else
         break;

      if (ret == -1)
         break;

      curr = next;
      next = curr->next;
   }

   if (!res && ret != -1) {
      while (curr != &qdws->delayed) {
         curr_res = LIST_ENTRY(struct virgl_hw_res, curr, head);
         ret = virgl_is_res_compat(qdws, curr_res, size, bind, format);
         if (ret > 0) {
            res = curr_res;
            break;
         }
         if (ret == -1)
            break;
         curr = next;
         next = curr->next;
      }
   }

   if (res) {
      LIST_DEL(&res->head);
      --qdws->num_delayed;
      mtx_unlock(&qdws->mutex);
      pipe_reference_init(&res->reference, 1);
      return res;
   }

   mtx_unlock(&qdws->mutex);

alloc:
   res = virgl_drm_winsys_resource_create(qws, target, format, bind,
                                           width, height, depth, array_size,
                                           last_level, nr_samples, size);
   if (bind == VIRGL_BIND_CONSTANT_BUFFER || bind == VIRGL_BIND_INDEX_BUFFER ||
       bind == VIRGL_BIND_VERTEX_BUFFER)
      res->cacheable = TRUE;
   return res;
}

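/* Import a resource that was shared by another process or driver, either
 * from a GEM flink name or from a dma-buf file descriptor, reusing an
 * already-imported handle when one is found in the hash tables.
 */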
static struct virgl_hw_res *
virgl_drm_winsys_resource_create_handle(struct virgl_winsys *qws,
                                        struct winsys_handle *whandle)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_gem_open open_arg = {};
   struct drm_virtgpu_resource_info info_arg = {};
   struct virgl_hw_res *res;
   uint32_t handle = whandle->handle;

   if (whandle->offset != 0) {
      fprintf(stderr, "attempt to import unsupported winsys offset %u\n",
              whandle->offset);
      return NULL;
   }

   mtx_lock(&qdws->bo_handles_mutex);

   if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
      res = util_hash_table_get(qdws->bo_names, (void*)(uintptr_t)handle);
      if (res) {
         struct virgl_hw_res *r = NULL;
         virgl_drm_resource_reference(qdws, &r, res);
         goto done;
      }
   }

   if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
      int r;
      r = drmPrimeFDToHandle(qdws->fd, whandle->handle, &handle);
      if (r) {
         res = NULL;
         goto done;
      }
   }

   res = util_hash_table_get(qdws->bo_handles, (void*)(uintptr_t)handle);
   fprintf(stderr, "resource %p for handle %d, pfd=%d\n", res, handle, whandle->handle);
   if (res) {
      struct virgl_hw_res *r = NULL;
      virgl_drm_resource_reference(qdws, &r, res);
      goto done;
   }

   res = CALLOC_STRUCT(virgl_hw_res);
   if (!res)
      goto done;

   if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
      res->bo_handle = handle;
   } else {
      fprintf(stderr, "gem open handle %d\n", handle);
      memset(&open_arg, 0, sizeof(open_arg));
      open_arg.name = whandle->handle;
      if (drmIoctl(qdws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
         FREE(res);
         res = NULL;
         goto done;
      }
      res->bo_handle = open_arg.handle;
   }
   res->name = handle;

   memset(&info_arg, 0, sizeof(info_arg));
   info_arg.bo_handle = res->bo_handle;

   if (drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_RESOURCE_INFO, &info_arg)) {
      /* close */
      FREE(res);
      res = NULL;
      goto done;
   }

   res->res_handle = info_arg.res_handle;

   res->size = info_arg.size;
   res->stride = info_arg.stride;
   pipe_reference_init(&res->reference, 1);
   res->num_cs_references = 0;

   util_hash_table_set(qdws->bo_handles, (void *)(uintptr_t)handle, res);

done:
   mtx_unlock(&qdws->bo_handles_mutex);
   return res;
}

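/* Export a resource to another process or driver as a flink name, a raw
 * KMS/GEM handle, or a dma-buf file descriptor, depending on the handle type
 * requested by the caller.
 */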
static boolean virgl_drm_winsys_resource_get_handle(struct virgl_winsys *qws,
                                                    struct virgl_hw_res *res,
                                                    uint32_t stride,
                                                    struct winsys_handle *whandle)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_gem_flink flink;

   if (!res)
      return FALSE;

   if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
      if (!res->flinked) {
         memset(&flink, 0, sizeof(flink));
         flink.handle = res->bo_handle;

         if (drmIoctl(qdws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
            return FALSE;
         }
         res->flinked = TRUE;
         res->flink = flink.name;

         mtx_lock(&qdws->bo_handles_mutex);
         util_hash_table_set(qdws->bo_names, (void *)(uintptr_t)res->flink, res);
         mtx_unlock(&qdws->bo_handles_mutex);
      }
      whandle->handle = res->flink;
   } else if (whandle->type == DRM_API_HANDLE_TYPE_KMS) {
      whandle->handle = res->bo_handle;
   } else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
      if (drmPrimeHandleToFD(qdws->fd, res->bo_handle, DRM_CLOEXEC, (int*)&whandle->handle))
         return FALSE;
      mtx_lock(&qdws->bo_handles_mutex);
      util_hash_table_set(qdws->bo_handles, (void *)(uintptr_t)res->bo_handle, res);
      mtx_unlock(&qdws->bo_handles_mutex);
   }
   whandle->stride = stride;
   return TRUE;
}

static void virgl_drm_winsys_resource_unref(struct virgl_winsys *qws,
                                            struct virgl_hw_res *hres)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);

   virgl_drm_resource_reference(qdws, &hres, NULL);
}

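/* Map a resource into the CPU's address space.  The VIRTGPU_MAP ioctl
 * returns an mmap offset for the buffer, which is then mapped through the
 * DRM fd; the mapping is cached on the resource and reused on later calls.
 */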
static void *virgl_drm_resource_map(struct virgl_winsys *qws,
                                    struct virgl_hw_res *res)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_virtgpu_map mmap_arg;
   void *ptr;

   if (res->ptr)
      return res->ptr;

   memset(&mmap_arg, 0, sizeof(mmap_arg));
   mmap_arg.handle = res->bo_handle;
   if (drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_MAP, &mmap_arg))
      return NULL;

   ptr = os_mmap(0, res->size, PROT_READ|PROT_WRITE, MAP_SHARED,
                 qdws->fd, mmap_arg.offset);
   if (ptr == MAP_FAILED)
      return NULL;

   res->ptr = ptr;
   return ptr;
}

static void virgl_drm_resource_wait(struct virgl_winsys *qws,
                                    struct virgl_hw_res *res)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_virtgpu_3d_wait waitcmd;
   int ret;

   memset(&waitcmd, 0, sizeof(waitcmd));
   waitcmd.handle = res->bo_handle;
 again:
   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
   if (ret == -EAGAIN)
      goto again;
}

static struct virgl_cmd_buf *virgl_drm_cmd_buf_create(struct virgl_winsys *qws)
{
   struct virgl_drm_cmd_buf *cbuf;

   cbuf = CALLOC_STRUCT(virgl_drm_cmd_buf);
   if (!cbuf)
      return NULL;

   cbuf->ws = qws;

   cbuf->nres = 512;
   cbuf->res_bo = CALLOC(cbuf->nres, sizeof(struct virgl_hw_buf*));
   if (!cbuf->res_bo) {
      FREE(cbuf);
      return NULL;
   }
   cbuf->res_hlist = MALLOC(cbuf->nres * sizeof(uint32_t));
   if (!cbuf->res_hlist) {
      FREE(cbuf->res_bo);
      FREE(cbuf);
      return NULL;
   }

   cbuf->base.buf = cbuf->buf;
   return &cbuf->base;
}

static void virgl_drm_cmd_buf_destroy(struct virgl_cmd_buf *_cbuf)
{
   struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);

   FREE(cbuf->res_hlist);
   FREE(cbuf->res_bo);
   FREE(cbuf);
}

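/* Check whether a resource is already in the command buffer's relocation
 * list.  A small hash keyed on the resource handle remembers the most recent
 * slot; on a miss the list is scanned linearly and the hash entry updated.
 */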
static boolean virgl_drm_lookup_res(struct virgl_drm_cmd_buf *cbuf,
                                    struct virgl_hw_res *res)
{
   unsigned hash = res->res_handle & (sizeof(cbuf->is_handle_added)-1);
   int i;

   if (cbuf->is_handle_added[hash]) {
      i = cbuf->reloc_indices_hashlist[hash];
      if (cbuf->res_bo[i] == res)
         return true;

      for (i = 0; i < cbuf->cres; i++) {
         if (cbuf->res_bo[i] == res) {
            cbuf->reloc_indices_hashlist[hash] = i;
            return true;
         }
      }
   }
   return false;
}

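/* Append a resource to the command buffer's relocation list, taking a
 * reference and recording its GEM handle for the execbuffer ioctl.
 */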
static void virgl_drm_add_res(struct virgl_drm_winsys *qdws,
                              struct virgl_drm_cmd_buf *cbuf,
                              struct virgl_hw_res *res)
{
   unsigned hash = res->res_handle & (sizeof(cbuf->is_handle_added)-1);

   if (cbuf->cres >= cbuf->nres) {
      fprintf(stderr,"failure to add relocation\n");
      return;
   }

   cbuf->res_bo[cbuf->cres] = NULL;
   virgl_drm_resource_reference(qdws, &cbuf->res_bo[cbuf->cres], res);
   cbuf->res_hlist[cbuf->cres] = res->bo_handle;
   cbuf->is_handle_added[hash] = TRUE;

   cbuf->reloc_indices_hashlist[hash] = cbuf->cres;
   p_atomic_inc(&res->num_cs_references);
   cbuf->cres++;
}

static void virgl_drm_release_all_res(struct virgl_drm_winsys *qdws,
                                      struct virgl_drm_cmd_buf *cbuf)
{
   int i;

   for (i = 0; i < cbuf->cres; i++) {
      p_atomic_dec(&cbuf->res_bo[i]->num_cs_references);
      virgl_drm_resource_reference(qdws, &cbuf->res_bo[i], NULL);
   }
   cbuf->cres = 0;
}

static void virgl_drm_emit_res(struct virgl_winsys *qws,
                               struct virgl_cmd_buf *_cbuf,
                               struct virgl_hw_res *res, boolean write_buf)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
   boolean already_in_list = virgl_drm_lookup_res(cbuf, res);

   if (write_buf)
      cbuf->base.buf[cbuf->base.cdw++] = res->res_handle;

   if (!already_in_list)
      virgl_drm_add_res(qdws, cbuf, res);
}

static boolean virgl_drm_res_is_ref(struct virgl_winsys *qws,
                                    struct virgl_cmd_buf *_cbuf,
                                    struct virgl_hw_res *res)
{
   if (!res->num_cs_references)
      return FALSE;

   return TRUE;
}

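/* Submit the accumulated command stream to the kernel with the virtio-gpu
 * EXECBUFFER ioctl, then drop all relocation references and reset the
 * command buffer for reuse.
 */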
static int virgl_drm_winsys_submit_cmd(struct virgl_winsys *qws,
                                       struct virgl_cmd_buf *_cbuf)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
   struct drm_virtgpu_execbuffer eb;
   int ret;

   if (cbuf->base.cdw == 0)
      return 0;

   memset(&eb, 0, sizeof(struct drm_virtgpu_execbuffer));
   eb.command = (unsigned long)(void*)cbuf->buf;
   eb.size = cbuf->base.cdw * 4;
   eb.num_bo_handles = cbuf->cres;
   eb.bo_handles = (unsigned long)(void *)cbuf->res_hlist;

   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb);
   if (ret == -1)
      fprintf(stderr,"got error from kernel - expect bad rendering %d\n", errno);
   cbuf->base.cdw = 0;

   virgl_drm_release_all_res(qdws, cbuf);

   memset(cbuf->is_handle_added, 0, sizeof(cbuf->is_handle_added));
   return ret;
}

static int virgl_drm_get_caps(struct virgl_winsys *vws,
                              struct virgl_drm_caps *caps)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct drm_virtgpu_get_caps args;

   memset(&args, 0, sizeof(args));

   args.cap_set_id = 1;
   args.addr = (unsigned long)&caps->caps;
   args.size = sizeof(union virgl_caps);
   return drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &args);
}

#define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x)))

static unsigned handle_hash(void *key)
{
    return PTR_TO_UINT(key);
}

static int handle_compare(void *key1, void *key2)
{
    return PTR_TO_UINT(key1) != PTR_TO_UINT(key2);
}

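/* Fences are modelled as tiny dummy buffer resources: the host is done with
 * a fence once it has finished using the resource, so waiting on a fence
 * reduces to waiting for that resource to become idle.
 */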
static struct pipe_fence_handle *
virgl_cs_create_fence(struct virgl_winsys *vws)
{
   struct virgl_hw_res *res;

   res = virgl_drm_winsys_resource_cache_create(vws,
                                                PIPE_BUFFER,
                                                PIPE_FORMAT_R8_UNORM,
                                                VIRGL_BIND_CUSTOM,
                                                8, 1, 1, 0, 0, 0, 8);

   return (struct pipe_fence_handle *)res;
}

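/* Wait for a fence.  A zero timeout is a simple busy query, a finite timeout
 * busy-polls the resource with short sleeps, and an infinite timeout falls
 * through to the blocking wait ioctl.
 */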
static bool virgl_fence_wait(struct virgl_winsys *vws,
                             struct pipe_fence_handle *fence,
                             uint64_t timeout)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct virgl_hw_res *res = virgl_hw_res(fence);

   if (timeout == 0)
      return !virgl_drm_resource_is_busy(vdws, res);

   if (timeout != PIPE_TIMEOUT_INFINITE) {
      int64_t start_time = os_time_get();
      timeout /= 1000;
      while (virgl_drm_resource_is_busy(vdws, res)) {
         if (os_time_get() - start_time >= timeout)
            return FALSE;
         os_time_sleep(10);
      }
      return TRUE;
   }
   virgl_drm_resource_wait(vws, res);
   return TRUE;
}

static void virgl_fence_reference(struct virgl_winsys *vws,
                                  struct pipe_fence_handle **dst,
                                  struct pipe_fence_handle *src)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   virgl_drm_resource_reference(vdws, (struct virgl_hw_res **)dst,
                                virgl_hw_res(src));
}

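/* Build the winsys object for a virtio-gpu DRM fd: set up the resource cache
 * and handle tables and fill in the virgl_winsys function table.
 */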
static struct virgl_winsys *
virgl_drm_winsys_create(int drmFD)
{
   struct virgl_drm_winsys *qdws;

   qdws = CALLOC_STRUCT(virgl_drm_winsys);
   if (!qdws)
      return NULL;

   qdws->fd = drmFD;
   qdws->num_delayed = 0;
   qdws->usecs = 1000000;
   LIST_INITHEAD(&qdws->delayed);
   (void) mtx_init(&qdws->mutex, mtx_plain);
   (void) mtx_init(&qdws->bo_handles_mutex, mtx_plain);
   qdws->bo_handles = util_hash_table_create(handle_hash, handle_compare);
   qdws->bo_names = util_hash_table_create(handle_hash, handle_compare);
   qdws->base.destroy = virgl_drm_winsys_destroy;

   qdws->base.transfer_put = virgl_bo_transfer_put;
   qdws->base.transfer_get = virgl_bo_transfer_get;
   qdws->base.resource_create = virgl_drm_winsys_resource_cache_create;
   qdws->base.resource_unref = virgl_drm_winsys_resource_unref;
   qdws->base.resource_create_from_handle = virgl_drm_winsys_resource_create_handle;
   qdws->base.resource_get_handle = virgl_drm_winsys_resource_get_handle;
   qdws->base.resource_map = virgl_drm_resource_map;
   qdws->base.resource_wait = virgl_drm_resource_wait;
   qdws->base.cmd_buf_create = virgl_drm_cmd_buf_create;
   qdws->base.cmd_buf_destroy = virgl_drm_cmd_buf_destroy;
   qdws->base.submit_cmd = virgl_drm_winsys_submit_cmd;
   qdws->base.emit_res = virgl_drm_emit_res;
   qdws->base.res_is_referenced = virgl_drm_res_is_ref;

   qdws->base.cs_create_fence = virgl_cs_create_fence;
   qdws->base.fence_wait = virgl_fence_wait;
   qdws->base.fence_reference = virgl_fence_reference;

   qdws->base.get_caps = virgl_drm_get_caps;
   return &qdws->base;
}

static struct util_hash_table *fd_tab = NULL;
static mtx_t virgl_screen_mutex = _MTX_INITIALIZER_NP;

static void
virgl_drm_screen_destroy(struct pipe_screen *pscreen)
{
   struct virgl_screen *screen = virgl_screen(pscreen);
   boolean destroy;

   mtx_lock(&virgl_screen_mutex);
   destroy = --screen->refcnt == 0;
   if (destroy) {
      int fd = virgl_drm_winsys(screen->vws)->fd;
      util_hash_table_remove(fd_tab, intptr_to_pointer(fd));
   }
   mtx_unlock(&virgl_screen_mutex);

   if (destroy) {
      pscreen->destroy = screen->winsys_priv;
      pscreen->destroy(pscreen);
   }
}

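/* The fd_tab table is keyed on file descriptors, but two different fds may
 * refer to the same DRM device node, so hashing and comparison go through
 * fstat() and use the device/inode identity instead of the fd value.
 */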
static unsigned hash_fd(void *key)
{
   int fd = pointer_to_intptr(key);
   struct stat stat;
   fstat(fd, &stat);

   return stat.st_dev ^ stat.st_ino ^ stat.st_rdev;
}

static int compare_fd(void *key1, void *key2)
{
   int fd1 = pointer_to_intptr(key1);
   int fd2 = pointer_to_intptr(key2);
   struct stat stat1, stat2;
   fstat(fd1, &stat1);
   fstat(fd2, &stat2);

   return stat1.st_dev != stat2.st_dev ||
         stat1.st_ino != stat2.st_ino ||
         stat1.st_rdev != stat2.st_rdev;
}

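/* Public entry point: create (or reuse) a pipe_screen for a virtio-gpu DRM
 * fd.  Screens are de-duplicated per device through fd_tab, and the fd is
 * duplicated so the screen owns its own reference to the device.
 */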
struct pipe_screen *
virgl_drm_screen_create(int fd)
{
   struct pipe_screen *pscreen = NULL;

   mtx_lock(&virgl_screen_mutex);
   if (!fd_tab) {
      fd_tab = util_hash_table_create(hash_fd, compare_fd);
      if (!fd_tab)
         goto unlock;
   }

   pscreen = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
   if (pscreen) {
      virgl_screen(pscreen)->refcnt++;
   } else {
      struct virgl_winsys *vws;
      int dup_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);

      vws = virgl_drm_winsys_create(dup_fd);

      pscreen = virgl_create_screen(vws);
      if (pscreen) {
         util_hash_table_set(fd_tab, intptr_to_pointer(dup_fd), pscreen);

         /* Bit of a hack, to avoid circular linkage dependency,
          * ie. pipe driver having to call in to winsys, we
          * override the pipe driver's screen->destroy():
          */
         virgl_screen(pscreen)->winsys_priv = pscreen->destroy;
         pscreen->destroy = virgl_drm_screen_destroy;
      }
   }

unlock:
   mtx_unlock(&virgl_screen_mutex);
   return pscreen;
}
901