/*
 * Copyright 2014, 2015 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/stat.h>

#include "os/os_mman.h"
#include "util/os_file.h"
#include "util/os_time.h"
#include "util/u_memory.h"
#include "util/format/u_format.h"
#include "util/u_hash_table.h"
#include "util/u_inlines.h"
#include "util/u_pointer.h"
#include "frontend/drm_driver.h"
#include "virgl/virgl_screen.h"
#include "virgl/virgl_public.h"
#include "virtio-gpu/virgl_protocol.h"

#include <xf86drm.h>
#include <libsync.h>
#include "drm-uapi/virtgpu_drm.h"

#include "virgl_drm_winsys.h"
#include "virgl_drm_public.h"


#define VIRGL_DRM_VERSION(major, minor) ((major) << 16 | (minor))
#define VIRGL_DRM_VERSION_FENCE_FD      VIRGL_DRM_VERSION(0, 1)

/* Gets a pointer to the virgl_hw_res containing the pointed-to cache entry. */
#define cache_entry_container_res(ptr) \
   (struct virgl_hw_res*)((char*)ptr - offsetof(struct virgl_hw_res, cache_entry))

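/* Only idle resources with one of these bind types are eligible for reuse
 * through the resource cache; everything else is destroyed as soon as its
 * last reference goes away.
 */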
static inline boolean can_cache_resource(uint32_t bind)
{
   return bind == VIRGL_BIND_CONSTANT_BUFFER ||
          bind == VIRGL_BIND_INDEX_BUFFER ||
          bind == VIRGL_BIND_VERTEX_BUFFER ||
          bind == VIRGL_BIND_CUSTOM ||
          bind == VIRGL_BIND_STAGING;
}

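/* Final teardown of a resource: drop it from the handle/name tables, unmap
 * any CPU mapping, and close the GEM handle so the kernel can release the
 * backing object.
 */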
static void virgl_hw_res_destroy(struct virgl_drm_winsys *qdws,
                                 struct virgl_hw_res *res)
{
   struct drm_gem_close args;

   mtx_lock(&qdws->bo_handles_mutex);
   _mesa_hash_table_remove_key(qdws->bo_handles,
                               (void *)(uintptr_t)res->bo_handle);
   if (res->flink_name)
      _mesa_hash_table_remove_key(qdws->bo_names,
                                  (void *)(uintptr_t)res->flink_name);
   mtx_unlock(&qdws->bo_handles_mutex);
   if (res->ptr)
      os_munmap(res->ptr, res->size);

   memset(&args, 0, sizeof(args));
   args.handle = res->bo_handle;
   drmIoctl(qdws->fd, DRM_IOCTL_GEM_CLOSE, &args);
   FREE(res);
}

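/* Returns whether the resource may still be in use by the host or kernel.
 * The maybe_busy/external flags let us skip the ioctl for resources we know
 * are idle; otherwise we ask the kernel with a non-blocking VIRTGPU_WAIT,
 * which fails with EBUSY while the resource is busy.
 */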
static boolean virgl_drm_resource_is_busy(struct virgl_winsys *vws,
                                          struct virgl_hw_res *res)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct drm_virtgpu_3d_wait waitcmd;
   int ret;

   if (!p_atomic_read(&res->maybe_busy) && !p_atomic_read(&res->external))
      return false;

   memset(&waitcmd, 0, sizeof(waitcmd));
   waitcmd.handle = res->bo_handle;
   waitcmd.flags = VIRTGPU_WAIT_NOWAIT;

   ret = drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
   if (ret && errno == EBUSY)
      return true;

   p_atomic_set(&res->maybe_busy, false);

   return false;
}

static void
virgl_drm_winsys_destroy(struct virgl_winsys *qws)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);

   virgl_resource_cache_flush(&qdws->cache);

   _mesa_hash_table_destroy(qdws->bo_handles, NULL);
   _mesa_hash_table_destroy(qdws->bo_names, NULL);
   mtx_destroy(&qdws->bo_handles_mutex);
   mtx_destroy(&qdws->mutex);

   FREE(qdws);
}

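/* Standard reference-counting helper. When the last reference to the old
 * resource goes away, it is either recycled into the cache (if its bind type
 * is cacheable and it was never shared externally) or destroyed outright.
 */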
static void virgl_drm_resource_reference(struct virgl_winsys *qws,
                                         struct virgl_hw_res **dres,
                                         struct virgl_hw_res *sres)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct virgl_hw_res *old = *dres;

   if (pipe_reference(&(*dres)->reference, &sres->reference)) {

      if (!can_cache_resource(old->bind) ||
          p_atomic_read(&old->external)) {
         virgl_hw_res_destroy(qdws, old);
      } else {
         mtx_lock(&qdws->mutex);
         virgl_resource_cache_add(&qdws->cache, &old->cache_entry);
         mtx_unlock(&qdws->mutex);
      }
   }
   *dres = sres;
}

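/* Allocates a host-visible blob resource. The pipe-resource parameters are
 * encoded into a VIRGL_CCMD_PIPE_RESOURCE_CREATE command that the kernel
 * forwards to the host, and the blob_id ties that command to the blob
 * memory created by DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB.
 */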
static struct virgl_hw_res *
virgl_drm_winsys_resource_create_blob(struct virgl_winsys *qws,
                                      enum pipe_texture_target target,
                                      uint32_t format,
                                      uint32_t bind,
                                      uint32_t width,
                                      uint32_t height,
                                      uint32_t depth,
                                      uint32_t array_size,
                                      uint32_t last_level,
                                      uint32_t nr_samples,
                                      uint32_t flags,
                                      uint32_t size)
{
   int ret;
   int32_t blob_id;
   uint32_t cmd[VIRGL_PIPE_RES_CREATE_SIZE + 1] = { 0 };
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_virtgpu_resource_create_blob drm_rc_blob = { 0 };
   struct virgl_hw_res *res;

   res = CALLOC_STRUCT(virgl_hw_res);
   if (!res)
      return NULL;

   /* Make sure the blob is page aligned. */
   if (flags & (VIRGL_RESOURCE_FLAG_MAP_PERSISTENT |
                VIRGL_RESOURCE_FLAG_MAP_COHERENT)) {
      width = ALIGN(width, getpagesize());
      size = ALIGN(size, getpagesize());
   }

   blob_id = p_atomic_inc_return(&qdws->blob_id);
   cmd[0] = VIRGL_CMD0(VIRGL_CCMD_PIPE_RESOURCE_CREATE, 0, VIRGL_PIPE_RES_CREATE_SIZE);
   cmd[VIRGL_PIPE_RES_CREATE_FORMAT] = format;
   cmd[VIRGL_PIPE_RES_CREATE_BIND] = bind;
   cmd[VIRGL_PIPE_RES_CREATE_TARGET] = target;
   cmd[VIRGL_PIPE_RES_CREATE_WIDTH] = width;
   cmd[VIRGL_PIPE_RES_CREATE_HEIGHT] = height;
   cmd[VIRGL_PIPE_RES_CREATE_DEPTH] = depth;
   cmd[VIRGL_PIPE_RES_CREATE_ARRAY_SIZE] = array_size;
   cmd[VIRGL_PIPE_RES_CREATE_LAST_LEVEL] = last_level;
   cmd[VIRGL_PIPE_RES_CREATE_NR_SAMPLES] = nr_samples;
   cmd[VIRGL_PIPE_RES_CREATE_FLAGS] = flags;
   cmd[VIRGL_PIPE_RES_CREATE_BLOB_ID] = blob_id;

   drm_rc_blob.cmd = (unsigned long)(void *)&cmd;
   drm_rc_blob.cmd_size = 4 * (VIRGL_PIPE_RES_CREATE_SIZE + 1);
   drm_rc_blob.size = size;
   drm_rc_blob.blob_mem = VIRTGPU_BLOB_MEM_HOST3D;
   drm_rc_blob.blob_flags = VIRTGPU_BLOB_FLAG_USE_MAPPABLE;
   drm_rc_blob.blob_id = (uint64_t) blob_id;

   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &drm_rc_blob);
   if (ret != 0) {
      FREE(res);
      return NULL;
   }

   res->bind = bind;
   res->res_handle = drm_rc_blob.res_handle;
   res->bo_handle = drm_rc_blob.bo_handle;
   res->size = size;
   res->flags = flags;
   pipe_reference_init(&res->reference, 1);
   p_atomic_set(&res->external, false);
   p_atomic_set(&res->num_cs_references, 0);
   virgl_resource_cache_entry_init(&res->cache_entry, size, bind, format,
                                   flags);
   return res;
}

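/* Allocates a classic (non-blob) resource through the original
 * DRM_IOCTL_VIRTGPU_RESOURCE_CREATE path; per the comment further below,
 * these resources are backed by a guest memory shadow of the host resource.
 */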
static struct virgl_hw_res *
virgl_drm_winsys_resource_create(struct virgl_winsys *qws,
                                 enum pipe_texture_target target,
                                 uint32_t format,
                                 uint32_t bind,
                                 uint32_t width,
                                 uint32_t height,
                                 uint32_t depth,
                                 uint32_t array_size,
                                 uint32_t last_level,
                                 uint32_t nr_samples,
                                 uint32_t size,
                                 bool for_fencing)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_virtgpu_resource_create createcmd;
   int ret;
   struct virgl_hw_res *res;
   uint32_t stride = width * util_format_get_blocksize(format);

   res = CALLOC_STRUCT(virgl_hw_res);
   if (!res)
      return NULL;

   memset(&createcmd, 0, sizeof(createcmd));
   createcmd.target = target;
   createcmd.format = pipe_to_virgl_format(format);
   createcmd.bind = bind;
   createcmd.width = width;
   createcmd.height = height;
   createcmd.depth = depth;
   createcmd.array_size = array_size;
   createcmd.last_level = last_level;
   createcmd.nr_samples = nr_samples;
   createcmd.stride = stride;
   createcmd.size = size;

   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &createcmd);
   if (ret != 0) {
      FREE(res);
      return NULL;
   }

   res->bind = bind;

   res->res_handle = createcmd.res_handle;
   res->bo_handle = createcmd.bo_handle;
   res->size = size;
   res->target = target;
   pipe_reference_init(&res->reference, 1);
   p_atomic_set(&res->external, false);
   p_atomic_set(&res->num_cs_references, 0);

   /* A newly created resource is considered busy by the kernel until the
    * command is retired. But for our purposes, we can consider it idle
    * unless it is used for fencing.
    */
   p_atomic_set(&res->maybe_busy, for_fencing);

   virgl_resource_cache_entry_init(&res->cache_entry, size, bind, format, 0);

   return res;
}

/*
 * Previously, with DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, all host resources had
 * a guest memory shadow resource with size = stride * bpp. Virglrenderer
 * would guess the stride implicitly when performing transfer operations, if
 * the stride wasn't specified. Interestingly, vtest would specify the stride.
 *
 * Guessing the stride breaks down with YUV images, which may be imported into
 * Mesa as 3R8 images. It also doesn't work if an external allocator
 * (e.g., minigbm) decides to use a stride not equal to width * bpp. With blob
 * resources, the size = stride * bpp restriction no longer holds, so use
 * the explicit strides passed into Mesa.
 */
static inline bool use_explicit_stride(struct virgl_hw_res *res, uint32_t level,
                                       uint32_t depth)
{
   return (params[param_resource_blob].value &&
           res->blob_mem == VIRTGPU_BLOB_MEM_HOST3D_GUEST &&
           res->target == PIPE_TEXTURE_2D &&
           level == 0 && depth == 1);
}

static int
virgl_bo_transfer_put(struct virgl_winsys *vws,
                      struct virgl_hw_res *res,
                      const struct pipe_box *box,
                      uint32_t stride, uint32_t layer_stride,
                      uint32_t buf_offset, uint32_t level)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct drm_virtgpu_3d_transfer_to_host tohostcmd;

   p_atomic_set(&res->maybe_busy, true);

   memset(&tohostcmd, 0, sizeof(tohostcmd));
   tohostcmd.bo_handle = res->bo_handle;
   tohostcmd.box.x = box->x;
   tohostcmd.box.y = box->y;
   tohostcmd.box.z = box->z;
   tohostcmd.box.w = box->width;
   tohostcmd.box.h = box->height;
   tohostcmd.box.d = box->depth;
   tohostcmd.offset = buf_offset;
   tohostcmd.level = level;

   if (use_explicit_stride(res, level, box->depth))
      tohostcmd.stride = stride;

   return drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST, &tohostcmd);
}

static int
virgl_bo_transfer_get(struct virgl_winsys *vws,
                      struct virgl_hw_res *res,
                      const struct pipe_box *box,
                      uint32_t stride, uint32_t layer_stride,
                      uint32_t buf_offset, uint32_t level)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct drm_virtgpu_3d_transfer_from_host fromhostcmd;

   p_atomic_set(&res->maybe_busy, true);

   memset(&fromhostcmd, 0, sizeof(fromhostcmd));
   fromhostcmd.bo_handle = res->bo_handle;
   fromhostcmd.level = level;
   fromhostcmd.offset = buf_offset;
   fromhostcmd.box.x = box->x;
   fromhostcmd.box.y = box->y;
   fromhostcmd.box.z = box->z;
   fromhostcmd.box.w = box->width;
   fromhostcmd.box.h = box->height;
   fromhostcmd.box.d = box->depth;

   if (use_explicit_stride(res, level, box->depth))
      fromhostcmd.stride = stride;

   return drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST, &fromhostcmd);
}

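/* Main allocation entry point. First tries to pull a compatible idle
 * resource out of the cache; on a miss, persistent/coherent mappings go
 * through the blob path and everything else through the classic path.
 */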
static struct virgl_hw_res *
virgl_drm_winsys_resource_cache_create(struct virgl_winsys *qws,
                                       enum pipe_texture_target target,
                                       uint32_t format,
                                       uint32_t bind,
                                       uint32_t width,
                                       uint32_t height,
                                       uint32_t depth,
                                       uint32_t array_size,
                                       uint32_t last_level,
                                       uint32_t nr_samples,
                                       uint32_t flags,
                                       uint32_t size)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct virgl_hw_res *res;
   struct virgl_resource_cache_entry *entry;

   if (!can_cache_resource(bind))
      goto alloc;

   mtx_lock(&qdws->mutex);

   entry = virgl_resource_cache_remove_compatible(&qdws->cache, size,
                                                  bind, format, flags);
   if (entry) {
      res = cache_entry_container_res(entry);
      mtx_unlock(&qdws->mutex);
      pipe_reference_init(&res->reference, 1);
      return res;
   }

   mtx_unlock(&qdws->mutex);

alloc:
   if (flags & (VIRGL_RESOURCE_FLAG_MAP_PERSISTENT |
                VIRGL_RESOURCE_FLAG_MAP_COHERENT))
      res = virgl_drm_winsys_resource_create_blob(qws, target, format, bind,
                                                  width, height, depth,
                                                  array_size, last_level,
                                                  nr_samples, flags, size);
   else
      res = virgl_drm_winsys_resource_create(qws, target, format, bind, width,
                                             height, depth, array_size,
                                             last_level, nr_samples, size,
                                             false);
   return res;
}

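/* Imports a resource from a winsys handle (flink name, KMS handle, or
 * dma-buf fd). Handles are deduplicated through the bo_names/bo_handles
 * tables so that one GEM object never maps to two virgl_hw_res structs.
 */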
static struct virgl_hw_res *
virgl_drm_winsys_resource_create_handle(struct virgl_winsys *qws,
                                        struct winsys_handle *whandle,
                                        uint32_t *plane,
                                        uint32_t *stride,
                                        uint32_t *plane_offset,
                                        uint64_t *modifier,
                                        uint32_t *blob_mem)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_gem_open open_arg = {};
   struct drm_virtgpu_resource_info info_arg = {};
   struct virgl_hw_res *res = NULL;
   uint32_t handle = whandle->handle;

   if (whandle->offset != 0 && whandle->type == WINSYS_HANDLE_TYPE_SHARED) {
      _debug_printf("attempt to import unsupported winsys offset %u\n",
                    whandle->offset);
      return NULL;
   } else if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
      *plane = whandle->plane;
      *stride = whandle->stride;
      *plane_offset = whandle->offset;
      *modifier = whandle->modifier;
   }

   mtx_lock(&qdws->bo_handles_mutex);

   /* We must maintain a list of pairs <handle, bo>, so that we always return
    * the same BO for one particular handle. If we didn't do that and created
    * more than one BO for the same handle and then relocated them in a CS,
    * we would hit a deadlock in the kernel.
    *
    * The list of pairs is guarded by a mutex, of course. */
   if (whandle->type == WINSYS_HANDLE_TYPE_SHARED) {
      res = util_hash_table_get(qdws->bo_names, (void*)(uintptr_t)handle);
   } else if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
      int r;
      r = drmPrimeFDToHandle(qdws->fd, whandle->handle, &handle);
      if (r)
         goto done;
      res = util_hash_table_get(qdws->bo_handles, (void*)(uintptr_t)handle);
   } else {
      /* Unknown handle type */
      goto done;
   }

   if (res) {
      struct virgl_hw_res *r = NULL;
      virgl_drm_resource_reference(&qdws->base, &r, res);
      goto done;
   }

   res = CALLOC_STRUCT(virgl_hw_res);
   if (!res)
      goto done;

   if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
      res->bo_handle = handle;
   } else {
      memset(&open_arg, 0, sizeof(open_arg));
      open_arg.name = whandle->handle;
      if (drmIoctl(qdws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
         FREE(res);
         res = NULL;
         goto done;
      }
      res->bo_handle = open_arg.handle;
      res->flink_name = whandle->handle;
   }

   memset(&info_arg, 0, sizeof(info_arg));
   info_arg.bo_handle = res->bo_handle;

   if (drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_RESOURCE_INFO, &info_arg)) {
      /* close */
      FREE(res);
      res = NULL;
      goto done;
   }

   res->res_handle = info_arg.res_handle;
   res->blob_mem = info_arg.blob_mem;
   *blob_mem = info_arg.blob_mem;

   res->size = info_arg.size;
   pipe_reference_init(&res->reference, 1);
   p_atomic_set(&res->external, true);
   res->num_cs_references = 0;

   if (res->flink_name)
      _mesa_hash_table_insert(qdws->bo_names, (void *)(uintptr_t)res->flink_name, res);
   _mesa_hash_table_insert(qdws->bo_handles, (void *)(uintptr_t)res->bo_handle, res);

done:
   mtx_unlock(&qdws->bo_handles_mutex);
   return res;
}

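/* Exports a resource as a winsys handle: a flink name (created lazily), the
 * raw GEM handle for KMS, or a dma-buf fd. Exported resources are marked
 * external so they are never recycled through the cache.
 */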
static boolean virgl_drm_winsys_resource_get_handle(struct virgl_winsys *qws,
                                                    struct virgl_hw_res *res,
                                                    uint32_t stride,
                                                    struct winsys_handle *whandle)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_gem_flink flink;

   if (!res)
      return FALSE;

   if (whandle->type == WINSYS_HANDLE_TYPE_SHARED) {
      if (!res->flink_name) {
         memset(&flink, 0, sizeof(flink));
         flink.handle = res->bo_handle;

         if (drmIoctl(qdws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
            return FALSE;
         }
         res->flink_name = flink.name;

         mtx_lock(&qdws->bo_handles_mutex);
         _mesa_hash_table_insert(qdws->bo_names, (void *)(uintptr_t)res->flink_name, res);
         mtx_unlock(&qdws->bo_handles_mutex);
      }
      whandle->handle = res->flink_name;
   } else if (whandle->type == WINSYS_HANDLE_TYPE_KMS) {
      whandle->handle = res->bo_handle;
   } else if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
      if (drmPrimeHandleToFD(qdws->fd, res->bo_handle, DRM_CLOEXEC, (int*)&whandle->handle))
         return FALSE;
      mtx_lock(&qdws->bo_handles_mutex);
      _mesa_hash_table_insert(qdws->bo_handles, (void *)(uintptr_t)res->bo_handle, res);
      mtx_unlock(&qdws->bo_handles_mutex);
   }

   p_atomic_set(&res->external, true);

   whandle->stride = stride;
   return TRUE;
}

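/* Lazily maps the resource into the CPU's address space. The mapping is
 * cached on the resource and stays in place until the resource is destroyed.
 */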
static void *virgl_drm_resource_map(struct virgl_winsys *qws,
                                    struct virgl_hw_res *res)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_virtgpu_map mmap_arg;
   void *ptr;

   if (res->ptr)
      return res->ptr;

   memset(&mmap_arg, 0, sizeof(mmap_arg));
   mmap_arg.handle = res->bo_handle;
   if (drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_MAP, &mmap_arg))
      return NULL;

   ptr = os_mmap(0, res->size, PROT_READ|PROT_WRITE, MAP_SHARED,
                 qdws->fd, mmap_arg.offset);
   if (ptr == MAP_FAILED)
      return NULL;

   res->ptr = ptr;
   return ptr;
}

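/* Blocks until the host and kernel are done with the resource, using the
 * same maybe_busy/external fast path as virgl_drm_resource_is_busy().
 */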
static void virgl_drm_resource_wait(struct virgl_winsys *qws,
                                    struct virgl_hw_res *res)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_virtgpu_3d_wait waitcmd;
   int ret;

   if (!p_atomic_read(&res->maybe_busy) && !p_atomic_read(&res->external))
      return;

   memset(&waitcmd, 0, sizeof(waitcmd));
   waitcmd.handle = res->bo_handle;

   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
   if (ret)
      _debug_printf("waiting got error - %d, slow gpu or hang?\n", errno);

   p_atomic_set(&res->maybe_busy, false);
}

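/* The per-cbuf resource list tracks every BO referenced by the command
 * stream: res_bo holds the references and res_hlist the matching GEM
 * handles that are handed to the execbuffer ioctl.
 */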
static bool virgl_drm_alloc_res_list(struct virgl_drm_cmd_buf *cbuf,
                                     int initial_size)
{
   cbuf->nres = initial_size;
   cbuf->cres = 0;

   cbuf->res_bo = CALLOC(cbuf->nres, sizeof(struct virgl_hw_buf*));
   if (!cbuf->res_bo)
      return false;

   cbuf->res_hlist = MALLOC(cbuf->nres * sizeof(uint32_t));
   if (!cbuf->res_hlist) {
      FREE(cbuf->res_bo);
      return false;
   }

   return true;
}

static void virgl_drm_free_res_list(struct virgl_drm_cmd_buf *cbuf)
{
   int i;

   for (i = 0; i < cbuf->cres; i++) {
      p_atomic_dec(&cbuf->res_bo[i]->num_cs_references);
      virgl_drm_resource_reference(cbuf->ws, &cbuf->res_bo[i], NULL);
   }
   FREE(cbuf->res_hlist);
   FREE(cbuf->res_bo);
}

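/* Checks whether a resource is already in the cbuf's resource list. The
 * single-entry hash is a fast path; on a collision we fall back to a linear
 * scan and refresh the hash entry with the index we found.
 */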
static boolean virgl_drm_lookup_res(struct virgl_drm_cmd_buf *cbuf,
                                    struct virgl_hw_res *res)
{
   unsigned hash = res->res_handle & (sizeof(cbuf->is_handle_added)-1);
   int i;

   if (cbuf->is_handle_added[hash]) {
      i = cbuf->reloc_indices_hashlist[hash];
      if (cbuf->res_bo[i] == res)
         return true;

      for (i = 0; i < cbuf->cres; i++) {
         if (cbuf->res_bo[i] == res) {
            cbuf->reloc_indices_hashlist[hash] = i;
            return true;
         }
      }
   }
   return false;
}

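/* Appends a resource to the cbuf's list, growing both arrays by 256 entries
 * at a time, and takes a reference so the resource outlives submission.
 */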
static void virgl_drm_add_res(struct virgl_drm_winsys *qdws,
                              struct virgl_drm_cmd_buf *cbuf,
                              struct virgl_hw_res *res)
{
   unsigned hash = res->res_handle & (sizeof(cbuf->is_handle_added)-1);

   if (cbuf->cres >= cbuf->nres) {
      unsigned new_nres = cbuf->nres + 256;
      void *new_ptr = REALLOC(cbuf->res_bo,
                              cbuf->nres * sizeof(struct virgl_hw_buf*),
                              new_nres * sizeof(struct virgl_hw_buf*));
      if (!new_ptr) {
         _debug_printf("failure to add relocation %d, %d\n", cbuf->cres, new_nres);
         return;
      }
      cbuf->res_bo = new_ptr;

      new_ptr = REALLOC(cbuf->res_hlist,
                        cbuf->nres * sizeof(uint32_t),
                        new_nres * sizeof(uint32_t));
      if (!new_ptr) {
         _debug_printf("failure to add hlist relocation %d, %d\n", cbuf->cres, cbuf->nres);
         return;
      }
      cbuf->res_hlist = new_ptr;
      cbuf->nres = new_nres;
   }

   cbuf->res_bo[cbuf->cres] = NULL;
   virgl_drm_resource_reference(&qdws->base, &cbuf->res_bo[cbuf->cres], res);
   cbuf->res_hlist[cbuf->cres] = res->bo_handle;
   cbuf->is_handle_added[hash] = TRUE;

   cbuf->reloc_indices_hashlist[hash] = cbuf->cres;
   p_atomic_inc(&res->num_cs_references);
   cbuf->cres++;
}

/* This is called after the cbuf is submitted. */
static void virgl_drm_clear_res_list(struct virgl_drm_cmd_buf *cbuf)
{
   int i;

   for (i = 0; i < cbuf->cres; i++) {
      /* mark all BOs busy after submission */
      p_atomic_set(&cbuf->res_bo[i]->maybe_busy, true);

      p_atomic_dec(&cbuf->res_bo[i]->num_cs_references);
      virgl_drm_resource_reference(cbuf->ws, &cbuf->res_bo[i], NULL);
   }

   cbuf->cres = 0;

   memset(cbuf->is_handle_added, 0, sizeof(cbuf->is_handle_added));
}

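/* Writes a resource handle into the command stream (when write_buf is set)
 * and makes sure the resource is on the cbuf's resource list exactly once.
 */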
static void virgl_drm_emit_res(struct virgl_winsys *qws,
                               struct virgl_cmd_buf *_cbuf,
                               struct virgl_hw_res *res, boolean write_buf)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
   boolean already_in_list = virgl_drm_lookup_res(cbuf, res);

   if (write_buf)
      cbuf->base.buf[cbuf->base.cdw++] = res->res_handle;

   if (!already_in_list)
      virgl_drm_add_res(qdws, cbuf, res);
}

static boolean virgl_drm_res_is_ref(struct virgl_winsys *qws,
                                    struct virgl_cmd_buf *_cbuf,
                                    struct virgl_hw_res *res)
{
   if (!p_atomic_read(&res->num_cs_references))
      return FALSE;

   return TRUE;
}

static struct virgl_cmd_buf *virgl_drm_cmd_buf_create(struct virgl_winsys *qws,
                                                      uint32_t size)
{
   struct virgl_drm_cmd_buf *cbuf;

   cbuf = CALLOC_STRUCT(virgl_drm_cmd_buf);
   if (!cbuf)
      return NULL;

   cbuf->ws = qws;

   if (!virgl_drm_alloc_res_list(cbuf, 512)) {
      FREE(cbuf);
      return NULL;
   }

   cbuf->buf = CALLOC(size, sizeof(uint32_t));
   if (!cbuf->buf) {
      FREE(cbuf->res_hlist);
      FREE(cbuf->res_bo);
      FREE(cbuf);
      return NULL;
   }

   cbuf->in_fence_fd = -1;
   cbuf->base.buf = cbuf->buf;
   return &cbuf->base;
}

static void virgl_drm_cmd_buf_destroy(struct virgl_cmd_buf *_cbuf)
{
   struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);

   virgl_drm_free_res_list(cbuf);

   FREE(cbuf->buf);
   FREE(cbuf);
}

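/* Wraps a sync-file fd in a pipe_fence_handle. External fds are duplicated
 * first; either way the fence owns the fd it ends up with and closes it
 * when the last reference goes away.
 */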
static struct pipe_fence_handle *
virgl_drm_fence_create(struct virgl_winsys *vws, int fd, bool external)
{
   struct virgl_drm_fence *fence;

   assert(vws->supports_fences);

   if (external) {
      fd = os_dupfd_cloexec(fd);
      if (fd < 0)
         return NULL;
   }

   fence = CALLOC_STRUCT(virgl_drm_fence);
   if (!fence) {
      close(fd);
      return NULL;
   }

   fence->fd = fd;
   fence->external = external;

   pipe_reference_init(&fence->reference, 1);

   return (struct pipe_fence_handle *)fence;
}

static struct pipe_fence_handle *
virgl_drm_fence_create_legacy(struct virgl_winsys *vws)
{
   struct virgl_drm_fence *fence;

   assert(!vws->supports_fences);

   fence = CALLOC_STRUCT(virgl_drm_fence);
   if (!fence)
      return NULL;
   fence->fd = -1;

   /* Resources for fences should not be from the cache, since we are basing
    * the fence status on the resource creation busy status.
    */
   fence->hw_res = virgl_drm_winsys_resource_create(vws, PIPE_BUFFER,
         PIPE_FORMAT_R8_UNORM, VIRGL_BIND_CUSTOM, 8, 1, 1, 0, 0, 0, 8, true);
   if (!fence->hw_res) {
      FREE(fence);
      return NULL;
   }

   pipe_reference_init(&fence->reference, 1);

   return (struct pipe_fence_handle *)fence;
}

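/* Submits the accumulated command stream with VIRTGPU_EXECBUFFER, optionally
 * wiring an in-fence fd into the submission and creating an out-fence for
 * the caller. On kernels without fence-fd support, a dummy busy resource
 * stands in for the fence.
 */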
static int virgl_drm_winsys_submit_cmd(struct virgl_winsys *qws,
                                       struct virgl_cmd_buf *_cbuf,
                                       struct pipe_fence_handle **fence)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
   struct drm_virtgpu_execbuffer eb;
   int ret;

   if (cbuf->base.cdw == 0)
      return 0;

   memset(&eb, 0, sizeof(struct drm_virtgpu_execbuffer));
   eb.command = (unsigned long)(void*)cbuf->buf;
   eb.size = cbuf->base.cdw * 4;
   eb.num_bo_handles = cbuf->cres;
   eb.bo_handles = (unsigned long)(void *)cbuf->res_hlist;

   eb.fence_fd = -1;
   if (qws->supports_fences) {
      if (cbuf->in_fence_fd >= 0) {
         eb.flags |= VIRTGPU_EXECBUF_FENCE_FD_IN;
         eb.fence_fd = cbuf->in_fence_fd;
      }

      if (fence != NULL)
         eb.flags |= VIRTGPU_EXECBUF_FENCE_FD_OUT;
   } else {
      assert(cbuf->in_fence_fd < 0);
   }

   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb);
   if (ret == -1)
      _debug_printf("got error from kernel - expect bad rendering %d\n", errno);
   cbuf->base.cdw = 0;

   if (qws->supports_fences) {
      if (cbuf->in_fence_fd >= 0) {
         close(cbuf->in_fence_fd);
         cbuf->in_fence_fd = -1;
      }

      if (fence != NULL && ret == 0)
         *fence = virgl_drm_fence_create(qws, eb.fence_fd, false);
   } else {
      if (fence != NULL && ret == 0)
         *fence = virgl_drm_fence_create_legacy(qws);
   }

   virgl_drm_clear_res_list(cbuf);

   return ret;
}

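/* Queries host capabilities. With the capset fix present we first ask for
 * cap set id 2 (the full union virgl_caps) and fall back to the v1 layout
 * if the kernel rejects it with EINVAL.
 */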
static int virgl_drm_get_caps(struct virgl_winsys *vws,
                              struct virgl_drm_caps *caps)
{
   struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
   struct drm_virtgpu_get_caps args;
   int ret;

   virgl_ws_fill_new_caps_defaults(caps);

   memset(&args, 0, sizeof(args));
   if (params[param_capset_fix].value) {
      /* if we have the query fix - try and get cap set id 2 first */
      args.cap_set_id = 2;
      args.size = sizeof(union virgl_caps);
   } else {
      args.cap_set_id = 1;
      args.size = sizeof(struct virgl_caps_v1);
   }
   args.addr = (unsigned long)&caps->caps;

   ret = drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &args);
   if (ret == -1 && errno == EINVAL) {
      /* Fallback to v1 */
      args.cap_set_id = 1;
      args.size = sizeof(struct virgl_caps_v1);
      ret = drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &args);
      if (ret == -1)
         return ret;
   }
   return ret;
}

static struct pipe_fence_handle *
virgl_cs_create_fence(struct virgl_winsys *vws, int fd)
{
   if (!vws->supports_fences)
      return NULL;

   return virgl_drm_fence_create(vws, fd, true);
}

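/* Waits on a fence with a timeout given in nanoseconds. Sync-file fences
 * use sync_wait() with a millisecond timeout (rounded up); legacy fences
 * poll the busy status of the dummy resource instead.
 */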
static bool virgl_fence_wait(struct virgl_winsys *vws,
                             struct pipe_fence_handle *_fence,
                             uint64_t timeout)
{
   struct virgl_drm_fence *fence = virgl_drm_fence(_fence);

   if (vws->supports_fences) {
      uint64_t timeout_ms;
      int timeout_poll;

      if (timeout == 0)
         return sync_wait(fence->fd, 0) == 0;

      timeout_ms = timeout / 1000000;
      /* round up */
      if (timeout_ms * 1000000 < timeout)
         timeout_ms++;

      timeout_poll = timeout_ms <= INT_MAX ? (int) timeout_ms : -1;

      return sync_wait(fence->fd, timeout_poll) == 0;
   }

   if (timeout == 0)
      return !virgl_drm_resource_is_busy(vws, fence->hw_res);

   if (timeout != PIPE_TIMEOUT_INFINITE) {
      int64_t start_time = os_time_get();
      timeout /= 1000;
      while (virgl_drm_resource_is_busy(vws, fence->hw_res)) {
         if (os_time_get() - start_time >= timeout)
            return false;
         os_time_sleep(10);
      }
      return true;
   }
   virgl_drm_resource_wait(vws, fence->hw_res);

   return true;
}

static void virgl_fence_reference(struct virgl_winsys *vws,
                                  struct pipe_fence_handle **dst,
                                  struct pipe_fence_handle *src)
{
   struct virgl_drm_fence *dfence = virgl_drm_fence(*dst);
   struct virgl_drm_fence *sfence = virgl_drm_fence(src);

   if (pipe_reference(&dfence->reference, &sfence->reference)) {
      if (vws->supports_fences) {
         close(dfence->fd);
      } else {
         struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
         virgl_hw_res_destroy(vdws, dfence->hw_res);
      }
      FREE(dfence);
   }

   *dst = src;
}

static void virgl_fence_server_sync(struct virgl_winsys *vws,
                                    struct virgl_cmd_buf *_cbuf,
                                    struct pipe_fence_handle *_fence)
{
   struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
   struct virgl_drm_fence *fence = virgl_drm_fence(_fence);

   if (!vws->supports_fences)
      return;

   /* if not an external fence, then nothing more to do without preemption: */
   if (!fence->external)
      return;

   sync_accumulate("virgl", &cbuf->in_fence_fd, fence->fd);
}

static int virgl_fence_get_fd(struct virgl_winsys *vws,
                              struct pipe_fence_handle *_fence)
{
   struct virgl_drm_fence *fence = virgl_drm_fence(_fence);

   if (!vws->supports_fences)
      return -1;

   return os_dupfd_cloexec(fence->fd);
}

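/* Returns the virtio-gpu DRM driver version packed as VIRGL_DRM_VERSION(),
 * or a negative errno if the version cannot be queried or the major version
 * is not the expected 0.
 */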
static int virgl_drm_get_version(int fd)
{
   int ret;
   drmVersionPtr version;

   version = drmGetVersion(fd);

   if (!version)
      ret = -EFAULT;
   else if (version->version_major != 0)
      ret = -EINVAL;
   else
      ret = VIRGL_DRM_VERSION(0, version->version_minor);

   drmFreeVersion(version);

   return ret;
}

static bool
virgl_drm_resource_cache_entry_is_busy(struct virgl_resource_cache_entry *entry,
                                       void *user_data)
{
   struct virgl_drm_winsys *qdws = user_data;
   struct virgl_hw_res *res = cache_entry_container_res(entry);

   return virgl_drm_resource_is_busy(&qdws->base, res);
}

static void
virgl_drm_resource_cache_entry_release(struct virgl_resource_cache_entry *entry,
                                       void *user_data)
{
   struct virgl_drm_winsys *qdws = user_data;
   struct virgl_hw_res *res = cache_entry_container_res(entry);

   virgl_hw_res_destroy(qdws, res);
}

static struct virgl_winsys *
virgl_drm_winsys_create(int drmFD)
{
   static const unsigned CACHE_TIMEOUT_USEC = 1000000;
   struct virgl_drm_winsys *qdws;
   int drm_version;
   int ret;

   for (uint32_t i = 0; i < ARRAY_SIZE(params); i++) {
      struct drm_virtgpu_getparam getparam = { 0 };
      uint64_t value = 0;
      getparam.param = params[i].param;
      getparam.value = (uint64_t)(uintptr_t)&value;
      ret = drmIoctl(drmFD, DRM_IOCTL_VIRTGPU_GETPARAM, &getparam);
      params[i].value = (ret == 0) ? value : 0;
   }

   if (!params[param_3d_features].value)
      return NULL;

   drm_version = virgl_drm_get_version(drmFD);
   if (drm_version < 0)
      return NULL;

   qdws = CALLOC_STRUCT(virgl_drm_winsys);
   if (!qdws)
      return NULL;

   qdws->fd = drmFD;
   virgl_resource_cache_init(&qdws->cache, CACHE_TIMEOUT_USEC,
                             virgl_drm_resource_cache_entry_is_busy,
                             virgl_drm_resource_cache_entry_release,
                             qdws);
   (void) mtx_init(&qdws->mutex, mtx_plain);
   (void) mtx_init(&qdws->bo_handles_mutex, mtx_plain);
   p_atomic_set(&qdws->blob_id, 0);

   qdws->bo_handles = util_hash_table_create_ptr_keys();
   qdws->bo_names = util_hash_table_create_ptr_keys();
   qdws->base.destroy = virgl_drm_winsys_destroy;

   qdws->base.transfer_put = virgl_bo_transfer_put;
   qdws->base.transfer_get = virgl_bo_transfer_get;
   qdws->base.resource_create = virgl_drm_winsys_resource_cache_create;
   qdws->base.resource_reference = virgl_drm_resource_reference;
   qdws->base.resource_create_from_handle = virgl_drm_winsys_resource_create_handle;
   qdws->base.resource_get_handle = virgl_drm_winsys_resource_get_handle;
   qdws->base.resource_map = virgl_drm_resource_map;
   qdws->base.resource_wait = virgl_drm_resource_wait;
   qdws->base.resource_is_busy = virgl_drm_resource_is_busy;
   qdws->base.cmd_buf_create = virgl_drm_cmd_buf_create;
   qdws->base.cmd_buf_destroy = virgl_drm_cmd_buf_destroy;
   qdws->base.submit_cmd = virgl_drm_winsys_submit_cmd;
   qdws->base.emit_res = virgl_drm_emit_res;
   qdws->base.res_is_referenced = virgl_drm_res_is_ref;

   qdws->base.cs_create_fence = virgl_cs_create_fence;
   qdws->base.fence_wait = virgl_fence_wait;
   qdws->base.fence_reference = virgl_fence_reference;
   qdws->base.fence_server_sync = virgl_fence_server_sync;
   qdws->base.fence_get_fd = virgl_fence_get_fd;
   qdws->base.get_caps = virgl_drm_get_caps;
   qdws->base.supports_fences = drm_version >= VIRGL_DRM_VERSION_FENCE_FD;
   qdws->base.supports_encoded_transfers = 1;

   qdws->base.supports_coherent = params[param_resource_blob].value &&
                                  params[param_host_visible].value;
   return &qdws->base;
}

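/* Screens are shared across all users of the same DRM fd: fd_tab maps an fd
 * to its existing pipe_screen, and the screen's refcnt tracks how many
 * creators share it.
 */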
static struct hash_table *fd_tab = NULL;
static mtx_t virgl_screen_mutex = _MTX_INITIALIZER_NP;

static void
virgl_drm_screen_destroy(struct pipe_screen *pscreen)
{
   struct virgl_screen *screen = virgl_screen(pscreen);
   boolean destroy;

   mtx_lock(&virgl_screen_mutex);
   destroy = --screen->refcnt == 0;
   if (destroy) {
      int fd = virgl_drm_winsys(screen->vws)->fd;
      _mesa_hash_table_remove_key(fd_tab, intptr_to_pointer(fd));
      close(fd);
   }
   mtx_unlock(&virgl_screen_mutex);

   if (destroy) {
      pscreen->destroy = screen->winsys_priv;
      pscreen->destroy(pscreen);
   }
}

struct pipe_screen *
virgl_drm_screen_create(int fd, const struct pipe_screen_config *config)
{
   struct pipe_screen *pscreen = NULL;

   mtx_lock(&virgl_screen_mutex);
   if (!fd_tab) {
      fd_tab = util_hash_table_create_fd_keys();
      if (!fd_tab)
         goto unlock;
   }

   pscreen = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
   if (pscreen) {
      virgl_screen(pscreen)->refcnt++;
   } else {
      struct virgl_winsys *vws;
      int dup_fd = os_dupfd_cloexec(fd);

      vws = virgl_drm_winsys_create(dup_fd);
      if (!vws) {
         close(dup_fd);
         goto unlock;
      }

      pscreen = virgl_create_screen(vws, config);
      if (pscreen) {
         _mesa_hash_table_insert(fd_tab, intptr_to_pointer(dup_fd), pscreen);

         /* Bit of a hack, to avoid circular linkage dependency,
          * ie. pipe driver having to call in to winsys, we
          * override the pipe driver's screen->destroy():
          */
         virgl_screen(pscreen)->winsys_priv = pscreen->destroy;
         pscreen->destroy = virgl_drm_screen_destroy;
      }
   }

unlock:
   mtx_unlock(&virgl_screen_mutex);
   return pscreen;
}