/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <strings.h>
#include <stdbool.h>
#include <assert.h>
#include <errno.h>
#include <fcntl.h>

#include <xf86drm.h>
#include <xf86atomic.h>
#include "libdrm_macros.h"
#include "libdrm_lists.h"
#include "nouveau_drm.h"

#include "nouveau.h"
#include "private.h"

#include "nvif/class.h"
#include "nvif/cl0080.h"
#include "nvif/ioctl.h"
#include "nvif/unpack.h"

drm_private FILE *nouveau_out = NULL;
drm_private uint32_t nouveau_debug = 0;

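/* One-shot initialisation of debug output: NOUVEAU_LIBDRM_DEBUG sets the
 * debug level (parsed with strtol base 0, so hex works) and
 * NOUVEAU_LIBDRM_OUT optionally redirects messages from stderr to a file.
 */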
static void
debug_init(void)
{
	static bool once = false;
	char *debug, *out;

	if (once)
		return;
	once = true;

	debug = getenv("NOUVEAU_LIBDRM_DEBUG");
	if (debug) {
		int n = strtol(debug, NULL, 0);
		if (n >= 0)
			nouveau_debug = n;
	}

	nouveau_out = stderr;
	out = getenv("NOUVEAU_LIBDRM_OUT");
	if (out) {
		FILE *fout = fopen(out, "w");
		if (fout)
			nouveau_out = fout;
	}
}

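/* Common wrapper around the DRM_NOUVEAU_NVIF ioctl: fill in the
 * nvif_ioctl_v0 header (object/owner/route/token) for the given object,
 * then forward the request to the kernel.
 */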
static int
nouveau_object_ioctl(struct nouveau_object *obj, void *data, uint32_t size)
{
	struct nouveau_drm *drm = nouveau_drm(obj);
	union {
		struct nvif_ioctl_v0 v0;
	} *args = data;
	uint32_t argc = size;
	int ret = -ENOSYS;

	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
		if (!obj->length) {
			if (obj != &drm->client)
				args->v0.object = (unsigned long)(void *)obj;
			else
				args->v0.object = 0;
			args->v0.owner = NVIF_IOCTL_V0_OWNER_ANY;
			args->v0.route = 0x00;
		} else {
			args->v0.route = 0xff;
			args->v0.token = obj->handle;
		}
	} else
		return ret;

	return drmCommandWriteRead(drm->fd, DRM_NOUVEAU_NVIF, args, argc);
}

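/* Issue an NVIF method call (NVIF_IOCTL_V0_MTHD) on an object.  The caller's
 * argument buffer is copied into the ioctl payload and copied back on return;
 * small requests use an on-stack buffer to avoid a heap allocation.  Returns
 * -ENOSYS on kernels without NVIF support.
 */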
drm_public int
nouveau_object_mthd(struct nouveau_object *obj,
		    uint32_t mthd, void *data, uint32_t size)
{
	struct nouveau_drm *drm = nouveau_drm(obj);
	struct {
		struct nvif_ioctl_v0 ioctl;
		struct nvif_ioctl_mthd_v0 mthd;
	} *args;
	uint32_t argc = sizeof(*args) + size;
	uint8_t stack[128];
	int ret;

	if (!drm->nvif)
		return -ENOSYS;

	if (argc > sizeof(stack)) {
		if (!(args = malloc(argc)))
			return -ENOMEM;
	} else {
		args = (void *)stack;
	}
	args->ioctl.version = 0;
	args->ioctl.type = NVIF_IOCTL_V0_MTHD;
	args->mthd.version = 0;
	args->mthd.method = mthd;

	memcpy(args->mthd.data, data, size);
	ret = nouveau_object_ioctl(obj, args, argc);
	memcpy(data, args->mthd.data, size);
	if (args != (void *)stack)
		free(args);
	return ret;
}

drm_public void
nouveau_object_sclass_put(struct nouveau_sclass **psclass)
{
	free(*psclass);
	*psclass = NULL;
}

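/* Query the classes supported by an object.  The kernel is asked repeatedly
 * with a growing buffer until the reported count fits, the result is copied
 * into a freshly allocated array and the number of entries returned; release
 * it with nouveau_object_sclass_put().  Pre-NVIF kernels fall back to the
 * static abi16 class list.
 */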
drm_public int
nouveau_object_sclass_get(struct nouveau_object *obj,
			  struct nouveau_sclass **psclass)
{
	struct nouveau_drm *drm = nouveau_drm(obj);
	struct {
		struct nvif_ioctl_v0 ioctl;
		struct nvif_ioctl_sclass_v0 sclass;
	} *args = NULL;
	struct nouveau_sclass *sclass;
	int ret, cnt = 0, i;
	uint32_t size;

	if (!drm->nvif)
		return abi16_sclass(obj, psclass);

	while (1) {
		size = sizeof(*args) + cnt * sizeof(args->sclass.oclass[0]);
		if (!(args = malloc(size)))
			return -ENOMEM;
		args->ioctl.version = 0;
		args->ioctl.type = NVIF_IOCTL_V0_SCLASS;
		args->sclass.version = 0;
		args->sclass.count = cnt;

		ret = nouveau_object_ioctl(obj, args, size);
		if (ret == 0 && args->sclass.count <= cnt)
			break;
		cnt = args->sclass.count;
		free(args);
		if (ret != 0)
			return ret;
	}

	if ((sclass = calloc(args->sclass.count, sizeof(*sclass)))) {
		for (i = 0; i < args->sclass.count; i++) {
			sclass[i].oclass = args->sclass.oclass[i].oclass;
			sclass[i].minver = args->sclass.oclass[i].minver;
			sclass[i].maxver = args->sclass.oclass[i].maxver;
		}
		*psclass = sclass;
		ret = args->sclass.count;
	} else {
		ret = -ENOMEM;
	}

	free(args);
	return ret;
}

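/* Return the index of the first entry in a caller-provided class list that
 * the object supports (matching oclass, version within [minver, maxver]), or
 * -ENODEV if none match.  The list must be terminated by an entry with
 * .oclass == 0.  A typical use looks like this (class numbers illustrative):
 *
 *	static const struct nouveau_mclass threed[] = {
 *		{ 0x8297, 0 },
 *		{ 0x4097, 0 },
 *		{}
 *	};
 *	int i = nouveau_object_mclass(obj, threed);
 *	if (i >= 0)
 *		oclass = threed[i].oclass;
 */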
drm_public int
nouveau_object_mclass(struct nouveau_object *obj,
		      const struct nouveau_mclass *mclass)
{
	struct nouveau_sclass *sclass;
	int ret = -ENODEV;
	int cnt, i, j;

	cnt = nouveau_object_sclass_get(obj, &sclass);
	if (cnt < 0)
		return cnt;

	for (i = 0; ret < 0 && mclass[i].oclass; i++) {
		for (j = 0; j < cnt; j++) {
			if (mclass[i].oclass == sclass[j].oclass &&
			    mclass[i].version >= sclass[j].minver &&
			    mclass[i].version <= sclass[j].maxver) {
				ret = i;
				break;
			}
		}
	}

	nouveau_object_sclass_put(&sclass);
	return ret;
}

static void
nouveau_object_fini(struct nouveau_object *obj)
{
	struct {
		struct nvif_ioctl_v0 ioctl;
		struct nvif_ioctl_del del;
	} args = {
		.ioctl.type = NVIF_IOCTL_V0_DEL,
	};

	if (obj->data) {
		abi16_delete(obj);
		free(obj->data);
		obj->data = NULL;
		return;
	}

	nouveau_object_ioctl(obj, &args, sizeof(args));
}

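/* Initialise an object: classes that the abi16 layer handles itself get
 * obj->data allocated and their abi16 constructor called, everything else is
 * created through the NVIF NVIF_IOCTL_V0_NEW ioctl when the kernel supports
 * it.  On failure the partially constructed object is torn down again.
 */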
static int
nouveau_object_init(struct nouveau_object *parent, uint32_t handle,
		    int32_t oclass, void *data, uint32_t size,
		    struct nouveau_object *obj)
{
	struct nouveau_drm *drm = nouveau_drm(parent);
	struct {
		struct nvif_ioctl_v0 ioctl;
		struct nvif_ioctl_new_v0 new;
	} *args;
	uint32_t argc = sizeof(*args) + size;
	int (*func)(struct nouveau_object *);
	int ret = -ENOSYS;

	obj->parent = parent;
	obj->handle = handle;
	obj->oclass = oclass;
	obj->length = 0;
	obj->data = NULL;

	if (!abi16_object(obj, &func) && drm->nvif) {
		if (!(args = malloc(argc)))
			return -ENOMEM;
		args->ioctl.version = 0;
		args->ioctl.type = NVIF_IOCTL_V0_NEW;
		args->new.version = 0;
		args->new.route = NVIF_IOCTL_V0_ROUTE_NVIF;
		args->new.token = (unsigned long)(void *)obj;
		args->new.object = (unsigned long)(void *)obj;
		args->new.handle = handle;
		args->new.oclass = oclass;
		memcpy(args->new.data, data, size);
		ret = nouveau_object_ioctl(parent, args, argc);
		memcpy(data, args->new.data, size);
		free(args);
	} else
	if (func) {
		obj->length = size ? size : sizeof(struct nouveau_object *);
		if (!(obj->data = malloc(obj->length)))
			return -ENOMEM;
		if (data)
			memcpy(obj->data, data, obj->length);
		*(struct nouveau_object **)obj->data = obj;

		ret = func(obj);
	}

	if (ret) {
		nouveau_object_fini(obj);
		return ret;
	}

	return 0;
}

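/* Public constructor: allocate a nouveau_object, initialise it as a child of
 * 'parent' with the given handle/class/arguments and return it in *pobj.
 * Pairs with nouveau_object_del().
 */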
drm_public int
nouveau_object_new(struct nouveau_object *parent, uint64_t handle,
		   uint32_t oclass, void *data, uint32_t length,
		   struct nouveau_object **pobj)
{
	struct nouveau_object *obj;
	int ret;

	if (!(obj = malloc(sizeof(*obj))))
		return -ENOMEM;

	ret = nouveau_object_init(parent, handle, oclass, data, length, obj);
	if (ret) {
		free(obj);
		return ret;
	}

	*pobj = obj;
	return 0;
}

drm_public void
nouveau_object_del(struct nouveau_object **pobj)
{
	struct nouveau_object *obj = *pobj;
	if (obj) {
		nouveau_object_fini(obj);
		free(obj);
		*pobj = NULL;
	}
}

drm_public void
nouveau_drm_del(struct nouveau_drm **pdrm)
{
	free(*pdrm);
	*pdrm = NULL;
}

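/* Wrap an already-open DRM fd.  The driver version is packed as
 * (major << 24) | (minor << 8) | patchlevel, and drm->nvif records whether
 * the kernel (>= 1.3.1) accepts NVIF ioctls.
 */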
drm_public int
nouveau_drm_new(int fd, struct nouveau_drm **pdrm)
{
	struct nouveau_drm *drm;
	drmVersionPtr ver;

	debug_init();

	if (!(drm = calloc(1, sizeof(*drm))))
		return -ENOMEM;
	drm->fd = fd;

	if (!(ver = drmGetVersion(fd))) {
		nouveau_drm_del(&drm);
		return -EINVAL;
	}
	*pdrm = drm;

	drm->version = (ver->version_major << 24) |
		       (ver->version_minor << 8) |
		        ver->version_patchlevel;
	drm->nvif = (drm->version >= 0x01000301);
	drmFreeVersion(ver);
	return 0;
}

/* this is the old libdrm's version of nouveau_device_wrap(), the symbol
 * is kept here to prevent AIGLX from crashing if the DDX is linked against
 * the new libdrm, but the DRI driver against the old
 */
drm_public int
nouveau_device_open_existing(struct nouveau_device **pdev, int close, int fd,
			     drm_context_t ctx)
{
	return -EACCES;
}

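/* Create the device object.  On NVIF kernels this is a real object plus an
 * NV_DEVICE_V0_INFO method call; on older kernels a fake object is filled in
 * by hand and the chipset/BO-usage details come from getparams.  In both
 * cases the VRAM/GART sizes are queried and the default placement limits
 * (80%, overridable via NOUVEAU_LIBDRM_VRAM_LIMIT_PERCENT and
 * NOUVEAU_LIBDRM_GART_LIMIT_PERCENT) are computed.
 */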
drm_public int
nouveau_device_new(struct nouveau_object *parent, int32_t oclass,
		   void *data, uint32_t size, struct nouveau_device **pdev)
{
	struct nv_device_info_v0 info = {};
	union {
		struct nv_device_v0 v0;
	} *args = data;
	uint32_t argc = size;
	struct nouveau_drm *drm = nouveau_drm(parent);
	struct nouveau_device_priv *nvdev;
	struct nouveau_device *dev;
	uint64_t v;
	char *tmp;
	int ret = -ENOSYS;

	if (oclass != NV_DEVICE ||
	    nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))
		return ret;

	if (!(nvdev = calloc(1, sizeof(*nvdev))))
		return -ENOMEM;
	dev = *pdev = &nvdev->base;
	dev->fd = -1;

	if (drm->nvif) {
		ret = nouveau_object_init(parent, 0, oclass, args, argc,
					  &dev->object);
		if (ret)
			goto done;

		info.version = 0;

		ret = nouveau_object_mthd(&dev->object, NV_DEVICE_V0_INFO,
					  &info, sizeof(info));
		if (ret)
			goto done;

		nvdev->base.chipset = info.chipset;
		nvdev->have_bo_usage = true;
	} else
	if (args->v0.device == ~0ULL) {
		nvdev->base.object.parent = &drm->client;
		nvdev->base.object.handle = ~0ULL;
		nvdev->base.object.oclass = NOUVEAU_DEVICE_CLASS;
		nvdev->base.object.length = ~0;

		ret = nouveau_getparam(dev, NOUVEAU_GETPARAM_CHIPSET_ID, &v);
		if (ret)
			goto done;
		nvdev->base.chipset = v;

		ret = nouveau_getparam(dev, NOUVEAU_GETPARAM_HAS_BO_USAGE, &v);
		if (ret == 0)
			nvdev->have_bo_usage = (v != 0);
	} else {
		/* neither the NVIF path nor the legacy default-device path
		 * applies; bail out through the common cleanup path so the
		 * partially initialised device is not leaked
		 */
		ret = -ENOSYS;
		goto done;
	}

	ret = nouveau_getparam(dev, NOUVEAU_GETPARAM_FB_SIZE, &v);
	if (ret)
		goto done;
	nvdev->base.vram_size = v;

	ret = nouveau_getparam(dev, NOUVEAU_GETPARAM_AGP_SIZE, &v);
	if (ret)
		goto done;
	nvdev->base.gart_size = v;

	tmp = getenv("NOUVEAU_LIBDRM_VRAM_LIMIT_PERCENT");
	if (tmp)
		nvdev->vram_limit_percent = atoi(tmp);
	else
		nvdev->vram_limit_percent = 80;

	nvdev->base.vram_limit =
		(nvdev->base.vram_size * nvdev->vram_limit_percent) / 100;

	tmp = getenv("NOUVEAU_LIBDRM_GART_LIMIT_PERCENT");
	if (tmp)
		nvdev->gart_limit_percent = atoi(tmp);
	else
		nvdev->gart_limit_percent = 80;

	nvdev->base.gart_limit =
		(nvdev->base.gart_size * nvdev->gart_limit_percent) / 100;

	ret = pthread_mutex_init(&nvdev->lock, NULL);
	DRMINITLISTHEAD(&nvdev->bo_list);
done:
	if (ret)
		nouveau_device_del(pdev);
	return ret;
}

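/* Compatibility entry point: create a nouveau_drm for the fd, force the
 * legacy (non-NVIF) path and open the default device.  'close' controls
 * whether nouveau_device_del() will also close the fd.
 */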
drm_public int
nouveau_device_wrap(int fd, int close, struct nouveau_device **pdev)
{
	struct nouveau_drm *drm;
	struct nouveau_device_priv *nvdev;
	int ret;

	ret = nouveau_drm_new(fd, &drm);
	if (ret)
		return ret;
	drm->nvif = false;

	ret = nouveau_device_new(&drm->client, NV_DEVICE,
				 &(struct nv_device_v0) {
					.device = ~0ULL,
				 }, sizeof(struct nv_device_v0), pdev);
	if (ret) {
		nouveau_drm_del(&drm);
		return ret;
	}

	nvdev = nouveau_device(*pdev);
	nvdev->base.fd = drm->fd;
	nvdev->base.drm_version = drm->version;
	nvdev->close = close;
	return 0;
}

drm_public int
nouveau_device_open(const char *busid, struct nouveau_device **pdev)
{
	int ret = -ENODEV, fd = drmOpen("nouveau", busid);
	if (fd >= 0) {
		ret = nouveau_device_wrap(fd, 1, pdev);
		if (ret)
			drmClose(fd);
	}
	return ret;
}

drm_public void
nouveau_device_del(struct nouveau_device **pdev)
{
	struct nouveau_device_priv *nvdev = nouveau_device(*pdev);
	if (nvdev) {
		free(nvdev->client);
		pthread_mutex_destroy(&nvdev->lock);
		if (nvdev->base.fd >= 0) {
			struct nouveau_drm *drm =
				nouveau_drm(&nvdev->base.object);
			nouveau_drm_del(&drm);
			if (nvdev->close)
				drmClose(nvdev->base.fd);
		}
		free(nvdev);
		*pdev = NULL;
	}
}

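/* Thin wrappers around the NOUVEAU_GETPARAM/SETPARAM ioctls. */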
drm_public int
nouveau_getparam(struct nouveau_device *dev, uint64_t param, uint64_t *value)
{
	struct nouveau_drm *drm = nouveau_drm(&dev->object);
	struct drm_nouveau_getparam r = { .param = param };
	int fd = drm->fd, ret =
		drmCommandWriteRead(fd, DRM_NOUVEAU_GETPARAM, &r, sizeof(r));
	*value = r.value;
	return ret;
}

drm_public int
nouveau_setparam(struct nouveau_device *dev, uint64_t param, uint64_t value)
{
	struct nouveau_drm *drm = nouveau_drm(&dev->object);
	struct drm_nouveau_setparam r = { .param = param, .value = value };
	return drmCommandWrite(drm->fd, DRM_NOUVEAU_SETPARAM, &r, sizeof(r));
}

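/* Allocate a client for the device.  Client ids are handed out from a
 * per-device array of 32-bit bitmap words (32 ids per word) that grows on
 * demand, all under the device mutex.
 */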
drm_public int
nouveau_client_new(struct nouveau_device *dev, struct nouveau_client **pclient)
{
	struct nouveau_device_priv *nvdev = nouveau_device(dev);
	struct nouveau_client_priv *pcli;
	int id = 0, i, ret = -ENOMEM;
	uint32_t *clients;

	pthread_mutex_lock(&nvdev->lock);

	for (i = 0; i < nvdev->nr_client; i++) {
		id = ffs(nvdev->client[i]) - 1;
		if (id >= 0)
			goto out;
	}

	clients = realloc(nvdev->client, sizeof(uint32_t) * (i + 1));
	if (!clients)
		goto unlock;
	nvdev->client = clients;
	nvdev->client[i] = 0;
	nvdev->nr_client++;

out:
	pcli = calloc(1, sizeof(*pcli));
	if (pcli) {
		nvdev->client[i] |= (1 << id);
		pcli->base.device = dev;
		pcli->base.id = (i * 32) + id;
		ret = 0;
	}

	*pclient = &pcli->base;

unlock:
	pthread_mutex_unlock(&nvdev->lock);
	return ret;
}

drm_public void
nouveau_client_del(struct nouveau_client **pclient)
{
	struct nouveau_client_priv *pcli = nouveau_client(*pclient);
	struct nouveau_device_priv *nvdev;
	if (pcli) {
		int id = pcli->base.id;
		nvdev = nouveau_device(pcli->base.device);
		pthread_mutex_lock(&nvdev->lock);
		nvdev->client[id / 32] &= ~(1 << (id % 32));
		pthread_mutex_unlock(&nvdev->lock);
		free(pcli->kref);
		free(pcli);
	}
}

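/* Final teardown once a bo's refcount has dropped to zero.  Shared bos (those
 * linked into the device bo_list) must drop their GEM handle with the device
 * mutex held; see the comment below for the re-import race this avoids.
 */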
static void
nouveau_bo_del(struct nouveau_bo *bo)
{
	struct nouveau_drm *drm = nouveau_drm(&bo->device->object);
	struct nouveau_device_priv *nvdev = nouveau_device(bo->device);
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);

	if (nvbo->head.next) {
		pthread_mutex_lock(&nvdev->lock);
		if (atomic_read(&nvbo->refcnt) == 0) {
			DRMLISTDEL(&nvbo->head);
			/*
			 * This bo has to be closed with the lock held because
			 * gem handles are not refcounted. If a shared bo is
			 * closed and re-opened in another thread a race
			 * against DRM_IOCTL_GEM_OPEN or drmPrimeFDToHandle
			 * might cause the bo to be closed accidentally while
			 * re-importing.
			 */
			drmCloseBufferHandle(drm->fd, bo->handle);
		}
		pthread_mutex_unlock(&nvdev->lock);
	} else {
		drmCloseBufferHandle(drm->fd, bo->handle);
	}
	if (bo->map)
		drm_munmap(bo->map, bo->size);
	free(nvbo);
}

drm_public int
nouveau_bo_new(struct nouveau_device *dev, uint32_t flags, uint32_t align,
	       uint64_t size, union nouveau_bo_config *config,
	       struct nouveau_bo **pbo)
{
	struct nouveau_bo_priv *nvbo = calloc(1, sizeof(*nvbo));
	struct nouveau_bo *bo = &nvbo->base;
	int ret;

	if (!nvbo)
		return -ENOMEM;
	atomic_set(&nvbo->refcnt, 1);
	bo->device = dev;
	bo->flags = flags;
	bo->size = size;

	ret = abi16_bo_init(bo, align, config);
	if (ret) {
		free(nvbo);
		return ret;
	}

	*pbo = bo;
	return 0;
}

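/* Wrap a GEM handle in a nouveau_bo with nvdev->lock already held.  If the
 * handle is already tracked on the device bo_list the existing bo is reused
 * (reviving it if its refcount had already hit zero); otherwise the kernel is
 * asked for the bo's details via DRM_NOUVEAU_GEM_INFO and a new tracked bo is
 * created.
 */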
static int
nouveau_bo_wrap_locked(struct nouveau_device *dev, uint32_t handle,
		       struct nouveau_bo **pbo, int name)
{
	struct nouveau_drm *drm = nouveau_drm(&dev->object);
	struct nouveau_device_priv *nvdev = nouveau_device(dev);
	struct drm_nouveau_gem_info req = { .handle = handle };
	struct nouveau_bo_priv *nvbo;
	int ret;

	DRMLISTFOREACHENTRY(nvbo, &nvdev->bo_list, head) {
		if (nvbo->base.handle == handle) {
			if (atomic_inc_return(&nvbo->refcnt) == 1) {
				/*
				 * Uh oh, this bo is dead and someone else
				 * will free it, but because refcnt is
				 * now non-zero fortunately they won't
				 * call the ioctl to close the bo.
				 *
				 * Remove this bo from the list so other
				 * calls to nouveau_bo_wrap_locked will
				 * see our replacement nvbo.
				 */
				DRMLISTDEL(&nvbo->head);
				if (!name)
					name = nvbo->name;
				break;
			}

			*pbo = &nvbo->base;
			return 0;
		}
	}

	ret = drmCommandWriteRead(drm->fd, DRM_NOUVEAU_GEM_INFO,
				  &req, sizeof(req));
	if (ret)
		return ret;

	nvbo = calloc(1, sizeof(*nvbo));
	if (nvbo) {
		atomic_set(&nvbo->refcnt, 1);
		nvbo->base.device = dev;
		abi16_bo_info(&nvbo->base, &req);
		nvbo->name = name;
		DRMLISTADD(&nvbo->head, &nvdev->bo_list);
		*pbo = &nvbo->base;
		return 0;
	}

	return -ENOMEM;
}

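/* Add a bo to the device-wide bo_list the first time it becomes shareable
 * (flink'd or exported as a dma-buf), so later imports of the same handle
 * find it instead of creating a duplicate wrapper.
 */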
static void
nouveau_bo_make_global(struct nouveau_bo_priv *nvbo)
{
	if (!nvbo->head.next) {
		struct nouveau_device_priv *nvdev = nouveau_device(nvbo->base.device);
		pthread_mutex_lock(&nvdev->lock);
		if (!nvbo->head.next)
			DRMLISTADD(&nvbo->head, &nvdev->bo_list);
		pthread_mutex_unlock(&nvdev->lock);
	}
}

drm_public int
nouveau_bo_wrap(struct nouveau_device *dev, uint32_t handle,
		struct nouveau_bo **pbo)
{
	struct nouveau_device_priv *nvdev = nouveau_device(dev);
	int ret;
	pthread_mutex_lock(&nvdev->lock);
	ret = nouveau_bo_wrap_locked(dev, handle, pbo, 0);
	pthread_mutex_unlock(&nvdev->lock);
	return ret;
}

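/* Look up a bo by its global flink name.  Known names are resolved from the
 * device bo_list; unknown ones go through DRM_IOCTL_GEM_OPEN.  Everything
 * runs under the device mutex so the handle cannot be closed by a concurrent
 * nouveau_bo_del() while it is being wrapped.
 */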
drm_public int
nouveau_bo_name_ref(struct nouveau_device *dev, uint32_t name,
		    struct nouveau_bo **pbo)
{
	struct nouveau_drm *drm = nouveau_drm(&dev->object);
	struct nouveau_device_priv *nvdev = nouveau_device(dev);
	struct nouveau_bo_priv *nvbo;
	struct drm_gem_open req = { .name = name };
	int ret;

	pthread_mutex_lock(&nvdev->lock);
	DRMLISTFOREACHENTRY(nvbo, &nvdev->bo_list, head) {
		if (nvbo->name == name) {
			ret = nouveau_bo_wrap_locked(dev, nvbo->base.handle,
						     pbo, name);
			pthread_mutex_unlock(&nvdev->lock);
			return ret;
		}
	}

	ret = drmIoctl(drm->fd, DRM_IOCTL_GEM_OPEN, &req);
	if (ret == 0) {
		ret = nouveau_bo_wrap_locked(dev, req.handle, pbo, name);
	}

	pthread_mutex_unlock(&nvdev->lock);
	return ret;
}

drm_public int
nouveau_bo_name_get(struct nouveau_bo *bo, uint32_t *name)
{
	struct drm_gem_flink req = { .handle = bo->handle };
	struct nouveau_drm *drm = nouveau_drm(&bo->device->object);
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);

	*name = nvbo->name;
	if (!*name) {
		int ret = drmIoctl(drm->fd, DRM_IOCTL_GEM_FLINK, &req);

		if (ret) {
			*name = 0;
			return ret;
		}
		nvbo->name = *name = req.name;

		nouveau_bo_make_global(nvbo);
	}
	return 0;
}

drm_public void
nouveau_bo_ref(struct nouveau_bo *bo, struct nouveau_bo **pref)
{
	struct nouveau_bo *ref = *pref;
	if (bo) {
		atomic_inc(&nouveau_bo(bo)->refcnt);
	}
	if (ref) {
		if (atomic_dec_and_test(&nouveau_bo(ref)->refcnt))
			nouveau_bo_del(ref);
	}
	*pref = bo;
}

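/* Import a dma-buf fd.  Any bo previously held in *bo is released first, then
 * the fd is converted to a GEM handle and wrapped under the device mutex to
 * avoid racing against a concurrent close of the same handle.
 */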
drm_public int
nouveau_bo_prime_handle_ref(struct nouveau_device *dev, int prime_fd,
			    struct nouveau_bo **bo)
{
	struct nouveau_drm *drm = nouveau_drm(&dev->object);
	struct nouveau_device_priv *nvdev = nouveau_device(dev);
	int ret;
	unsigned int handle;

	nouveau_bo_ref(NULL, bo);

	pthread_mutex_lock(&nvdev->lock);
	ret = drmPrimeFDToHandle(drm->fd, prime_fd, &handle);
	if (ret == 0) {
		ret = nouveau_bo_wrap_locked(dev, handle, bo, 0);
	}
	pthread_mutex_unlock(&nvdev->lock);
	return ret;
}

drm_public int
nouveau_bo_set_prime(struct nouveau_bo *bo, int *prime_fd)
{
	struct nouveau_drm *drm = nouveau_drm(&bo->device->object);
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	int ret;

	ret = drmPrimeHandleToFD(drm->fd, nvbo->base.handle, DRM_CLOEXEC, prime_fd);
	if (ret)
		return ret;

	nouveau_bo_make_global(nvbo);
	return 0;
}

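/* Wait for the GPU to finish with a bo before CPU access.  Any pushbuf from
 * this client still referencing the bo is kicked first; the wait itself is
 * skipped when the bo was never shared and no writes are involved, and is
 * otherwise done via DRM_NOUVEAU_GEM_CPU_PREP.
 */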
drm_public int
nouveau_bo_wait(struct nouveau_bo *bo, uint32_t access,
		struct nouveau_client *client)
{
	struct nouveau_drm *drm = nouveau_drm(&bo->device->object);
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	struct drm_nouveau_gem_cpu_prep req;
	struct nouveau_pushbuf *push;
	int ret = 0;

	if (!(access & NOUVEAU_BO_RDWR))
		return 0;

	push = cli_push_get(client, bo);
	if (push && push->channel)
		nouveau_pushbuf_kick(push, push->channel);

	if (!nvbo->head.next && !(nvbo->access & NOUVEAU_BO_WR) &&
	    !(access & NOUVEAU_BO_WR))
		return 0;

	req.handle = bo->handle;
	req.flags = 0;
	if (access & NOUVEAU_BO_WR)
		req.flags |= NOUVEAU_GEM_CPU_PREP_WRITE;
	if (access & NOUVEAU_BO_NOBLOCK)
		req.flags |= NOUVEAU_GEM_CPU_PREP_NOWAIT;

	ret = drmCommandWrite(drm->fd, DRM_NOUVEAU_GEM_CPU_PREP,
			      &req, sizeof(req));
	if (ret == 0)
		nvbo->access = 0;
	return ret;
}

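/* Map a bo into the CPU's address space (cached in bo->map) and then wait for
 * the GPU according to 'access', as nouveau_bo_wait() would.  A minimal usage
 * sketch:
 *
 *	if (!nouveau_bo_map(bo, NOUVEAU_BO_WR, client))
 *		memset(bo->map, 0, bo->size);
 */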
drm_public int
nouveau_bo_map(struct nouveau_bo *bo, uint32_t access,
	       struct nouveau_client *client)
{
	struct nouveau_drm *drm = nouveau_drm(&bo->device->object);
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	if (bo->map == NULL) {
		bo->map = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
				   MAP_SHARED, drm->fd, nvbo->map_handle);
		if (bo->map == MAP_FAILED) {
			bo->map = NULL;
			return -errno;
		}
	}
	return nouveau_bo_wait(bo, access, client);
}