• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2012-2018 Rob Clark <robclark@freedesktop.org>
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21  * SOFTWARE.
22  *
23  * Authors:
24  *    Rob Clark <robclark@freedesktop.org>
25  */
26 
27 #include <unistd.h>
28 #include <sys/stat.h>
29 #include <sys/types.h>
30 
31 #include "util/os_file.h"
32 
33 #include "freedreno_drmif.h"
34 #include "freedreno_priv.h"
35 
/* Per-backend device constructors, implemented in the respective
 * backend files:
 */
struct fd_device *msm_device_new(int fd, drmVersionPtr version);
#ifdef HAVE_FREEDRENO_VIRTIO
struct fd_device *virtio_device_new(int fd, drmVersionPtr version);
#endif
40 
/* Create a new fd_device for an already-opened drm fd.  Detects which
 * kernel driver (msm, virtio_gpu, or kgsl) is behind the fd and
 * dispatches to the matching backend constructor, then performs the
 * backend-independent initialization.  Does not take ownership of the
 * fd (dev->closefd is left unset; see fd_device_new_dup()).  Returns
 * NULL on unknown/unsupported devices or backend init failure.
 */
struct fd_device *
fd_device_new(int fd)
{
   struct fd_device *dev = NULL;
   drmVersionPtr version;
   bool use_heap = false;

   /* figure out if we are kgsl or msm drm driver: */
   version = drmGetVersion(fd);
   if (!version) {
      ERROR_MSG("cannot get version: %s", strerror(errno));
      return NULL;
   }

   if (!strcmp(version->name, "msm")) {
      DEBUG_MSG("msm DRM device");
      /* Only the 1.x uabi is supported: */
      if (version->version_major != 1) {
         ERROR_MSG("unsupported version: %u.%u.%u", version->version_major,
                   version->version_minor, version->version_patchlevel);
         goto out;
      }

      dev = msm_device_new(fd, version);
#ifdef HAVE_FREEDRENO_VIRTIO
   } else if (!strcmp(version->name, "virtio_gpu")) {
      DEBUG_MSG("virtio_gpu DRM device");
      dev = virtio_device_new(fd, version);
      /* Only devices that support a hypervisor are a6xx+, so avoid the
       * extra guest<->host round trips associated with pipe creation:
       */
      use_heap = true;
#endif
#if HAVE_FREEDRENO_KGSL
   } else if (!strcmp(version->name, "kgsl")) {
      DEBUG_MSG("kgsl DRM device");
      dev = kgsl_device_new(fd);
#endif
   }

   if (!dev) {
      INFO_MSG("unsupported device: %s", version->name);
      goto out;
   }

out:
   /* version info is only needed by the backend constructors: */
   drmFreeVersion(version);

   if (!dev)
      return NULL;

   /* Backend-independent initialization: */
   p_atomic_set(&dev->refcnt, 1);
   dev->fd = fd;
   dev->handle_table =
      _mesa_hash_table_create(NULL, _mesa_hash_u32, _mesa_key_u32_equal);
   dev->name_table =
      _mesa_hash_table_create(NULL, _mesa_hash_u32, _mesa_key_u32_equal);
   fd_bo_cache_init(&dev->bo_cache, false, "bo");
   fd_bo_cache_init(&dev->ring_cache, true, "ring");

   list_inithead(&dev->deferred_submits);
   simple_mtx_init(&dev->submit_lock, mtx_plain);
   simple_mtx_init(&dev->suballoc_lock, mtx_plain);

   if (!use_heap) {
      /* Create a throw-away pipe just to probe the GPU generation: */
      struct fd_pipe *pipe = fd_pipe_new(dev, FD_PIPE_3D);

      if (!pipe)
         goto fail;

      /* Userspace fences don't appear to be reliable enough (missing some
       * cache flushes?) on older gens, so limit sub-alloc heaps to a6xx+
       * for now:
       */
      use_heap = fd_dev_gen(&pipe->dev_id) >= 6;

      fd_pipe_del(pipe);
   }

   if (use_heap) {
      dev->ring_heap = fd_bo_heap_new(dev, RING_FLAGS);
      dev->default_heap = fd_bo_heap_new(dev, 0);
   }

   return dev;

fail:
   /* dev is fully constructed at this point, so tear it down via the
    * normal refcounted path:
    */
   fd_device_del(dev);
   return NULL;
}
130 
131 /* like fd_device_new() but creates it's own private dup() of the fd
132  * which is close()d when the device is finalized.
133  */
134 struct fd_device *
fd_device_new_dup(int fd)135 fd_device_new_dup(int fd)
136 {
137    int dup_fd = os_dupfd_cloexec(fd);
138    struct fd_device *dev = fd_device_new(dup_fd);
139    if (dev)
140       dev->closefd = 1;
141    else
142       close(dup_fd);
143    return dev;
144 }
145 
146 /* Convenience helper to open the drm device and return new fd_device:
147  */
148 struct fd_device *
fd_device_open(void)149 fd_device_open(void)
150 {
151    int fd;
152 
153    fd = drmOpenWithType("msm", NULL, DRM_NODE_RENDER);
154 #ifdef HAVE_FREEDRENO_VIRTIO
155    if (fd < 0)
156       fd = drmOpenWithType("virtio_gpu", NULL, DRM_NODE_RENDER);
157 #endif
158    if (fd < 0)
159       return NULL;
160 
161    return fd_device_new(fd);
162 }
163 
/* Take a reference on the device.  Returns the device to allow
 * chaining.  Paired with fd_device_del().
 */
struct fd_device *
fd_device_ref(struct fd_device *dev)
{
   ref(&dev->refcnt);
   return dev;
}
170 
171 void
fd_device_purge(struct fd_device * dev)172 fd_device_purge(struct fd_device *dev)
173 {
174    fd_bo_cache_cleanup(&dev->bo_cache, 0);
175    fd_bo_cache_cleanup(&dev->ring_cache, 0);
176 }
177 
178 void
fd_device_del(struct fd_device * dev)179 fd_device_del(struct fd_device *dev)
180 {
181    if (!unref(&dev->refcnt))
182       return;
183 
184    assert(list_is_empty(&dev->deferred_submits));
185    assert(!dev->deferred_submits_fence);
186 
187    if (dev->suballoc_bo)
188       fd_bo_del(dev->suballoc_bo);
189 
190    if (dev->ring_heap)
191       fd_bo_heap_destroy(dev->ring_heap);
192 
193    if (dev->default_heap)
194       fd_bo_heap_destroy(dev->default_heap);
195 
196    fd_bo_cache_cleanup(&dev->bo_cache, 0);
197    fd_bo_cache_cleanup(&dev->ring_cache, 0);
198 
199    /* Needs to be after bo cache cleanup in case backend has a
200     * util_vma_heap that it destroys:
201     */
202    dev->funcs->destroy(dev);
203 
204    _mesa_hash_table_destroy(dev->handle_table, NULL);
205    _mesa_hash_table_destroy(dev->name_table, NULL);
206 
207    if (fd_device_threaded_submit(dev))
208       util_queue_destroy(&dev->submit_queue);
209 
210    if (dev->closefd)
211       close(dev->fd);
212 
213    free(dev);
214 }
215 
216 int
fd_device_fd(struct fd_device * dev)217 fd_device_fd(struct fd_device *dev)
218 {
219    return dev->fd;
220 }
221 
/* Returns the device's uabi version (set by the backend): */
enum fd_version
fd_device_version(struct fd_device *dev)
{
   return dev->version;
}
227 
/* Generates debug_get_option_libgl(), which reads the LIBGL_DEBUG env
 * var once (defaulting to false):
 */
DEBUG_GET_ONCE_BOOL_OPTION(libgl, "LIBGL_DEBUG", false)

/* Returns true if LIBGL_DEBUG is enabled in the environment: */
bool
fd_dbg(void)
{
   return debug_get_option_libgl();
}
235 
236 bool
fd_has_syncobj(struct fd_device * dev)237 fd_has_syncobj(struct fd_device *dev)
238 {
239    uint64_t value;
240    if (drmGetCap(dev->fd, DRM_CAP_SYNCOBJ, &value))
241       return false;
242    return value && dev->version >= FD_VERSION_FENCE_FD;
243 }
244