/*
 * Copyright (C) 2014 Etnaviv Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Christian Gmeiner <christian.gmeiner@gmail.com>
 */

#include "etnaviv_priv.h"
#include "etnaviv_drmif.h"

drm_private pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
drm_private void bo_del(struct etna_bo *bo);

/* set buffer name, and add to table, call w/ table_lock held: */
static void set_name(struct etna_bo *bo, uint32_t name)
{
	bo->name = name;
	/* add ourself into the name table: */
	drmHashInsert(bo->dev->name_table, name, bo);
}

/* Called under table_lock */
drm_private void bo_del(struct etna_bo *bo)
{
	if (bo->map)
		drm_munmap(bo->map, bo->size);

	if (bo->name)
		drmHashDelete(bo->dev->name_table, bo->name);

	if (bo->handle) {
		drmHashDelete(bo->dev->handle_table, bo->handle);
		drmCloseBufferHandle(bo->dev->fd, bo->handle);
	}

	free(bo);
}

/* lookup a buffer from its handle, call w/ table_lock held: */
static struct etna_bo *lookup_bo(void *tbl, uint32_t handle)
{
	struct etna_bo *bo = NULL;

	if (!drmHashLookup(tbl, handle, (void **)&bo)) {
		/* found, incr refcnt and return: */
		bo = etna_bo_ref(bo);

		/* don't break the bucket if this bo was found in one */
		list_delinit(&bo->list);
	}

	return bo;
}

/* allocate a new buffer object, call w/ table_lock held */
static struct etna_bo *bo_from_handle(struct etna_device *dev,
		uint32_t size, uint32_t handle, uint32_t flags)
{
	struct etna_bo *bo = calloc(sizeof(*bo), 1);

	if (!bo) {
		drmCloseBufferHandle(dev->fd, handle);
		return NULL;
	}

	bo->dev = etna_device_ref(dev);
	bo->size = size;
	bo->handle = handle;
	bo->flags = flags;
	atomic_set(&bo->refcnt, 1);
	list_inithead(&bo->list);
	/* add ourselves to the handle table: */
	drmHashInsert(dev->handle_table, handle, bo);

	return bo;
}

/* allocate a new (un-tiled) buffer object */
drm_public struct etna_bo *etna_bo_new(struct etna_device *dev, uint32_t size,
		uint32_t flags)
{
	struct etna_bo *bo;
	int ret;
	struct drm_etnaviv_gem_new req = {
			.flags = flags,
	};

	bo = etna_bo_cache_alloc(&dev->bo_cache, &size, flags);
	if (bo)
		return bo;

	req.size = size;
	ret = drmCommandWriteRead(dev->fd, DRM_ETNAVIV_GEM_NEW,
				  &req, sizeof(req));
	if (ret)
		return NULL;

	pthread_mutex_lock(&table_lock);
	bo = bo_from_handle(dev, size, req.handle, flags);
	if (bo)
		bo->reuse = 1;
	pthread_mutex_unlock(&table_lock);

	return bo;
}
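
/* Example (illustrative sketch, not part of the upstream file): basic
 * allocation and release; "dev" is assumed to be a valid struct etna_device
 * and ETNA_BO_WC one of the cache-mode flags from etnaviv_drmif.h:
 *
 *	struct etna_bo *bo = etna_bo_new(dev, 4096, ETNA_BO_WC);
 *	if (!bo)
 *		return -ENOMEM;
 *	...
 *	etna_bo_del(bo);
 */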

/* take a reference on a buffer object */
drm_public struct etna_bo *etna_bo_ref(struct etna_bo *bo)
{
	atomic_inc(&bo->refcnt);

	return bo;
}

/* get buffer info */
static int get_buffer_info(struct etna_bo *bo)
{
	int ret;
	struct drm_etnaviv_gem_info req = {
		.handle = bo->handle,
	};

	ret = drmCommandWriteRead(bo->dev->fd, DRM_ETNAVIV_GEM_INFO,
				  &req, sizeof(req));
	if (ret) {
		return ret;
	}

	/* really all we need for now is mmap offset */
	bo->offset = req.offset;

	return 0;
}

/* import a buffer object from DRI2 name */
drm_public struct etna_bo *etna_bo_from_name(struct etna_device *dev,
		uint32_t name)
{
	struct etna_bo *bo;
	struct drm_gem_open req = {
		.name = name,
	};

	pthread_mutex_lock(&table_lock);

	/* check name table first, to see if bo is already open: */
	bo = lookup_bo(dev->name_table, name);
	if (bo)
		goto out_unlock;

	if (drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req)) {
		ERROR_MSG("gem-open failed: %s", strerror(errno));
		goto out_unlock;
	}

	bo = lookup_bo(dev->handle_table, req.handle);
	if (bo)
		goto out_unlock;

	bo = bo_from_handle(dev, req.size, req.handle, 0);
	if (bo)
		set_name(bo, name);

out_unlock:
	pthread_mutex_unlock(&table_lock);

	return bo;
}
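
/* Example (illustrative sketch, not part of the upstream file): sharing a
 * buffer via a flink/DRI2 name; the exporter obtains the name with
 * etna_bo_get_name() and the importer opens it here. send_name_to_peer()
 * stands in for whatever IPC mechanism the application uses:
 *
 *	uint32_t name;
 *	if (!etna_bo_get_name(bo, &name))
 *		send_name_to_peer(name);
 *
 *	// in the importing process:
 *	struct etna_bo *imported = etna_bo_from_name(dev, name);
 */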

/* import a buffer from dmabuf fd, does not take ownership of the
 * fd so caller should close() the fd when it is otherwise done
 * with it (even if it is still using the 'struct etna_bo *')
 */
drm_public struct etna_bo *etna_bo_from_dmabuf(struct etna_device *dev, int fd)
{
	struct etna_bo *bo;
	int ret, size;
	uint32_t handle;

	/* take the lock before calling drmPrimeFDToHandle to avoid
	 * racing against etna_bo_del, which might invalidate the
	 * returned handle.
	 */
	pthread_mutex_lock(&table_lock);

	ret = drmPrimeFDToHandle(dev->fd, fd, &handle);
	if (ret) {
		pthread_mutex_unlock(&table_lock);
		return NULL;
	}

	bo = lookup_bo(dev->handle_table, handle);
	if (bo)
		goto out_unlock;

	/* lseek() to the end to learn the bo size, then rewind so the
	 * caller's fd is not left pointing at EOF:
	 */
	size = lseek(fd, 0, SEEK_END);
	lseek(fd, 0, SEEK_SET);

	bo = bo_from_handle(dev, size, handle, 0);

out_unlock:
	pthread_mutex_unlock(&table_lock);

	return bo;
}
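
/* Example (illustrative sketch, not part of the upstream file): importing a
 * dma-buf fd received from another process or driver; because this function
 * does not take ownership of the fd, the caller still close()s it:
 *
 *	struct etna_bo *bo = etna_bo_from_dmabuf(dev, fd);
 *	close(fd);		// bo, if non-NULL, remains valid after this
 *	if (!bo)
 *		return -EINVAL;
 */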

/* destroy a buffer object */
drm_public void etna_bo_del(struct etna_bo *bo)
{
	struct etna_device *dev;

	if (!bo)
		return;

	dev = bo->dev;

	if (!atomic_dec_and_test(&bo->refcnt))
		return;

	pthread_mutex_lock(&table_lock);

	if (bo->reuse && (etna_bo_cache_free(&dev->bo_cache, bo) == 0))
		goto out;

	bo_del(bo);
	etna_device_del_locked(dev);
out:
	pthread_mutex_unlock(&table_lock);
}

/* get the global flink/DRI2 buffer name */
drm_public int etna_bo_get_name(struct etna_bo *bo, uint32_t *name)
{
	if (!bo->name) {
		struct drm_gem_flink req = {
			.handle = bo->handle,
		};
		int ret;

		ret = drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_FLINK, &req);
		if (ret) {
			return ret;
		}

		pthread_mutex_lock(&table_lock);
		set_name(bo, req.name);
		pthread_mutex_unlock(&table_lock);
		bo->reuse = 0;
	}

	*name = bo->name;

	return 0;
}

/* get the GEM handle of a buffer object */
drm_public uint32_t etna_bo_handle(struct etna_bo *bo)
{
	return bo->handle;
}

/* caller owns the dmabuf fd that is returned and is responsible
 * to close() it when done
 */
drm_public int etna_bo_dmabuf(struct etna_bo *bo)
{
	int ret, prime_fd;

	ret = drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
				 &prime_fd);
	if (ret) {
		ERROR_MSG("failed to get dmabuf fd: %d", ret);
		return ret;
	}

	bo->reuse = 0;

	return prime_fd;
}
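
/* Example (illustrative sketch, not part of the upstream file): exporting a
 * buffer as a dma-buf fd; here the fd is owned by the caller, so it must be
 * close()d once the consumer no longer needs it:
 *
 *	int fd = etna_bo_dmabuf(bo);
 *	if (fd < 0)
 *		return fd;
 *	// ... hand fd to the consumer (e.g. over a socket) ...
 *	close(fd);
 */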

/* get the size of the buffer object */
drm_public uint32_t etna_bo_size(struct etna_bo *bo)
{
	return bo->size;
}

/* map the buffer into the CPU address space (the mapping is cached in the bo) */
drm_public void *etna_bo_map(struct etna_bo *bo)
{
	if (!bo->map) {
		if (!bo->offset) {
			/* bail out if we cannot query the mmap offset */
			if (get_buffer_info(bo))
				return NULL;
		}

		bo->map = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
				   MAP_SHARED, bo->dev->fd, bo->offset);
		if (bo->map == MAP_FAILED) {
			ERROR_MSG("mmap failed: %s", strerror(errno));
			bo->map = NULL;
		}
	}

	return bo->map;
}

/* wait for pending GPU access to the buffer before CPU access */
drm_public int etna_bo_cpu_prep(struct etna_bo *bo, uint32_t op)
{
	struct drm_etnaviv_gem_cpu_prep req = {
		.handle = bo->handle,
		.op = op,
	};

	/* 5 second timeout (in ns) for the wait */
	get_abs_timeout(&req.timeout, 5000000000);

	return drmCommandWrite(bo->dev->fd, DRM_ETNAVIV_GEM_CPU_PREP,
			       &req, sizeof(req));
}

/* signal that CPU access to the buffer is finished */
drm_public void etna_bo_cpu_fini(struct etna_bo *bo)
{
	struct drm_etnaviv_gem_cpu_fini req = {
		.handle = bo->handle,
	};

	drmCommandWrite(bo->dev->fd, DRM_ETNAVIV_GEM_CPU_FINI,
			&req, sizeof(req));
}

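/* Example (illustrative sketch, not part of the upstream file): typical CPU
 * access pattern, bracketing the access with etna_bo_cpu_prep()/
 * etna_bo_cpu_fini() so the kernel can synchronize against the GPU;
 * ETNA_PREP_WRITE is one of the prep flags from etnaviv_drmif.h:
 *
 *	void *ptr = etna_bo_map(bo);
 *	if (ptr && !etna_bo_cpu_prep(bo, ETNA_PREP_WRITE)) {
 *		memcpy(ptr, data, size);
 *		etna_bo_cpu_fini(bo);
 *	}
 */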