/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */

/*
 * Copyright (C) 2011 Texas Instruments, Inc
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <rob@ti.com>
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <stdlib.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <errno.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>
#include <pthread.h>

#include <libdrm_macros.h>
#include <xf86drm.h>
#include <xf86atomic.h>

#include "omap_drm.h"
#include "omap_drmif.h"

#define __round_mask(x, y) ((__typeof__(x))((y)-1))
#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
#define PAGE_SIZE 4096

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static void * dev_table;

struct omap_device {
	int fd;
	atomic_t refcnt;

	/* The handle_table is used to track GEM bo handles associated w/
	 * this fd.  This is needed, in particular, when importing
	 * dmabuf's because we don't want multiple 'struct omap_bo's
	 * floating around with the same handle.  Otherwise, when the
	 * first one is omap_bo_del()'d the handle becomes no longer
	 * valid, and the remaining 'struct omap_bo's are left pointing
	 * to an invalid handle (and possibly a GEM bo that is already
	 * free'd).
	 */
	void *handle_table;
};
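
/*
 * Illustrative sketch (not part of the original file): because of the
 * handle_table above, importing the same underlying buffer twice through
 * this fd yields the same 'struct omap_bo' rather than two objects sharing
 * one GEM handle.  With a hypothetical, already-obtained dmabuf fd:
 *
 *	struct omap_bo *a = omap_bo_from_dmabuf(dev, dmabuf_fd);
 *	struct omap_bo *b = omap_bo_from_dmabuf(dev, dmabuf_fd);
 *	// a == b: the second import just takes another reference
 *	omap_bo_del(a);		// handle stays valid, 'b' is still usable
 *	omap_bo_del(b);		// last reference: GEM handle actually closed
 */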

/* a GEM buffer object allocated from the DRM device */
struct omap_bo {
	struct omap_device	*dev;
	void		*map;		/* userspace mmap'ing (if there is one) */
	uint32_t	size;
	uint32_t	handle;
	uint32_t	name;		/* flink global handle (DRI2 name) */
	uint64_t	offset;		/* offset to mmap() */
	int		fd;		/* dmabuf handle */
	atomic_t	refcnt;
};

static struct omap_device * omap_device_new_impl(int fd)
{
	struct omap_device *dev = calloc(sizeof(*dev), 1);
	if (!dev)
		return NULL;
	dev->fd = fd;
	atomic_set(&dev->refcnt, 1);
	dev->handle_table = drmHashCreate();
	return dev;
}

struct omap_device * omap_device_new(int fd)
{
	struct omap_device *dev = NULL;

	pthread_mutex_lock(&table_lock);

	if (!dev_table)
		dev_table = drmHashCreate();

	if (drmHashLookup(dev_table, fd, (void **)&dev)) {
		/* not found, create new device */
		dev = omap_device_new_impl(fd);
		drmHashInsert(dev_table, fd, dev);
	} else {
		/* found, just incr refcnt */
		dev = omap_device_ref(dev);
	}

	pthread_mutex_unlock(&table_lock);

	return dev;
}
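
/*
 * Usage sketch (illustrative, not part of the original file): devices are
 * refcounted per DRM fd, so calling omap_device_new() twice on the same fd
 * returns the same device with an extra reference.  Assuming a hypothetical
 * omapdrm node path:
 *
 *	int drm_fd = open("/dev/dri/card0", O_RDWR);
 *	struct omap_device *dev = omap_device_new(drm_fd);
 *	// ... use dev ...
 *	omap_device_del(dev);	// drops the reference taken above
 *	close(drm_fd);		// the fd itself remains owned by the caller
 */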

struct omap_device * omap_device_ref(struct omap_device *dev)
{
	atomic_inc(&dev->refcnt);
	return dev;
}

void omap_device_del(struct omap_device *dev)
{
	if (!atomic_dec_and_test(&dev->refcnt))
		return;
	pthread_mutex_lock(&table_lock);
	drmHashDestroy(dev->handle_table);
	drmHashDelete(dev_table, dev->fd);
	pthread_mutex_unlock(&table_lock);
	free(dev);
}

int
omap_get_param(struct omap_device *dev, uint64_t param, uint64_t *value)
{
	struct drm_omap_param req = {
			.param = param,
	};
	int ret;

	ret = drmCommandWriteRead(dev->fd, DRM_OMAP_GET_PARAM, &req, sizeof(req));
	if (ret) {
		return ret;
	}

	*value = req.value;

	return 0;
}

int
omap_set_param(struct omap_device *dev, uint64_t param, uint64_t value)
{
	struct drm_omap_param req = {
			.param = param,
			.value = value,
	};
	return drmCommandWrite(dev->fd, DRM_OMAP_SET_PARAM, &req, sizeof(req));
}

/* lookup a buffer from its handle, call w/ table_lock held: */
static struct omap_bo * lookup_bo(struct omap_device *dev,
		uint32_t handle)
{
	struct omap_bo *bo = NULL;
	if (!drmHashLookup(dev->handle_table, handle, (void **)&bo)) {
		/* found, incr refcnt and return: */
		bo = omap_bo_ref(bo);
	}
	return bo;
}

/* allocate a new buffer object, call w/ table_lock held */
static struct omap_bo * bo_from_handle(struct omap_device *dev,
		uint32_t handle)
{
	struct omap_bo *bo = calloc(sizeof(*bo), 1);
	if (!bo) {
		struct drm_gem_close req = {
				.handle = handle,
		};
		drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
		return NULL;
	}
	bo->dev = omap_device_ref(dev);
	bo->handle = handle;
	bo->fd = -1;
	atomic_set(&bo->refcnt, 1);
	/* add ourselves to the handle table: */
	drmHashInsert(dev->handle_table, handle, bo);
	return bo;
}

/* allocate a new buffer object */
static struct omap_bo * omap_bo_new_impl(struct omap_device *dev,
		union omap_gem_size size, uint32_t flags)
{
	struct omap_bo *bo = NULL;
	struct drm_omap_gem_new req = {
			.size = size,
			.flags = flags,
	};

	if (size.bytes == 0) {
		goto fail;
	}

	if (drmCommandWriteRead(dev->fd, DRM_OMAP_GEM_NEW, &req, sizeof(req))) {
		goto fail;
	}

	pthread_mutex_lock(&table_lock);
	bo = bo_from_handle(dev, req.handle);
	pthread_mutex_unlock(&table_lock);

	if (!bo) {
		/* bo_from_handle() already closed the GEM handle on failure */
		goto fail;
	}

	if (flags & OMAP_BO_TILED) {
		bo->size = round_up(size.tiled.width, PAGE_SIZE) * size.tiled.height;
	} else {
		bo->size = size.bytes;
	}

	return bo;

fail:
	free(bo);
	return NULL;
}


/* allocate a new (un-tiled) buffer object */
struct omap_bo *
omap_bo_new(struct omap_device *dev, uint32_t size, uint32_t flags)
{
	union omap_gem_size gsize = {
			.bytes = size,
	};
	if (flags & OMAP_BO_TILED) {
		return NULL;
	}
	return omap_bo_new_impl(dev, gsize, flags);
}

/* allocate a new (tiled) buffer object */
struct omap_bo *
omap_bo_new_tiled(struct omap_device *dev, uint32_t width,
		  uint32_t height, uint32_t flags)
{
	union omap_gem_size gsize = {
			.tiled = {
				.width = width,
				.height = height,
			},
	};
	if (!(flags & OMAP_BO_TILED)) {
		return NULL;
	}
	return omap_bo_new_impl(dev, gsize, flags);
}
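
/*
 * Allocation sketch (illustrative, not part of the original file): untiled
 * buffers are sized in bytes, while tiled buffers are described by width and
 * height together with one of the OMAP_BO_TILED_{8,16,32} flags selecting
 * the element size:
 *
 *	struct omap_bo *buf = omap_bo_new(dev, 4096, OMAP_BO_WC);
 *	struct omap_bo *fb  = omap_bo_new_tiled(dev, 1920, 1080,
 *			OMAP_BO_TILED_32 | OMAP_BO_WC);
 *	// ...
 *	omap_bo_del(fb);
 *	omap_bo_del(buf);
 */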

struct omap_bo *omap_bo_ref(struct omap_bo *bo)
{
	atomic_inc(&bo->refcnt);
	return bo;
}

/* get buffer info */
static int get_buffer_info(struct omap_bo *bo)
{
	struct drm_omap_gem_info req = {
			.handle = bo->handle,
	};
	int ret = drmCommandWriteRead(bo->dev->fd, DRM_OMAP_GEM_INFO,
			&req, sizeof(req));
	if (ret) {
		return ret;
	}

	/* really all we need for now is mmap offset */
	bo->offset = req.offset;
	bo->size = req.size;

	return 0;
}

/* import a buffer object from DRI2 name */
struct omap_bo *
omap_bo_from_name(struct omap_device *dev, uint32_t name)
{
	struct omap_bo *bo = NULL;
	struct drm_gem_open req = {
			.name = name,
	};

	pthread_mutex_lock(&table_lock);

	if (drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req)) {
		goto fail;
	}

	bo = lookup_bo(dev, req.handle);
	if (!bo) {
		bo = bo_from_handle(dev, req.handle);
		if (!bo) {
			goto fail;
		}
		bo->name = name;
	}

	pthread_mutex_unlock(&table_lock);

	return bo;

fail:
	pthread_mutex_unlock(&table_lock);
	free(bo);
	return NULL;
}

/* import a buffer from dmabuf fd, does not take ownership of the
 * fd so caller should close() the fd when it is otherwise done
 * with it (even if it is still using the 'struct omap_bo *')
 */
struct omap_bo *
omap_bo_from_dmabuf(struct omap_device *dev, int fd)
{
	struct omap_bo *bo = NULL;
	struct drm_prime_handle req = {
			.fd = fd,
	};
	int ret;

	pthread_mutex_lock(&table_lock);

	ret = drmIoctl(dev->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &req);
	if (ret) {
		goto fail;
	}

	bo = lookup_bo(dev, req.handle);
	if (!bo) {
		bo = bo_from_handle(dev, req.handle);
	}

	pthread_mutex_unlock(&table_lock);

	return bo;

fail:
	pthread_mutex_unlock(&table_lock);
	free(bo);
	return NULL;
}
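
/*
 * Import sketch (illustrative, not part of the original file): since the
 * import above does not take ownership of the dmabuf fd, the caller keeps
 * responsibility for closing it.  With a hypothetical 'dmabuf_fd':
 *
 *	struct omap_bo *bo = omap_bo_from_dmabuf(dev, dmabuf_fd);
 *	close(dmabuf_fd);	// safe: the GEM handle keeps the buffer alive
 *	// ... use bo ...
 *	omap_bo_del(bo);
 */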

/* destroy a buffer object */
void omap_bo_del(struct omap_bo *bo)
{
	if (!bo) {
		return;
	}

	if (!atomic_dec_and_test(&bo->refcnt))
		return;

	if (bo->map) {
		munmap(bo->map, bo->size);
	}

	if (bo->fd >= 0) {
		close(bo->fd);
	}

	if (bo->handle) {
		struct drm_gem_close req = {
				.handle = bo->handle,
		};
		pthread_mutex_lock(&table_lock);
		drmHashDelete(bo->dev->handle_table, bo->handle);
		drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
		pthread_mutex_unlock(&table_lock);
	}

	omap_device_del(bo->dev);

	free(bo);
}

/* get the global flink/DRI2 buffer name */
int omap_bo_get_name(struct omap_bo *bo, uint32_t *name)
{
	if (!bo->name) {
		struct drm_gem_flink req = {
				.handle = bo->handle,
		};
		int ret;

		ret = drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_FLINK, &req);
		if (ret) {
			return ret;
		}

		bo->name = req.name;
	}

	*name = bo->name;

	return 0;
}

uint32_t omap_bo_handle(struct omap_bo *bo)
{
	return bo->handle;
}

/* caller owns the dmabuf fd that is returned and is responsible
 * to close() it when done
 */
int omap_bo_dmabuf(struct omap_bo *bo)
{
	if (bo->fd < 0) {
		struct drm_prime_handle req = {
				.handle = bo->handle,
				.flags = DRM_CLOEXEC,
		};
		int ret;

		ret = drmIoctl(bo->dev->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &req);
		if (ret) {
			return ret;
		}

		bo->fd = req.fd;
	}
	return dup(bo->fd);
}
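
/*
 * Export sketch (illustrative, not part of the original file): the fd
 * returned above is a dup()'d copy owned by the caller, so it is closed
 * independently of the bo:
 *
 *	int fd = omap_bo_dmabuf(bo);
 *	if (fd >= 0) {
 *		// ... hand fd to another device or process ...
 *		close(fd);	// does not affect the bo itself
 *	}
 */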

uint32_t omap_bo_size(struct omap_bo *bo)
{
	if (!bo->size) {
		get_buffer_info(bo);
	}
	return bo->size;
}

void *omap_bo_map(struct omap_bo *bo)
{
	if (!bo->map) {
		if (!bo->offset) {
			get_buffer_info(bo);
		}

		bo->map = mmap(0, bo->size, PROT_READ | PROT_WRITE,
				MAP_SHARED, bo->dev->fd, bo->offset);
		if (bo->map == MAP_FAILED) {
			bo->map = NULL;
		}
	}
	return bo->map;
}

int omap_bo_cpu_prep(struct omap_bo *bo, enum omap_gem_op op)
{
	struct drm_omap_gem_cpu_prep req = {
			.handle = bo->handle,
			.op = op,
	};
	return drmCommandWrite(bo->dev->fd,
			DRM_OMAP_GEM_CPU_PREP, &req, sizeof(req));
}

int omap_bo_cpu_fini(struct omap_bo *bo, enum omap_gem_op op)
{
	struct drm_omap_gem_cpu_fini req = {
			.handle = bo->handle,
			.op = op,
			.nregions = 0,
	};
	return drmCommandWrite(bo->dev->fd,
			DRM_OMAP_GEM_CPU_FINI, &req, sizeof(req));
}
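
/*
 * CPU-access sketch (illustrative, not part of the original file): CPU reads
 * or writes to a mapped bo are bracketed by the prep/fini calls above so the
 * kernel can synchronize the access:
 *
 *	void *ptr = omap_bo_map(bo);
 *	if (ptr && !omap_bo_cpu_prep(bo, OMAP_GEM_WRITE)) {
 *		memset(ptr, 0, omap_bo_size(bo));
 *		omap_bo_cpu_fini(bo, OMAP_GEM_WRITE);
 *	}
 */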