/*
 * Copyright (C) 2012-2018 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "os/os_mman.h"

#include "freedreno_drmif.h"
#include "freedreno_priv.h"

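/* table_lock serializes access to the device's handle_table and
 * name_table; the helpers below note when it must be held.
 */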
pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
void bo_del(struct fd_bo *bo);

/* set buffer name, and add to table, call w/ table_lock held: */
static void set_name(struct fd_bo *bo, uint32_t name)
{
	bo->name = name;
	/* add ourself into the name table: */
	_mesa_hash_table_insert(bo->dev->name_table, &bo->name, bo);
}

/* lookup a buffer, call w/ table_lock held: */
static struct fd_bo * lookup_bo(struct hash_table *tbl, uint32_t key)
{
	struct fd_bo *bo = NULL;
	struct hash_entry *entry = _mesa_hash_table_search(tbl, &key);
	if (entry) {
		/* found, incr refcnt and return: */
		bo = fd_bo_ref(entry->data);

		/* don't break the bucket if this bo was found in one */
		list_delinit(&bo->list);
	}
	return bo;
}

/* allocate a new buffer object, call w/ table_lock held */
static struct fd_bo * bo_from_handle(struct fd_device *dev,
		uint32_t size, uint32_t handle)
{
	struct fd_bo *bo;

	bo = dev->funcs->bo_from_handle(dev, size, handle);
	if (!bo) {
		struct drm_gem_close req = {
				.handle = handle,
		};
		drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
		return NULL;
	}
	bo->dev = dev;
	bo->size = size;
	bo->handle = handle;
	bo->iova = bo->funcs->iova(bo);
	bo->flags = FD_RELOC_FLAGS_INIT;

	p_atomic_set(&bo->refcnt, 1);
	list_inithead(&bo->list);
	/* add ourself into the handle table: */
	_mesa_hash_table_insert(dev->handle_table, &bo->handle, bo);
	return bo;
}

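/* allocate a new buffer object, trying the given bo cache first and
 * falling back to a fresh kernel allocation; call with no locks held
 * (takes table_lock internally):
 */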
static struct fd_bo *
bo_new(struct fd_device *dev, uint32_t size, uint32_t flags,
		struct fd_bo_cache *cache)
{
	struct fd_bo *bo = NULL;
	uint32_t handle;
	int ret;

	bo = fd_bo_cache_alloc(cache, &size, flags);
	if (bo)
		return bo;

	ret = dev->funcs->bo_new_handle(dev, size, flags, &handle);
	if (ret)
		return NULL;

	pthread_mutex_lock(&table_lock);
	bo = bo_from_handle(dev, size, handle);
	pthread_mutex_unlock(&table_lock);

	VG_BO_ALLOC(bo);

	return bo;
}

struct fd_bo *
_fd_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags)
{
	struct fd_bo *bo = bo_new(dev, size, flags, &dev->bo_cache);
	if (bo)
		bo->bo_reuse = BO_CACHE;
	return bo;
}

void
_fd_bo_set_name(struct fd_bo *bo, const char *fmt, va_list ap)
{
	bo->funcs->set_name(bo, fmt, ap);
}

/* internal function to allocate bo's that use the ringbuffer cache
 * instead of the normal bo_cache.  Since cmdstream bo's get vmap'd
 * on the kernel side, which is expensive, we want to re-use cmdstream
 * bo's for cmdstream and not for unrelated purposes.
 */
struct fd_bo *
fd_bo_new_ring(struct fd_device *dev, uint32_t size)
{
	uint32_t flags = DRM_FREEDRENO_GEM_GPUREADONLY;
	struct fd_bo *bo = bo_new(dev, size, flags, &dev->ring_cache);
	if (bo) {
		bo->bo_reuse = RING_CACHE;
		bo->flags |= FD_RELOC_DUMP;
		fd_bo_set_name(bo, "cmdstream");
	}
	return bo;
}

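/* lookup or import a bo from a GEM handle; the returned bo (if any)
 * holds a new reference.  Call with no locks held (takes table_lock
 * internally):
 */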
struct fd_bo *
fd_bo_from_handle(struct fd_device *dev, uint32_t handle, uint32_t size)
{
	struct fd_bo *bo = NULL;

	pthread_mutex_lock(&table_lock);

	bo = lookup_bo(dev->handle_table, handle);
	if (bo)
		goto out_unlock;

	bo = bo_from_handle(dev, size, handle);

	VG_BO_ALLOC(bo);

out_unlock:
	pthread_mutex_unlock(&table_lock);

	return bo;
}

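/* import a dmabuf fd as a bo; returns NULL on failure.  Example usage
 * (an illustrative sketch; "dmabuf_fd" is an assumption, not part of
 * this file):
 *
 *    struct fd_bo *bo = fd_bo_from_dmabuf(dev, dmabuf_fd);
 *    if (bo) {
 *       close(dmabuf_fd);   // the GEM handle keeps the buffer alive
 *       ...
 *       fd_bo_del(bo);      // drop our reference when done
 *    }
 */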
struct fd_bo *
fd_bo_from_dmabuf(struct fd_device *dev, int fd)
{
	int ret, size;
	uint32_t handle;
	struct fd_bo *bo;

	pthread_mutex_lock(&table_lock);
	ret = drmPrimeFDToHandle(dev->fd, fd, &handle);
	if (ret) {
		pthread_mutex_unlock(&table_lock);
		return NULL;
	}

	bo = lookup_bo(dev->handle_table, handle);
	if (bo)
		goto out_unlock;

	/* lseek() to get bo size */
	size = lseek(fd, 0, SEEK_END);
	lseek(fd, 0, SEEK_CUR);

	bo = bo_from_handle(dev, size, handle);

	VG_BO_ALLOC(bo);

out_unlock:
	pthread_mutex_unlock(&table_lock);

	return bo;
}

struct fd_bo * fd_bo_from_name(struct fd_device *dev, uint32_t name)
{
	struct drm_gem_open req = {
			.name = name,
	};
	struct fd_bo *bo;

	pthread_mutex_lock(&table_lock);

	/* check name table first, to see if bo is already open: */
	bo = lookup_bo(dev->name_table, name);
	if (bo)
		goto out_unlock;

	if (drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req)) {
		ERROR_MSG("gem-open failed: %s", strerror(errno));
		goto out_unlock;
	}

	bo = lookup_bo(dev->handle_table, req.handle);
	if (bo)
		goto out_unlock;

	bo = bo_from_handle(dev, req.size, req.handle);
	if (bo) {
		set_name(bo, name);
		VG_BO_ALLOC(bo);
	}

out_unlock:
	pthread_mutex_unlock(&table_lock);

	return bo;
}

void
fd_bo_mark_for_dump(struct fd_bo *bo)
{
	bo->flags |= FD_RELOC_DUMP;
}

uint64_t fd_bo_get_iova(struct fd_bo *bo)
{
	/* ancient kernels did not support this */
	assert(bo->iova != 0);
	return bo->iova;
}

struct fd_bo * fd_bo_ref(struct fd_bo *bo)
{
	p_atomic_inc(&bo->refcnt);
	return bo;
}

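/* drop a reference, freeing (or returning to the bo cache) on the last
 * one.  Typical pattern (an illustrative sketch, not code from this
 * file):
 *
 *    struct fd_bo *mine = fd_bo_ref(bo);   // take shared ownership
 *    ...
 *    fd_bo_del(mine);                      // each owner drops one ref
 */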
void fd_bo_del(struct fd_bo *bo)
{
	struct fd_device *dev = bo->dev;

	if (!atomic_dec_and_test(&bo->refcnt))
		return;

	pthread_mutex_lock(&table_lock);

	if ((bo->bo_reuse == BO_CACHE) && (fd_bo_cache_free(&dev->bo_cache, bo) == 0))
		goto out;
	if ((bo->bo_reuse == RING_CACHE) && (fd_bo_cache_free(&dev->ring_cache, bo) == 0))
		goto out;

	bo_del(bo);

out:
	pthread_mutex_unlock(&table_lock);
}

/* Called under table_lock */
void bo_del(struct fd_bo *bo)
{
	VG_BO_FREE(bo);

	if (bo->map)
		os_munmap(bo->map, bo->size);

	/* TODO probably bo's in bucket list get removed from
	 * handle table??
	 */

	if (bo->handle) {
		struct drm_gem_close req = {
				.handle = bo->handle,
		};
		_mesa_hash_table_remove_key(bo->dev->handle_table, &bo->handle);
		if (bo->name)
			_mesa_hash_table_remove_key(bo->dev->name_table, &bo->name);
		drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
	}

	bo->funcs->destroy(bo);
}

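/* get (creating via flink if needed) the bo's global name, for sharing
 * across processes; a flink'd bo is never returned to the cache:
 */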
int fd_bo_get_name(struct fd_bo *bo, uint32_t *name)
{
	if (!bo->name) {
		struct drm_gem_flink req = {
				.handle = bo->handle,
		};
		int ret;

		ret = drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_FLINK, &req);
		if (ret) {
			return ret;
		}

		pthread_mutex_lock(&table_lock);
		set_name(bo, req.name);
		pthread_mutex_unlock(&table_lock);
		bo->bo_reuse = NO_CACHE;
	}

	*name = bo->name;

	return 0;
}

uint32_t fd_bo_handle(struct fd_bo *bo)
{
	bo->bo_reuse = NO_CACHE;
	return bo->handle;
}

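/* export the bo as a dmabuf fd; the caller owns the returned fd, and
 * the bo is no longer eligible for cache reuse once shared:
 */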
int fd_bo_dmabuf(struct fd_bo *bo)
{
	int ret, prime_fd;

	ret = drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
			&prime_fd);
	if (ret) {
		ERROR_MSG("failed to get dmabuf fd: %d", ret);
		return ret;
	}

	bo->bo_reuse = NO_CACHE;

	return prime_fd;
}

uint32_t fd_bo_size(struct fd_bo *bo)
{
	return bo->size;
}

void * fd_bo_map(struct fd_bo *bo)
{
	if (!bo->map) {
		uint64_t offset;
		int ret;

		ret = bo->funcs->offset(bo, &offset);
		if (ret) {
			return NULL;
		}

		bo->map = os_mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
				bo->dev->fd, offset);
		if (bo->map == MAP_FAILED) {
			ERROR_MSG("mmap failed: %s", strerror(errno));
			bo->map = NULL;
		}
	}
	return bo->map;
}

/* a bit odd to take the pipe as an arg, but it's a, umm, quirk of kgsl.. */
int fd_bo_cpu_prep(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op)
{
	return bo->funcs->cpu_prep(bo, pipe, op);
}

void fd_bo_cpu_fini(struct fd_bo *bo)
{
	bo->funcs->cpu_fini(bo);
}

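/* Example of CPU access to a bo's contents (an illustrative sketch; it
 * assumes the fd_bo_new() wrapper from freedreno_drmif.h and a valid
 * "dev"/"pipe", which are not part of this file):
 *
 *    struct fd_bo *bo = fd_bo_new(dev, 0x1000, 0, "scratch");
 *    uint32_t *ptr = fd_bo_map(bo);
 *    if (ptr) {
 *       fd_bo_cpu_prep(bo, pipe, DRM_FREEDRENO_PREP_WRITE);  // wait for GPU
 *       ptr[0] = 0xdeadbeef;
 *       fd_bo_cpu_fini(bo);
 *    }
 *    fd_bo_del(bo);
 */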