/*
 * Copyright © 2012 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 *      Rob Clark <rob.clark@linaro.org>
 *
 */

#include <linux/export.h>
#include <linux/dma-buf.h>
#include <drm/drmP.h>
#include <drm/drm_gem.h>

#include "drm_internal.h"

/*
 * DMA-BUF/GEM Object references and lifetime overview:
 *
 * On export, the dma_buf holds a reference to the exporting GEM
 * object. It takes this reference in handle_to_fd_ioctl, when it
 * first calls .prime_export and stores the exporting GEM object in
 * the dma_buf priv. This reference is released when the dma_buf
 * object goes away in the driver's .release function.
 *
 * On import, the importing GEM object holds a reference to the
 * dma_buf (which in turn holds a reference to the exporting GEM object).
 * It takes that reference in the fd_to_handle ioctl: it calls
 * dma_buf_get, creates an attachment to the dma_buf and stores the
 * attachment in the GEM object. When the imported object is destroyed,
 * the attachment is destroyed as well and the reference to the dma_buf
 * is dropped.
 *
 * Thus the chain of references always flows in one direction
 * (avoiding loops): importing_gem -> dmabuf -> exporting_gem
 *
 * Self-importing: if userspace is using PRIME as a replacement for flink,
 * then it will get an fd->handle request for a GEM object that it created.
 * Drivers should detect this situation and return back the GEM object
 * from the dma-buf private. PRIME will do this automatically for drivers
 * that use the drm_gem_prime_{import,export} helpers.
 */
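
/*
 * A driver opting in to these helpers typically wires them into its
 * &drm_driver as below. This is a minimal sketch: the "foo" driver is
 * hypothetical, while the helper functions are the ones defined in this
 * file:
 *
 *	static struct drm_driver foo_driver = {
 *		.driver_features	= DRIVER_GEM | DRIVER_PRIME,
 *		.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
 *		.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
 *		.gem_prime_export	= drm_gem_prime_export,
 *		.gem_prime_import	= drm_gem_prime_import,
 *	};
 */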

struct drm_prime_member {
	struct list_head entry;
	struct dma_buf *dma_buf;
	uint32_t handle;
};

struct drm_prime_attachment {
	struct sg_table *sgt;
	enum dma_data_direction dir;
};

static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
				    struct dma_buf *dma_buf, uint32_t handle)
{
	struct drm_prime_member *member;

	member = kmalloc(sizeof(*member), GFP_KERNEL);
	if (!member)
		return -ENOMEM;

	get_dma_buf(dma_buf);
	member->dma_buf = dma_buf;
	member->handle = handle;
	list_add(&member->entry, &prime_fpriv->head);
	return 0;
}

static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
						      uint32_t handle)
{
	struct drm_prime_member *member;

	list_for_each_entry(member, &prime_fpriv->head, entry) {
		if (member->handle == handle)
			return member->dma_buf;
	}

	return NULL;
}

static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
				       struct dma_buf *dma_buf,
				       uint32_t *handle)
{
	struct drm_prime_member *member;

	list_for_each_entry(member, &prime_fpriv->head, entry) {
		if (member->dma_buf == dma_buf) {
			*handle = member->handle;
			return 0;
		}
	}
	return -ENOENT;
}

static int drm_gem_map_attach(struct dma_buf *dma_buf,
			      struct device *target_dev,
			      struct dma_buf_attachment *attach)
{
	struct drm_prime_attachment *prime_attach;
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL);
	if (!prime_attach)
		return -ENOMEM;

	prime_attach->dir = DMA_NONE;
	attach->priv = prime_attach;

	if (!dev->driver->gem_prime_pin)
		return 0;

	return dev->driver->gem_prime_pin(obj);
}

static void drm_gem_map_detach(struct dma_buf *dma_buf,
			       struct dma_buf_attachment *attach)
{
	struct drm_prime_attachment *prime_attach = attach->priv;
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;
	struct sg_table *sgt;

	if (dev->driver->gem_prime_unpin)
		dev->driver->gem_prime_unpin(obj);

	if (!prime_attach)
		return;

	sgt = prime_attach->sgt;
	if (sgt) {
		if (prime_attach->dir != DMA_NONE)
			dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
					prime_attach->dir);
		sg_free_table(sgt);
	}

	kfree(sgt);
	kfree(prime_attach);
	attach->priv = NULL;
}

void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
					struct dma_buf *dma_buf)
{
	struct drm_prime_member *member, *safe;

	list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
		if (member->dma_buf == dma_buf) {
			dma_buf_put(dma_buf);
			list_del(&member->entry);
			kfree(member);
		}
	}
}

static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
					    enum dma_data_direction dir)
{
	struct drm_prime_attachment *prime_attach = attach->priv;
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct sg_table *sgt;

	if (WARN_ON(dir == DMA_NONE || !prime_attach))
		return ERR_PTR(-EINVAL);

	/* return the cached mapping when possible */
	if (prime_attach->dir == dir)
		return prime_attach->sgt;

	/*
	 * two mappings with different directions for the same attachment are
	 * not allowed
	 */
	if (WARN_ON(prime_attach->dir != DMA_NONE))
		return ERR_PTR(-EBUSY);

	sgt = obj->dev->driver->gem_prime_get_sg_table(obj);

	if (!IS_ERR(sgt)) {
		if (!dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir)) {
			sg_free_table(sgt);
			kfree(sgt);
			sgt = ERR_PTR(-ENOMEM);
		} else {
			prime_attach->sgt = sgt;
			prime_attach->dir = dir;
		}
	}

	return sgt;
}

static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
				  struct sg_table *sgt,
				  enum dma_data_direction dir)
{
	/* nothing to be done here */
}

/**
 * drm_gem_dmabuf_release - dma_buf release implementation for GEM
 * @dma_buf: buffer to be released
 *
 * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
 * must use this in their dma_buf ops structure as the release callback.
 */
void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;

	/* drop the reference the export fd holds */
	drm_gem_object_unreference_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_dmabuf_release);

static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	return dev->driver->gem_prime_vmap(obj);
}

static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	dev->driver->gem_prime_vunmap(obj, vaddr);
}

static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
					unsigned long page_num)
{
	return NULL;
}

static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
					 unsigned long page_num, void *addr)
{

}

static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf,
				 unsigned long page_num)
{
	return NULL;
}

static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
				  unsigned long page_num, void *addr)
{

}

static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
			       struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	if (!dev->driver->gem_prime_mmap)
		return -ENOSYS;

	return dev->driver->gem_prime_mmap(obj, vma);
}

static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
	.attach = drm_gem_map_attach,
	.detach = drm_gem_map_detach,
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.kmap = drm_gem_dmabuf_kmap,
	.kmap_atomic = drm_gem_dmabuf_kmap_atomic,
	.kunmap = drm_gem_dmabuf_kunmap,
	.kunmap_atomic = drm_gem_dmabuf_kunmap_atomic,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};

/**
 * DOC: PRIME Helpers
 *
 * Drivers can implement @gem_prime_export and @gem_prime_import in terms of
 * simpler APIs by using the helper functions @drm_gem_prime_export and
 * @drm_gem_prime_import.  These functions implement dma-buf support in terms of
 * six lower-level driver callbacks:
 *
 * Export callbacks:
 *
 *  - @gem_prime_pin (optional): prepare a GEM object for exporting
 *
 *  - @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages
 *
 *  - @gem_prime_vmap: vmap a buffer exported by your driver
 *
 *  - @gem_prime_vunmap: vunmap a buffer exported by your driver
 *
 *  - @gem_prime_mmap (optional): mmap a buffer exported by your driver
 *
 * Import callback:
 *
 *  - @gem_prime_import_sg_table (import): produce a GEM object from another
 *    driver's scatter/gather table
 */
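
/*
 * As a minimal sketch, a hypothetical "foo" driver implementing these six
 * callbacks would populate its &drm_driver roughly as follows (all foo_*
 * names are assumptions, not part of this file):
 *
 *	.gem_prime_pin			= foo_gem_prime_pin,
 *	.gem_prime_get_sg_table		= foo_gem_prime_get_sg_table,
 *	.gem_prime_vmap			= foo_gem_prime_vmap,
 *	.gem_prime_vunmap		= foo_gem_prime_vunmap,
 *	.gem_prime_mmap			= foo_gem_prime_mmap,
 *	.gem_prime_import_sg_table	= foo_gem_prime_import_sg_table,
 */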

/**
 * drm_gem_prime_export - helper library implementation of the export callback
 * @dev: drm_device to export from
 * @obj: GEM object to export
 * @flags: flags like DRM_CLOEXEC
 *
 * This is the implementation of the gem_prime_export callback for GEM drivers
 * using the PRIME helpers.
 */
struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
				     struct drm_gem_object *obj,
				     int flags)
{
	struct dma_buf_export_info exp_info = {
		.exp_name = KBUILD_MODNAME, /* white lie for debug */
		.owner = dev->driver->fops->owner,
		.ops = &drm_gem_prime_dmabuf_ops,
		.size = obj->size,
		.flags = flags,
		.priv = obj,
	};

	if (dev->driver->gem_prime_res_obj)
		exp_info.resv = dev->driver->gem_prime_res_obj(obj);

	return dma_buf_export(&exp_info);
}
EXPORT_SYMBOL(drm_gem_prime_export);

static struct dma_buf *export_and_register_object(struct drm_device *dev,
						  struct drm_gem_object *obj,
						  uint32_t flags)
{
	struct dma_buf *dmabuf;

	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		dmabuf = ERR_PTR(-ENOENT);
		return dmabuf;
	}

	dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		return dmabuf;
	}

	/*
	 * Note that callers do not need to clean up the export cache
	 * since the check for obj->handle_count guarantees that someone
	 * will clean it up.
	 */
	obj->dma_buf = dmabuf;
	get_dma_buf(obj->dma_buf);
	/* Grab a new ref since the caller's ref is now owned by the dma-buf */
	drm_gem_object_reference(obj);

	return dmabuf;
}

/**
 * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
 * @dev: dev to export the buffer from
 * @file_priv: drm file-private structure
 * @handle: buffer handle to export
 * @flags: flags like DRM_CLOEXEC
 * @prime_fd: pointer to storage for the fd id of the created dma-buf
 *
 * This is the PRIME export function which GEM drivers must use to ensure
 * correct lifetime management of the underlying GEM object. The actual
 * exporting from GEM object to a dma-buf is done through the
 * gem_prime_export driver callback.
 */
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
			       struct drm_file *file_priv, uint32_t handle,
			       uint32_t flags,
			       int *prime_fd)
{
	struct drm_gem_object *obj;
	int ret = 0;
	struct dma_buf *dmabuf;

	mutex_lock(&file_priv->prime.lock);
	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj) {
		ret = -ENOENT;
		goto out_unlock;
	}

	dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
	if (dmabuf) {
		get_dma_buf(dmabuf);
		goto out_have_handle;
	}

	mutex_lock(&dev->object_name_lock);
	/* re-export the original imported object */
	if (obj->import_attach) {
		dmabuf = obj->import_attach->dmabuf;
		get_dma_buf(dmabuf);
		goto out_have_obj;
	}

	if (obj->dma_buf) {
		get_dma_buf(obj->dma_buf);
		dmabuf = obj->dma_buf;
		goto out_have_obj;
	}

	dmabuf = export_and_register_object(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		ret = PTR_ERR(dmabuf);
		mutex_unlock(&dev->object_name_lock);
		goto out;
	}
out_have_obj:
	/*
	 * If we've exported this buffer then cheat and add it to the import list
	 * so we get the correct handle back. We must do this under the
	 * protection of dev->object_name_lock to ensure that a racing gem close
	 * ioctl doesn't fail to remove this buffer handle from the cache.
	 */
	ret = drm_prime_add_buf_handle(&file_priv->prime,
				       dmabuf, handle);
	mutex_unlock(&dev->object_name_lock);
	if (ret)
		goto fail_put_dmabuf;

out_have_handle:
	ret = dma_buf_fd(dmabuf, flags);
	/*
	 * We must _not_ remove the buffer from the handle cache since the newly
	 * created dma buf is already linked in the global obj->dma_buf pointer,
	 * and that is invariant as long as a userspace gem handle exists.
	 * Closing the handle will clean out the cache anyway, so we don't leak.
	 */
	if (ret < 0) {
		goto fail_put_dmabuf;
	} else {
		*prime_fd = ret;
		ret = 0;
	}

	goto out;

fail_put_dmabuf:
	dma_buf_put(dmabuf);
out:
	drm_gem_object_unreference_unlocked(obj);
out_unlock:
	mutex_unlock(&file_priv->prime.lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);

/**
 * drm_gem_prime_import - helper library implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 *
 * This is the implementation of the gem_prime_import callback for GEM drivers
 * using the PRIME helpers.
 */
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct drm_gem_object *obj;
	int ret;

	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead of
			 * the f_count of the dmabuf.
			 */
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	if (!dev->driver->gem_prime_import_sg_table)
		return ERR_PTR(-EINVAL);

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_prime_import);

/**
 * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
 * @dev: dev to import the buffer into
 * @file_priv: drm file-private structure
 * @prime_fd: fd id of the dma-buf which should be imported
 * @handle: pointer to storage for the handle of the imported buffer object
 *
 * This is the PRIME import function which GEM drivers must use to ensure
 * correct lifetime management of the underlying GEM object. The actual
 * importing of a GEM object from the dma-buf is done through the
 * gem_prime_import driver callback.
 */
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
			       struct drm_file *file_priv, int prime_fd,
			       uint32_t *handle)
{
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	int ret;

	dma_buf = dma_buf_get(prime_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	mutex_lock(&file_priv->prime.lock);

	ret = drm_prime_lookup_buf_handle(&file_priv->prime,
			dma_buf, handle);
	if (ret == 0)
		goto out_put;

	/* never seen this one, need to import */
	mutex_lock(&dev->object_name_lock);
	obj = dev->driver->gem_prime_import(dev, dma_buf);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto out_unlock;
	}

	if (obj->dma_buf) {
		WARN_ON(obj->dma_buf != dma_buf);
	} else {
		obj->dma_buf = dma_buf;
		get_dma_buf(dma_buf);
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		goto out_put;

	ret = drm_prime_add_buf_handle(&file_priv->prime,
			dma_buf, *handle);
	if (ret)
		goto fail;

	mutex_unlock(&file_priv->prime.lock);

	dma_buf_put(dma_buf);

	return 0;

fail:
	/* hmm, if driver attached, we are relying on the free-object path
	 * to detach.. which seems ok..
	 */
	drm_gem_handle_delete(file_priv, *handle);
out_unlock:
	mutex_unlock(&dev->object_name_lock);
out_put:
	dma_buf_put(dma_buf);
	mutex_unlock(&file_priv->prime.lock);
	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);

int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;
	uint32_t flags;

	if (!drm_core_check_feature(dev, DRIVER_PRIME))
		return -EINVAL;

	if (!dev->driver->prime_handle_to_fd)
		return -ENOSYS;

	/* check flags are valid */
	if (args->flags & ~DRM_CLOEXEC)
		return -EINVAL;

	/* we only want to pass DRM_CLOEXEC which is == O_CLOEXEC */
	flags = args->flags & DRM_CLOEXEC;

	return dev->driver->prime_handle_to_fd(dev, file_priv,
			args->handle, flags, &args->fd);
}

int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_PRIME))
		return -EINVAL;

	if (!dev->driver->prime_fd_to_handle)
		return -ENOSYS;

	return dev->driver->prime_fd_to_handle(dev, file_priv,
			args->fd, &args->handle);
}

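/*
 * From userspace these handlers are reached via the
 * DRM_IOCTL_PRIME_HANDLE_TO_FD and DRM_IOCTL_PRIME_FD_TO_HANDLE ioctls.
 * A rough sketch of the export side, with error handling elided (drm_fd
 * and gem_handle are assumed to exist already):
 *
 *	struct drm_prime_handle args = {
 *		.handle = gem_handle,
 *		.flags = DRM_CLOEXEC,
 *	};
 *
 *	ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
 *	args.fd now holds a dma-buf fd that can be passed to another device.
 */
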
/**
 * drm_prime_pages_to_sg - converts a page array into an sg list
 * @pages: pointer to the array of page pointers to convert
 * @nr_pages: length of the page vector
 *
 * This helper creates an sg table object from a set of pages. The driver
 * is responsible for mapping the pages into the importer's address space
 * for use with dma_buf itself.
 */
struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
{
	struct sg_table *sg = NULL;
	int ret;

	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!sg) {
		ret = -ENOMEM;
		goto out;
	}

	/* widen nr_pages before shifting so the byte count cannot overflow
	 * 32-bit arithmetic for buffers of 4 GiB and larger
	 */
	ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
				(unsigned long)nr_pages << PAGE_SHIFT,
				GFP_KERNEL);
	if (ret)
		goto out;

	return sg;
out:
	kfree(sg);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_prime_pages_to_sg);
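
/*
 * A typical use (sketch): a driver whose buffers are backed by a page
 * array can implement its @gem_prime_get_sg_table callback with this
 * helper. The foo_* names below are hypothetical:
 *
 *	static struct sg_table *foo_gem_prime_get_sg_table(struct drm_gem_object *obj)
 *	{
 *		struct foo_gem_object *bo = to_foo_gem_object(obj);
 *
 *		return drm_prime_pages_to_sg(bo->pages, bo->num_pages);
 *	}
 */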

/**
 * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
 * @sgt: scatter-gather table to convert
 * @pages: array of page pointers to store the page array in
 * @addrs: optional array to store the dma bus address of each page
 * @max_pages: size of both the passed-in arrays
 *
 * Exports an sg table into an array of pages and addresses. This is currently
 * required by the TTM driver in order to do correct fault handling.
 */
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
				     dma_addr_t *addrs, int max_pages)
{
	unsigned count;
	struct scatterlist *sg;
	struct page *page;
	u32 len;
	int pg_index;
	dma_addr_t addr;

	pg_index = 0;
	for_each_sg(sgt->sgl, sg, sgt->nents, count) {
		len = sg->length;
		page = sg_page(sg);
		addr = sg_dma_address(sg);

		while (len > 0) {
			if (WARN_ON(pg_index >= max_pages))
				return -1;
			pages[pg_index] = page;
			if (addrs)
				addrs[pg_index] = addr;

			page++;
			addr += PAGE_SIZE;
			len -= PAGE_SIZE;
			pg_index++;
		}
	}
	return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);

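/*
 * Sketch of a caller: a TTM-based driver can use this after import to
 * fill the page and DMA-address arrays backing its ttm_tt. The field
 * names below are illustrative assumptions, not a fixed API:
 *
 *	ret = drm_prime_sg_to_page_addr_arrays(sgt, ttm->pages,
 *					       ttm->dma_address,
 *					       ttm->num_pages);
 *	if (ret)
 *		return ret;
 */
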
/**
 * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
 * @obj: GEM object which was created from a dma-buf
 * @sg: the sg-table which was pinned at import time
 *
 * This is the cleanup function which GEM drivers need to call when they use
 * @drm_gem_prime_import to import dma-bufs.
 */
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;

	attach = obj->import_attach;
	if (sg)
		dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	/* remove the reference */
	dma_buf_put(dma_buf);
}
EXPORT_SYMBOL(drm_prime_gem_destroy);

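/*
 * Sketch of the expected call site: the driver's free-object path invokes
 * this for imported objects (the foo_* names are hypothetical):
 *
 *	static void foo_gem_free_object(struct drm_gem_object *obj)
 *	{
 *		struct foo_gem_object *bo = to_foo_gem_object(obj);
 *
 *		if (obj->import_attach)
 *			drm_prime_gem_destroy(obj, bo->sgt);
 *		drm_gem_object_release(obj);
 *		kfree(bo);
 *	}
 */
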
void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
{
	INIT_LIST_HEAD(&prime_fpriv->head);
	mutex_init(&prime_fpriv->lock);
}

void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
	/* by now drm_gem_release should've made sure the list is empty */
	WARN_ON(!list_empty(&prime_fpriv->head));
}