/*
 * Copyright © 2012 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 *      Rob Clark <rob.clark@linaro.org>
 *
 */

#include <linux/export.h>
#include <linux/dma-buf.h>
#include <linux/rbtree.h>

#include <drm/drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>

#include "drm_internal.h"

/**
 * DOC: overview and lifetime rules
 *
 * Similar to GEM global names, PRIME file descriptors are also used to share
 * buffer objects across processes. They offer additional security: as file
 * descriptors must be explicitly sent over UNIX domain sockets to be shared
 * between applications, they can't be guessed like the globally unique GEM
 * names.
 *
 * Drivers that support the PRIME API implement the
 * &drm_driver.prime_handle_to_fd and &drm_driver.prime_fd_to_handle
 * operations. GEM based drivers must use drm_gem_prime_handle_to_fd() and
 * drm_gem_prime_fd_to_handle() to implement these. For GEM based drivers the
 * actual driver interface is provided through the &drm_gem_object_funcs.export
 * and &drm_driver.gem_prime_import hooks.
 *
 * &dma_buf_ops implementations for GEM drivers are all individually exported
 * for drivers which need to overwrite or reimplement some of them.
 *
 * Reference Counting for GEM Drivers
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * On export the &dma_buf holds a reference to the exported buffer object,
 * usually a &drm_gem_object. It takes this reference in the PRIME_HANDLE_TO_FD
 * IOCTL, when it first calls &drm_gem_object_funcs.export
 * and stores the exporting GEM object in the &dma_buf.priv field. This
 * reference needs to be released when the final reference to the &dma_buf
 * itself is dropped and its &dma_buf_ops.release function is called.  For
 * GEM-based drivers, the &dma_buf should be exported using
 * drm_gem_dmabuf_export() and then released by drm_gem_dmabuf_release().
 *
 * Thus the chain of references always flows in one direction, avoiding loops:
 * importing GEM object -> dma-buf -> exported GEM bo. A further complication
 * is the pair of lookup caches for import and export. These are required to
 * guarantee that any given object will always have only one unique userspace
 * handle. This allows userspace to detect duplicated imports, since some GEM
 * drivers fail command submissions if a given buffer object is listed more
 * than once. These import and export caches in &drm_prime_file_private only
 * retain a weak reference, which is cleaned up when the corresponding object is
 * released.
 *
 * Self-importing: if userspace is using PRIME as a replacement for flink then
 * it will get a fd->handle request for a GEM object that it created. Drivers
 * should detect this situation and return the underlying object from the
 * dma-buf private. For GEM based drivers this is already handled by
 * drm_gem_prime_import().
 */

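/*
 * Example (illustrative sketch only, not part of this file): a GEM driver
 * that relies fully on the PRIME helpers typically wires them up as below.
 * All "example_*" names are hypothetical placeholders.
 */
static const struct drm_gem_object_funcs example_gem_funcs __maybe_unused = {
    /* drm_gem_prime_export() is also the fallback used on export when
     * this hook is left NULL. */
    .export = drm_gem_prime_export,
};

static struct drm_driver example_driver __maybe_unused = {
    .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
    .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
    /* drm_gem_prime_import() handles self-imports as described above. */
    .gem_prime_import = drm_gem_prime_import,
};
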
struct drm_prime_member {
    struct dma_buf *dma_buf;
    uint32_t handle;

    struct rb_node dmabuf_rb;
    struct rb_node handle_rb;
};

static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf,
                                    uint32_t handle)
{
    struct drm_prime_member *member;
    struct rb_node **p, *rb;

    member = kmalloc(sizeof(*member), GFP_KERNEL);
    if (!member) {
        return -ENOMEM;
    }

    get_dma_buf(dma_buf);
    member->dma_buf = dma_buf;
    member->handle = handle;

    rb = NULL;
    p = &prime_fpriv->dmabufs.rb_node;
    while (*p) {
        struct drm_prime_member *pos;

        rb = *p;
        pos = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
        if (dma_buf > pos->dma_buf) {
            p = &rb->rb_right;
        } else {
            p = &rb->rb_left;
        }
    }
    rb_link_node(&member->dmabuf_rb, rb, p);
    rb_insert_color(&member->dmabuf_rb, &prime_fpriv->dmabufs);

    rb = NULL;
    p = &prime_fpriv->handles.rb_node;
    while (*p) {
        struct drm_prime_member *pos;

        rb = *p;
        pos = rb_entry(rb, struct drm_prime_member, handle_rb);
        if (handle > pos->handle) {
            p = &rb->rb_right;
        } else {
            p = &rb->rb_left;
        }
    }
    rb_link_node(&member->handle_rb, rb, p);
    rb_insert_color(&member->handle_rb, &prime_fpriv->handles);

    return 0;
}

static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv, uint32_t handle)
{
    struct rb_node *rb;

    rb = prime_fpriv->handles.rb_node;
    while (rb) {
        struct drm_prime_member *member;

        member = rb_entry(rb, struct drm_prime_member, handle_rb);
        if (member->handle == handle) {
            return member->dma_buf;
        } else if (member->handle < handle) {
            rb = rb->rb_right;
        } else {
            rb = rb->rb_left;
        }
    }

    return NULL;
}

static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf,
                                       uint32_t *handle)
{
    struct rb_node *rb;

    rb = prime_fpriv->dmabufs.rb_node;
    while (rb) {
        struct drm_prime_member *member;

        member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
        if (member->dma_buf == dma_buf) {
            *handle = member->handle;
            return 0;
        } else if (member->dma_buf < dma_buf) {
            rb = rb->rb_right;
        } else {
            rb = rb->rb_left;
        }
    }

    return -ENOENT;
}

void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
                                        struct dma_buf *dma_buf)
{
    struct rb_node *rb;

    rb = prime_fpriv->dmabufs.rb_node;
    while (rb) {
        struct drm_prime_member *member;

        member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
        if (member->dma_buf == dma_buf) {
            rb_erase(&member->handle_rb, &prime_fpriv->handles);
            rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);

            dma_buf_put(dma_buf);
            kfree(member);
            return;
        } else if (member->dma_buf < dma_buf) {
            rb = rb->rb_right;
        } else {
            rb = rb->rb_left;
        }
    }
}

void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
{
    mutex_init(&prime_fpriv->lock);
    prime_fpriv->dmabufs = RB_ROOT;
    prime_fpriv->handles = RB_ROOT;
}

void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
    /* by now drm_gem_release should've made sure the list is empty */
    WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs));
}

/**
 * drm_gem_dmabuf_export - &dma_buf export implementation for GEM
 * @dev: parent device for the exported dmabuf
 * @exp_info: the export information used by dma_buf_export()
 *
 * This wraps dma_buf_export() for use by generic GEM drivers that are using
 * drm_gem_dmabuf_release(). In addition to calling dma_buf_export(), we take
 * a reference to the &drm_device and the exported &drm_gem_object (stored in
 * &dma_buf_export_info.priv) which is released by drm_gem_dmabuf_release().
 *
 * Returns the new dmabuf.
 */
struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev, struct dma_buf_export_info *exp_info)
{
    struct drm_gem_object *obj = exp_info->priv;
    struct dma_buf *dma_buf;

    dma_buf = dma_buf_export(exp_info);
    if (IS_ERR(dma_buf)) {
        return dma_buf;
    }

    drm_dev_get(dev);
    drm_gem_object_get(obj);
    dma_buf->file->f_mapping = obj->dev->anon_inode->i_mapping;

    return dma_buf;
}
EXPORT_SYMBOL(drm_gem_dmabuf_export);

/**
 * drm_gem_dmabuf_release - &dma_buf release implementation for GEM
 * @dma_buf: buffer to be released
 *
 * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
 * must use this in their &dma_buf_ops structure as the release callback.
 * drm_gem_dmabuf_release() should be used in conjunction with
 * drm_gem_dmabuf_export().
 */
void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
    struct drm_gem_object *obj = dma_buf->priv;
    struct drm_device *dev = obj->dev;

    /* drop the reference the export fd holds */
    drm_gem_object_put(obj);

    drm_dev_put(dev);
}
EXPORT_SYMBOL(drm_gem_dmabuf_release);

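/*
 * Example (illustrative sketch, not part of this file): a driver rolling its
 * own &dma_buf_ops must still pair drm_gem_dmabuf_export() with
 * drm_gem_dmabuf_release() so the device and GEM object references taken at
 * export time are dropped again. "example_dmabuf_ops" is hypothetical.
 */
static const struct dma_buf_ops example_dmabuf_ops __maybe_unused = {
    .attach = drm_gem_map_attach,
    .detach = drm_gem_map_detach,
    .map_dma_buf = drm_gem_map_dma_buf,
    .unmap_dma_buf = drm_gem_unmap_dma_buf,
    /* Must stay drm_gem_dmabuf_release() for dma-bufs created with
     * drm_gem_dmabuf_export(). */
    .release = drm_gem_dmabuf_release,
    .mmap = drm_gem_dmabuf_mmap,
    .vmap = drm_gem_dmabuf_vmap,
    .vunmap = drm_gem_dmabuf_vunmap,
};
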
/**
 * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
 * @dev: device to import the buffer into
 * @file_priv: drm file-private structure
 * @prime_fd: fd id of the dma-buf which should be imported
 * @handle: pointer to storage for the handle of the imported buffer object
 *
 * This is the PRIME import function which GEM drivers must use to ensure
 * correct lifetime management of the underlying GEM object. The actual
 * importing of the GEM object from the dma-buf is done through the
 * &drm_driver.gem_prime_import driver callback.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_gem_prime_fd_to_handle(struct drm_device *dev, struct drm_file *file_priv, int prime_fd, uint32_t *handle)
{
    struct dma_buf *dma_buf;
    struct drm_gem_object *obj;
    int ret;

    dma_buf = dma_buf_get(prime_fd);
    if (IS_ERR(dma_buf)) {
        return PTR_ERR(dma_buf);
    }

    mutex_lock(&file_priv->prime.lock);

    ret = drm_prime_lookup_buf_handle(&file_priv->prime, dma_buf, handle);
    if (ret == 0) {
        goto out_put;
    }

    /* never seen this one, need to import */
    mutex_lock(&dev->object_name_lock);
    if (dev->driver->gem_prime_import) {
        obj = dev->driver->gem_prime_import(dev, dma_buf);
    } else {
        obj = drm_gem_prime_import(dev, dma_buf);
    }
    if (IS_ERR(obj)) {
        ret = PTR_ERR(obj);
        goto out_unlock;
    }

    if (obj->dma_buf) {
        WARN_ON(obj->dma_buf != dma_buf);
    } else {
        obj->dma_buf = dma_buf;
        get_dma_buf(dma_buf);
    }

    /* _handle_create_tail unconditionally unlocks dev->object_name_lock. */
    ret = drm_gem_handle_create_tail(file_priv, obj, handle);
    drm_gem_object_put(obj);
    if (ret) {
        goto out_put;
    }

    ret = drm_prime_add_buf_handle(&file_priv->prime, dma_buf, *handle);
    mutex_unlock(&file_priv->prime.lock);
    if (ret) {
        goto fail;
    }

    dma_buf_put(dma_buf);

    return 0;

fail:
    /* hmm, if driver attached, we are relying on the free-object path
     * to detach.. which seems ok..
     */
    drm_gem_handle_delete(file_priv, *handle);
    dma_buf_put(dma_buf);
    return ret;

out_unlock:
    mutex_unlock(&dev->object_name_lock);
out_put:
    mutex_unlock(&file_priv->prime.lock);
    dma_buf_put(dma_buf);
    return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);

int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
    struct drm_prime_handle *args = data;

    if (!dev->driver->prime_fd_to_handle) {
        return -ENOSYS;
    }

    return dev->driver->prime_fd_to_handle(dev, file_priv, args->fd, &args->handle);
}

static struct dma_buf *export_and_register_object(struct drm_device *dev, struct drm_gem_object *obj, uint32_t flags)
{
    struct dma_buf *dmabuf;

    /* prevent races with concurrent gem_close. */
    if (obj->handle_count == 0) {
        dmabuf = ERR_PTR(-ENOENT);
        return dmabuf;
    }

    if (obj->funcs && obj->funcs->export) {
        dmabuf = obj->funcs->export(obj, flags);
    } else if (dev->driver->gem_prime_export) {
        dmabuf = dev->driver->gem_prime_export(obj, flags);
    } else {
        dmabuf = drm_gem_prime_export(obj, flags);
    }
    if (IS_ERR(dmabuf)) {
        /* normally the created dma-buf takes ownership of the ref,
         * but if that fails then dropping the ref is the caller's job
         */
        return dmabuf;
    }

    /*
     * Note that callers do not need to clean up the export cache
     * since the check for obj->handle_count guarantees that someone
     * will clean it up.
     */
    obj->dma_buf = dmabuf;
    get_dma_buf(obj->dma_buf);

    return dmabuf;
}

/**
 * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
 * @dev: dev to export the buffer from
 * @file_priv: drm file-private structure
 * @handle: buffer handle to export
 * @flags: flags like DRM_CLOEXEC
 * @prime_fd: pointer to storage for the fd id of the created dma-buf
 *
 * This is the PRIME export function which GEM drivers must use to ensure
 * correct lifetime management of the underlying GEM object. The actual
 * exporting from GEM object to a dma-buf is done through the
 * &drm_driver.gem_prime_export driver callback.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_gem_prime_handle_to_fd(struct drm_device *dev, struct drm_file *file_priv, uint32_t handle, uint32_t flags,
                               int *prime_fd)
{
    struct drm_gem_object *obj;
    int ret = 0;
    struct dma_buf *dmabuf;

    mutex_lock(&file_priv->prime.lock);
    obj = drm_gem_object_lookup(file_priv, handle);
    if (!obj) {
        ret = -ENOENT;
        goto out_unlock;
    }

    dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
    if (dmabuf) {
        get_dma_buf(dmabuf);
        goto out_have_handle;
    }

    mutex_lock(&dev->object_name_lock);
    /* re-export the original imported object */
    if (obj->import_attach) {
        dmabuf = obj->import_attach->dmabuf;
        get_dma_buf(dmabuf);
        goto out_have_obj;
    }

    if (obj->dma_buf) {
        get_dma_buf(obj->dma_buf);
        dmabuf = obj->dma_buf;
        goto out_have_obj;
    }

    dmabuf = export_and_register_object(dev, obj, flags);
    if (IS_ERR(dmabuf)) {
        /* normally the created dma-buf takes ownership of the ref,
         * but if that fails then drop the ref
         */
        ret = PTR_ERR(dmabuf);
        mutex_unlock(&dev->object_name_lock);
        goto out;
    }

out_have_obj:
    /*
     * If we've exported this buffer then cheat and add it to the import list
     * so we get the correct handle back. We must do this under the
     * protection of dev->object_name_lock to ensure that a racing gem close
     * ioctl doesn't fail to remove this buffer handle from the cache.
     */
    ret = drm_prime_add_buf_handle(&file_priv->prime, dmabuf, handle);
    mutex_unlock(&dev->object_name_lock);
    if (ret) {
        goto fail_put_dmabuf;
    }

out_have_handle:
    ret = dma_buf_fd(dmabuf, flags);
    /*
     * We must _not_ remove the buffer from the handle cache since the newly
     * created dma buf is already linked in the global obj->dma_buf pointer,
     * and that is invariant as long as a userspace gem handle exists.
     * Closing the handle will clean out the cache anyway, so we don't leak.
     */
    if (ret < 0) {
        goto fail_put_dmabuf;
    } else {
        *prime_fd = ret;
        ret = 0;
    }

    goto out;

fail_put_dmabuf:
    dma_buf_put(dmabuf);
out:
    drm_gem_object_put(obj);
out_unlock:
    mutex_unlock(&file_priv->prime.lock);

    return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);

int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
    struct drm_prime_handle *args = data;

    if (!dev->driver->prime_handle_to_fd) {
        return -ENOSYS;
    }

    /* check flags are valid */
    if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR)) {
        return -EINVAL;
    }

    return dev->driver->prime_handle_to_fd(dev, file_priv, args->handle, args->flags, &args->fd);
}

/**
 * DOC: PRIME Helpers
 *
 * Drivers can implement &drm_gem_object_funcs.export and
 * &drm_driver.gem_prime_import in terms of simpler APIs by using the helper
 * functions drm_gem_prime_export() and drm_gem_prime_import(). These functions
 * implement dma-buf support in terms of some lower-level helpers, which are
 * again exported for drivers to use individually:
 *
 * Exporting buffers
 * ~~~~~~~~~~~~~~~~~
 *
 * Optional pinning of buffers is handled at dma-buf attach and detach time in
 * drm_gem_map_attach() and drm_gem_map_detach(). Backing storage itself is
 * handled by drm_gem_map_dma_buf() and drm_gem_unmap_dma_buf(), which relies on
 * &drm_gem_object_funcs.get_sg_table.
 *
 * For kernel-internal access there's drm_gem_dmabuf_vmap() and
 * drm_gem_dmabuf_vunmap(). Userspace mmap support is provided by
 * drm_gem_dmabuf_mmap().
 *
 * Note that these export helpers can only be used if the underlying backing
 * storage is fully coherent and either permanently pinned, or it is safe to pin
 * it indefinitely.
 *
 * The underlying helper functions are named rather inconsistently.
 *
 * Importing buffers
 * ~~~~~~~~~~~~~~~~~
 *
 * Importing dma-bufs using drm_gem_prime_import() relies on
 * &drm_driver.gem_prime_import_sg_table.
 *
 * Note that similarly to the export helpers this permanently pins the
 * underlying backing storage, which is ok for scanout, but is not the best
 * option for sharing lots of buffers for rendering.
 */

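/*
 * Example (illustrative sketch, not part of this file): an exporter whose
 * backing storage is allocated up front, coherent, and never moves only needs
 * &drm_gem_object_funcs.get_sg_table for the export helpers above. The
 * "example_bo" type and its members are hypothetical.
 */
struct example_bo {
    struct drm_gem_object base;
    struct page **pages;    /* pinned, coherent backing pages */
    unsigned int num_pages;
    struct sg_table *sgt;   /* set at import time, NULL otherwise */
};

static struct sg_table * __maybe_unused
example_gem_get_sg_table(struct drm_gem_object *obj)
{
    struct example_bo *bo = container_of(obj, struct example_bo, base);

    /* drm_gem_map_dma_buf() calls this hook and then maps the table. */
    return drm_prime_pages_to_sg(obj->dev, bo->pages, bo->num_pages);
}
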
/**
 * drm_gem_map_attach - dma_buf attach implementation for GEM
 * @dma_buf: buffer to attach device to
 * @attach: buffer attachment data
 *
 * Calls &drm_gem_object_funcs.pin for device specific handling. This can be
 * used as the &dma_buf_ops.attach callback. Must be used together with
 * drm_gem_map_detach().
 *
 * Returns 0 on success, negative error code on failure.
 */
int drm_gem_map_attach(struct dma_buf *dma_buf, struct dma_buf_attachment *attach)
{
    struct drm_gem_object *obj = dma_buf->priv;

    return drm_gem_pin(obj);
}
EXPORT_SYMBOL(drm_gem_map_attach);

/**
 * drm_gem_map_detach - dma_buf detach implementation for GEM
 * @dma_buf: buffer to detach from
 * @attach: attachment to be detached
 *
 * Calls &drm_gem_object_funcs.unpin for device specific handling.  Cleans up
 * &dma_buf_attachment from drm_gem_map_attach(). This can be used as the
 * &dma_buf_ops.detach callback.
 */
void drm_gem_map_detach(struct dma_buf *dma_buf, struct dma_buf_attachment *attach)
{
    struct drm_gem_object *obj = dma_buf->priv;

    drm_gem_unpin(obj);
}
EXPORT_SYMBOL(drm_gem_map_detach);

/**
 * drm_gem_map_dma_buf - map_dma_buf implementation for GEM
 * @attach: attachment whose scatterlist is to be returned
 * @dir: direction of DMA transfer
 *
 * Calls &drm_gem_object_funcs.get_sg_table and then maps the scatterlist. This
 * can be used as the &dma_buf_ops.map_dma_buf callback. Should be used together
 * with drm_gem_unmap_dma_buf().
 *
 * Returns: sg_table containing the scatterlist to be returned; returns ERR_PTR
 * on error. May return -EINTR if it is interrupted by a signal.
 */
struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach, enum dma_data_direction dir)
{
    struct drm_gem_object *obj = attach->dmabuf->priv;
    struct sg_table *sgt;
    int ret;

    if (WARN_ON(dir == DMA_NONE)) {
        return ERR_PTR(-EINVAL);
    }

    if (obj->funcs) {
        sgt = obj->funcs->get_sg_table(obj);
    } else {
        sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
    }

    ret = dma_map_sgtable(attach->dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC);
    if (ret) {
        sg_free_table(sgt);
        kfree(sgt);
        sgt = ERR_PTR(ret);
    }

    return sgt;
}
EXPORT_SYMBOL(drm_gem_map_dma_buf);

/**
 * drm_gem_unmap_dma_buf - unmap_dma_buf implementation for GEM
 * @attach: attachment to unmap buffer from
 * @sgt: scatterlist info of the buffer to unmap
 * @dir: direction of DMA transfer
 *
 * This can be used as the &dma_buf_ops.unmap_dma_buf callback.
 */
void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach, struct sg_table *sgt, enum dma_data_direction dir)
{
    if (!sgt) {
        return;
    }

    dma_unmap_sgtable(attach->dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC);
    sg_free_table(sgt);
    kfree(sgt);
}
EXPORT_SYMBOL(drm_gem_unmap_dma_buf);

/**
 * drm_gem_dmabuf_vmap - dma_buf vmap implementation for GEM
 * @dma_buf: buffer to be mapped
 *
 * Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap
 * callback. Calls into &drm_gem_object_funcs.vmap for device specific handling.
 *
 * Returns the kernel virtual address or NULL on failure.
 */
void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
    struct drm_gem_object *obj = dma_buf->priv;
    void *vaddr;

    vaddr = drm_gem_vmap(obj);
    if (IS_ERR(vaddr)) {
        vaddr = NULL;
    }

    return vaddr;
}
EXPORT_SYMBOL(drm_gem_dmabuf_vmap);

/**
 * drm_gem_dmabuf_vunmap - dma_buf vunmap implementation for GEM
 * @dma_buf: buffer to be unmapped
 * @vaddr: the virtual address of the buffer
 *
 * Releases a kernel virtual mapping. This can be used as the
 * &dma_buf_ops.vunmap callback. Calls into &drm_gem_object_funcs.vunmap for
 * device specific handling.
 */
void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
    struct drm_gem_object *obj = dma_buf->priv;

    drm_gem_vunmap(obj, vaddr);
}
EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);

/**
 * drm_gem_prime_mmap - PRIME mmap function for GEM drivers
 * @obj: GEM object
 * @vma: Virtual address range
 *
 * This function sets up a userspace mapping for PRIME exported buffers using
 * the same codepath that is used for regular GEM buffer mapping on the DRM fd.
 * The fake GEM offset is added to vma->vm_pgoff and &drm_driver->fops->mmap is
 * called to set up the mapping.
 *
 * Drivers can use this as their &drm_driver.gem_prime_mmap callback.
 */
int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
    struct drm_file *priv;
    struct file *fil;
    int ret;

    /* Add the fake offset */
    vma->vm_pgoff += drm_vma_node_start(&obj->vma_node);

    if (obj->funcs && obj->funcs->mmap) {
        ret = obj->funcs->mmap(obj, vma);
        if (ret) {
            return ret;
        }
        vma->vm_private_data = obj;
        drm_gem_object_get(obj);
        return 0;
    }

    priv = kzalloc(sizeof(*priv), GFP_KERNEL);
    fil = kzalloc(sizeof(*fil), GFP_KERNEL);
    if (!priv || !fil) {
        ret = -ENOMEM;
        goto out;
    }

    /* Used by drm_gem_mmap() to lookup the GEM object */
    priv->minor = obj->dev->primary;
    fil->private_data = priv;

    ret = drm_vma_node_allow(&obj->vma_node, priv);
    if (ret) {
        goto out;
    }

    ret = obj->dev->driver->fops->mmap(fil, vma);

    drm_vma_node_revoke(&obj->vma_node, priv);
out:
    kfree(priv);
    kfree(fil);

    return ret;
}
EXPORT_SYMBOL(drm_gem_prime_mmap);

/**
 * drm_gem_dmabuf_mmap - dma_buf mmap implementation for GEM
 * @dma_buf: buffer to be mapped
 * @vma: virtual address range
 *
 * Provides memory mapping for the buffer. This can be used as the
 * &dma_buf_ops.mmap callback. It just forwards to &drm_driver.gem_prime_mmap,
 * which should be set to drm_gem_prime_mmap().
 *
 * There's really no point to this wrapper; drivers which need anything
 * other than drm_gem_prime_mmap() can roll their own &dma_buf_ops.mmap
 * callback.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
    struct drm_gem_object *obj = dma_buf->priv;
    struct drm_device *dev = obj->dev;

    if (!dev->driver->gem_prime_mmap) {
        return -ENOSYS;
    }

    return dev->driver->gem_prime_mmap(obj, vma);
}
EXPORT_SYMBOL(drm_gem_dmabuf_mmap);

/**
 * drm_gem_dmabuf_get_uuid - dma_buf get_uuid implementation for GEM
 * @dma_buf: buffer to query
 * @uuid: uuid outparam
 *
 * Queries the buffer's virtio UUID. This can be used as the
 * &dma_buf_ops.get_uuid callback. Calls into &drm_driver.gem_prime_get_uuid.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_gem_dmabuf_get_uuid(struct dma_buf *dma_buf, uuid_t *uuid)
{
    struct drm_gem_object *obj = dma_buf->priv;
    struct drm_device *dev = obj->dev;

    if (!dev->driver->gem_prime_get_uuid) {
        return -ENODEV;
    }

    return dev->driver->gem_prime_get_uuid(obj, uuid);
}
EXPORT_SYMBOL(drm_gem_dmabuf_get_uuid);

static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
    .cache_sgt_mapping = true,
    .attach = drm_gem_map_attach,
    .detach = drm_gem_map_detach,
    .map_dma_buf = drm_gem_map_dma_buf,
    .unmap_dma_buf = drm_gem_unmap_dma_buf,
    .release = drm_gem_dmabuf_release,
    .mmap = drm_gem_dmabuf_mmap,
    .vmap = drm_gem_dmabuf_vmap,
    .vunmap = drm_gem_dmabuf_vunmap,
    .get_uuid = drm_gem_dmabuf_get_uuid,
};

/**
 * drm_prime_pages_to_sg - converts a page array into an sg list
 * @dev: DRM device
 * @pages: pointer to the array of page pointers to convert
 * @nr_pages: length of the page vector
 *
 * This helper creates an sg table object from a set of pages. The driver is
 * responsible for mapping the pages into the importer's address space for use
 * with dma_buf itself.
 *
 * This is useful for implementing &drm_gem_object_funcs.get_sg_table.
 */
struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev, struct page **pages, unsigned int nr_pages)
{
    struct sg_table *sg;
    struct scatterlist *sge;
    size_t max_segment = 0;

    sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
    if (!sg) {
        return ERR_PTR(-ENOMEM);
    }

    if (dev) {
        max_segment = dma_max_mapping_size(dev->dev);
    }
    if (max_segment == 0 || max_segment > SCATTERLIST_MAX_SEGMENT) {
        max_segment = SCATTERLIST_MAX_SEGMENT;
    }
    sge = __sg_alloc_table_from_pages(sg, pages, nr_pages, 0, nr_pages << PAGE_SHIFT,
                                      max_segment, NULL, 0, GFP_KERNEL);
    if (IS_ERR(sge)) {
        kfree(sg);
        sg = ERR_CAST(sge);
    }
    return sg;
}
EXPORT_SYMBOL(drm_prime_pages_to_sg);

/**
 * drm_prime_get_contiguous_size - returns the contiguous size of the buffer
 * @sgt: sg_table describing the buffer to check
 *
 * This helper calculates the contiguous size in the DMA address space
 * of the buffer described by the provided sg_table.
 *
 * This is useful for implementing
 * &drm_driver.gem_prime_import_sg_table.
 */
unsigned long drm_prime_get_contiguous_size(struct sg_table *sgt)
{
    dma_addr_t expected = sg_dma_address(sgt->sgl);
    struct scatterlist *sg;
    unsigned long size = 0;
    int i;

    for_each_sgtable_dma_sg(sgt, sg, i) {
        unsigned int len = sg_dma_len(sg);

        if (!len) {
            break;
        }
        if (sg_dma_address(sg) != expected) {
            break;
        }
        expected += len;
        size += len;
    }
    return size;
}
EXPORT_SYMBOL(drm_prime_get_contiguous_size);

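/*
 * Example (illustrative sketch, not part of this file): a driver whose
 * hardware can only handle physically contiguous buffers can use
 * drm_prime_get_contiguous_size() to reject unsuitable imports; the object
 * creation itself is elided here.
 */
static struct drm_gem_object * __maybe_unused
example_gem_prime_import_sg_table(struct drm_device *dev,
                                  struct dma_buf_attachment *attach,
                                  struct sg_table *sgt)
{
    /* The imported buffer must be contiguous in DMA address space. */
    if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size) {
        return ERR_PTR(-EINVAL);
    }

    /* ... wrap sgt in the driver's buffer object here ... */
    return ERR_PTR(-ENOSYS); /* placeholder in this sketch */
}
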
/**
 * drm_gem_prime_export - helper library implementation of the export callback
 * @obj: GEM object to export
 * @flags: flags like DRM_CLOEXEC and DRM_RDWR
 *
 * This is the implementation of the &drm_gem_object_funcs.export function for
 * GEM drivers using the PRIME helpers. It is used as the default in
 * drm_gem_prime_handle_to_fd().
 */
struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj, int flags)
{
    struct drm_device *dev = obj->dev;
    struct dma_buf_export_info exp_info = {
        .exp_name = KBUILD_MODNAME, /* white lie for debug */
        .owner = dev->driver->fops->owner,
        .ops = &drm_gem_prime_dmabuf_ops,
        .size = obj->size,
        .flags = flags,
        .priv = obj,
        .resv = obj->resv,
    };

    return drm_gem_dmabuf_export(dev, &exp_info);
}
EXPORT_SYMBOL(drm_gem_prime_export);

/**
 * drm_gem_prime_import_dev - core implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 * @attach_dev: struct device to dma_buf attach
 *
 * This is the core of drm_gem_prime_import(). It's designed to be called by
 * drivers that want to use a different device structure than &drm_device.dev
 * for attaching via dma_buf. This function calls
 * &drm_driver.gem_prime_import_sg_table internally.
 *
 * Drivers must arrange to call drm_prime_gem_destroy() from their
 * &drm_gem_object_funcs.free hook when using this function.
 */
struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev, struct dma_buf *dma_buf,
                                                struct device *attach_dev)
{
    struct dma_buf_attachment *attach;
    struct sg_table *sgt;
    struct drm_gem_object *obj;
    int ret;

    if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
        obj = dma_buf->priv;
        if (obj->dev == dev) {
            /*
             * Importing a dmabuf exported from our own gem increases
             * the refcount on the gem itself instead of the f_count
             * of the dmabuf.
             */
            drm_gem_object_get(obj);
            return obj;
        }
    }

    if (!dev->driver->gem_prime_import_sg_table) {
        return ERR_PTR(-EINVAL);
    }

    attach = dma_buf_attach(dma_buf, attach_dev);
    if (IS_ERR(attach)) {
        return ERR_CAST(attach);
    }

    get_dma_buf(dma_buf);

    sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
    if (IS_ERR(sgt)) {
        ret = PTR_ERR(sgt);
        goto fail_detach;
    }

    obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
    if (IS_ERR(obj)) {
        ret = PTR_ERR(obj);
        goto fail_unmap;
    }

    obj->import_attach = attach;
    obj->resv = dma_buf->resv;

    return obj;

fail_unmap:
    dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
    dma_buf_detach(dma_buf, attach);
    dma_buf_put(dma_buf);

    return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_prime_import_dev);

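/*
 * Example (illustrative sketch, not part of this file): a virtual DRM device
 * that performs DMA through a separate physical device can attach with that
 * device instead of &drm_device.dev. "example_dma_dev" is hypothetical.
 */
static struct device *example_dma_dev;

static struct drm_gem_object * __maybe_unused
example_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf)
{
    return drm_gem_prime_import_dev(dev, dma_buf, example_dma_dev);
}
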
/**
 * drm_gem_prime_import - helper library implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 *
 * This is the implementation of the gem_prime_import function for GEM drivers
 * using the PRIME helpers. Drivers can use this as their
 * &drm_driver.gem_prime_import implementation. It is used as the default
 * implementation in drm_gem_prime_fd_to_handle().
 *
 * Drivers must arrange to call drm_prime_gem_destroy() from their
 * &drm_gem_object_funcs.free hook when using this function.
 */
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf)
{
    return drm_gem_prime_import_dev(dev, dma_buf, dev->dev);
}
EXPORT_SYMBOL(drm_gem_prime_import);

/**
 * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
 * @sgt: scatter-gather table to convert
 * @pages: optional array of page pointers to store the page array in
 * @addrs: optional array to store the dma bus address of each page
 * @max_entries: size of both the passed-in arrays
 *
 * Exports an sg table into an array of pages and addresses. This is currently
 * required by the TTM driver in order to do correct fault handling.
 *
 * Drivers can use this in their &drm_driver.gem_prime_import_sg_table
 * implementation.
 */
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages, dma_addr_t *addrs, int max_entries)
{
    struct sg_dma_page_iter dma_iter;
    struct sg_page_iter page_iter;
    struct page **p = pages;
    dma_addr_t *a = addrs;

    if (pages) {
        for_each_sgtable_page(sgt, &page_iter, 0) {
            if (WARN_ON(p - pages >= max_entries)) {
                return -1;
            }
            *p++ = sg_page_iter_page(&page_iter);
        }
    }
    if (addrs) {
        for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
            if (WARN_ON(a - addrs >= max_entries)) {
                return -1;
            }
            *a++ = sg_page_iter_dma_address(&dma_iter);
        }
    }

    return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);

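/*
 * Example (illustrative sketch, not part of this file): filling both arrays
 * for a buffer of obj->size bytes; each array must provide one entry per
 * page. The "example_*" names are hypothetical.
 */
static int __maybe_unused
example_fill_page_arrays(struct drm_gem_object *obj, struct sg_table *sgt,
                         struct page **pages, dma_addr_t *addrs)
{
    int npages = obj->size >> PAGE_SHIFT;

    return drm_prime_sg_to_page_addr_arrays(sgt, pages, addrs, npages);
}
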
/**
 * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
 * @obj: GEM object which was created from a dma-buf
 * @sg: the sg-table which was pinned at import time
 *
 * This is the cleanup function which GEM drivers need to call when they use
 * drm_gem_prime_import() or drm_gem_prime_import_dev() to import dma-bufs.
 */
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
    struct dma_buf_attachment *attach;
    struct dma_buf *dma_buf;

    attach = obj->import_attach;
    if (sg) {
        dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
    }
    dma_buf = attach->dmabuf;
    dma_buf_detach(attach->dmabuf, attach);
    /* remove the reference */
    dma_buf_put(dma_buf);
}
EXPORT_SYMBOL(drm_prime_gem_destroy);
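
/*
 * Example (illustrative sketch, not part of this file): a
 * &drm_gem_object_funcs.free implementation that cleans up imports with
 * drm_prime_gem_destroy(), reusing the hypothetical "example_bo" from the
 * sketch further above.
 */
static void __maybe_unused example_gem_free(struct drm_gem_object *obj)
{
    struct example_bo *bo = container_of(obj, struct example_bo, base);

    if (obj->import_attach) {
        drm_prime_gem_destroy(obj, bo->sgt);
    }

    drm_gem_object_release(obj);
    kfree(bo);
}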