1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Framework for buffer objects that can be shared across devices/subsystems.
4  *
5  * Copyright(C) 2011 Linaro Limited. All rights reserved.
6  * Author: Sumit Semwal <sumit.semwal@ti.com>
7  *
8  * Many thanks to the linaro-mm-sig list, and especially
9  * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
10  * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
11  * refining of this idea.
12  */
13 
14 #include <linux/fs.h>
15 #include <linux/slab.h>
16 #include <linux/dma-buf.h>
17 #include <linux/dma-fence.h>
18 #include <linux/anon_inodes.h>
19 #include <linux/export.h>
20 #include <linux/debugfs.h>
21 #include <linux/module.h>
22 #include <linux/seq_file.h>
23 #include <linux/poll.h>
24 #include <linux/dma-resv.h>
25 #include <linux/mm.h>
26 #include <linux/mount.h>
27 #include <linux/pseudo_fs.h>
28 
29 #include <uapi/linux/dma-buf.h>
30 #include <uapi/linux/magic.h>
31 
32 #include "dma-buf-sysfs-stats.h"
33 #include "dma-buf-process-info.h"
34 
35 struct dma_buf_list {
36     struct list_head head;
37     struct mutex lock;
38 };
39 
40 static struct dma_buf_list db_list;
41 
42 /*
43  * Helper to traverse db_list, invoking the callback for each dma-buf so
44  * that the caller can extract whatever information it needs from each
45  * buffer.
46  */
47 int get_each_dmabuf(int (*callback)(const struct dma_buf *dmabuf, void *private), void *private)
48 {
49     struct dma_buf *buf;
50     int ret = mutex_lock_interruptible(&db_list.lock);
51     if (ret) {
52         return ret;
53     }
54 
55     list_for_each_entry(buf, &db_list.head, list_node)
56     {
57         ret = callback(buf, private);
58         if (ret) {
59             break;
60         }
61     }
62     mutex_unlock(&db_list.lock);
63     return ret;
64 }
65 EXPORT_SYMBOL_GPL(get_each_dmabuf);
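/*
 * Illustrative sketch (not part of this file's code paths): a caller could
 * sum the sizes of all currently exported dma-bufs with a callback like the
 * hypothetical count_cb() below; only get_each_dmabuf() and the dma_buf
 * 'size' field are taken from this file.
 *
 *     static int count_cb(const struct dma_buf *dmabuf, void *private)
 *     {
 *         size_t *total = private;
 *
 *         *total += dmabuf->size;
 *         return 0;    // returning non-zero stops the traversal
 *     }
 *
 *     size_t total = 0;
 *     int err = get_each_dmabuf(count_cb, &total);
 */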
66 
67 static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
68 {
69     struct dma_buf *dmabuf;
70     char name[DMA_BUF_NAME_LEN];
71     size_t ret = 0;
72 
73     dmabuf = dentry->d_fsdata;
74     spin_lock(&dmabuf->name_lock);
75     if (dmabuf->name) {
76         ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
77     }
78     spin_unlock(&dmabuf->name_lock);
79 
80     return dynamic_dname(dentry, buffer, buflen, "/%s:%s", dentry->d_name.name, ret > 0 ? name : "");
81 }
82 
83 static void dma_buf_release(struct dentry *dentry)
84 {
85     struct dma_buf *dmabuf;
86 
87     dmabuf = dentry->d_fsdata;
88     if (unlikely(!dmabuf)) {
89         return;
90     }
91 
92     BUG_ON(dmabuf->vmapping_counter);
93 
94     /*
95      * Any fences that a dma-buf poll can wait on should be signaled
96      * before releasing dma-buf. This is the responsibility of each
97      * driver that uses the reservation objects.
98      *
99      * If you hit this BUG() it means someone dropped their ref to the
100      * dma-buf while still having pending operation to the buffer.
101      */
102     BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);
103 
104     dmabuf->ops->release(dmabuf);
105 
106     if (dmabuf->resv == (struct dma_resv *)&dmabuf[1]) {
107         dma_resv_fini(dmabuf->resv);
108     }
109 
110     WARN_ON(!list_empty(&dmabuf->attachments));
111     dma_buf_stats_teardown(dmabuf);
112     module_put(dmabuf->owner);
113     kfree(dmabuf->name);
114     kfree(dmabuf);
115 }
116 
117 static int dma_buf_file_release(struct inode *inode, struct file *file)
118 {
119     struct dma_buf *dmabuf;
120 
121     if (!is_dma_buf_file(file)) {
122         return -EINVAL;
123     }
124 
125     dmabuf = file->private_data;
126 
127     mutex_lock(&db_list.lock);
128     list_del(&dmabuf->list_node);
129     mutex_unlock(&db_list.lock);
130 
131     return 0;
132 }
133 
134 static const struct dentry_operations dma_buf_dentry_ops = {
135     .d_dname = dmabuffs_dname,
136     .d_release = dma_buf_release,
137 };
138 
139 static struct vfsmount *dma_buf_mnt;
140 
141 static int dma_buf_fs_init_context(struct fs_context *fc)
142 {
143     struct pseudo_fs_context *ctx;
144 
145     ctx = init_pseudo(fc, DMA_BUF_MAGIC);
146     if (!ctx) {
147         return -ENOMEM;
148     }
149     ctx->dops = &dma_buf_dentry_ops;
150     return 0;
151 }
152 
153 static struct file_system_type dma_buf_fs_type = {
154     .name = "dmabuf",
155     .init_fs_context = dma_buf_fs_init_context,
156     .kill_sb = kill_anon_super,
157 };
158 
159 #ifdef CONFIG_DMABUF_SYSFS_STATS
160 static void dma_buf_vma_open(struct vm_area_struct *vma)
161 {
162     struct dma_buf *dmabuf = vma->vm_file->private_data;
163 
164     dmabuf->mmap_count++;
165     /* call the heap provided vma open() op */
166     if (dmabuf->exp_vm_ops->open) {
167         dmabuf->exp_vm_ops->open(vma);
168     }
169 }
170 
171 static void dma_buf_vma_close(struct vm_area_struct *vma)
172 {
173     struct dma_buf *dmabuf = vma->vm_file->private_data;
174 
175     if (dmabuf->mmap_count) {
176         dmabuf->mmap_count--;
177     }
178     /* call the heap provided vma close() op */
179     if (dmabuf->exp_vm_ops->close) {
180         dmabuf->exp_vm_ops->close(vma);
181     }
182 }
183 
184 static int dma_buf_do_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
185 {
186     /* call this first because the exporter might override vma->vm_ops */
187     int ret = dmabuf->ops->mmap(dmabuf, vma);
188     if (ret) {
189         return ret;
190     }
191 
192     /* save the exporter provided vm_ops */
193     dmabuf->exp_vm_ops = vma->vm_ops;
194     dmabuf->vm_ops = *(dmabuf->exp_vm_ops);
195     /* override open() and close() to provide buffer mmap count */
196     dmabuf->vm_ops.open = dma_buf_vma_open;
197     dmabuf->vm_ops.close = dma_buf_vma_close;
198     vma->vm_ops = &dmabuf->vm_ops;
199     dmabuf->mmap_count++;
200 
201     return ret;
202 }
203 #else  /* CONFIG_DMABUF_SYSFS_STATS */
204 static int dma_buf_do_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
205 {
206     return dmabuf->ops->mmap(dmabuf, vma);
207 }
208 #endif /* CONFIG_DMABUF_SYSFS_STATS */
209 
210 static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
211 {
212     struct dma_buf *dmabuf;
213 
214     if (!is_dma_buf_file(file)) {
215         return -EINVAL;
216     }
217 
218     dmabuf = file->private_data;
219 
220     /* check if buffer supports mmap */
221     if (!dmabuf->ops->mmap) {
222         return -EINVAL;
223     }
224 
225     /* check for overflowing the buffer's size */
226     if ((vma->vm_pgoff + vma_pages(vma)) > (dmabuf->size >> PAGE_SHIFT)) {
227         return -EINVAL;
228     }
229 
230     return dma_buf_do_mmap(dmabuf, vma);
231 }
232 
233 static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
234 {
235     struct dma_buf *dmabuf;
236     loff_t base;
237 
238     if (!is_dma_buf_file(file)) {
239         return -EBADF;
240     }
241 
242     dmabuf = file->private_data;
243 
244     /* only support discovering the end of the buffer,
245        but also allow SEEK_SET to maintain the idiomatic
246        SEEK_END(0), SEEK_CUR(0) pattern */
247     if (whence == SEEK_END) {
248         base = dmabuf->size;
249     } else if (whence == SEEK_SET) {
250         base = 0;
251     } else {
252         return -EINVAL;
253     }
254 
255     if (offset != 0) {
256         return -EINVAL;
257     }
258 
259     return base + offset;
260 }
261 
262 /**
263  * DOC: implicit fence polling
264  *
265  * To support cross-device and cross-driver synchronization of buffer access
266  * implicit fences (represented internally in the kernel with &struct dma_fence)
267  * can be attached to a &dma_buf. The glue for that and a few related things are
268  * provided in the &dma_resv structure.
269  *
270  * Userspace can query the state of these implicitly tracked fences using poll()
271  * and related system calls:
272  *
273  * - Checking for EPOLLIN, i.e. read access, can be used to query the state of the
274  *   most recent write or exclusive fence.
275  *
276  * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
277  *   all attached fences, shared and exclusive ones.
278  *
279  * Note that this only signals the completion of the respective fences, i.e. the
280  * DMA transfers are complete. Cache flushing and any other necessary
281  * preparations before CPU access can begin still need to happen.
282  */
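/*
 * Illustrative userspace sketch of the polling described above (assumes
 * 'fd' is an open dma-buf file descriptor; error handling omitted).
 * Waiting for POLLOUT blocks until all attached fences, shared and
 * exclusive, have signalled:
 *
 *     struct pollfd pfd = { .fd = fd, .events = POLLOUT };
 *
 *     if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLOUT)) {
 *         // all fences signalled, the DMA transfers are complete
 *     }
 */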
283 
284 static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
285 {
286     struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
287     unsigned long flags;
288 
289     spin_lock_irqsave(&dcb->poll->lock, flags);
290     wake_up_locked_poll(dcb->poll, dcb->active);
291     dcb->active = 0;
292     spin_unlock_irqrestore(&dcb->poll->lock, flags);
293 }
294 
295 static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
296 {
297     struct dma_buf *dmabuf;
298     struct dma_resv *resv;
299     struct dma_resv_list *fobj;
300     struct dma_fence *fence_excl;
301     __poll_t events;
302     unsigned shared_count, seq;
303 
304     dmabuf = file->private_data;
305     if (!dmabuf || !dmabuf->resv) {
306         return EPOLLERR;
307     }
308 
309     resv = dmabuf->resv;
310 
311     poll_wait(file, &dmabuf->poll, poll);
312 
313     events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
314     if (!events) {
315         return 0;
316     }
317 
318     while (1) {
319         seq = read_seqcount_begin(&resv->seq);
320         rcu_read_lock();
321 
322         fobj = rcu_dereference(resv->fence);
323         if (fobj) {
324             shared_count = fobj->shared_count;
325         } else {
326             shared_count = 0;
327         }
328         fence_excl = rcu_dereference(resv->fence_excl);
329         if (read_seqcount_retry(&resv->seq, seq)) {
330             rcu_read_unlock();
331             continue;
332         }
333         break;
334     }
335     if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
336         struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
337         __poll_t pevents = EPOLLIN;
338 
339         if (shared_count == 0) {
340             pevents |= EPOLLOUT;
341         }
342 
343         spin_lock_irq(&dmabuf->poll.lock);
344         if (dcb->active) {
345             dcb->active |= pevents;
346             events &= ~pevents;
347         } else {
348             dcb->active = pevents;
349         }
350         spin_unlock_irq(&dmabuf->poll.lock);
351 
352         if (events & pevents) {
353             if (!dma_fence_get_rcu(fence_excl)) {
354                 /* force a recheck */
355                 events &= ~pevents;
356                 dma_buf_poll_cb(NULL, &dcb->cb);
357             } else if (!dma_fence_add_callback(fence_excl, &dcb->cb, dma_buf_poll_cb)) {
358                 events &= ~pevents;
359                 dma_fence_put(fence_excl);
360             } else {
361                 /*
362                  * No callback queued, wake up any additional
363                  * waiters.
364                  */
365                 dma_fence_put(fence_excl);
366                 dma_buf_poll_cb(NULL, &dcb->cb);
367             }
368         }
369     }
370 
371     if ((events & EPOLLOUT) && shared_count > 0) {
372         struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
373         int i;
374 
375         /* Only queue a new callback if no event has fired yet */
376         spin_lock_irq(&dmabuf->poll.lock);
377         if (dcb->active) {
378             events &= ~EPOLLOUT;
379         } else {
380             dcb->active = EPOLLOUT;
381         }
382         spin_unlock_irq(&dmabuf->poll.lock);
383 
384         if (!(events & EPOLLOUT)) {
385             goto out;
386         }
387 
388         for (i = 0; i < shared_count; ++i) {
389             struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
390 
391             if (!dma_fence_get_rcu(fence)) {
392                 /*
393                  * fence refcount dropped to zero, this means
394                  * that fobj has been freed
395                  *
396                  * call dma_buf_poll_cb and force a recheck!
397                  */
398                 events &= ~EPOLLOUT;
399                 dma_buf_poll_cb(NULL, &dcb->cb);
400                 break;
401             }
402             if (!dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb)) {
403                 dma_fence_put(fence);
404                 events &= ~EPOLLOUT;
405                 break;
406             }
407             dma_fence_put(fence);
408         }
409 
410         /* No callback queued, wake up any additional waiters. */
411         if (i == shared_count) {
412             dma_buf_poll_cb(NULL, &dcb->cb);
413         }
414     }
415 
416 out:
417     rcu_read_unlock();
418     return events;
419 }
420 
421 /**
422  * dma_buf_set_name - Set a name to a specific dma_buf to track the usage.
423  * The name of the dma-buf buffer can only be set when the dma-buf is not
424  * attached to any devices. It could theoretically support changing the
425  * name of the dma-buf if the same piece of memory is used for multiple
426  * purpose between different devices.
427  *
428  * @dmabuf: [in]     dmabuf buffer that will be renamed.
429  * @buf:    [in]     A piece of userspace memory that contains the name of
430  *                   the dma-buf.
431  *
432  * Returns 0 on success. If the dma-buf buffer is already attached to
433  * devices, return -EBUSY.
434  *
435  */
436 static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
437 {
438     char *name = strndup_user(buf, DMA_BUF_NAME_LEN);
439     long ret = 0;
440 
441     if (IS_ERR(name)) {
442         return PTR_ERR(name);
443     }
444 
445     dma_resv_lock(dmabuf->resv, NULL);
446     if (!list_empty(&dmabuf->attachments)) {
447         ret = -EBUSY;
448         kfree(name);
449         goto out_unlock;
450     }
451     spin_lock(&dmabuf->name_lock);
452     kfree(dmabuf->name);
453     dmabuf->name = name;
454     spin_unlock(&dmabuf->name_lock);
455 
456 out_unlock:
457     dma_resv_unlock(dmabuf->resv);
458     return ret;
459 }
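/*
 * Illustrative userspace sketch (assumes 'fd' is a dma-buf file descriptor
 * not currently attached to any device; the name string is made up, error
 * handling omitted):
 *
 *     ioctl(fd, DMA_BUF_SET_NAME, "camera-preview");
 */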
460 
461 static long dma_buf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
462 {
463     struct dma_buf *dmabuf;
464     struct dma_buf_sync sync;
465     enum dma_data_direction direction;
466     int ret;
467 
468     dmabuf = file->private_data;
469 
470     switch (cmd) {
471         case DMA_BUF_IOCTL_SYNC:
472             if (copy_from_user(&sync, (void __user *)arg, sizeof(sync))) {
473                 return -EFAULT;
474             }
475 
476             if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK) {
477                 return -EINVAL;
478             }
479 
480             switch (sync.flags & DMA_BUF_SYNC_RW) {
481                 case DMA_BUF_SYNC_READ:
482                     direction = DMA_FROM_DEVICE;
483                     break;
484                 case DMA_BUF_SYNC_WRITE:
485                     direction = DMA_TO_DEVICE;
486                     break;
487                 case DMA_BUF_SYNC_RW:
488                     direction = DMA_BIDIRECTIONAL;
489                     break;
490                 default:
491                     return -EINVAL;
492             }
493 
494             if (sync.flags & DMA_BUF_SYNC_END) {
495                 ret = dma_buf_end_cpu_access(dmabuf, direction);
496             } else {
497                 ret = dma_buf_begin_cpu_access(dmabuf, direction);
498             }
499 
500             return ret;
501 
502         case DMA_BUF_SET_NAME_A:
503         case DMA_BUF_SET_NAME_B:
504             return dma_buf_set_name(dmabuf, (const char __user *)arg);
505 
506         default:
507             return -ENOTTY;
508     }
509 }
510 
511 static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
512 {
513     struct dma_buf *dmabuf = file->private_data;
514 
515     seq_printf(m, "size:\t%zu\n", dmabuf->size);
516     /* Don't count the temporary reference taken inside procfs seq_show */
517     seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
518     seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
519     spin_lock(&dmabuf->name_lock);
520     if (dmabuf->name) {
521         seq_printf(m, "name:\t%s\n", dmabuf->name);
522     }
523     spin_unlock(&dmabuf->name_lock);
524 }
525 
526 static const struct file_operations dma_buf_fops = {
527     .release = dma_buf_file_release,
528     .mmap = dma_buf_mmap_internal,
529     .llseek = dma_buf_llseek,
530     .poll = dma_buf_poll,
531     .unlocked_ioctl = dma_buf_ioctl,
532     .compat_ioctl = compat_ptr_ioctl,
533     .show_fdinfo = dma_buf_show_fdinfo,
534 };
535 
536 /*
537  * is_dma_buf_file - Check if struct file* is associated with dma_buf
538  */
539 int is_dma_buf_file(struct file *file)
540 {
541     return file->f_op == &dma_buf_fops;
542 }
543 EXPORT_SYMBOL_GPL(is_dma_buf_file);
544 
545 static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
546 {
547     struct file *file;
548     struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);
549 
550     if (IS_ERR(inode)) {
551         return ERR_CAST(inode);
552     }
553 
554     inode->i_size = dmabuf->size;
555     inode_set_bytes(inode, dmabuf->size);
556 
557     file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf", flags, &dma_buf_fops);
558     if (IS_ERR(file)) {
559         goto err_alloc_file;
560     }
561     file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
562     file->private_data = dmabuf;
563     file->f_path.dentry->d_fsdata = dmabuf;
564 
565     return file;
566 
567 err_alloc_file:
568     iput(inode);
569     return file;
570 }
571 
572 /**
573  * DOC: dma buf device access
574  *
575  * For device DMA access to a shared DMA buffer the usual sequence of operations
576  * is fairly simple
577  *
578  * 1. The exporter defines its exporter instance using
579  *    DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private
580  *    buffer object into a &dma_buf. It then exports that &dma_buf to userspace
581  *    as a file descriptor by calling dma_buf_fd().
582  *
583  * 2. Userspace passes this file descriptor to all drivers it wants this buffer
584  *    to share with: first the file descriptor is converted to a &dma_buf using
585  *    dma_buf_get(). Then the buffer is attached to the device using
586  *    dma_buf_attach().
587  *
588  *    Up to this stage the exporter is still free to migrate or reallocate the
589  *    backing storage.
590  *
591  * 3. Once the buffer is attached to all devices userspace can initiate DMA
592  *    access to the shared buffer. In the kernel this is done by calling
593  *    dma_buf_map_attachment() and dma_buf_unmap_attachment().
594  *
595  * 4. Once a driver is done with a shared buffer it needs to call
596  *    dma_buf_detach() (after cleaning up any mappings) and then release the
597  *    reference acquired with dma_buf_get by calling dma_buf_put().
598  *
599  * For the detailed semantics exporters are expected to implement see
600  * &dma_buf_ops.
601  */
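/*
 * Minimal exporter-side sketch of step 1 above (my_ops, my_priv and my_size
 * are hypothetical exporter-specific values; error handling omitted):
 *
 *     DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *     struct dma_buf *dmabuf;
 *     int fd;
 *
 *     exp_info.ops = &my_ops;
 *     exp_info.size = my_size;
 *     exp_info.flags = O_RDWR;
 *     exp_info.priv = my_priv;
 *     dmabuf = dma_buf_export(&exp_info);
 *     fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 *
 * An importer then follows steps 2-4 with dma_buf_get(fd), dma_buf_attach(),
 * dma_buf_map_attachment()/dma_buf_unmap_attachment(), dma_buf_detach() and
 * dma_buf_put().
 */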
602 
603 /**
604  * dma_buf_export - Creates a new dma_buf, and associates an anon file
605  * with this buffer, so it can be exported.
606  * Also connect the allocator specific data and ops to the buffer.
607  * Additionally, provide a name string for exporter; useful in debugging.
608  *
609  * @exp_info:    [in]    holds all the export related information provided
610  *            by the exporter. see &struct dma_buf_export_info
611  *            for further details.
612  *
613  * Returns, on success, a newly created struct dma_buf, which wraps the
614  * supplied private data and dma_buf_ops. If ops are missing or the
615  * struct dma_buf allocation fails, a negative error pointer is returned.
616  *
617  * For most cases the easiest way to create @exp_info is through the
618  * %DEFINE_DMA_BUF_EXPORT_INFO macro.
619  */
620 struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
621 {
622     struct dma_buf *dmabuf;
623     struct dma_resv *resv = exp_info->resv;
624     struct file *file;
625     size_t alloc_size = sizeof(struct dma_buf);
626     int ret;
627 
628     if (!exp_info->resv) {
629         alloc_size += sizeof(struct dma_resv);
630     } else {
631         /* prevent &dma_buf[1] == dma_buf->resv */
632         alloc_size += 1;
633     }
634 
635     if (WARN_ON(!exp_info->priv || !exp_info->ops || !exp_info->ops->map_dma_buf || !exp_info->ops->unmap_dma_buf ||
636                 !exp_info->ops->release)) {
637         return ERR_PTR(-EINVAL);
638     }
639 
640     if (WARN_ON(exp_info->ops->cache_sgt_mapping && (exp_info->ops->pin || exp_info->ops->unpin))) {
641         return ERR_PTR(-EINVAL);
642     }
643 
644     if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin)) {
645         return ERR_PTR(-EINVAL);
646     }
647 
648     if (!try_module_get(exp_info->owner)) {
649         return ERR_PTR(-ENOENT);
650     }
651 
652     dmabuf = kzalloc(alloc_size, GFP_KERNEL);
653     if (!dmabuf) {
654         ret = -ENOMEM;
655         goto err_module;
656     }
657 
658     dmabuf->priv = exp_info->priv;
659     dmabuf->ops = exp_info->ops;
660     dmabuf->size = exp_info->size;
661     dmabuf->exp_name = exp_info->exp_name;
662     dmabuf->owner = exp_info->owner;
663     spin_lock_init(&dmabuf->name_lock);
664     init_waitqueue_head(&dmabuf->poll);
665     dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
666     dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
667 
668     if (!resv) {
669         resv = (struct dma_resv *)&dmabuf[1];
670         dma_resv_init(resv);
671     }
672     dmabuf->resv = resv;
673 
674     file = dma_buf_getfile(dmabuf, exp_info->flags);
675     if (IS_ERR(file)) {
676         ret = PTR_ERR(file);
677         goto err_dmabuf;
678     }
679 
680     file->f_mode |= FMODE_LSEEK;
681     dmabuf->file = file;
682 
683     ret = dma_buf_stats_setup(dmabuf);
684     if (ret) {
685         goto err_sysfs;
686     }
687 
688     mutex_init(&dmabuf->lock);
689     INIT_LIST_HEAD(&dmabuf->attachments);
690 
691     mutex_lock(&db_list.lock);
692     list_add(&dmabuf->list_node, &db_list.head);
693     mutex_unlock(&db_list.lock);
694 
695     init_dma_buf_task_info(dmabuf);
696     return dmabuf;
697 
698 err_sysfs:
699     /*
700      * Set file->f_path.dentry->d_fsdata to NULL so that when
701      * dma_buf_release() gets invoked by dentry_ops, it exits
702      * early before calling the release() dma_buf op.
703      */
704     file->f_path.dentry->d_fsdata = NULL;
705     fput(file);
706 err_dmabuf:
707     kfree(dmabuf);
708 err_module:
709     module_put(exp_info->owner);
710     return ERR_PTR(ret);
711 }
712 EXPORT_SYMBOL_GPL(dma_buf_export);
713 
714 /**
715  * dma_buf_fd - returns a file descriptor for the given dma_buf
716  * @dmabuf:    [in]    pointer to dma_buf for which fd is required.
717  * @flags:      [in]    flags to give to fd
718  *
719  * On success, returns an associated 'fd'. Else, returns error.
720  */
721 int dma_buf_fd(struct dma_buf *dmabuf, int flags)
722 {
723     int fd;
724 
725     if (!dmabuf || !dmabuf->file) {
726         return -EINVAL;
727     }
728 
729     fd = get_unused_fd_flags(flags);
730     if (fd < 0) {
731         return fd;
732     }
733 
734     fd_install(fd, dmabuf->file);
735 
736     return fd;
737 }
738 EXPORT_SYMBOL_GPL(dma_buf_fd);
739 
740 /**
741  * dma_buf_get - returns the dma_buf structure related to an fd
742  * @fd:    [in]    fd associated with the dma_buf to be returned
743  *
744  * On success, returns the dma_buf structure associated with an fd; uses
745  * the file's refcounting done by fget() to increase the refcount. Returns
746  * ERR_PTR otherwise.
747  */
748 struct dma_buf *dma_buf_get(int fd)
749 {
750     struct file *file;
751 
752     file = fget(fd);
753     if (!file) {
754         return ERR_PTR(-EBADF);
755     }
756 
757     if (!is_dma_buf_file(file)) {
758         fput(file);
759         return ERR_PTR(-EINVAL);
760     }
761 
762     return file->private_data;
763 }
764 EXPORT_SYMBOL_GPL(dma_buf_get);
765 
766 /**
767  * dma_buf_put - decreases refcount of the buffer
768  * @dmabuf:    [in]    buffer to reduce refcount of
769  *
770  * Uses file's refcounting done implicitly by fput().
771  *
772  * If, as a result of this call, the refcount becomes 0, the 'release' file
773  * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
774  * in turn, and frees the memory allocated for dmabuf when exported.
775  */
776 void dma_buf_put(struct dma_buf *dmabuf)
777 {
778     if (WARN_ON(!dmabuf || !dmabuf->file)) {
779         return;
780     }
781 
782     fput(dmabuf->file);
783 }
784 EXPORT_SYMBOL_GPL(dma_buf_put);
785 
786 /**
787  * dma_buf_pin - Lock down the DMA-buf
788  *
789  * @attach:    [in]    attachment which should be pinned
790  *
791  * Returns:
792  * 0 on success, negative error code on failure.
793  */
794 int dma_buf_pin(struct dma_buf_attachment *attach)
795 {
796     struct dma_buf *dmabuf = attach->dmabuf;
797     int ret = 0;
798 
799     dma_resv_assert_held(dmabuf->resv);
800 
801     if (dmabuf->ops->pin) {
802         ret = dmabuf->ops->pin(attach);
803     }
804 
805     return ret;
806 }
807 EXPORT_SYMBOL_GPL(dma_buf_pin);
808 
809 /**
810  * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list; optionally,
811  * calls attach() of dma_buf_ops to allow device-specific attach functionality
812  * @dmabuf:        [in]    buffer to attach device to.
813  * @dev:        [in]    device to be attached.
814  * @importer_ops:    [in]    importer operations for the attachment
815  * @importer_priv:    [in]    importer private pointer for the attachment
816  *
817  * Returns struct dma_buf_attachment pointer for this attachment. Attachments
818  * must be cleaned up by calling dma_buf_detach().
819  *
820  * Returns
821  *
822  * A pointer to newly created &dma_buf_attachment on success, or a negative
823  * error code wrapped into a pointer on failure.
824  *
825  * Note that this can fail if the backing storage of @dmabuf is in a place not
826  * accessible to @dev, and cannot be moved to a more suitable place. This is
827  * indicated with the error code -EBUSY.
828  */
829 struct dma_buf_attachment *dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
830                                                   const struct dma_buf_attach_ops *importer_ops, void *importer_priv)
831 {
832     struct dma_buf_attachment *attach;
833     int ret;
834 
835     if (WARN_ON(!dmabuf || !dev)) {
836         return ERR_PTR(-EINVAL);
837     }
838 
839     if (WARN_ON(importer_ops && !importer_ops->move_notify)) {
840         return ERR_PTR(-EINVAL);
841     }
842 
843     attach = kzalloc(sizeof(*attach), GFP_KERNEL);
844     if (!attach) {
845         return ERR_PTR(-ENOMEM);
846     }
847 
848     attach->dev = dev;
849     attach->dmabuf = dmabuf;
850     if (importer_ops) {
851         attach->peer2peer = importer_ops->allow_peer2peer;
852     }
853     attach->importer_ops = importer_ops;
854     attach->importer_priv = importer_priv;
855 
856     if (dmabuf->ops->attach) {
857         ret = dmabuf->ops->attach(dmabuf, attach);
858         if (ret) {
859             goto err_attach;
860         }
861     }
862     dma_resv_lock(dmabuf->resv, NULL);
863     list_add(&attach->node, &dmabuf->attachments);
864     dma_resv_unlock(dmabuf->resv);
865 
866     /* When either the importer or the exporter can't handle dynamic
867      * mappings we cache the mapping here to avoid issues with the
868      * reservation object lock.
869      */
870     if (dma_buf_attachment_is_dynamic(attach) != dma_buf_is_dynamic(dmabuf)) {
871         struct sg_table *sgt;
872 
873         if (dma_buf_is_dynamic(attach->dmabuf)) {
874             dma_resv_lock(attach->dmabuf->resv, NULL);
875             ret = dma_buf_pin(attach);
876             if (ret) {
877                 goto err_unlock;
878             }
879         }
880 
881         sgt = dmabuf->ops->map_dma_buf(attach, DMA_BIDIRECTIONAL);
882         if (!sgt) {
883             sgt = ERR_PTR(-ENOMEM);
884         }
885         if (IS_ERR(sgt)) {
886             ret = PTR_ERR(sgt);
887             goto err_unpin;
888         }
889         if (dma_buf_is_dynamic(attach->dmabuf)) {
890             dma_resv_unlock(attach->dmabuf->resv);
891         }
892         attach->sgt = sgt;
893         attach->dir = DMA_BIDIRECTIONAL;
894     }
895 
896     return attach;
897 
898 err_attach:
899     kfree(attach);
900     return ERR_PTR(ret);
901 
902 err_unpin:
903     if (dma_buf_is_dynamic(attach->dmabuf)) {
904         dma_buf_unpin(attach);
905     }
906 
907 err_unlock:
908     if (dma_buf_is_dynamic(attach->dmabuf)) {
909         dma_resv_unlock(attach->dmabuf->resv);
910     }
911 
912     dma_buf_detach(dmabuf, attach);
913     return ERR_PTR(ret);
914 }
915 EXPORT_SYMBOL_GPL(dma_buf_dynamic_attach);
916 
917 /**
918  * dma_buf_attach - Wrapper for dma_buf_dynamic_attach
919  * @dmabuf:    [in]    buffer to attach device to.
920  * @dev:    [in]    device to be attached.
921  *
922  * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static
923  * mapping.
924  */
925 struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf, struct device *dev)
926 {
927     return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
928 }
929 EXPORT_SYMBOL_GPL(dma_buf_attach);
930 
931 /**
932  * dma_buf_detach - Remove the given attachment from dmabuf's attachments list;
933  * optionally calls detach() of dma_buf_ops for device-specific detach
934  * @dmabuf:    [in]    buffer to detach from.
935  * @attach:    [in]    attachment to be detached; is free'd after this call.
936  *
937  * Clean up a device attachment obtained by calling dma_buf_attach().
938  */
939 void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
940 {
941     if (WARN_ON(!dmabuf || !attach)) {
942         return;
943     }
944 
945     if (attach->sgt) {
946         if (dma_buf_is_dynamic(attach->dmabuf)) {
947             dma_resv_lock(attach->dmabuf->resv, NULL);
948         }
949 
950         dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);
951 
952         if (dma_buf_is_dynamic(attach->dmabuf)) {
953             dma_buf_unpin(attach);
954             dma_resv_unlock(attach->dmabuf->resv);
955         }
956     }
957 
958     dma_resv_lock(dmabuf->resv, NULL);
959     list_del(&attach->node);
960     dma_resv_unlock(dmabuf->resv);
961     if (dmabuf->ops->detach) {
962         dmabuf->ops->detach(dmabuf, attach);
963     }
964 
965     kfree(attach);
966 }
967 EXPORT_SYMBOL_GPL(dma_buf_detach);
968 
969 /**
970  * dma_buf_unpin - Remove lock from DMA-buf
971  *
972  * @attach:    [in]    attachment which should be unpinned
973  */
974 void dma_buf_unpin(struct dma_buf_attachment *attach)
975 {
976     struct dma_buf *dmabuf = attach->dmabuf;
977 
978     dma_resv_assert_held(dmabuf->resv);
979 
980     if (dmabuf->ops->unpin) {
981         dmabuf->ops->unpin(attach);
982     }
983 }
984 EXPORT_SYMBOL_GPL(dma_buf_unpin);
985 
986 /**
987  * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
988  * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
989  * dma_buf_ops.
990  * @attach:    [in]    attachment whose scatterlist is to be returned
991  * @direction:    [in]    direction of DMA transfer
992  *
993  * Returns the sg_table containing the scatterlist of the buffer, or ERR_PTR
994  * on error. May return -EINTR if it is interrupted by a signal.
995  *
996  * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
997  * the underlying backing storage is pinned for as long as a mapping exists,
998  * therefore users/importers should not hold onto a mapping for undue amounts of
999  * time.
1000  */
1001 struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach, enum dma_data_direction direction)
1002 {
1003     struct sg_table *sg_table;
1004     int r;
1005 
1006     might_sleep();
1007 
1008     if (WARN_ON(!attach || !attach->dmabuf)) {
1009         return ERR_PTR(-EINVAL);
1010     }
1011 
1012     if (dma_buf_attachment_is_dynamic(attach)) {
1013         dma_resv_assert_held(attach->dmabuf->resv);
1014     }
1015 
1016     if (attach->sgt) {
1017         /*
1018          * Two mappings with different directions for the same
1019          * attachment are not allowed.
1020          */
1021         if (attach->dir != direction && attach->dir != DMA_BIDIRECTIONAL) {
1022             return ERR_PTR(-EBUSY);
1023         }
1024 
1025         return attach->sgt;
1026     }
1027 
1028     if (dma_buf_is_dynamic(attach->dmabuf)) {
1029         dma_resv_assert_held(attach->dmabuf->resv);
1030         if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
1031             r = dma_buf_pin(attach);
1032             if (r) {
1033                 return ERR_PTR(r);
1034             }
1035         }
1036     }
1037 
1038     sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
1039     if (!sg_table) {
1040         sg_table = ERR_PTR(-ENOMEM);
1041     }
1042 
1043     if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) && !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
1044         dma_buf_unpin(attach);
1045     }
1046 
1047     if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
1048         attach->sgt = sg_table;
1049         attach->dir = direction;
1050     }
1051 
1052     return sg_table;
1053 }
1054 EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
1055 
1056 /**
1057  * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
1058  * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
1059  * dma_buf_ops.
1060  * @attach:    [in]    attachment to unmap buffer from
1061  * @sg_table:    [in]    scatterlist info of the buffer to unmap
1062  * @direction:  [in]    direction of DMA transfer
1063  *
1064  * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
1065  */
1066 void dma_buf_unmap_attachment(struct dma_buf_attachment *attach, struct sg_table *sg_table,
1067                               enum dma_data_direction direction)
1068 {
1069     might_sleep();
1070 
1071     if (WARN_ON(!attach || !attach->dmabuf || !sg_table)) {
1072         return;
1073     }
1074 
1075     if (dma_buf_attachment_is_dynamic(attach)) {
1076         dma_resv_assert_held(attach->dmabuf->resv);
1077     }
1078 
1079     if (attach->sgt == sg_table) {
1080         return;
1081     }
1082 
1083     if (dma_buf_is_dynamic(attach->dmabuf)) {
1084         dma_resv_assert_held(attach->dmabuf->resv);
1085     }
1086 
1087     attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
1088 
1089     if (dma_buf_is_dynamic(attach->dmabuf) && !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
1090         dma_buf_unpin(attach);
1091     }
1092 }
1093 EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
1094 
1095 /**
1096  * dma_buf_move_notify - notify attachments that DMA-buf is moving
1097  *
1098  * @dmabuf:    [in]    buffer which is moving
1099  *
1100  * Informs all attachments that they need to destroy and recreate all their
1101  * mappings.
1102  */
1103 void dma_buf_move_notify(struct dma_buf *dmabuf)
1104 {
1105     struct dma_buf_attachment *attach;
1106 
1107     dma_resv_assert_held(dmabuf->resv);
1108 
1109     list_for_each_entry(attach, &dmabuf->attachments, node) if (attach->importer_ops)
1110         attach->importer_ops->move_notify(attach);
1111 }
1112 EXPORT_SYMBOL_GPL(dma_buf_move_notify);
1113 
1114 /**
1115  * DOC: cpu access
1116  *
1117  * There are multiple reasons for supporting CPU access to a dma buffer object:
1118  *
1119  * - Fallback operations in the kernel, for example when a device is connected
1120  *   over USB and the kernel needs to shuffle the data around first before
1121  *   sending it away. Cache coherency is handled by bracketing any
1122  *   transactions with calls to dma_buf_begin_cpu_access() and
1123  *   dma_buf_end_cpu_access().
1124  *
1125  *   Since most kernel-internal dma-buf accesses need the entire buffer, a
1126  *   vmap interface is introduced. Note that on very old 32-bit architectures
1127  *   vmalloc space might be limited and result in vmap calls failing.
1128  *
1129  *   Interfaces::
1130  *      void \*dma_buf_vmap(struct dma_buf \*dmabuf)
1131  *      void dma_buf_vunmap(struct dma_buf \*dmabuf, void \*vaddr)
1132  *
1133  *   The vmap call can fail if there is no vmap support in the exporter, or if
1134  *   it runs out of vmalloc space. Fallback to kmap should be implemented. Note
1135  *   that the dma-buf layer keeps a reference count for all vmap access and
1136  *   calls down into the exporter's vmap function only when no vmapping exists,
1137  *   and only unmaps it once. Protection against concurrent vmap/vunmap calls is
1138  *   provided by taking the dma_buf->lock mutex.
1139  *
1140  * - For full compatibility on the importer side with existing userspace
1141  *   interfaces, which might already support mmap'ing buffers. This is needed in
1142  *   many processing pipelines (e.g. feeding a software rendered image into a
1143  *   hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION
1144  *   framework already supported this and for DMA buffer file descriptors to
1145  *   replace ION buffers mmap support was needed.
1146  *
1147  *   There are no special interfaces; userspace simply calls mmap on the dma-buf
1148  *   fd. But as with CPU access there's a need to bracket the actual access,
1149  *   which is handled by the DMA_BUF_IOCTL_SYNC ioctl. Note that
1150  *   DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
1151  *   be restarted.
1152  *
1153  *   Some systems might need some sort of cache coherency management e.g. when
1154  *   CPU and GPU domains are being accessed through dma-buf at the same time.
1155  *   To circumvent this problem there are begin/end coherency markers, that
1156  *   forward directly to existing dma-buf device drivers vfunc hooks. Userspace
1157  *   can make use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The
1158  *   sequence would be used like following:
1159  *
1160  *     - mmap dma-buf fd
1161  *     - for each drawing/upload cycle on the CPU: 1. SYNC_START ioctl, 2. read/write
1162  *       to the mmap'ed area, 3. SYNC_END ioctl. This can be repeated as often as you
1163  *       want (with the new data being consumed by say the GPU or the scanout
1164  *       device)
1165  *     - munmap once you don't need the buffer any more
1166  *
1167  *    For correctness and optimal performance, it is always required to use
1168  *    SYNC_START and SYNC_END before and after, respectively, when accessing the
1169  *    mapped address. Userspace cannot rely on coherent access, even when there
1170  *    are systems where it just works without calling these ioctls.
1171  *
1172  * - And as a CPU fallback in userspace processing pipelines.
1173  *
1174  *   Similar to the motivation for kernel cpu access it is again important that
1175  *   the userspace code of a given importing subsystem can use the same
1176  *   interfaces with an imported dma-buf buffer object as with a native buffer
1177  *   object. This is especially important for drm where the userspace part of
1178  *   contemporary OpenGL, X, and other drivers is huge, and reworking them to
1179  *   use a different way to mmap a buffer would be rather invasive.
1180  *
1181  *   The assumption in the current dma-buf interfaces is that redirecting the
1182  *   initial mmap is all that's needed. A survey of some of the existing
1183  *   subsystems shows that no driver seems to do any nefarious thing like
1184  *   syncing up with outstanding asynchronous processing on the device or
1185  *   allocating special resources at fault time. So hopefully this is good
1186  *   enough, since adding interfaces to intercept pagefaults and allow pte
1187  *   shootdowns would increase the complexity quite a bit.
1188  *
1189  *   Interface::
1190  *      int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
1191  *               unsigned long);
1192  *
1193  *   If the importing subsystem simply provides a special-purpose mmap call to
1194  *   set up a mapping in userspace, calling do_mmap with dma_buf->file will
1195  *   equally achieve that for a dma-buf object.
1196  */
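/*
 * Illustrative userspace sketch of the SYNC_START/SYNC_END bracketing
 * described above (assumes 'fd' and 'size' describe an existing dma-buf;
 * error handling omitted):
 *
 *     struct dma_buf_sync sync = { 0 };
 *     void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 *     sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
 *     ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
 *
 *     // CPU writes through ptr happen here
 *
 *     sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
 *     ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
 *
 *     munmap(ptr, size);
 */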
1197 
1198 static int _dma_buf_begin_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction direction)
1199 {
1200     bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
1201     struct dma_resv *resv = dmabuf->resv;
1202     long ret;
1203 
1204     /* Wait on any implicit rendering fences */
1205     ret = dma_resv_wait_timeout_rcu(resv, write, true, MAX_SCHEDULE_TIMEOUT);
1206     if (ret < 0) {
1207         return ret;
1208     }
1209 
1210     return 0;
1211 }
1212 
1213 /**
1214  * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
1215  * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
1216  * preparations. Coherency is only guaranteed in the specified range for the
1217  * specified access direction.
1218  * @dmabuf:    [in]    buffer to prepare cpu access for.
1219  * @direction:    [in]    direction of the CPU access.
1220  *
1221  * After the cpu access is complete the caller should call
1222  * dma_buf_end_cpu_access(). Only when cpu access is braketed by both calls is
1223  * it guaranteed to be coherent with other DMA access.
1224  *
1225  * Can return negative error values, returns 0 on success.
1226  */
1227 int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction direction)
1228 {
1229     int ret = 0;
1230 
1231     if (WARN_ON(!dmabuf)) {
1232         return -EINVAL;
1233     }
1234 
1235     if (dmabuf->ops->begin_cpu_access) {
1236         ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
1237     }
1238 
1239     /* Ensure that all fences are waited upon - but we first allow
1240      * the native handler the chance to do so more efficiently if it
1241      * chooses. A double invocation here will be a reasonably cheap no-op.
1242      */
1243     if (ret == 0) {
1244         ret = _dma_buf_begin_cpu_access(dmabuf, direction);
1245     }
1246 
1247     return ret;
1248 }
1249 EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
1250 
1251 int dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf, enum dma_data_direction direction, unsigned int offset,
1252                                      unsigned int len)
1253 {
1254     int ret = 0;
1255 
1256     if (WARN_ON(!dmabuf)) {
1257         return -EINVAL;
1258     }
1259 
1260     if (dmabuf->ops->begin_cpu_access_partial) {
1261         ret = dmabuf->ops->begin_cpu_access_partial(dmabuf, direction, offset, len);
1262     }
1263 
1264     /* Ensure that all fences are waited upon - but we first allow
1265      * the native handler the chance to do so more efficiently if it
1266      * chooses. A double invocation here will be a reasonably cheap no-op.
1267      */
1268     if (ret == 0) {
1269         ret = _dma_buf_begin_cpu_access(dmabuf, direction);
1270     }
1271 
1272     return ret;
1273 }
1274 EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access_partial);
1275 
1276 /**
1277  * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
1278  * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
1279  * actions. Coherency is only guaranteed in the specified range for the
1280  * specified access direction.
1281  * @dmabuf:    [in]    buffer to complete cpu access for.
1282  * @direction:    [in]    direction of the CPU access.
1283  *
1284  * This terminates CPU access started with dma_buf_begin_cpu_access().
1285  *
1286  * Can return negative error values, returns 0 on success.
1287  */
1288 int dma_buf_end_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction direction)
1289 {
1290     int ret = 0;
1291 
1292     WARN_ON(!dmabuf);
1293 
1294     if (dmabuf->ops->end_cpu_access) {
1295         ret = dmabuf->ops->end_cpu_access(dmabuf, direction);
1296     }
1297 
1298     return ret;
1299 }
1300 EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
1301 
1302 int dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf, enum dma_data_direction direction, unsigned int offset,
1303                                    unsigned int len)
1304 {
1305     int ret = 0;
1306 
1307     WARN_ON(!dmabuf);
1308 
1309     if (dmabuf->ops->end_cpu_access_partial) {
1310         ret = dmabuf->ops->end_cpu_access_partial(dmabuf, direction, offset, len);
1311     }
1312 
1313     return ret;
1314 }
1315 EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access_partial);
1316 
1317 /**
1318  * dma_buf_mmap - Set up a userspace mmap with the given vma
1319  * @dmabuf:    [in]    buffer that should back the vma
1320  * @vma:    [in]    vma for the mmap
1321  * @pgoff:    [in]    offset in pages where this mmap should start within the
1322  *            dma-buf buffer.
1323  *
1324  * This function adjusts the passed in vma so that it points at the file of the
1325  * dma_buf operation. It also adjusts the starting pgoff and does bounds
1326  * checking on the size of the vma. Then it calls the exporter's mmap function to
1327  * set up the mapping.
1328  *
1329  * Can return negative error values, returns 0 on success.
1330  */
1331 int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma, unsigned long pgoff)
1332 {
1333     struct file *oldfile;
1334     int ret;
1335 
1336     if (WARN_ON(!dmabuf || !vma)) {
1337         return -EINVAL;
1338     }
1339 
1340     /* check if buffer supports mmap */
1341     if (!dmabuf->ops->mmap) {
1342         return -EINVAL;
1343     }
1344 
1345     /* check for offset overflow */
1346     if (pgoff + vma_pages(vma) < pgoff) {
1347         return -EOVERFLOW;
1348     }
1349 
1350     /* check for overflowing the buffer's size */
1351     if ((pgoff + vma_pages(vma)) > (dmabuf->size >> PAGE_SHIFT)) {
1352         return -EINVAL;
1353     }
1354 
1355     /* readjust the vma */
1356     get_file(dmabuf->file);
1357     oldfile = vma->vm_file;
1358     vma->vm_file = dmabuf->file;
1359     vma->vm_pgoff = pgoff;
1360 
1361     ret = dmabuf->ops->mmap(dmabuf, vma);
1362     if (ret) {
1363         /* restore old parameters on failure */
1364         vma->vm_file = oldfile;
1365         fput(dmabuf->file);
1366     } else {
1367         if (oldfile) {
1368             fput(oldfile);
1369         }
1370     }
1371     return ret;
1372 }
1373 EXPORT_SYMBOL_GPL(dma_buf_mmap);
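/*
 * Illustrative importer-side sketch (my_drv_mmap() and my_drv_lookup_buf()
 * are hypothetical; error handling trimmed): an importing driver can forward
 * its own mmap file operation to the exporter through dma_buf_mmap():
 *
 *     static int my_drv_mmap(struct file *file, struct vm_area_struct *vma)
 *     {
 *         struct dma_buf *dmabuf = my_drv_lookup_buf(file, vma->vm_pgoff);
 *
 *         return dma_buf_mmap(dmabuf, vma, 0);
 *     }
 */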
1374 
1375 /**
1376  * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
1377  * address space. Same restrictions as for vmap and friends apply.
1378  * @dmabuf:    [in]    buffer to vmap
1379  *
1380  * This call may fail due to lack of virtual mapping address space.
1381  * These calls are optional in drivers. The intended use for them
1382  * is for mapping objects linear in kernel space for high use objects.
1383  * Please attempt to use kmap/kunmap before thinking about these interfaces.
1384  *
1385  * Returns NULL on error.
1386  */
1387 void *dma_buf_vmap(struct dma_buf *dmabuf)
1388 {
1389     void *ptr;
1390 
1391     if (WARN_ON(!dmabuf)) {
1392         return NULL;
1393     }
1394 
1395     if (!dmabuf->ops->vmap) {
1396         return NULL;
1397     }
1398 
1399     mutex_lock(&dmabuf->lock);
1400     if (dmabuf->vmapping_counter) {
1401         dmabuf->vmapping_counter++;
1402         BUG_ON(!dmabuf->vmap_ptr);
1403         ptr = dmabuf->vmap_ptr;
1404         goto out_unlock;
1405     }
1406 
1407     BUG_ON(dmabuf->vmap_ptr);
1408 
1409     ptr = dmabuf->ops->vmap(dmabuf);
1410     if (WARN_ON_ONCE(IS_ERR(ptr))) {
1411         ptr = NULL;
1412     }
1413     if (!ptr) {
1414         goto out_unlock;
1415     }
1416 
1417     dmabuf->vmap_ptr = ptr;
1418     dmabuf->vmapping_counter = 1;
1419 
1420 out_unlock:
1421     mutex_unlock(&dmabuf->lock);
1422     return ptr;
1423 }
1424 EXPORT_SYMBOL_GPL(dma_buf_vmap);
1425 
1426 /**
1427  * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
1428  * @dmabuf:    [in]    buffer to vunmap
1429  * @vaddr:    [in]    vmap to vunmap
1430  */
1431 void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
1432 {
1433     if (WARN_ON(!dmabuf)) {
1434         return;
1435     }
1436 
1437     BUG_ON(!dmabuf->vmap_ptr);
1438     BUG_ON(dmabuf->vmapping_counter == 0);
1439     BUG_ON(dmabuf->vmap_ptr != vaddr);
1440 
1441     mutex_lock(&dmabuf->lock);
1442     if (--dmabuf->vmapping_counter == 0) {
1443         if (dmabuf->ops->vunmap) {
1444             dmabuf->ops->vunmap(dmabuf, vaddr);
1445         }
1446         dmabuf->vmap_ptr = NULL;
1447     }
1448     mutex_unlock(&dmabuf->lock);
1449 }
1450 EXPORT_SYMBOL_GPL(dma_buf_vunmap);
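/*
 * Illustrative kernel-side sketch (error handling omitted): importers that
 * need a linear CPU mapping of the whole buffer pair dma_buf_vmap() with
 * dma_buf_vunmap():
 *
 *     void *vaddr = dma_buf_vmap(dmabuf);
 *
 *     if (vaddr) {
 *         // CPU access through vaddr, ideally bracketed by
 *         // dma_buf_begin_cpu_access()/dma_buf_end_cpu_access()
 *         dma_buf_vunmap(dmabuf, vaddr);
 *     }
 */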
1451 
1452 int dma_buf_get_flags(struct dma_buf *dmabuf, unsigned long *flags)
1453 {
1454     int ret = 0;
1455 
1456     if (WARN_ON(!dmabuf) || !flags) {
1457         return -EINVAL;
1458     }
1459 
1460     if (dmabuf->ops->get_flags) {
1461         ret = dmabuf->ops->get_flags(dmabuf, flags);
1462     }
1463 
1464     return ret;
1465 }
1466 EXPORT_SYMBOL_GPL(dma_buf_get_flags);
1467 
1468 int dma_buf_get_uuid(struct dma_buf *dmabuf, uuid_t *uuid)
1469 {
1470     if (WARN_ON(!dmabuf) || !uuid) {
1471         return -EINVAL;
1472     }
1473 
1474     if (!dmabuf->ops->get_uuid) {
1475         return -ENODEV;
1476     }
1477 
1478     return dmabuf->ops->get_uuid(dmabuf, uuid);
1479 }
1480 EXPORT_SYMBOL_GPL(dma_buf_get_uuid);
1481 
1482 #ifdef CONFIG_DEBUG_FS
1483 static int dma_buf_debug_show(struct seq_file *s, void *unused)
1484 {
1485     int ret;
1486     struct dma_buf *buf_obj;
1487     struct dma_buf_attachment *attach_obj;
1488     struct dma_resv *robj;
1489     struct dma_resv_list *fobj;
1490     struct dma_fence *fence;
1491     unsigned seq;
1492     int count = 0, attach_count, shared_count, i;
1493     size_t size = 0;
1494 
1495     ret = mutex_lock_interruptible(&db_list.lock);
1496     if (ret) {
1497         return ret;
1498     }
1499 
1500     seq_puts(s, "\nDma-buf Objects:\n");
1501     seq_printf(s,
1502                "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\t"
1503                "%-16s\t%-16s\t%-16s\n",
1504                "size", "flags", "mode", "count", "ino", "buf_name", "exp_pid", "exp_task_comm");
1505 
1506     list_for_each_entry(buf_obj, &db_list.head, list_node)
1507     {
1508         ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
1509         if (ret) {
1510             goto error_unlock;
1511         }
1512 
1513         seq_printf(s,
1514                    "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\t"
1515                    "%-16d\t%-16s\n",
1516                    buf_obj->size, buf_obj->file->f_flags, buf_obj->file->f_mode, file_count(buf_obj->file),
1517                    buf_obj->exp_name, file_inode(buf_obj->file)->i_ino, buf_obj->name ?: "NULL",
1518                    dma_buf_exp_pid(buf_obj), dma_buf_exp_task_comm(buf_obj) ?: "NULL");
1519 
1520         robj = buf_obj->resv;
1521         while (true) {
1522             seq = read_seqcount_begin(&robj->seq);
1523             rcu_read_lock();
1524             fobj = rcu_dereference(robj->fence);
1525             shared_count = fobj ? fobj->shared_count : 0;
1526             fence = rcu_dereference(robj->fence_excl);
1527             if (!read_seqcount_retry(&robj->seq, seq)) {
1528                 break;
1529             }
1530             rcu_read_unlock();
1531         }
1532 
1533         if (fence) {
1534             seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n", fence->ops->get_driver_name(fence),
1535                        fence->ops->get_timeline_name(fence), dma_fence_is_signaled(fence) ? "" : "un");
1536         }
1537         for (i = 0; i < shared_count; i++) {
1538             fence = rcu_dereference(fobj->shared[i]);
1539             if (!dma_fence_get_rcu(fence)) {
1540                 continue;
1541             }
1542             seq_printf(s, "\tShared fence: %s %s %ssignalled\n", fence->ops->get_driver_name(fence),
1543                        fence->ops->get_timeline_name(fence), dma_fence_is_signaled(fence) ? "" : "un");
1544             dma_fence_put(fence);
1545         }
1546         rcu_read_unlock();
1547 
1548         seq_puts(s, "\tAttached Devices:\n");
1549         attach_count = 0;
1550 
1551         list_for_each_entry(attach_obj, &buf_obj->attachments, node)
1552         {
1553             seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
1554             attach_count++;
1555         }
1556         dma_resv_unlock(buf_obj->resv);
1557 
1558         seq_printf(s, "Total %d devices attached\n\n", attach_count);
1559 
1560         count++;
1561         size += buf_obj->size;
1562     }
1563 
1564     seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);
1565 
1566     mutex_unlock(&db_list.lock);
1567     return 0;
1568 
1569 error_unlock:
1570     mutex_unlock(&db_list.lock);
1571     return ret;
1572 }
1573 
1574 DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);
1575 
1576 static struct dentry *dma_buf_debugfs_dir;
1577 
1578 static int dma_buf_init_debugfs(void)
1579 {
1580     struct dentry *d;
1581     int err = 0;
1582 
1583     d = debugfs_create_dir("dma_buf", NULL);
1584     if (IS_ERR(d)) {
1585         return PTR_ERR(d);
1586     }
1587 
1588     dma_buf_debugfs_dir = d;
1589 
1590     d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir, NULL, &dma_buf_debug_fops);
1591     if (IS_ERR(d)) {
1592         pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
1593         debugfs_remove_recursive(dma_buf_debugfs_dir);
1594         dma_buf_debugfs_dir = NULL;
1595         err = PTR_ERR(d);
1596     }
1597 
1598     dma_buf_process_info_init_debugfs(dma_buf_debugfs_dir);
1599     return err;
1600 }
1601 
1602 static void dma_buf_uninit_debugfs(void)
1603 {
1604     debugfs_remove_recursive(dma_buf_debugfs_dir);
1605 }
1606 #else
1607 static inline int dma_buf_init_debugfs(void)
1608 {
1609     return 0;
1610 }
1611 static inline void dma_buf_uninit_debugfs(void)
1612 {
1613 }
1614 #endif
1615 
1616 #ifdef CONFIG_DMABUF_PROCESS_INFO
1617 struct dma_buf *get_dma_buf_from_file(struct file *f)
1618 {
1619     if (IS_ERR_OR_NULL(f)) {
1620         return NULL;
1621     }
1622 
1623     if (!is_dma_buf_file(f)) {
1624         return NULL;
1625     }
1626 
1627     return f->private_data;
1628 }
1629 #endif /* CONFIG_DMABUF_PROCESS_INFO */
1630 
1631 static int __init dma_buf_init(void)
1632 {
1633     int ret;
1634 
1635     ret = dma_buf_init_sysfs_statistics();
1636     if (ret) {
1637         return ret;
1638     }
1639 
1640     dma_buf_mnt = kern_mount(&dma_buf_fs_type);
1641     if (IS_ERR(dma_buf_mnt)) {
1642         return PTR_ERR(dma_buf_mnt);
1643     }
1644 
1645     mutex_init(&db_list.lock);
1646     INIT_LIST_HEAD(&db_list.head);
1647     dma_buf_init_debugfs();
1648     dma_buf_process_info_init_procfs();
1649     return 0;
1650 }
1651 subsys_initcall(dma_buf_init);
1652 
1653 static void __exit dma_buf_deinit(void)
1654 {
1655     dma_buf_uninit_debugfs();
1656     kern_unmount(dma_buf_mnt);
1657     dma_buf_uninit_sysfs_statistics();
1658     dma_buf_process_info_uninit_procfs();
1659 }
1660 __exitcall(dma_buf_deinit);
1661