1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Framework for buffer objects that can be shared across devices/subsystems.
4 *
5 * Copyright(C) 2011 Linaro Limited. All rights reserved.
6 * Author: Sumit Semwal <sumit.semwal@ti.com>
7 *
8 * Many thanks to the linaro-mm-sig list, and especially
9 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
10 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
11 * refining of this idea.
12 */
13
14 #include <linux/fs.h>
15 #include <linux/slab.h>
16 #include <linux/dma-buf.h>
17 #include <linux/dma-fence.h>
18 #include <linux/dma-fence-unwrap.h>
19 #include <linux/anon_inodes.h>
20 #include <linux/export.h>
21 #include <linux/debugfs.h>
22 #include <linux/module.h>
23 #include <linux/seq_file.h>
24 #include <linux/sync_file.h>
25 #include <linux/poll.h>
26 #include <linux/dma-resv.h>
27 #include <linux/mm.h>
28 #include <linux/mount.h>
29 #include <linux/pseudo_fs.h>
30
31 #include <uapi/linux/dma-buf.h>
32 #include <uapi/linux/magic.h>
33
34 #include "dma-buf-sysfs-stats.h"
35
36 struct dma_buf_list {
37 struct list_head head;
38 struct mutex lock;
39 };
40
41 static struct dma_buf_list db_list;
42
43 /**
44 * dma_buf_get_each - Traverse db_list and invoke @callback for each
45 * dma_buf so that the caller can extract the required information from
46 * each buffer.
47 * db_list is held locked for the duration of the traversal so that it
48 * cannot be modified concurrently.
49 *
50 * @callback: [in] Called once for each dma_buf in db_list.
51 * @private: [in] User-defined pointer passed unchanged to every
52 * invocation of @callback.
53 *
54 * Returns 0 on success, otherwise the non-zero value returned by
55 * mutex_lock_interruptible() or by @callback.
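 *
 * As an illustration only, a minimal (hypothetical) callback that sums up
 * the sizes of all exported buffers could look like this::
 *
 *	static int total_size_cb(const struct dma_buf *dmabuf, void *private)
 *	{
 *		size_t *total = private;
 *
 *		*total += dmabuf->size;
 *		return 0;
 *	}
 *
 *	size_t total = 0;
 *	int err = dma_buf_get_each(total_size_cb, &total);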
56 */
57 int dma_buf_get_each(int (*callback)(const struct dma_buf *dmabuf,
58 void *private), void *private)
59 {
60 struct dma_buf *buf;
61 int ret = mutex_lock_interruptible(&db_list.lock);
62
63 if (ret)
64 return ret;
65
66 list_for_each_entry(buf, &db_list.head, list_node) {
67 ret = callback(buf, private);
68 if (ret)
69 break;
70 }
71 mutex_unlock(&db_list.lock);
72 return ret;
73 }
74 EXPORT_SYMBOL_NS_GPL(dma_buf_get_each, MINIDUMP);
75
76 static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
77 {
78 struct dma_buf *dmabuf;
79 char name[DMA_BUF_NAME_LEN];
80 size_t ret = 0;
81
82 dmabuf = dentry->d_fsdata;
83 spin_lock(&dmabuf->name_lock);
84 if (dmabuf->name)
85 ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
86 spin_unlock(&dmabuf->name_lock);
87
88 return dynamic_dname(buffer, buflen, "/%s:%s",
89 dentry->d_name.name, ret > 0 ? name : "");
90 }
91
92 static void dma_buf_release(struct dentry *dentry)
93 {
94 struct dma_buf *dmabuf;
95
96 dmabuf = dentry->d_fsdata;
97 if (unlikely(!dmabuf))
98 return;
99
100 BUG_ON(dmabuf->vmapping_counter);
101
102 /*
103 * If you hit this BUG() it could mean:
104 * * There's a file reference imbalance in dma_buf_poll / dma_buf_poll_cb or somewhere else
105 * * dmabuf->cb_in/out.active are non-0 despite no pending fence callback
106 */
107 BUG_ON(dmabuf->cb_in.active || dmabuf->cb_out.active);
108
109 dma_buf_stats_teardown(dmabuf);
110 dmabuf->ops->release(dmabuf);
111
112 if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
113 dma_resv_fini(dmabuf->resv);
114
115 WARN_ON(!list_empty(&dmabuf->attachments));
116 module_put(dmabuf->owner);
117 kfree(dmabuf->name);
118 kfree(dmabuf);
119 }
120
121 static int dma_buf_file_release(struct inode *inode, struct file *file)
122 {
123 struct dma_buf *dmabuf;
124
125 if (!is_dma_buf_file(file))
126 return -EINVAL;
127
128 dmabuf = file->private_data;
129 if (dmabuf) {
130 mutex_lock(&db_list.lock);
131 list_del(&dmabuf->list_node);
132 mutex_unlock(&db_list.lock);
133 }
134
135 return 0;
136 }
137
138 static const struct dentry_operations dma_buf_dentry_ops = {
139 .d_dname = dmabuffs_dname,
140 .d_release = dma_buf_release,
141 };
142
143 static struct vfsmount *dma_buf_mnt;
144
145 static int dma_buf_fs_init_context(struct fs_context *fc)
146 {
147 struct pseudo_fs_context *ctx;
148
149 ctx = init_pseudo(fc, DMA_BUF_MAGIC);
150 if (!ctx)
151 return -ENOMEM;
152 ctx->dops = &dma_buf_dentry_ops;
153 return 0;
154 }
155
156 static struct file_system_type dma_buf_fs_type = {
157 .name = "dmabuf",
158 .init_fs_context = dma_buf_fs_init_context,
159 .kill_sb = kill_anon_super,
160 };
161
162 static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
163 {
164 struct dma_buf *dmabuf;
165
166 if (!is_dma_buf_file(file))
167 return -EINVAL;
168
169 dmabuf = file->private_data;
170
171 /* check if buffer supports mmap */
172 if (!dmabuf->ops->mmap)
173 return -EINVAL;
174
175 /* check for overflowing the buffer's size */
176 if (vma->vm_pgoff + vma_pages(vma) >
177 dmabuf->size >> PAGE_SHIFT)
178 return -EINVAL;
179
180 return dmabuf->ops->mmap(dmabuf, vma);
181 }
182
183 static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
184 {
185 struct dma_buf *dmabuf;
186 loff_t base;
187
188 if (!is_dma_buf_file(file))
189 return -EBADF;
190
191 dmabuf = file->private_data;
192
193 /* only support discovering the end of the buffer,
194 but also allow SEEK_SET to maintain the idiomatic
195 SEEK_END(0), SEEK_CUR(0) pattern */
196 if (whence == SEEK_END)
197 base = dmabuf->size;
198 else if (whence == SEEK_SET)
199 base = 0;
200 else
201 return -EINVAL;
202
203 if (offset != 0)
204 return -EINVAL;
205
206 return base + offset;
207 }
208
209 /**
210 * DOC: implicit fence polling
211 *
212 * To support cross-device and cross-driver synchronization of buffer access
213 * implicit fences (represented internally in the kernel with &struct dma_fence)
214 * can be attached to a &dma_buf. The glue for that and a few related things are
215 * provided in the &dma_resv structure.
216 *
217 * Userspace can query the state of these implicitly tracked fences using poll()
218 * and related system calls:
219 *
220 * - Checking for EPOLLIN, i.e. read access, can be used to query the state of the
221 * most recent write or exclusive fence.
222 *
223 * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
224 * all attached fences, shared and exclusive ones.
225 *
226 * Note that this only signals the completion of the respective fences, i.e. the
227 * DMA transfers are complete. Cache flushing and any other necessary
228 * preparations before CPU access can begin still need to happen.
229 *
230 * As an alternative to poll(), the set of fences on a DMA buffer can be
231 * exported as a &sync_file using &dma_buf_sync_file_export.
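 *
 * As an illustration, a minimal userspace sketch (not taken from any real
 * program; dmabuf_fd is assumed to be an open dma-buf file descriptor) that
 * blocks until the most recent write or exclusive fence has signalled::
 *
 *	struct pollfd pfd = {
 *		.fd = dmabuf_fd,
 *		.events = POLLIN,
 *	};
 *
 *	if (poll(&pfd, 1, -1) < 0)
 *		err(1, "poll");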
232 */
233
234 static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
235 {
236 struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
237 struct dma_buf *dmabuf = container_of(dcb->poll, struct dma_buf, poll);
238 unsigned long flags;
239
240 spin_lock_irqsave(&dcb->poll->lock, flags);
241 wake_up_locked_poll(dcb->poll, dcb->active);
242 dcb->active = 0;
243 spin_unlock_irqrestore(&dcb->poll->lock, flags);
244 dma_fence_put(fence);
245 /* Paired with get_file in dma_buf_poll */
246 fput(dmabuf->file);
247 }
248
249 static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write,
250 struct dma_buf_poll_cb_t *dcb)
251 {
252 struct dma_resv_iter cursor;
253 struct dma_fence *fence;
254 int r;
255
256 dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(write),
257 fence) {
258 dma_fence_get(fence);
259 r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
260 if (!r)
261 return true;
262 dma_fence_put(fence);
263 }
264
265 return false;
266 }
267
268 static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
269 {
270 struct dma_buf *dmabuf;
271 struct dma_resv *resv;
272 __poll_t events;
273
274 dmabuf = file->private_data;
275 if (!dmabuf || !dmabuf->resv)
276 return EPOLLERR;
277
278 resv = dmabuf->resv;
279
280 poll_wait(file, &dmabuf->poll, poll);
281
282 events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
283 if (!events)
284 return 0;
285
286 dma_resv_lock(resv, NULL);
287
288 if (events & EPOLLOUT) {
289 struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_out;
290
291 /* Check that callback isn't busy */
292 spin_lock_irq(&dmabuf->poll.lock);
293 if (dcb->active)
294 events &= ~EPOLLOUT;
295 else
296 dcb->active = EPOLLOUT;
297 spin_unlock_irq(&dmabuf->poll.lock);
298
299 if (events & EPOLLOUT) {
300 /* Paired with fput in dma_buf_poll_cb */
301 get_file(dmabuf->file);
302
303 if (!dma_buf_poll_add_cb(resv, true, dcb))
304 /* No callback queued, wake up any other waiters */
305 dma_buf_poll_cb(NULL, &dcb->cb);
306 else
307 events &= ~EPOLLOUT;
308 }
309 }
310
311 if (events & EPOLLIN) {
312 struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_in;
313
314 /* Check that callback isn't busy */
315 spin_lock_irq(&dmabuf->poll.lock);
316 if (dcb->active)
317 events &= ~EPOLLIN;
318 else
319 dcb->active = EPOLLIN;
320 spin_unlock_irq(&dmabuf->poll.lock);
321
322 if (events & EPOLLIN) {
323 /* Paired with fput in dma_buf_poll_cb */
324 get_file(dmabuf->file);
325
326 if (!dma_buf_poll_add_cb(resv, false, dcb))
327 /* No callback queued, wake up any other waiters */
328 dma_buf_poll_cb(NULL, &dcb->cb);
329 else
330 events &= ~EPOLLIN;
331 }
332 }
333
334 dma_resv_unlock(resv);
335 return events;
336 }
337
338 static void _dma_buf_set_name(struct dma_buf *dmabuf, const char *name)
339 {
340 spin_lock(&dmabuf->name_lock);
341 kfree(dmabuf->name);
342 dmabuf->name = name;
343 spin_unlock(&dmabuf->name_lock);
344 }
345
346 /**
347 * dma_buf_set_name - Set a name on a dma_buf to help track its usage.
348 * The name can be changed later, which is useful when the same piece of
349 * memory is used for multiple purposes by different devices.
350 *
351 * @dmabuf: [in] dmabuf buffer that will be renamed.
352 * @name: [in] A kernel string with the new name of the dma-buf; it is
353 * duplicated, so the caller retains ownership of @name.
354 *
355 * Returns 0 on success, or -ENOMEM if the name could not be
356 * duplicated.
357 *
358 */
359 long dma_buf_set_name(struct dma_buf *dmabuf, const char *name)
360 {
361 char *buf = kstrndup(name, DMA_BUF_NAME_LEN, GFP_KERNEL);
362 if (!buf)
363 return -ENOMEM;
364
365 _dma_buf_set_name(dmabuf, buf);
366
367 return 0;
368 }
369 EXPORT_SYMBOL_GPL(dma_buf_set_name);
370
371 static long dma_buf_set_name_user(struct dma_buf *dmabuf, const char __user *buf)
372 {
373 char *name = strndup_user(buf, DMA_BUF_NAME_LEN);
374
375 if (IS_ERR(name))
376 return PTR_ERR(name);
377
378 _dma_buf_set_name(dmabuf, name);
379
380 return 0;
381 }
382
383 #if IS_ENABLED(CONFIG_SYNC_FILE)
384 static long dma_buf_export_sync_file(struct dma_buf *dmabuf,
385 void __user *user_data)
386 {
387 struct dma_buf_export_sync_file arg;
388 enum dma_resv_usage usage;
389 struct dma_fence *fence = NULL;
390 struct sync_file *sync_file;
391 int fd, ret;
392
393 if (copy_from_user(&arg, user_data, sizeof(arg)))
394 return -EFAULT;
395
396 if (arg.flags & ~DMA_BUF_SYNC_RW)
397 return -EINVAL;
398
399 if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
400 return -EINVAL;
401
402 fd = get_unused_fd_flags(O_CLOEXEC);
403 if (fd < 0)
404 return fd;
405
406 usage = dma_resv_usage_rw(arg.flags & DMA_BUF_SYNC_WRITE);
407 ret = dma_resv_get_singleton(dmabuf->resv, usage, &fence);
408 if (ret)
409 goto err_put_fd;
410
411 if (!fence)
412 fence = dma_fence_get_stub();
413
414 sync_file = sync_file_create(fence);
415
416 dma_fence_put(fence);
417
418 if (!sync_file) {
419 ret = -ENOMEM;
420 goto err_put_fd;
421 }
422
423 arg.fd = fd;
424 if (copy_to_user(user_data, &arg, sizeof(arg))) {
425 ret = -EFAULT;
426 goto err_put_file;
427 }
428
429 fd_install(fd, sync_file->file);
430
431 return 0;
432
433 err_put_file:
434 fput(sync_file->file);
435 err_put_fd:
436 put_unused_fd(fd);
437 return ret;
438 }
439
440 static long dma_buf_import_sync_file(struct dma_buf *dmabuf,
441 const void __user *user_data)
442 {
443 struct dma_buf_import_sync_file arg;
444 struct dma_fence *fence, *f;
445 enum dma_resv_usage usage;
446 struct dma_fence_unwrap iter;
447 unsigned int num_fences;
448 int ret = 0;
449
450 if (copy_from_user(&arg, user_data, sizeof(arg)))
451 return -EFAULT;
452
453 if (arg.flags & ~DMA_BUF_SYNC_RW)
454 return -EINVAL;
455
456 if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
457 return -EINVAL;
458
459 fence = sync_file_get_fence(arg.fd);
460 if (!fence)
461 return -EINVAL;
462
463 usage = (arg.flags & DMA_BUF_SYNC_WRITE) ? DMA_RESV_USAGE_WRITE :
464 DMA_RESV_USAGE_READ;
465
466 num_fences = 0;
467 dma_fence_unwrap_for_each(f, &iter, fence)
468 ++num_fences;
469
470 if (num_fences > 0) {
471 dma_resv_lock(dmabuf->resv, NULL);
472
473 ret = dma_resv_reserve_fences(dmabuf->resv, num_fences);
474 if (!ret) {
475 dma_fence_unwrap_for_each(f, &iter, fence)
476 dma_resv_add_fence(dmabuf->resv, f, usage);
477 }
478
479 dma_resv_unlock(dmabuf->resv);
480 }
481
482 dma_fence_put(fence);
483
484 return ret;
485 }
486 #endif
487
488 static long dma_buf_ioctl(struct file *file,
489 unsigned int cmd, unsigned long arg)
490 {
491 struct dma_buf *dmabuf;
492 struct dma_buf_sync sync;
493 enum dma_data_direction direction;
494 int ret;
495
496 dmabuf = file->private_data;
497
498 switch (cmd) {
499 case DMA_BUF_IOCTL_SYNC:
500 if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
501 return -EFAULT;
502
503 if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
504 return -EINVAL;
505
506 switch (sync.flags & DMA_BUF_SYNC_RW) {
507 case DMA_BUF_SYNC_READ:
508 direction = DMA_FROM_DEVICE;
509 break;
510 case DMA_BUF_SYNC_WRITE:
511 direction = DMA_TO_DEVICE;
512 break;
513 case DMA_BUF_SYNC_RW:
514 direction = DMA_BIDIRECTIONAL;
515 break;
516 default:
517 return -EINVAL;
518 }
519
520 if (sync.flags & DMA_BUF_SYNC_END)
521 ret = dma_buf_end_cpu_access(dmabuf, direction);
522 else
523 ret = dma_buf_begin_cpu_access(dmabuf, direction);
524
525 return ret;
526
527 case DMA_BUF_SET_NAME_A:
528 case DMA_BUF_SET_NAME_B:
529 return dma_buf_set_name_user(dmabuf, (const char __user *)arg);
530
531 #if IS_ENABLED(CONFIG_SYNC_FILE)
532 case DMA_BUF_IOCTL_EXPORT_SYNC_FILE:
533 return dma_buf_export_sync_file(dmabuf, (void __user *)arg);
534 case DMA_BUF_IOCTL_IMPORT_SYNC_FILE:
535 return dma_buf_import_sync_file(dmabuf, (const void __user *)arg);
536 #endif
537
538 default:
539 return -ENOTTY;
540 }
541 }
542
543 static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
544 {
545 struct dma_buf *dmabuf = file->private_data;
546
547 seq_printf(m, "size:\t%zu\n", dmabuf->size);
548 /* Don't count the temporary reference taken inside procfs seq_show */
549 seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
550 seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
551 spin_lock(&dmabuf->name_lock);
552 if (dmabuf->name)
553 seq_printf(m, "name:\t%s\n", dmabuf->name);
554 spin_unlock(&dmabuf->name_lock);
555 }
556
557 static const struct file_operations dma_buf_fops = {
558 .release = dma_buf_file_release,
559 .mmap = dma_buf_mmap_internal,
560 .llseek = dma_buf_llseek,
561 .poll = dma_buf_poll,
562 .unlocked_ioctl = dma_buf_ioctl,
563 .compat_ioctl = compat_ptr_ioctl,
564 .show_fdinfo = dma_buf_show_fdinfo,
565 };
566
567 /*
568 * is_dma_buf_file - Check if struct file* is associated with dma_buf
569 */
570 int is_dma_buf_file(struct file *file)
571 {
572 return file->f_op == &dma_buf_fops;
573 }
574 EXPORT_SYMBOL_NS_GPL(is_dma_buf_file, DMA_BUF);
575
576 static struct file *dma_buf_getfile(size_t size, int flags)
577 {
578 static atomic64_t dmabuf_inode = ATOMIC64_INIT(0);
579 struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);
580 struct file *file;
581
582 if (IS_ERR(inode))
583 return ERR_CAST(inode);
584
585 inode->i_size = size;
586 inode_set_bytes(inode, size);
587
588 /*
589 * The ->i_ino acquired from get_next_ino() is not unique thus
590 * not suitable for using it as dentry name by dmabuf stats.
591 * Override ->i_ino with the unique and dmabuffs specific
592 * value.
593 */
594 inode->i_ino = atomic64_add_return(1, &dmabuf_inode);
595 flags &= O_ACCMODE | O_NONBLOCK;
596 file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
597 flags, &dma_buf_fops);
598 if (IS_ERR(file))
599 goto err_alloc_file;
600
601 return file;
602
603 err_alloc_file:
604 iput(inode);
605 return file;
606 }
607
608 /**
609 * DOC: dma buf device access
610 *
611 * For device DMA access to a shared DMA buffer the usual sequence of operations
612 * is fairly simple:
613 *
614 * 1. The exporter defines its exporter instance using
615 * DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private
616 * buffer object into a &dma_buf. It then exports that &dma_buf to userspace
617 * as a file descriptor by calling dma_buf_fd().
618 *
619 * 2. Userspace passes this file descriptor to all drivers it wants this buffer
620 * to share with: First the file descriptor is converted to a &dma_buf using
621 * dma_buf_get(). Then the buffer is attached to the device using
622 * dma_buf_attach().
623 *
624 * Up to this stage the exporter is still free to migrate or reallocate the
625 * backing storage.
626 *
627 * 3. Once the buffer is attached to all devices userspace can initiate DMA
628 * access to the shared buffer. In the kernel this is done by calling
629 * dma_buf_map_attachment() and dma_buf_unmap_attachment().
630 *
631 * 4. Once a driver is done with a shared buffer it needs to call
632 * dma_buf_detach() (after cleaning up any mappings) and then release the
633 * reference acquired with dma_buf_get() by calling dma_buf_put().
634 *
635 * For the detailed semantics exporters are expected to implement see
636 * &dma_buf_ops.
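 *
 * As an illustration only (identifiers such as my_device_do_dma() are
 * hypothetical and error handling is trimmed), a kernel-side importer
 * following the above sequence could look like this::
 *
 *	struct dma_buf *dmabuf = dma_buf_get(fd);
 *	struct dma_buf_attachment *attach = dma_buf_attach(dmabuf, dev);
 *	struct sg_table *sgt;
 *
 *	sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
 *	my_device_do_dma(dev, sgt);
 *	dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
 *
 *	dma_buf_detach(dmabuf, attach);
 *	dma_buf_put(dmabuf);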
637 */
638
639 /**
640 * dma_buf_export - Creates a new dma_buf, and associates an anon file
641 * with this buffer, so it can be exported.
642 * Also connect the allocator specific data and ops to the buffer.
643 * Additionally, provide a name string for exporter; useful in debugging.
644 *
645 * @exp_info: [in] holds all the export related information provided
646 * by the exporter. see &struct dma_buf_export_info
647 * for further details.
648 *
649 * On success, returns a newly created struct dma_buf object, which wraps the
650 * supplied private data and operations for struct dma_buf_ops. On failure,
651 * for example missing ops or an error allocating the struct dma_buf, an
652 * ERR_PTR() encoding a negative error code is returned.
653 *
654 * For most cases the easiest way to create @exp_info is through the
655 * %DEFINE_DMA_BUF_EXPORT_INFO macro.
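 *
 * A minimal exporter sketch (the buffer structure and ops names are
 * hypothetical, error handling omitted)::
 *
 *	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *	struct dma_buf *dmabuf;
 *
 *	exp_info.ops = &my_dmabuf_ops;
 *	exp_info.size = my_buffer->size;
 *	exp_info.flags = O_RDWR;
 *	exp_info.priv = my_buffer;
 *
 *	dmabuf = dma_buf_export(&exp_info);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);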
656 */
657 struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
658 {
659 struct dma_buf *dmabuf;
660 struct dma_resv *resv = exp_info->resv;
661 struct file *file;
662 size_t alloc_size = sizeof(struct dma_buf);
663 int ret;
664
665 if (WARN_ON(!exp_info->priv || !exp_info->ops
666 || !exp_info->ops->map_dma_buf
667 || !exp_info->ops->unmap_dma_buf
668 || !exp_info->ops->release))
669 return ERR_PTR(-EINVAL);
670
671 if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
672 (exp_info->ops->pin || exp_info->ops->unpin)))
673 return ERR_PTR(-EINVAL);
674
675 if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
676 return ERR_PTR(-EINVAL);
677
678 if (!try_module_get(exp_info->owner))
679 return ERR_PTR(-ENOENT);
680
681 file = dma_buf_getfile(exp_info->size, exp_info->flags);
682 if (IS_ERR(file)) {
683 ret = PTR_ERR(file);
684 goto err_module;
685 }
686
687 if (!exp_info->resv)
688 alloc_size += sizeof(struct dma_resv);
689 else
690 /* prevent &dma_buf[1] == dma_buf->resv */
691 alloc_size += 1;
692 dmabuf = kzalloc(alloc_size, GFP_KERNEL);
693 if (!dmabuf) {
694 ret = -ENOMEM;
695 goto err_file;
696 }
697
698 dmabuf->priv = exp_info->priv;
699 dmabuf->ops = exp_info->ops;
700 dmabuf->size = exp_info->size;
701 dmabuf->exp_name = exp_info->exp_name;
702 dmabuf->owner = exp_info->owner;
703 spin_lock_init(&dmabuf->name_lock);
704 init_waitqueue_head(&dmabuf->poll);
705 dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll;
706 dmabuf->cb_in.active = dmabuf->cb_out.active = 0;
707 INIT_LIST_HEAD(&dmabuf->attachments);
708
709 if (!resv) {
710 dmabuf->resv = (struct dma_resv *)&dmabuf[1];
711 dma_resv_init(dmabuf->resv);
712 } else {
713 dmabuf->resv = resv;
714 }
715
716 file->private_data = dmabuf;
717 file->f_path.dentry->d_fsdata = dmabuf;
718 dmabuf->file = file;
719
720 mutex_lock(&db_list.lock);
721 list_add(&dmabuf->list_node, &db_list.head);
722 mutex_unlock(&db_list.lock);
723
724 ret = dma_buf_stats_setup(dmabuf, file);
725 if (ret)
726 goto err_sysfs;
727
728 return dmabuf;
729
730 err_sysfs:
731 mutex_lock(&db_list.lock);
732 list_del(&dmabuf->list_node);
733 mutex_unlock(&db_list.lock);
734 dmabuf->file = NULL;
735 file->f_path.dentry->d_fsdata = NULL;
736 file->private_data = NULL;
737 if (!resv)
738 dma_resv_fini(dmabuf->resv);
739 kfree(dmabuf);
740 err_file:
741 fput(file);
742 err_module:
743 module_put(exp_info->owner);
744 return ERR_PTR(ret);
745 }
746 EXPORT_SYMBOL_NS_GPL(dma_buf_export, DMA_BUF);
747
748 /**
749 * dma_buf_fd - returns a file descriptor for the given struct dma_buf
750 * @dmabuf: [in] pointer to dma_buf for which fd is required.
751 * @flags: [in] flags to give to fd
752 *
753 * On success, returns the newly installed file descriptor; otherwise,
754 * returns a negative error code.
754 */
755 int dma_buf_fd(struct dma_buf *dmabuf, int flags)
756 {
757 int fd;
758
759 if (!dmabuf || !dmabuf->file)
760 return -EINVAL;
761
762 fd = get_unused_fd_flags(flags);
763 if (fd < 0)
764 return fd;
765
766 fd_install(fd, dmabuf->file);
767
768 return fd;
769 }
770 EXPORT_SYMBOL_NS_GPL(dma_buf_fd, DMA_BUF);
771
772 /**
773 * dma_buf_get - returns the struct dma_buf related to an fd
774 * @fd: [in] fd associated with the struct dma_buf to be returned
775 *
776 * On success, returns the struct dma_buf associated with @fd; the file's
777 * refcount is increased via fget(). Returns an ERR_PTR()
778 * otherwise.
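 *
 * For example (illustrative only; use_the_buffer() is a stand-in for the
 * importer's own code)::
 *
 *	struct dma_buf *dmabuf = dma_buf_get(fd);
 *
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *
 *	use_the_buffer(dmabuf);
 *	dma_buf_put(dmabuf);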
779 */
780 struct dma_buf *dma_buf_get(int fd)
781 {
782 struct file *file;
783
784 file = fget(fd);
785
786 if (!file)
787 return ERR_PTR(-EBADF);
788
789 if (!is_dma_buf_file(file)) {
790 fput(file);
791 return ERR_PTR(-EINVAL);
792 }
793
794 return file->private_data;
795 }
796 EXPORT_SYMBOL_NS_GPL(dma_buf_get, DMA_BUF);
797
798 /**
799 * dma_buf_put - decreases refcount of the buffer
800 * @dmabuf: [in] buffer to reduce refcount of
801 *
802 * Uses file's refcounting done implicitly by fput().
803 *
804 * If, as a result of this call, the refcount becomes 0, the 'release' file
805 * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
806 * in turn, and frees the memory allocated for dmabuf when exported.
807 */
808 void dma_buf_put(struct dma_buf *dmabuf)
809 {
810 if (WARN_ON(!dmabuf || !dmabuf->file))
811 return;
812
813 fput(dmabuf->file);
814 }
815 EXPORT_SYMBOL_NS_GPL(dma_buf_put, DMA_BUF);
816
817 static void mangle_sg_table(struct sg_table *sg_table)
818 {
819 #ifdef CONFIG_DMABUF_DEBUG
820 int i;
821 struct scatterlist *sg;
822
823 /* To catch abuse of the underlying struct page by importers mix
824 * up the bits, but take care to preserve the low SG_ bits to
825 * not corrupt the sgt. The mixing is undone in __unmap_dma_buf
826 * before passing the sgt back to the exporter. */
827 for_each_sgtable_sg(sg_table, sg, i)
828 sg->page_link ^= ~0xffUL;
829 #endif
830
831 }
832 static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
833 enum dma_data_direction direction)
834 {
835 struct sg_table *sg_table;
836 signed long ret;
837
838 sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
839 if (IS_ERR_OR_NULL(sg_table))
840 return sg_table;
841
842 if (!dma_buf_attachment_is_dynamic(attach)) {
843 ret = dma_resv_wait_timeout(attach->dmabuf->resv,
844 DMA_RESV_USAGE_KERNEL, true,
845 MAX_SCHEDULE_TIMEOUT);
846 if (ret < 0) {
847 attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
848 direction);
849 return ERR_PTR(ret);
850 }
851 }
852
853 mangle_sg_table(sg_table);
854 return sg_table;
855 }
856
857 /**
858 * DOC: locking convention
859 *
860 * In order to avoid deadlock situations between dma-buf exports and importers,
861 * all dma-buf API users must follow the common dma-buf locking convention.
862 *
863 * Convention for importers
864 *
865 * 1. Importers must hold the dma-buf reservation lock when calling these
866 * functions:
867 *
868 * - dma_buf_pin()
869 * - dma_buf_unpin()
870 * - dma_buf_map_attachment()
871 * - dma_buf_unmap_attachment()
872 * - dma_buf_vmap()
873 * - dma_buf_vunmap()
874 *
875 * 2. Importers must not hold the dma-buf reservation lock when calling these
876 * functions:
877 *
878 * - dma_buf_attach()
879 * - dma_buf_dynamic_attach()
880 * - dma_buf_detach()
881 * - dma_buf_export()
882 * - dma_buf_fd()
883 * - dma_buf_get()
884 * - dma_buf_put()
885 * - dma_buf_mmap()
886 * - dma_buf_begin_cpu_access()
887 * - dma_buf_end_cpu_access()
888 * - dma_buf_map_attachment_unlocked()
889 * - dma_buf_unmap_attachment_unlocked()
890 * - dma_buf_vmap_unlocked()
891 * - dma_buf_vunmap_unlocked()
892 *
893 * Convention for exporters
894 *
895 * 1. These &dma_buf_ops callbacks are invoked with unlocked dma-buf
896 * reservation and exporter can take the lock:
897 *
898 * - &dma_buf_ops.attach()
899 * - &dma_buf_ops.detach()
900 * - &dma_buf_ops.release()
901 * - &dma_buf_ops.begin_cpu_access()
902 * - &dma_buf_ops.end_cpu_access()
903 * - &dma_buf_ops.mmap()
904 *
905 * 2. These &dma_buf_ops callbacks are invoked with locked dma-buf
906 * reservation and exporter can't take the lock:
907 *
908 * - &dma_buf_ops.pin()
909 * - &dma_buf_ops.unpin()
910 * - &dma_buf_ops.map_dma_buf()
911 * - &dma_buf_ops.unmap_dma_buf()
912 * - &dma_buf_ops.vmap()
913 * - &dma_buf_ops.vunmap()
914 *
915 * 3. Exporters must hold the dma-buf reservation lock when calling these
916 * functions:
917 *
918 * - dma_buf_move_notify()
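 *
 * As an illustration of the importer rules above (identifiers are
 * hypothetical, error handling and the actual DMA work between map and
 * unmap are omitted)::
 *
 *	struct sg_table *sgt;
 *
 *	dma_resv_lock(dmabuf->resv, NULL);
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_resv_unlock(dmabuf->resv);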
919 */
920
921 /**
922 * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
923 * @dmabuf: [in] buffer to attach device to.
924 * @dev: [in] device to be attached.
925 * @importer_ops: [in] importer operations for the attachment
926 * @importer_priv: [in] importer private pointer for the attachment
927 *
928 * Returns struct dma_buf_attachment pointer for this attachment. Attachments
929 * must be cleaned up by calling dma_buf_detach().
930 *
931 * Optionally this calls &dma_buf_ops.attach to allow device-specific attach
932 * functionality.
933 *
934 * Returns:
935 *
936 * A pointer to newly created &dma_buf_attachment on success, or a negative
937 * error code wrapped into a pointer on failure.
938 *
939 * Note that this can fail if the backing storage of @dmabuf is in a place not
940 * accessible to @dev, and cannot be moved to a more suitable place. This is
941 * indicated with the error code -EBUSY.
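 *
 * A dynamic importer sketch (my_move_notify() and my_priv are hypothetical)::
 *
 *	static const struct dma_buf_attach_ops my_importer_ops = {
 *		.allow_peer2peer = true,
 *		.move_notify = my_move_notify,
 *	};
 *
 *	attach = dma_buf_dynamic_attach(dmabuf, dev, &my_importer_ops, my_priv);
 *	if (IS_ERR(attach))
 *		return PTR_ERR(attach);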
942 */
943 struct dma_buf_attachment *
944 dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
945 const struct dma_buf_attach_ops *importer_ops,
946 void *importer_priv)
947 {
948 struct dma_buf_attachment *attach;
949 int ret;
950
951 if (WARN_ON(!dmabuf || !dev))
952 return ERR_PTR(-EINVAL);
953
954 if (WARN_ON(importer_ops && !importer_ops->move_notify))
955 return ERR_PTR(-EINVAL);
956
957 attach = kzalloc(sizeof(*attach), GFP_KERNEL);
958 if (!attach)
959 return ERR_PTR(-ENOMEM);
960
961 attach->dev = dev;
962 attach->dmabuf = dmabuf;
963 if (importer_ops)
964 attach->peer2peer = importer_ops->allow_peer2peer;
965 attach->importer_ops = importer_ops;
966 attach->importer_priv = importer_priv;
967
968 if (dmabuf->ops->attach) {
969 ret = dmabuf->ops->attach(dmabuf, attach);
970 if (ret)
971 goto err_attach;
972 }
973 dma_resv_lock(dmabuf->resv, NULL);
974 list_add(&attach->node, &dmabuf->attachments);
975 dma_resv_unlock(dmabuf->resv);
976
977 /* When either the importer or the exporter can't handle dynamic
978 * mappings we cache the mapping here to avoid issues with the
979 * reservation object lock.
980 */
981 if (dma_buf_attachment_is_dynamic(attach) !=
982 dma_buf_is_dynamic(dmabuf)) {
983 struct sg_table *sgt;
984
985 dma_resv_lock(attach->dmabuf->resv, NULL);
986 if (dma_buf_is_dynamic(attach->dmabuf)) {
987 ret = dmabuf->ops->pin(attach);
988 if (ret)
989 goto err_unlock;
990 }
991
992 sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
993 if (!sgt)
994 sgt = ERR_PTR(-ENOMEM);
995 if (IS_ERR(sgt)) {
996 ret = PTR_ERR(sgt);
997 goto err_unpin;
998 }
999 dma_resv_unlock(attach->dmabuf->resv);
1000 attach->sgt = sgt;
1001 attach->dir = DMA_BIDIRECTIONAL;
1002 }
1003
1004 return attach;
1005
1006 err_attach:
1007 kfree(attach);
1008 return ERR_PTR(ret);
1009
1010 err_unpin:
1011 if (dma_buf_is_dynamic(attach->dmabuf))
1012 dmabuf->ops->unpin(attach);
1013
1014 err_unlock:
1015 dma_resv_unlock(attach->dmabuf->resv);
1016
1017 dma_buf_detach(dmabuf, attach);
1018 return ERR_PTR(ret);
1019 }
1020 EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, DMA_BUF);
1021
1022 /**
1023 * dma_buf_attach - Wrapper for dma_buf_dynamic_attach
1024 * @dmabuf: [in] buffer to attach device to.
1025 * @dev: [in] device to be attached.
1026 *
1027 * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static
1028 * mapping.
1029 */
1030 struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
1031 struct device *dev)
1032 {
1033 return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
1034 }
1035 EXPORT_SYMBOL_NS_GPL(dma_buf_attach, DMA_BUF);
1036
1037 static void __unmap_dma_buf(struct dma_buf_attachment *attach,
1038 struct sg_table *sg_table,
1039 enum dma_data_direction direction)
1040 {
1041 /* uses XOR, hence this unmangles */
1042 mangle_sg_table(sg_table);
1043
1044 attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
1045 }
1046
1047 /**
1048 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
1049 * @dmabuf: [in] buffer to detach from.
1050 * @attach: [in] attachment to be detached; is free'd after this call.
1051 *
1052 * Clean up a device attachment obtained by calling dma_buf_attach().
1053 *
1054 * Optionally this calls &dma_buf_ops.detach for device-specific detach.
1055 */
1056 void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
1057 {
1058 if (WARN_ON(!dmabuf || !attach || dmabuf != attach->dmabuf))
1059 return;
1060
1061 dma_resv_lock(dmabuf->resv, NULL);
1062
1063 if (attach->sgt) {
1064
1065 __unmap_dma_buf(attach, attach->sgt, attach->dir);
1066
1067 if (dma_buf_is_dynamic(attach->dmabuf))
1068 dmabuf->ops->unpin(attach);
1069 }
1070 list_del(&attach->node);
1071
1072 dma_resv_unlock(dmabuf->resv);
1073
1074 if (dmabuf->ops->detach)
1075 dmabuf->ops->detach(dmabuf, attach);
1076
1077 kfree(attach);
1078 }
1079 EXPORT_SYMBOL_NS_GPL(dma_buf_detach, DMA_BUF);
1080
1081 /**
1082 * dma_buf_pin - Lock down the DMA-buf
1083 * @attach: [in] attachment which should be pinned
1084 *
1085 * Only dynamic importers (who set up @attach with dma_buf_dynamic_attach()) may
1086 * call this, and only for limited use cases like scanout and not for temporary
1087 * pin operations. It is not permitted to allow userspace to pin arbitrary
1088 * amounts of buffers through this interface.
1089 *
1090 * Buffers must be unpinned by calling dma_buf_unpin().
1091 *
1092 * Returns:
1093 * 0 on success, negative error code on failure.
1094 */
1095 int dma_buf_pin(struct dma_buf_attachment *attach)
1096 {
1097 struct dma_buf *dmabuf = attach->dmabuf;
1098 int ret = 0;
1099
1100 WARN_ON(!dma_buf_attachment_is_dynamic(attach));
1101
1102 dma_resv_assert_held(dmabuf->resv);
1103
1104 if (dmabuf->ops->pin)
1105 ret = dmabuf->ops->pin(attach);
1106
1107 return ret;
1108 }
1109 EXPORT_SYMBOL_NS_GPL(dma_buf_pin, DMA_BUF);
1110
1111 /**
1112 * dma_buf_unpin - Unpin a DMA-buf
1113 * @attach: [in] attachment which should be unpinned
1114 *
1115 * This unpins a buffer pinned by dma_buf_pin() and allows the exporter to move
1116 * any mapping of @attach again and inform the importer through
1117 * &dma_buf_attach_ops.move_notify.
1118 */
1119 void dma_buf_unpin(struct dma_buf_attachment *attach)
1120 {
1121 struct dma_buf *dmabuf = attach->dmabuf;
1122
1123 WARN_ON(!dma_buf_attachment_is_dynamic(attach));
1124
1125 dma_resv_assert_held(dmabuf->resv);
1126
1127 if (dmabuf->ops->unpin)
1128 dmabuf->ops->unpin(attach);
1129 }
1130 EXPORT_SYMBOL_NS_GPL(dma_buf_unpin, DMA_BUF);
1131
1132 /**
1133 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
1134 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
1135 * dma_buf_ops.
1136 * @attach: [in] attachment whose scatterlist is to be returned
1137 * @direction: [in] direction of DMA transfer
1138 *
1139 * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
1140 * on error. May return -EINTR if it is interrupted by a signal.
1141 *
1142 * On success, the DMA addresses and lengths in the returned scatterlist are
1143 * PAGE_SIZE aligned.
1144 *
1145 * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
1146 * the underlying backing storage is pinned for as long as a mapping exists,
1147 * therefore users/importers should not hold onto a mapping for undue amounts of
1148 * time.
1149 *
1150 * Important: Dynamic importers must wait for the exclusive fence of the struct
1151 * dma_resv attached to the DMA-BUF first.
1152 */
1153 struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
1154 enum dma_data_direction direction)
1155 {
1156 struct sg_table *sg_table;
1157 int r;
1158
1159 might_sleep();
1160
1161 if (WARN_ON(!attach || !attach->dmabuf))
1162 return ERR_PTR(-EINVAL);
1163
1164 dma_resv_assert_held(attach->dmabuf->resv);
1165
1166 if (attach->sgt) {
1167 /*
1168 * Two mappings with different directions for the same
1169 * attachment are not allowed.
1170 */
1171 if (attach->dir != direction &&
1172 attach->dir != DMA_BIDIRECTIONAL)
1173 return ERR_PTR(-EBUSY);
1174
1175 return attach->sgt;
1176 }
1177
1178 if (dma_buf_is_dynamic(attach->dmabuf)) {
1179 if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
1180 r = attach->dmabuf->ops->pin(attach);
1181 if (r)
1182 return ERR_PTR(r);
1183 }
1184 }
1185
1186 sg_table = __map_dma_buf(attach, direction);
1187 if (!sg_table)
1188 sg_table = ERR_PTR(-ENOMEM);
1189
1190 if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
1191 !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
1192 attach->dmabuf->ops->unpin(attach);
1193
1194 if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
1195 attach->sgt = sg_table;
1196 attach->dir = direction;
1197 }
1198
1199 #ifdef CONFIG_DMA_API_DEBUG
1200 if (!IS_ERR(sg_table)) {
1201 struct scatterlist *sg;
1202 u64 addr;
1203 int len;
1204 int i;
1205
1206 for_each_sgtable_dma_sg(sg_table, sg, i) {
1207 addr = sg_dma_address(sg);
1208 len = sg_dma_len(sg);
1209 if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len)) {
1210 pr_debug("%s: addr %llx or len %x is not page aligned!\n",
1211 __func__, addr, len);
1212 }
1213 }
1214 }
1215 #endif /* CONFIG_DMA_API_DEBUG */
1216 return sg_table;
1217 }
1218 EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, DMA_BUF);
1219
1220 /**
1221 * dma_buf_map_attachment_unlocked - Returns the scatterlist table of the attachment;
1222 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
1223 * dma_buf_ops.
1224 * @attach: [in] attachment whose scatterlist is to be returned
1225 * @direction: [in] direction of DMA transfer
1226 *
1227 * Unlocked variant of dma_buf_map_attachment().
1228 */
1229 struct sg_table *
1230 dma_buf_map_attachment_unlocked(struct dma_buf_attachment *attach,
1231 enum dma_data_direction direction)
1232 {
1233 struct sg_table *sg_table;
1234
1235 might_sleep();
1236
1237 if (WARN_ON(!attach || !attach->dmabuf))
1238 return ERR_PTR(-EINVAL);
1239
1240 dma_resv_lock(attach->dmabuf->resv, NULL);
1241 sg_table = dma_buf_map_attachment(attach, direction);
1242 dma_resv_unlock(attach->dmabuf->resv);
1243
1244 return sg_table;
1245 }
1246 EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment_unlocked, DMA_BUF);
1247
1248 /**
1249 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
1250 * deallocate the associated scatterlist. Is a wrapper for unmap_dma_buf() of
1251 * dma_buf_ops.
1252 * @attach: [in] attachment to unmap buffer from
1253 * @sg_table: [in] scatterlist info of the buffer to unmap
1254 * @direction: [in] direction of DMA transfer
1255 *
1256 * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
1257 */
1258 void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
1259 struct sg_table *sg_table,
1260 enum dma_data_direction direction)
1261 {
1262 might_sleep();
1263
1264 if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
1265 return;
1266
1267 dma_resv_assert_held(attach->dmabuf->resv);
1268
1269 if (attach->sgt == sg_table)
1270 return;
1271
1272 __unmap_dma_buf(attach, sg_table, direction);
1273
1274 if (dma_buf_is_dynamic(attach->dmabuf) &&
1275 !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
1276 dma_buf_unpin(attach);
1277 }
1278 EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, DMA_BUF);
1279
1280 /**
1281 * dma_buf_unmap_attachment_unlocked - unmaps and decreases usecount of the buffer; might
1282 * deallocate the associated scatterlist. Is a wrapper for unmap_dma_buf() of
1283 * dma_buf_ops.
1284 * @attach: [in] attachment to unmap buffer from
1285 * @sg_table: [in] scatterlist info of the buffer to unmap
1286 * @direction: [in] direction of DMA transfer
1287 *
1288 * Unlocked variant of dma_buf_unmap_attachment().
1289 */
1290 void dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment *attach,
1291 struct sg_table *sg_table,
1292 enum dma_data_direction direction)
1293 {
1294 might_sleep();
1295
1296 if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
1297 return;
1298
1299 dma_resv_lock(attach->dmabuf->resv, NULL);
1300 dma_buf_unmap_attachment(attach, sg_table, direction);
1301 dma_resv_unlock(attach->dmabuf->resv);
1302 }
1303 EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment_unlocked, DMA_BUF);
1304
1305 /**
1306 * dma_buf_move_notify - notify attachments that DMA-buf is moving
1307 *
1308 * @dmabuf: [in] buffer which is moving
1309 *
1310 * Informs all attachments that they need to destroy and recreate all their
1311 * mappings.
1312 */
1313 void dma_buf_move_notify(struct dma_buf *dmabuf)
1314 {
1315 struct dma_buf_attachment *attach;
1316
1317 dma_resv_assert_held(dmabuf->resv);
1318
1319 list_for_each_entry(attach, &dmabuf->attachments, node)
1320 if (attach->importer_ops)
1321 attach->importer_ops->move_notify(attach);
1322 }
1323 EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF);
1324
1325 /**
1326 * DOC: cpu access
1327 *
1328 * There are multiple reasons for supporting CPU access to a dma buffer object:
1329 *
1330 * - Fallback operations in the kernel, for example when a device is connected
1331 * over USB and the kernel needs to shuffle the data around first before
1332 * sending it away. Cache coherency is handled by bracketing any transactions
1333 * with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access()
1334 * around the access.
1335 *
1336 * Since most kernel internal dma-buf accesses need the entire buffer, a
1337 * vmap interface is introduced. Note that on very old 32-bit architectures
1338 * vmalloc space might be limited and result in vmap calls failing.
1339 *
1340 * Interfaces::
1341 *
1342 * void \*dma_buf_vmap(struct dma_buf \*dmabuf, struct iosys_map \*map)
1343 * void dma_buf_vunmap(struct dma_buf \*dmabuf, struct iosys_map \*map)
1344 *
1345 * The vmap call can fail if there is no vmap support in the exporter, or if
1346 * it runs out of vmalloc space. Note that the dma-buf layer keeps a reference
1347 * count for all vmap access and calls down into the exporter's vmap function
1348 * only when no vmapping exists, and only unmaps it once. Protection against
1349 * concurrent vmap/vunmap calls is provided by holding the dma-buf reservation lock.
1350 *
1351 * - For full compatibility on the importer side with existing userspace
1352 * interfaces, which might already support mmap'ing buffers. This is needed in
1353 * many processing pipelines (e.g. feeding a software rendered image into a
1354 * hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION
1355 * framework already supported this, and mmap support was needed for DMA buffer
1356 * file descriptors to replace ION buffers.
1357 *
1358 * There are no special interfaces; userspace simply calls mmap() on the dma-buf
1359 * fd. But like for CPU access there's a need to bracket the actual access,
1360 * which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that
1361 * DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
1362 * be restarted.
1363 *
1364 * Some systems might need some sort of cache coherency management e.g. when
1365 * CPU and GPU domains are being accessed through dma-buf at the same time.
1366 * To circumvent this problem there are begin/end coherency markers, that
1367 * forward directly to existing dma-buf device drivers vfunc hooks. Userspace
1368 * can make use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The
1369 * sequence is used as follows (see also the userspace sketch at the end of this section):
1370 *
1371 * - mmap dma-buf fd
1372 * - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write
1373 * to mmap area 3. SYNC_END ioctl. This can be repeated as often as you
1374 * want (with the new data being consumed by say the GPU or the scanout
1375 * device)
1376 * - munmap once you don't need the buffer any more
1377 *
1378 * For correctness and optimal performance, it is always required to use
1379 * SYNC_START and SYNC_END before and after, respectively, when accessing the
1380 * mapped address. Userspace cannot rely on coherent access, even when there
1381 * are systems where it just works without calling these ioctls.
1382 *
1383 * - And as a CPU fallback in userspace processing pipelines.
1384 *
1385 * Similar to the motivation for kernel cpu access it is again important that
1386 * the userspace code of a given importing subsystem can use the same
1387 * interfaces with an imported dma-buf buffer object as with a native buffer
1388 * object. This is especially important for drm where the userspace part of
1389 * contemporary OpenGL, X, and other drivers is huge, and reworking them to
1390 * use a different way to mmap a buffer would be rather invasive.
1391 *
1392 * The assumption in the current dma-buf interfaces is that redirecting the
1393 * initial mmap is all that's needed. A survey of some of the existing
1394 * subsystems shows that no driver seems to do any nefarious thing like
1395 * syncing up with outstanding asynchronous processing on the device or
1396 * allocating special resources at fault time. So hopefully this is good
1397 * enough, since adding interfaces to intercept pagefaults and allow pte
1398 * shootdowns would increase the complexity quite a bit.
1399 *
1400 * Interface::
1401 *
1402 * int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
1403 * unsigned long);
1404 *
1405 * If the importing subsystem simply provides a special-purpose mmap call to
1406 * set up a mapping in userspace, calling do_mmap with &dma_buf.file will
1407 * equally achieve that for a dma-buf object.
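 *
 * As referenced above, an illustrative userspace sketch of the
 * mmap + DMA_BUF_IOCTL_SYNC sequence (dmabuf_fd, data and size are assumed
 * to exist, error handling trimmed)::
 *
 *	struct dma_buf_sync sync = { 0 };
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 dmabuf_fd, 0);
 *
 *	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW;
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *
 *	memcpy(ptr, data, size);
 *
 *	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *
 *	munmap(ptr, size);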
1408 */
1409
1410 static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
1411 enum dma_data_direction direction)
1412 {
1413 bool write = (direction == DMA_BIDIRECTIONAL ||
1414 direction == DMA_TO_DEVICE);
1415 struct dma_resv *resv = dmabuf->resv;
1416 long ret;
1417
1418 /* Wait on any implicit rendering fences */
1419 ret = dma_resv_wait_timeout(resv, dma_resv_usage_rw(write),
1420 true, MAX_SCHEDULE_TIMEOUT);
1421 if (ret < 0)
1422 return ret;
1423
1424 return 0;
1425 }
1426
1427 /**
1428 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
1429 * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
1430 * preparations. Coherency is only guaranteed in the specified range for the
1431 * specified access direction.
1432 * @dmabuf: [in] buffer to prepare cpu access for.
1433 * @direction: [in] direction of access.
1434 *
1435 * After the cpu access is complete the caller should call
1436 * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
1437 * it guaranteed to be coherent with other DMA access.
1438 *
1439 * This function will also wait for any DMA transactions tracked through
1440 * implicit synchronization in &dma_buf.resv. For DMA transactions with explicit
1441 * synchronization this function will only ensure cache coherency, callers must
1442 * ensure synchronization with such DMA transactions on their own.
1443 *
1444 * Can return negative error values, returns 0 on success.
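 *
 * A kernel-side sketch of bracketing CPU access (illustrative only, the
 * actual access through a vmap or similar mapping is omitted)::
 *
 *	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *	if (ret)
 *		return ret;
 *	dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);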
1445 */
1446 int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
1447 enum dma_data_direction direction)
1448 {
1449 int ret = 0;
1450
1451 if (WARN_ON(!dmabuf))
1452 return -EINVAL;
1453
1454 might_lock(&dmabuf->resv->lock.base);
1455
1456 if (dmabuf->ops->begin_cpu_access)
1457 ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
1458
1459 /* Ensure that all fences are waited upon - but we first allow
1460 * the native handler the chance to do so more efficiently if it
1461 * chooses. A double invocation here will be a reasonably cheap no-op.
1462 */
1463 if (ret == 0)
1464 ret = __dma_buf_begin_cpu_access(dmabuf, direction);
1465
1466 return ret;
1467 }
1468 EXPORT_SYMBOL_NS_GPL(dma_buf_begin_cpu_access, DMA_BUF);
1469
1470 int dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
1471 enum dma_data_direction direction,
1472 unsigned int offset, unsigned int len)
1473 {
1474 int ret = 0;
1475
1476 if (WARN_ON(!dmabuf))
1477 return -EINVAL;
1478
1479 if (dmabuf->ops->begin_cpu_access_partial)
1480 ret = dmabuf->ops->begin_cpu_access_partial(dmabuf, direction,
1481 offset, len);
1482
1483 /* Ensure that all fences are waited upon - but we first allow
1484 * the native handler the chance to do so more efficiently if it
1485 * chooses. A double invocation here will be a reasonably cheap no-op.
1486 */
1487 if (ret == 0)
1488 ret = __dma_buf_begin_cpu_access(dmabuf, direction);
1489
1490 return ret;
1491 }
1492 EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access_partial);
1493
1494 /**
1495 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
1496 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
1497 * actions. Coherency is only guaranteed in the specified range for the
1498 * specified access direction.
1499 * @dmabuf: [in] buffer to complete cpu access for.
1500 * @direction: [in] direction of access.
1501 *
1502 * This terminates CPU access started with dma_buf_begin_cpu_access().
1503 *
1504 * Can return negative error values, returns 0 on success.
1505 */
1506 int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
1507 enum dma_data_direction direction)
1508 {
1509 int ret = 0;
1510
1511 WARN_ON(!dmabuf);
1512
1513 might_lock(&dmabuf->resv->lock.base);
1514
1515 if (dmabuf->ops->end_cpu_access)
1516 ret = dmabuf->ops->end_cpu_access(dmabuf, direction);
1517
1518 return ret;
1519 }
1520 EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, DMA_BUF);
1521
1522 int dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
1523 enum dma_data_direction direction,
1524 unsigned int offset, unsigned int len)
1525 {
1526 int ret = 0;
1527
1528 WARN_ON(!dmabuf);
1529
1530 if (dmabuf->ops->end_cpu_access_partial)
1531 ret = dmabuf->ops->end_cpu_access_partial(dmabuf, direction,
1532 offset, len);
1533
1534 return ret;
1535 }
1536 EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access_partial);
1537
1538 /**
1539 * dma_buf_mmap - Set up a userspace mmap with the given vma
1540 * @dmabuf: [in] buffer that should back the vma
1541 * @vma: [in] vma for the mmap
1542 * @pgoff: [in] offset in pages where this mmap should start within the
1543 * dma-buf buffer.
1544 *
1545 * This function adjusts the passed in vma so that it points at the file of the
1546 * dma_buf operation. It also adjusts the starting pgoff and does bounds
1547 * checking on the size of the vma. Then it calls the exporter's mmap function to
1548 * set up the mapping.
1549 *
1550 * Can return negative error values, returns 0 on success.
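 *
 * A sketch of an importing driver's mmap handler redirecting to the dma-buf
 * (struct my_buffer and its fields are hypothetical)::
 *
 *	static int my_drv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_buffer *buf = file->private_data;
 *
 *		return dma_buf_mmap(buf->dmabuf, vma, 0);
 *	}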
1551 */
1552 int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
1553 unsigned long pgoff)
1554 {
1555 if (WARN_ON(!dmabuf || !vma))
1556 return -EINVAL;
1557
1558 /* check if buffer supports mmap */
1559 if (!dmabuf->ops->mmap)
1560 return -EINVAL;
1561
1562 /* check for offset overflow */
1563 if (pgoff + vma_pages(vma) < pgoff)
1564 return -EOVERFLOW;
1565
1566 /* check for overflowing the buffer's size */
1567 if (pgoff + vma_pages(vma) >
1568 dmabuf->size >> PAGE_SHIFT)
1569 return -EINVAL;
1570
1571 /* readjust the vma */
1572 vma_set_file(vma, dmabuf->file);
1573 vma->vm_pgoff = pgoff;
1574
1575 return dmabuf->ops->mmap(dmabuf, vma);
1576 }
1577 EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, DMA_BUF);
1578
1579 /**
1580 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
1581 * address space. Same restrictions as for vmap and friends apply.
1582 * @dmabuf: [in] buffer to vmap
1583 * @map: [out] returns the vmap pointer
1584 *
1585 * This call may fail due to lack of virtual mapping address space.
1586 * These calls are optional in drivers. The intended use for them is to map
1587 * objects linearly into kernel address space for frequently accessed objects.
1588 *
1589 * To ensure coherency users must call dma_buf_begin_cpu_access() and
1590 * dma_buf_end_cpu_access() around any cpu access performed through this
1591 * mapping.
1592 *
1593 * Returns 0 on success, or a negative errno code otherwise.
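 *
 * A minimal usage sketch, assuming the caller follows the locking convention
 * (data and len are assumed to exist; the iosys_map_memcpy_to() usage is
 * illustrative)::
 *
 *	struct iosys_map map;
 *	int ret;
 *
 *	dma_resv_lock(dmabuf->resv, NULL);
 *	ret = dma_buf_vmap(dmabuf, &map);
 *	if (!ret) {
 *		iosys_map_memcpy_to(&map, 0, data, len);
 *		dma_buf_vunmap(dmabuf, &map);
 *	}
 *	dma_resv_unlock(dmabuf->resv);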
1594 */
1595 int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
1596 {
1597 struct iosys_map ptr;
1598 int ret;
1599
1600 iosys_map_clear(map);
1601
1602 if (WARN_ON(!dmabuf))
1603 return -EINVAL;
1604
1605 dma_resv_assert_held(dmabuf->resv);
1606
1607 if (!dmabuf->ops->vmap)
1608 return -EINVAL;
1609
1610 if (dmabuf->vmapping_counter) {
1611 dmabuf->vmapping_counter++;
1612 BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
1613 *map = dmabuf->vmap_ptr;
1614 return 0;
1615 }
1616
1617 BUG_ON(iosys_map_is_set(&dmabuf->vmap_ptr));
1618
1619 ret = dmabuf->ops->vmap(dmabuf, &ptr);
1620 if (WARN_ON_ONCE(ret))
1621 return ret;
1622
1623 dmabuf->vmap_ptr = ptr;
1624 dmabuf->vmapping_counter = 1;
1625
1626 *map = dmabuf->vmap_ptr;
1627
1628 return 0;
1629 }
1630 EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, DMA_BUF);
1631
1632 /**
1633 * dma_buf_vmap_unlocked - Create virtual mapping for the buffer object into kernel
1634 * address space. Same restrictions as for vmap and friends apply.
1635 * @dmabuf: [in] buffer to vmap
1636 * @map: [out] returns the vmap pointer
1637 *
1638 * Unlocked version of dma_buf_vmap()
1639 *
1640 * Returns 0 on success, or a negative errno code otherwise.
1641 */
1642 int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
1643 {
1644 int ret;
1645
1646 iosys_map_clear(map);
1647
1648 if (WARN_ON(!dmabuf))
1649 return -EINVAL;
1650
1651 dma_resv_lock(dmabuf->resv, NULL);
1652 ret = dma_buf_vmap(dmabuf, map);
1653 dma_resv_unlock(dmabuf->resv);
1654
1655 return ret;
1656 }
1657 EXPORT_SYMBOL_NS_GPL(dma_buf_vmap_unlocked, DMA_BUF);
1658
1659 /**
1660 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
1661 * @dmabuf: [in] buffer to vunmap
1662 * @map: [in] vmap pointer to vunmap
1663 */
1664 void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
1665 {
1666 if (WARN_ON(!dmabuf))
1667 return;
1668
1669 dma_resv_assert_held(dmabuf->resv);
1670
1671 BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
1672 BUG_ON(dmabuf->vmapping_counter == 0);
1673 BUG_ON(!iosys_map_is_equal(&dmabuf->vmap_ptr, map));
1674
1675 if (--dmabuf->vmapping_counter == 0) {
1676 if (dmabuf->ops->vunmap)
1677 dmabuf->ops->vunmap(dmabuf, map);
1678 iosys_map_clear(&dmabuf->vmap_ptr);
1679 }
1680 }
1681 EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, DMA_BUF);
1682
1683 /**
1684 * dma_buf_vunmap_unlocked - Unmap a vmap obtained by dma_buf_vmap.
1685 * @dmabuf: [in] buffer to vunmap
1686 * @map: [in] vmap pointer to vunmap
1687 */
1688 void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
1689 {
1690 if (WARN_ON(!dmabuf))
1691 return;
1692
1693 dma_resv_lock(dmabuf->resv, NULL);
1694 dma_buf_vunmap(dmabuf, map);
1695 dma_resv_unlock(dmabuf->resv);
1696 }
1697 EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap_unlocked, DMA_BUF);
1698
1699 int dma_buf_get_flags(struct dma_buf *dmabuf, unsigned long *flags)
1700 {
1701 int ret = 0;
1702
1703 if (WARN_ON(!dmabuf) || !flags)
1704 return -EINVAL;
1705
1706 if (dmabuf->ops->get_flags)
1707 ret = dmabuf->ops->get_flags(dmabuf, flags);
1708
1709 return ret;
1710 }
1711 EXPORT_SYMBOL_GPL(dma_buf_get_flags);
1712
1713 #ifdef CONFIG_DEBUG_FS
1714 static int dma_buf_debug_show(struct seq_file *s, void *unused)
1715 {
1716 struct dma_buf *buf_obj;
1717 struct dma_buf_attachment *attach_obj;
1718 int count = 0, attach_count;
1719 size_t size = 0;
1720 int ret;
1721
1722 ret = mutex_lock_interruptible(&db_list.lock);
1723
1724 if (ret)
1725 return ret;
1726
1727 seq_puts(s, "\nDma-buf Objects:\n");
1728 seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\tname\n",
1729 "size", "flags", "mode", "count", "ino");
1730
1731 list_for_each_entry(buf_obj, &db_list.head, list_node) {
1732
1733 ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
1734 if (ret)
1735 goto error_unlock;
1736
1737
1738 spin_lock(&buf_obj->name_lock);
1739 seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
1740 buf_obj->size,
1741 buf_obj->file->f_flags, buf_obj->file->f_mode,
1742 file_count(buf_obj->file),
1743 buf_obj->exp_name,
1744 file_inode(buf_obj->file)->i_ino,
1745 buf_obj->name ?: "<none>");
1746 spin_unlock(&buf_obj->name_lock);
1747
1748 dma_resv_describe(buf_obj->resv, s);
1749
1750 seq_puts(s, "\tAttached Devices:\n");
1751 attach_count = 0;
1752
1753 list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
1754 seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
1755 attach_count++;
1756 }
1757 dma_resv_unlock(buf_obj->resv);
1758
1759 seq_printf(s, "Total %d devices attached\n\n",
1760 attach_count);
1761
1762 count++;
1763 size += buf_obj->size;
1764 }
1765
1766 seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);
1767
1768 mutex_unlock(&db_list.lock);
1769 return 0;
1770
1771 error_unlock:
1772 mutex_unlock(&db_list.lock);
1773 return ret;
1774 }
1775
1776 DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);
1777
1778 static struct dentry *dma_buf_debugfs_dir;
1779
1780 static int dma_buf_init_debugfs(void)
1781 {
1782 struct dentry *d;
1783 int err = 0;
1784
1785 d = debugfs_create_dir("dma_buf", NULL);
1786 if (IS_ERR(d))
1787 return PTR_ERR(d);
1788
1789 dma_buf_debugfs_dir = d;
1790
1791 d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
1792 NULL, &dma_buf_debug_fops);
1793 if (IS_ERR(d)) {
1794 pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
1795 debugfs_remove_recursive(dma_buf_debugfs_dir);
1796 dma_buf_debugfs_dir = NULL;
1797 err = PTR_ERR(d);
1798 }
1799
1800 return err;
1801 }
1802
1803 static void dma_buf_uninit_debugfs(void)
1804 {
1805 debugfs_remove_recursive(dma_buf_debugfs_dir);
1806 }
1807 #else
1808 static inline int dma_buf_init_debugfs(void)
1809 {
1810 return 0;
1811 }
1812 static inline void dma_buf_uninit_debugfs(void)
1813 {
1814 }
1815 #endif
1816
1817 static int __init dma_buf_init(void)
1818 {
1819 int ret;
1820
1821 ret = dma_buf_init_sysfs_statistics();
1822 if (ret)
1823 return ret;
1824
1825 dma_buf_mnt = kern_mount(&dma_buf_fs_type);
1826 if (IS_ERR(dma_buf_mnt))
1827 return PTR_ERR(dma_buf_mnt);
1828
1829 mutex_init(&db_list.lock);
1830 INIT_LIST_HEAD(&db_list.head);
1831 dma_buf_init_debugfs();
1832 return 0;
1833 }
1834 subsys_initcall(dma_buf_init);
1835
1836 static void __exit dma_buf_deinit(void)
1837 {
1838 dma_buf_uninit_debugfs();
1839 kern_unmount(dma_buf_mnt);
1840 dma_buf_uninit_sysfs_statistics();
1841 }
1842 __exitcall(dma_buf_deinit);
1843