1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Framework for buffer objects that can be shared across devices/subsystems.
4 *
5 * Copyright(C) 2011 Linaro Limited. All rights reserved.
6 * Author: Sumit Semwal <sumit.semwal@ti.com>
7 *
8 * Many thanks to the linaro-mm-sig list, and especially
9 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
10 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
11 * refining of this idea.
12 */
13
14 #include <linux/fs.h>
15 #include <linux/slab.h>
16 #include <linux/dma-buf.h>
17 #include <linux/dma-fence.h>
18 #include <linux/dma-fence-unwrap.h>
19 #include <linux/anon_inodes.h>
20 #include <linux/export.h>
21 #include <linux/debugfs.h>
22 #include <linux/module.h>
23 #include <linux/seq_file.h>
24 #include <linux/sync_file.h>
25 #include <linux/poll.h>
26 #include <linux/dma-resv.h>
27 #include <linux/mm.h>
28 #include <linux/mount.h>
29 #include <linux/pseudo_fs.h>
30 #include <trace/hooks/dmabuf.h>
31
32 #include <uapi/linux/dma-buf.h>
33 #include <uapi/linux/magic.h>
34
35 #include "dma-buf-sysfs-stats.h"
36
37 struct dma_buf_list {
38 struct list_head head;
39 struct mutex lock;
40 };
41
42 static struct dma_buf_list db_list;
43
44 /**
45 * dma_buf_get_each - Walk db_list and call @callback for each dmabuf so
46 * that the caller can extract whatever information it needs from each
47 * buffer.
48 * db_list.lock is held for the whole walk to prevent db_list from being
49 * modified while it is being traversed.
50 *
51 * @callback: [in] Called once for each dmabuf buffer in db_list.
52 * @private: [in] User-defined pointer that is passed unchanged to
53 * @callback on every invocation.
54 *
55 * Returns 0 on success; otherwise returns the non-zero value returned by
56 * mutex_lock_interruptible() or by @callback.
57 */
58 int dma_buf_get_each(int (*callback)(const struct dma_buf *dmabuf,
59 void *private), void *private)
60 {
61 struct dma_buf *buf;
62 int ret = mutex_lock_interruptible(&db_list.lock);
63
64 if (ret)
65 return ret;
66
67 list_for_each_entry(buf, &db_list.head, list_node) {
68 ret = callback(buf, private);
69 if (ret)
70 break;
71 }
72 mutex_unlock(&db_list.lock);
73 return ret;
74 }
75 EXPORT_SYMBOL_NS_GPL(dma_buf_get_each, MINIDUMP);
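/*
 * Example: a minimal, illustrative sketch of how dma_buf_get_each() might be
 * used to add up the size of every dma-buf in db_list. The callback name and
 * the accumulator are hypothetical and not part of this file.
 *
 *	static int sum_dmabuf_size(const struct dma_buf *dmabuf, void *private)
 *	{
 *		size_t *total = private;
 *
 *		*total += dmabuf->size;
 *		return 0;	// returning non-zero stops the walk early
 *	}
 *
 *	size_t total = 0;
 *	int err = dma_buf_get_each(sum_dmabuf_size, &total);
 */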
76
77 static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
78 {
79 struct dma_buf *dmabuf;
80 char name[DMA_BUF_NAME_LEN];
81 size_t ret = 0;
82
83 dmabuf = dentry->d_fsdata;
84 spin_lock(&dmabuf->name_lock);
85 if (dmabuf->name)
86 ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
87 spin_unlock(&dmabuf->name_lock);
88
89 return dynamic_dname(buffer, buflen, "/%s:%s",
90 dentry->d_name.name, ret > 0 ? name : "");
91 }
92
93 static void dma_buf_release(struct dentry *dentry)
94 {
95 struct dma_buf *dmabuf;
96
97 dmabuf = dentry->d_fsdata;
98 if (unlikely(!dmabuf))
99 return;
100
101 BUG_ON(dmabuf->vmapping_counter);
102
103 /*
104 * If you hit this BUG() it could mean:
105 * * There's a file reference imbalance in dma_buf_poll / dma_buf_poll_cb or somewhere else
106 * * dmabuf->cb_in/out.active are non-0 despite no pending fence callback
107 */
108 BUG_ON(dmabuf->cb_in.active || dmabuf->cb_out.active);
109
110 dma_buf_stats_teardown(dmabuf);
111 dmabuf->ops->release(dmabuf);
112
113 if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
114 dma_resv_fini(dmabuf->resv);
115
116 WARN_ON(!list_empty(&dmabuf->attachments));
117 module_put(dmabuf->owner);
118 kfree(dmabuf->name);
119 kfree(dmabuf);
120 }
121
122 static int dma_buf_file_release(struct inode *inode, struct file *file)
123 {
124 struct dma_buf *dmabuf;
125
126 if (!is_dma_buf_file(file))
127 return -EINVAL;
128
129 dmabuf = file->private_data;
130 if (dmabuf) {
131 mutex_lock(&db_list.lock);
132 list_del(&dmabuf->list_node);
133 mutex_unlock(&db_list.lock);
134 }
135
136 return 0;
137 }
138
139 static const struct dentry_operations dma_buf_dentry_ops = {
140 .d_dname = dmabuffs_dname,
141 .d_release = dma_buf_release,
142 };
143
144 static struct vfsmount *dma_buf_mnt;
145
146 static int dma_buf_fs_init_context(struct fs_context *fc)
147 {
148 struct pseudo_fs_context *ctx;
149
150 ctx = init_pseudo(fc, DMA_BUF_MAGIC);
151 if (!ctx)
152 return -ENOMEM;
153 ctx->dops = &dma_buf_dentry_ops;
154 return 0;
155 }
156
157 static struct file_system_type dma_buf_fs_type = {
158 .name = "dmabuf",
159 .init_fs_context = dma_buf_fs_init_context,
160 .kill_sb = kill_anon_super,
161 };
162
163 static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
164 {
165 struct dma_buf *dmabuf;
166 bool ignore_bounds = false;
167
168 if (!is_dma_buf_file(file))
169 return -EINVAL;
170
171 dmabuf = file->private_data;
172
173 /* check if buffer supports mmap */
174 if (!dmabuf->ops->mmap)
175 return -EINVAL;
176
177 trace_android_vh_ignore_dmabuf_vmap_bounds(dmabuf, &ignore_bounds);
178
179 /* check for overflowing the buffer's size */
180 if ((vma->vm_pgoff + vma_pages(vma) >
181 dmabuf->size >> PAGE_SHIFT) && !ignore_bounds)
182 return -EINVAL;
183
184 return dmabuf->ops->mmap(dmabuf, vma);
185 }
186
187 static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
188 {
189 struct dma_buf *dmabuf;
190 loff_t base;
191
192 if (!is_dma_buf_file(file))
193 return -EBADF;
194
195 dmabuf = file->private_data;
196
197 /* only support discovering the end of the buffer,
198 but also allow SEEK_SET to maintain the idiomatic
199 SEEK_END(0), SEEK_CUR(0) pattern */
200 if (whence == SEEK_END)
201 base = dmabuf->size;
202 else if (whence == SEEK_SET)
203 base = 0;
204 else
205 return -EINVAL;
206
207 if (offset != 0)
208 return -EINVAL;
209
210 return base + offset;
211 }
212
213 /**
214 * DOC: implicit fence polling
215 *
216 * To support cross-device and cross-driver synchronization of buffer access
217 * implicit fences (represented internally in the kernel with &struct dma_fence)
218 * can be attached to a &dma_buf. The glue for that and a few related things are
219 * provided in the &dma_resv structure.
220 *
221 * Userspace can query the state of these implicitly tracked fences using poll()
222 * and related system calls:
223 *
224 * - Checking for EPOLLIN, i.e. read access, can be used to query the state of the
225 * most recent write or exclusive fence.
226 *
227 * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
228 * all attached fences, shared and exclusive ones.
229 *
230 * Note that this only signals the completion of the respective fences, i.e. the
231 * DMA transfers are complete. Cache flushing and any other necessary
232 * preparations before CPU access can begin still need to happen.
233 *
234 * As an alternative to poll(), the set of fences on a DMA buffer can be
235 * exported as a &sync_file using the DMA_BUF_IOCTL_EXPORT_SYNC_FILE ioctl.
236 */
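/*
 * Example: a minimal userspace sketch (built against the usual libc headers,
 * not part of this file) that blocks until the most recent write/exclusive
 * fence of a dma-buf has signaled by polling for read access:
 *
 *	struct pollfd pfd = { .fd = dmabuf_fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) == 1 && (pfd.revents & POLLIN)) {
 *		// DMA writes are complete; cache maintenance may still be
 *		// needed before the CPU touches the buffer
 *	}
 */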
237
238 static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
239 {
240 struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
241 struct dma_buf *dmabuf = container_of(dcb->poll, struct dma_buf, poll);
242 unsigned long flags;
243
244 spin_lock_irqsave(&dcb->poll->lock, flags);
245 wake_up_locked_poll(dcb->poll, dcb->active);
246 dcb->active = 0;
247 spin_unlock_irqrestore(&dcb->poll->lock, flags);
248 dma_fence_put(fence);
249 /* Paired with get_file in dma_buf_poll */
250 fput(dmabuf->file);
251 }
252
253 static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write,
254 struct dma_buf_poll_cb_t *dcb)
255 {
256 struct dma_resv_iter cursor;
257 struct dma_fence *fence;
258 int r;
259
260 dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(write),
261 fence) {
262 dma_fence_get(fence);
263 r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
264 if (!r)
265 return true;
266 dma_fence_put(fence);
267 }
268
269 return false;
270 }
271
272 static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
273 {
274 struct dma_buf *dmabuf;
275 struct dma_resv *resv;
276 __poll_t events;
277
278 dmabuf = file->private_data;
279 if (!dmabuf || !dmabuf->resv)
280 return EPOLLERR;
281
282 resv = dmabuf->resv;
283
284 poll_wait(file, &dmabuf->poll, poll);
285
286 events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
287 if (!events)
288 return 0;
289
290 dma_resv_lock(resv, NULL);
291
292 if (events & EPOLLOUT) {
293 struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_out;
294
295 /* Check that callback isn't busy */
296 spin_lock_irq(&dmabuf->poll.lock);
297 if (dcb->active)
298 events &= ~EPOLLOUT;
299 else
300 dcb->active = EPOLLOUT;
301 spin_unlock_irq(&dmabuf->poll.lock);
302
303 if (events & EPOLLOUT) {
304 /* Paired with fput in dma_buf_poll_cb */
305 get_file(dmabuf->file);
306
307 if (!dma_buf_poll_add_cb(resv, true, dcb))
308 /* No callback queued, wake up any other waiters */
309 dma_buf_poll_cb(NULL, &dcb->cb);
310 else
311 events &= ~EPOLLOUT;
312 }
313 }
314
315 if (events & EPOLLIN) {
316 struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_in;
317
318 /* Check that callback isn't busy */
319 spin_lock_irq(&dmabuf->poll.lock);
320 if (dcb->active)
321 events &= ~EPOLLIN;
322 else
323 dcb->active = EPOLLIN;
324 spin_unlock_irq(&dmabuf->poll.lock);
325
326 if (events & EPOLLIN) {
327 /* Paired with fput in dma_buf_poll_cb */
328 get_file(dmabuf->file);
329
330 if (!dma_buf_poll_add_cb(resv, false, dcb))
331 /* No callback queued, wake up any other waiters */
332 dma_buf_poll_cb(NULL, &dcb->cb);
333 else
334 events &= ~EPOLLIN;
335 }
336 }
337
338 dma_resv_unlock(resv);
339 return events;
340 }
341
342 static long _dma_buf_set_name(struct dma_buf *dmabuf, const char *name)
343 {
344 spin_lock(&dmabuf->name_lock);
345 kfree(dmabuf->name);
346 dmabuf->name = name;
347 spin_unlock(&dmabuf->name_lock);
348
349 return 0;
350 }
351
352 /**
353 * dma_buf_set_name - Set a name on a dma_buf to help track its usage.
354 * The name may be changed later, for example when the same piece of
355 * memory is reused for a different purpose by another device.
356 *
357 * @dmabuf: [in] dmabuf buffer that will be renamed.
358 * @name: [in] NUL-terminated kernel string holding the new name of
359 * the dma-buf.
360 *
361 * Returns 0 on success, or -ENOMEM if the name could not be
362 * duplicated.
363 *
364 */
365 long dma_buf_set_name(struct dma_buf *dmabuf, const char *name)
366 {
367 long ret = 0;
368 char *buf = kstrndup(name, DMA_BUF_NAME_LEN, GFP_KERNEL);
369
370 if (!buf)
371 return -ENOMEM;
372
373 ret = _dma_buf_set_name(dmabuf, buf);
374 if (ret)
375 kfree(buf);
376
377 return ret;
378 }
379 EXPORT_SYMBOL_GPL(dma_buf_set_name);
380
381 static long dma_buf_set_name_user(struct dma_buf *dmabuf, const char __user *buf)
382 {
383 char *name = strndup_user(buf, DMA_BUF_NAME_LEN);
384 long ret = 0;
385
386 if (IS_ERR(name))
387 return PTR_ERR(name);
388
389 ret = _dma_buf_set_name(dmabuf, name);
390 if (ret)
391 kfree(name);
392
393 return ret;
394 }
395
396 #if IS_ENABLED(CONFIG_SYNC_FILE)
397 static long dma_buf_export_sync_file(struct dma_buf *dmabuf,
398 void __user *user_data)
399 {
400 struct dma_buf_export_sync_file arg;
401 enum dma_resv_usage usage;
402 struct dma_fence *fence = NULL;
403 struct sync_file *sync_file;
404 int fd, ret;
405
406 if (copy_from_user(&arg, user_data, sizeof(arg)))
407 return -EFAULT;
408
409 if (arg.flags & ~DMA_BUF_SYNC_RW)
410 return -EINVAL;
411
412 if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
413 return -EINVAL;
414
415 fd = get_unused_fd_flags(O_CLOEXEC);
416 if (fd < 0)
417 return fd;
418
419 usage = dma_resv_usage_rw(arg.flags & DMA_BUF_SYNC_WRITE);
420 ret = dma_resv_get_singleton(dmabuf->resv, usage, &fence);
421 if (ret)
422 goto err_put_fd;
423
424 if (!fence)
425 fence = dma_fence_get_stub();
426
427 sync_file = sync_file_create(fence);
428
429 dma_fence_put(fence);
430
431 if (!sync_file) {
432 ret = -ENOMEM;
433 goto err_put_fd;
434 }
435
436 arg.fd = fd;
437 if (copy_to_user(user_data, &arg, sizeof(arg))) {
438 ret = -EFAULT;
439 goto err_put_file;
440 }
441
442 fd_install(fd, sync_file->file);
443
444 return 0;
445
446 err_put_file:
447 fput(sync_file->file);
448 err_put_fd:
449 put_unused_fd(fd);
450 return ret;
451 }
452
453 static long dma_buf_import_sync_file(struct dma_buf *dmabuf,
454 const void __user *user_data)
455 {
456 struct dma_buf_import_sync_file arg;
457 struct dma_fence *fence, *f;
458 enum dma_resv_usage usage;
459 struct dma_fence_unwrap iter;
460 unsigned int num_fences;
461 int ret = 0;
462
463 if (copy_from_user(&arg, user_data, sizeof(arg)))
464 return -EFAULT;
465
466 if (arg.flags & ~DMA_BUF_SYNC_RW)
467 return -EINVAL;
468
469 if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
470 return -EINVAL;
471
472 fence = sync_file_get_fence(arg.fd);
473 if (!fence)
474 return -EINVAL;
475
476 usage = (arg.flags & DMA_BUF_SYNC_WRITE) ? DMA_RESV_USAGE_WRITE :
477 DMA_RESV_USAGE_READ;
478
479 num_fences = 0;
480 dma_fence_unwrap_for_each(f, &iter, fence)
481 ++num_fences;
482
483 if (num_fences > 0) {
484 dma_resv_lock(dmabuf->resv, NULL);
485
486 ret = dma_resv_reserve_fences(dmabuf->resv, num_fences);
487 if (!ret) {
488 dma_fence_unwrap_for_each(f, &iter, fence)
489 dma_resv_add_fence(dmabuf->resv, f, usage);
490 }
491
492 dma_resv_unlock(dmabuf->resv);
493 }
494
495 dma_fence_put(fence);
496
497 return ret;
498 }
499 #endif
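/*
 * Example: a hedged userspace sketch of the two ioctls implemented above,
 * assuming a valid dmabuf_fd and <linux/dma-buf.h>; error handling is omitted.
 *
 *	struct dma_buf_export_sync_file exp = { .flags = DMA_BUF_SYNC_READ };
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_EXPORT_SYNC_FILE, &exp);
 *	// exp.fd is now a sync_file snapshot of the buffer's current fences
 *
 *	struct dma_buf_import_sync_file imp = {
 *		.flags = DMA_BUF_SYNC_WRITE,
 *		.fd = some_sync_file_fd,	// hypothetical sync_file fd
 *	};
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_IMPORT_SYNC_FILE, &imp);
 */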
500
501 static long dma_buf_ioctl(struct file *file,
502 unsigned int cmd, unsigned long arg)
503 {
504 struct dma_buf *dmabuf;
505 struct dma_buf_sync sync;
506 enum dma_data_direction direction;
507 int ret;
508
509 dmabuf = file->private_data;
510
511 switch (cmd) {
512 case DMA_BUF_IOCTL_SYNC:
513 if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
514 return -EFAULT;
515
516 if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
517 return -EINVAL;
518
519 switch (sync.flags & DMA_BUF_SYNC_RW) {
520 case DMA_BUF_SYNC_READ:
521 direction = DMA_FROM_DEVICE;
522 break;
523 case DMA_BUF_SYNC_WRITE:
524 direction = DMA_TO_DEVICE;
525 break;
526 case DMA_BUF_SYNC_RW:
527 direction = DMA_BIDIRECTIONAL;
528 break;
529 default:
530 return -EINVAL;
531 }
532
533 if (sync.flags & DMA_BUF_SYNC_END)
534 ret = dma_buf_end_cpu_access(dmabuf, direction);
535 else
536 ret = dma_buf_begin_cpu_access(dmabuf, direction);
537
538 return ret;
539
540 case DMA_BUF_SET_NAME_A:
541 case DMA_BUF_SET_NAME_B:
542 return dma_buf_set_name_user(dmabuf, (const char __user *)arg);
543
544 #if IS_ENABLED(CONFIG_SYNC_FILE)
545 case DMA_BUF_IOCTL_EXPORT_SYNC_FILE:
546 return dma_buf_export_sync_file(dmabuf, (void __user *)arg);
547 case DMA_BUF_IOCTL_IMPORT_SYNC_FILE:
548 return dma_buf_import_sync_file(dmabuf, (const void __user *)arg);
549 #endif
550
551 default:
552 return -ENOTTY;
553 }
554 }
555
556 static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
557 {
558 struct dma_buf *dmabuf = file->private_data;
559
560 seq_printf(m, "size:\t%zu\n", dmabuf->size);
561 /* Don't count the temporary reference taken inside procfs seq_show */
562 seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
563 seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
564 spin_lock(&dmabuf->name_lock);
565 if (dmabuf->name)
566 seq_printf(m, "name:\t%s\n", dmabuf->name);
567 spin_unlock(&dmabuf->name_lock);
568 }
569
570 static const struct file_operations dma_buf_fops = {
571 .release = dma_buf_file_release,
572 .mmap = dma_buf_mmap_internal,
573 .llseek = dma_buf_llseek,
574 .poll = dma_buf_poll,
575 .unlocked_ioctl = dma_buf_ioctl,
576 .compat_ioctl = compat_ptr_ioctl,
577 .show_fdinfo = dma_buf_show_fdinfo,
578 };
579
580 /*
581 * is_dma_buf_file - Check if struct file* is associated with dma_buf
582 */
583 int is_dma_buf_file(struct file *file)
584 {
585 return file->f_op == &dma_buf_fops;
586 }
587 EXPORT_SYMBOL_NS_GPL(is_dma_buf_file, DMA_BUF);
588
589 static struct file *dma_buf_getfile(size_t size, int flags)
590 {
591 static atomic64_t dmabuf_inode = ATOMIC64_INIT(0);
592 struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);
593 struct file *file;
594
595 if (IS_ERR(inode))
596 return ERR_CAST(inode);
597
598 inode->i_size = size;
599 inode_set_bytes(inode, size);
600
601 /*
602 * The ->i_ino acquired from get_next_ino() is not unique and is
603 * therefore not suitable for use as the dentry name by dmabuf stats.
604 * Override ->i_ino with a unique and dmabuffs-specific
605 * value.
606 */
607 inode->i_ino = atomic64_add_return(1, &dmabuf_inode);
608 flags &= O_ACCMODE | O_NONBLOCK;
609 file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
610 flags, &dma_buf_fops);
611 if (IS_ERR(file))
612 goto err_alloc_file;
613
614 return file;
615
616 err_alloc_file:
617 iput(inode);
618 return file;
619 }
620
621 /**
622 * DOC: dma buf device access
623 *
624 * For device DMA access to a shared DMA buffer the usual sequence of operations
625 * is fairly simple:
626 *
627 * 1. The exporter defines its exporter instance using
628 * DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private
629 * buffer object into a &dma_buf. It then exports that &dma_buf to userspace
630 * as a file descriptor by calling dma_buf_fd().
631 *
632 * 2. Userspace passes this file descriptor to all drivers it wants this buffer
633 * to share with: First the file descriptor is converted to a &dma_buf using
634 * dma_buf_get(). Then the buffer is attached to the device using
635 * dma_buf_attach().
636 *
637 * Up to this stage the exporter is still free to migrate or reallocate the
638 * backing storage.
639 *
640 * 3. Once the buffer is attached to all devices userspace can initiate DMA
641 * access to the shared buffer. In the kernel this is done by calling
642 * dma_buf_map_attachment() and dma_buf_unmap_attachment().
643 *
644 * 4. Once a driver is done with a shared buffer it needs to call
645 * dma_buf_detach() (after cleaning up any mappings) and then release the
646 * reference acquired with dma_buf_get() by calling dma_buf_put().
647 *
648 * For the detailed semantics exporters are expected to implement see
649 * &dma_buf_ops.
650 */
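/*
 * Example: a hedged exporter-side sketch of step 1 above. The ops table and
 * the backing object (my_dma_buf_ops, my_obj) are placeholders, not part of
 * this file; error handling is abbreviated.
 *
 *	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *	struct dma_buf *dmabuf;
 *	int fd;
 *
 *	exp_info.ops = &my_dma_buf_ops;
 *	exp_info.size = my_obj->size;
 *	exp_info.flags = O_CLOEXEC;
 *	exp_info.priv = my_obj;
 *
 *	dmabuf = dma_buf_export(&exp_info);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *
 *	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 */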
651
652 /**
653 * dma_buf_export - Creates a new dma_buf, and associates an anon file
654 * with this buffer, so it can be exported.
655 * It also connects the allocator-specific data and ops to the buffer and
656 * records the exporter's name string, which is useful for debugging.
657 *
658 * @exp_info: [in] holds all the export related information provided
659 * by the exporter. see &struct dma_buf_export_info
660 * for further details.
661 *
662 * Returns, on success, a newly created struct dma_buf object, which wraps the
663 * supplied private data and operations for struct dma_buf_ops. If the
664 * required ops are missing, or allocating the struct dma_buf fails, a
665 * negative error pointer is returned.
666 *
667 * For most cases the easiest way to create @exp_info is through the
668 * %DEFINE_DMA_BUF_EXPORT_INFO macro.
669 */
670 struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
671 {
672 struct dma_buf *dmabuf;
673 struct dma_resv *resv = exp_info->resv;
674 struct file *file;
675 size_t alloc_size = sizeof(struct dma_buf);
676 int ret;
677
678 if (WARN_ON(!exp_info->priv || !exp_info->ops
679 || !exp_info->ops->map_dma_buf
680 || !exp_info->ops->unmap_dma_buf
681 || !exp_info->ops->release))
682 return ERR_PTR(-EINVAL);
683
684 if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
685 (exp_info->ops->pin || exp_info->ops->unpin)))
686 return ERR_PTR(-EINVAL);
687
688 if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
689 return ERR_PTR(-EINVAL);
690
691 if (!try_module_get(exp_info->owner))
692 return ERR_PTR(-ENOENT);
693
694 file = dma_buf_getfile(exp_info->size, exp_info->flags);
695 if (IS_ERR(file)) {
696 ret = PTR_ERR(file);
697 goto err_module;
698 }
699
700 if (!exp_info->resv)
701 alloc_size += sizeof(struct dma_resv);
702 else
703 /* prevent &dma_buf[1] == dma_buf->resv */
704 alloc_size += 1;
705 dmabuf = kzalloc(alloc_size, GFP_KERNEL);
706 if (!dmabuf) {
707 ret = -ENOMEM;
708 goto err_file;
709 }
710
711 dmabuf->priv = exp_info->priv;
712 dmabuf->ops = exp_info->ops;
713 dmabuf->size = exp_info->size;
714 dmabuf->exp_name = exp_info->exp_name;
715 dmabuf->owner = exp_info->owner;
716 spin_lock_init(&dmabuf->name_lock);
717 init_waitqueue_head(&dmabuf->poll);
718 dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll;
719 dmabuf->cb_in.active = dmabuf->cb_out.active = 0;
720 mutex_init(&dmabuf->lock);
721 INIT_LIST_HEAD(&dmabuf->attachments);
722
723 if (!resv) {
724 dmabuf->resv = (struct dma_resv *)&dmabuf[1];
725 dma_resv_init(dmabuf->resv);
726 } else {
727 dmabuf->resv = resv;
728 }
729
730 file->private_data = dmabuf;
731 file->f_path.dentry->d_fsdata = dmabuf;
732 dmabuf->file = file;
733
734 mutex_lock(&db_list.lock);
735 list_add(&dmabuf->list_node, &db_list.head);
736 mutex_unlock(&db_list.lock);
737
738 ret = dma_buf_stats_setup(dmabuf, file);
739 if (ret)
740 goto err_sysfs;
741
742 return dmabuf;
743
744 err_sysfs:
745 mutex_lock(&db_list.lock);
746 list_del(&dmabuf->list_node);
747 mutex_unlock(&db_list.lock);
748 dmabuf->file = NULL;
749 file->f_path.dentry->d_fsdata = NULL;
750 file->private_data = NULL;
751 if (!resv)
752 dma_resv_fini(dmabuf->resv);
753 kfree(dmabuf);
754 err_file:
755 fput(file);
756 err_module:
757 module_put(exp_info->owner);
758 return ERR_PTR(ret);
759 }
760 EXPORT_SYMBOL_NS_GPL(dma_buf_export, DMA_BUF);
761
762 /**
763 * dma_buf_fd - returns a file descriptor for the given struct dma_buf
764 * @dmabuf: [in] pointer to dma_buf for which fd is required.
765 * @flags: [in] flags to give to fd
766 *
767 * On success, returns an associated 'fd'. Else, returns error.
768 */
769 int dma_buf_fd(struct dma_buf *dmabuf, int flags)
770 {
771 int fd;
772
773 if (!dmabuf || !dmabuf->file)
774 return -EINVAL;
775
776 fd = get_unused_fd_flags(flags);
777 if (fd < 0)
778 return fd;
779
780 fd_install(fd, dmabuf->file);
781
782 return fd;
783 }
784 EXPORT_SYMBOL_NS_GPL(dma_buf_fd, DMA_BUF);
785
786 /**
787 * dma_buf_get - returns the struct dma_buf related to an fd
788 * @fd: [in] fd associated with the struct dma_buf to be returned
789 *
790 * On success, returns the struct dma_buf associated with an fd; uses
791 * the file's refcounting done by fget() to increase the refcount. Returns
792 * ERR_PTR otherwise.
793 */
794 struct dma_buf *dma_buf_get(int fd)
795 {
796 struct file *file;
797
798 file = fget(fd);
799
800 if (!file)
801 return ERR_PTR(-EBADF);
802
803 if (!is_dma_buf_file(file)) {
804 fput(file);
805 return ERR_PTR(-EINVAL);
806 }
807
808 return file->private_data;
809 }
810 EXPORT_SYMBOL_NS_GPL(dma_buf_get, DMA_BUF);
811
812 /**
813 * dma_buf_put - decreases refcount of the buffer
814 * @dmabuf: [in] buffer to reduce refcount of
815 *
816 * Uses file's refcounting done implicitly by fput().
817 *
818 * If, as a result of this call, the refcount becomes 0, the 'release' file
819 * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
820 * in turn, and frees the memory allocated for dmabuf when exported.
821 */
822 void dma_buf_put(struct dma_buf *dmabuf)
823 {
824 if (WARN_ON(!dmabuf || !dmabuf->file))
825 return;
826
827 fput(dmabuf->file);
828 }
829 EXPORT_SYMBOL_NS_GPL(dma_buf_put, DMA_BUF);
830
831 static void mangle_sg_table(struct sg_table *sg_table)
832 {
833 #ifdef CONFIG_DMABUF_DEBUG
834 int i;
835 struct scatterlist *sg;
836
837 /* To catch abuse of the underlying struct page by importers mix
838 * up the bits, but take care to preserve the low SG_ bits to
839 * not corrupt the sgt. The mixing is undone in __unmap_dma_buf
840 * before passing the sgt back to the exporter. */
841 for_each_sgtable_sg(sg_table, sg, i)
842 sg->page_link ^= ~0xffUL;
843 #endif
844
845 }
846 static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
847 enum dma_data_direction direction)
848 {
849 struct sg_table *sg_table;
850 signed long ret;
851
852 sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
853 if (IS_ERR_OR_NULL(sg_table))
854 return sg_table;
855
856 if (!dma_buf_attachment_is_dynamic(attach)) {
857 ret = dma_resv_wait_timeout(attach->dmabuf->resv,
858 DMA_RESV_USAGE_KERNEL, true,
859 MAX_SCHEDULE_TIMEOUT);
860 if (ret < 0) {
861 attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
862 direction);
863 return ERR_PTR(ret);
864 }
865 }
866
867 mangle_sg_table(sg_table);
868 return sg_table;
869 }
870
871 /**
872 * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
873 * @dmabuf: [in] buffer to attach device to.
874 * @dev: [in] device to be attached.
875 * @importer_ops: [in] importer operations for the attachment
876 * @importer_priv: [in] importer private pointer for the attachment
877 *
878 * Returns struct dma_buf_attachment pointer for this attachment. Attachments
879 * must be cleaned up by calling dma_buf_detach().
880 *
881 * Optionally this calls &dma_buf_ops.attach to allow device-specific attach
882 * functionality.
883 *
884 * Returns:
885 *
886 * A pointer to newly created &dma_buf_attachment on success, or a negative
887 * error code wrapped into a pointer on failure.
888 *
889 * Note that this can fail if the backing storage of @dmabuf is in a place not
890 * accessible to @dev, and cannot be moved to a more suitable place. This is
891 * indicated with the error code -EBUSY.
892 */
893 struct dma_buf_attachment *
894 dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
895 const struct dma_buf_attach_ops *importer_ops,
896 void *importer_priv)
897 {
898 struct dma_buf_attachment *attach;
899 int ret;
900
901 if (WARN_ON(!dmabuf || !dev))
902 return ERR_PTR(-EINVAL);
903
904 if (WARN_ON(importer_ops && !importer_ops->move_notify))
905 return ERR_PTR(-EINVAL);
906
907 attach = kzalloc(sizeof(*attach), GFP_KERNEL);
908 if (!attach)
909 return ERR_PTR(-ENOMEM);
910
911 attach->dev = dev;
912 attach->dmabuf = dmabuf;
913 if (importer_ops)
914 attach->peer2peer = importer_ops->allow_peer2peer;
915 attach->importer_ops = importer_ops;
916 attach->importer_priv = importer_priv;
917
918 if (dmabuf->ops->attach) {
919 ret = dmabuf->ops->attach(dmabuf, attach);
920 if (ret)
921 goto err_attach;
922 }
923 dma_resv_lock(dmabuf->resv, NULL);
924 list_add(&attach->node, &dmabuf->attachments);
925 dma_resv_unlock(dmabuf->resv);
926
927 /* When either the importer or the exporter can't handle dynamic
928 * mappings we cache the mapping here to avoid issues with the
929 * reservation object lock.
930 */
931 if (dma_buf_attachment_is_dynamic(attach) !=
932 dma_buf_is_dynamic(dmabuf)) {
933 struct sg_table *sgt;
934
935 if (dma_buf_is_dynamic(attach->dmabuf)) {
936 dma_resv_lock(attach->dmabuf->resv, NULL);
937 ret = dmabuf->ops->pin(attach);
938 if (ret)
939 goto err_unlock;
940 }
941
942 sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
943 if (!sgt)
944 sgt = ERR_PTR(-ENOMEM);
945 if (IS_ERR(sgt)) {
946 ret = PTR_ERR(sgt);
947 goto err_unpin;
948 }
949 if (dma_buf_is_dynamic(attach->dmabuf))
950 dma_resv_unlock(attach->dmabuf->resv);
951 attach->sgt = sgt;
952 attach->dir = DMA_BIDIRECTIONAL;
953 }
954
955 return attach;
956
957 err_attach:
958 kfree(attach);
959 return ERR_PTR(ret);
960
961 err_unpin:
962 if (dma_buf_is_dynamic(attach->dmabuf))
963 dmabuf->ops->unpin(attach);
964
965 err_unlock:
966 if (dma_buf_is_dynamic(attach->dmabuf))
967 dma_resv_unlock(attach->dmabuf->resv);
968
969 dma_buf_detach(dmabuf, attach);
970 return ERR_PTR(ret);
971 }
972 EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, DMA_BUF);
973
974 /**
975 * dma_buf_attach - Wrapper for dma_buf_dynamic_attach
976 * @dmabuf: [in] buffer to attach device to.
977 * @dev: [in] device to be attached.
978 *
979 * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static
980 * mapping.
981 */
982 struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
983 struct device *dev)
984 {
985 return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
986 }
987 EXPORT_SYMBOL_NS_GPL(dma_buf_attach, DMA_BUF);
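/*
 * Example: a hedged importer-side sketch for a driver that only needs a
 * static mapping; dev and dmabuf_fd are placeholders and error handling is
 * omitted.
 *
 *	struct dma_buf *dmabuf = dma_buf_get(dmabuf_fd);
 *	struct dma_buf_attachment *attach;
 *	struct sg_table *sgt;
 *
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *
 *	attach = dma_buf_attach(dmabuf, dev);
 *	sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
 *
 *	// ... program the device using the returned sg_table ...
 *
 *	dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(dmabuf, attach);
 *	dma_buf_put(dmabuf);
 */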
988
989 static void __unmap_dma_buf(struct dma_buf_attachment *attach,
990 struct sg_table *sg_table,
991 enum dma_data_direction direction)
992 {
993 /* uses XOR, hence this unmangles */
994 mangle_sg_table(sg_table);
995
996 attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
997 }
998
999 /**
1000 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
1001 * @dmabuf: [in] buffer to detach from.
1002 * @attach: [in] attachment to be detached; is free'd after this call.
1003 *
1004 * Clean up a device attachment obtained by calling dma_buf_attach().
1005 *
1006 * Optionally this calls &dma_buf_ops.detach for device-specific detach.
1007 */
1008 void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
1009 {
1010 if (WARN_ON(!dmabuf || !attach))
1011 return;
1012
1013 if (attach->sgt) {
1014 if (dma_buf_is_dynamic(attach->dmabuf))
1015 dma_resv_lock(attach->dmabuf->resv, NULL);
1016
1017 __unmap_dma_buf(attach, attach->sgt, attach->dir);
1018
1019 if (dma_buf_is_dynamic(attach->dmabuf)) {
1020 dmabuf->ops->unpin(attach);
1021 dma_resv_unlock(attach->dmabuf->resv);
1022 }
1023 }
1024
1025 dma_resv_lock(dmabuf->resv, NULL);
1026 list_del(&attach->node);
1027 dma_resv_unlock(dmabuf->resv);
1028 if (dmabuf->ops->detach)
1029 dmabuf->ops->detach(dmabuf, attach);
1030
1031 kfree(attach);
1032 }
1033 EXPORT_SYMBOL_NS_GPL(dma_buf_detach, DMA_BUF);
1034
1035 /**
1036 * dma_buf_pin - Lock down the DMA-buf
1037 * @attach: [in] attachment which should be pinned
1038 *
1039 * Only dynamic importers (who set up @attach with dma_buf_dynamic_attach()) may
1040 * call this, and only for limited use cases like scanout and not for temporary
1041 * pin operations. It is not permitted to allow userspace to pin arbitrary
1042 * amounts of buffers through this interface.
1043 *
1044 * Buffers must be unpinned by calling dma_buf_unpin().
1045 *
1046 * Returns:
1047 * 0 on success, negative error code on failure.
1048 */
1049 int dma_buf_pin(struct dma_buf_attachment *attach)
1050 {
1051 struct dma_buf *dmabuf = attach->dmabuf;
1052 int ret = 0;
1053
1054 WARN_ON(!dma_buf_attachment_is_dynamic(attach));
1055
1056 dma_resv_assert_held(dmabuf->resv);
1057
1058 if (dmabuf->ops->pin)
1059 ret = dmabuf->ops->pin(attach);
1060
1061 return ret;
1062 }
1063 EXPORT_SYMBOL_NS_GPL(dma_buf_pin, DMA_BUF);
1064
1065 /**
1066 * dma_buf_unpin - Unpin a DMA-buf
1067 * @attach: [in] attachment which should be unpinned
1068 *
1069 * This unpins a buffer pinned by dma_buf_pin() and allows the exporter to move
1070 * any mapping of @attach again and inform the importer through
1071 * &dma_buf_attach_ops.move_notify.
1072 */
1073 void dma_buf_unpin(struct dma_buf_attachment *attach)
1074 {
1075 struct dma_buf *dmabuf = attach->dmabuf;
1076
1077 WARN_ON(!dma_buf_attachment_is_dynamic(attach));
1078
1079 dma_resv_assert_held(dmabuf->resv);
1080
1081 if (dmabuf->ops->unpin)
1082 dmabuf->ops->unpin(attach);
1083 }
1084 EXPORT_SYMBOL_NS_GPL(dma_buf_unpin, DMA_BUF);
1085
1086 /**
1087 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
1088 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
1089 * dma_buf_ops.
1090 * @attach: [in] attachment whose scatterlist is to be returned
1091 * @direction: [in] direction of DMA transfer
1092 *
1093 * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
1094 * on error. May return -EINTR if it is interrupted by a signal.
1095 *
1096 * On success, the DMA addresses and lengths in the returned scatterlist are
1097 * PAGE_SIZE aligned.
1098 *
1099 * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
1100 * the underlying backing storage is pinned for as long as a mapping exists,
1101 * therefore users/importers should not hold onto a mapping for undue amounts of
1102 * time.
1103 *
1104 * Important: Dynamic importers must wait for the exclusive fence of the struct
1105 * dma_resv attached to the DMA-BUF first.
1106 */
1107 struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
1108 enum dma_data_direction direction)
1109 {
1110 struct sg_table *sg_table;
1111 int r;
1112
1113 might_sleep();
1114
1115 if (WARN_ON(!attach || !attach->dmabuf))
1116 return ERR_PTR(-EINVAL);
1117
1118 if (dma_buf_attachment_is_dynamic(attach))
1119 dma_resv_assert_held(attach->dmabuf->resv);
1120
1121 if (attach->sgt) {
1122 /*
1123 * Two mappings with different directions for the same
1124 * attachment are not allowed.
1125 */
1126 if (attach->dir != direction &&
1127 attach->dir != DMA_BIDIRECTIONAL)
1128 return ERR_PTR(-EBUSY);
1129
1130 return attach->sgt;
1131 }
1132
1133 if (dma_buf_is_dynamic(attach->dmabuf)) {
1134 dma_resv_assert_held(attach->dmabuf->resv);
1135 if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
1136 r = attach->dmabuf->ops->pin(attach);
1137 if (r)
1138 return ERR_PTR(r);
1139 }
1140 }
1141
1142 sg_table = __map_dma_buf(attach, direction);
1143 if (!sg_table)
1144 sg_table = ERR_PTR(-ENOMEM);
1145
1146 if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
1147 !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
1148 attach->dmabuf->ops->unpin(attach);
1149
1150 if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
1151 attach->sgt = sg_table;
1152 attach->dir = direction;
1153 }
1154
1155 #ifdef CONFIG_DMA_API_DEBUG
1156 if (!IS_ERR(sg_table)) {
1157 struct scatterlist *sg;
1158 u64 addr;
1159 int len;
1160 int i;
1161
1162 for_each_sgtable_dma_sg(sg_table, sg, i) {
1163 addr = sg_dma_address(sg);
1164 len = sg_dma_len(sg);
1165 if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len)) {
1166 pr_debug("%s: addr %llx or len %x is not page aligned!\n",
1167 __func__, addr, len);
1168 }
1169 }
1170 }
1171 #endif /* CONFIG_DMA_API_DEBUG */
1172 return sg_table;
1173 }
1174 EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, DMA_BUF);
1175
1176 /**
1177 * dma_buf_map_attachment_unlocked - Returns the scatterlist table of the attachment;
1178 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
1179 * dma_buf_ops.
1180 * @attach: [in] attachment whose scatterlist is to be returned
1181 * @direction: [in] direction of DMA transfer
1182 *
1183 * Unlocked variant of dma_buf_map_attachment().
1184 */
1185 struct sg_table *
1186 dma_buf_map_attachment_unlocked(struct dma_buf_attachment *attach,
1187 enum dma_data_direction direction)
1188 {
1189 struct sg_table *sg_table;
1190
1191 might_sleep();
1192
1193 if (WARN_ON(!attach || !attach->dmabuf))
1194 return ERR_PTR(-EINVAL);
1195
1196 dma_resv_lock(attach->dmabuf->resv, NULL);
1197 sg_table = dma_buf_map_attachment(attach, direction);
1198 dma_resv_unlock(attach->dmabuf->resv);
1199
1200 return sg_table;
1201 }
1202 EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment_unlocked, DMA_BUF);
1203
1204 /**
1205 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
1206 * deallocate the associated scatterlist. Is a wrapper for unmap_dma_buf() of
1207 * dma_buf_ops.
1208 * @attach: [in] attachment to unmap buffer from
1209 * @sg_table: [in] scatterlist info of the buffer to unmap
1210 * @direction: [in] direction of DMA transfer
1211 *
1212 * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
1213 */
1214 void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
1215 struct sg_table *sg_table,
1216 enum dma_data_direction direction)
1217 {
1218 might_sleep();
1219
1220 if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
1221 return;
1222
1223 if (dma_buf_attachment_is_dynamic(attach))
1224 dma_resv_assert_held(attach->dmabuf->resv);
1225
1226 if (attach->sgt == sg_table)
1227 return;
1228
1229 if (dma_buf_is_dynamic(attach->dmabuf))
1230 dma_resv_assert_held(attach->dmabuf->resv);
1231
1232 __unmap_dma_buf(attach, sg_table, direction);
1233
1234 if (dma_buf_is_dynamic(attach->dmabuf) &&
1235 !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
1236 dma_buf_unpin(attach);
1237 }
1238 EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, DMA_BUF);
1239
1240 /**
1241 * dma_buf_unmap_attachment_unlocked - unmaps and decreases usecount of the buffer; might
1242 * deallocate the associated scatterlist. Is a wrapper for unmap_dma_buf() of
1243 * dma_buf_ops.
1244 * @attach: [in] attachment to unmap buffer from
1245 * @sg_table: [in] scatterlist info of the buffer to unmap
1246 * @direction: [in] direction of DMA transfer
1247 *
1248 * Unlocked variant of dma_buf_unmap_attachment().
1249 */
1250 void dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment *attach,
1251 struct sg_table *sg_table,
1252 enum dma_data_direction direction)
1253 {
1254 might_sleep();
1255
1256 if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
1257 return;
1258
1259 dma_resv_lock(attach->dmabuf->resv, NULL);
1260 dma_buf_unmap_attachment(attach, sg_table, direction);
1261 dma_resv_unlock(attach->dmabuf->resv);
1262 }
1263 EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment_unlocked, DMA_BUF);
1264
1265 /**
1266 * dma_buf_move_notify - notify attachments that DMA-buf is moving
1267 *
1268 * @dmabuf: [in] buffer which is moving
1269 *
1270 * Informs all attachments that they need to destroy and recreate all their
1271 * mappings.
1272 */
1273 void dma_buf_move_notify(struct dma_buf *dmabuf)
1274 {
1275 struct dma_buf_attachment *attach;
1276
1277 dma_resv_assert_held(dmabuf->resv);
1278
1279 list_for_each_entry(attach, &dmabuf->attachments, node)
1280 if (attach->importer_ops)
1281 attach->importer_ops->move_notify(attach);
1282 }
1283 EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF);
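/*
 * Example: a hedged sketch of the importer side of move notifications; the
 * callback body and my_attach_ops are placeholders, not part of this file.
 *
 *	static void my_move_notify(struct dma_buf_attachment *attach)
 *	{
 *		// drop cached mappings here; re-map on next use while the
 *		// dma_resv lock is held
 *	}
 *
 *	static const struct dma_buf_attach_ops my_attach_ops = {
 *		.allow_peer2peer = true,
 *		.move_notify = my_move_notify,
 *	};
 *
 *	attach = dma_buf_dynamic_attach(dmabuf, dev, &my_attach_ops, my_priv);
 */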
1284
1285 /**
1286 * DOC: cpu access
1287 *
1288 * There are multiple reasons for supporting CPU access to a dma buffer object:
1289 *
1290 * - Fallback operations in the kernel, for example when a device is connected
1291 * over USB and the kernel needs to shuffle the data around first before
1292 * sending it away. Cache coherency is handled by bracketing any transactions
1293 * with calls to dma_buf_begin_cpu_access() and
1294 * dma_buf_end_cpu_access().
1295 *
1296 * Since most kernel internal dma-buf accesses need the entire buffer, a
1297 * vmap interface is introduced. Note that on very old 32-bit architectures
1298 * vmalloc space might be limited and result in vmap calls failing.
1299 *
1300 * Interfaces::
1301 *
1302 * void \*dma_buf_vmap(struct dma_buf \*dmabuf, struct iosys_map \*map)
1303 * void dma_buf_vunmap(struct dma_buf \*dmabuf, struct iosys_map \*map)
1304 *
1305 * The vmap call can fail if there is no vmap support in the exporter, or if
1306 * it runs out of vmalloc space. Note that the dma-buf layer keeps a reference
1307 * count for all vmap access and calls down into the exporter's vmap function
1308 * only when no vmapping exists, and only unmaps it once. Protection against
1309 * concurrent vmap/vunmap calls is provided by taking the &dma_buf.lock mutex.
1310 *
1311 * - For full compatibility on the importer side with existing userspace
1312 * interfaces, which might already support mmap'ing buffers. This is needed in
1313 * many processing pipelines (e.g. feeding a software rendered image into a
1314 * hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION
1315 * framework already supported this and for DMA buffer file descriptors to
1316 * replace ION buffers mmap support was needed.
1317 *
1318 * There are no special interfaces, userspace simply calls mmap on the dma-buf
1319 * fd. But like for CPU access there's a need to bracket the actual access,
1320 * which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that
1321 * DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
1322 * be restarted.
1323 *
1324 * Some systems might need some sort of cache coherency management e.g. when
1325 * CPU and GPU domains are being accessed through dma-buf at the same time.
1326 * To circumvent this problem there are begin/end coherency markers that
1327 * forward directly to the existing dma-buf device drivers' vfunc hooks. Userspace
1328 * can make use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The
1329 * sequence would be used like the following:
1330 *
1331 * - mmap dma-buf fd
1332 * - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write
1333 * to mmap area 3. SYNC_END ioctl. This can be repeated as often as you
1334 * want (with the new data being consumed by say the GPU or the scanout
1335 * device)
1336 * - munmap once you don't need the buffer any more
1337 *
1338 * For correctness and optimal performance, it is always required to use
1339 * SYNC_START and SYNC_END before and after, respectively, when accessing the
1340 * mapped address. Userspace cannot rely on coherent access, even when there
1341 * are systems where it just works without calling these ioctls.
1342 *
1343 * - And as a CPU fallback in userspace processing pipelines.
1344 *
1345 * Similar to the motivation for kernel cpu access it is again important that
1346 * the userspace code of a given importing subsystem can use the same
1347 * interfaces with an imported dma-buf buffer object as with a native buffer
1348 * object. This is especially important for drm where the userspace part of
1349 * contemporary OpenGL, X, and other drivers is huge, and reworking them to
1350 * use a different way to mmap a buffer would be rather invasive.
1351 *
1352 * The assumption in the current dma-buf interfaces is that redirecting the
1353 * initial mmap is all that's needed. A survey of some of the existing
1354 * subsystems shows that no driver seems to do any nefarious thing like
1355 * syncing up with outstanding asynchronous processing on the device or
1356 * allocating special resources at fault time. So hopefully this is good
1357 * enough, since adding interfaces to intercept pagefaults and allow pte
1358 * shootdowns would increase the complexity quite a bit.
1359 *
1360 * Interface::
1361 *
1362 * int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
1363 * unsigned long);
1364 *
1365 * If the importing subsystem simply provides a special-purpose mmap call to
1366 * set up a mapping in userspace, calling do_mmap with &dma_buf.file will
1367 * equally achieve that for a dma-buf object.
1368 */
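/*
 * Example: a minimal userspace sketch of the bracketing described above,
 * assuming the dma-buf fd has already been mmap'ed; error handling is omitted.
 *
 *	struct dma_buf_sync sync = { 0 };
 *
 *	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW;
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *
 *	// read from / write to the mapped area
 *
 *	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 */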
1369
1370 static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
1371 enum dma_data_direction direction)
1372 {
1373 bool write = (direction == DMA_BIDIRECTIONAL ||
1374 direction == DMA_TO_DEVICE);
1375 struct dma_resv *resv = dmabuf->resv;
1376 long ret;
1377
1378 /* Wait on any implicit rendering fences */
1379 ret = dma_resv_wait_timeout(resv, dma_resv_usage_rw(write),
1380 true, MAX_SCHEDULE_TIMEOUT);
1381 if (ret < 0)
1382 return ret;
1383
1384 return 0;
1385 }
1386
1387 /**
1388 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
1389 * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
1390 * preparations. Coherency is only guaranteed in the specified range for the
1391 * specified access direction.
1392 * @dmabuf: [in] buffer to prepare cpu access for.
1393 * @direction: [in] direction of the CPU access.
1394 *
1395 * After the cpu access is complete the caller should call
1396 * dma_buf_end_cpu_access(). Only when cpu access is braketed by both calls is
1397 * it guaranteed to be coherent with other DMA access.
1398 *
1399 * This function will also wait for any DMA transactions tracked through
1400 * implicit synchronization in &dma_buf.resv. For DMA transactions with explicit
1401 * synchronization this function will only ensure cache coherency, callers must
1402 * ensure synchronization with such DMA transactions on their own.
1403 *
1404 * Can return negative error values, returns 0 on success.
1405 */
1406 int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
1407 enum dma_data_direction direction)
1408 {
1409 int ret = 0;
1410
1411 if (WARN_ON(!dmabuf))
1412 return -EINVAL;
1413
1414 might_lock(&dmabuf->resv->lock.base);
1415
1416 if (dmabuf->ops->begin_cpu_access)
1417 ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
1418
1419 /* Ensure that all fences are waited upon - but we first allow
1420 * the native handler the chance to do so more efficiently if it
1421 * chooses. A double invocation here will be reasonably cheap no-op.
1422 */
1423 if (ret == 0)
1424 ret = __dma_buf_begin_cpu_access(dmabuf, direction);
1425
1426 return ret;
1427 }
1428 EXPORT_SYMBOL_NS_GPL(dma_buf_begin_cpu_access, DMA_BUF);
1429
1430 int dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
1431 enum dma_data_direction direction,
1432 unsigned int offset, unsigned int len)
1433 {
1434 int ret = 0;
1435
1436 if (WARN_ON(!dmabuf))
1437 return -EINVAL;
1438
1439 if (dmabuf->ops->begin_cpu_access_partial)
1440 ret = dmabuf->ops->begin_cpu_access_partial(dmabuf, direction,
1441 offset, len);
1442
1443 /* Ensure that all fences are waited upon - but we first allow
1444 * the native handler the chance to do so more efficiently if it
1445 * chooses. A double invocation here will be reasonably cheap no-op.
1446 */
1447 if (ret == 0)
1448 ret = __dma_buf_begin_cpu_access(dmabuf, direction);
1449
1450 return ret;
1451 }
1452 EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access_partial);
1453
1454 /**
1455 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
1456 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
1457 * actions. Coherency is only guaranteed in the specified range for the
1458 * specified access direction.
1459 * @dmabuf: [in] buffer to complete cpu access for.
1460 * @direction: [in] direction of the CPU access.
1461 *
1462 * This terminates CPU access started with dma_buf_begin_cpu_access().
1463 *
1464 * Can return negative error values, returns 0 on success.
1465 */
1466 int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
1467 enum dma_data_direction direction)
1468 {
1469 int ret = 0;
1470
1471 WARN_ON(!dmabuf);
1472
1473 might_lock(&dmabuf->resv->lock.base);
1474
1475 if (dmabuf->ops->end_cpu_access)
1476 ret = dmabuf->ops->end_cpu_access(dmabuf, direction);
1477
1478 return ret;
1479 }
1480 EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, DMA_BUF);
1481
1482 int dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
1483 enum dma_data_direction direction,
1484 unsigned int offset, unsigned int len)
1485 {
1486 int ret = 0;
1487
1488 WARN_ON(!dmabuf);
1489
1490 if (dmabuf->ops->end_cpu_access_partial)
1491 ret = dmabuf->ops->end_cpu_access_partial(dmabuf, direction,
1492 offset, len);
1493
1494 return ret;
1495 }
1496 EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access_partial);
1497
1498 /**
1499 * dma_buf_mmap - Set up a userspace mmap with the given vma
1500 * @dmabuf: [in] buffer that should back the vma
1501 * @vma: [in] vma for the mmap
1502 * @pgoff: [in] offset in pages where this mmap should start within the
1503 * dma-buf buffer.
1504 *
1505 * This function adjusts the passed in vma so that it points at the file of the
1506 * dma_buf operation. It also adjusts the starting pgoff and does bounds
1507 * checking on the size of the vma. Then it calls the exporter's mmap function to
1508 * set up the mapping.
1509 *
1510 * Can return negative error values, returns 0 on success.
1511 */
1512 int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
1513 unsigned long pgoff)
1514 {
1515 if (WARN_ON(!dmabuf || !vma))
1516 return -EINVAL;
1517
1518 /* check if buffer supports mmap */
1519 if (!dmabuf->ops->mmap)
1520 return -EINVAL;
1521
1522 /* check for offset overflow */
1523 if (pgoff + vma_pages(vma) < pgoff)
1524 return -EOVERFLOW;
1525
1526 /* check for overflowing the buffer's size */
1527 if (pgoff + vma_pages(vma) >
1528 dmabuf->size >> PAGE_SHIFT)
1529 return -EINVAL;
1530
1531 /* readjust the vma */
1532 vma_set_file(vma, dmabuf->file);
1533 vma->vm_pgoff = pgoff;
1534
1535 return dmabuf->ops->mmap(dmabuf, vma);
1536 }
1537 EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, DMA_BUF);
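/*
 * Example: a hedged sketch of an importing driver forwarding its own mmap
 * file operation to dma_buf_mmap(); my_obj and its dmabuf member are
 * hypothetical.
 *
 *	static int my_drv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_obj *obj = file->private_data;
 *
 *		return dma_buf_mmap(obj->dmabuf, vma, 0);
 *	}
 */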
1538
1539 /**
1540 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
1541 * address space. Same restrictions as for vmap and friends apply.
1542 * @dmabuf: [in] buffer to vmap
1543 * @map: [out] returns the vmap pointer
1544 *
1545 * This call may fail due to lack of virtual mapping address space.
1546 * These calls are optional in drivers. The intended use for them
1547 * is to map objects linearly into kernel space for frequently used objects.
1548 *
1549 * To ensure coherency users must call dma_buf_begin_cpu_access() and
1550 * dma_buf_end_cpu_access() around any cpu access performed through this
1551 * mapping.
1552 *
1553 * Returns 0 on success, or a negative errno code otherwise.
1554 */
1555 int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
1556 {
1557 struct iosys_map ptr;
1558 int ret = 0;
1559
1560 iosys_map_clear(map);
1561
1562 if (WARN_ON(!dmabuf))
1563 return -EINVAL;
1564
1565 if (!dmabuf->ops->vmap)
1566 return -EINVAL;
1567
1568 mutex_lock(&dmabuf->lock);
1569 if (dmabuf->vmapping_counter) {
1570 dmabuf->vmapping_counter++;
1571 BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
1572 *map = dmabuf->vmap_ptr;
1573 goto out_unlock;
1574 }
1575
1576 BUG_ON(iosys_map_is_set(&dmabuf->vmap_ptr));
1577
1578 ret = dmabuf->ops->vmap(dmabuf, &ptr);
1579 if (WARN_ON_ONCE(ret))
1580 goto out_unlock;
1581
1582 dmabuf->vmap_ptr = ptr;
1583 dmabuf->vmapping_counter = 1;
1584
1585 *map = dmabuf->vmap_ptr;
1586
1587 out_unlock:
1588 mutex_unlock(&dmabuf->lock);
1589 return ret;
1590 }
1591 EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, DMA_BUF);
1592
1593 /**
1594 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
1595 * @dmabuf: [in] buffer to vunmap
1596 * @map: [in] vmap pointer to vunmap
1597 */
1598 void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
1599 {
1600 if (WARN_ON(!dmabuf))
1601 return;
1602
1603 BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
1604 BUG_ON(dmabuf->vmapping_counter == 0);
1605 BUG_ON(!iosys_map_is_equal(&dmabuf->vmap_ptr, map));
1606
1607 mutex_lock(&dmabuf->lock);
1608 if (--dmabuf->vmapping_counter == 0) {
1609 if (dmabuf->ops->vunmap)
1610 dmabuf->ops->vunmap(dmabuf, map);
1611 iosys_map_clear(&dmabuf->vmap_ptr);
1612 }
1613 mutex_unlock(&dmabuf->lock);
1614 }
1615 EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, DMA_BUF);
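/*
 * Example: a hedged in-kernel sketch combining dma_buf_vmap() with the CPU
 * access bracketing documented above; dmabuf is assumed to be a reference
 * already held by the caller.
 *
 *	struct iosys_map map;
 *	int ret;
 *
 *	ret = dma_buf_vmap(dmabuf, &map);
 *	if (ret)
 *		return ret;
 *
 *	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *	if (!ret) {
 *		// access the buffer through map.vaddr (or map.vaddr_iomem)
 *		dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *	}
 *
 *	dma_buf_vunmap(dmabuf, &map);
 */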
1616
1617 int dma_buf_get_flags(struct dma_buf *dmabuf, unsigned long *flags)
1618 {
1619 int ret = 0;
1620
1621 if (WARN_ON(!dmabuf) || !flags)
1622 return -EINVAL;
1623
1624 if (dmabuf->ops->get_flags)
1625 ret = dmabuf->ops->get_flags(dmabuf, flags);
1626
1627 return ret;
1628 }
1629 EXPORT_SYMBOL_GPL(dma_buf_get_flags);
1630
1631 #ifdef CONFIG_DEBUG_FS
1632 static int dma_buf_debug_show(struct seq_file *s, void *unused)
1633 {
1634 struct dma_buf *buf_obj;
1635 struct dma_buf_attachment *attach_obj;
1636 int count = 0, attach_count;
1637 size_t size = 0;
1638 int ret;
1639
1640 ret = mutex_lock_interruptible(&db_list.lock);
1641
1642 if (ret)
1643 return ret;
1644
1645 seq_puts(s, "\nDma-buf Objects:\n");
1646 seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\tname\n",
1647 "size", "flags", "mode", "count", "ino");
1648
1649 list_for_each_entry(buf_obj, &db_list.head, list_node) {
1650
1651 ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
1652 if (ret)
1653 goto error_unlock;
1654
1655
1656 spin_lock(&buf_obj->name_lock);
1657 seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
1658 buf_obj->size,
1659 buf_obj->file->f_flags, buf_obj->file->f_mode,
1660 file_count(buf_obj->file),
1661 buf_obj->exp_name,
1662 file_inode(buf_obj->file)->i_ino,
1663 buf_obj->name ?: "<none>");
1664 spin_unlock(&buf_obj->name_lock);
1665
1666 dma_resv_describe(buf_obj->resv, s);
1667
1668 seq_puts(s, "\tAttached Devices:\n");
1669 attach_count = 0;
1670
1671 list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
1672 seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
1673 attach_count++;
1674 }
1675 dma_resv_unlock(buf_obj->resv);
1676
1677 seq_printf(s, "Total %d devices attached\n\n",
1678 attach_count);
1679
1680 count++;
1681 size += buf_obj->size;
1682 }
1683
1684 seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);
1685
1686 mutex_unlock(&db_list.lock);
1687 return 0;
1688
1689 error_unlock:
1690 mutex_unlock(&db_list.lock);
1691 return ret;
1692 }
1693
1694 DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);
1695
1696 static struct dentry *dma_buf_debugfs_dir;
1697
1698 static int dma_buf_init_debugfs(void)
1699 {
1700 struct dentry *d;
1701 int err = 0;
1702
1703 d = debugfs_create_dir("dma_buf", NULL);
1704 if (IS_ERR(d))
1705 return PTR_ERR(d);
1706
1707 dma_buf_debugfs_dir = d;
1708
1709 d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
1710 NULL, &dma_buf_debug_fops);
1711 if (IS_ERR(d)) {
1712 pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
1713 debugfs_remove_recursive(dma_buf_debugfs_dir);
1714 dma_buf_debugfs_dir = NULL;
1715 err = PTR_ERR(d);
1716 }
1717
1718 return err;
1719 }
1720
1721 static void dma_buf_uninit_debugfs(void)
1722 {
1723 debugfs_remove_recursive(dma_buf_debugfs_dir);
1724 }
1725 #else
1726 static inline int dma_buf_init_debugfs(void)
1727 {
1728 return 0;
1729 }
1730 static inline void dma_buf_uninit_debugfs(void)
1731 {
1732 }
1733 #endif
1734
1735 static int __init dma_buf_init(void)
1736 {
1737 int ret;
1738
1739 ret = dma_buf_init_sysfs_statistics();
1740 if (ret)
1741 return ret;
1742
1743 dma_buf_mnt = kern_mount(&dma_buf_fs_type);
1744 if (IS_ERR(dma_buf_mnt))
1745 return PTR_ERR(dma_buf_mnt);
1746
1747 mutex_init(&db_list.lock);
1748 INIT_LIST_HEAD(&db_list.head);
1749 dma_buf_init_debugfs();
1750 return 0;
1751 }
1752 subsys_initcall(dma_buf_init);
1753
1754 static void __exit dma_buf_deinit(void)
1755 {
1756 dma_buf_uninit_debugfs();
1757 kern_unmount(dma_buf_mnt);
1758 dma_buf_uninit_sysfs_statistics();
1759 }
1760 __exitcall(dma_buf_deinit);
1761