1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Framework for buffer objects that can be shared across devices/subsystems.
4  *
5  * Copyright(C) 2011 Linaro Limited. All rights reserved.
6  * Author: Sumit Semwal <sumit.semwal@ti.com>
7  *
8  * Many thanks to the linaro-mm-sig list, and especially
9  * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
10  * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
11  * refining of this idea.
12  */
13 
14 #include <linux/fs.h>
15 #include <linux/slab.h>
16 #include <linux/dma-buf.h>
17 #include <linux/dma-fence.h>
18 #include <linux/dma-fence-unwrap.h>
19 #include <linux/anon_inodes.h>
20 #include <linux/export.h>
21 #include <linux/debugfs.h>
22 #include <linux/module.h>
23 #include <linux/seq_file.h>
24 #include <linux/sync_file.h>
25 #include <linux/poll.h>
26 #include <linux/dma-resv.h>
27 #include <linux/mm.h>
28 #include <linux/mount.h>
29 #include <linux/pseudo_fs.h>
30 
31 #include <uapi/linux/dma-buf.h>
32 #include <uapi/linux/magic.h>
33 
34 #include "dma-buf-sysfs-stats.h"
35 #include "dma-buf-process-info.h"
36 
37 static inline int is_dma_buf_file(struct file *);
38 
39 struct dma_buf_list {
40 	struct list_head head;
41 	struct mutex lock;
42 };
43 
44 static struct dma_buf_list db_list;
45 
46 static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
47 {
48 	struct dma_buf *dmabuf;
49 	char name[DMA_BUF_NAME_LEN];
50 	size_t ret = 0;
51 
52 	dmabuf = dentry->d_fsdata;
53 	spin_lock(&dmabuf->name_lock);
54 	if (dmabuf->name)
55 		ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
56 	spin_unlock(&dmabuf->name_lock);
57 
58 	return dynamic_dname(buffer, buflen, "/%s:%s",
59 			     dentry->d_name.name, ret > 0 ? name : "");
60 }
61 
62 static void dma_buf_release(struct dentry *dentry)
63 {
64 	struct dma_buf *dmabuf;
65 
66 	dmabuf = dentry->d_fsdata;
67 	if (unlikely(!dmabuf))
68 		return;
69 
70 	BUG_ON(dmabuf->vmapping_counter);
71 
72 	/*
73 	 * If you hit this BUG() it could mean:
74 	 * * There's a file reference imbalance in dma_buf_poll / dma_buf_poll_cb or somewhere else
75 	 * * dmabuf->cb_in/out.active are non-0 despite no pending fence callback
76 	 */
77 	BUG_ON(dmabuf->cb_in.active || dmabuf->cb_out.active);
78 
79 	dma_buf_stats_teardown(dmabuf);
80 	dmabuf->ops->release(dmabuf);
81 
82 	if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
83 		dma_resv_fini(dmabuf->resv);
84 
85 	WARN_ON(!list_empty(&dmabuf->attachments));
86 	module_put(dmabuf->owner);
87 	kfree(dmabuf->name);
88 	kfree(dmabuf);
89 }
90 
91 static int dma_buf_file_release(struct inode *inode, struct file *file)
92 {
93 	struct dma_buf *dmabuf;
94 
95 	if (!is_dma_buf_file(file))
96 		return -EINVAL;
97 
98 	dmabuf = file->private_data;
99 	if (dmabuf) {
100 		mutex_lock(&db_list.lock);
101 		list_del(&dmabuf->list_node);
102 		mutex_unlock(&db_list.lock);
103 	}
104 
105 	return 0;
106 }
107 
108 static const struct dentry_operations dma_buf_dentry_ops = {
109 	.d_dname = dmabuffs_dname,
110 	.d_release = dma_buf_release,
111 };
112 
113 static struct vfsmount *dma_buf_mnt;
114 
115 static int dma_buf_fs_init_context(struct fs_context *fc)
116 {
117 	struct pseudo_fs_context *ctx;
118 
119 	ctx = init_pseudo(fc, DMA_BUF_MAGIC);
120 	if (!ctx)
121 		return -ENOMEM;
122 	ctx->dops = &dma_buf_dentry_ops;
123 	return 0;
124 }
125 
126 static struct file_system_type dma_buf_fs_type = {
127 	.name = "dmabuf",
128 	.init_fs_context = dma_buf_fs_init_context,
129 	.kill_sb = kill_anon_super,
130 };
131 
132 static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
133 {
134 	struct dma_buf *dmabuf;
135 
136 	if (!is_dma_buf_file(file))
137 		return -EINVAL;
138 
139 	dmabuf = file->private_data;
140 
141 	/* check if buffer supports mmap */
142 	if (!dmabuf->ops->mmap)
143 		return -EINVAL;
144 
145 	/* check for overflowing the buffer's size */
146 	if (vma->vm_pgoff + vma_pages(vma) >
147 	    dmabuf->size >> PAGE_SHIFT)
148 		return -EINVAL;
149 
150 	return dmabuf->ops->mmap(dmabuf, vma);
151 }
152 
153 static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
154 {
155 	struct dma_buf *dmabuf;
156 	loff_t base;
157 
158 	if (!is_dma_buf_file(file))
159 		return -EBADF;
160 
161 	dmabuf = file->private_data;
162 
163 	/* only support discovering the end of the buffer,
164 	   but also allow SEEK_SET to maintain the idiomatic
165 	   SEEK_END(0), SEEK_CUR(0) pattern */
166 	if (whence == SEEK_END)
167 		base = dmabuf->size;
168 	else if (whence == SEEK_SET)
169 		base = 0;
170 	else
171 		return -EINVAL;
172 
173 	if (offset != 0)
174 		return -EINVAL;
175 
176 	return base + offset;
177 }
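/*
 * Illustrative userspace-side sketch (not part of this file): with the llseek
 * implementation above, the size of a dma-buf can be discovered through the
 * usual lseek() idiom. "buf_fd" is assumed to be a dma-buf file descriptor;
 * error handling is omitted:
 *
 *     off_t size = lseek(buf_fd, 0, SEEK_END);
 *
 *     // rewind; only offset 0 is accepted for SEEK_SET as well
 *     lseek(buf_fd, 0, SEEK_SET);
 */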
178 
179 /**
180  * DOC: implicit fence polling
181  *
182  * To support cross-device and cross-driver synchronization of buffer access
183  * implicit fences (represented internally in the kernel with &struct dma_fence)
184  * can be attached to a &dma_buf. The glue for that and a few related things are
185  * provided in the &dma_resv structure.
186  *
187  * Userspace can query the state of these implicitly tracked fences using poll()
188  * and related system calls:
189  *
190  * - Checking for EPOLLIN, i.e. read access, can be used to query the state of the
191  *   most recent write or exclusive fence.
192  *
193  * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
194  *   all attached fences, shared and exclusive ones.
195  *
196  * Note that this only signals the completion of the respective fences, i.e. the
197  * DMA transfers are complete. Cache flushing and any other necessary
198  * preparations before CPU access can begin still need to happen.
199  *
200  * As an alternative to poll(), the set of fences on a DMA buffer can be
201  * exported as a &sync_file using &dma_buf_export_sync_file.
202  */
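/*
 * Illustrative userspace sketch of the polling described above (assumptions:
 * "buf_fd" is a dma-buf file descriptor; error handling omitted). Waiting for
 * the most recent write/exclusive fence could look like:
 *
 *     struct pollfd pfd = { .fd = buf_fd, .events = POLLIN };
 *
 *     // blocks until the implicit write fence has signaled
 *     poll(&pfd, 1, -1);
 *
 * Polling for POLLOUT instead waits for all attached fences, shared and
 * exclusive.
 */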
203 
204 static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
205 {
206 	struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
207 	struct dma_buf *dmabuf = container_of(dcb->poll, struct dma_buf, poll);
208 	unsigned long flags;
209 
210 	spin_lock_irqsave(&dcb->poll->lock, flags);
211 	wake_up_locked_poll(dcb->poll, dcb->active);
212 	dcb->active = 0;
213 	spin_unlock_irqrestore(&dcb->poll->lock, flags);
214 	dma_fence_put(fence);
215 	/* Paired with get_file in dma_buf_poll */
216 	fput(dmabuf->file);
217 }
218 
219 static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write,
220 				struct dma_buf_poll_cb_t *dcb)
221 {
222 	struct dma_resv_iter cursor;
223 	struct dma_fence *fence;
224 	int r;
225 
226 	dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(write),
227 				fence) {
228 		dma_fence_get(fence);
229 		r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
230 		if (!r)
231 			return true;
232 		dma_fence_put(fence);
233 	}
234 
235 	return false;
236 }
237 
238 static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
239 {
240 	struct dma_buf *dmabuf;
241 	struct dma_resv *resv;
242 	__poll_t events;
243 
244 	dmabuf = file->private_data;
245 	if (!dmabuf || !dmabuf->resv)
246 		return EPOLLERR;
247 
248 	resv = dmabuf->resv;
249 
250 	poll_wait(file, &dmabuf->poll, poll);
251 
252 	events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
253 	if (!events)
254 		return 0;
255 
256 	dma_resv_lock(resv, NULL);
257 
258 	if (events & EPOLLOUT) {
259 		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_out;
260 
261 		/* Check that callback isn't busy */
262 		spin_lock_irq(&dmabuf->poll.lock);
263 		if (dcb->active)
264 			events &= ~EPOLLOUT;
265 		else
266 			dcb->active = EPOLLOUT;
267 		spin_unlock_irq(&dmabuf->poll.lock);
268 
269 		if (events & EPOLLOUT) {
270 			/* Paired with fput in dma_buf_poll_cb */
271 			get_file(dmabuf->file);
272 
273 			if (!dma_buf_poll_add_cb(resv, true, dcb))
274 				/* No callback queued, wake up any other waiters */
275 				dma_buf_poll_cb(NULL, &dcb->cb);
276 			else
277 				events &= ~EPOLLOUT;
278 		}
279 	}
280 
281 	if (events & EPOLLIN) {
282 		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_in;
283 
284 		/* Check that callback isn't busy */
285 		spin_lock_irq(&dmabuf->poll.lock);
286 		if (dcb->active)
287 			events &= ~EPOLLIN;
288 		else
289 			dcb->active = EPOLLIN;
290 		spin_unlock_irq(&dmabuf->poll.lock);
291 
292 		if (events & EPOLLIN) {
293 			/* Paired with fput in dma_buf_poll_cb */
294 			get_file(dmabuf->file);
295 
296 			if (!dma_buf_poll_add_cb(resv, false, dcb))
297 				/* No callback queued, wake up any other waiters */
298 				dma_buf_poll_cb(NULL, &dcb->cb);
299 			else
300 				events &= ~EPOLLIN;
301 		}
302 	}
303 
304 	dma_resv_unlock(resv);
305 	return events;
306 }
307 
308 /**
309  * dma_buf_set_name - Set a name on a specific dma_buf to track its usage.
310  * The name of the dma-buf can be changed if the same piece of memory is
311  * used for multiple purposes between different devices.
312  *
313  * @dmabuf: [in]     dmabuf buffer that will be renamed.
314  * @buf:    [in]     A piece of userspace memory that contains the name of
315  *                   the dma-buf.
316  *
317  * Returns 0 on success, or a negative error code if the name cannot be
318  * copied from userspace.
319  *
320  */
321 static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
322 {
323 	char *name = strndup_user(buf, DMA_BUF_NAME_LEN);
324 
325 	if (IS_ERR(name))
326 		return PTR_ERR(name);
327 
328 	spin_lock(&dmabuf->name_lock);
329 	kfree(dmabuf->name);
330 	dmabuf->name = name;
331 	spin_unlock(&dmabuf->name_lock);
332 
333 	return 0;
334 }
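/*
 * A minimal userspace sketch of the corresponding ioctl (assumptions: "buf_fd"
 * is a dma-buf file descriptor; return value checking omitted):
 *
 *     ioctl(buf_fd, DMA_BUF_SET_NAME, "camera-preview");
 *
 * The _A/_B variants handled below cover the 32-bit and 64-bit pointer
 * encodings of the same command. The name then shows up in fdinfo and the
 * debugfs bufinfo listing, which helps attribute buffers to their users.
 */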
335 
336 #if IS_ENABLED(CONFIG_SYNC_FILE)
337 static long dma_buf_export_sync_file(struct dma_buf *dmabuf,
338 				     void __user *user_data)
339 {
340 	struct dma_buf_export_sync_file arg;
341 	enum dma_resv_usage usage;
342 	struct dma_fence *fence = NULL;
343 	struct sync_file *sync_file;
344 	int fd, ret;
345 
346 	if (copy_from_user(&arg, user_data, sizeof(arg)))
347 		return -EFAULT;
348 
349 	if (arg.flags & ~DMA_BUF_SYNC_RW)
350 		return -EINVAL;
351 
352 	if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
353 		return -EINVAL;
354 
355 	fd = get_unused_fd_flags(O_CLOEXEC);
356 	if (fd < 0)
357 		return fd;
358 
359 	usage = dma_resv_usage_rw(arg.flags & DMA_BUF_SYNC_WRITE);
360 	ret = dma_resv_get_singleton(dmabuf->resv, usage, &fence);
361 	if (ret)
362 		goto err_put_fd;
363 
364 	if (!fence)
365 		fence = dma_fence_get_stub();
366 
367 	sync_file = sync_file_create(fence);
368 
369 	dma_fence_put(fence);
370 
371 	if (!sync_file) {
372 		ret = -ENOMEM;
373 		goto err_put_fd;
374 	}
375 
376 	arg.fd = fd;
377 	if (copy_to_user(user_data, &arg, sizeof(arg))) {
378 		ret = -EFAULT;
379 		goto err_put_file;
380 	}
381 
382 	fd_install(fd, sync_file->file);
383 
384 	return 0;
385 
386 err_put_file:
387 	fput(sync_file->file);
388 err_put_fd:
389 	put_unused_fd(fd);
390 	return ret;
391 }
392 
393 static long dma_buf_import_sync_file(struct dma_buf *dmabuf,
394 				     const void __user *user_data)
395 {
396 	struct dma_buf_import_sync_file arg;
397 	struct dma_fence *fence, *f;
398 	enum dma_resv_usage usage;
399 	struct dma_fence_unwrap iter;
400 	unsigned int num_fences;
401 	int ret = 0;
402 
403 	if (copy_from_user(&arg, user_data, sizeof(arg)))
404 		return -EFAULT;
405 
406 	if (arg.flags & ~DMA_BUF_SYNC_RW)
407 		return -EINVAL;
408 
409 	if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
410 		return -EINVAL;
411 
412 	fence = sync_file_get_fence(arg.fd);
413 	if (!fence)
414 		return -EINVAL;
415 
416 	usage = (arg.flags & DMA_BUF_SYNC_WRITE) ? DMA_RESV_USAGE_WRITE :
417 						   DMA_RESV_USAGE_READ;
418 
419 	num_fences = 0;
420 	dma_fence_unwrap_for_each(f, &iter, fence)
421 		++num_fences;
422 
423 	if (num_fences > 0) {
424 		dma_resv_lock(dmabuf->resv, NULL);
425 
426 		ret = dma_resv_reserve_fences(dmabuf->resv, num_fences);
427 		if (!ret) {
428 			dma_fence_unwrap_for_each(f, &iter, fence)
429 				dma_resv_add_fence(dmabuf->resv, f, usage);
430 		}
431 
432 		dma_resv_unlock(dmabuf->resv);
433 	}
434 
435 	dma_fence_put(fence);
436 
437 	return ret;
438 }
439 #endif
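/*
 * Illustrative userspace usage of the two sync_file ioctls above (assumptions:
 * "buf_fd" is a dma-buf fd, "some_sync_file_fd" was obtained elsewhere,
 * CONFIG_SYNC_FILE is enabled, error handling omitted):
 *
 *     struct dma_buf_export_sync_file exp = { .flags = DMA_BUF_SYNC_READ };
 *     struct dma_buf_import_sync_file imp = { .flags = DMA_BUF_SYNC_WRITE };
 *
 *     // get a sync_file fd containing the fences a reader must wait for
 *     ioctl(buf_fd, DMA_BUF_IOCTL_EXPORT_SYNC_FILE, &exp);
 *
 *     // later, add a sync_file's fence as a write fence of the buffer
 *     imp.fd = some_sync_file_fd;
 *     ioctl(buf_fd, DMA_BUF_IOCTL_IMPORT_SYNC_FILE, &imp);
 */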
440 
441 static long dma_buf_ioctl(struct file *file,
442 			  unsigned int cmd, unsigned long arg)
443 {
444 	struct dma_buf *dmabuf;
445 	struct dma_buf_sync sync;
446 	enum dma_data_direction direction;
447 	int ret;
448 
449 	dmabuf = file->private_data;
450 
451 	switch (cmd) {
452 	case DMA_BUF_IOCTL_SYNC:
453 		if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
454 			return -EFAULT;
455 
456 		if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
457 			return -EINVAL;
458 
459 		switch (sync.flags & DMA_BUF_SYNC_RW) {
460 		case DMA_BUF_SYNC_READ:
461 			direction = DMA_FROM_DEVICE;
462 			break;
463 		case DMA_BUF_SYNC_WRITE:
464 			direction = DMA_TO_DEVICE;
465 			break;
466 		case DMA_BUF_SYNC_RW:
467 			direction = DMA_BIDIRECTIONAL;
468 			break;
469 		default:
470 			return -EINVAL;
471 		}
472 
473 		if (sync.flags & DMA_BUF_SYNC_END)
474 			ret = dma_buf_end_cpu_access(dmabuf, direction);
475 		else
476 			ret = dma_buf_begin_cpu_access(dmabuf, direction);
477 
478 		return ret;
479 
480 	case DMA_BUF_SET_NAME_A:
481 	case DMA_BUF_SET_NAME_B:
482 		return dma_buf_set_name(dmabuf, (const char __user *)arg);
483 
484 #if IS_ENABLED(CONFIG_SYNC_FILE)
485 	case DMA_BUF_IOCTL_EXPORT_SYNC_FILE:
486 		return dma_buf_export_sync_file(dmabuf, (void __user *)arg);
487 	case DMA_BUF_IOCTL_IMPORT_SYNC_FILE:
488 		return dma_buf_import_sync_file(dmabuf, (const void __user *)arg);
489 #endif
490 
491 	default:
492 		return -ENOTTY;
493 	}
494 }
495 
496 static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
497 {
498 	struct dma_buf *dmabuf = file->private_data;
499 
500 	seq_printf(m, "size:\t%zu\n", dmabuf->size);
501 	/* Don't count the temporary reference taken inside procfs seq_show */
502 	seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
503 	seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
504 	spin_lock(&dmabuf->name_lock);
505 	if (dmabuf->name)
506 		seq_printf(m, "name:\t%s\n", dmabuf->name);
507 	spin_unlock(&dmabuf->name_lock);
508 }
509 
510 static const struct file_operations dma_buf_fops = {
511 	.release	= dma_buf_file_release,
512 	.mmap		= dma_buf_mmap_internal,
513 	.llseek		= dma_buf_llseek,
514 	.poll		= dma_buf_poll,
515 	.unlocked_ioctl	= dma_buf_ioctl,
516 	.compat_ioctl	= compat_ptr_ioctl,
517 	.show_fdinfo	= dma_buf_show_fdinfo,
518 };
519 
520 /*
521  * is_dma_buf_file - Check if struct file* is associated with dma_buf
522  */
523 static inline int is_dma_buf_file(struct file *file)
524 {
525 	return file->f_op == &dma_buf_fops;
526 }
527 
528 static struct file *dma_buf_getfile(size_t size, int flags)
529 {
530 	static atomic64_t dmabuf_inode = ATOMIC64_INIT(0);
531 	struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);
532 	struct file *file;
533 
534 	if (IS_ERR(inode))
535 		return ERR_CAST(inode);
536 
537 	inode->i_size = size;
538 	inode_set_bytes(inode, size);
539 
540 	/*
541 	 * The ->i_ino acquired from get_next_ino() is not unique and thus
542 	 * not suitable for use as the dentry name by dmabuf stats.
543 	 * Override ->i_ino with a unique and dmabuffs-specific
544 	 * value.
545 	 */
546 	inode->i_ino = atomic64_add_return(1, &dmabuf_inode);
547 	flags &= O_ACCMODE | O_NONBLOCK;
548 	file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
549 				 flags, &dma_buf_fops);
550 	if (IS_ERR(file))
551 		goto err_alloc_file;
552 
553 	return file;
554 
555 err_alloc_file:
556 	iput(inode);
557 	return file;
558 }
559 
560 /**
561  * DOC: dma buf device access
562  *
563  * For device DMA access to a shared DMA buffer the usual sequence of operations
564  * is fairly simple:
565  *
566  * 1. The exporter defines its exporter instance using
567  *    DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private
568  *    buffer object into a &dma_buf. It then exports that &dma_buf to userspace
569  *    as a file descriptor by calling dma_buf_fd().
570  *
571  * 2. Userspace passes this file descriptor to all drivers it wants this buffer
572  *    to share with: First the file descriptor is converted to a &dma_buf using
573  *    dma_buf_get(). Then the buffer is attached to the device using
574  *    dma_buf_attach().
575  *
576  *    Up to this stage the exporter is still free to migrate or reallocate the
577  *    backing storage.
578  *
579  * 3. Once the buffer is attached to all devices userspace can initiate DMA
580  *    access to the shared buffer. In the kernel this is done by calling
581  *    dma_buf_map_attachment() and dma_buf_unmap_attachment().
582  *
583  * 4. Once a driver is done with a shared buffer it needs to call
584  *    dma_buf_detach() (after cleaning up any mappings) and then release the
585  *    reference acquired with dma_buf_get() by calling dma_buf_put().
586  *
587  * For the detailed semantics exporters are expected to implement see
588  * &dma_buf_ops. An importer-side sketch of steps 2-4 follows this comment.
589  */
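/*
 * A condensed, importer-side sketch of steps 2-4 above (assumptions: "fd" came
 * from userspace, "dev" is the importing device; error handling and the DMA
 * direction are chosen for illustration only):
 *
 *     struct dma_buf *dmabuf = dma_buf_get(fd);
 *     struct dma_buf_attachment *attach = dma_buf_attach(dmabuf, dev);
 *     struct sg_table *sgt;
 *
 *     sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
 *
 *     // ... program the device using the addresses in sgt ...
 *
 *     dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
 *     dma_buf_detach(dmabuf, attach);
 *     dma_buf_put(dmabuf);
 */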
590 
591 /**
592  * dma_buf_export - Creates a new dma_buf, and associates an anon file
593  * with this buffer, so it can be exported.
594  * Also connects the allocator-specific data and ops to the buffer.
595  * Additionally, provides a name string for the exporter; useful in debugging.
596  *
597  * @exp_info:	[in]	holds all the export related information provided
598  *			by the exporter. see &struct dma_buf_export_info
599  *			for further details.
600  *
601  * Returns, on success, a newly created struct dma_buf object, which wraps the
602  * supplied private data and operations for struct dma_buf_ops. On missing
603  * ops or an error allocating the struct dma_buf, a negative error is
604  * returned.
605  *
606  * For most cases the easiest way to create @exp_info is through the
607  * %DEFINE_DMA_BUF_EXPORT_INFO macro.
608  */
609 struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
610 {
611 	struct dma_buf *dmabuf;
612 	struct dma_resv *resv = exp_info->resv;
613 	struct file *file;
614 	size_t alloc_size = sizeof(struct dma_buf);
615 	int ret;
616 
617 	if (WARN_ON(!exp_info->priv || !exp_info->ops
618 		    || !exp_info->ops->map_dma_buf
619 		    || !exp_info->ops->unmap_dma_buf
620 		    || !exp_info->ops->release))
621 		return ERR_PTR(-EINVAL);
622 
623 	if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
624 		    (exp_info->ops->pin || exp_info->ops->unpin)))
625 		return ERR_PTR(-EINVAL);
626 
627 	if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
628 		return ERR_PTR(-EINVAL);
629 
630 	if (!try_module_get(exp_info->owner))
631 		return ERR_PTR(-ENOENT);
632 
633 	file = dma_buf_getfile(exp_info->size, exp_info->flags);
634 	if (IS_ERR(file)) {
635 		ret = PTR_ERR(file);
636 		goto err_module;
637 	}
638 
639 	if (!exp_info->resv)
640 		alloc_size += sizeof(struct dma_resv);
641 	else
642 		/* prevent &dma_buf[1] == dma_buf->resv */
643 		alloc_size += 1;
644 	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
645 	if (!dmabuf) {
646 		ret = -ENOMEM;
647 		goto err_file;
648 	}
649 
650 	dmabuf->priv = exp_info->priv;
651 	dmabuf->ops = exp_info->ops;
652 	dmabuf->size = exp_info->size;
653 	dmabuf->exp_name = exp_info->exp_name;
654 	dmabuf->owner = exp_info->owner;
655 	spin_lock_init(&dmabuf->name_lock);
656 	init_waitqueue_head(&dmabuf->poll);
657 	dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll;
658 	dmabuf->cb_in.active = dmabuf->cb_out.active = 0;
659 	INIT_LIST_HEAD(&dmabuf->attachments);
660 
661 	if (!resv) {
662 		dmabuf->resv = (struct dma_resv *)&dmabuf[1];
663 		dma_resv_init(dmabuf->resv);
664 	} else {
665 		dmabuf->resv = resv;
666 	}
667 
668 	ret = dma_buf_stats_setup(dmabuf, file);
669 	if (ret)
670 		goto err_dmabuf;
671 
672 	file->private_data = dmabuf;
673 	file->f_path.dentry->d_fsdata = dmabuf;
674 	dmabuf->file = file;
675 
676 	mutex_lock(&db_list.lock);
677 	list_add(&dmabuf->list_node, &db_list.head);
678 	mutex_unlock(&db_list.lock);
679 
680 	return dmabuf;
681 
682 err_dmabuf:
683 	if (!resv)
684 		dma_resv_fini(dmabuf->resv);
685 	kfree(dmabuf);
686 err_file:
687 	fput(file);
688 err_module:
689 	module_put(exp_info->owner);
690 	return ERR_PTR(ret);
691 }
692 EXPORT_SYMBOL_NS_GPL(dma_buf_export, DMA_BUF);
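/*
 * A minimal exporter-side sketch (assumptions: "my_dmabuf_ops" and "my_buffer"
 * are the exporter's own ops table and private object, "size" is the buffer
 * size; error handling abbreviated):
 *
 *     DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *     struct dma_buf *dmabuf;
 *     int fd;
 *
 *     exp_info.ops   = &my_dmabuf_ops;
 *     exp_info.size  = size;
 *     exp_info.flags = O_RDWR | O_CLOEXEC;
 *     exp_info.priv  = my_buffer;
 *
 *     dmabuf = dma_buf_export(&exp_info);
 *     if (IS_ERR(dmabuf))
 *             return PTR_ERR(dmabuf);
 *
 *     fd = dma_buf_fd(dmabuf, O_CLOEXEC);    // hand this fd to userspace
 */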
693 
694 /**
695  * dma_buf_fd - returns a file descriptor for the given struct dma_buf
696  * @dmabuf:	[in]	pointer to dma_buf for which fd is required.
697  * @flags:      [in]    flags to give to fd
698  *
699  * On success, returns an associated 'fd'. Else, returns error.
700  */
701 int dma_buf_fd(struct dma_buf *dmabuf, int flags)
702 {
703 	int fd;
704 
705 	if (!dmabuf || !dmabuf->file)
706 		return -EINVAL;
707 
708 	fd = get_unused_fd_flags(flags);
709 	if (fd < 0)
710 		return fd;
711 
712 	fd_install(fd, dmabuf->file);
713 
714 	return fd;
715 }
716 EXPORT_SYMBOL_NS_GPL(dma_buf_fd, DMA_BUF);
717 
718 /**
719  * dma_buf_get - returns the struct dma_buf related to an fd
720  * @fd:	[in]	fd associated with the struct dma_buf to be returned
721  *
722  * On success, returns the struct dma_buf associated with an fd; uses
723  * file's refcounting done by fget to increase refcount. Returns ERR_PTR
724  * otherwise.
725  */
726 struct dma_buf *dma_buf_get(int fd)
727 {
728 	struct file *file;
729 
730 	file = fget(fd);
731 
732 	if (!file)
733 		return ERR_PTR(-EBADF);
734 
735 	if (!is_dma_buf_file(file)) {
736 		fput(file);
737 		return ERR_PTR(-EINVAL);
738 	}
739 
740 	return file->private_data;
741 }
742 EXPORT_SYMBOL_NS_GPL(dma_buf_get, DMA_BUF);
743 
744 /**
745  * dma_buf_put - decreases refcount of the buffer
746  * @dmabuf:	[in]	buffer to reduce refcount of
747  *
748  * Uses file's refcounting done implicitly by fput().
749  *
750  * If, as a result of this call, the refcount becomes 0, the 'release' file
751  * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
752  * in turn, and frees the memory allocated for dmabuf when exported.
753  */
754 void dma_buf_put(struct dma_buf *dmabuf)
755 {
756 	if (WARN_ON(!dmabuf || !dmabuf->file))
757 		return;
758 
759 	fput(dmabuf->file);
760 }
761 EXPORT_SYMBOL_NS_GPL(dma_buf_put, DMA_BUF);
762 
763 static void mangle_sg_table(struct sg_table *sg_table)
764 {
765 #ifdef CONFIG_DMABUF_DEBUG
766 	int i;
767 	struct scatterlist *sg;
768 
769 	/* To catch abuse of the underlying struct page by importers mix
770 	 * up the bits, but take care to preserve the low SG_ bits to
771 	 * not corrupt the sgt. The mixing is undone in __unmap_dma_buf
772 	 * before passing the sgt back to the exporter. */
773 	for_each_sgtable_sg(sg_table, sg, i)
774 		sg->page_link ^= ~0xffUL;
775 #endif
776 
777 }
778 static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
779 				       enum dma_data_direction direction)
780 {
781 	struct sg_table *sg_table;
782 	signed long ret;
783 
784 	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
785 	if (IS_ERR_OR_NULL(sg_table))
786 		return sg_table;
787 
788 	if (!dma_buf_attachment_is_dynamic(attach)) {
789 		ret = dma_resv_wait_timeout(attach->dmabuf->resv,
790 					    DMA_RESV_USAGE_KERNEL, true,
791 					    MAX_SCHEDULE_TIMEOUT);
792 		if (ret < 0) {
793 			attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
794 							   direction);
795 			return ERR_PTR(ret);
796 		}
797 	}
798 
799 	mangle_sg_table(sg_table);
800 	return sg_table;
801 }
802 
803 /**
804  * DOC: locking convention
805  *
806  * In order to avoid deadlock situations between dma-buf exports and importers,
807  * all dma-buf API users must follow the common dma-buf locking convention.
808  *
809  * Convention for importers
810  *
811  * 1. Importers must hold the dma-buf reservation lock when calling these
812  *    functions:
813  *
814  *     - dma_buf_pin()
815  *     - dma_buf_unpin()
816  *     - dma_buf_map_attachment()
817  *     - dma_buf_unmap_attachment()
818  *     - dma_buf_vmap()
819  *     - dma_buf_vunmap()
820  *
821  * 2. Importers must not hold the dma-buf reservation lock when calling these
822  *    functions:
823  *
824  *     - dma_buf_attach()
825  *     - dma_buf_dynamic_attach()
826  *     - dma_buf_detach()
827  *     - dma_buf_export()
828  *     - dma_buf_fd()
829  *     - dma_buf_get()
830  *     - dma_buf_put()
831  *     - dma_buf_mmap()
832  *     - dma_buf_begin_cpu_access()
833  *     - dma_buf_end_cpu_access()
834  *     - dma_buf_map_attachment_unlocked()
835  *     - dma_buf_unmap_attachment_unlocked()
836  *     - dma_buf_vmap_unlocked()
837  *     - dma_buf_vunmap_unlocked()
838  *
839  * Convention for exporters
840  *
841  * 1. These &dma_buf_ops callbacks are invoked with unlocked dma-buf
842  *    reservation and exporter can take the lock:
843  *
844  *     - &dma_buf_ops.attach()
845  *     - &dma_buf_ops.detach()
846  *     - &dma_buf_ops.release()
847  *     - &dma_buf_ops.begin_cpu_access()
848  *     - &dma_buf_ops.end_cpu_access()
849  *     - &dma_buf_ops.mmap()
850  *
851  * 2. These &dma_buf_ops callbacks are invoked with locked dma-buf
852  *    reservation and exporter can't take the lock:
853  *
854  *     - &dma_buf_ops.pin()
855  *     - &dma_buf_ops.unpin()
856  *     - &dma_buf_ops.map_dma_buf()
857  *     - &dma_buf_ops.unmap_dma_buf()
858  *     - &dma_buf_ops.vmap()
859  *     - &dma_buf_ops.vunmap()
860  *
861  * 3. Exporters must hold the dma-buf reservation lock when calling these
862  *    functions:
863  *
864  *     - dma_buf_move_notify()
865  */
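/*
 * A short sketch of what the importer-side convention means in practice
 * (assumptions: "attach" is an existing attachment; error handling omitted).
 * Either the importer takes the reservation lock itself:
 *
 *     dma_resv_lock(attach->dmabuf->resv, NULL);
 *     sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
 *     ...
 *     dma_buf_unmap_attachment(attach, sgt, DMA_TO_DEVICE);
 *     dma_resv_unlock(attach->dmabuf->resv);
 *
 * or it uses the _unlocked wrappers, which take and drop the lock internally:
 *
 *     sgt = dma_buf_map_attachment_unlocked(attach, DMA_TO_DEVICE);
 *     ...
 *     dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_TO_DEVICE);
 */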
866 
867 /**
868  * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
869  * @dmabuf:		[in]	buffer to attach device to.
870  * @dev:		[in]	device to be attached.
871  * @importer_ops:	[in]	importer operations for the attachment
872  * @importer_priv:	[in]	importer private pointer for the attachment
873  *
874  * Returns struct dma_buf_attachment pointer for this attachment. Attachments
875  * must be cleaned up by calling dma_buf_detach().
876  *
877  * Optionally this calls &dma_buf_ops.attach to allow device-specific attach
878  * functionality.
879  *
880  * Returns:
881  *
882  * A pointer to newly created &dma_buf_attachment on success, or a negative
883  * error code wrapped into a pointer on failure.
884  *
885  * Note that this can fail if the backing storage of @dmabuf is in a place not
886  * accessible to @dev, and cannot be moved to a more suitable place. This is
887  * indicated with the error code -EBUSY.
888  */
889 struct dma_buf_attachment *
890 dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
891 		       const struct dma_buf_attach_ops *importer_ops,
892 		       void *importer_priv)
893 {
894 	struct dma_buf_attachment *attach;
895 	int ret;
896 
897 	if (WARN_ON(!dmabuf || !dev))
898 		return ERR_PTR(-EINVAL);
899 
900 	if (WARN_ON(importer_ops && !importer_ops->move_notify))
901 		return ERR_PTR(-EINVAL);
902 
903 	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
904 	if (!attach)
905 		return ERR_PTR(-ENOMEM);
906 
907 	attach->dev = dev;
908 	attach->dmabuf = dmabuf;
909 	if (importer_ops)
910 		attach->peer2peer = importer_ops->allow_peer2peer;
911 	attach->importer_ops = importer_ops;
912 	attach->importer_priv = importer_priv;
913 
914 	if (dmabuf->ops->attach) {
915 		ret = dmabuf->ops->attach(dmabuf, attach);
916 		if (ret)
917 			goto err_attach;
918 	}
919 	dma_resv_lock(dmabuf->resv, NULL);
920 	list_add(&attach->node, &dmabuf->attachments);
921 	dma_resv_unlock(dmabuf->resv);
922 
923 	/* When either the importer or the exporter can't handle dynamic
924 	 * mappings we cache the mapping here to avoid issues with the
925 	 * reservation object lock.
926 	 */
927 	if (dma_buf_attachment_is_dynamic(attach) !=
928 	    dma_buf_is_dynamic(dmabuf)) {
929 		struct sg_table *sgt;
930 
931 		dma_resv_lock(attach->dmabuf->resv, NULL);
932 		if (dma_buf_is_dynamic(attach->dmabuf)) {
933 			ret = dmabuf->ops->pin(attach);
934 			if (ret)
935 				goto err_unlock;
936 		}
937 
938 		sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
939 		if (!sgt)
940 			sgt = ERR_PTR(-ENOMEM);
941 		if (IS_ERR(sgt)) {
942 			ret = PTR_ERR(sgt);
943 			goto err_unpin;
944 		}
945 		dma_resv_unlock(attach->dmabuf->resv);
946 		attach->sgt = sgt;
947 		attach->dir = DMA_BIDIRECTIONAL;
948 	}
949 
950 	return attach;
951 
952 err_attach:
953 	kfree(attach);
954 	return ERR_PTR(ret);
955 
956 err_unpin:
957 	if (dma_buf_is_dynamic(attach->dmabuf))
958 		dmabuf->ops->unpin(attach);
959 
960 err_unlock:
961 	dma_resv_unlock(attach->dmabuf->resv);
962 
963 	dma_buf_detach(dmabuf, attach);
964 	return ERR_PTR(ret);
965 }
966 EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, DMA_BUF);
967 
968 /**
969  * dma_buf_attach - Wrapper for dma_buf_dynamic_attach
970  * @dmabuf:	[in]	buffer to attach device to.
971  * @dev:	[in]	device to be attached.
972  *
973  * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static
974  * mapping.
975  */
976 struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
977 					  struct device *dev)
978 {
979 	return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
980 }
981 EXPORT_SYMBOL_NS_GPL(dma_buf_attach, DMA_BUF);
982 
983 static void __unmap_dma_buf(struct dma_buf_attachment *attach,
984 			    struct sg_table *sg_table,
985 			    enum dma_data_direction direction)
986 {
987 	/* uses XOR, hence this unmangles */
988 	mangle_sg_table(sg_table);
989 
990 	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
991 }
992 
993 /**
994  * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
995  * @dmabuf:	[in]	buffer to detach from.
996  * @attach:	[in]	attachment to be detached; is free'd after this call.
997  *
998  * Clean up a device attachment obtained by calling dma_buf_attach().
999  *
1000  * Optionally this calls &dma_buf_ops.detach for device-specific detach.
1001  */
1002 void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
1003 {
1004 	if (WARN_ON(!dmabuf || !attach || dmabuf != attach->dmabuf))
1005 		return;
1006 
1007 	dma_resv_lock(dmabuf->resv, NULL);
1008 
1009 	if (attach->sgt) {
1010 
1011 		__unmap_dma_buf(attach, attach->sgt, attach->dir);
1012 
1013 		if (dma_buf_is_dynamic(attach->dmabuf))
1014 			dmabuf->ops->unpin(attach);
1015 	}
1016 	list_del(&attach->node);
1017 
1018 	dma_resv_unlock(dmabuf->resv);
1019 
1020 	if (dmabuf->ops->detach)
1021 		dmabuf->ops->detach(dmabuf, attach);
1022 
1023 	kfree(attach);
1024 }
1025 EXPORT_SYMBOL_NS_GPL(dma_buf_detach, DMA_BUF);
1026 
1027 /**
1028  * dma_buf_pin - Lock down the DMA-buf
1029  * @attach:	[in]	attachment which should be pinned
1030  *
1031  * Only dynamic importers (who set up @attach with dma_buf_dynamic_attach()) may
1032  * call this, and only for limited use cases like scanout and not for temporary
1033  * pin operations. It is not permitted to allow userspace to pin arbitrary
1034  * amounts of buffers through this interface.
1035  *
1036  * Buffers must be unpinned by calling dma_buf_unpin().
1037  *
1038  * Returns:
1039  * 0 on success, negative error code on failure.
1040  */
1041 int dma_buf_pin(struct dma_buf_attachment *attach)
1042 {
1043 	struct dma_buf *dmabuf = attach->dmabuf;
1044 	int ret = 0;
1045 
1046 	WARN_ON(!dma_buf_attachment_is_dynamic(attach));
1047 
1048 	dma_resv_assert_held(dmabuf->resv);
1049 
1050 	if (dmabuf->ops->pin)
1051 		ret = dmabuf->ops->pin(attach);
1052 
1053 	return ret;
1054 }
1055 EXPORT_SYMBOL_NS_GPL(dma_buf_pin, DMA_BUF);
1056 
1057 /**
1058  * dma_buf_unpin - Unpin a DMA-buf
1059  * @attach:	[in]	attachment which should be unpinned
1060  *
1061  * This unpins a buffer pinned by dma_buf_pin() and allows the exporter to move
1062  * any mapping of @attach again and inform the importer through
1063  * &dma_buf_attach_ops.move_notify.
1064  */
1065 void dma_buf_unpin(struct dma_buf_attachment *attach)
1066 {
1067 	struct dma_buf *dmabuf = attach->dmabuf;
1068 
1069 	WARN_ON(!dma_buf_attachment_is_dynamic(attach));
1070 
1071 	dma_resv_assert_held(dmabuf->resv);
1072 
1073 	if (dmabuf->ops->unpin)
1074 		dmabuf->ops->unpin(attach);
1075 }
1076 EXPORT_SYMBOL_NS_GPL(dma_buf_unpin, DMA_BUF);
1077 
1078 /**
1079  * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
1080  * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
1081  * dma_buf_ops.
1082  * @attach:	[in]	attachment whose scatterlist is to be returned
1083  * @direction:	[in]	direction of DMA transfer
1084  *
1085  * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
1086  * on error. May return -EINTR if it is interrupted by a signal.
1087  *
1088  * On success, the DMA addresses and lengths in the returned scatterlist are
1089  * PAGE_SIZE aligned.
1090  *
1091  * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
1092  * the underlying backing storage is pinned for as long as a mapping exists,
1093  * therefore users/importers should not hold onto a mapping for undue amounts of
1094  * time.
1095  *
1096  * Important: Dynamic importers must wait for the exclusive fence of the struct
1097  * dma_resv attached to the DMA-BUF first.
1098  */
1099 struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
1100 					enum dma_data_direction direction)
1101 {
1102 	struct sg_table *sg_table;
1103 	int r;
1104 
1105 	might_sleep();
1106 
1107 	if (WARN_ON(!attach || !attach->dmabuf))
1108 		return ERR_PTR(-EINVAL);
1109 
1110 	dma_resv_assert_held(attach->dmabuf->resv);
1111 
1112 	if (attach->sgt) {
1113 		/*
1114 		 * Two mappings with different directions for the same
1115 		 * attachment are not allowed.
1116 		 */
1117 		if (attach->dir != direction &&
1118 		    attach->dir != DMA_BIDIRECTIONAL)
1119 			return ERR_PTR(-EBUSY);
1120 
1121 		return attach->sgt;
1122 	}
1123 
1124 	if (dma_buf_is_dynamic(attach->dmabuf)) {
1125 		if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
1126 			r = attach->dmabuf->ops->pin(attach);
1127 			if (r)
1128 				return ERR_PTR(r);
1129 		}
1130 	}
1131 
1132 	sg_table = __map_dma_buf(attach, direction);
1133 	if (!sg_table)
1134 		sg_table = ERR_PTR(-ENOMEM);
1135 
1136 	if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
1137 	     !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
1138 		attach->dmabuf->ops->unpin(attach);
1139 
1140 	if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
1141 		attach->sgt = sg_table;
1142 		attach->dir = direction;
1143 	}
1144 
1145 #ifdef CONFIG_DMA_API_DEBUG
1146 	if (!IS_ERR(sg_table)) {
1147 		struct scatterlist *sg;
1148 		u64 addr;
1149 		int len;
1150 		int i;
1151 
1152 		for_each_sgtable_dma_sg(sg_table, sg, i) {
1153 			addr = sg_dma_address(sg);
1154 			len = sg_dma_len(sg);
1155 			if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len)) {
1156 				pr_debug("%s: addr %llx or len %x is not page aligned!\n",
1157 					 __func__, addr, len);
1158 			}
1159 		}
1160 	}
1161 #endif /* CONFIG_DMA_API_DEBUG */
1162 	return sg_table;
1163 }
1164 EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, DMA_BUF);
1165 
1166 /**
1167  * dma_buf_map_attachment_unlocked - Returns the scatterlist table of the attachment;
1168  * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
1169  * dma_buf_ops.
1170  * @attach:	[in]	attachment whose scatterlist is to be returned
1171  * @direction:	[in]	direction of DMA transfer
1172  *
1173  * Unlocked variant of dma_buf_map_attachment().
1174  */
1175 struct sg_table *
1176 dma_buf_map_attachment_unlocked(struct dma_buf_attachment *attach,
1177 				enum dma_data_direction direction)
1178 {
1179 	struct sg_table *sg_table;
1180 
1181 	might_sleep();
1182 
1183 	if (WARN_ON(!attach || !attach->dmabuf))
1184 		return ERR_PTR(-EINVAL);
1185 
1186 	dma_resv_lock(attach->dmabuf->resv, NULL);
1187 	sg_table = dma_buf_map_attachment(attach, direction);
1188 	dma_resv_unlock(attach->dmabuf->resv);
1189 
1190 	return sg_table;
1191 }
1192 EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment_unlocked, DMA_BUF);
1193 
1194 /**
1195  * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
1196  * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
1197  * dma_buf_ops.
1198  * @attach:	[in]	attachment to unmap buffer from
1199  * @sg_table:	[in]	scatterlist info of the buffer to unmap
1200  * @direction:  [in]    direction of DMA transfer
1201  *
1202  * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
1203  */
1204 void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
1205 				struct sg_table *sg_table,
1206 				enum dma_data_direction direction)
1207 {
1208 	might_sleep();
1209 
1210 	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
1211 		return;
1212 
1213 	dma_resv_assert_held(attach->dmabuf->resv);
1214 
1215 	if (attach->sgt == sg_table)
1216 		return;
1217 
1218 	__unmap_dma_buf(attach, sg_table, direction);
1219 
1220 	if (dma_buf_is_dynamic(attach->dmabuf) &&
1221 	    !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
1222 		dma_buf_unpin(attach);
1223 }
1224 EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, DMA_BUF);
1225 
1226 /**
1227  * dma_buf_unmap_attachment_unlocked - unmaps and decreases usecount of the buffer; might
1228  * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
1229  * dma_buf_ops.
1230  * @attach:	[in]	attachment to unmap buffer from
1231  * @sg_table:	[in]	scatterlist info of the buffer to unmap
1232  * @direction:	[in]	direction of DMA transfer
1233  *
1234  * Unlocked variant of dma_buf_unmap_attachment().
1235  */
1236 void dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment *attach,
1237 				       struct sg_table *sg_table,
1238 				       enum dma_data_direction direction)
1239 {
1240 	might_sleep();
1241 
1242 	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
1243 		return;
1244 
1245 	dma_resv_lock(attach->dmabuf->resv, NULL);
1246 	dma_buf_unmap_attachment(attach, sg_table, direction);
1247 	dma_resv_unlock(attach->dmabuf->resv);
1248 }
1249 EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment_unlocked, DMA_BUF);
1250 
1251 /**
1252  * dma_buf_move_notify - notify attachments that DMA-buf is moving
1253  *
1254  * @dmabuf:	[in]	buffer which is moving
1255  *
1256  * Informs all attachments that they need to destroy and recreate all their
1257  * mappings.
1258  */
1259 void dma_buf_move_notify(struct dma_buf *dmabuf)
1260 {
1261 	struct dma_buf_attachment *attach;
1262 
1263 	dma_resv_assert_held(dmabuf->resv);
1264 
1265 	list_for_each_entry(attach, &dmabuf->attachments, node)
1266 		if (attach->importer_ops)
1267 			attach->importer_ops->move_notify(attach);
1268 }
1269 EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF);
1270 
1271 /**
1272  * DOC: cpu access
1273  *
1274  * There are multiple reasons for supporting CPU access to a dma buffer object:
1275  *
1276  * - Fallback operations in the kernel, for example when a device is connected
1277  *   over USB and the kernel needs to shuffle the data around first before
1278  *   sending it away. Cache coherency is handled by bracketing any transactions
1279  *   with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access()
1280  *   access.
1281  *
1282  *   Since most kernel-internal dma-buf accesses need the entire buffer, a
1283  *   vmap interface is introduced. Note that on very old 32-bit architectures
1284  *   vmalloc space might be limited and result in vmap calls failing.
1285  *
1286  *   Interfaces::
1287  *
1288  *      void \*dma_buf_vmap(struct dma_buf \*dmabuf, struct iosys_map \*map)
1289  *      void dma_buf_vunmap(struct dma_buf \*dmabuf, struct iosys_map \*map)
1290  *
1291  *   The vmap call can fail if there is no vmap support in the exporter, or if
1292  *   it runs out of vmalloc space. Note that the dma-buf layer keeps a reference
1293  *   count for all vmap access and calls down into the exporter's vmap function
1294  *   only when no vmapping exists, and only unmaps it once. Protection against
1295  *   concurrent vmap/vunmap calls is provided by taking the &dma_buf.lock mutex.
1296  *
1297  * - For full compatibility on the importer side with existing userspace
1298  *   interfaces, which might already support mmap'ing buffers. This is needed in
1299  *   many processing pipelines (e.g. feeding a software rendered image into a
1300  *   hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION
1301  *   framework already supported this, and mmap support was needed for DMA buffer
1302  *   file descriptors to replace ION buffers.
1303  *
1304  *   There are no special interfaces; userspace simply calls mmap on the dma-buf
1305  *   fd. But like for CPU access there's a need to bracket the actual access,
1306  *   which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that
1307  *   DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
1308  *   be restarted.
1309  *
1310  *   Some systems might need some sort of cache coherency management e.g. when
1311  *   CPU and GPU domains are being accessed through dma-buf at the same time.
1312  *   To circumvent this problem there are begin/end coherency markers, that
1313  *   forward directly to existing dma-buf device drivers vfunc hooks. Userspace
1314  *   can make use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The
1315  *   sequence would be used like following:
1316  *
1317  *     - mmap dma-buf fd
1318  *     - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write
1319  *       to mmap area 3. SYNC_END ioctl. This can be repeated as often as you
1320  *       want (with the new data being consumed by say the GPU or the scanout
1321  *       device)
1322  *     - munmap once you don't need the buffer any more
1323  *
1324  *    For correctness and optimal performance, it is always required to use
1325  *    SYNC_START and SYNC_END before and after, respectively, when accessing the
1326  *    mapped address. Userspace cannot rely on coherent access, even when there
1327  *    are systems where it just works without calling these ioctls.
1328  *
1329  * - And as a CPU fallback in userspace processing pipelines.
1330  *
1331  *   Similar to the motivation for kernel cpu access it is again important that
1332  *   the userspace code of a given importing subsystem can use the same
1333  *   interfaces with an imported dma-buf buffer object as with a native buffer
1334  *   object. This is especially important for drm where the userspace part of
1335  *   contemporary OpenGL, X, and other drivers is huge, and reworking them to
1336  *   use a different way to mmap a buffer would be rather invasive.
1337  *
1338  *   The assumption in the current dma-buf interfaces is that redirecting the
1339  *   initial mmap is all that's needed. A survey of some of the existing
1340  *   subsystems shows that no driver seems to do any nefarious thing like
1341  *   syncing up with outstanding asynchronous processing on the device or
1342  *   allocating special resources at fault time. So hopefully this is good
1343  *   enough, since adding interfaces to intercept pagefaults and allow pte
1344  *   shootdowns would increase the complexity quite a bit.
1345  *
1346  *   Interface::
1347  *
1348  *      int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
1349  *		       unsigned long);
1350  *
1351  *   If the importing subsystem simply provides a special-purpose mmap call to
1352  *   set up a mapping in userspace, calling do_mmap with &dma_buf.file will
1353  *   equally achieve that for a dma-buf object.
1354  */
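/*
 * Illustrative userspace sketch of the mmap + DMA_BUF_IOCTL_SYNC sequence
 * described above (assumptions: "buf_fd" and "size" are known; error handling
 * and restarting on -EAGAIN/-EINTR are omitted):
 *
 *     struct dma_buf_sync sync = { 0 };
 *     void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                      buf_fd, 0);
 *
 *     sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW;
 *     ioctl(buf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *
 *     // ... CPU reads/writes through ptr ...
 *
 *     sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
 *     ioctl(buf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *
 *     munmap(ptr, size);
 */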
1355 
1356 static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
1357 				      enum dma_data_direction direction)
1358 {
1359 	bool write = (direction == DMA_BIDIRECTIONAL ||
1360 		      direction == DMA_TO_DEVICE);
1361 	struct dma_resv *resv = dmabuf->resv;
1362 	long ret;
1363 
1364 	/* Wait on any implicit rendering fences */
1365 	ret = dma_resv_wait_timeout(resv, dma_resv_usage_rw(write),
1366 				    true, MAX_SCHEDULE_TIMEOUT);
1367 	if (ret < 0)
1368 		return ret;
1369 
1370 	return 0;
1371 }
1372 
1373 /**
1374  * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
1375  * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
1376  * preparations. Coherency is only guaranteed in the specified range for the
1377  * specified access direction.
1378  * @dmabuf:	[in]	buffer to prepare cpu access for.
1379  * @direction:	[in]	direction of access.
1380  *
1381  * After the cpu access is complete the caller should call
1382  * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
1383  * it guaranteed to be coherent with other DMA access.
1384  *
1385  * This function will also wait for any DMA transactions tracked through
1386  * implicit synchronization in &dma_buf.resv. For DMA transactions with explicit
1387  * synchronization this function will only ensure cache coherency, callers must
1388  * ensure synchronization with such DMA transactions on their own.
1389  *
1390  * Can return negative error values, returns 0 on success.
1391  */
1392 int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
1393 			     enum dma_data_direction direction)
1394 {
1395 	int ret = 0;
1396 
1397 	if (WARN_ON(!dmabuf))
1398 		return -EINVAL;
1399 
1400 	might_lock(&dmabuf->resv->lock.base);
1401 
1402 	if (dmabuf->ops->begin_cpu_access)
1403 		ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
1404 
1405 	/* Ensure that all fences are waited upon - but we first allow
1406 	 * the native handler the chance to do so more efficiently if it
1407 	 * chooses. A double invocation here will be a reasonably cheap no-op.
1408 	 */
1409 	if (ret == 0)
1410 		ret = __dma_buf_begin_cpu_access(dmabuf, direction);
1411 
1412 	return ret;
1413 }
1414 EXPORT_SYMBOL_NS_GPL(dma_buf_begin_cpu_access, DMA_BUF);
1415 
1416 /**
1417  * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
1418  * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
1419  * actions. Coherency is only guaranteed in the specified range for the
1420  * specified access direction.
1421  * @dmabuf:	[in]	buffer to complete cpu access for.
1422  * @direction:	[in]	direction of access.
1423  *
1424  * This terminates CPU access started with dma_buf_begin_cpu_access().
1425  *
1426  * Can return negative error values, returns 0 on success.
1427  */
1428 int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
1429 			   enum dma_data_direction direction)
1430 {
1431 	int ret = 0;
1432 
1433 	WARN_ON(!dmabuf);
1434 
1435 	might_lock(&dmabuf->resv->lock.base);
1436 
1437 	if (dmabuf->ops->end_cpu_access)
1438 		ret = dmabuf->ops->end_cpu_access(dmabuf, direction);
1439 
1440 	return ret;
1441 }
1442 EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, DMA_BUF);
1443 
1444 
1445 /**
1446  * dma_buf_mmap - Set up a userspace mmap with the given vma
1447  * @dmabuf:	[in]	buffer that should back the vma
1448  * @vma:	[in]	vma for the mmap
1449  * @pgoff:	[in]	offset in pages where this mmap should start within the
1450  *			dma-buf buffer.
1451  *
1452  * This function adjusts the passed in vma so that it points at the file of the
1453  * dma_buf operation. It also adjusts the starting pgoff and does bounds
1454  * checking on the size of the vma. Then it calls the exporters mmap function to
1455  * set up the mapping.
1456  *
1457  * Can return negative error values, returns 0 on success.
1458  */
1459 int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
1460 		 unsigned long pgoff)
1461 {
1462 	if (WARN_ON(!dmabuf || !vma))
1463 		return -EINVAL;
1464 
1465 	/* check if buffer supports mmap */
1466 	if (!dmabuf->ops->mmap)
1467 		return -EINVAL;
1468 
1469 	/* check for offset overflow */
1470 	if (pgoff + vma_pages(vma) < pgoff)
1471 		return -EOVERFLOW;
1472 
1473 	/* check for overflowing the buffer's size */
1474 	if (pgoff + vma_pages(vma) >
1475 	    dmabuf->size >> PAGE_SHIFT)
1476 		return -EINVAL;
1477 
1478 	/* readjust the vma */
1479 	vma_set_file(vma, dmabuf->file);
1480 	vma->vm_pgoff = pgoff;
1481 
1482 	return dmabuf->ops->mmap(dmabuf, vma);
1483 }
1484 EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, DMA_BUF);
1485 
1486 /**
1487  * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
1488  * address space. Same restrictions as for vmap and friends apply.
1489  * @dmabuf:	[in]	buffer to vmap
1490  * @map:	[out]	returns the vmap pointer
1491  *
1492  * This call may fail due to lack of virtual mapping address space.
1493  * These calls are optional in drivers. The intended use for them
1494  * is for mapping objects linear in kernel space for high use objects.
1495  *
1496  * To ensure coherency users must call dma_buf_begin_cpu_access() and
1497  * dma_buf_end_cpu_access() around any cpu access performed through this
1498  * mapping.
1499  *
1500  * Returns 0 on success, or a negative errno code otherwise.
1501  */
1502 int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
1503 {
1504 	struct iosys_map ptr;
1505 	int ret;
1506 
1507 	iosys_map_clear(map);
1508 
1509 	if (WARN_ON(!dmabuf))
1510 		return -EINVAL;
1511 
1512 	dma_resv_assert_held(dmabuf->resv);
1513 
1514 	if (!dmabuf->ops->vmap)
1515 		return -EINVAL;
1516 
1517 	if (dmabuf->vmapping_counter) {
1518 		dmabuf->vmapping_counter++;
1519 		BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
1520 		*map = dmabuf->vmap_ptr;
1521 		return 0;
1522 	}
1523 
1524 	BUG_ON(iosys_map_is_set(&dmabuf->vmap_ptr));
1525 
1526 	ret = dmabuf->ops->vmap(dmabuf, &ptr);
1527 	if (WARN_ON_ONCE(ret))
1528 		return ret;
1529 
1530 	dmabuf->vmap_ptr = ptr;
1531 	dmabuf->vmapping_counter = 1;
1532 
1533 	*map = dmabuf->vmap_ptr;
1534 
1535 	return 0;
1536 }
1537 EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, DMA_BUF);
1538 
1539 /**
1540  * dma_buf_vmap_unlocked - Create virtual mapping for the buffer object into kernel
1541  * address space. Same restrictions as for vmap and friends apply.
1542  * @dmabuf:	[in]	buffer to vmap
1543  * @map:	[out]	returns the vmap pointer
1544  *
1545  * Unlocked version of dma_buf_vmap()
1546  *
1547  * Returns 0 on success, or a negative errno code otherwise.
1548  */
1549 int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
1550 {
1551 	int ret;
1552 
1553 	iosys_map_clear(map);
1554 
1555 	if (WARN_ON(!dmabuf))
1556 		return -EINVAL;
1557 
1558 	dma_resv_lock(dmabuf->resv, NULL);
1559 	ret = dma_buf_vmap(dmabuf, map);
1560 	dma_resv_unlock(dmabuf->resv);
1561 
1562 	return ret;
1563 }
1564 EXPORT_SYMBOL_NS_GPL(dma_buf_vmap_unlocked, DMA_BUF);
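/*
 * A brief kernel-side sketch of the vmap interface (assumptions: "dmabuf" is a
 * valid dma-buf and the caller does not already hold the reservation lock;
 * CPU access should still be bracketed with dma_buf_begin_cpu_access() and
 * dma_buf_end_cpu_access()):
 *
 *     struct iosys_map map;
 *     int ret;
 *
 *     ret = dma_buf_vmap_unlocked(dmabuf, &map);
 *     if (ret)
 *             return ret;
 *
 *     // access the buffer through map.vaddr (or the iosys_map helpers)
 *
 *     dma_buf_vunmap_unlocked(dmabuf, &map);
 */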
1565 
1566 /**
1567  * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
1568  * @dmabuf:	[in]	buffer to vunmap
1569  * @map:	[in]	vmap pointer to vunmap
1570  */
1571 void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
1572 {
1573 	if (WARN_ON(!dmabuf))
1574 		return;
1575 
1576 	dma_resv_assert_held(dmabuf->resv);
1577 
1578 	BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
1579 	BUG_ON(dmabuf->vmapping_counter == 0);
1580 	BUG_ON(!iosys_map_is_equal(&dmabuf->vmap_ptr, map));
1581 
1582 	if (--dmabuf->vmapping_counter == 0) {
1583 		if (dmabuf->ops->vunmap)
1584 			dmabuf->ops->vunmap(dmabuf, map);
1585 		iosys_map_clear(&dmabuf->vmap_ptr);
1586 	}
1587 }
1588 EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, DMA_BUF);
1589 
1590 /**
1591  * dma_buf_vunmap_unlocked - Unmap a vmap obtained by dma_buf_vmap.
1592  * @dmabuf:	[in]	buffer to vunmap
1593  * @map:	[in]	vmap pointer to vunmap
1594  */
1595 void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
1596 {
1597 	if (WARN_ON(!dmabuf))
1598 		return;
1599 
1600 	dma_resv_lock(dmabuf->resv, NULL);
1601 	dma_buf_vunmap(dmabuf, map);
1602 	dma_resv_unlock(dmabuf->resv);
1603 }
1604 EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap_unlocked, DMA_BUF);
1605 
1606 #ifdef CONFIG_DEBUG_FS
1607 static int dma_buf_debug_show(struct seq_file *s, void *unused)
1608 {
1609 	struct dma_buf *buf_obj;
1610 	struct dma_buf_attachment *attach_obj;
1611 	int count = 0, attach_count;
1612 	size_t size = 0;
1613 	int ret;
1614 
1615 	ret = mutex_lock_interruptible(&db_list.lock);
1616 
1617 	if (ret)
1618 		return ret;
1619 
1620 	seq_puts(s, "\nDma-buf Objects:\n");
1621 	seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\tname\n",
1622 		   "size", "flags", "mode", "count", "ino");
1623 
1624 	list_for_each_entry(buf_obj, &db_list.head, list_node) {
1625 
1626 		ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
1627 		if (ret)
1628 			goto error_unlock;
1629 
1630 
1631 		spin_lock(&buf_obj->name_lock);
1632 		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
1633 				buf_obj->size,
1634 				buf_obj->file->f_flags, buf_obj->file->f_mode,
1635 				file_count(buf_obj->file),
1636 				buf_obj->exp_name,
1637 				file_inode(buf_obj->file)->i_ino,
1638 				buf_obj->name ?: "<none>");
1639 		spin_unlock(&buf_obj->name_lock);
1640 
1641 		dma_resv_describe(buf_obj->resv, s);
1642 
1643 		seq_puts(s, "\tAttached Devices:\n");
1644 		attach_count = 0;
1645 
1646 		list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
1647 			seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
1648 			attach_count++;
1649 		}
1650 		dma_resv_unlock(buf_obj->resv);
1651 
1652 		seq_printf(s, "Total %d devices attached\n\n",
1653 				attach_count);
1654 
1655 		count++;
1656 		size += buf_obj->size;
1657 	}
1658 
1659 	seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);
1660 
1661 	mutex_unlock(&db_list.lock);
1662 	return 0;
1663 
1664 error_unlock:
1665 	mutex_unlock(&db_list.lock);
1666 	return ret;
1667 }
1668 
1669 DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);
1670 
1671 static struct dentry *dma_buf_debugfs_dir;
1672 
1673 static int dma_buf_init_debugfs(void)
1674 {
1675 	struct dentry *d;
1676 	int err = 0;
1677 
1678 	d = debugfs_create_dir("dma_buf", NULL);
1679 	if (IS_ERR(d))
1680 		return PTR_ERR(d);
1681 
1682 	dma_buf_debugfs_dir = d;
1683 
1684 	d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
1685 				NULL, &dma_buf_debug_fops);
1686 	if (IS_ERR(d)) {
1687 		pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
1688 		debugfs_remove_recursive(dma_buf_debugfs_dir);
1689 		dma_buf_debugfs_dir = NULL;
1690 		err = PTR_ERR(d);
1691 	}
1692 
1693 	dma_buf_process_info_init_debugfs(dma_buf_debugfs_dir);
1694 	return err;
1695 }
1696 
1697 static void dma_buf_uninit_debugfs(void)
1698 {
1699 	debugfs_remove_recursive(dma_buf_debugfs_dir);
1700 }
1701 #else
1702 static inline int dma_buf_init_debugfs(void)
1703 {
1704 	return 0;
1705 }
1706 static inline void dma_buf_uninit_debugfs(void)
1707 {
1708 }
1709 #endif
1710 
1711 #ifdef CONFIG_DMABUF_PROCESS_INFO
1712 struct dma_buf *get_dma_buf_from_file(struct file *f)
1713 {
1714 	if (IS_ERR_OR_NULL(f))
1715 		return NULL;
1716 
1717 	if (!is_dma_buf_file(f))
1718 		return NULL;
1719 
1720 	return f->private_data;
1721 }
1722 #endif /* CONFIG_DMABUF_PROCESS_INFO */
1723 
1724 static int __init dma_buf_init(void)
1725 {
1726 	int ret;
1727 
1728 	ret = dma_buf_init_sysfs_statistics();
1729 	if (ret)
1730 		return ret;
1731 
1732 	dma_buf_mnt = kern_mount(&dma_buf_fs_type);
1733 	if (IS_ERR(dma_buf_mnt))
1734 		return PTR_ERR(dma_buf_mnt);
1735 
1736 	mutex_init(&db_list.lock);
1737 	INIT_LIST_HEAD(&db_list.head);
1738 	dma_buf_init_debugfs();
1739 	dma_buf_process_info_init_procfs();
1740 	return 0;
1741 }
1742 subsys_initcall(dma_buf_init);
1743 
1744 static void __exit dma_buf_deinit(void)
1745 {
1746 	dma_buf_uninit_debugfs();
1747 	kern_unmount(dma_buf_mnt);
1748 	dma_buf_uninit_sysfs_statistics();
1749 	dma_buf_process_info_uninit_procfs();
1750 }
1751 __exitcall(dma_buf_deinit);
1752