1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Framework for buffer objects that can be shared across devices/subsystems.
4  *
5  * Copyright(C) 2011 Linaro Limited. All rights reserved.
6  * Author: Sumit Semwal <sumit.semwal@ti.com>
7  *
8  * Many thanks to linaro-mm-sig list, and specially
9  * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
10  * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
11  * refining of this idea.
12  */
13 
14 #include <linux/fs.h>
15 #include <linux/slab.h>
16 #include <linux/dma-buf.h>
17 #include <linux/dma-fence.h>
18 #include <linux/anon_inodes.h>
19 #include <linux/export.h>
20 #include <linux/debugfs.h>
21 #include <linux/module.h>
22 #include <linux/seq_file.h>
23 #include <linux/poll.h>
24 #include <linux/dma-resv.h>
25 #include <linux/mm.h>
26 #include <linux/mount.h>
27 #include <linux/pseudo_fs.h>
28 
29 #include <uapi/linux/dma-buf.h>
30 #include <uapi/linux/magic.h>
31 
32 #include <trace/hooks/dmabuf.h>
33 
34 #include "dma-buf-sysfs-stats.h"
35 
36 struct dma_buf_list {
37 	struct list_head head;
38 	struct mutex lock;
39 };
40 
41 static struct dma_buf_list db_list;
42 
43 /*
44  * Helper to traverse db_list, calling the given callback for each
45  * dmabuf so it can extract the required info.
47  */
48 int get_each_dmabuf(int (*callback)(const struct dma_buf *dmabuf,
49 		    void *private), void *private)
50 {
51 	struct dma_buf *buf;
52 	int ret = mutex_lock_interruptible(&db_list.lock);
53 
54 	if (ret)
55 		return ret;
56 
57 	list_for_each_entry(buf, &db_list.head, list_node) {
58 		ret = callback(buf, private);
59 		if (ret)
60 			break;
61 	}
62 	mutex_unlock(&db_list.lock);
63 	return ret;
64 }
65 EXPORT_SYMBOL_NS_GPL(get_each_dmabuf, MINIDUMP);
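/*
 * A minimal sketch of how a caller might use this iterator, assuming a
 * hypothetical callback that just sums up buffer sizes:
 *
 *	static int sum_dmabuf_size(const struct dma_buf *dmabuf, void *private)
 *	{
 *		size_t *total = private;
 *
 *		*total += dmabuf->size;
 *		return 0;	// returning non-zero stops the iteration
 *	}
 *
 *	size_t total = 0;
 *	int err = get_each_dmabuf(sum_dmabuf_size, &total);
 */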
66 
67 static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
68 {
69 	struct dma_buf *dmabuf;
70 	char name[DMA_BUF_NAME_LEN];
71 	size_t ret = 0;
72 
73 	dmabuf = dentry->d_fsdata;
74 	spin_lock(&dmabuf->name_lock);
75 	if (dmabuf->name)
76 		ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
77 	spin_unlock(&dmabuf->name_lock);
78 
79 	return dynamic_dname(dentry, buffer, buflen, "/%s:%s",
80 			     dentry->d_name.name, ret > 0 ? name : "");
81 }
82 
83 static void dma_buf_release(struct dentry *dentry)
84 {
85 	struct dma_buf *dmabuf;
86 
87 	dmabuf = dentry->d_fsdata;
88 	if (unlikely(!dmabuf))
89 		return;
90 
91 	BUG_ON(dmabuf->vmapping_counter);
92 
93 	/*
94 	 * If you hit this BUG() it could mean:
95 	 * * There's a file reference imbalance in dma_buf_poll / dma_buf_poll_cb or somewhere else
96 	 * * dmabuf->cb_in/out.active are non-0 despite no pending fence callback
97 	 */
98 	BUG_ON(dmabuf->cb_in.active || dmabuf->cb_out.active);
99 
100 	dma_buf_stats_teardown(dmabuf);
101 	dmabuf->ops->release(dmabuf);
102 
103 	trace_android_vh_dma_buf_release(dmabuf);
104 	if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
105 		dma_resv_fini(dmabuf->resv);
106 
107 	WARN_ON(!list_empty(&dmabuf->attachments));
108 	module_put(dmabuf->owner);
109 	kfree(dmabuf->name);
110 	kfree(dmabuf);
111 }
112 
113 static int dma_buf_file_release(struct inode *inode, struct file *file)
114 {
115 	struct dma_buf *dmabuf;
116 
117 	if (!is_dma_buf_file(file))
118 		return -EINVAL;
119 
120 	dmabuf = file->private_data;
121 
122 	mutex_lock(&db_list.lock);
123 	list_del(&dmabuf->list_node);
124 	mutex_unlock(&db_list.lock);
125 
126 	return 0;
127 }
128 
129 static const struct dentry_operations dma_buf_dentry_ops = {
130 	.d_dname = dmabuffs_dname,
131 	.d_release = dma_buf_release,
132 };
133 
134 static struct vfsmount *dma_buf_mnt;
135 
136 static int dma_buf_fs_init_context(struct fs_context *fc)
137 {
138 	struct pseudo_fs_context *ctx;
139 
140 	ctx = init_pseudo(fc, DMA_BUF_MAGIC);
141 	if (!ctx)
142 		return -ENOMEM;
143 	ctx->dops = &dma_buf_dentry_ops;
144 	return 0;
145 }
146 
147 static struct file_system_type dma_buf_fs_type = {
148 	.name = "dmabuf",
149 	.init_fs_context = dma_buf_fs_init_context,
150 	.kill_sb = kill_anon_super,
151 };
152 
153 static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
154 {
155 	struct dma_buf *dmabuf;
156 
157 	if (!is_dma_buf_file(file))
158 		return -EINVAL;
159 
160 	dmabuf = file->private_data;
161 
162 	/* check if buffer supports mmap */
163 	if (!dmabuf->ops->mmap)
164 		return -EINVAL;
165 
166 	/* check for overflowing the buffer's size */
167 	if (vma->vm_pgoff + vma_pages(vma) >
168 	    dmabuf->size >> PAGE_SHIFT)
169 		return -EINVAL;
170 
171 	return dmabuf->ops->mmap(dmabuf, vma);
172 }
173 
174 static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
175 {
176 	struct dma_buf *dmabuf;
177 	loff_t base;
178 
179 	if (!is_dma_buf_file(file))
180 		return -EBADF;
181 
182 	dmabuf = file->private_data;
183 
184 	/* only support discovering the end of the buffer,
185 	   but also allow SEEK_SET to maintain the idiomatic
186 	   SEEK_END(0), SEEK_CUR(0) pattern */
187 	if (whence == SEEK_END)
188 		base = dmabuf->size;
189 	else if (whence == SEEK_SET)
190 		base = 0;
191 	else
192 		return -EINVAL;
193 
194 	if (offset != 0)
195 		return -EINVAL;
196 
197 	return base + offset;
198 }
199 
200 /**
201  * DOC: implicit fence polling
202  *
203  * To support cross-device and cross-driver synchronization of buffer access
204  * implicit fences (represented internally in the kernel with &struct dma_fence)
205  * can be attached to a &dma_buf. The glue for that and a few related things are
206  * provided in the &dma_resv structure.
207  *
208  * Userspace can query the state of these implicitly tracked fences using poll()
209  * and related system calls:
210  *
211  * - Checking for EPOLLIN, i.e. read access, can be used to query the state of the
212  *   most recent write or exclusive fence.
213  *
214  * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
215  *   all attached fences, shared and exclusive ones.
216  *
217  * Note that this only signals the completion of the respective fences, i.e. the
218  * DMA transfers are complete. Cache flushing and any other necessary
219  * preparations before CPU access can begin still need to happen.
220  */
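/*
 * A minimal userspace sketch of this, assuming "fd" is a dma-buf file
 * descriptor obtained from an exporting driver; POLLOUT waits for all
 * attached fences, POLLIN only for the most recent write/exclusive fence:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLOUT };
 *
 *	poll(&pfd, 1, -1);	// blocks until all pending DMA has signalled
 */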
221 
222 static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
223 {
224 	struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
225 	struct dma_buf *dmabuf = container_of(dcb->poll, struct dma_buf, poll);
226 	unsigned long flags;
227 
228 	spin_lock_irqsave(&dcb->poll->lock, flags);
229 	wake_up_locked_poll(dcb->poll, dcb->active);
230 	dcb->active = 0;
231 	spin_unlock_irqrestore(&dcb->poll->lock, flags);
232 	dma_fence_put(fence);
233 	/* Paired with get_file in dma_buf_poll */
234 	fput(dmabuf->file);
235 }
236 
237 static bool dma_buf_poll_shared(struct dma_resv *resv,
238 				struct dma_buf_poll_cb_t *dcb)
239 {
240 	struct dma_resv_list *fobj = dma_resv_shared_list(resv);
241 	struct dma_fence *fence;
242 	int i, r;
243 
244 	if (!fobj)
245 		return false;
246 
247 	for (i = 0; i < fobj->shared_count; ++i) {
248 		fence = rcu_dereference_protected(fobj->shared[i],
249 						  dma_resv_held(resv));
250 		dma_fence_get(fence);
251 		r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
252 		if (!r)
253 			return true;
254 		dma_fence_put(fence);
255 	}
256 
257 	return false;
258 }
259 
260 static bool dma_buf_poll_excl(struct dma_resv *resv,
261 			      struct dma_buf_poll_cb_t *dcb)
262 {
263 	struct dma_fence *fence = dma_resv_excl_fence(resv);
264 	int r;
265 
266 	if (!fence)
267 		return false;
268 
269 	dma_fence_get(fence);
270 	r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
271 	if (!r)
272 		return true;
273 	dma_fence_put(fence);
274 
275 	return false;
276 }
277 
278 static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
279 {
280 	struct dma_buf *dmabuf;
281 	struct dma_resv *resv;
282 	__poll_t events;
283 
284 	dmabuf = file->private_data;
285 	if (!dmabuf || !dmabuf->resv)
286 		return EPOLLERR;
287 
288 	resv = dmabuf->resv;
289 
290 	poll_wait(file, &dmabuf->poll, poll);
291 
292 	events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
293 	if (!events)
294 		return 0;
295 
296 	dma_resv_lock(resv, NULL);
297 
298 	if (events & EPOLLOUT) {
299 		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_out;
300 
301 		/* Check that callback isn't busy */
302 		spin_lock_irq(&dmabuf->poll.lock);
303 		if (dcb->active)
304 			events &= ~EPOLLOUT;
305 		else
306 			dcb->active = EPOLLOUT;
307 		spin_unlock_irq(&dmabuf->poll.lock);
308 
309 		if (events & EPOLLOUT) {
310 			/* Paired with fput in dma_buf_poll_cb */
311 			get_file(dmabuf->file);
312 
313 			if (!dma_buf_poll_shared(resv, dcb) &&
314 			    !dma_buf_poll_excl(resv, dcb))
315 
316 				/* No callback queued, wake up any other waiters */
317 				dma_buf_poll_cb(NULL, &dcb->cb);
318 			else
319 				events &= ~EPOLLOUT;
320 		}
321 	}
322 
323 	if (events & EPOLLIN) {
324 		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_in;
325 
326 		/* Check that callback isn't busy */
327 		spin_lock_irq(&dmabuf->poll.lock);
328 		if (dcb->active)
329 			events &= ~EPOLLIN;
330 		else
331 			dcb->active = EPOLLIN;
332 		spin_unlock_irq(&dmabuf->poll.lock);
333 
334 		if (events & EPOLLIN) {
335 			/* Paired with fput in dma_buf_poll_cb */
336 			get_file(dmabuf->file);
337 
338 			if (!dma_buf_poll_excl(resv, dcb))
339 				/* No callback queued, wake up any other waiters */
340 				dma_buf_poll_cb(NULL, &dcb->cb);
341 			else
342 				events &= ~EPOLLIN;
343 		}
344 	}
345 
346 	dma_resv_unlock(resv);
347 	return events;
348 }
349 
350 static long _dma_buf_set_name(struct dma_buf *dmabuf, const char *name)
351 {
352 	long ret = 0;
353 
354 	dma_resv_lock(dmabuf->resv, NULL);
355 	if (!list_empty(&dmabuf->attachments)) {
356 		ret = -EBUSY;
357 		goto out_unlock;
358 	}
359 	spin_lock(&dmabuf->name_lock);
360 	kfree(dmabuf->name);
361 	dmabuf->name = name;
362 	spin_unlock(&dmabuf->name_lock);
363 
364 out_unlock:
365 	dma_resv_unlock(dmabuf->resv);
366 	return ret;
367 }
368 
369 /**
370  * dma_buf_set_name - Set a name on a specific dma_buf to track its usage.
371  * The name of the dma-buf buffer can only be set when the dma-buf is not
372  * attached to any devices. It could theoretically support changing the
373  * name of the dma-buf if the same piece of memory is used for multiple
374  * purposes between different devices.
375  *
376  * @dmabuf: [in]     dmabuf buffer that will be renamed.
377  * @buf:    [in]     A piece of userspace memory that contains the name of
378  *                   the dma-buf.
379  *
380  * Returns 0 on success. If the dma-buf buffer is already attached to
381  * devices, return -EBUSY.
382  *
383  */
384 long dma_buf_set_name(struct dma_buf *dmabuf, const char *name)
385 {
386 	long ret = 0;
387 	char *buf = kstrndup(name, DMA_BUF_NAME_LEN, GFP_KERNEL);
388 
389 	if (!buf)
390 		return -ENOMEM;
391 
392 	ret = _dma_buf_set_name(dmabuf, buf);
393 	if (ret)
394 		kfree(buf);
395 
396 	return ret;
397 }
398 EXPORT_SYMBOL_GPL(dma_buf_set_name);
399 
400 static long dma_buf_set_name_user(struct dma_buf *dmabuf, const char __user *buf)
401 {
402 	char *name = strndup_user(buf, DMA_BUF_NAME_LEN);
403 	long ret = 0;
404 
405 	if (IS_ERR(name))
406 		return PTR_ERR(name);
407 
408 	ret = _dma_buf_set_name(dmabuf, name);
409 	if (ret)
410 		kfree(name);
411 
412 	return ret;
413 }
414 
415 static long dma_buf_ioctl(struct file *file,
416 			  unsigned int cmd, unsigned long arg)
417 {
418 	struct dma_buf *dmabuf;
419 	struct dma_buf_sync sync;
420 	enum dma_data_direction direction;
421 	int ret;
422 
423 	dmabuf = file->private_data;
424 
425 	switch (cmd) {
426 	case DMA_BUF_IOCTL_SYNC:
427 		if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
428 			return -EFAULT;
429 
430 		if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
431 			return -EINVAL;
432 
433 		switch (sync.flags & DMA_BUF_SYNC_RW) {
434 		case DMA_BUF_SYNC_READ:
435 			direction = DMA_FROM_DEVICE;
436 			break;
437 		case DMA_BUF_SYNC_WRITE:
438 			direction = DMA_TO_DEVICE;
439 			break;
440 		case DMA_BUF_SYNC_RW:
441 			direction = DMA_BIDIRECTIONAL;
442 			break;
443 		default:
444 			return -EINVAL;
445 		}
446 
447 		if (sync.flags & DMA_BUF_SYNC_END)
448 			ret = dma_buf_end_cpu_access(dmabuf, direction);
449 		else
450 			ret = dma_buf_begin_cpu_access(dmabuf, direction);
451 
452 		return ret;
453 
454 	case DMA_BUF_SET_NAME_A:
455 	case DMA_BUF_SET_NAME_B:
456 		return dma_buf_set_name_user(dmabuf, (const char __user *)arg);
457 
458 	default:
459 		return -ENOTTY;
460 	}
461 }
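/*
 * A minimal userspace sketch of the name ioctl handled above, assuming "fd"
 * is a dma-buf file descriptor with no attachments yet (the uapi header
 * provides the DMA_BUF_SET_NAME macro for this request):
 *
 *	ioctl(fd, DMA_BUF_SET_NAME, "camera-preview");
 *
 * The DMA_BUF_IOCTL_SYNC path is sketched in the "cpu access" DOC section
 * further down.
 */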
462 
463 static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
464 {
465 	struct dma_buf *dmabuf = file->private_data;
466 
467 	seq_printf(m, "size:\t%zu\n", dmabuf->size);
468 	/* Don't count the temporary reference taken inside procfs seq_show */
469 	seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
470 	seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
471 	spin_lock(&dmabuf->name_lock);
472 	if (dmabuf->name)
473 		seq_printf(m, "name:\t%s\n", dmabuf->name);
474 	spin_unlock(&dmabuf->name_lock);
475 }
476 
477 static const struct file_operations dma_buf_fops = {
478 	.release	= dma_buf_file_release,
479 	.mmap		= dma_buf_mmap_internal,
480 	.llseek		= dma_buf_llseek,
481 	.poll		= dma_buf_poll,
482 	.unlocked_ioctl	= dma_buf_ioctl,
483 	.compat_ioctl	= compat_ptr_ioctl,
484 	.show_fdinfo	= dma_buf_show_fdinfo,
485 };
486 
487 /*
488  * is_dma_buf_file - Check if struct file* is associated with dma_buf
489  */
490 int is_dma_buf_file(struct file *file)
491 {
492 	return file->f_op == &dma_buf_fops;
493 }
494 EXPORT_SYMBOL_NS_GPL(is_dma_buf_file, MINIDUMP);
495 
496 static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
497 {
498 	static atomic64_t dmabuf_inode = ATOMIC64_INIT(0);
499 	struct file *file;
500 	struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);
501 
502 	if (IS_ERR(inode))
503 		return ERR_CAST(inode);
504 
505 	inode->i_size = dmabuf->size;
506 	inode_set_bytes(inode, dmabuf->size);
507 
508 	/*
509 	 * The ->i_ino acquired from get_next_ino() is not unique thus
510 	 * not suitable for using it as dentry name by dmabuf stats.
511 	 * Override ->i_ino with the unique and dmabuffs specific
512 	 * value.
513 	 */
514 	inode->i_ino = atomic64_add_return(1, &dmabuf_inode);
515 	file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
516 				 flags, &dma_buf_fops);
517 	if (IS_ERR(file))
518 		goto err_alloc_file;
519 	file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
520 	file->private_data = dmabuf;
521 	file->f_path.dentry->d_fsdata = dmabuf;
522 
523 	return file;
524 
525 err_alloc_file:
526 	iput(inode);
527 	return file;
528 }
529 
530 /**
531  * DOC: dma buf device access
532  *
533  * For device DMA access to a shared DMA buffer the usual sequence of operations
534  * is fairly simple:
535  *
536  * 1. The exporter defines its exporter instance using
537  *    DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private
538  *    buffer object into a &dma_buf. It then exports that &dma_buf to userspace
539  *    as a file descriptor by calling dma_buf_fd().
540  *
541  * 2. Userspace passes this file descriptor to all drivers it wants this buffer
542  *    to share with: first the file descriptor is converted to a &dma_buf using
543  *    dma_buf_get(). Then the buffer is attached to the device using
544  *    dma_buf_attach().
545  *
546  *    Up to this stage the exporter is still free to migrate or reallocate the
547  *    backing storage.
548  *
549  * 3. Once the buffer is attached to all devices userspace can initiate DMA
550  *    access to the shared buffer. In the kernel this is done by calling
551  *    dma_buf_map_attachment() and dma_buf_unmap_attachment().
552  *
553  * 4. Once a driver is done with a shared buffer it needs to call
554  *    dma_buf_detach() (after cleaning up any mappings) and then release the
555  *    reference acquired with dma_buf_get() by calling dma_buf_put().
556  *
557  * For the detailed semantics exporters are expected to implement see
558  * &dma_buf_ops.
559  */
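/*
 * A minimal importer-side sketch of the sequence above, assuming "fd" came
 * from userspace and "dev" is the importing device (error handling omitted):
 *
 *	struct dma_buf *dmabuf = dma_buf_get(fd);
 *	struct dma_buf_attachment *attach = dma_buf_attach(dmabuf, dev);
 *	struct sg_table *sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *
 *	// ... program the device with the addresses/lengths in sgt ...
 *
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(dmabuf, attach);
 *	dma_buf_put(dmabuf);
 */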
560 
561 /**
562  * dma_buf_export - Creates a new dma_buf, and associates an anon file
563  * with this buffer, so it can be exported.
564  * Also connects the allocator-specific data and ops to the buffer.
565  * Additionally, provide a name string for exporter; useful in debugging.
566  *
567  * @exp_info:	[in]	holds all the export related information provided
568  *			by the exporter. see &struct dma_buf_export_info
569  *			for further details.
570  *
571  * Returns, on success, a newly created struct dma_buf object, which wraps the
572  * supplied private data and operations for struct dma_buf_ops. On missing
573  * ops or an error allocating struct dma_buf, a negative error pointer is
574  * returned.
575  *
576  * For most cases the easiest way to create @exp_info is through the
577  * %DEFINE_DMA_BUF_EXPORT_INFO macro.
578  */
579 struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
580 {
581 	struct dma_buf *dmabuf;
582 	struct dma_resv *resv = exp_info->resv;
583 	struct file *file;
584 	size_t alloc_size = sizeof(struct dma_buf);
585 	int ret;
586 
587 	if (!exp_info->resv)
588 		alloc_size += sizeof(struct dma_resv);
589 	else
590 		/* prevent &dma_buf[1] == dma_buf->resv */
591 		alloc_size += 1;
592 
593 	if (WARN_ON(!exp_info->priv
594 			  || !exp_info->ops
595 			  || !exp_info->ops->map_dma_buf
596 			  || !exp_info->ops->unmap_dma_buf
597 			  || !exp_info->ops->release)) {
598 		return ERR_PTR(-EINVAL);
599 	}
600 
601 	if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
602 		    (exp_info->ops->pin || exp_info->ops->unpin)))
603 		return ERR_PTR(-EINVAL);
604 
605 	if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
606 		return ERR_PTR(-EINVAL);
607 
608 	if (!try_module_get(exp_info->owner))
609 		return ERR_PTR(-ENOENT);
610 
611 	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
612 	if (!dmabuf) {
613 		ret = -ENOMEM;
614 		goto err_module;
615 	}
616 
617 	dmabuf->priv = exp_info->priv;
618 	dmabuf->ops = exp_info->ops;
619 	dmabuf->size = exp_info->size;
620 	dmabuf->exp_name = exp_info->exp_name;
621 	dmabuf->owner = exp_info->owner;
622 	spin_lock_init(&dmabuf->name_lock);
623 	init_waitqueue_head(&dmabuf->poll);
624 	dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll;
625 	dmabuf->cb_in.active = dmabuf->cb_out.active = 0;
626 
627 	if (!resv) {
628 		resv = (struct dma_resv *)&dmabuf[1];
629 		dma_resv_init(resv);
630 	}
631 	dmabuf->resv = resv;
632 
633 	file = dma_buf_getfile(dmabuf, exp_info->flags);
634 	if (IS_ERR(file)) {
635 		ret = PTR_ERR(file);
636 		goto err_dmabuf;
637 	}
638 
639 	file->f_mode |= FMODE_LSEEK;
640 	dmabuf->file = file;
641 
642 	mutex_init(&dmabuf->lock);
643 	INIT_LIST_HEAD(&dmabuf->attachments);
644 
645 	mutex_lock(&db_list.lock);
646 	list_add(&dmabuf->list_node, &db_list.head);
647 	mutex_unlock(&db_list.lock);
648 
649 	ret = dma_buf_stats_setup(dmabuf);
650 	if (ret)
651 		goto err_sysfs;
652 
653 	return dmabuf;
654 
655 err_sysfs:
656 	/*
657 	 * Set file->f_path.dentry->d_fsdata to NULL so that when
658 	 * dma_buf_release() gets invoked by dentry_ops, it exits
659 	 * early before calling the release() dma_buf op.
660 	 */
661 	file->f_path.dentry->d_fsdata = NULL;
662 	fput(file);
663 err_dmabuf:
664 	kfree(dmabuf);
665 err_module:
666 	module_put(exp_info->owner);
667 	return ERR_PTR(ret);
668 }
669 EXPORT_SYMBOL_GPL(dma_buf_export);
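/*
 * A minimal exporter-side sketch, assuming a hypothetical driver-private
 * "buffer" object and a "my_dmabuf_ops" table that implements at least
 * map_dma_buf, unmap_dma_buf and release:
 *
 *	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *	struct dma_buf *dmabuf;
 *
 *	exp_info.ops = &my_dmabuf_ops;
 *	exp_info.size = buffer->size;
 *	exp_info.flags = O_RDWR | O_CLOEXEC;
 *	exp_info.priv = buffer;
 *
 *	dmabuf = dma_buf_export(&exp_info);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *
 *	return dma_buf_fd(dmabuf, O_CLOEXEC);	// hand the fd to userspace
 */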
670 
671 /**
672  * dma_buf_fd - returns a file descriptor for the given struct dma_buf
673  * @dmabuf:	[in]	pointer to dma_buf for which fd is required.
674  * @flags:      [in]    flags to give to fd
675  *
676  * On success, returns an associated 'fd'. Else, returns error.
677  */
678 int dma_buf_fd(struct dma_buf *dmabuf, int flags)
679 {
680 	int fd;
681 
682 	if (!dmabuf || !dmabuf->file)
683 		return -EINVAL;
684 
685 	fd = get_unused_fd_flags(flags);
686 	if (fd < 0)
687 		return fd;
688 
689 	fd_install(fd, dmabuf->file);
690 
691 	return fd;
692 }
693 EXPORT_SYMBOL_GPL(dma_buf_fd);
694 
695 /**
696  * dma_buf_get - returns the struct dma_buf related to an fd
697  * @fd:	[in]	fd associated with the struct dma_buf to be returned
698  *
699  * On success, returns the struct dma_buf associated with an fd; uses
700  * file's refcounting done by fget to increase refcount. returns ERR_PTR
701  * otherwise.
702  */
703 struct dma_buf *dma_buf_get(int fd)
704 {
705 	struct file *file;
706 
707 	file = fget(fd);
708 
709 	if (!file)
710 		return ERR_PTR(-EBADF);
711 
712 	if (!is_dma_buf_file(file)) {
713 		fput(file);
714 		return ERR_PTR(-EINVAL);
715 	}
716 
717 	return file->private_data;
718 }
719 EXPORT_SYMBOL_GPL(dma_buf_get);
720 
721 /**
722  * dma_buf_put - decreases refcount of the buffer
723  * @dmabuf:	[in]	buffer to reduce refcount of
724  *
725  * Uses file's refcounting done implicitly by fput().
726  *
727  * If, as a result of this call, the refcount becomes 0, the 'release' file
728  * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
729  * in turn, and frees the memory allocated for dmabuf when exported.
730  */
731 void dma_buf_put(struct dma_buf *dmabuf)
732 {
733 	if (WARN_ON(!dmabuf || !dmabuf->file))
734 		return;
735 
736 	fput(dmabuf->file);
737 }
738 EXPORT_SYMBOL_GPL(dma_buf_put);
739 
740 static void mangle_sg_table(struct sg_table *sg_table)
741 {
742 #ifdef CONFIG_DMABUF_DEBUG
743 	int i;
744 	struct scatterlist *sg;
745 
746 	/* To catch abuse of the underlying struct page by importers mix
747 	 * up the bits, but take care to preserve the low SG_ bits to
748 	 * not corrupt the sgt. The mixing is undone in __unmap_dma_buf
749 	 * before passing the sgt back to the exporter. */
750 	for_each_sgtable_sg(sg_table, sg, i)
751 		sg->page_link ^= ~0xffUL;
752 #endif
753 
754 }
755 static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
756 				       enum dma_data_direction direction)
757 {
758 	struct sg_table *sg_table;
759 
760 	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
761 
762 	if (!IS_ERR_OR_NULL(sg_table))
763 		mangle_sg_table(sg_table);
764 
765 	return sg_table;
766 }
767 
768 /**
769  * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
770  * @dmabuf:		[in]	buffer to attach device to.
771  * @dev:		[in]	device to be attached.
772  * @importer_ops:	[in]	importer operations for the attachment
773  * @importer_priv:	[in]	importer private pointer for the attachment
774  *
775  * Returns struct dma_buf_attachment pointer for this attachment. Attachments
776  * must be cleaned up by calling dma_buf_detach().
777  *
778  * Optionally this calls &dma_buf_ops.attach to allow device-specific attach
779  * functionality.
780  *
781  * Returns:
782  *
783  * A pointer to newly created &dma_buf_attachment on success, or a negative
784  * error code wrapped into a pointer on failure.
785  *
786  * Note that this can fail if the backing storage of @dmabuf is in a place not
787  * accessible to @dev, and cannot be moved to a more suitable place. This is
788  * indicated with the error code -EBUSY.
789  */
790 struct dma_buf_attachment *
791 dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
792 		       const struct dma_buf_attach_ops *importer_ops,
793 		       void *importer_priv)
794 {
795 	struct dma_buf_attachment *attach;
796 	int ret;
797 
798 	if (WARN_ON(!dmabuf || !dev))
799 		return ERR_PTR(-EINVAL);
800 
801 	if (WARN_ON(importer_ops && !importer_ops->move_notify))
802 		return ERR_PTR(-EINVAL);
803 
804 	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
805 	if (!attach)
806 		return ERR_PTR(-ENOMEM);
807 
808 	attach->dev = dev;
809 	attach->dmabuf = dmabuf;
810 	if (importer_ops)
811 		attach->peer2peer = importer_ops->allow_peer2peer;
812 	attach->importer_ops = importer_ops;
813 	attach->importer_priv = importer_priv;
814 
815 	if (dmabuf->ops->attach) {
816 		ret = dmabuf->ops->attach(dmabuf, attach);
817 		if (ret)
818 			goto err_attach;
819 	}
820 	dma_resv_lock(dmabuf->resv, NULL);
821 	list_add(&attach->node, &dmabuf->attachments);
822 	dma_resv_unlock(dmabuf->resv);
823 
824 	/* When either the importer or the exporter can't handle dynamic
825 	 * mappings we cache the mapping here to avoid issues with the
826 	 * reservation object lock.
827 	 */
828 	if (dma_buf_attachment_is_dynamic(attach) !=
829 	    dma_buf_is_dynamic(dmabuf)) {
830 		struct sg_table *sgt;
831 
832 		if (dma_buf_is_dynamic(attach->dmabuf)) {
833 			dma_resv_lock(attach->dmabuf->resv, NULL);
834 			ret = dmabuf->ops->pin(attach);
835 			if (ret)
836 				goto err_unlock;
837 		}
838 
839 		sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
840 		if (!sgt)
841 			sgt = ERR_PTR(-ENOMEM);
842 		if (IS_ERR(sgt)) {
843 			ret = PTR_ERR(sgt);
844 			goto err_unpin;
845 		}
846 		if (dma_buf_is_dynamic(attach->dmabuf))
847 			dma_resv_unlock(attach->dmabuf->resv);
848 		attach->sgt = sgt;
849 		attach->dir = DMA_BIDIRECTIONAL;
850 	}
851 
852 	return attach;
853 
854 err_attach:
855 	kfree(attach);
856 	return ERR_PTR(ret);
857 
858 err_unpin:
859 	if (dma_buf_is_dynamic(attach->dmabuf))
860 		dmabuf->ops->unpin(attach);
861 
862 err_unlock:
863 	if (dma_buf_is_dynamic(attach->dmabuf))
864 		dma_resv_unlock(attach->dmabuf->resv);
865 
866 	dma_buf_detach(dmabuf, attach);
867 	return ERR_PTR(ret);
868 }
869 EXPORT_SYMBOL_GPL(dma_buf_dynamic_attach);
870 
871 /**
872  * dma_buf_attach - Wrapper for dma_buf_dynamic_attach
873  * @dmabuf:	[in]	buffer to attach device to.
874  * @dev:	[in]	device to be attached.
875  *
876  * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static
877  * mapping.
878  */
879 struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
880 					  struct device *dev)
881 {
882 	return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
883 }
884 EXPORT_SYMBOL_GPL(dma_buf_attach);
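/*
 * A minimal sketch of a dynamic attachment, assuming a hypothetical
 * my_move_notify() that invalidates the importer's cached mapping when the
 * exporter moves the buffer:
 *
 *	static const struct dma_buf_attach_ops my_importer_ops = {
 *		.allow_peer2peer = true,
 *		.move_notify = my_move_notify,
 *	};
 *
 *	attach = dma_buf_dynamic_attach(dmabuf, dev, &my_importer_ops, my_priv);
 *	if (IS_ERR(attach))
 *		return PTR_ERR(attach);
 */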
885 
886 static void __unmap_dma_buf(struct dma_buf_attachment *attach,
887 			    struct sg_table *sg_table,
888 			    enum dma_data_direction direction)
889 {
890 	/* uses XOR, hence this unmangles */
891 	mangle_sg_table(sg_table);
892 
893 	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
894 }
895 
896 /**
897  * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
898  * @dmabuf:	[in]	buffer to detach from.
899  * @attach:	[in]	attachment to be detached; is free'd after this call.
900  *
901  * Clean up a device attachment obtained by calling dma_buf_attach().
902  *
903  * Optionally this calls &dma_buf_ops.detach for device-specific detach.
904  */
905 void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
906 {
907 	if (WARN_ON(!dmabuf || !attach))
908 		return;
909 
910 	if (attach->sgt) {
911 		if (dma_buf_is_dynamic(attach->dmabuf))
912 			dma_resv_lock(attach->dmabuf->resv, NULL);
913 
914 		__unmap_dma_buf(attach, attach->sgt, attach->dir);
915 
916 		if (dma_buf_is_dynamic(attach->dmabuf)) {
917 			dmabuf->ops->unpin(attach);
918 			dma_resv_unlock(attach->dmabuf->resv);
919 		}
920 	}
921 
922 	dma_resv_lock(dmabuf->resv, NULL);
923 	list_del(&attach->node);
924 	dma_resv_unlock(dmabuf->resv);
925 	if (dmabuf->ops->detach)
926 		dmabuf->ops->detach(dmabuf, attach);
927 
928 	kfree(attach);
929 }
930 EXPORT_SYMBOL_GPL(dma_buf_detach);
931 
932 /**
933  * dma_buf_pin - Lock down the DMA-buf
934  * @attach:	[in]	attachment which should be pinned
935  *
936  * Only dynamic importers (who set up @attach with dma_buf_dynamic_attach()) may
937  * call this, and only for limited use cases like scanout and not for temporary
938  * pin operations. It is not permitted to allow userspace to pin arbitrary
939  * amounts of buffers through this interface.
940  *
941  * Buffers must be unpinned by calling dma_buf_unpin().
942  *
943  * Returns:
944  * 0 on success, negative error code on failure.
945  */
946 int dma_buf_pin(struct dma_buf_attachment *attach)
947 {
948 	struct dma_buf *dmabuf = attach->dmabuf;
949 	int ret = 0;
950 
951 	WARN_ON(!dma_buf_attachment_is_dynamic(attach));
952 
953 	dma_resv_assert_held(dmabuf->resv);
954 
955 	if (dmabuf->ops->pin)
956 		ret = dmabuf->ops->pin(attach);
957 
958 	return ret;
959 }
960 EXPORT_SYMBOL_GPL(dma_buf_pin);
961 
962 /**
963  * dma_buf_unpin - Unpin a DMA-buf
964  * @attach:	[in]	attachment which should be unpinned
965  *
966  * This unpins a buffer pinned by dma_buf_pin() and allows the exporter to move
967  * any mapping of @attach again and inform the importer through
968  * &dma_buf_attach_ops.move_notify.
969  */
970 void dma_buf_unpin(struct dma_buf_attachment *attach)
971 {
972 	struct dma_buf *dmabuf = attach->dmabuf;
973 
974 	WARN_ON(!dma_buf_attachment_is_dynamic(attach));
975 
976 	dma_resv_assert_held(dmabuf->resv);
977 
978 	if (dmabuf->ops->unpin)
979 		dmabuf->ops->unpin(attach);
980 }
981 EXPORT_SYMBOL_GPL(dma_buf_unpin);
982 
983 /**
984  * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
985  * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
986  * dma_buf_ops.
987  * @attach:	[in]	attachment whose scatterlist is to be returned
988  * @direction:	[in]	direction of DMA transfer
989  *
990  * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
991  * on error. May return -EINTR if it is interrupted by a signal.
992  *
993  * On success, the DMA addresses and lengths in the returned scatterlist are
994  * PAGE_SIZE aligned.
995  *
996  * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
997  * the underlying backing storage is pinned for as long as a mapping exists,
998  * therefore users/importers should not hold onto a mapping for undue amounts of
999  * time.
1000  *
1001  * Important: Dynamic importers must wait for the exclusive fence of the struct
1002  * dma_resv attached to the DMA-BUF first.
1003  */
1004 struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
1005 					enum dma_data_direction direction)
1006 {
1007 	struct sg_table *sg_table;
1008 	int r;
1009 
1010 	might_sleep();
1011 
1012 	if (WARN_ON(!attach || !attach->dmabuf))
1013 		return ERR_PTR(-EINVAL);
1014 
1015 	if (dma_buf_attachment_is_dynamic(attach))
1016 		dma_resv_assert_held(attach->dmabuf->resv);
1017 
1018 	if (attach->sgt) {
1019 		/*
1020 		 * Two mappings with different directions for the same
1021 		 * attachment are not allowed.
1022 		 */
1023 		if (attach->dir != direction &&
1024 		    attach->dir != DMA_BIDIRECTIONAL)
1025 			return ERR_PTR(-EBUSY);
1026 
1027 		return attach->sgt;
1028 	}
1029 
1030 	if (dma_buf_is_dynamic(attach->dmabuf)) {
1031 		dma_resv_assert_held(attach->dmabuf->resv);
1032 		if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
1033 			r = attach->dmabuf->ops->pin(attach);
1034 			if (r)
1035 				return ERR_PTR(r);
1036 		}
1037 	}
1038 
1039 	sg_table = __map_dma_buf(attach, direction);
1040 	if (!sg_table)
1041 		sg_table = ERR_PTR(-ENOMEM);
1042 
1043 	if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
1044 	     !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
1045 		attach->dmabuf->ops->unpin(attach);
1046 
1047 	if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
1048 		attach->sgt = sg_table;
1049 		attach->dir = direction;
1050 	}
1051 
1052 #ifdef CONFIG_DMA_API_DEBUG
1053 	if (!IS_ERR(sg_table)) {
1054 		struct scatterlist *sg;
1055 		u64 addr;
1056 		int len;
1057 		int i;
1058 
1059 		for_each_sgtable_dma_sg(sg_table, sg, i) {
1060 			addr = sg_dma_address(sg);
1061 			len = sg_dma_len(sg);
1062 			if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len)) {
1063 				pr_debug("%s: addr %llx or len %x is not page aligned!\n",
1064 					 __func__, addr, len);
1065 			}
1066 		}
1067 	}
1068 #endif /* CONFIG_DMA_API_DEBUG */
1069 	return sg_table;
1070 }
1071 EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
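/*
 * A minimal sketch of mapping through a dynamic attachment, where the caller
 * must already hold the reservation lock (error handling omitted):
 *
 *	dma_resv_lock(attach->dmabuf->resv, NULL);
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *	// ... wait for the exclusive fence, then program the device ...
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_resv_unlock(attach->dmabuf->resv);
 */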
1072 
1073 /**
1074  * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
1075  * deallocate the associated scatterlist. Is a wrapper for unmap_dma_buf() of
1076  * dma_buf_ops.
1077  * @attach:	[in]	attachment to unmap buffer from
1078  * @sg_table:	[in]	scatterlist info of the buffer to unmap
1079  * @direction:  [in]    direction of DMA transfer
1080  *
1081  * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
1082  */
1083 void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
1084 				struct sg_table *sg_table,
1085 				enum dma_data_direction direction)
1086 {
1087 	might_sleep();
1088 
1089 	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
1090 		return;
1091 
1092 	if (dma_buf_attachment_is_dynamic(attach))
1093 		dma_resv_assert_held(attach->dmabuf->resv);
1094 
1095 	if (attach->sgt == sg_table)
1096 		return;
1097 
1098 	if (dma_buf_is_dynamic(attach->dmabuf))
1099 		dma_resv_assert_held(attach->dmabuf->resv);
1100 
1101 	__unmap_dma_buf(attach, sg_table, direction);
1102 
1103 	if (dma_buf_is_dynamic(attach->dmabuf) &&
1104 	    !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
1105 		dma_buf_unpin(attach);
1106 }
1107 EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
1108 
1109 /**
1110  * dma_buf_move_notify - notify attachments that DMA-buf is moving
1111  *
1112  * @dmabuf:	[in]	buffer which is moving
1113  *
1114  * Informs all attachments that they need to destroy and recreate all their
1115  * mappings.
1116  */
1117 void dma_buf_move_notify(struct dma_buf *dmabuf)
1118 {
1119 	struct dma_buf_attachment *attach;
1120 
1121 	dma_resv_assert_held(dmabuf->resv);
1122 
1123 	list_for_each_entry(attach, &dmabuf->attachments, node)
1124 		if (attach->importer_ops)
1125 			attach->importer_ops->move_notify(attach);
1126 }
1127 EXPORT_SYMBOL_GPL(dma_buf_move_notify);
1128 
1129 /**
1130  * DOC: cpu access
1131  *
1132  * There are multiple reasons for supporting CPU access to a dma buffer object:
1133  *
1134  * - Fallback operations in the kernel, for example when a device is connected
1135  *   over USB and the kernel needs to shuffle the data around first before
1136  *   sending it away. Cache coherency is handled by bracketing any transactions
1137  *   with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access().
1139  *
1140  *   Since most kernel-internal dma-buf accesses need the entire buffer, a
1141  *   vmap interface is introduced. Note that on very old 32-bit architectures
1142  *   vmalloc space might be limited and result in vmap calls failing.
1143  *
1144  *   Interfaces::
1145  *
1146  *      void \*dma_buf_vmap(struct dma_buf \*dmabuf)
1147  *      void dma_buf_vunmap(struct dma_buf \*dmabuf, void \*vaddr)
1148  *
1149  *   The vmap call can fail if there is no vmap support in the exporter, or if
1150  *   it runs out of vmalloc space. Note that the dma-buf layer keeps a reference
1151  *   count for all vmap access and calls down into the exporter's vmap function
1152  *   only when no vmapping exists, and only unmaps it once. Protection against
1153  *   concurrent vmap/vunmap calls is provided by taking the &dma_buf.lock mutex.
1154  *
1155  * - For full compatibility on the importer side with existing userspace
1156  *   interfaces, which might already support mmap'ing buffers. This is needed in
1157  *   many processing pipelines (e.g. feeding a software rendered image into a
1158  *   hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION
1159  *   framework already supported this, and mmap support was needed for DMA
1160  *   buffer file descriptors to replace ION buffers.
1161  *
1162  *   There are no special interfaces; userspace simply calls mmap on the dma-buf
1163  *   fd. But as with CPU access there's a need to bracket the actual access,
1164  *   which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that
1165  *   DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
1166  *   be restarted.
1167  *
1168  *   Some systems might need some sort of cache coherency management e.g. when
1169  *   CPU and GPU domains are being accessed through dma-buf at the same time.
1170  *   To circumvent this problem there are begin/end coherency markers, that
1171  *   forward directly to existing dma-buf device drivers vfunc hooks. Userspace
1172  *   can make use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The
1173  *   sequence would be used as follows:
1174  *
1175  *     - mmap dma-buf fd
1176  *     - for each drawing/upload cycle on the CPU: 1. SYNC_START ioctl, 2. read/write
1177  *       to the mmap area, 3. SYNC_END ioctl. This can be repeated as often as you
1178  *       want (with the new data being consumed by, say, the GPU or the scanout
1179  *       device)
1180  *     - munmap once you don't need the buffer any more
1181  *
1182  *    For correctness and optimal performance, it is always required to use
1183  *    SYNC_START and SYNC_END before and after, respectively, when accessing the
1184  *    mapped address. Userspace cannot rely on coherent access, even when there
1185  *    are systems where it just works without calling these ioctls.
1186  *
1187  * - And as a CPU fallback in userspace processing pipelines.
1188  *
1189  *   Similar to the motivation for kernel cpu access it is again important that
1190  *   the userspace code of a given importing subsystem can use the same
1191  *   interfaces with an imported dma-buf buffer object as with a native buffer
1192  *   object. This is especially important for drm where the userspace part of
1193  *   contemporary OpenGL, X, and other drivers is huge, and reworking them to
1194  *   use a different way to mmap a buffer would be rather invasive.
1195  *
1196  *   The assumption in the current dma-buf interfaces is that redirecting the
1197  *   initial mmap is all that's needed. A survey of some of the existing
1198  *   subsystems shows that no driver seems to do any nefarious thing like
1199  *   syncing up with outstanding asynchronous processing on the device or
1200  *   allocating special resources at fault time. So hopefully this is good
1201  *   enough, since adding interfaces to intercept pagefaults and allow pte
1202  *   shootdowns would increase the complexity quite a bit.
1203  *
1204  *   Interface::
1205  *
1206  *      int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
1207  *		       unsigned long);
1208  *
1209  *   If the importing subsystem simply provides a special-purpose mmap call to
1210  *   set up a mapping in userspace, calling do_mmap with &dma_buf.file will
1211  *   equally achieve that for a dma-buf object.
1212  */
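/*
 * A minimal userspace sketch of the sequence above, assuming "fd" is a
 * dma-buf file descriptor and "size" its size in bytes:
 *
 *	struct dma_buf_sync sync;
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 *	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW;
 *	ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
 *	// ... CPU reads/writes through ptr ...
 *	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
 *	ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
 *
 *	munmap(ptr, size);
 */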
1213 
1214 static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
1215 				      enum dma_data_direction direction)
1216 {
1217 	bool write = (direction == DMA_BIDIRECTIONAL ||
1218 		      direction == DMA_TO_DEVICE);
1219 	struct dma_resv *resv = dmabuf->resv;
1220 	long ret;
1221 
1222 	/* Wait on any implicit rendering fences */
1223 	ret = dma_resv_wait_timeout(resv, write, true, MAX_SCHEDULE_TIMEOUT);
1224 	if (ret < 0)
1225 		return ret;
1226 
1227 	return 0;
1228 }
1229 
1230 /**
1231  * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
1232  * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
1233  * preparations. Coherency is only guaranteed in the specified range for the
1234  * specified access direction.
1235  * @dmabuf:	[in]	buffer to prepare cpu access for.
1236  * @direction:	[in]	direction of CPU access.
1237  *
1238  * After the cpu access is complete the caller should call
1239  * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
1240  * it guaranteed to be coherent with other DMA access.
1241  *
1242  * This function will also wait for any DMA transactions tracked through
1243  * implicit synchronization in &dma_buf.resv. For DMA transactions with explicit
1244  * synchronization this function will only ensure cache coherency, callers must
1245  * ensure synchronization with such DMA transactions on their own.
1246  *
1247  * Can return negative error values, returns 0 on success.
1248  */
1249 int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
1250 			     enum dma_data_direction direction)
1251 {
1252 	int ret = 0;
1253 
1254 	if (WARN_ON(!dmabuf))
1255 		return -EINVAL;
1256 
1257 	might_lock(&dmabuf->resv->lock.base);
1258 
1259 	if (dmabuf->ops->begin_cpu_access)
1260 		ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
1261 
1262 	/* Ensure that all fences are waited upon - but we first allow
1263 	 * the native handler the chance to do so more efficiently if it
1264 	 * chooses. A double invocation here will be reasonably cheap no-op.
1265 	 */
1266 	if (ret == 0)
1267 		ret = __dma_buf_begin_cpu_access(dmabuf, direction);
1268 
1269 	return ret;
1270 }
1271 EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
1272 
1273 
1274 /**
1275  * dma_buf_begin_cpu_access_partial - An alternative to dma_buf_begin_cpu_access
1276  * which applies to a subset of the buffer instead of the entire buffer. This
1277  * can offer better performance than dma_buf_begin_cpu_access if implemented
1278  * by an exporter. If an exporter does not implement partial buffer access then
1279  * this function will return -EOPNOTSUPP and it is the responsibility of the
1280  * caller to fallback to dma_buf_begin_cpu_access or fail entirely.
1281  *
1282  * @dmabuf:	[in]	buffer to prepare cpu access for.
1283  * @direction:	[in]	direction for access.
1284  * @offset:	[in]	offset in bytes from beginning of buffer for partial
1285 			range.
1286  * @len:	[in]	length in bytes of partial range beginning at @offset.
1287  *
1288  * After the cpu access is complete the caller should call
1289  * dma_buf_end_cpu_access_partial(). Only when cpu access is bracketed by both
1290  * calls is it guaranteed to be coherent with other DMA access.
1291  *
1292  * This function will also wait for any DMA transactions tracked through
1293  * implicit synchronization in &dma_buf.resv. For DMA transactions with explicit
1294  * synchronization this function will only ensure cache coherency, callers must
1295  * ensure synchronization with such DMA transactions on their own.
1296  *
1297  * Can return negative error values, returns 0 on success.
1298  */
1299 int dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
1300 				     enum dma_data_direction direction,
1301 				     unsigned int offset, unsigned int len)
1302 {
1303 	int ret;
1304 
1305 	if (WARN_ON(!dmabuf))
1306 		return -EINVAL;
1307 
1308 	if (!dmabuf->ops->begin_cpu_access_partial)
1309 		return -EOPNOTSUPP;
1310 
1311 	ret = dmabuf->ops->begin_cpu_access_partial(dmabuf, direction,
1312 						    offset, len);
1313 
1314 	/* Ensure that all fences are waited upon - but we first allow
1315 	 * the native handler the chance to do so more efficiently if it
1316 	 * chooses. A double invocation here will be reasonably cheap no-op.
1317 	 */
1318 	if (ret == 0)
1319 		ret = __dma_buf_begin_cpu_access(dmabuf, direction);
1320 
1321 	return ret;
1322 }
1323 EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access_partial);
1324 
1325 /**
1326  * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
1327  * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
1328  * actions. Coherency is only guaranteed in the specified range for the
1329  * specified access direction.
1330  * @dmabuf:	[in]	buffer to complete cpu access for.
1331  * @direction:	[in]	direction of CPU access.
1332  *
1333  * This terminates CPU access started with dma_buf_begin_cpu_access().
1334  *
1335  * Can return negative error values, returns 0 on success.
1336  */
1337 int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
1338 			   enum dma_data_direction direction)
1339 {
1340 	int ret = 0;
1341 
1342 	WARN_ON(!dmabuf);
1343 
1344 	might_lock(&dmabuf->resv->lock.base);
1345 
1346 	if (dmabuf->ops->end_cpu_access)
1347 		ret = dmabuf->ops->end_cpu_access(dmabuf, direction);
1348 
1349 	return ret;
1350 }
1351 EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
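/*
 * A minimal kernel-side sketch of bracketing CPU access, assuming the CPU
 * only needs to read back data the device has written:
 *
 *	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *	if (ret)
 *		return ret;
 *	// ... CPU reads the backing storage, e.g. through a dma_buf_vmap() ...
 *	dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
 */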
1352 
1353 /**
1354  * dma_buf_end_cpu_access_partial - An alternative to dma_buf_end_cpu_access
1355  * which applies to a subset of the buffer instead of the entire buffer. This
1356  * can offer better performance than dma_buf_end_cpu_access if implemented
1357  * by an exporter. If an exporter does not implement partial buffer access then
1358  * this function will return -EOPNOTSUPP and it is the responsibility of the
1359  * caller to fallback to dma_buf_end_cpu_access or fail entirely.
1360  *
1361  * @dmabuf:	[in]	buffer to complete cpu access for.
1362  * @direction:	[in]	direction for access.
1363  * @offset:	[in]	offset in bytes from beginning of buffer for partial
1364 			range.
1365  * @len:	[in]	length in bytes of partial range beginning at @offset.
1366  *
1367  * This terminates CPU access started with dma_buf_begin_cpu_access_partial().
1368  *
1369  * Can return negative error values, returns 0 on success.
1370  */
1371 int dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
1372 				   enum dma_data_direction direction,
1373 				   unsigned int offset, unsigned int len)
1374 {
1375 	WARN_ON(!dmabuf);
1376 
1377 	if (!dmabuf->ops->end_cpu_access_partial)
1378 		return -EOPNOTSUPP;
1379 
1380 	return dmabuf->ops->end_cpu_access_partial(dmabuf, direction,
1381 						   offset, len);
1382 }
1383 EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access_partial);
1384 
1385 /**
1386  * dma_buf_mmap - Set up a userspace mmap with the given vma
1387  * @dmabuf:	[in]	buffer that should back the vma
1388  * @vma:	[in]	vma for the mmap
1389  * @pgoff:	[in]	offset in pages where this mmap should start within the
1390  *			dma-buf buffer.
1391  *
1392  * This function adjusts the passed in vma so that it points at the file of the
1393  * dma_buf operation. It also adjusts the starting pgoff and does bounds
1394  * checking on the size of the vma. Then it calls the exporter's mmap function to
1395  * set up the mapping.
1396  *
1397  * Can return negative error values, returns 0 on success.
1398  */
1399 int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
1400 		 unsigned long pgoff)
1401 {
1402 	if (WARN_ON(!dmabuf || !vma))
1403 		return -EINVAL;
1404 
1405 	/* check if buffer supports mmap */
1406 	if (!dmabuf->ops->mmap)
1407 		return -EINVAL;
1408 
1409 	/* check for offset overflow */
1410 	if (pgoff + vma_pages(vma) < pgoff)
1411 		return -EOVERFLOW;
1412 
1413 	/* check for overflowing the buffer's size */
1414 	if (pgoff + vma_pages(vma) >
1415 	    dmabuf->size >> PAGE_SHIFT)
1416 		return -EINVAL;
1417 
1418 	/* readjust the vma */
1419 	vma_set_file(vma, dmabuf->file);
1420 	vma->vm_pgoff = pgoff;
1421 
1422 	return dmabuf->ops->mmap(dmabuf, vma);
1423 }
1424 EXPORT_SYMBOL_GPL(dma_buf_mmap);
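/*
 * A minimal sketch of an importer redirecting its own mmap to the dma-buf,
 * assuming a hypothetical driver object that keeps a pointer to the dmabuf:
 *
 *	static int my_drv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_object *obj = file->private_data;
 *
 *		return dma_buf_mmap(obj->dmabuf, vma, 0);
 *	}
 */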
1425 
1426 /**
1427  * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
1428  * address space. Same restrictions as for vmap and friends apply.
1429  * @dmabuf:	[in]	buffer to vmap
1430  * @map:	[out]	returns the vmap pointer
1431  *
1432  * This call may fail due to lack of virtual mapping address space.
1433  * These calls are optional in drivers. The intended use for them
1434  * is for mapping objects linear in kernel space for high use objects.
1435  *
1436  * To ensure coherency users must call dma_buf_begin_cpu_access() and
1437  * dma_buf_end_cpu_access() around any cpu access performed through this
1438  * mapping.
1439  *
1440  * Returns 0 on success, or a negative errno code otherwise.
1441  */
1442 int dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
1443 {
1444 	struct dma_buf_map ptr;
1445 	int ret = 0;
1446 
1447 	dma_buf_map_clear(map);
1448 
1449 	if (WARN_ON(!dmabuf))
1450 		return -EINVAL;
1451 
1452 	if (!dmabuf->ops->vmap)
1453 		return -EINVAL;
1454 
1455 	mutex_lock(&dmabuf->lock);
1456 	if (dmabuf->vmapping_counter) {
1457 		dmabuf->vmapping_counter++;
1458 		BUG_ON(dma_buf_map_is_null(&dmabuf->vmap_ptr));
1459 		*map = dmabuf->vmap_ptr;
1460 		goto out_unlock;
1461 	}
1462 
1463 	BUG_ON(dma_buf_map_is_set(&dmabuf->vmap_ptr));
1464 
1465 	ret = dmabuf->ops->vmap(dmabuf, &ptr);
1466 	if (WARN_ON_ONCE(ret))
1467 		goto out_unlock;
1468 
1469 	dmabuf->vmap_ptr = ptr;
1470 	dmabuf->vmapping_counter = 1;
1471 
1472 	*map = dmabuf->vmap_ptr;
1473 
1474 out_unlock:
1475 	mutex_unlock(&dmabuf->lock);
1476 	return ret;
1477 }
1478 EXPORT_SYMBOL_GPL(dma_buf_vmap);
1479 
1480 /**
1481  * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
1482  * @dmabuf:	[in]	buffer to vunmap
1483  * @map:	[in]	vmap pointer to vunmap
1484  */
1485 void dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
1486 {
1487 	if (WARN_ON(!dmabuf))
1488 		return;
1489 
1490 	BUG_ON(dma_buf_map_is_null(&dmabuf->vmap_ptr));
1491 	BUG_ON(dmabuf->vmapping_counter == 0);
1492 	BUG_ON(!dma_buf_map_is_equal(&dmabuf->vmap_ptr, map));
1493 
1494 	mutex_lock(&dmabuf->lock);
1495 	if (--dmabuf->vmapping_counter == 0) {
1496 		if (dmabuf->ops->vunmap)
1497 			dmabuf->ops->vunmap(dmabuf, map);
1498 		dma_buf_map_clear(&dmabuf->vmap_ptr);
1499 	}
1500 	mutex_unlock(&dmabuf->lock);
1501 }
1502 EXPORT_SYMBOL_GPL(dma_buf_vunmap);
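/*
 * A minimal sketch of using the vmap interface, assuming the caller brackets
 * the access with dma_buf_begin_cpu_access()/dma_buf_end_cpu_access():
 *
 *	struct dma_buf_map map;
 *	int ret = dma_buf_vmap(dmabuf, &map);
 *
 *	if (ret)
 *		return ret;
 *	if (!map.is_iomem)
 *		memset(map.vaddr, 0, dmabuf->size);	// plain kernel pointer
 *	dma_buf_vunmap(dmabuf, &map);
 */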
1503 
1504 int dma_buf_get_flags(struct dma_buf *dmabuf, unsigned long *flags)
1505 {
1506 	int ret = 0;
1507 
1508 	if (WARN_ON(!dmabuf) || !flags)
1509 		return -EINVAL;
1510 
1511 	if (dmabuf->ops->get_flags)
1512 		ret = dmabuf->ops->get_flags(dmabuf, flags);
1513 
1514 	return ret;
1515 }
1516 EXPORT_SYMBOL_GPL(dma_buf_get_flags);
1517 
1518 #ifdef CONFIG_DEBUG_FS
1519 static int dma_buf_debug_show(struct seq_file *s, void *unused)
1520 {
1521 	struct dma_buf *buf_obj;
1522 	struct dma_buf_attachment *attach_obj;
1523 	struct dma_resv *robj;
1524 	struct dma_resv_list *fobj;
1525 	struct dma_fence *fence;
1526 	int count = 0, attach_count, shared_count, i;
1527 	size_t size = 0;
1528 	int ret;
1529 
1530 	ret = mutex_lock_interruptible(&db_list.lock);
1531 
1532 	if (ret)
1533 		return ret;
1534 
1535 	seq_puts(s, "\nDma-buf Objects:\n");
1536 	seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\n",
1537 		   "size", "flags", "mode", "count", "ino");
1538 
1539 	list_for_each_entry(buf_obj, &db_list.head, list_node) {
1540 
1541 		ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
1542 		if (ret)
1543 			goto error_unlock;
1544 
1545 		spin_lock(&buf_obj->name_lock);
1546 		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
1547 				buf_obj->size,
1548 				buf_obj->file->f_flags, buf_obj->file->f_mode,
1549 				file_count(buf_obj->file),
1550 				buf_obj->exp_name,
1551 				file_inode(buf_obj->file)->i_ino,
1552 				buf_obj->name ?: "");
1553 		spin_unlock(&buf_obj->name_lock);
1554 
1555 		robj = buf_obj->resv;
1556 		fence = dma_resv_excl_fence(robj);
1557 		if (fence)
1558 			seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
1559 				   fence->ops->get_driver_name(fence),
1560 				   fence->ops->get_timeline_name(fence),
1561 				   dma_fence_is_signaled(fence) ? "" : "un");
1562 
1563 		fobj = rcu_dereference_protected(robj->fence,
1564 						 dma_resv_held(robj));
1565 		shared_count = fobj ? fobj->shared_count : 0;
1566 		for (i = 0; i < shared_count; i++) {
1567 			fence = rcu_dereference_protected(fobj->shared[i],
1568 							  dma_resv_held(robj));
1569 			seq_printf(s, "\tShared fence: %s %s %ssignalled\n",
1570 				   fence->ops->get_driver_name(fence),
1571 				   fence->ops->get_timeline_name(fence),
1572 				   dma_fence_is_signaled(fence) ? "" : "un");
1573 		}
1574 
1575 		seq_puts(s, "\tAttached Devices:\n");
1576 		attach_count = 0;
1577 
1578 		list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
1579 			seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
1580 			attach_count++;
1581 		}
1582 		dma_resv_unlock(buf_obj->resv);
1583 
1584 		seq_printf(s, "Total %d devices attached\n\n",
1585 				attach_count);
1586 
1587 		count++;
1588 		size += buf_obj->size;
1589 	}
1590 
1591 	seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);
1592 
1593 	mutex_unlock(&db_list.lock);
1594 	return 0;
1595 
1596 error_unlock:
1597 	mutex_unlock(&db_list.lock);
1598 	return ret;
1599 }
1600 
1601 DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);
1602 
1603 static struct dentry *dma_buf_debugfs_dir;
1604 
1605 static int dma_buf_init_debugfs(void)
1606 {
1607 	struct dentry *d;
1608 	int err = 0;
1609 
1610 	d = debugfs_create_dir("dma_buf", NULL);
1611 	if (IS_ERR(d))
1612 		return PTR_ERR(d);
1613 
1614 	dma_buf_debugfs_dir = d;
1615 
1616 	d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
1617 				NULL, &dma_buf_debug_fops);
1618 	if (IS_ERR(d)) {
1619 		pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
1620 		debugfs_remove_recursive(dma_buf_debugfs_dir);
1621 		dma_buf_debugfs_dir = NULL;
1622 		err = PTR_ERR(d);
1623 	}
1624 
1625 	return err;
1626 }
1627 
1628 static void dma_buf_uninit_debugfs(void)
1629 {
1630 	debugfs_remove_recursive(dma_buf_debugfs_dir);
1631 }
1632 #else
1633 static inline int dma_buf_init_debugfs(void)
1634 {
1635 	return 0;
1636 }
1637 static inline void dma_buf_uninit_debugfs(void)
1638 {
1639 }
1640 #endif
1641 
1642 static int __init dma_buf_init(void)
1643 {
1644 	int ret;
1645 
1646 	ret = dma_buf_init_sysfs_statistics();
1647 	if (ret)
1648 		return ret;
1649 
1650 	dma_buf_mnt = kern_mount(&dma_buf_fs_type);
1651 	if (IS_ERR(dma_buf_mnt))
1652 		return PTR_ERR(dma_buf_mnt);
1653 
1654 	mutex_init(&db_list.lock);
1655 	INIT_LIST_HEAD(&db_list.head);
1656 	dma_buf_init_debugfs();
1657 	return 0;
1658 }
1659 subsys_initcall(dma_buf_init);
1660 
1661 static void __exit dma_buf_deinit(void)
1662 {
1663 	dma_buf_uninit_debugfs();
1664 	kern_unmount(dma_buf_mnt);
1665 	dma_buf_uninit_sysfs_statistics();
1666 }
1667 __exitcall(dma_buf_deinit);
1668