/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Header file for dma buffer sharing framework.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to linaro-mm-sig list, and especially
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
 * refining of this idea.
 */
#ifndef __DMA_BUF_H__
#define __DMA_BUF_H__

#include <linux/android_kabi.h>
#include <linux/iosys-map.h>
#include <linux/file.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/dma-fence.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

struct device;
struct dma_buf;
struct dma_buf_attachment;

/**
 * struct dma_buf_ops - operations possible on struct dma_buf
 * @vmap: [optional] creates a virtual mapping for the buffer into kernel
 *	  address space. Same restrictions as for vmap and friends apply.
 * @vunmap: [optional] unmaps a vmap from the buffer
 */
struct dma_buf_ops {
	/**
	 * @cache_sgt_mapping:
	 *
	 * If true the framework will cache the first mapping made for each
	 * attachment. This avoids creating mappings for attachments multiple
	 * times.
	 */
	bool cache_sgt_mapping;

	/**
	 * @attach:
	 *
	 * This is called from dma_buf_attach() to make sure that a given
	 * &dma_buf_attachment.dev can access the provided &dma_buf. Exporters
	 * which support buffer objects in special locations like VRAM or
	 * device-specific carveout areas should check whether the buffer could
	 * be moved to system memory (or directly accessed by the provided
	 * device), and otherwise need to fail the attach operation.
	 *
	 * The exporter should also in general check whether the current
	 * allocation fulfills the DMA constraints of the new device. If this
	 * is not the case, and the allocation cannot be moved, it should also
	 * fail the attach operation.
	 *
	 * Any exporter-private housekeeping data can be stored in the
	 * &dma_buf_attachment.priv pointer.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success, negative error code on failure. It might return -EBUSY
	 * to signal that backing storage is already allocated and incompatible
	 * with the requirements of the requesting device.
	 */
	int (*attach)(struct dma_buf *, struct dma_buf_attachment *);
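
	/*
	 * A minimal sketch of an @attach implementation for a hypothetical
	 * exporter with a fixed, non-movable allocation (my_buffer and its
	 * fields are assumptions, not part of this header): it only validates
	 * the new device's DMA constraints against the existing allocation.
	 *
	 *	static int my_attach(struct dma_buf *dmabuf,
	 *			     struct dma_buf_attachment *attach)
	 *	{
	 *		struct my_buffer *buf = dmabuf->priv;
	 *
	 *		// Fail if the device cannot address the pages backing
	 *		// this allocation, since it cannot be moved.
	 *		if (buf->phys_end > dma_get_mask(attach->dev))
	 *			return -EBUSY;
	 *
	 *		attach->priv = NULL; // no per-attachment housekeeping
	 *		return 0;
	 *	}
	 */
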
	/**
	 * @detach:
	 *
	 * This is called by dma_buf_detach() to release a &dma_buf_attachment.
	 * Provided so that exporters can clean up any housekeeping for an
	 * &dma_buf_attachment.
	 *
	 * This callback is optional.
	 */
	void (*detach)(struct dma_buf *, struct dma_buf_attachment *);

	/**
	 * @pin:
	 *
	 * This is called by dma_buf_pin() and lets the exporter know that the
	 * DMA-buf can't be moved any more. Ideally, the exporter should
	 * pin the buffer so that it is generally accessible by all
	 * devices.
	 *
	 * This is called with the &dmabuf.resv object locked and is mutually
	 * exclusive with @cache_sgt_mapping.
	 *
	 * This is called automatically for non-dynamic importers from
	 * dma_buf_attach().
	 *
	 * Note that similar to non-dynamic exporters in their @map_dma_buf
	 * callback the driver must guarantee that the memory is available for
	 * use and cleared of any old data by the time this function returns.
	 * Drivers which pipeline their buffer moves internally must wait for
	 * all moves and clears to complete.
	 *
	 * Returns:
	 *
	 * 0 on success, negative error code on failure.
	 */
	int (*pin)(struct dma_buf_attachment *attach);
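
	/*
	 * A minimal sketch of @pin for a hypothetical exporter that migrates
	 * buffers between VRAM and system memory (my_buffer and
	 * my_buffer_make_resident() are assumptions): pinning forces the
	 * buffer into a location every attached device can reach and blocks
	 * further moves until @unpin.
	 *
	 *	static int my_pin(struct dma_buf_attachment *attach)
	 *	{
	 *		struct my_buffer *buf = attach->dmabuf->priv;
	 *		int ret;
	 *
	 *		dma_resv_assert_held(attach->dmabuf->resv);
	 *
	 *		// Move to generally accessible memory and wait for
	 *		// pending moves and clears before reporting success.
	 *		ret = my_buffer_make_resident(buf);
	 *		if (ret)
	 *			return ret;
	 *
	 *		buf->pin_count++;
	 *		return 0;
	 *	}
	 */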

	/**
	 * @unpin:
	 *
	 * This is called by dma_buf_unpin() and lets the exporter know that the
	 * DMA-buf can be moved again.
	 *
	 * This is called with the dmabuf->resv object locked and is mutually
	 * exclusive with @cache_sgt_mapping.
	 *
	 * This callback is optional.
	 */
	void (*unpin)(struct dma_buf_attachment *attach);

	/**
	 * @map_dma_buf:
	 *
	 * This is called by dma_buf_map_attachment() and is used to map a
	 * shared &dma_buf into device address space, and it is mandatory. It
	 * can only be called if @attach has been called successfully.
	 *
	 * This call may sleep, e.g. when the backing storage first needs to be
	 * allocated, or moved to a location suitable for all currently attached
	 * devices.
	 *
	 * Note that any specific buffer attributes required for this function
	 * should get added to device_dma_parameters accessible via
	 * &device.dma_params from the &dma_buf_attachment. The @attach callback
	 * should also check these constraints.
	 *
	 * If this is being called for the first time, the exporter can now
	 * choose to scan through the list of attachments for this buffer,
	 * collate the requirements of the attached devices, and choose an
	 * appropriate backing storage for the buffer.
	 *
	 * Based on enum dma_data_direction, it might be possible to have
	 * multiple users accessing at the same time (for reading, maybe), or
	 * any other kind of sharing that the exporter might wish to make
	 * available to buffer-users.
	 *
	 * This is always called with the dmabuf->resv object locked when
	 * the dynamic_mapping flag is true.
	 *
	 * Note that for non-dynamic exporters the driver must guarantee that
	 * the memory is available for use and cleared of any old data by
	 * the time this function returns.  Drivers which pipeline their buffer
	 * moves internally must wait for all moves and clears to complete.
	 * Dynamic exporters do not need to follow this rule: For non-dynamic
	 * importers the buffer is already pinned through @pin, which has the
	 * same requirements. Dynamic importers, on the other hand, are
	 * required to obey the dma_resv fences.
	 *
	 * Returns:
	 *
	 * A &sg_table scatter list of the backing storage of the DMA buffer,
	 * already mapped into the device address space of the &device attached
	 * with the provided &dma_buf_attachment. The addresses and lengths in
	 * the scatter list are PAGE_SIZE aligned.
	 *
	 * On failure, returns a negative error value wrapped into a pointer.
	 * May also return -EINTR when a signal was received while being
	 * blocked.
	 *
	 * Note that exporters should not try to cache the scatter list, or
	 * return the same one for multiple calls. Caching is done either by the
	 * DMA-BUF code (for non-dynamic importers) or the importer. Ownership
	 * of the scatter list is transferred to the caller, and returned by
	 * @unmap_dma_buf.
	 */
	struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *,
					 enum dma_data_direction);
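
	/*
	 * A minimal sketch of @map_dma_buf for a hypothetical exporter backed
	 * by a page array (my_buffer is an assumption): a fresh sg_table is
	 * built and DMA-mapped on every call, so ownership can be handed to
	 * the caller as required above.
	 *
	 *	static struct sg_table *
	 *	my_map_dma_buf(struct dma_buf_attachment *attach,
	 *		       enum dma_data_direction dir)
	 *	{
	 *		struct my_buffer *buf = attach->dmabuf->priv;
	 *		struct sg_table *sgt;
	 *		int ret;
	 *
	 *		sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	 *		if (!sgt)
	 *			return ERR_PTR(-ENOMEM);
	 *
	 *		ret = sg_alloc_table_from_pages(sgt, buf->pages,
	 *						buf->nr_pages, 0,
	 *						buf->nr_pages << PAGE_SHIFT,
	 *						GFP_KERNEL);
	 *		if (ret)
	 *			goto err_free;
	 *
	 *		ret = dma_map_sgtable(attach->dev, sgt, dir, 0);
	 *		if (ret)
	 *			goto err_free_table;
	 *		return sgt;
	 *
	 *	err_free_table:
	 *		sg_free_table(sgt);
	 *	err_free:
	 *		kfree(sgt);
	 *		return ERR_PTR(ret);
	 *	}
	 */
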
	/**
	 * @unmap_dma_buf:
	 *
	 * This is called by dma_buf_unmap_attachment() and should unmap and
	 * release the &sg_table allocated in @map_dma_buf, and it is mandatory.
	 * For static dma_buf handling this might also unpin the backing
	 * storage if this is the last mapping of the DMA buffer.
	 */
	void (*unmap_dma_buf)(struct dma_buf_attachment *,
			      struct sg_table *,
			      enum dma_data_direction);

	/* TODO: Add try_map_dma_buf version, to return immediately with
	 * -EBUSY if the call would block.
	 */

	/**
	 * @release:
	 *
	 * Called after the last dma_buf_put() to release the &dma_buf. This
	 * callback is mandatory.
	 */
	void (*release)(struct dma_buf *);

	/**
	 * @begin_cpu_access:
	 *
	 * This is called from dma_buf_begin_cpu_access() and allows the
	 * exporter to ensure that the memory is actually coherent for cpu
	 * access. The exporter also needs to ensure that cpu access is coherent
	 * for the access direction. The direction can be used by the exporter
	 * to optimize the cache flushing, i.e. access with a different
	 * direction (read instead of write) might return stale or even bogus
	 * data (e.g. when the exporter needs to copy the data to temporary
	 * storage).
	 *
	 * Note that this is both called through the DMA_BUF_IOCTL_SYNC IOCTL
	 * command for userspace mappings established through @mmap, and also
	 * for kernel mappings established with @vmap.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure. This can for
	 * example fail when the backing storage can't be allocated. Can also
	 * return -ERESTARTSYS or -EINTR when the call has been interrupted and
	 * needs to be restarted.
	 */
	int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction);
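
	/*
	 * A minimal sketch of @begin_cpu_access for a hypothetical exporter
	 * whose buffer is currently mapped for a single device (my_buffer is
	 * an assumption): it hands cache ownership back to the CPU for the
	 * requested access direction.
	 *
	 *	static int my_begin_cpu_access(struct dma_buf *dmabuf,
	 *				       enum dma_data_direction dir)
	 *	{
	 *		struct my_buffer *buf = dmabuf->priv;
	 *
	 *		dma_sync_sgtable_for_cpu(buf->dev, buf->sgt, dir);
	 *		return 0;
	 *	}
	 */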

	/**
	 * @begin_cpu_access_partial:
	 *
	 * This is called from dma_buf_begin_cpu_access_partial() and allows the
	 * exporter to ensure that the memory specified in the range is
	 * available for cpu access - the exporter might need to allocate or
	 * swap-in and pin the backing storage.
	 * The exporter also needs to ensure that cpu access is
	 * coherent for the access direction. The direction can be used by the
	 * exporter to optimize the cache flushing, i.e. access with a different
	 * direction (read instead of write) might return stale or even bogus
	 * data (e.g. when the exporter needs to copy the data to temporary
	 * storage).
	 *
	 * This callback is optional.
	 *
	 * FIXME: This is both called through the DMA_BUF_IOCTL_SYNC command
	 * from userspace (where storage shouldn't be pinned to avoid handing
	 * de facto mlock rights to userspace) and for the kernel-internal
	 * users of the various kmap interfaces, where the backing storage must
	 * be pinned to guarantee that the atomic kmap calls can succeed. Since
	 * there are no in-kernel users of the kmap interfaces yet this isn't a
	 * real problem.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure. This can for
	 * example fail when the backing storage can't be allocated. Can also
	 * return -ERESTARTSYS or -EINTR when the call has been interrupted and
	 * needs to be restarted.
	 */
	int (*begin_cpu_access_partial)(struct dma_buf *dmabuf,
					enum dma_data_direction,
					unsigned int offset, unsigned int len);

	/**
	 * @end_cpu_access:
	 *
	 * This is called from dma_buf_end_cpu_access() when the importer is
	 * done accessing the buffer from the CPU. The exporter can use this to
	 * flush caches and undo anything else done in @begin_cpu_access.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure. Can return
	 * -ERESTARTSYS or -EINTR when the call has been interrupted and needs
	 * to be restarted.
	 */
	int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);

	/**
	 * @end_cpu_access_partial:
	 *
	 * This is called from dma_buf_end_cpu_access_partial() when the
	 * importer is done accessing the buffer from the CPU. The exporter can
	 * use this to limit cache flushing to only the range specified and to
	 * unpin any resources pinned in @begin_cpu_access_partial.
	 * The result of any dma_buf kmap calls after end_cpu_access_partial is
	 * undefined.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure. Can return
	 * -ERESTARTSYS or -EINTR when the call has been interrupted and needs
	 * to be restarted.
	 */
	int (*end_cpu_access_partial)(struct dma_buf *dmabuf,
				      enum dma_data_direction,
				      unsigned int offset, unsigned int len);

	/**
	 * @mmap:
	 *
	 * This callback is used by the dma_buf_mmap() function.
	 *
	 * Note that the mapping needs to be incoherent; userspace is expected
	 * to bracket CPU access using the DMA_BUF_IOCTL_SYNC interface.
	 *
	 * Because dma-buf buffers have invariant size over their lifetime, the
	 * dma-buf core checks whether a vma is too large and rejects such
	 * mappings. The exporter hence does not need to duplicate this check.
	 *
	 * If an exporter needs to manually flush caches and hence needs to fake
	 * coherency for mmap support, it needs to be able to zap all the ptes
	 * pointing at the backing storage. Now the Linux mm needs a struct
	 * address_space associated with the struct file stored in vma->vm_file
	 * to do that with the function unmap_mapping_range. But the dma_buf
	 * framework only backs every dma_buf fd with the anon_file struct file,
	 * i.e. all dma_bufs share the same file.
	 *
	 * Hence exporters need to set up their own file (and address_space)
	 * association by setting vma->vm_file and adjusting vma->vm_pgoff in
	 * the dma_buf mmap callback. In the specific case of a gem driver the
	 * exporter could use the shmem file already provided by gem (and set
	 * vm_pgoff = 0). Exporters can then zap ptes by unmapping the
	 * corresponding range of the struct address_space associated with their
	 * own file.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure.
	 */
	int (*mmap)(struct dma_buf *, struct vm_area_struct *vma);
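
	/*
	 * A minimal sketch of @mmap for a hypothetical exporter backed by a
	 * page array (my_buffer is an assumption), using vm_map_pages() to
	 * install the pages into the vma; the vma size check is already done
	 * by the dma-buf core.
	 *
	 *	static int my_mmap(struct dma_buf *dmabuf,
	 *			   struct vm_area_struct *vma)
	 *	{
	 *		struct my_buffer *buf = dmabuf->priv;
	 *
	 *		return vm_map_pages(vma, buf->pages, buf->nr_pages);
	 *	}
	 */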

	int (*vmap)(struct dma_buf *dmabuf, struct iosys_map *map);
	void (*vunmap)(struct dma_buf *dmabuf, struct iosys_map *map);

	/**
	 * @get_flags:
	 *
	 * This is called by dma_buf_get_flags() and is used to get the
	 * buffer's flags.
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure. On success, @flags
	 * will be populated with the buffer's flags.
	 */
	int (*get_flags)(struct dma_buf *dmabuf, unsigned long *flags);

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
};
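
/*
 * For illustration, a hypothetical exporter might wire its callbacks up like
 * this (all my_* functions are assumptions from the sketches above, not part
 * of this header):
 *
 *	static const struct dma_buf_ops my_dma_buf_ops = {
 *		.cache_sgt_mapping = true,
 *		.attach = my_attach,
 *		.map_dma_buf = my_map_dma_buf,
 *		.unmap_dma_buf = my_unmap_dma_buf,
 *		.release = my_release,
 *		.begin_cpu_access = my_begin_cpu_access,
 *		.mmap = my_mmap,
 *	};
 */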

/**
 * struct dma_buf - shared buffer object
 *
 * This represents a shared buffer, created by calling dma_buf_export(). The
 * userspace representation is a normal file descriptor, which can be created by
 * calling dma_buf_fd().
 *
 * Shared dma buffers are reference counted using dma_buf_put() and
 * get_dma_buf().
 *
 * Device DMA access is handled by the separate &struct dma_buf_attachment.
 */
struct dma_buf {
	/**
	 * @size:
	 *
	 * Size of the buffer; invariant over the lifetime of the buffer.
	 */
	size_t size;

	/**
	 * @file:
	 *
	 * File pointer used for sharing buffers across processes, and for
	 * refcounting. See dma_buf_get() and dma_buf_put().
	 */
	struct file *file;

	/**
	 * @attachments:
	 *
	 * List of dma_buf_attachment that denotes all devices attached,
	 * protected by &dma_resv lock @resv.
	 */
	struct list_head attachments;

	/** @ops: dma_buf_ops associated with this buffer object. */
	const struct dma_buf_ops *ops;

	/**
	 * @vmapping_counter:
	 *
	 * Used internally to refcnt the vmaps returned by dma_buf_vmap().
	 * Protected by @lock.
	 */
	unsigned vmapping_counter;

	/**
	 * @vmap_ptr:
	 * The current vmap ptr if @vmapping_counter > 0. Protected by @lock.
	 */
	struct iosys_map vmap_ptr;

	/**
	 * @exp_name:
	 *
	 * Name of the exporter; useful for debugging. Must not be NULL.
	 */
	const char *exp_name;

	/**
	 * @name:
	 *
	 * Userspace-provided name. Default value is NULL. If not NULL, its
	 * length, including the terminating NUL character, cannot exceed
	 * DMA_BUF_NAME_LEN. Useful for accounting and debugging. Read/write
	 * accesses are protected by @name_lock.
	 *
	 * See the IOCTLs DMA_BUF_SET_NAME or DMA_BUF_SET_NAME_A/B.
	 */
	const char *name;

	/** @name_lock: Spinlock to protect read access to @name. */
	spinlock_t name_lock;

	/**
	 * @owner:
	 *
	 * Pointer to exporter module; used for refcounting when exporter is a
	 * kernel module.
	 */
	struct module *owner;

	/** @list_node: node for dma_buf accounting and debugging. */
	struct list_head list_node;

	/** @priv: exporter specific private data for this buffer object. */
	void *priv;

	/**
	 * @resv:
	 *
	 * Reservation object linked to this dma-buf.
	 *
	 * IMPLICIT SYNCHRONIZATION RULES:
	 *
	 * Drivers which support implicit synchronization of buffer access as
	 * e.g. exposed in `Implicit Fence Poll Support`_ must follow the
	 * below rules.
	 *
	 * - Drivers must add a read fence through dma_resv_add_fence() with the
	 *   DMA_RESV_USAGE_READ flag for anything the userspace API considers a
	 *   read access. This highly depends upon the API and window system.
	 *
	 * - Similarly drivers must add a write fence through
	 *   dma_resv_add_fence() with the DMA_RESV_USAGE_WRITE flag for
	 *   anything the userspace API considers write access.
	 *
	 * - Drivers may just always add a write fence, since that only
	 *   causes unnecessary synchronization, but no correctness issues.
	 *
	 * - Some drivers only expose a synchronous userspace API with no
	 *   pipelining across drivers. These do not set any fences for their
	 *   access. An example here is v4l.
	 *
	 * - Drivers should use dma_resv_usage_rw() when retrieving fences as
	 *   dependencies for implicit synchronization. A sketch of the fence
	 *   publishing pattern follows the member declaration below.
	 *
	 * DYNAMIC IMPORTER RULES:
	 *
	 * Dynamic importers, see dma_buf_attachment_is_dynamic(), have
	 * additional constraints on how they set up fences:
	 *
	 * - Dynamic importers must obey the write fences and wait for them to
	 *   signal before allowing access to the buffer's underlying storage
	 *   through the device.
	 *
	 * - Dynamic importers should set fences for any access that they can't
	 *   disable immediately from their &dma_buf_attach_ops.move_notify
	 *   callback.
	 *
	 * IMPORTANT:
	 *
	 * All drivers and memory management related functions must obey the
	 * struct dma_resv rules, specifically the rules for updating and
	 * obeying fences. See enum dma_resv_usage for further descriptions.
	 */
	struct dma_resv *resv;
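
	/*
	 * For illustration, a driver implementing implicit synchronization
	 * might publish a fence for a queued write roughly like this (fence
	 * creation itself is driver-specific and assumed here):
	 *
	 *	dma_resv_lock(dmabuf->resv, NULL);
	 *	ret = dma_resv_reserve_fences(dmabuf->resv, 1);
	 *	if (!ret)
	 *		dma_resv_add_fence(dmabuf->resv, fence,
	 *				   DMA_RESV_USAGE_WRITE);
	 *	dma_resv_unlock(dmabuf->resv);
	 */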

	/** @poll: for userspace poll support */
	wait_queue_head_t poll;

	/** @cb_in: for userspace poll support */
	/** @cb_out: for userspace poll support */
	struct dma_buf_poll_cb_t {
		struct dma_fence_cb cb;
		wait_queue_head_t *poll;

		__poll_t active;
	} cb_in, cb_out;
#ifdef CONFIG_DMABUF_SYSFS_STATS
	/**
	 * @sysfs_entry:
	 *
	 * For exposing information about this buffer in sysfs. See also
	 * `DMA-BUF statistics`_ for the uapi this enables.
	 */
	struct dma_buf_sysfs_entry {
		union {
			struct kobject kobj;

			/** @sysfs_add_work:
			 *
			 * For deferred sysfs kobject creation using a workqueue.
			 */
			struct work_struct sysfs_add_work;
		};
		struct dma_buf *dmabuf;
	} *sysfs_entry;
#endif

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
	ANDROID_BACKPORT_RESERVE(1);
};

/**
 * struct dma_buf_attach_ops - importer operations for an attachment
 *
 * Attachment operations implemented by the importer.
 */
struct dma_buf_attach_ops {
	/**
	 * @allow_peer2peer:
	 *
	 * If this is set to true the importer must be able to handle peer
	 * resources without struct pages.
	 */
	bool allow_peer2peer;

	/**
	 * @move_notify: [optional] notification that the DMA-buf is moving
	 *
	 * If this callback is provided the framework can avoid pinning the
	 * backing store while mappings exist.
	 *
	 * This callback is called with the lock of the reservation object
	 * associated with the dma_buf held and the mapping function must be
	 * called with this lock held as well. This makes sure that no mapping
	 * is created concurrently with an ongoing move operation.
	 *
	 * Mappings stay valid and are not directly affected by this callback.
	 * But the DMA-buf can now be in a different physical location, so all
	 * mappings should be destroyed and re-created as soon as possible.
	 *
	 * New mappings can be created after this callback returns, and will
	 * point to the new location of the DMA-buf.
	 */
	void (*move_notify)(struct dma_buf_attachment *attach);

	ANDROID_KABI_RESERVE(1);
};
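
/*
 * For illustration, a hypothetical dynamic importer might implement
 * @move_notify by scheduling its mappings for invalidation (my_importer and
 * its helpers are assumptions):
 *
 *	static void my_move_notify(struct dma_buf_attachment *attach)
 *	{
 *		struct my_importer *imp = attach->importer_priv;
 *
 *		dma_resv_assert_held(attach->dmabuf->resv);
 *
 *		// Stop using the old location; mappings are re-created
 *		// lazily on next use, after waiting for the resv fences.
 *		my_importer_invalidate_mappings(imp);
 *	}
 *
 *	static const struct dma_buf_attach_ops my_attach_ops = {
 *		.allow_peer2peer = true,
 *		.move_notify = my_move_notify,
 *	};
 */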

/**
 * struct dma_buf_attachment - holds device-buffer attachment data
 * @dmabuf: buffer for this attachment.
 * @dev: device attached to the buffer.
 * @node: list of dma_buf_attachment, protected by dma_resv lock of the dmabuf.
 * @sgt: cached mapping.
 * @dir: direction of cached mapping.
 * @peer2peer: true if the importer can handle peer resources without pages.
 * @priv: exporter specific attachment data.
 * @importer_ops: importer operations for this attachment; if provided,
 * dma_buf_map/unmap_attachment() must be called with the dma_resv lock held.
 * @importer_priv: importer specific attachment data.
 * @dma_map_attrs: DMA attributes to be used when the exporter maps the buffer
 * through dma_buf_map_attachment().
 *
 * This structure holds the attachment information between the dma_buf buffer
 * and its user device(s). The list contains one attachment struct per device
 * attached to the buffer.
 *
 * An attachment is created by calling dma_buf_attach(), and released again by
 * calling dma_buf_detach(). The DMA mapping itself needed to initiate a
 * transfer is created by dma_buf_map_attachment() and freed again by calling
 * dma_buf_unmap_attachment().
 */
struct dma_buf_attachment {
	struct dma_buf *dmabuf;
	struct device *dev;
	struct list_head node;
	struct sg_table *sgt;
	enum dma_data_direction dir;
	bool peer2peer;
	const struct dma_buf_attach_ops *importer_ops;
	void *importer_priv;
	void *priv;
	unsigned long dma_map_attrs;

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
};

/**
 * struct dma_buf_export_info - holds information needed to export a dma_buf
 * @exp_name:	name of the exporter - useful for debugging.
 * @owner:	pointer to exporter module - used for refcounting kernel module
 * @ops:	Attach allocator-defined dma buf ops to the new buffer
 * @size:	Size of the buffer - invariant over the lifetime of the buffer
 * @flags:	mode flags for the file
 * @resv:	reservation-object, NULL to allocate default one
 * @priv:	Attach private data of allocator to this buffer
 *
 * This structure holds the information required to export the buffer. Used
 * with dma_buf_export() only.
 */
struct dma_buf_export_info {
	const char *exp_name;
	struct module *owner;
	const struct dma_buf_ops *ops;
	size_t size;
	int flags;
	struct dma_resv *resv;
	void *priv;

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
};

#if IS_ENABLED(CONFIG_DEBUG_FS)
int get_dmabuf_debugfs_data(int (*fn)(const struct dma_buf *, void *),
			void *private);
#endif

/**
 * DEFINE_DMA_BUF_EXPORT_INFO - helper macro for exporters
 * @name: export-info name
 *
 * DEFINE_DMA_BUF_EXPORT_INFO macro defines the &struct dma_buf_export_info,
 * zeroes it out and pre-populates exp_name and owner in it.
 */
#define DEFINE_DMA_BUF_EXPORT_INFO(name)	\
	struct dma_buf_export_info name = { .exp_name = KBUILD_MODNAME, \
					 .owner = THIS_MODULE }
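
/*
 * For illustration, a hypothetical exporter would typically export a buffer
 * like this (my_dma_buf_ops and my_buffer are assumptions from the sketches
 * above):
 *
 *	struct dma_buf *my_export(struct my_buffer *buf)
 *	{
 *		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *
 *		exp_info.ops = &my_dma_buf_ops;
 *		exp_info.size = buf->nr_pages << PAGE_SHIFT;
 *		exp_info.flags = O_RDWR;
 *		exp_info.priv = buf;
 *
 *		return dma_buf_export(&exp_info);
 *	}
 */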

/**
 * get_dma_buf - convenience wrapper for get_file.
 * @dmabuf:	[in]	pointer to dma_buf
 *
 * Increments the reference count on the dma-buf, needed by drivers that
 * create additional references to the dmabuf on the kernel side. For
 * example, an exporter that needs to keep a dmabuf ptr so that subsequent
 * exports don't create a new dmabuf.
 */
static inline void get_dma_buf(struct dma_buf *dmabuf)
{
	get_file(dmabuf->file);
}

/**
 * dma_buf_is_dynamic - check if a DMA-buf uses dynamic mappings.
 * @dmabuf: the DMA-buf to check
 *
 * Returns true if a DMA-buf exporter wants to be called with the dma_resv
 * locked for the map/unmap callbacks, false if it doesn't want to be called
 * with the lock held.
 */
static inline bool dma_buf_is_dynamic(struct dma_buf *dmabuf)
{
	return !!dmabuf->ops->pin;
}

/**
 * dma_buf_attachment_is_dynamic - check if a DMA-buf attachment uses dynamic
 * mappings
 * @attach: the DMA-buf attachment to check
 *
 * Returns true if a DMA-buf importer wants to call the map/unmap functions with
 * the dma_resv lock held.
 */
static inline bool
dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach)
{
	return !!attach->importer_ops;
}

int is_dma_buf_file(struct file *file);
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					  struct device *dev);
struct dma_buf_attachment *
dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
		       const struct dma_buf_attach_ops *importer_ops,
		       void *importer_priv);
void dma_buf_detach(struct dma_buf *dmabuf,
		    struct dma_buf_attachment *attach);
int dma_buf_pin(struct dma_buf_attachment *attach);
void dma_buf_unpin(struct dma_buf_attachment *attach);

struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info);

int dma_buf_fd(struct dma_buf *dmabuf, int flags);
struct dma_buf *dma_buf_get(int fd);
void dma_buf_put(struct dma_buf *dmabuf);

struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
					enum dma_data_direction);
void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
				enum dma_data_direction);
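
/*
 * For illustration, a typical non-dynamic importer brackets device access
 * roughly like this, using the unlocked variants declared below (dev and fd
 * are assumed to come from the caller; error handling elided):
 *
 *	struct dma_buf *dmabuf = dma_buf_get(fd);
 *	struct dma_buf_attachment *attach;
 *	struct sg_table *sgt;
 *
 *	attach = dma_buf_attach(dmabuf, dev);
 *	sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
 *	// ... program the device with the addresses in sgt ...
 *	dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(dmabuf, attach);
 *	dma_buf_put(dmabuf);
 */
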
void dma_buf_move_notify(struct dma_buf *dma_buf);
int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
			     enum dma_data_direction dir);
int dma_buf_begin_cpu_access_partial(struct dma_buf *dma_buf,
				     enum dma_data_direction dir,
				     unsigned int offset, unsigned int len);
int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
			   enum dma_data_direction dir);
int dma_buf_end_cpu_access_partial(struct dma_buf *dma_buf,
				     enum dma_data_direction dir,
				     unsigned int offset, unsigned int len);
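
/*
 * For illustration, kernel CPU access to an imported buffer is bracketed
 * like this (error handling elided):
 *
 *	dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *	// ... read the buffer contents through a vmap or mmap ...
 *	dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
 */
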
struct sg_table *
dma_buf_map_attachment_unlocked(struct dma_buf_attachment *attach,
				enum dma_data_direction direction);
void dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment *attach,
				       struct sg_table *sg_table,
				       enum dma_data_direction direction);

int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
		 unsigned long);
int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map);
void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map);
int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map);
void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map);
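
/*
 * For illustration, a kernel mapping through the vmap interface uses a
 * struct iosys_map to describe the returned pointer (data and len are
 * assumed to come from the caller; error handling elided):
 *
 *	struct iosys_map map;
 *
 *	dma_buf_vmap_unlocked(dmabuf, &map);
 *	memcpy(data, map.vaddr, len);	// assuming system memory, not I/O
 *	dma_buf_vunmap_unlocked(dmabuf, &map);
 */
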
long dma_buf_set_name(struct dma_buf *dmabuf, const char *name);
int dma_buf_get_flags(struct dma_buf *dmabuf, unsigned long *flags);
struct dma_buf *dma_buf_iter_begin(void);
struct dma_buf *dma_buf_iter_next(struct dma_buf *dmabuf);
#endif /* __DMA_BUF_H__ */