/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Header file for dma buffer sharing framework.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to linaro-mm-sig list, and especially
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
 * refining of this idea.
 */
#ifndef __DMA_BUF_H__
#define __DMA_BUF_H__

#include <linux/dma-buf-map.h>
#include <linux/file.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/dma-fence.h>
#include <linux/wait.h>
#include <linux/android_kabi.h>
#include <linux/workqueue.h>

struct device;
struct dma_buf;
struct dma_buf_attachment;

/**
 * struct dma_buf_ops - operations possible on struct dma_buf
 * @vmap: [optional] creates a virtual mapping for the buffer into kernel
 *	  address space. Same restrictions as for vmap and friends apply.
 * @vunmap: [optional] unmaps a vmap from the buffer
 */
struct dma_buf_ops {
	/**
	 * @cache_sgt_mapping:
	 *
	 * If true the framework will cache the first mapping made for each
	 * attachment. This avoids creating mappings for attachments multiple
	 * times.
	 */
	bool cache_sgt_mapping;

	/**
	 * @attach:
	 *
	 * This is called from dma_buf_attach() to make sure that a given
	 * &dma_buf_attachment.dev can access the provided &dma_buf. Exporters
	 * which support buffer objects in special locations like VRAM or
	 * device-specific carveout areas should check whether the buffer could
	 * be moved to system memory (or directly accessed by the provided
	 * device), and otherwise need to fail the attach operation.
	 *
	 * The exporter should also in general check whether the current
	 * allocation fulfills the DMA constraints of the new device. If this
	 * is not the case, and the allocation cannot be moved, it should also
	 * fail the attach operation.
	 *
	 * Any exporter-private housekeeping data can be stored in the
	 * &dma_buf_attachment.priv pointer.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success, negative error code on failure. It might return -EBUSY
	 * to signal that backing storage is already allocated and incompatible
	 * with the requirements of the requesting device.
	 */
	int (*attach)(struct dma_buf *, struct dma_buf_attachment *);

	/**
	 * @detach:
	 *
	 * This is called by dma_buf_detach() to release a &dma_buf_attachment.
	 * Provided so that exporters can clean up any housekeeping for an
	 * &dma_buf_attachment.
	 *
	 * This callback is optional.
	 */
	void (*detach)(struct dma_buf *, struct dma_buf_attachment *);
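
	/*
	 * Example: a minimal sketch of an @attach implementation for an
	 * exporter that keeps buffers in system memory. The struct my_buffer
	 * type and the my_can_migrate() helper are hypothetical, for
	 * illustration only:
	 *
	 *	static int my_attach(struct dma_buf *dmabuf,
	 *			     struct dma_buf_attachment *attach)
	 *	{
	 *		struct my_buffer *buf = dmabuf->priv;
	 *
	 *		// Fail if the buffer sits in VRAM and cannot be moved
	 *		// somewhere attach->dev can reach.
	 *		if (buf->in_vram && !my_can_migrate(buf, attach->dev))
	 *			return -EBUSY;
	 *
	 *		attach->priv = buf;
	 *		return 0;
	 *	}
	 */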

	/**
	 * @pin:
	 *
	 * This is called by dma_buf_pin() and lets the exporter know that the
	 * DMA-buf can't be moved any more. The exporter should pin the buffer
	 * into system memory to make sure it is generally accessible by other
	 * devices.
	 *
	 * This is called with the &dmabuf.resv object locked and is mutually
	 * exclusive with @cache_sgt_mapping.
	 *
	 * This is called automatically for non-dynamic importers from
	 * dma_buf_attach().
	 *
	 * Note that similar to non-dynamic exporters in their @map_dma_buf
	 * callback the driver must guarantee that the memory is available for
	 * use and cleared of any old data by the time this function returns.
	 * Drivers which pipeline their buffer moves internally must wait for
	 * all moves and clears to complete.
	 *
	 * Returns:
	 *
	 * 0 on success, negative error code on failure.
	 */
	int (*pin)(struct dma_buf_attachment *attach);

	/**
	 * @unpin:
	 *
	 * This is called by dma_buf_unpin() and lets the exporter know that the
	 * DMA-buf can be moved again.
	 *
	 * This is called with the dmabuf->resv object locked and is mutually
	 * exclusive with @cache_sgt_mapping.
	 *
	 * This callback is optional.
	 */
	void (*unpin)(struct dma_buf_attachment *attach);

	/**
	 * @map_dma_buf:
	 *
	 * This is called by dma_buf_map_attachment() and is used to map a
	 * shared &dma_buf into device address space, and it is mandatory. It
	 * can only be called if @attach has been called successfully.
	 *
	 * This call may sleep, e.g. when the backing storage first needs to be
	 * allocated, or moved to a location suitable for all currently attached
	 * devices.
	 *
	 * Note that any specific buffer attributes required for this function
	 * should get added to device_dma_parameters accessible via
	 * &device.dma_parms from the &dma_buf_attachment. The @attach callback
	 * should also check these constraints.
	 *
	 * If this is being called for the first time, the exporter can now
	 * choose to scan through the list of attachments for this buffer,
	 * collate the requirements of the attached devices, and choose an
	 * appropriate backing storage for the buffer.
	 *
	 * Based on enum dma_data_direction, it might be possible to have
	 * multiple users accessing at the same time (for reading, maybe), or
	 * any other kind of sharing that the exporter might wish to make
	 * available to buffer-users.
	 *
	 * This is always called with the dmabuf->resv object locked when
	 * the dynamic_mapping flag is true.
	 *
	 * Note that for non-dynamic exporters the driver must guarantee that
	 * the memory is available for use and cleared of any old data by
	 * the time this function returns.  Drivers which pipeline their buffer
	 * moves internally must wait for all moves and clears to complete.
	 * Dynamic exporters do not need to follow this rule: For non-dynamic
	 * importers the buffer is already pinned through @pin, which has the
	 * same requirements. Dynamic importers, on the other hand, are
	 * required to obey the dma_resv fences.
	 *
	 * Returns:
	 *
	 * A &sg_table scatter list of the backing storage of the DMA buffer,
	 * already mapped into the device address space of the &device attached
	 * with the provided &dma_buf_attachment. The addresses and lengths in
	 * the scatter list are PAGE_SIZE aligned.
	 *
	 * On failure, returns a negative error value wrapped into a pointer.
	 * May also return -EINTR when a signal was received while being
	 * blocked.
	 *
	 * Note that exporters should not try to cache the scatter list, or
	 * return the same one for multiple calls. Caching is done either by the
	 * DMA-BUF code (for non-dynamic importers) or the importer. Ownership
	 * of the scatter list is transferred to the caller, and returned by
	 * @unmap_dma_buf.
	 */
	struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *,
					 enum dma_data_direction);
	/**
	 * @unmap_dma_buf:
	 *
	 * This is called by dma_buf_unmap_attachment() and should unmap and
	 * release the &sg_table allocated in @map_dma_buf, and it is mandatory.
	 * For static dma_buf handling this might also unpin the backing
	 * storage if this is the last mapping of the DMA buffer.
	 */
	void (*unmap_dma_buf)(struct dma_buf_attachment *,
			      struct sg_table *,
			      enum dma_data_direction);
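
	/*
	 * Example: a sketch of a @map_dma_buf implementation for a
	 * page-backed exporter. The my_get_sgt()/my_put_sgt() helpers are
	 * hypothetical and stand in for however the exporter builds and
	 * frees an &sg_table for its pages:
	 *
	 *	static struct sg_table *
	 *	my_map_dma_buf(struct dma_buf_attachment *attach,
	 *		       enum dma_data_direction dir)
	 *	{
	 *		struct sg_table *sgt = my_get_sgt(attach->dmabuf->priv);
	 *		int ret;
	 *
	 *		if (IS_ERR(sgt))
	 *			return sgt;
	 *
	 *		// Map into the attached device's address space.
	 *		ret = dma_map_sgtable(attach->dev, sgt, dir, 0);
	 *		if (ret) {
	 *			my_put_sgt(sgt);
	 *			return ERR_PTR(ret);
	 *		}
	 *		return sgt;
	 *	}
	 */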

	/* TODO: Add try_map_dma_buf version, to return immediately with
	 * -EBUSY if the call would block.
	 */

	/**
	 * @release:
	 *
	 * Called after the last dma_buf_put to release the &dma_buf, and
	 * mandatory.
	 */
	void (*release)(struct dma_buf *);

	/**
	 * @begin_cpu_access:
	 *
	 * This is called from dma_buf_begin_cpu_access() and allows the
	 * exporter to ensure that the memory is actually coherent for cpu
	 * access. The exporter also needs to ensure that cpu access is coherent
	 * for the access direction. The direction can be used by the exporter
	 * to optimize the cache flushing, i.e. access with a different
	 * direction (read instead of write) might return stale or even bogus
	 * data (e.g. when the exporter needs to copy the data to temporary
	 * storage).
	 *
	 * Note that this is both called through the DMA_BUF_IOCTL_SYNC IOCTL
	 * command for userspace mappings established through @mmap, and also
	 * for kernel mappings established with @vmap.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure. This can for
	 * example fail when the backing storage can't be allocated. Can also
	 * return -ERESTARTSYS or -EINTR when the call has been interrupted and
	 * needs to be restarted.
	 */
	int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction);
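
	/*
	 * Example: a sketch of @begin_cpu_access for an exporter with a
	 * cached, device-mapped buffer; the my_buffer fields (dev, sgt)
	 * are hypothetical:
	 *
	 *	static int my_begin_cpu_access(struct dma_buf *dmabuf,
	 *				       enum dma_data_direction dir)
	 *	{
	 *		struct my_buffer *buf = dmabuf->priv;
	 *
	 *		// Make device writes visible to the CPU; the matching
	 *		// end_cpu_access would sync back for the device.
	 *		dma_sync_sgtable_for_cpu(buf->dev, buf->sgt, dir);
	 *		return 0;
	 *	}
	 */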

	/**
	 * @begin_cpu_access_partial:
	 *
	 * This is called from dma_buf_begin_cpu_access_partial() and allows the
	 * exporter to ensure that the memory specified in the range is
	 * available for cpu access - the exporter might need to allocate or
	 * swap-in and pin the backing storage.
	 * The exporter also needs to ensure that cpu access is
	 * coherent for the access direction. The direction can be used by the
	 * exporter to optimize the cache flushing, i.e. access with a different
	 * direction (read instead of write) might return stale or even bogus
	 * data (e.g. when the exporter needs to copy the data to temporary
	 * storage).
	 *
	 * This callback is optional.
	 *
	 * FIXME: This is both called through the DMA_BUF_IOCTL_SYNC command
	 * from userspace (where storage shouldn't be pinned to avoid handing
	 * de-facto mlock rights to userspace) and for the kernel-internal
	 * users of the various kmap interfaces, where the backing storage must
	 * be pinned to guarantee that the atomic kmap calls can succeed. Since
	 * there are no in-kernel users of the kmap interfaces yet this isn't a
	 * real problem.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure. This can for
	 * example fail when the backing storage can't be allocated. Can also
	 * return -ERESTARTSYS or -EINTR when the call has been interrupted and
	 * needs to be restarted.
	 */
	int (*begin_cpu_access_partial)(struct dma_buf *dmabuf,
					enum dma_data_direction,
					unsigned int offset, unsigned int len);

	/**
	 * @end_cpu_access:
	 *
	 * This is called from dma_buf_end_cpu_access() when the importer is
	 * done accessing the CPU. The exporter can use this to flush caches and
	 * undo anything else done in @begin_cpu_access.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure. Can return
	 * -ERESTARTSYS or -EINTR when the call has been interrupted and needs
	 * to be restarted.
	 */
	int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);

	/**
	 * @end_cpu_access_partial:
	 *
	 * This is called from dma_buf_end_cpu_access_partial() when the
	 * importer is done accessing the CPU. The exporter can use this to
	 * limit cache flushing to only the range specified and to unpin any
	 * resources pinned in @begin_cpu_access_partial.
	 * The result of any dma_buf kmap calls after end_cpu_access_partial is
	 * undefined.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure. Can return
	 * -ERESTARTSYS or -EINTR when the call has been interrupted and needs
	 * to be restarted.
	 */
	int (*end_cpu_access_partial)(struct dma_buf *dmabuf,
				      enum dma_data_direction,
				      unsigned int offset, unsigned int len);

	/**
	 * @mmap:
	 *
	 * This callback is used by the dma_buf_mmap() function.
	 *
	 * Note that the mapping needs to be incoherent, userspace is expected
	 * to bracket CPU access using the DMA_BUF_IOCTL_SYNC interface.
	 *
	 * Because dma-buf buffers have invariant size over their lifetime, the
	 * dma-buf core checks whether a vma is too large and rejects such
	 * mappings. The exporter hence does not need to duplicate this check.
	 *
	 * If an exporter needs to manually flush caches and hence needs to fake
	 * coherency for mmap support, it needs to be able to zap all the ptes
	 * pointing at the backing storage. Now the Linux mm needs a struct
	 * address_space associated with the struct file stored in vma->vm_file
	 * to do that with the function unmap_mapping_range. But the dma_buf
	 * framework only backs every dma_buf fd with the anon_file struct file,
	 * i.e. all dma_bufs share the same file.
	 *
	 * Hence exporters need to set up their own file (and address_space)
	 * association by setting vma->vm_file and adjusting vma->vm_pgoff in
	 * the dma_buf mmap callback. In the specific case of a gem driver the
	 * exporter could use the shmem file already provided by gem (and set
	 * vm_pgoff = 0). Exporters can then zap ptes by unmapping the
	 * corresponding range of the struct address_space associated with their
	 * own file.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure.
	 */
	int (*mmap)(struct dma_buf *, struct vm_area_struct *vma);
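
	/*
	 * Example: a sketch of @mmap for an exporter backed by physically
	 * contiguous memory; buf->paddr is a hypothetical field holding the
	 * buffer's physical base address:
	 *
	 *	static int my_mmap(struct dma_buf *dmabuf,
	 *			   struct vm_area_struct *vma)
	 *	{
	 *		struct my_buffer *buf = dmabuf->priv;
	 *
	 *		// The dma-buf core has already checked the vma size.
	 *		return remap_pfn_range(vma, vma->vm_start,
	 *				       PHYS_PFN(buf->paddr) + vma->vm_pgoff,
	 *				       vma->vm_end - vma->vm_start,
	 *				       vma->vm_page_prot);
	 *	}
	 */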

	int (*vmap)(struct dma_buf *dmabuf, struct dma_buf_map *map);
	void (*vunmap)(struct dma_buf *dmabuf, struct dma_buf_map *map);
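
	/*
	 * Example: a sketch of @vmap for a page-backed exporter, assuming
	 * hypothetical buf->pages/buf->num_pages fields:
	 *
	 *	static int my_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
	 *	{
	 *		struct my_buffer *buf = dmabuf->priv;
	 *		void *vaddr = vmap(buf->pages, buf->num_pages,
	 *				   VM_MAP, PAGE_KERNEL);
	 *
	 *		if (!vaddr)
	 *			return -ENOMEM;
	 *		dma_buf_map_set_vaddr(map, vaddr);
	 *		return 0;
	 *	}
	 */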

	/**
	 * @get_flags:
	 *
	 * This is called by dma_buf_get_flags() and is used to get the
	 * buffer's flags.
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure. On success flags
	 * will be populated with the buffer's flags.
	 */
	int (*get_flags)(struct dma_buf *dmabuf, unsigned long *flags);

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
};

/**
 * struct dma_buf - shared buffer object
 *
 * This represents a shared buffer, created by calling dma_buf_export(). The
 * userspace representation is a normal file descriptor, which can be created by
 * calling dma_buf_fd().
 *
 * Shared dma buffers are reference counted using dma_buf_put() and
 * get_dma_buf().
 *
 * Device DMA access is handled by the separate &struct dma_buf_attachment.
 */
struct dma_buf {
	/**
	 * @size:
	 *
	 * Size of the buffer; invariant over the lifetime of the buffer.
	 */
	size_t size;

	/**
	 * @file:
	 *
	 * File pointer used for sharing buffers across processes, and for
	 * refcounting. See dma_buf_get() and dma_buf_put().
	 */
	struct file *file;

	/**
	 * @attachments:
	 *
	 * List of dma_buf_attachment that denotes all devices attached,
	 * protected by &dma_resv lock @resv.
	 */
	struct list_head attachments;

	/** @ops: dma_buf_ops associated with this buffer object. */
	const struct dma_buf_ops *ops;

	/**
	 * @lock:
	 *
	 * Used internally to serialize list manipulation, attach/detach and
	 * vmap/unmap. Note that in many cases this is superseded by
	 * dma_resv_lock() on @resv.
	 */
	struct mutex lock;

	/**
	 * @vmapping_counter:
	 *
	 * Used internally to refcount the vmaps returned by dma_buf_vmap().
	 * Protected by @lock.
	 */
	unsigned vmapping_counter;

	/**
	 * @vmap_ptr:
	 * The current vmap ptr if @vmapping_counter > 0. Protected by @lock.
	 */
	struct dma_buf_map vmap_ptr;

	/**
	 * @exp_name:
	 *
	 * Name of the exporter; useful for debugging. See the
	 * DMA_BUF_SET_NAME IOCTL.
	 */
	const char *exp_name;

	/**
	 * @name:
	 *
	 * Userspace-provided name; useful for accounting and debugging,
	 * protected by dma_resv_lock() on @resv and @name_lock for read access.
	 */
	const char *name;

	/** @name_lock: Spinlock to protect @name for read access. */
	spinlock_t name_lock;

	/**
	 * @owner:
	 *
	 * Pointer to exporter module; used for refcounting when exporter is a
	 * kernel module.
	 */
	struct module *owner;

	/** @list_node: node for dma_buf accounting and debugging. */
	struct list_head list_node;

	/** @priv: exporter specific private data for this buffer object. */
	void *priv;

	/**
	 * @resv:
	 *
	 * Reservation object linked to this dma-buf.
	 *
	 * IMPLICIT SYNCHRONIZATION RULES:
	 *
	 * Drivers which support implicit synchronization of buffer access as
	 * e.g. exposed in `Implicit Fence Poll Support`_ must follow the
	 * below rules.
	 *
	 * - Drivers must add a shared fence through dma_resv_add_shared_fence()
	 *   for anything the userspace API considers a read access. This highly
	 *   depends upon the API and window system.
	 *
	 * - Similarly drivers must set the exclusive fence through
	 *   dma_resv_add_excl_fence() for anything the userspace API considers
	 *   write access.
	 *
	 * - Drivers may just always set the exclusive fence, since that only
	 *   causes unnecessary synchronization, but no correctness issues.
	 *
	 * - Some drivers only expose a synchronous userspace API with no
	 *   pipelining across drivers. These do not set any fences for their
	 *   access. An example here is v4l.
	 *
	 * DYNAMIC IMPORTER RULES:
	 *
	 * Dynamic importers, see dma_buf_attachment_is_dynamic(), have
	 * additional constraints on how they set up fences:
	 *
	 * - Dynamic importers must obey the exclusive fence and wait for it to
	 *   signal before allowing access to the buffer's underlying storage
	 *   through the device.
	 *
	 * - Dynamic importers should set fences for any access that they can't
	 *   disable immediately from their &dma_buf_attach_ops.move_notify
	 *   callback.
	 */
	struct dma_resv *resv;
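
	/*
	 * Example: a sketch of the fence bookkeeping described in the @resv
	 * rules above, for a driver completing a device job on the buffer;
	 * job->fence and is_write are hypothetical:
	 *
	 *	dma_resv_lock(dmabuf->resv, NULL);
	 *	if (is_write) {
	 *		dma_resv_add_excl_fence(dmabuf->resv, job->fence);
	 *	} else if (!dma_resv_reserve_shared(dmabuf->resv, 1)) {
	 *		dma_resv_add_shared_fence(dmabuf->resv, job->fence);
	 *	}
	 *	dma_resv_unlock(dmabuf->resv);
	 */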

	/** @poll: for userspace poll support */
	wait_queue_head_t poll;

	/** @cb_in: for userspace poll support */
	/** @cb_out: for userspace poll support */
	struct dma_buf_poll_cb_t {
		struct dma_fence_cb cb;
		wait_queue_head_t *poll;

		__poll_t active;
	} cb_in, cb_out;
#ifdef CONFIG_DMABUF_SYSFS_STATS
	/**
	 * @sysfs_entry:
	 *
	 * For exposing information about this buffer in sysfs. See also
	 * `DMA-BUF statistics`_ for the uapi this enables.
	 */
	struct dma_buf_sysfs_entry {
		union {
			struct kobject kobj;

			/** @sysfs_add_work:
			 *
			 * For deferred sysfs kobject creation using a workqueue.
			 */
			struct work_struct sysfs_add_work;
		};
		struct dma_buf *dmabuf;
	} *sysfs_entry;
#endif

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
};

/**
 * struct dma_buf_attach_ops - importer operations for an attachment
 *
 * Attachment operations implemented by the importer.
 */
struct dma_buf_attach_ops {
	/**
	 * @allow_peer2peer:
	 *
	 * If this is set to true the importer must be able to handle peer
	 * resources without struct pages.
	 */
	bool allow_peer2peer;

	/**
	 * @move_notify: [optional] notification that the DMA-buf is moving
	 *
	 * If this callback is provided the framework can avoid pinning the
	 * backing store while mappings exist.
	 *
	 * This callback is called with the lock of the reservation object
	 * associated with the dma_buf held and the mapping function must be
	 * called with this lock held as well. This makes sure that no mapping
	 * is created concurrently with an ongoing move operation.
	 *
	 * Mappings stay valid and are not directly affected by this callback.
	 * But the DMA-buf can now be in a different physical location, so all
	 * mappings should be destroyed and re-created as soon as possible.
	 *
	 * New mappings can be created after this callback returns, and will
	 * point to the new location of the DMA-buf.
	 */
	void (*move_notify)(struct dma_buf_attachment *attach);
	ANDROID_KABI_RESERVE(1);
};
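
/*
 * Example: a sketch of a dynamic importer wiring up &dma_buf_attach_ops;
 * my_move_notify(), my_invalidate_mapping() and struct my_importer are
 * hypothetical:
 *
 *	static void my_move_notify(struct dma_buf_attachment *attach)
 *	{
 *		struct my_importer *imp = attach->importer_priv;
 *
 *		// Called with dmabuf->resv locked - just invalidate here,
 *		// the mapping is re-created on next use under the same lock.
 *		my_invalidate_mapping(imp);
 *	}
 *
 *	static const struct dma_buf_attach_ops my_attach_ops = {
 *		.allow_peer2peer = true,
 *		.move_notify = my_move_notify,
 *	};
 */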

/**
 * struct dma_buf_attachment - holds device-buffer attachment data
 * @dmabuf: buffer for this attachment.
 * @dev: device attached to the buffer.
 * @node: list of dma_buf_attachment, protected by dma_resv lock of the dmabuf.
 * @sgt: cached mapping.
 * @dir: direction of cached mapping.
 * @peer2peer: true if the importer can handle peer resources without pages.
 * @priv: exporter specific attachment data.
 * @importer_ops: importer operations for this attachment; if provided,
 * dma_buf_map/unmap_attachment() must be called with the dma_resv lock held.
 * @importer_priv: importer specific attachment data.
 * @dma_map_attrs: DMA attributes to be used when the exporter maps the buffer
 * through dma_buf_map_attachment().
 *
 * This structure holds the attachment information between the dma_buf buffer
 * and its user device(s). The list contains one attachment struct per device
 * attached to the buffer.
 *
 * An attachment is created by calling dma_buf_attach(), and released again by
 * calling dma_buf_detach(). The DMA mapping itself needed to initiate a
 * transfer is created by dma_buf_map_attachment() and freed again by calling
 * dma_buf_unmap_attachment().
 */
struct dma_buf_attachment {
	struct dma_buf *dmabuf;
	struct device *dev;
	struct list_head node;
	struct sg_table *sgt;
	enum dma_data_direction dir;
	bool peer2peer;
	const struct dma_buf_attach_ops *importer_ops;
	void *importer_priv;
	void *priv;
	unsigned long dma_map_attrs;

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
};

/**
 * struct dma_buf_export_info - holds information needed to export a dma_buf
 * @exp_name:	name of the exporter - useful for debugging.
 * @owner:	pointer to exporter module - used for refcounting kernel module
 * @ops:	Attach allocator-defined dma buf ops to the new buffer
 * @size:	Size of the buffer - invariant over the lifetime of the buffer
 * @flags:	mode flags for the file
 * @resv:	reservation-object, NULL to allocate default one
 * @priv:	Attach private data of allocator to this buffer
 *
 * This structure holds the information required to export the buffer. Used
 * with dma_buf_export() only.
 */
struct dma_buf_export_info {
	const char *exp_name;
	struct module *owner;
	const struct dma_buf_ops *ops;
	size_t size;
	int flags;
	struct dma_resv *resv;
	void *priv;

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
};

/**
 * DEFINE_DMA_BUF_EXPORT_INFO - helper macro for exporters
 * @name: export-info name
 *
 * DEFINE_DMA_BUF_EXPORT_INFO macro defines the &struct dma_buf_export_info,
 * zeroes it out and pre-populates exp_name in it.
 */
#define DEFINE_DMA_BUF_EXPORT_INFO(name)	\
	struct dma_buf_export_info name = { .exp_name = KBUILD_MODNAME, \
					 .owner = THIS_MODULE }
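
/*
 * Example: a minimal export sequence using the helper macro; my_buf_ops
 * and buf are hypothetical exporter objects:
 *
 *	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *	struct dma_buf *dmabuf;
 *
 *	exp_info.ops = &my_buf_ops;
 *	exp_info.size = buf->size;
 *	exp_info.flags = O_RDWR;
 *	exp_info.priv = buf;
 *
 *	dmabuf = dma_buf_export(&exp_info);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 */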

/**
 * get_dma_buf - convenience wrapper for get_file.
 * @dmabuf:	[in]	pointer to dma_buf
 *
 * Increments the reference count on the dma-buf, needed in case of drivers
 * that need to create additional references to the dmabuf on the kernel
 * side.  For example, an exporter that needs to keep a dmabuf ptr
 * so that subsequent exports don't create a new dmabuf.
 */
static inline void get_dma_buf(struct dma_buf *dmabuf)
{
	get_file(dmabuf->file);
}

/**
 * dma_buf_is_dynamic - check if a DMA-buf uses dynamic mappings.
 * @dmabuf: the DMA-buf to check
 *
 * Returns true if a DMA-buf exporter wants to be called with the dma_resv
 * locked for the map/unmap callbacks, false if it doesn't want to be called
 * with the lock held.
 */
static inline bool dma_buf_is_dynamic(struct dma_buf *dmabuf)
{
	return !!dmabuf->ops->pin;
}

/**
 * dma_buf_attachment_is_dynamic - check if a DMA-buf attachment uses dynamic
 * mappings
 * @attach: the DMA-buf attachment to check
 *
 * Returns true if a DMA-buf importer wants to call the map/unmap functions with
 * the dma_resv lock held.
 */
static inline bool
dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach)
{
	return !!attach->importer_ops;
}
int get_each_dmabuf(int (*callback)(const struct dma_buf *dmabuf,
		    void *private), void *private);
int is_dma_buf_file(struct file *file);
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					  struct device *dev);
struct dma_buf_attachment *
dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
		       const struct dma_buf_attach_ops *importer_ops,
		       void *importer_priv);
void dma_buf_detach(struct dma_buf *dmabuf,
		    struct dma_buf_attachment *attach);
int dma_buf_pin(struct dma_buf_attachment *attach);
void dma_buf_unpin(struct dma_buf_attachment *attach);

struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info);

int dma_buf_fd(struct dma_buf *dmabuf, int flags);
struct dma_buf *dma_buf_get(int fd);
void dma_buf_put(struct dma_buf *dmabuf);

struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
					enum dma_data_direction);
void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
				enum dma_data_direction);
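
/*
 * Example: the usual non-dynamic importer flow, from a file descriptor to
 * a device-mapped scatterlist and back again; fd and dev are assumed to be
 * provided by the caller, and error handling is elided for brevity:
 *
 *	struct dma_buf *dmabuf = dma_buf_get(fd);
 *	struct dma_buf_attachment *attach;
 *	struct sg_table *sgt;
 *
 *	attach = dma_buf_attach(dmabuf, dev);
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *
 *	// ... program the device using the entries of sgt ...
 *
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(dmabuf, attach);
 *	dma_buf_put(dmabuf);
 */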
void dma_buf_move_notify(struct dma_buf *dma_buf);
int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
			     enum dma_data_direction dir);
int dma_buf_begin_cpu_access_partial(struct dma_buf *dma_buf,
				     enum dma_data_direction dir,
				     unsigned int offset, unsigned int len);
int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
			   enum dma_data_direction dir);
int dma_buf_end_cpu_access_partial(struct dma_buf *dma_buf,
				     enum dma_data_direction dir,
				     unsigned int offset, unsigned int len);

int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
		 unsigned long);
int dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map);
void dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map);
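
/*
 * Example: kernel CPU access through a vmap, bracketed by the
 * begin/end_cpu_access calls needed for coherency:
 *
 *	struct dma_buf_map map;
 *	int ret;
 *
 *	ret = dma_buf_vmap(dmabuf, &map);
 *	if (ret)
 *		return ret;
 *
 *	dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *	// ... read through map.vaddr (or map.vaddr_iomem if map.is_iomem) ...
 *	dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *
 *	dma_buf_vunmap(dmabuf, &map);
 */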
long dma_buf_set_name(struct dma_buf *dmabuf, const char *name);
int dma_buf_get_flags(struct dma_buf *dmabuf, unsigned long *flags);
#endif /* __DMA_BUF_H__ */