/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Header file for dma buffer sharing framework.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to linaro-mm-sig list, and especially
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
 * refining of this idea.
 */
#ifndef __DMA_BUF_H__
#define __DMA_BUF_H__

#include <linux/file.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/dma-fence.h>
#include <linux/wait.h>

struct device;
struct dma_buf;
struct dma_buf_attachment;

/**
 * struct dma_buf_ops - operations possible on struct dma_buf
 * @vmap: [optional] creates a virtual mapping for the buffer into kernel
 *	  address space. Same restrictions as for vmap and friends apply.
 * @vunmap: [optional] unmaps a vmap from the buffer
 */
struct dma_buf_ops {
	/**
	 * @cache_sgt_mapping:
	 *
	 * If true the framework will cache the first mapping made for each
	 * attachment. This avoids creating mappings for attachments multiple
	 * times.
	 */
	bool cache_sgt_mapping;

	/**
	 * @attach:
	 *
	 * This is called from dma_buf_attach() to make sure that a given
	 * &dma_buf_attachment.dev can access the provided &dma_buf. Exporters
	 * which support buffer objects in special locations like VRAM or
	 * device-specific carveout areas should check whether the buffer could
	 * be moved to system memory (or directly accessed by the provided
	 * device), and otherwise need to fail the attach operation.
	 *
	 * The exporter should also in general check whether the current
	 * allocation fulfills the DMA constraints of the new device. If this
	 * is not the case, and the allocation cannot be moved, it should also
	 * fail the attach operation.
	 *
	 * Any exporter-private housekeeping data can be stored in the
	 * &dma_buf_attachment.priv pointer.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success, negative error code on failure. It might return -EBUSY
	 * to signal that backing storage is already allocated and incompatible
	 * with the requirements of the requesting device.
	 */
	int (*attach)(struct dma_buf *, struct dma_buf_attachment *);

	/**
	 * @detach:
	 *
	 * This is called by dma_buf_detach() to release a &dma_buf_attachment.
	 * Provided so that exporters can clean up any housekeeping for an
	 * &dma_buf_attachment.
	 *
	 * This callback is optional.
	 */
	void (*detach)(struct dma_buf *, struct dma_buf_attachment *);

	/**
	 * @pin:
	 *
	 * This is called by dma_buf_pin() and lets the exporter know that the
	 * DMA-buf can't be moved any more.
	 *
	 * This is called with the dmabuf->resv object locked and is mutually
	 * exclusive with @cache_sgt_mapping.
	 *
	 * This callback is optional and should only be used in limited use
	 * cases like scanout and not for temporary pin operations.
	 *
	 * Returns:
	 *
	 * 0 on success, negative error code on failure.
	 */
	int (*pin)(struct dma_buf_attachment *attach);

	/**
	 * @unpin:
	 *
	 * This is called by dma_buf_unpin() and lets the exporter know that the
	 * DMA-buf can be moved again.
	 *
	 * This is called with the dmabuf->resv object locked and is mutually
	 * exclusive with @cache_sgt_mapping.
	 *
	 * This callback is optional.
	 */
	void (*unpin)(struct dma_buf_attachment *attach);

	/**
	 * @map_dma_buf:
	 *
	 * This is called by dma_buf_map_attachment() and is used to map a
	 * shared &dma_buf into device address space, and it is mandatory. It
	 * can only be called if @attach has been called successfully.
	 *
	 * This call may sleep, e.g. when the backing storage first needs to be
	 * allocated, or moved to a location suitable for all currently attached
	 * devices.
	 *
	 * Note that any specific buffer attributes required for this function
	 * should get added to device_dma_parameters accessible via
	 * &device.dma_params from the &dma_buf_attachment. The @attach callback
	 * should also check these constraints.
	 *
	 * If this is being called for the first time, the exporter can now
	 * choose to scan through the list of attachments for this buffer,
	 * collate the requirements of the attached devices, and choose an
	 * appropriate backing storage for the buffer.
	 *
	 * Based on enum dma_data_direction, it might be possible to have
	 * multiple users accessing at the same time (for reading, maybe), or
	 * any other kind of sharing that the exporter might wish to make
	 * available to buffer-users.
	 *
	 * This is always called with the dmabuf->resv object locked when
	 * the dynamic_mapping flag is true.
	 *
	 * Returns:
	 *
	 * A &sg_table scatter list of the backing storage of the DMA buffer,
	 * already mapped into the device address space of the &device attached
	 * with the provided &dma_buf_attachment.
	 *
	 * On failure, returns a negative error value wrapped into a pointer.
	 * May also return -EINTR when a signal was received while being
	 * blocked.
	 */
	struct sg_table *(*map_dma_buf)(struct dma_buf_attachment *,
					enum dma_data_direction);
	/**
	 * @unmap_dma_buf:
	 *
	 * This is called by dma_buf_unmap_attachment() and should unmap and
	 * release the &sg_table allocated in @map_dma_buf, and it is mandatory.
	 * For static dma_buf handling this might also unpin the backing
	 * storage if this is the last mapping of the DMA buffer.
	 */
	void (*unmap_dma_buf)(struct dma_buf_attachment *, struct sg_table *,
			      enum dma_data_direction);

	/* TODO: Add try_map_dma_buf version, to return immediately with
	 * -EBUSY if the call would block.
	 */

	/**
	 * @release:
	 *
	 * Called after the last dma_buf_put to release the &dma_buf, and
	 * mandatory.
	 */
	void (*release)(struct dma_buf *);

	/**
	 * @begin_cpu_access:
	 *
	 * This is called from dma_buf_begin_cpu_access() and allows the
	 * exporter to ensure that the memory is actually available for cpu
	 * access - the exporter might need to allocate or swap-in and pin the
	 * backing storage. The exporter also needs to ensure that cpu access is
	 * coherent for the access direction. The direction can be used by the
	 * exporter to optimize the cache flushing, i.e. access with a different
	 * direction (read instead of write) might return stale or even bogus
	 * data (e.g. when the exporter needs to copy the data to temporary
	 * storage).
	 *
	 * This callback is optional.
	 *
	 * This is both called through the DMA_BUF_IOCTL_SYNC command
	 * from userspace (where storage shouldn't be pinned to avoid handing
	 * de facto mlock rights to userspace) and for the kernel-internal
	 * users of the various kmap interfaces, where the backing storage must
	 * be pinned to guarantee that the atomic kmap calls can succeed. Since
	 * there are no in-kernel users of the kmap interfaces yet this isn't a
	 * real problem.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure. This can for
	 * example fail when the backing storage can't be allocated. Can also
	 * return -ERESTARTSYS or -EINTR when the call has been interrupted and
	 * needs to be restarted.
	 */
	int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction);

	/**
	 * @begin_cpu_access_partial:
	 *
	 * This is called from dma_buf_begin_cpu_access_partial() and allows the
	 * exporter to ensure that the memory specified in the range is
	 * available for cpu access - the exporter might need to allocate or
	 * swap-in and pin the backing storage.
	 * The exporter also needs to ensure that cpu access is
	 * coherent for the access direction. The direction can be used by the
	 * exporter to optimize the cache flushing, i.e. access with a different
	 * direction (read instead of write) might return stale or even bogus
	 * data (e.g. when the exporter needs to copy the data to temporary
	 * storage).
	 *
	 * This callback is optional.
	 *
	 * This is both called through the DMA_BUF_IOCTL_SYNC command
	 * from userspace (where storage shouldn't be pinned to avoid handing
	 * de facto mlock rights to userspace) and for the kernel-internal
	 * users of the various kmap interfaces, where the backing storage must
	 * be pinned to guarantee that the atomic kmap calls can succeed. Since
	 * there are no in-kernel users of the kmap interfaces yet this isn't a
	 * real problem.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure. This can for
	 * example fail when the backing storage can't be allocated. Can also
	 * return -ERESTARTSYS or -EINTR when the call has been interrupted and
	 * needs to be restarted.
	 */
	int (*begin_cpu_access_partial)(struct dma_buf *dmabuf,
					enum dma_data_direction,
					unsigned int offset, unsigned int len);

	/**
	 * @end_cpu_access:
	 *
	 * This is called from dma_buf_end_cpu_access() when the importer is
	 * done accessing the CPU. The exporter can use this to flush caches and
	 * unpin any resources pinned in @begin_cpu_access.
	 * The result of any dma_buf kmap calls after end_cpu_access is
	 * undefined.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure. Can return
	 * -ERESTARTSYS or -EINTR when the call has been interrupted and needs
	 * to be restarted.
	 */
	int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);

	/**
	 * @end_cpu_access_partial:
	 *
	 * This is called from dma_buf_end_cpu_access_partial() when the
	 * importer is done accessing the CPU. The exporter can use this to
	 * limit cache flushing to only the range specified and to unpin any
	 * resources pinned in @begin_cpu_access_partial.
	 * The result of any dma_buf kmap calls after end_cpu_access_partial is
	 * undefined.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure. Can return
	 * -ERESTARTSYS or -EINTR when the call has been interrupted and needs
	 * to be restarted.
	 */
	int (*end_cpu_access_partial)(struct dma_buf *dmabuf,
				      enum dma_data_direction,
				      unsigned int offset, unsigned int len);

	/**
	 * @mmap:
	 *
	 * This callback is used by the dma_buf_mmap() function.
	 *
	 * Note that the mapping needs to be incoherent; userspace is expected
	 * to bracket CPU access using the DMA_BUF_IOCTL_SYNC interface.
	 *
	 * Because dma-buf buffers have invariant size over their lifetime, the
	 * dma-buf core checks whether a vma is too large and rejects such
	 * mappings. The exporter hence does not need to duplicate this check.
	 *
	 * If an exporter needs to manually flush caches and hence needs to fake
	 * coherency for mmap support, it needs to be able to zap all the ptes
	 * pointing at the backing storage. Now the Linux mm needs a struct
	 * address_space associated with the struct file stored in vma->vm_file
	 * to do that with the function unmap_mapping_range. But the dma_buf
	 * framework only backs every dma_buf fd with the anon_file struct file,
	 * i.e. all dma_bufs share the same file.
	 *
	 * Hence exporters need to set up their own file (and address_space)
	 * association by setting vma->vm_file and adjusting vma->vm_pgoff in
	 * the dma_buf mmap callback. In the specific case of a gem driver the
	 * exporter could use the shmem file already provided by gem (and set
	 * vm_pgoff = 0). Exporters can then zap ptes by unmapping the
	 * corresponding range of the struct address_space associated with their
	 * own file.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure.
	 */
	int (*mmap)(struct dma_buf *, struct vm_area_struct *vma);

	void *(*vmap)(struct dma_buf *);
	void (*vunmap)(struct dma_buf *, void *vaddr);

	/**
	 * @get_uuid:
	 *
	 * This is called by dma_buf_get_uuid() to get the UUID which identifies
	 * the buffer to virtio devices.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure. On success uuid
	 * will be populated with the buffer's UUID.
	 */
	int (*get_uuid)(struct dma_buf *dmabuf, uuid_t *uuid);

	/**
	 * @get_flags:
	 *
	 * This is called by dma_buf_get_flags() and is used to get the buffer's
	 * flags.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure. On success flags
	 * will be populated with the buffer's flags.
	 */
	int (*get_flags)(struct dma_buf *dmabuf, unsigned long *flags);
};
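
/*
 * Example: a minimal sketch of an exporter's operation table. Only
 * @map_dma_buf, @unmap_dma_buf and @release are mandatory; my_map_dma_buf(),
 * my_unmap_dma_buf() and my_release() are hypothetical exporter helpers, not
 * part of this framework:
 *
 *	static const struct dma_buf_ops my_dma_buf_ops = {
 *		.map_dma_buf	= my_map_dma_buf,
 *		.unmap_dma_buf	= my_unmap_dma_buf,
 *		.release	= my_release,
 *	};
 */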

/**
 * struct dma_buf - shared buffer object
 * @size: size of the buffer
 * @file: file pointer used for sharing buffers across processes, and for
 *        refcounting.
 * @attachments: list of dma_buf_attachment that denotes all devices attached,
 *               protected by dma_resv lock.
 * @ops: dma_buf_ops associated with this buffer object.
 * @lock: used internally to serialize list manipulation, attach/detach and
 *        vmap/unmap
 * @vmapping_counter: used internally to refcnt the vmaps
 * @vmap_ptr: the current vmap ptr if vmapping_counter > 0
 * @exp_name: name of the exporter; useful for debugging.
 * @name: userspace-provided name; useful for accounting and debugging,
 *        protected by @resv.
 * @name_lock: spinlock to protect name access
 * @owner: pointer to exporter module; used for refcounting when exporter is a
 *         kernel module.
 * @list_node: node for dma_buf accounting and debugging.
 * @priv: exporter specific private data for this buffer object.
 * @resv: reservation object linked to this dma-buf
 * @exp_pid: pid of exporter task which created this obj
 * @exp_task_comm: process name of exporter task which created this obj
 * @poll: for userspace poll support
 * @cb_excl: for userspace poll support
 * @cb_shared: for userspace poll support
 * @sysfs_entry: for exposing information about this buffer in sysfs.
 * @mmap_count: number of times buffer has been mmapped.
 * @exp_vm_ops: the vm ops provided by the buffer exporter.
 * @vm_ops: the overridden vm_ops used to track mmap_count of the buffer.
 *
 * This represents a shared buffer, created by calling dma_buf_export(). The
 * userspace representation is a normal file descriptor, which can be created
 * by calling dma_buf_fd().
 *
 * Shared dma buffers are reference counted using dma_buf_put() and
 * get_dma_buf().
 *
 * Device DMA access is handled by the separate &struct dma_buf_attachment.
 */
struct dma_buf {
	size_t size;
	struct file *file;
	struct list_head attachments;
	const struct dma_buf_ops *ops;
	struct mutex lock;
	unsigned vmapping_counter;
	void *vmap_ptr;
	const char *exp_name;
	const char *name;
	spinlock_t name_lock;
	struct module *owner;
	struct list_head list_node;
	void *priv;
	struct dma_resv *resv;
#ifdef CONFIG_DMABUF_PROCESS_INFO
	pid_t exp_pid;
	char exp_task_comm[TASK_COMM_LEN];
#endif

	/* poll support */
	wait_queue_head_t poll;

	struct dma_buf_poll_cb_t {
		struct dma_fence_cb cb;
		wait_queue_head_t *poll;

		__poll_t active;
	} cb_excl, cb_shared;
#ifdef CONFIG_DMABUF_SYSFS_STATS
	/* for sysfs stats */
	struct dma_buf_sysfs_entry {
		struct kobject kobj;
		struct dma_buf *dmabuf;
	} *sysfs_entry;
	int mmap_count;
	const struct vm_operations_struct *exp_vm_ops;
	struct vm_operations_struct vm_ops;
#endif
};

/**
 * struct dma_buf_attach_ops - importer operations for an attachment
 *
 * Attachment operations implemented by the importer.
 */
struct dma_buf_attach_ops {
	/**
	 * @allow_peer2peer:
	 *
	 * If this is set to true the importer must be able to handle peer
	 * resources without struct pages.
	 */
	bool allow_peer2peer;

	/**
	 * @move_notify: [optional] notification that the DMA-buf is moving
	 *
	 * If this callback is provided the framework can avoid pinning the
	 * backing store while mappings exist.
	 *
	 * This callback is called with the lock of the reservation object
	 * associated with the dma_buf held and the mapping function must be
	 * called with this lock held as well. This makes sure that no mapping
	 * is created concurrently with an ongoing move operation.
	 *
	 * Mappings stay valid and are not directly affected by this callback.
	 * But the DMA-buf can now be in a different physical location, so all
	 * mappings should be destroyed and re-created as soon as possible.
	 *
	 * New mappings can be created after this callback returns, and will
	 * point to the new location of the DMA-buf.
	 */
	void (*move_notify)(struct dma_buf_attachment *attach);
};
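
/*
 * Example: a sketch of a dynamic importer's attach ops. The names
 * my_attach_ops, my_importer and my_invalidate_mappings() are hypothetical;
 * the callback runs with the dma_resv lock held and must make sure cached
 * mappings get destroyed and re-created after the move:
 *
 *	static void my_move_notify(struct dma_buf_attachment *attach)
 *	{
 *		struct my_importer *imp = attach->importer_priv;
 *
 *		my_invalidate_mappings(imp);
 *	}
 *
 *	static const struct dma_buf_attach_ops my_attach_ops = {
 *		.allow_peer2peer = true,
 *		.move_notify	 = my_move_notify,
 *	};
 */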

/**
 * struct dma_buf_attachment - holds device-buffer attachment data
 * @dmabuf: buffer for this attachment.
 * @dev: device attached to the buffer.
 * @node: list of dma_buf_attachment, protected by dma_resv lock of the dmabuf.
 * @sgt: cached mapping.
 * @dir: direction of cached mapping.
 * @peer2peer: true if the importer can handle peer resources without pages.
 * @priv: exporter specific attachment data.
 * @importer_ops: importer operations for this attachment; if provided,
 *                dma_buf_map/unmap_attachment() must be called with the
 *                dma_resv lock held.
 * @importer_priv: importer specific attachment data.
 * @dma_map_attrs: DMA attributes to be used when the exporter maps the buffer
 *                 through dma_buf_map_attachment.
 * @sysfs_entry: For exposing information about this attachment in sysfs.
 *
 * This structure holds the attachment information between the dma_buf buffer
 * and its user device(s). The list contains one attachment struct per device
 * attached to the buffer.
 *
 * An attachment is created by calling dma_buf_attach(), and released again by
 * calling dma_buf_detach(). The DMA mapping itself needed to initiate a
 * transfer is created by dma_buf_map_attachment() and freed again by calling
 * dma_buf_unmap_attachment().
 */
struct dma_buf_attachment {
	struct dma_buf *dmabuf;
	struct device *dev;
	struct list_head node;
	struct sg_table *sgt;
	enum dma_data_direction dir;
	bool peer2peer;
	const struct dma_buf_attach_ops *importer_ops;
	void *importer_priv;
	void *priv;
	unsigned long dma_map_attrs;
#ifdef CONFIG_DMABUF_SYSFS_STATS
	/* for sysfs stats */
	struct dma_buf_attach_sysfs_entry {
		struct kobject kobj;
		unsigned int map_counter;
	} *sysfs_entry;
#endif
};
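
/*
 * Example: the typical importer flow around an attachment, assuming a
 * hypothetical importer device @dev and a dma_buf file descriptor @fd
 * received from userspace; error handling is elided for brevity:
 *
 *	struct dma_buf *dmabuf = dma_buf_get(fd);
 *	struct dma_buf_attachment *attach = dma_buf_attach(dmabuf, dev);
 *	struct sg_table *sgt;
 *
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *	... program the device with the addresses in sgt ...
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(dmabuf, attach);
 *	dma_buf_put(dmabuf);
 */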

/**
 * struct dma_buf_export_info - holds information needed to export a dma_buf
 * @exp_name: name of the exporter - useful for debugging.
 * @owner: pointer to exporter module - used for refcounting kernel module
 * @ops: Attach allocator-defined dma buf ops to the new buffer
 * @size: Size of the buffer
 * @flags: mode flags for the file
 * @resv: reservation-object, NULL to allocate default one
 * @priv: Attach private data of allocator to this buffer
 *
 * This structure holds the information required to export the buffer. Used
 * with dma_buf_export() only.
 */
struct dma_buf_export_info {
	const char *exp_name;
	struct module *owner;
	const struct dma_buf_ops *ops;
	size_t size;
	int flags;
	struct dma_resv *resv;
	void *priv;
};

/**
 * DEFINE_DMA_BUF_EXPORT_INFO - helper macro for exporters
 * @name: export-info name
 *
 * DEFINE_DMA_BUF_EXPORT_INFO macro defines the &struct dma_buf_export_info,
 * zeroes it out and pre-populates exp_name in it.
 */
#define DEFINE_DMA_BUF_EXPORT_INFO(name)	\
	struct dma_buf_export_info name = { .exp_name = KBUILD_MODNAME, \
					    .owner = THIS_MODULE }
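
/*
 * Example: exporting a buffer with the helper macro, assuming a hypothetical
 * allocator object @buf and the my_dma_buf_ops table sketched above:
 *
 *	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *	struct dma_buf *dmabuf;
 *
 *	exp_info.ops = &my_dma_buf_ops;
 *	exp_info.size = buf->size;
 *	exp_info.flags = O_RDWR;
 *	exp_info.priv = buf;
 *	dmabuf = dma_buf_export(&exp_info);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 */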

/**
 * get_dma_buf - convenience wrapper for get_file.
 * @dmabuf: [in] pointer to dma_buf
 *
 * Increments the reference count on the dma-buf, needed by drivers that
 * create additional references to the dmabuf on the kernel side. For
 * example, an exporter that needs to keep a dmabuf ptr so that subsequent
 * exports don't create a new dmabuf.
 */
static inline void get_dma_buf(struct dma_buf *dmabuf)
{
	get_file(dmabuf->file);
}
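
/*
 * Example: an exporter keeping a dmabuf pointer in a hypothetical private
 * structure takes an extra reference with get_dma_buf(), which must later
 * be balanced by dma_buf_put():
 *
 *	get_dma_buf(dmabuf);
 *	my_dev->cached_dmabuf = dmabuf;
 */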

/**
 * dma_buf_is_dynamic - check if a DMA-buf uses dynamic mappings.
 * @dmabuf: the DMA-buf to check
 *
 * Returns true if a DMA-buf exporter wants to be called with the dma_resv
 * locked for the map/unmap callbacks, false if it doesn't want to be called
 * with the lock held.
 */
static inline bool dma_buf_is_dynamic(struct dma_buf *dmabuf)
{
	return !!dmabuf->ops->pin;
}

/**
 * dma_buf_attachment_is_dynamic - check if a DMA-buf attachment uses dynamic
 * mappings
 * @attach: the DMA-buf attachment to check
 *
 * Returns true if a DMA-buf importer wants to call the map/unmap functions
 * with the dma_resv lock held.
 */
static inline bool dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach)
{
	return !!attach->importer_ops;
}

int get_each_dmabuf(int (*callback)(const struct dma_buf *dmabuf,
				    void *private), void *private);
int is_dma_buf_file(struct file *file);
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					  struct device *dev);
struct dma_buf_attachment *
dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
		       const struct dma_buf_attach_ops *importer_ops,
		       void *importer_priv);
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach);
int dma_buf_pin(struct dma_buf_attachment *attach);
void dma_buf_unpin(struct dma_buf_attachment *attach);
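
/*
 * Example: pinning a buffer in place for scanout. A sketch only, under the
 * assumption that @attach was created with dma_buf_dynamic_attach(); both
 * calls expect the reservation lock to be held:
 *
 *	dma_resv_lock(attach->dmabuf->resv, NULL);
 *	ret = dma_buf_pin(attach);
 *	if (!ret) {
 *		... use the buffer for scanout ...
 *		dma_buf_unpin(attach);
 *	}
 *	dma_resv_unlock(attach->dmabuf->resv);
 */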

struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info);

int dma_buf_fd(struct dma_buf *dmabuf, int flags);
struct dma_buf *dma_buf_get(int fd);
void dma_buf_put(struct dma_buf *dmabuf);

struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
					enum dma_data_direction);
void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
			      enum dma_data_direction);
void dma_buf_move_notify(struct dma_buf *dma_buf);
int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
			     enum dma_data_direction dir);
int dma_buf_begin_cpu_access_partial(struct dma_buf *dma_buf,
				     enum dma_data_direction dir,
				     unsigned int offset, unsigned int len);
int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
			   enum dma_data_direction dir);
int dma_buf_end_cpu_access_partial(struct dma_buf *dma_buf,
				   enum dma_data_direction dir,
				   unsigned int offset, unsigned int len);
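
/*
 * Example: bracketing kernel CPU reads of a buffer, mirroring what userspace
 * does via DMA_BUF_IOCTL_SYNC. A sketch only; callers must be prepared for
 * -ERESTARTSYS/-EINTR and restart the access:
 *
 *	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *	if (ret)
 *		return ret;
 *	... read the buffer through a CPU mapping ...
 *	ret = dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
 */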

int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *, unsigned long);
void *dma_buf_vmap(struct dma_buf *);
void dma_buf_vunmap(struct dma_buf *, void *vaddr);
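
/*
 * Example: a short-lived kernel virtual mapping, assuming the exporter
 * implements the optional @vmap/@vunmap callbacks (a NULL return must be
 * handled); @dst and @len are hypothetical caller state:
 *
 *	void *vaddr = dma_buf_vmap(dmabuf);
 *
 *	if (!vaddr)
 *		return -ENOMEM;
 *	memcpy(dst, vaddr, len);
 *	dma_buf_vunmap(dmabuf, vaddr);
 */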

int dma_buf_get_flags(struct dma_buf *dmabuf, unsigned long *flags);
int dma_buf_get_uuid(struct dma_buf *dmabuf, uuid_t *uuid);

#ifdef CONFIG_DMABUF_PROCESS_INFO
/**
 * get_dma_buf_from_file - Get struct dma_buf* from struct file*
 * @f: [in] pointer to struct file, which is associated with a
 *     dma_buf object.
 *
 * If @f IS_ERR_OR_NULL, return NULL.
 * If @f is not a file associated with dma_buf, return NULL.
 */
struct dma_buf *get_dma_buf_from_file(struct file *f);
#endif /* CONFIG_DMABUF_PROCESS_INFO */
#endif /* __DMA_BUF_H__ */