/*
 * Header file for dma buffer sharing framework.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to linaro-mm-sig list, and especially
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
 * refining of this idea.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __DMA_BUF_H__
#define __DMA_BUF_H__

#include <linux/file.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>
#include <linux/fs.h>

struct device;
struct dma_buf;
struct dma_buf_attachment;

/**
 * struct dma_buf_ops - operations possible on struct dma_buf
 * @attach: [optional] allows different devices to 'attach' themselves to the
 *	    given buffer. It might return -EBUSY to signal that backing storage
 *	    is already allocated and incompatible with the requirements
 *	    of the requesting device.
 * @detach: [optional] detach a given device from this buffer.
 * @map_dma_buf: returns list of scatter pages allocated, increases usecount
 *		 of the buffer. Requires at least one attach to be called
 *		 before. Returned sg list should already be mapped into
 *		 _device_ address space. This call may sleep. May also return
 *		 -EINTR. Should return -EINVAL if attach hasn't been called yet.
 * @unmap_dma_buf: decreases usecount of buffer, might deallocate scatter
 *		   pages.
 * @release: release this buffer; to be called after the last dma_buf_put.
 * @begin_cpu_access: [optional] called before cpu access to invalidate cpu
 *		      caches, allocate backing storage (if not yet done) and
 *		      pin the object into memory.
 * @end_cpu_access: [optional] called after cpu access to flush caches.
 * @kmap_atomic: maps a page from the buffer into kernel address
 *		 space; callers must not block between this call and the
 *		 subsequent unmap call. This callback must not sleep.
 * @kunmap_atomic: [optional] unmaps an atomically mapped page from the buffer.
 *		   This callback must not sleep.
 * @kmap: maps a page from the buffer into kernel address space.
 * @kunmap: [optional] unmaps a page from the buffer.
 * @mmap: used to expose the backing storage to userspace. Note that the
 *	  mapping needs to be coherent - if the exporter doesn't directly
 *	  support this, it needs to fake coherency by shooting down any ptes
 *	  when transitioning away from the cpu domain.
 */
struct dma_buf_ops {
	int (*attach)(struct dma_buf *, struct device *,
			struct dma_buf_attachment *);

	void (*detach)(struct dma_buf *, struct dma_buf_attachment *);

	/* For {map,unmap}_dma_buf below, any specific buffer attributes
	 * required should get added to device_dma_parameters accessible
	 * via dev->dma_params.
	 */
	struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *,
						enum dma_data_direction);
	void (*unmap_dma_buf)(struct dma_buf_attachment *,
						struct sg_table *,
						enum dma_data_direction);
	/* TODO: Add try_map_dma_buf version, to return immediately with
	 * -EBUSY if the call would block.
	 */

	/* after final dma_buf_put() */
	void (*release)(struct dma_buf *);

	int (*begin_cpu_access)(struct dma_buf *, size_t, size_t,
				enum dma_data_direction);
	void (*end_cpu_access)(struct dma_buf *, size_t, size_t,
			       enum dma_data_direction);
	void *(*kmap_atomic)(struct dma_buf *, unsigned long);
	void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *);
	void *(*kmap)(struct dma_buf *, unsigned long);
	void (*kunmap)(struct dma_buf *, unsigned long, void *);

	int (*mmap)(struct dma_buf *, struct vm_area_struct *vma);
};
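
/*
 * Example: an illustrative sketch (not part of this interface) of how an
 * exporter might fill in a dma_buf_ops table. All "my_*" names below are
 * hypothetical stand-ins for exporter-specific code.
 *
 *	static struct sg_table *my_map_dma_buf(struct dma_buf_attachment *attach,
 *					       enum dma_data_direction dir)
 *	{
 *		struct my_buffer *buf = attach->dmabuf->priv;
 *
 *		return my_map_pages_to_device(buf, attach->dev, dir);
 *	}
 *
 *	static void my_unmap_dma_buf(struct dma_buf_attachment *attach,
 *				     struct sg_table *sgt,
 *				     enum dma_data_direction dir)
 *	{
 *		my_unmap_pages_from_device(attach->dmabuf->priv, sgt, dir);
 *	}
 *
 *	static void my_release(struct dma_buf *dmabuf)
 *	{
 *		my_free_buffer(dmabuf->priv);
 *	}
 *
 *	static const struct dma_buf_ops my_dma_buf_ops = {
 *		.map_dma_buf	= my_map_dma_buf,
 *		.unmap_dma_buf	= my_unmap_dma_buf,
 *		.release	= my_release,
 *	};
 */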

/**
 * struct dma_buf - shared buffer object
 * @size: size of the buffer
 * @file: file pointer used for sharing buffers across processes, and for
 *	  refcounting.
 * @attachments: list of dma_buf_attachment that denotes all devices attached.
 * @ops: dma_buf_ops associated with this buffer object.
 * @lock: mutex to serialize list manipulation and attach/detach.
 * @priv: exporter specific private data for this buffer object.
 */
struct dma_buf {
	size_t size;
	struct file *file;
	struct list_head attachments;
	const struct dma_buf_ops *ops;
	/* mutex to serialize list manipulation and attach/detach */
	struct mutex lock;
	void *priv;
};
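
/*
 * Example: an illustrative sketch of exporting a buffer. dma_buf_export()
 * (declared below) wraps exporter private data in a struct dma_buf, and
 * dma_buf_fd() turns it into a file descriptor for userspace. "my_buffer",
 * "my_buffer_size" and "my_dma_buf_ops" are hypothetical, and the flag
 * choices are only examples.
 *
 *	struct dma_buf *dmabuf;
 *	int fd;
 *
 *	dmabuf = dma_buf_export(my_buffer, &my_dma_buf_ops,
 *				my_buffer_size, O_RDWR);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *
 *	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 *	if (fd < 0) {
 *		dma_buf_put(dmabuf);
 *		return fd;
 *	}
 */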

/**
 * struct dma_buf_attachment - holds device-buffer attachment data
 * @dmabuf: buffer for this attachment.
 * @dev: device attached to the buffer.
 * @node: list of dma_buf_attachment.
 * @priv: exporter specific attachment data.
 *
 * This structure holds the attachment information between the dma_buf buffer
 * and its user device(s). The list contains one attachment struct per device
 * attached to the buffer.
 */
struct dma_buf_attachment {
	struct dma_buf *dmabuf;
	struct device *dev;
	struct list_head node;
	void *priv;
};
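
/*
 * Example: an illustrative sketch of an exporter's attach callback using the
 * priv field above to remember per-device state for later map_dma_buf calls.
 * "my_attach_state" and "my_alloc_attach_state" are hypothetical helpers.
 *
 *	static int my_attach(struct dma_buf *dmabuf, struct device *dev,
 *			     struct dma_buf_attachment *attach)
 *	{
 *		struct my_attach_state *state = my_alloc_attach_state(dev);
 *
 *		if (!state)
 *			return -ENOMEM;
 *
 *		attach->priv = state;
 *		return 0;
 *	}
 */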

/**
 * get_dma_buf - convenience wrapper for get_file.
 * @dmabuf:	[in]	pointer to dma_buf
 *
 * Increments the reference count on the dma-buf, needed by drivers that
 * create additional references to the dmabuf on the kernel side. For
 * example, an exporter that needs to keep a dmabuf ptr so that subsequent
 * exports don't create a new dmabuf.
 */
static inline void get_dma_buf(struct dma_buf *dmabuf)
{
	get_file(dmabuf->file);
}
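
/*
 * Example: an illustrative sketch of the caching use case described above.
 * An exporter that keeps a pointer to an already-exported dmabuf takes an
 * extra reference before handing it out again. "my_buffer" is hypothetical.
 *
 *	if (my_buffer->dmabuf) {
 *		get_dma_buf(my_buffer->dmabuf);
 *		return my_buffer->dmabuf;
 *	}
 */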

#ifdef CONFIG_DMA_SHARED_BUFFER
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
							struct device *dev);
void dma_buf_detach(struct dma_buf *dmabuf,
				struct dma_buf_attachment *dmabuf_attach);
struct dma_buf *dma_buf_export(void *priv, const struct dma_buf_ops *ops,
			       size_t size, int flags);
int dma_buf_fd(struct dma_buf *dmabuf, int flags);
struct dma_buf *dma_buf_get(int fd);
void dma_buf_put(struct dma_buf *dmabuf);

struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
					enum dma_data_direction);
void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
				enum dma_data_direction);
int dma_buf_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len,
			     enum dma_data_direction dir);
void dma_buf_end_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len,
			    enum dma_data_direction dir);
void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
void *dma_buf_kmap(struct dma_buf *, unsigned long);
void dma_buf_kunmap(struct dma_buf *, unsigned long, void *);

int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
		 unsigned long);
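
/*
 * Example: an illustrative sketch of the typical importer-side flow for a
 * device driver handed a dma-buf fd by userspace. "my_dev" and "fd" are
 * assumed to come from the caller; the direction and error handling are
 * only examples.
 *
 *	struct dma_buf *dmabuf;
 *	struct dma_buf_attachment *attach;
 *	struct sg_table *sgt;
 *
 *	dmabuf = dma_buf_get(fd);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *
 *	attach = dma_buf_attach(dmabuf, my_dev);
 *	if (IS_ERR(attach)) {
 *		dma_buf_put(dmabuf);
 *		return PTR_ERR(attach);
 *	}
 *
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *	if (IS_ERR(sgt)) {
 *		dma_buf_detach(dmabuf, attach);
 *		dma_buf_put(dmabuf);
 *		return PTR_ERR(sgt);
 *	}
 *
 *	... program the device with the addresses in sgt, then tear down ...
 *
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(dmabuf, attach);
 *	dma_buf_put(dmabuf);
 */
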
#else

static inline struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
							struct device *dev)
{
	return ERR_PTR(-ENODEV);
}

static inline void dma_buf_detach(struct dma_buf *dmabuf,
				  struct dma_buf_attachment *dmabuf_attach)
{
	return;
}

static inline struct dma_buf *dma_buf_export(void *priv,
					     const struct dma_buf_ops *ops,
					     size_t size, int flags)
{
	return ERR_PTR(-ENODEV);
}

static inline int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
	return -ENODEV;
}

static inline struct dma_buf *dma_buf_get(int fd)
{
	return ERR_PTR(-ENODEV);
}

static inline void dma_buf_put(struct dma_buf *dmabuf)
{
	return;
}

static inline struct sg_table *dma_buf_map_attachment(
	struct dma_buf_attachment *attach, enum dma_data_direction write)
{
	return ERR_PTR(-ENODEV);
}

static inline void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
			struct sg_table *sg, enum dma_data_direction dir)
{
	return;
}

static inline int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					   size_t start, size_t len,
					   enum dma_data_direction dir)
{
	return -ENODEV;
}

static inline void dma_buf_end_cpu_access(struct dma_buf *dmabuf,
					  size_t start, size_t len,
					  enum dma_data_direction dir)
{
}

static inline void *dma_buf_kmap_atomic(struct dma_buf *dmabuf,
					unsigned long pnum)
{
	return NULL;
}

static inline void dma_buf_kunmap_atomic(struct dma_buf *dmabuf,
					 unsigned long pnum, void *vaddr)
{
}

static inline void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long pnum)
{
	return NULL;
}

static inline void dma_buf_kunmap(struct dma_buf *dmabuf,
				  unsigned long pnum, void *vaddr)
{
}

static inline int dma_buf_mmap(struct dma_buf *dmabuf,
			       struct vm_area_struct *vma,
			       unsigned long pgoff)
{
	return -ENODEV;
}
#endif /* CONFIG_DMA_SHARED_BUFFER */

#endif /* __DMA_BUF_H__ */