#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>

#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>

/*
 * page_to_dma/dma_to_virt/virt_to_dma are architecture private functions
 * used internally by the DMA-mapping API to provide DMA addresses. They
 * must not be used by drivers.
 */
#ifndef __arch_page_to_dma
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
{
	return (dma_addr_t)__virt_to_bus((unsigned long)page_address(page));
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return (void *)__bus_to_virt(addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
#else
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
{
	return __arch_page_to_dma(dev, page);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return __arch_virt_to_dma(dev, addr);
}
#endif

/*
 * DMA-consistent mapping functions.  These allocate/free a region of
 * uncached, unwrite-buffered mapped memory space for use with DMA
 * devices.  This is the "generic" version.  The PCI specific version
 * is in pci.h
 *
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
extern void dma_cache_maint(const void *kaddr, size_t size, int rw);

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 *
 * FIXME: This should really be a platform specific issue - we should
 * return false if GFP_DMA allocations may not satisfy the supplied 'mask'.
 */
static inline int dma_supported(struct device *dev, u64 mask)
{
	if (mask < ISA_DMA_THRESHOLD)
		return 0;
	return 1;
}

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}
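
/*
 * Example (a hypothetical sketch, not part of this header): a driver whose
 * device can only drive the low 24 address bits might do the following from
 * its probe routine; "pdev" and the error handling are illustrative only.
 *
 *	if (dma_set_mask(&pdev->dev, 0x00ffffff)) {
 *		dev_err(&pdev->dev, "no usable DMA mask\n");
 *		return -EIO;
 *	}
 */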

static inline int dma_get_cache_alignment(void)
{
	return 32;
}

static inline int dma_is_consistent(struct device *dev, dma_addr_t handle)
{
	return !!arch_is_coherent();
}

/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == ~0;
}
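
/*
 * Example (hypothetical driver code, names invented for illustration):
 * streaming mappings should be checked with dma_mapping_error() before the
 * address is handed to the hardware.
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 */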

/*
 * Dummy noncoherent implementation.  We don't provide a dma_cache_sync
 * function so drivers using this API are highlighted with build warnings.
 */
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp)
{
	return NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t handle)
{
}

/**
 * dma_alloc_coherent - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, unbuffered memory for a device for
 * performing DMA.  This function allocates pages, returns the
 * CPU-viewed address, and sets @handle to be the device-viewed
 * address.
 */
extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);

/**
 * dma_free_coherent - free memory allocated by dma_alloc_coherent
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * dma_alloc_coherent().
 *
 * References to memory and mappings associated with cpu_addr/handle
 * are illegal during and after this call.
 */
extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
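
/*
 * Example (a minimal, hypothetical sketch): allocate a descriptor ring at
 * probe time and release it at remove time.  "priv", "ring_dma" and
 * "RING_BYTES" are made-up names, not part of this API.
 *
 *	priv->ring = dma_alloc_coherent(dev, RING_BYTES,
 *					&priv->ring_dma, GFP_KERNEL);
 *	if (!priv->ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, RING_BYTES, priv->ring, priv->ring_dma);
 */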

/**
 * dma_mmap_coherent - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
int dma_mmap_coherent(struct device *, struct vm_area_struct *,
		void *, dma_addr_t, size_t);
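
/*
 * Example (hypothetical; assumes the buffer came from dma_alloc_coherent()
 * and outlives the user mapping): a driver's mmap() handler can simply
 * forward to dma_mmap_coherent().  "foo_dev" and its fields are invented.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *foo = file->private_data;
 *
 *		return dma_mmap_coherent(foo->dev, vma, foo->cpu_addr,
 *					 foo->dma_handle, foo->size);
 *	}
 */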


/**
 * dma_alloc_writecombine - allocate writecombining memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, buffered memory for a device for
 * performing DMA.  This function allocates pages, returns the
 * CPU-viewed address, and sets @handle to be the device-viewed
 * address.
 */
extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
		gfp_t);

#define dma_free_writecombine(dev,size,cpu_addr,handle) \
	dma_free_coherent(dev,size,cpu_addr,handle)

int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
		void *, dma_addr_t, size_t);
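
/*
 * Example (hypothetical frame-buffer style usage; all names are made up):
 * write-combined memory suits buffers the CPU only streams data into,
 * such as display frame buffers.
 *
 *	fb->screen_base = dma_alloc_writecombine(fb->dev, fb->smem_len,
 *						 &fb->smem_start, GFP_KERNEL);
 *	...
 *	dma_free_writecombine(fb->dev, fb->smem_len,
 *			      fb->screen_base, fb->smem_start);
 */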


#ifdef CONFIG_DMABOUNCE
/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM).
 * On some ADI engineering systems, the PCI inbound window is 32MB (12MB total RAM).
 *
 * The following are helper functions used by the dmabounce subsystem.
 */

/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing. The function will allocate
 * appropriate DMA pools for the device.
 *
 */
extern int dmabounce_register_dev(struct device *, unsigned long,
		unsigned long);
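
/*
 * Example (a hypothetical sketch of platform code; the pool sizes are
 * illustrative only): register a device that needs bouncing with a 2KiB
 * small-buffer pool and no dedicated large-buffer pool.
 *
 *	if (dmabounce_register_dev(dev, 2048, 0))
 *		dev_err(dev, "could not register with dmabounce\n");
 */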

/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when a device
 * that was previously registered with dmabounce_register_dev is removed
 * from the system.
 *
 */
extern void dmabounce_unregister_dev(struct device *);

/**
 * dma_needs_bounce
 *
 * @dev: valid struct device pointer
 * @dma_handle: dma_handle of unbounced buffer
 * @size: size of region being mapped
 *
 * Platforms that utilize the dmabounce mechanism must implement
 * this function.
 *
 * The dmabounce routines call this function whenever a dma-mapping
 * is requested to determine whether a given buffer needs to be bounced
 * or not. The function must return 0 if the buffer is OK for
 * DMA access and 1 if the buffer needs to be bounced.
 *
 */
extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
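
/*
 * Example (a hypothetical platform implementation, assuming a DMA window
 * that starts at bus address 0 and covers the first 64MB): bounce anything
 * that ends beyond the window.
 *
 *	int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
 *	{
 *		return (addr + size) > SZ_64M;
 *	}
 */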

/*
 * The DMA API, implemented by dmabounce.c.  See below for descriptions.
 */
extern dma_addr_t dma_map_single(struct device *, void *, size_t,
		enum dma_data_direction);
extern dma_addr_t dma_map_page(struct device *, struct page *,
		unsigned long, size_t, enum dma_data_direction);
extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
		enum dma_data_direction);

/*
 * Private functions
 */
int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
		size_t, enum dma_data_direction);
int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
		size_t, enum dma_data_direction);
#else
static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
	unsigned long offset, size_t size, enum dma_data_direction dir)
{
	return 1;
}

static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
	unsigned long offset, size_t size, enum dma_data_direction dir)
{
	return 1;
}


/**
 * dma_map_single - map a single buffer for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @cpu_addr: CPU direct mapped address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_single() or
 * dma_sync_single_for_cpu().
 */
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
		size_t size, enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	if (!arch_is_coherent())
		dma_cache_maint(cpu_addr, size, dir);

	return virt_to_dma(dev, cpu_addr);
}
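
/*
 * Example (hypothetical transmit path; "buf" and "len" are invented for
 * illustration): map a buffer for the device to read, start the transfer,
 * then unmap from the completion handler.
 *
 *	dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	... program "dma" and "len" into the device ...
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */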

/**
 * dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	if (!arch_is_coherent())
		dma_cache_maint(page_address(page) + offset, size, dir);

	return page_to_dma(dev, page) + offset;
}
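
/*
 * Example (hypothetical; "frag" is an invented structure holding a page,
 * offset and length): map only the used part of a page for device reads.
 *
 *	dma = dma_map_page(dev, frag->page, frag->offset, frag->len,
 *			   DMA_TO_DEVICE);
 *	...
 *	dma_unmap_page(dev, dma, frag->len, DMA_TO_DEVICE);
 */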

/**
 * dma_unmap_single - unmap a single buffer previously mapped
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_single)
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Unmap a single streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_single() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	/* nothing to do */
}
#endif /* CONFIG_DMABOUNCE */

/**
 * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	dma_unmap_single(dev, handle, size, dir);
}

/**
 * dma_sync_single_range_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @offset: offset of region to start sync
 * @size: size of region to sync
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Make physical memory consistent for a single streaming mode DMA
 * translation after a transfer.
 *
 * If you perform a dma_map_single() but wish to interrogate the
 * buffer using the CPU, yet do not wish to tear down the DMA mapping,
 * you must call this function before doing so.  When you next give the
 * DMA address back to the device, you must first perform
 * dma_sync_single_for_device(), and then the device again owns the buffer.
 */
static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t handle, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	dmabounce_sync_for_cpu(dev, handle, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t handle, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
		return;

	if (!arch_is_coherent())
		dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
}

static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	dma_sync_single_range_for_device(dev, handle, 0, size, dir);
}
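
/*
 * Example (a hypothetical receive path; "dma" and "HDR_LEN" are invented):
 * give the CPU ownership to inspect a header, then hand the buffer back to
 * the device before the mapping is reused.
 *
 *	dma_sync_single_for_cpu(dev, dma, HDR_LEN, DMA_FROM_DEVICE);
 *	... examine the first HDR_LEN bytes through the CPU mapping ...
 *	dma_sync_single_for_device(dev, dma, HDR_LEN, DMA_FROM_DEVICE);
 */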

/*
 * The scatter list versions of the above methods.
 */
extern int dma_map_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
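
/*
 * Example (hypothetical block-driver style usage; "sg", "nents" and
 * "setup_hw_entry" are invented): map a scatterlist, program each mapped
 * entry into the hardware, and unmap with the original nents when done.
 *
 *	int i, count = dma_map_sg(dev, sg, nents, DMA_FROM_DEVICE);
 *
 *	for (i = 0; i < count; i++)
 *		setup_hw_entry(sg_dma_address(&sg[i]), sg_dma_len(&sg[i]));
 *	...
 *	dma_unmap_sg(dev, sg, nents, DMA_FROM_DEVICE);
 */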


#endif /* __KERNEL__ */
#endif