#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>

#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>

#ifdef __arch_page_to_dma
#error Please update to __arch_pfn_to_dma
#endif

/*
 * dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private
 * functions used internally by the DMA-mapping API to provide DMA
 * addresses. They must not be used by drivers.
 */
#ifndef __arch_pfn_to_dma
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_bus(pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	return __bus_to_pfn(addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return (void *)__bus_to_virt((unsigned long)addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
#else
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return __arch_pfn_to_dma(dev, pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_pfn(dev, addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return __arch_virt_to_dma(dev, addr);
}
#endif

/*
 * The DMA API is built upon the notion of "buffer ownership". A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device. These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches. We model our approach on the assumption that
 * the CPU does perform speculative prefetches, which means we clean
 * caches before transfers and delay cache invalidation until transfer
 * completion.
 *
 * Private support functions: these are not part of the API and are
 * liable to change. Drivers must not use these.
 */
static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	extern void ___dma_single_cpu_to_dev(const void *, size_t,
		enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_single_cpu_to_dev(kaddr, size, dir);
}

static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	extern void ___dma_single_dev_to_cpu(const void *, size_t,
		enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_single_dev_to_cpu(kaddr, size, dir);
}

static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
		size_t, enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_page_cpu_to_dev(page, off, size, dir);
}

static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
		size_t, enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_page_dev_to_cpu(page, off, size, dir);
}

extern int dma_supported(struct device *, u64);
extern int dma_set_mask(struct device *, u64);

/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == ~0;
}
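/*
 * Illustrative sketch (not part of this header): drivers should test the
 * handle returned by the streaming mapping routines rather than comparing
 * against ~0 directly. Names below ("dev", "buf", "len") are hypothetical.
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 */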

/*
 * Dummy noncoherent implementation. We don't provide a dma_cache_sync
 * function so drivers using this API are highlighted with build warnings.
 */
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp)
{
	return NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t handle)
{
}

/**
 * dma_alloc_coherent - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, unbuffered memory for a device for
 * performing DMA. This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
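/*
 * Illustrative sketch, assuming a driver-owned "dev" and a hypothetical
 * buffer size RING_BYTES (neither is defined by this header):
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 */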

/**
 * dma_free_coherent - free memory allocated by dma_alloc_coherent
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * dma_alloc_coherent().
 *
 * References to the memory and mappings associated with cpu_addr/handle
 * are no longer valid once this call starts executing.
 */
extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);

/**
 * dma_mmap_coherent - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space. The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
int dma_mmap_coherent(struct device *, struct vm_area_struct *,
		void *, dma_addr_t, size_t);
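/*
 * Illustrative sketch of use from a driver's mmap file operation. The
 * "foo" structure, its fields and the handler name are hypothetical; the
 * buffer is assumed to come from dma_alloc_coherent() as shown above.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *foo = file->private_data;
 *
 *		return dma_mmap_coherent(foo->dev, vma, foo->ring,
 *					 foo->ring_dma, foo->ring_size);
 *	}
 */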


/**
 * dma_alloc_writecombine - allocate writecombining memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, buffered memory for a device for
 * performing DMA. This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
		gfp_t);

#define dma_free_writecombine(dev,size,cpu_addr,handle) \
	dma_free_coherent(dev,size,cpu_addr,handle)

int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
		void *, dma_addr_t, size_t);

/*
 * This can be called during boot to increase the size of the consistent
 * DMA region above its default value of 2MB. It must be called before the
 * memory allocator is initialised, i.e. before any core_initcall.
 */
extern void __init init_consistent_dma_size(unsigned long size);
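/*
 * Illustrative sketch, assuming platform code that needs a larger
 * consistent region. The call site shown (an early machine callback
 * named foo_map_io) and the 16MB figure are hypothetical.
 *
 *	static void __init foo_map_io(void)
 *	{
 *		init_consistent_dma_size(SZ_16M);
 *		...
 *	}
 */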


#ifdef CONFIG_DMABOUNCE
/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
 * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
 *
 * The following are helper functions used by the dmabounce subsystem
 *
 */

/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 * @needs_bounce_fn: called to determine whether buffer needs bouncing
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing. The function will allocate
 * appropriate DMA pools for the device.
 */
extern int dmabounce_register_dev(struct device *, unsigned long,
		unsigned long, int (*)(struct device *, dma_addr_t, size_t));
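/*
 * Illustrative sketch, assuming a device that can only reach the low
 * 64MB of the bus. The callback name, pool sizes and the 64MB limit are
 * hypothetical.
 *
 *	static int foo_needs_bounce(struct device *dev, dma_addr_t addr,
 *				    size_t size)
 *	{
 *		return (addr + size) > SZ_64M;
 *	}
 *
 *	ret = dmabounce_register_dev(dev, 2048, 65536, foo_needs_bounce);
 */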

/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when a device
 * that was previously registered with dmabounce_register_dev is removed
 * from the system.
 *
 */
extern void dmabounce_unregister_dev(struct device *);

/*
 * The DMA API, implemented by dmabounce.c. See below for descriptions.
 */
extern dma_addr_t __dma_map_page(struct device *, struct page *,
		unsigned long, size_t, enum dma_data_direction);
extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
		enum dma_data_direction);

/*
 * Private functions
 */
int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
		size_t, enum dma_data_direction);
int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
		size_t, enum dma_data_direction);
#else
static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
	unsigned long offset, size_t size, enum dma_data_direction dir)
{
	return 1;
}

static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
	unsigned long offset, size_t size, enum dma_data_direction dir)
{
	return 1;
}


static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction dir)
{
	__dma_page_cpu_to_dev(page, offset, size, dir);
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
	size_t size, enum dma_data_direction dir)
{
	__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
		handle & ~PAGE_MASK, size, dir);
}
#endif /* CONFIG_DMABOUNCE */

/**
 * dma_map_single - map a single buffer for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @cpu_addr: CPU direct mapped address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed. The CPU
 * can regain ownership by calling dma_unmap_single() or
 * dma_sync_single_for_cpu().
 */
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
		size_t size, enum dma_data_direction dir)
{
	unsigned long offset;
	struct page *page;
	dma_addr_t addr;

	BUG_ON(!virt_addr_valid(cpu_addr));
	BUG_ON(!virt_addr_valid(cpu_addr + size - 1));
	BUG_ON(!valid_dma_direction(dir));

	page = virt_to_page(cpu_addr);
	offset = (unsigned long)cpu_addr & ~PAGE_MASK;
	addr = __dma_map_page(dev, page, offset, size, dir);
	debug_dma_map_page(dev, page, offset, size, dir, addr, true);

	return addr;
}
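/*
 * Illustrative sketch of the full streaming lifecycle, assuming a
 * kernel-linear buffer "buf" of "len" bytes owned by the driver (names
 * are hypothetical):
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	... the device owns the buffer: hand "dma" to it and wait for
 *	    the transfer to complete ...
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 *	... the CPU owns the buffer again ...
 */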

/**
 * dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed. The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir)
{
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	addr = __dma_map_page(dev, page, offset, size, dir);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

/**
 * dma_unmap_single - unmap a single buffer previously mapped
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_single)
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Unmap a single streaming mode DMA translation. The handle and size
 * must match what was provided in the previous dma_map_single() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	debug_dma_unmap_page(dev, handle, size, dir, true);
	__dma_unmap_page(dev, handle, size, dir);
}

/**
 * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation. The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	debug_dma_unmap_page(dev, handle, size, dir, false);
	__dma_unmap_page(dev, handle, size, dir);
}

/**
 * dma_sync_single_range_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @offset: offset of region to start sync
 * @size: size of region to sync
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Make physical memory consistent for a single streaming mode DMA
 * translation after a transfer.
 *
 * If you perform a dma_map_single() but wish to interrogate the
 * buffer using the CPU, yet do not wish to tear down the DMA mapping,
 * you must call this function before doing so. Before you next hand
 * the DMA address back to the device, you must first call
 * dma_sync_single_for_device(); the device then owns the buffer again.
 */
static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t handle, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	debug_dma_sync_single_for_cpu(dev, handle + offset, size, dir);

	if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
		return;

	__dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t handle, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	debug_dma_sync_single_for_device(dev, handle + offset, size, dir);

	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
		return;

	__dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir);
}

static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	dma_sync_single_range_for_device(dev, handle, 0, size, dir);
}
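/*
 * Illustrative sketch: peeking at a still-mapped streaming buffer,
 * assuming "dma" was returned by dma_map_single(dev, buf, len,
 * DMA_FROM_DEVICE) and the names are hypothetical:
 *
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	... the CPU may now examine buf ...
 *	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *	... the device may now DMA into the buffer again ...
 */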

/*
 * The scatter list versions of the above methods.
 */
extern int dma_map_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
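/*
 * Illustrative sketch, assuming an "sglist" of "nents" entries built with
 * sg_init_table()/sg_set_page() (names are hypothetical):
 *
 *	int count = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
 *	if (count == 0)
 *		return -ENOMEM;
 *	... program the device with "count" segments, reading each one's
 *	    bus address and length with sg_dma_address()/sg_dma_len() ...
 *	dma_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE);
 */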


#endif /* __KERNEL__ */
#endif