Lines Matching "dma-safe-map"
4 * Special dma_{map/unmap/dma_sync}_* routines for systems that have
5 * limited DMA windows. These functions utilize bounce buffers to
6 * copy data to/from buffers located outside the DMA region. This
7 * only works for systems in which DMA memory is at the bottom of
8 * RAM, the remainder of memory is at the top and the DMA memory
10 * DMA windows will require custom implementations that reserve memory
14 * Re-written by Christopher Hoover <ch@murgatroid.com>
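The routines listed here are opt-in: a platform or bus driver that knows its device sits behind a limited DMA window registers itself at setup time and supplies a callback that decides which bus addresses must be bounced. A minimal sketch of that hookup, assuming a hypothetical foo device that can only reach the first 64MB of RAM (the foo_* names, pool sizes and window are illustrative, not taken from this file):

#include <linux/device.h>
#include <linux/sizes.h>
#include <asm/dma-mapping.h>	/* declares dmabounce_register_dev() on ARM */
#include <asm/memory.h>		/* PHYS_OFFSET */

/* Hypothetical rule: bounce any buffer extending beyond the first 64MB of RAM. */
static int foo_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
{
	return (addr + size) > (PHYS_OFFSET + SZ_64M);
}

static int foo_probe(struct device *dev)
{
	/* 512-byte "small" pool, 4K "large" pool; the sizes are illustrative. */
	return dmabounce_register_dev(dev, 512, SZ_4K, foo_needs_bounce);
}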
28 #include <linux/page-flags.h>
30 #include <linux/dma-mapping.h>
36 #include <asm/dma-iommu.h>
56 /* safe buffer info */
58 void *safe; member
91 struct dmabounce_device_info *device_info = dev->archdata.dmabounce; in dmabounce_show()
93 device_info->small.allocs, in dmabounce_show()
94 device_info->large.allocs, in dmabounce_show()
95 device_info->total_allocs - device_info->small.allocs - in dmabounce_show()
96 device_info->large.allocs, in dmabounce_show()
97 device_info->total_allocs, in dmabounce_show()
98 device_info->map_op_count, in dmabounce_show()
99 device_info->bounce_count); in dmabounce_show()
106 /* allocate a 'safe' buffer and keep track of it */
113 struct device *dev = device_info->dev; in alloc_safe_buffer()
119 if (size <= device_info->small.size) { in alloc_safe_buffer()
120 pool = &device_info->small; in alloc_safe_buffer()
121 } else if (size <= device_info->large.size) { in alloc_safe_buffer()
122 pool = &device_info->large; in alloc_safe_buffer()
133 buf->ptr = ptr; in alloc_safe_buffer()
134 buf->size = size; in alloc_safe_buffer()
135 buf->direction = dir; in alloc_safe_buffer()
136 buf->pool = pool; in alloc_safe_buffer()
139 buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC, in alloc_safe_buffer()
140 &buf->safe_dma_addr); in alloc_safe_buffer()
142 buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr, in alloc_safe_buffer()
146 if (buf->safe == NULL) { in alloc_safe_buffer()
148 "%s: could not alloc dma memory (size=%d)\n", in alloc_safe_buffer()
156 pool->allocs++; in alloc_safe_buffer()
157 device_info->total_allocs++; in alloc_safe_buffer()
160 write_lock_irqsave(&device_info->lock, flags); in alloc_safe_buffer()
161 list_add(&buf->node, &device_info->safe_buffers); in alloc_safe_buffer()
162 write_unlock_irqrestore(&device_info->lock, flags); in alloc_safe_buffer()
167 /* determine if a buffer is from our "safe" pool */
174 read_lock_irqsave(&device_info->lock, flags); in find_safe_buffer()
176 list_for_each_entry(b, &device_info->safe_buffers, node) in find_safe_buffer()
177 if (b->safe_dma_addr <= safe_dma_addr && in find_safe_buffer()
178 b->safe_dma_addr + b->size > safe_dma_addr) { in find_safe_buffer()
183 read_unlock_irqrestore(&device_info->lock, flags); in find_safe_buffer()
192 dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf); in free_safe_buffer()
194 write_lock_irqsave(&device_info->lock, flags); in free_safe_buffer()
196 list_del(&buf->node); in free_safe_buffer()
198 write_unlock_irqrestore(&device_info->lock, flags); in free_safe_buffer()
200 if (buf->pool) in free_safe_buffer()
201 dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr); in free_safe_buffer()
203 dma_free_coherent(device_info->dev, buf->size, buf->safe, in free_safe_buffer()
204 buf->safe_dma_addr); in free_safe_buffer()
214 if (!dev || !dev->archdata.dmabounce) in find_safe_buffer_dev()
220 return find_safe_buffer(dev->archdata.dmabounce, dma_addr); in find_safe_buffer_dev()
225 if (!dev || !dev->archdata.dmabounce) in needs_bounce()
228 if (dev->dma_mask) { in needs_bounce()
229 unsigned long limit, mask = *dev->dma_mask; in needs_bounce()
233 dev_err(dev, "DMA mapping too big (requested %#x " in needs_bounce()
234 "mask %#Lx)\n", size, *dev->dma_mask); in needs_bounce()
235 return -E2BIG; in needs_bounce()
238 /* Figure out if we need to bounce from the DMA mask. */ in needs_bounce()
239 if ((dma_addr | (dma_addr + size - 1)) & ~mask) in needs_bounce()
243 return !!dev->archdata.dmabounce->needs_bounce(dev, dma_addr, size); in needs_bounce()
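The mask test on line 239 ORs the first and last bus address of the mapping together and keeps only the bits the device cannot drive; any nonzero result means some byte of the buffer is unreachable and the mapping must be bounced. A small stand-alone illustration of the same expression, using a made-up 26-bit (64MB) mask and made-up addresses:

#include <stdint.h>
#include <stdio.h>

/* Same check as needs_bounce(): nonzero means part of the range is unreachable. */
static int out_of_mask(uint32_t dma_addr, uint32_t size, uint32_t mask)
{
	return ((dma_addr | (dma_addr + size - 1)) & ~mask) != 0;
}

int main(void)
{
	uint32_t mask = 0x03ffffff;	/* 26-bit DMA mask: first 64MB reachable */

	printf("%d\n", out_of_mask(0x00100000, 0x1000, mask));	/* 0: fully inside      */
	printf("%d\n", out_of_mask(0x03fff000, 0x2000, mask));	/* 1: tail crosses 64MB */
	printf("%d\n", out_of_mask(0x04000000, 0x1000, mask));	/* 1: entirely above    */
	return 0;
}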
250 struct dmabounce_device_info *device_info = dev->archdata.dmabounce; in map_single()
254 DO_STATS ( device_info->map_op_count++ ); in map_single()
258 dev_err(dev, "%s: unable to map unsafe buffer %p!\n", in map_single()
263 dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n", in map_single()
264 __func__, buf->ptr, virt_to_dma(dev, buf->ptr), in map_single()
265 buf->safe, buf->safe_dma_addr); in map_single()
269 dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n", in map_single()
270 __func__, ptr, buf->safe, size); in map_single()
271 memcpy(buf->safe, ptr, size); in map_single()
274 return buf->safe_dma_addr; in map_single()
281 BUG_ON(buf->size != size); in unmap_single()
282 BUG_ON(buf->direction != dir); in unmap_single()
284 dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n", in unmap_single()
285 __func__, buf->ptr, virt_to_dma(dev, buf->ptr), in unmap_single()
286 buf->safe, buf->safe_dma_addr); in unmap_single()
288 DO_STATS(dev->archdata.dmabounce->bounce_count++); in unmap_single()
292 void *ptr = buf->ptr; in unmap_single()
294 dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n", in unmap_single()
295 __func__, buf->safe, ptr, size); in unmap_single()
296 memcpy(ptr, buf->safe, size); in unmap_single()
305 free_safe_buffer(dev->archdata.dmabounce, buf); in unmap_single()
312 * allocate a 'safe' buffer and copy the unsafe buffer into it.
313 * substitute the safe buffer for the unsafe one.
314 * (basically move the buffer from an unsafe area to a safe one)
338 dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n"); in dmabounce_map_page()
346 * see if a mapped address was really a "safe" buffer and if so, copy
347 * the data from the safe buffer back to the unsafe buffer and free up
348 * the safe buffer. (basically return things back to the way they
356 dev_dbg(dev, "%s(dma=%#x,size=%d,dir=%x)\n", in dmabounce_unmap_page()
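Drivers never call these routines directly: once dmabounce is registered for a device, the normal streaming DMA API is routed through dmabounce_map_page() and dmabounce_unmap_page(), so the usual map/unmap sequence transparently allocates and releases a bounce buffer when one is needed. A sketch of that driver-side sequence (foo_start_tx and its parameters are placeholders, not part of this file):

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Illustrative only: a driver streaming 'len' bytes out to its device. */
static int foo_start_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... program 'handle' into the device and start the transfer ... */

	/* Releases the bounce buffer (data is copied back only for FROM_DEVICE). */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}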
374 dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n", in __dmabounce_sync_for_cpu()
381 off = addr - buf->safe_dma_addr; in __dmabounce_sync_for_cpu()
383 BUG_ON(buf->direction != dir); in __dmabounce_sync_for_cpu()
385 dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n", in __dmabounce_sync_for_cpu()
386 __func__, buf->ptr, virt_to_dma(dev, buf->ptr), off, in __dmabounce_sync_for_cpu()
387 buf->safe, buf->safe_dma_addr); in __dmabounce_sync_for_cpu()
389 DO_STATS(dev->archdata.dmabounce->bounce_count++); in __dmabounce_sync_for_cpu()
392 dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n", in __dmabounce_sync_for_cpu()
393 __func__, buf->safe + off, buf->ptr + off, sz); in __dmabounce_sync_for_cpu()
394 memcpy(buf->ptr + off, buf->safe + off, sz); in __dmabounce_sync_for_cpu()
414 dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n", in __dmabounce_sync_for_device()
421 off = addr - buf->safe_dma_addr; in __dmabounce_sync_for_device()
423 BUG_ON(buf->direction != dir); in __dmabounce_sync_for_device()
425 dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n", in __dmabounce_sync_for_device()
426 __func__, buf->ptr, virt_to_dma(dev, buf->ptr), off, in __dmabounce_sync_for_device()
427 buf->safe, buf->safe_dma_addr); in __dmabounce_sync_for_device()
429 DO_STATS(dev->archdata.dmabounce->bounce_count++); in __dmabounce_sync_for_device()
432 dev_dbg(dev, "%s: copy out unsafe %p to safe %p, size %d\n", in __dmabounce_sync_for_device()
433 __func__,buf->ptr + off, buf->safe + off, sz); in __dmabounce_sync_for_device()
434 memcpy(buf->safe + off, buf->ptr + off, sz); in __dmabounce_sync_for_device()
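The off/sz arithmetic in both sync helpers is what lets a driver synchronize only part of a mapping: the offset the driver passes in becomes the offset into the bounce buffer, and only that slice is copied. A hedged sketch of the driver-side call that would land here (the foo_* names and the header/payload split are invented for the example):

#include <linux/dma-mapping.h>

/*
 * Illustrative only: the device has written a payload at offset 'hdr_len'
 * into a DMA_FROM_DEVICE mapping; sync just that slice back to the CPU.
 */
static void foo_rx_complete(struct device *dev, dma_addr_t handle,
			    size_t hdr_len, size_t payload_len)
{
	/* Lands in __dmabounce_sync_for_cpu() with off = hdr_len, sz = payload_len. */
	dma_sync_single_range_for_cpu(dev, handle, hdr_len, payload_len,
				      DMA_FROM_DEVICE);

	/* ... CPU reads the payload from the (possibly bounced) buffer ... */
}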
450 if (dev->archdata.dmabounce) in dmabounce_dma_supported()
481 pool->size = size; in dmabounce_init_pool()
482 DO_STATS(pool->allocs = 0); in dmabounce_init_pool()
483 pool->pool = dma_pool_create(name, dev, size, in dmabounce_init_pool()
485 0 /* no page-crossing issues */); in dmabounce_init_pool()
487 return pool->pool ? 0 : -ENOMEM; in dmabounce_init_pool()
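dmabounce_init_pool() is a thin wrapper over the generic DMA pool API, and alloc_safe_buffer()/free_safe_buffer() above simply draw from and return to the pools it creates. For reference, the same create/alloc/free cycle in isolation (the pool name, block size and error handling are illustrative):

#include <linux/dmapool.h>
#include <linux/gfp.h>

/* Illustrative stand-alone use of the API wrapped by dmabounce_init_pool(). */
static int foo_pool_demo(struct device *dev)
{
	struct dma_pool *pool;
	dma_addr_t dma;
	void *vaddr;

	/* name, device, block size, alignment, boundary (0 = no restriction) */
	pool = dma_pool_create("foo-bounce", dev, 512, 0, 0);
	if (!pool)
		return -ENOMEM;

	vaddr = dma_pool_alloc(pool, GFP_ATOMIC, &dma);	/* one coherent block */
	if (vaddr)
		dma_pool_free(pool, vaddr, dma);	/* return it to the pool */

	dma_pool_destroy(pool);
	return 0;
}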
501 return -ENOMEM; in dmabounce_register_dev()
504 ret = dmabounce_init_pool(&device_info->small, dev, in dmabounce_register_dev()
508 "dmabounce: could not allocate DMA pool for %ld byte objects\n", in dmabounce_register_dev()
514 ret = dmabounce_init_pool(&device_info->large, dev, in dmabounce_register_dev()
519 "dmabounce: could not allocate DMA pool for %ld byte objects\n", in dmabounce_register_dev()
525 device_info->dev = dev; in dmabounce_register_dev()
526 INIT_LIST_HEAD(&device_info->safe_buffers); in dmabounce_register_dev()
527 rwlock_init(&device_info->lock); in dmabounce_register_dev()
528 device_info->needs_bounce = needs_bounce_fn; in dmabounce_register_dev()
531 device_info->total_allocs = 0; in dmabounce_register_dev()
532 device_info->map_op_count = 0; in dmabounce_register_dev()
533 device_info->bounce_count = 0; in dmabounce_register_dev()
534 device_info->attr_res = device_create_file(dev, &dev_attr_dmabounce_stats); in dmabounce_register_dev()
537 dev->archdata.dmabounce = device_info; in dmabounce_register_dev()
545 dma_pool_destroy(device_info->small.pool); in dmabounce_register_dev()
554 struct dmabounce_device_info *device_info = dev->archdata.dmabounce; in dmabounce_unregister_dev()
556 dev->archdata.dmabounce = NULL; in dmabounce_unregister_dev()
566 if (!list_empty(&device_info->safe_buffers)) { in dmabounce_unregister_dev()
572 if (device_info->small.pool) in dmabounce_unregister_dev()
573 dma_pool_destroy(device_info->small.pool); in dmabounce_unregister_dev()
574 if (device_info->large.pool) in dmabounce_unregister_dev()
575 dma_pool_destroy(device_info->large.pool); in dmabounce_unregister_dev()
578 if (device_info->attr_res == 0) in dmabounce_unregister_dev()
589 MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA window…