/*
 * Dynamic DMA mapping support.
 *
 * We never have any address translations to worry about, so this
 * is just alloc/free.
 */

#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <asm/cacheflush.h>

void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || (*dev->dma_mask < 0xffffffff))
		gfp |= GFP_DMA;
	ret = (void *)__get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_phys(ret);
	}
	return ret;
}

void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle)
{
	free_pages((unsigned long)vaddr, get_order(size));
}

void dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
				size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		/* Push dirty cache lines so the device reads current data. */
		flush_dcache_range(handle, size);
		break;
	case DMA_FROM_DEVICE:
		/* Should be clear already */
		break;
	default:
		if (printk_ratelimit())
			printk(KERN_ERR "dma_sync_single_for_device: unsupported dir %u\n",
			       dir);
		break;
	}
}

EXPORT_SYMBOL(dma_sync_single_for_device);
dma_addr_t dma_map_single(struct device *dev, void *addr, size_t size,
			  enum dma_data_direction dir)
{
	/* No IOMMU: the DMA address is simply the physical address. */
	dma_addr_t handle = virt_to_phys(addr);

	flush_dcache_range(handle, size);
	return handle;
}
EXPORT_SYMBOL(dma_map_single);

dma_addr_t dma_map_page(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction dir)
{
	/* Physical address of the page plus offset is the DMA handle. */
	dma_addr_t handle = page_to_phys(page) + offset;

	dma_sync_single_for_device(dev, handle, size, dir);
	return handle;
}
EXPORT_SYMBOL(dma_map_page);
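
/*
 * Usage sketch (illustration only, not part of the original file): how a
 * driver might combine the coherent and streaming helpers above.  The
 * device pointer, buffer names and sizes are hypothetical assumptions for
 * the example, and error handling is minimal.  Wrapped in #if 0 so it is
 * never built.
 */
#if 0
static char example_txbuf[512];

static int example_dma_setup(struct device *dev)
{
	dma_addr_t desc_dma, tx_dma;
	void *desc;

	/*
	 * Coherent allocation: the returned CPU pointer and DMA handle
	 * (the physical address, see dma_alloc_coherent() above) remain
	 * usable for the life of the buffer without further sync calls.
	 */
	desc = dma_alloc_coherent(dev, PAGE_SIZE, &desc_dma, GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	/*
	 * Streaming mapping: dma_map_single() flushes the data cache so
	 * the device observes the current buffer contents, then returns
	 * the physical address as the DMA handle.
	 */
	tx_dma = dma_map_single(dev, example_txbuf, sizeof(example_txbuf),
				DMA_TO_DEVICE);

	/* ... program the hardware with desc_dma and tx_dma here ... */

	dma_free_coherent(dev, PAGE_SIZE, desc, desc_dma);
	return 0;
}
#endif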