/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <asm/bug.h>
#include <asm/abs_addr.h>

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */

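/*
 * Illustrative sketch, not part of the original file: platform setup
 * code could program a fixed bus offset for a given device roughly
 * like this (the device pointer "pdev->dev" and the 0x80000000 offset
 * are hypothetical):
 *
 *	pdev->dev.archdata.dma_data = (void *)0x80000000ul;
 *
 * When no per-device value is set, get_dma_direct_offset() below
 * falls back to PCI_DRAM_OFFSET.
 */
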
static unsigned long get_dma_direct_offset(struct device *dev)
{
	if (dev)
		return (unsigned long)dev->archdata.dma_data;

	return PCI_DRAM_OFFSET;
}

void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret;
#ifdef CONFIG_NOT_COHERENT_CACHE
	ret = __dma_alloc_coherent(size, dma_handle, flag);
	if (ret == NULL)
		return NULL;
	*dma_handle += get_dma_direct_offset(dev);
	return ret;
#else
	struct page *page;
	int node = dev_to_node(dev);

	/* ignore region specifiers */
	flag &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = virt_to_abs(ret) + get_dma_direct_offset(dev);

	return ret;
#endif
}

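/*
 * Illustrative sketch, not part of the original file: consumers reach
 * the callback above through the generic DMA API rather than calling
 * it directly, e.g.:
 *
 *	dma_addr_t handle;
 *	void *buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *
 * where "dev" is the consumer's struct device. The returned handle
 * already includes the per-device offset applied above.
 */
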
void dma_direct_free_coherent(struct device *dev, size_t size,
			      void *vaddr, dma_addr_t dma_handle)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	__dma_free_coherent(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}

static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
		sg->dma_length = sg->length;
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
	}

	return nents;
}

static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs)
{
	/* Nothing to undo: direct mappings hold no per-mapping state. */
}

static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PPC64
	/* Could be improved to check against the amount of memory, though
	 * that is better done via some global so platforms can set the
	 * limit in case they have limited DMA windows.
	 */
	return mask >= DMA_32BIT_MASK;
#else
	return 1;
#endif
}

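/*
 * Illustrative sketch, not part of the original file: drivers reach
 * this check indirectly through dma_set_mask(), e.g.:
 *
 *	if (dma_set_mask(dev, DMA_32BIT_MASK))
 *		dev_warn(dev, "no suitable DMA available\n");
 *
 * dma_set_mask() typically validates the requested mask via the
 * dma_supported callback before accepting it.
 */
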
static inline dma_addr_t dma_direct_map_page(struct device *dev,
					     struct page *page,
					     unsigned long offset,
					     size_t size,
					     enum dma_data_direction dir,
					     struct dma_attrs *attrs)
{
	BUG_ON(dir == DMA_NONE);
	__dma_sync_page(page, offset, size, dir);
	return page_to_phys(page) + offset + get_dma_direct_offset(dev);
}

static inline void dma_direct_unmap_page(struct device *dev,
					 dma_addr_t dma_address,
					 size_t size,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
	/* Nothing to undo: direct mappings hold no per-mapping state. */
}

#ifdef CONFIG_NOT_COHERENT_CACHE
static inline void dma_direct_sync_sg(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline void dma_direct_sync_single_range(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	__dma_sync(bus_to_virt(dma_handle + offset), size, direction);
}
#endif

struct dma_mapping_ops dma_direct_ops = {
	.alloc_coherent	= dma_direct_alloc_coherent,
	.free_coherent	= dma_direct_free_coherent,
	.map_sg		= dma_direct_map_sg,
	.unmap_sg	= dma_direct_unmap_sg,
	.dma_supported	= dma_direct_dma_supported,
	.map_page	= dma_direct_map_page,
	.unmap_page	= dma_direct_unmap_page,
#ifdef CONFIG_NOT_COHERENT_CACHE
	.sync_single_range_for_cpu	= dma_direct_sync_single_range,
	.sync_single_range_for_device	= dma_direct_sync_single_range,
	.sync_sg_for_cpu		= dma_direct_sync_sg,
	.sync_sg_for_device		= dma_direct_sync_sg,
#endif
};
EXPORT_SYMBOL(dma_direct_ops);
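
/*
 * Illustrative sketch, not part of the original file: platform code
 * selects this implementation per device by pointing the device's
 * archdata at this ops table, e.g. via the powerpc set_dma_ops()
 * helper (assuming that helper exists in this tree):
 *
 *	set_dma_ops(dev, &dma_direct_ops);
 */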