#ifndef _ASM_X86_DMA_MAPPING_H
#define _ASM_X86_DMA_MAPPING_H

/*
 * IOMMU interface. See Documentation/PCI/PCI-DMA-mapping.txt and
 * Documentation/DMA-API.txt for documentation.
 */

#include <linux/scatterlist.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <asm-generic/dma-coherent.h>

extern dma_addr_t bad_dma_address;
extern int iommu_merge;
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;

struct dma_mapping_ops {
        int (*mapping_error)(struct device *dev,
                             dma_addr_t dma_addr);
        void *(*alloc_coherent)(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t gfp);
        void (*free_coherent)(struct device *dev, size_t size,
                              void *vaddr, dma_addr_t dma_handle);
        dma_addr_t (*map_single)(struct device *hwdev, phys_addr_t ptr,
                                 size_t size, int direction);
        void (*unmap_single)(struct device *dev, dma_addr_t addr,
                             size_t size, int direction);
        void (*sync_single_for_cpu)(struct device *hwdev,
                                    dma_addr_t dma_handle, size_t size,
                                    int direction);
        void (*sync_single_for_device)(struct device *hwdev,
                                       dma_addr_t dma_handle, size_t size,
                                       int direction);
        void (*sync_single_range_for_cpu)(struct device *hwdev,
                                          dma_addr_t dma_handle,
                                          unsigned long offset,
                                          size_t size, int direction);
        void (*sync_single_range_for_device)(struct device *hwdev,
                                             dma_addr_t dma_handle,
                                             unsigned long offset,
                                             size_t size, int direction);
        void (*sync_sg_for_cpu)(struct device *hwdev,
                                struct scatterlist *sg, int nelems,
                                int direction);
        void (*sync_sg_for_device)(struct device *hwdev,
                                   struct scatterlist *sg, int nelems,
                                   int direction);
        int (*map_sg)(struct device *hwdev, struct scatterlist *sg,
                      int nents, int direction);
        void (*unmap_sg)(struct device *hwdev,
                         struct scatterlist *sg, int nents,
                         int direction);
        int (*dma_supported)(struct device *hwdev, u64 mask);
        int is_phys;
};

extern struct dma_mapping_ops *dma_ops;

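/*
 * A minimal sketch of how a backend might populate this table; the
 * "example_*" names are illustrative, not an in-tree implementation:
 *
 *      static struct dma_mapping_ops example_dma_ops = {
 *              .map_single     = example_map_single,
 *              .unmap_single   = example_unmap_single,
 *              .map_sg         = example_map_sg,
 *              .unmap_sg       = example_unmap_sg,
 *              .dma_supported  = example_dma_supported,
 *              .is_phys        = 0,
 *      };
 *
 * Hooks other than map_single and map_sg may be left NULL; the inline
 * wrappers below check all the others before calling them.
 */
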
static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_X86_32
        return dma_ops;
#else
        if (unlikely(!dev) || !dev->archdata.dma_ops)
                return dma_ops;
        else
                return dev->archdata.dma_ops;
#endif
}

/* Make sure we keep the same behaviour */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        struct dma_mapping_ops *ops = get_dma_ops(dev);
        if (ops->mapping_error)
                return ops->mapping_error(dev, dma_addr);

        return (dma_addr == bad_dma_address);
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h) (1)

extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);

extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
                                        dma_addr_t *dma_addr, gfp_t flag);

static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
               int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        return ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
                 int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->unmap_single)
                ops->unmap_single(dev, addr, size, direction);
}

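/*
 * Typical streaming use of the two helpers above, as a minimal sketch
 * (the device, buffer and length names are illustrative):
 *
 *      dma_addr_t bus;
 *
 *      bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *      if (dma_mapping_error(dev, bus))
 *              return -ENOMEM;
 *      ... point the hardware at 'bus' and run the transfer ...
 *      dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
 */
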
static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
           int nents, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        return ops->map_sg(hwdev, sg, nents, direction);
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
             int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->unmap_sg)
                ops->unmap_sg(hwdev, sg, nents, direction);
}

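/*
 * Scatter-gather mapping under the same caveats. dma_map_sg() returns
 * the number of DMA segments actually produced, which can be smaller
 * than 'nents' when an IOMMU merges entries, and 0 on failure;
 * dma_unmap_sg() still takes the original 'nents'. A sketch:
 *
 *      int count = dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *
 *      if (count == 0)
 *              return -ENOMEM;
 *      ... program one descriptor per returned segment ...
 *      dma_unmap_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 */
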
static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                        size_t size, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_single_for_cpu)
                ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
        flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
                           size_t size, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_single_for_device)
                ops->sync_single_for_device(hwdev, dma_handle, size, direction);
        flush_write_buffers();
}

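/*
 * When the CPU must look at a still-mapped streaming buffer, ownership
 * has to bounce through the sync calls; a sketch for a receive buffer
 * (names illustrative):
 *
 *      dma_sync_single_for_cpu(dev, bus, len, DMA_FROM_DEVICE);
 *      ... CPU reads the freshly DMA'd data ...
 *      dma_sync_single_for_device(dev, bus, len, DMA_FROM_DEVICE);
 *      ... device may DMA into the buffer again ...
 */
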
static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                              unsigned long offset, size_t size, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_single_range_for_cpu)
                ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
                                               size, direction);
        flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
                                 unsigned long offset, size_t size,
                                 int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_single_range_for_device)
                ops->sync_single_range_for_device(hwdev, dma_handle,
                                                  offset, size, direction);
        flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
                    int nelems, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_sg_for_cpu)
                ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
        flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                       int nelems, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_sg_for_device)
                ops->sync_sg_for_device(hwdev, sg, nelems, direction);

        flush_write_buffers();
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                                      size_t offset, size_t size,
                                      int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(direction));
        return ops->map_single(dev, page_to_phys(page) + offset,
                               size, direction);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
                                  size_t size, int direction)
{
        dma_unmap_single(dev, addr, size, direction);
}

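/*
 * Mapping a region inside a page, e.g. one packet buffer carved out of
 * a larger page; a sketch with illustrative names:
 *
 *      dma_addr_t bus;
 *
 *      bus = dma_map_page(dev, page, offset, len, DMA_FROM_DEVICE);
 *      if (dma_mapping_error(dev, bus))
 *              return -ENOMEM;
 *      ...
 *      dma_unmap_page(dev, bus, len, DMA_FROM_DEVICE);
 */
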
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction dir)
{
        flush_write_buffers();
}

static inline int dma_get_cache_alignment(void)
{
        /* no easy way to get cache size on all x86, so return the
         * maximum possible, to be safe */
        return boot_cpu_data.x86_clflush_size;
}

static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
                                                    gfp_t gfp)
{
        unsigned long dma_mask = dev->coherent_dma_mask;

        if (!dma_mask)
                dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;

        return dma_mask;
}

static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
{
        unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);

        if (dma_mask <= DMA_24BIT_MASK)
                gfp |= GFP_DMA;
#ifdef CONFIG_X86_64
        if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
                gfp |= GFP_DMA32;
#endif
        return gfp;
}

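/*
 * Worked example of the two helpers above: a device with no
 * coherent_dma_mask and a plain GFP_KERNEL request falls back to
 * DMA_32BIT_MASK, so a 64-bit kernel adds GFP_DMA32 and the allocation
 * stays below 4GB; a 24-bit mask (old ISA-style hardware) forces
 * GFP_DMA instead.
 */
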
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
                   gfp_t gfp)
{
        struct dma_mapping_ops *ops = get_dma_ops(dev);
        void *memory;

        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

        if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
                return memory;

        if (!dev) {
                dev = &x86_dma_fallback_dev;
                gfp |= GFP_DMA;
        }

        if (!is_device_dma_capable(dev))
                return NULL;

        if (!ops->alloc_coherent)
                return NULL;

        return ops->alloc_coherent(dev, size, dma_handle,
                                   dma_alloc_coherent_gfp_flags(dev, gfp));
}

static inline void dma_free_coherent(struct device *dev, size_t size,
                                     void *vaddr, dma_addr_t bus)
{
        struct dma_mapping_ops *ops = get_dma_ops(dev);

        WARN_ON(irqs_disabled());       /* for portability */

        if (dma_release_from_coherent(dev, get_order(size), vaddr))
                return;

        if (ops->free_coherent)
                ops->free_coherent(dev, size, vaddr, bus);
}

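/*
 * Coherent allocation usage, sketched with illustrative names; the
 * memory stays consistent with the device for the lifetime of the
 * mapping, so no dma_sync_*() calls are needed:
 *
 *      dma_addr_t bus;
 *      void *ring;
 *
 *      ring = dma_alloc_coherent(dev, ring_bytes, &bus, GFP_KERNEL);
 *      if (!ring)
 *              return -ENOMEM;
 *      ...
 *      dma_free_coherent(dev, ring_bytes, ring, bus);
 */
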
#endif /* _ASM_X86_DMA_MAPPING_H */