#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/dma-attrs.h>
#include <linux/dma-debug.h>

#include <asm/memory.h>

#include <xen/xen.h>
#include <asm/xen/hypervisor.h>

#define DMA_ERROR_CODE	(~(dma_addr_t)0x0)
extern struct dma_map_ops arm_dma_ops;
extern struct dma_map_ops arm_coherent_dma_ops;

static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
{
	if (dev && dev->archdata.dma_ops)
		return dev->archdata.dma_ops;
	return &arm_dma_ops;
}

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (xen_initial_domain())
		return xen_dma_ops;
	else
		return __generic_dma_ops(dev);
}

static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
{
	BUG_ON(!dev);
	dev->archdata.dma_ops = ops;
}
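
/*
 * Illustrative sketch (not part of this header): platform code that knows
 * a bus is cache-coherent could install the coherent ops for a device
 * before drivers start using the DMA API; "pdev" is a hypothetical
 * platform device:
 *
 *	set_dma_ops(&pdev->dev, &arm_coherent_dma_ops);
 *
 * Most platforms should instead rely on arch_setup_dma_ops() below.
 */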

#define HAVE_ARCH_DMA_SUPPORTED 1
extern int dma_supported(struct device *dev, u64 mask);

/*
 * Note that while the generic code provides dummy dma_{alloc,free}_noncoherent
 * implementations, we don't provide a dma_cache_sync function so drivers using
 * this API are highlighted with build warnings.
 */
#include <asm-generic/dma-mapping-common.h>

#ifdef __arch_page_to_dma
#error Please update to __arch_pfn_to_dma
#endif

/*
 * dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private
 * functions used internally by the DMA-mapping API to provide DMA
 * addresses. They must not be used by drivers.
 */
#ifndef __arch_pfn_to_dma
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	if (dev)
		pfn -= dev->dma_pfn_offset;
	return (dma_addr_t)__pfn_to_bus(pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	unsigned long pfn = __bus_to_pfn(addr);

	if (dev)
		pfn += dev->dma_pfn_offset;

	return pfn;
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	if (dev) {
		unsigned long pfn = dma_to_pfn(dev, addr);

		return phys_to_virt(__pfn_to_phys(pfn));
	}

	return (void *)__bus_to_virt((unsigned long)addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	if (dev)
		return pfn_to_dma(dev, virt_to_pfn(addr));

	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}

#else
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return __arch_pfn_to_dma(dev, pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_pfn(dev, addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return __arch_virt_to_dma(dev, addr);
}
#endif

/* The ARM override for dma_max_pfn() */
static inline unsigned long dma_max_pfn(struct device *dev)
{
	return dma_to_pfn(dev, *dev->dma_mask);
}
#define dma_max_pfn(dev) dma_max_pfn(dev)

#define arch_setup_dma_ops arch_setup_dma_ops
extern void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			       struct iommu_ops *iommu, bool coherent);

#define arch_teardown_dma_ops arch_teardown_dma_ops
extern void arch_teardown_dma_ops(struct device *dev);

/* do not use this function in a driver */
static inline bool is_device_dma_coherent(struct device *dev)
{
	return dev->archdata.dma_coherent;
}

static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	unsigned int offset = paddr & ~PAGE_MASK;
	return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset;
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
{
	unsigned int offset = dev_addr & ~PAGE_MASK;
	return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset;
}

static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	u64 limit, mask;

	if (!dev->dma_mask)
		return 0;

	mask = *dev->dma_mask;

	limit = (mask + 1) & ~mask;
	if (limit && size > limit)
		return 0;

	if ((addr | (addr + size - 1)) & ~mask)
		return 0;

	return 1;
}
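
/*
 * Worked example (illustrative, values assumed): with a 32-bit DMA mask,
 * *dev->dma_mask == 0xffffffff, dma_capable() computes
 *
 *	limit = (0xffffffff + 1) & ~0xffffffff = 0x100000000
 *
 * so any single transfer larger than 4GiB is rejected, and the
 * addr | (addr + size - 1) test rejects any buffer whose first or last
 * byte lies above the 4GiB boundary. With a full 64-bit mask, limit
 * wraps to 0 and the size check is skipped.
 */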

static inline void dma_mark_clean(void *addr, size_t size) { }

extern int arm_dma_set_mask(struct device *dev, u64 dma_mask);

/**
 * arm_dma_alloc - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 * @attrs: optional attributes that specify mapping properties
 *
 * Allocate some memory for a device for performing DMA. This function
 * allocates pages, and will return the CPU-viewed address, and sets @handle
 * to be the device-viewed address.
 */
extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			   gfp_t gfp, struct dma_attrs *attrs);
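
/*
 * Illustrative usage (a sketch, not part of this header): drivers do not
 * call arm_dma_alloc() directly but reach it through the generic DMA API.
 * "dev", "cpu" and "handle" below are assumed driver-local names:
 *
 *	void *cpu;
 *	dma_addr_t handle;
 *
 *	cpu = dma_alloc_coherent(dev, SZ_4K, &handle, GFP_KERNEL);
 *	if (!cpu)
 *		return -ENOMEM;
 *	...	program "handle" into the device, use "cpu" from the CPU
 *	dma_free_coherent(dev, SZ_4K, cpu, handle);
 */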

/**
 * arm_dma_free - free memory allocated by arm_dma_alloc
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @attrs: optional attributes that specify mapping properties
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * arm_dma_alloc().
 *
 * References to memory and mappings associated with cpu_addr/handle
 * are illegal during and after this call executes.
 */
extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
			 dma_addr_t handle, struct dma_attrs *attrs);

/**
 * arm_dma_mmap - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 * @attrs: optional attributes that specify mapping properties
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space. The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			struct dma_attrs *attrs);
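
/*
 * Illustrative sketch (assumed names, not part of this header): a driver
 * would typically reach this through the generic dma_mmap_coherent()
 * helper from its own mmap file operation. "foo" is a hypothetical
 * driver state structure:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo *foo = file->private_data;
 *
 *		return dma_mmap_coherent(foo->dev, vma, foo->cpu_addr,
 *					 foo->handle, foo->size);
 *	}
 */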

/*
 * This can be called during early boot to increase the size of the atomic
 * coherent DMA pool above the default value of 256KiB. It must be called
 * before postcore_initcall.
 */
extern void __init init_dma_coherent_pool_size(unsigned long size);
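
/*
 * Illustrative sketch (assumed names and size): a machine's early init
 * hook could grow the pool before postcore_initcall runs:
 *
 *	static void __init foo_init_early(void)
 *	{
 *		init_dma_coherent_pool_size(SZ_1M);
 *	}
 */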

/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
 * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
 *
 * The following are helper functions used by the dmabounce subsystem
 *
 */

/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 * @needs_bounce_fn: called to determine whether buffer needs bouncing
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing. The function will allocate
 * appropriate DMA pools for the device.
 */
extern int dmabounce_register_dev(struct device *, unsigned long,
		unsigned long, int (*)(struct device *, dma_addr_t, size_t));
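
/*
 * Illustrative sketch (assumed names and window size): platform code for
 * a bus with a limited inbound window could register a bounce function
 * that flags any buffer extending past that window, here assumed to be
 * the first 64MB:
 *
 *	static int foo_needs_bounce(struct device *dev, dma_addr_t addr,
 *				    size_t size)
 *	{
 *		return (addr + size) > SZ_64M;
 *	}
 *
 *	ret = dmabounce_register_dev(dev, SZ_2K, SZ_64K, foo_needs_bounce);
 */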

/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when a device
 * that was previously registered with dmabounce_register_dev is removed
 * from the system.
 *
 */
extern void dmabounce_unregister_dev(struct device *);

/*
 * The scatter list versions of the above methods.
 */
extern int arm_dma_map_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction, struct dma_attrs *attrs);
extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction, struct dma_attrs *attrs);
extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		struct dma_attrs *attrs);
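
/*
 * Illustrative usage (a sketch; "dev", "sgl" and "count" are assumed
 * driver-local names): drivers reach these through the generic DMA API
 * rather than calling the arm_dma_* entry points directly:
 *
 *	int nents = dma_map_sg(dev, sgl, count, DMA_TO_DEVICE);
 *
 *	if (nents == 0)
 *		return -ENOMEM;
 *	...	start the transfer using the mapped entries
 *	dma_unmap_sg(dev, sgl, count, DMA_TO_DEVICE);
 */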

#endif /* __KERNEL__ */
#endif