// SPDX-License-Identifier: GPL-2.0
/*
 * arch-independent dma-mapping routines
 *
 * Copyright (c) 2006 SUSE Linux Products GmbH
 * Copyright (c) 2006 Tejun Heo <teheo@suse.de>
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/acpi.h>
#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "debug.h"
#include "direct.h"

bool dma_default_coherent;

/*
 * Managed DMA API
 */
struct dma_devres {
	size_t		size;
	void		*vaddr;
	dma_addr_t	dma_handle;
	unsigned long	attrs;
};

static void dmam_release(struct device *dev, void *res)
{
	struct dma_devres *this = res;

	dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
			this->attrs);
}

static int dmam_match(struct device *dev, void *res, void *match_data)
{
	struct dma_devres *this = res, *match = match_data;

	if (this->vaddr == match->vaddr) {
		WARN_ON(this->size != match->size ||
			this->dma_handle != match->dma_handle);
		return 1;
	}
	return 0;
}

/**
 * dmam_free_coherent - Managed dma_free_coherent()
 * @dev: Device to free coherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed dma_free_coherent().
 */
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle)
{
	struct dma_devres match_data = { size, vaddr, dma_handle };

	dma_free_coherent(dev, size, vaddr, dma_handle);
	WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
}
EXPORT_SYMBOL(dmam_free_coherent);

/**
 * dmam_alloc_attrs - Managed dma_alloc_attrs()
 * @dev: Device to allocate memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: Flags in the DMA_ATTR_* namespace.
 *
 * Managed dma_alloc_attrs(). Memory allocated using this function will be
 * automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	struct dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;

	vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}

	dr->vaddr = vaddr;
	dr->dma_handle = *dma_handle;
	dr->size = size;
	dr->attrs = attrs;

	devres_add(dev, dr);

	return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_attrs);
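
/*
 * Usage sketch (editor's illustration, not part of this file): a driver
 * that wants a coherent buffer torn down automatically on detach.  The
 * probe function name and buffer size below are assumptions.
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		dma_addr_t dma;
 *		void *buf;
 *
 *		buf = dmam_alloc_attrs(dev, PAGE_SIZE, &dma, GFP_KERNEL, 0);
 *		if (!buf)
 *			return -ENOMEM;
 *		return 0;
 *	}
 *
 * No explicit free is needed: devres calls dma_free_attrs() when the
 * driver detaches; dmam_free_coherent() can be used for an early release.
 */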

static bool dma_go_direct(struct device *dev, dma_addr_t mask,
		const struct dma_map_ops *ops)
{
	if (likely(!ops))
		return true;
#ifdef CONFIG_DMA_OPS_BYPASS
	if (dev->dma_ops_bypass)
		return min_not_zero(mask, dev->bus_dma_limit) >=
			    dma_direct_get_required_mask(dev);
#endif
	return false;
}


/*
 * Check if the device uses a direct mapping for streaming DMA operations.
 * This allows IOMMU drivers to set a bypass mode if the DMA mask is large
 * enough.
 */
static inline bool dma_alloc_direct(struct device *dev,
		const struct dma_map_ops *ops)
{
	return dma_go_direct(dev, dev->coherent_dma_mask, ops);
}

static inline bool dma_map_direct(struct device *dev,
		const struct dma_map_ops *ops)
{
	return dma_go_direct(dev, *dev->dma_mask, ops);
}

dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	if (WARN_ON_ONCE(!dev->dma_mask))
		return DMA_MAPPING_ERROR;

	if (dma_map_direct(dev, ops) ||
	    arch_dma_map_page_direct(dev, page_to_phys(page) + offset + size))
		addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
	else
		addr = ops->map_page(dev, page, offset, size, dir, attrs);
	debug_dma_map_page(dev, page, offset, size, dir, addr, attrs);

	return addr;
}
EXPORT_SYMBOL(dma_map_page_attrs);
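
/*
 * Usage sketch (illustrative only): mapping a single page for device
 * reads and checking for mapping failure.  The page, length and
 * direction are assumptions.
 *
 *	dma_addr_t addr;
 *
 *	addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, addr))
 *		return -ENOMEM;
 *	... hardware DMA reads from addr ...
 *	dma_unmap_page(dev, addr, PAGE_SIZE, DMA_TO_DEVICE);
 */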

void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops) ||
	    arch_dma_unmap_page_direct(dev, addr + size))
		dma_direct_unmap_page(dev, addr, size, dir, attrs);
	else if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_page_attrs);

static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
	 int nents, enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	int ents;

	BUG_ON(!valid_dma_direction(dir));

	if (WARN_ON_ONCE(!dev->dma_mask))
		return 0;

	if (dma_map_direct(dev, ops) ||
	    arch_dma_map_sg_direct(dev, sg, nents))
		ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
	else
		ents = ops->map_sg(dev, sg, nents, dir, attrs);

	if (ents > 0)
		debug_dma_map_sg(dev, sg, nents, ents, dir, attrs);
	else if (WARN_ON_ONCE(ents != -EINVAL && ents != -ENOMEM &&
			      ents != -EIO))
		return -EIO;

	return ents;
}

/**
 * dma_map_sg_attrs - Map the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @sg: The sg_table object describing the buffer
 * @nents: Number of entries to map
 * @dir: DMA direction
 * @attrs: Optional DMA attributes for the map operation
 *
 * Maps a buffer described by a scatterlist passed in the sg argument with
 * nents segments for the @dir DMA operation by the @dev device.
 *
 * Returns the number of mapped entries (which can be less than nents)
 * on success. Zero is returned for any error.
 *
 * dma_unmap_sg_attrs() should be used to unmap the buffer with the
 * original sg and original nents (not the value returned by this function).
 */
unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		    int nents, enum dma_data_direction dir, unsigned long attrs)
{
	int ret;

	ret = __dma_map_sg_attrs(dev, sg, nents, dir, attrs);
	if (ret < 0)
		return 0;
	return ret;
}
EXPORT_SYMBOL(dma_map_sg_attrs);
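
/*
 * Usage sketch (illustrative only): the value returned by dma_map_sg()
 * is what gets programmed into the hardware, while the unmap must use
 * the original nents.  The sgl and nents variables are assumptions.
 *
 *	int mapped = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *
 *	if (mapped == 0)
 *		return -EIO;
 *	... program 'mapped' hardware descriptors ...
 *	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 */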

/**
 * dma_map_sgtable - Map the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 * @attrs: Optional DMA attributes for the map operation
 *
 * Maps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After success, the
 * ownership for the buffer is transferred to the DMA domain.  One has to
 * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the
 * ownership of the buffer back to the CPU domain before touching the
 * buffer by the CPU.
 *
 * Returns 0 on success or a negative error code on error. The following
 * error codes are supported with the given meaning:
 *
 *   -EINVAL	An invalid argument, unaligned access or other error
 *		in usage. Will not succeed if retried.
 *   -ENOMEM	Insufficient resources (like memory or IOVA space) to
 *		complete the mapping. Should succeed if retried later.
 *   -EIO	Legacy error code with an unknown meaning, e.g. this is
 *		returned if a lower level call returned DMA_MAPPING_ERROR.
 */
int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	int nents;

	nents = __dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
	if (nents < 0)
		return nents;
	sgt->nents = nents;
	return 0;
}
EXPORT_SYMBOL_GPL(dma_map_sgtable);
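
/*
 * Usage sketch (illustrative only): the sg_table interface reports real
 * error codes, so -ENOMEM can be retried later while -EINVAL cannot.
 * The sgt variable is an assumption.
 *
 *	ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
 *	if (ret)
 *		return ret;
 *	... DMA using sgt->sgl and sgt->nents ...
 *	dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
 */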

void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (dma_map_direct(dev, ops) ||
	    arch_dma_unmap_sg_direct(dev, sg, nents))
		dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
	else if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}
EXPORT_SYMBOL(dma_unmap_sg_attrs);

dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr = DMA_MAPPING_ERROR;

	BUG_ON(!valid_dma_direction(dir));

	if (WARN_ON_ONCE(!dev->dma_mask))
		return DMA_MAPPING_ERROR;

	if (dma_map_direct(dev, ops))
		addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
	else if (ops->map_resource)
		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

	debug_dma_map_resource(dev, phys_addr, size, dir, addr, attrs);
	return addr;
}
EXPORT_SYMBOL(dma_map_resource);

void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (!dma_map_direct(dev, ops) && ops->unmap_resource)
		ops->unmap_resource(dev, addr, size, dir, attrs);
	debug_dma_unmap_resource(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_resource);

void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
	else if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_single_for_device(dev, addr, size, dir);
	else if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_sync_single_for_device);
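
/*
 * Usage sketch (illustrative only): a long-lived streaming mapping that
 * is handed back and forth between the CPU and the device.  The addr
 * and len variables are assumptions.
 *
 *	dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);
 *	... CPU inspects the received data ...
 *	dma_sync_single_for_device(dev, addr, len, DMA_FROM_DEVICE);
 *	... device may write to the buffer again ...
 */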

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_device);

/*
 * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
 * that the intention is to allow exporting memory allocated via the
 * coherent DMA APIs through the dma_buf API, which only accepts a
 * scattertable.  This presents a couple of problems:
 * 1. Not all memory allocated via the coherent DMA APIs is backed by
 *    a struct page
 * 2. Passing coherent DMA memory into the streaming APIs is not allowed
 *    as we will try to flush the memory through a different alias to that
 *    actually being used (and the flushes are redundant.)
 */
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_get_sgtable(dev, sgt, cpu_addr, dma_addr,
				size, attrs);
	if (!ops->get_sgtable)
		return -ENXIO;
	return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_get_sgtable_attrs);

#ifdef CONFIG_MMU
/*
 * Return the page attributes used for mapping dma_alloc_* memory, either in
 * kernel space if remapping is needed, or to userspace through dma_mmap_*.
 */
pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
{
	if (force_dma_unencrypted(dev))
		prot = pgprot_decrypted(prot);
	if (dev_is_dma_coherent(dev))
		return prot;
#ifdef CONFIG_ARCH_HAS_DMA_WRITE_COMBINE
	if (attrs & DMA_ATTR_WRITE_COMBINE)
		return pgprot_writecombine(prot);
#endif
	if (attrs & DMA_ATTR_SYS_CACHE_ONLY ||
	    attrs & DMA_ATTR_SYS_CACHE_ONLY_NWA)
		return pgprot_syscached(prot);
	return pgprot_dmacoherent(prot);
}
#endif /* CONFIG_MMU */

/**
 * dma_can_mmap - check if a given device supports dma_mmap_*
 * @dev: device to check
 *
 * Returns %true if @dev supports dma_mmap_coherent() and dma_mmap_attrs() to
 * map DMA allocations to userspace.
 */
bool dma_can_mmap(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_can_mmap(dev);
	return ops->mmap != NULL;
}
EXPORT_SYMBOL_GPL(dma_can_mmap);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs into user
 * space.  The coherent DMA buffer must not be freed by the driver until the
 * user space mapping has been released.
 */
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_mmap(dev, vma, cpu_addr, dma_addr, size,
				attrs);
	if (!ops->mmap)
		return -ENXIO;
	return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_mmap_attrs);
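
/*
 * Usage sketch (illustrative only): exposing a coherent buffer through a
 * driver's mmap file operation.  The foo_dev structure and its fields
 * are assumptions.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *foo = file->private_data;
 *
 *		if (!dma_can_mmap(foo->dev))
 *			return -ENXIO;
 *		return dma_mmap_coherent(foo->dev, vma, foo->cpu_addr,
 *					 foo->dma_addr, foo->size);
 *	}
 */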

u64 dma_get_required_mask(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_get_required_mask(dev);
	if (ops->get_required_mask)
		return ops->get_required_mask(dev);

	/*
	 * We require every DMA ops implementation to at least support a 32-bit
	 * DMA mask (and use bounce buffering if that isn't supported in
	 * hardware).  As the direct mapping code has its own routine to
	 * actually report an optimal mask we default to 32-bit here as that
	 * is the right thing for most IOMMUs, and at least not actively
	 * harmful in general.
	 */
	return DMA_BIT_MASK(32);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);

void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	WARN_ON_ONCE(!dev->coherent_dma_mask);

	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	/* let the implementation decide on the zone to allocate from: */
	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

	if (dma_alloc_direct(dev, ops))
		cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
	else if (ops->alloc)
		cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	else
		return NULL;

	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr, attrs);
	return cpu_addr;
}
EXPORT_SYMBOL(dma_alloc_attrs);

void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
		return;
	/*
	 * On non-coherent platforms which implement DMA-coherent buffers via
	 * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
	 * this far in IRQ context is a) at risk of a BUG_ON() or trying to
	 * sleep on some machines, and b) an indication that the driver is
	 * probably misusing the coherent API anyway.
	 */
	WARN_ON(irqs_disabled());

	if (!cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	if (dma_alloc_direct(dev, ops))
		dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
	else if (ops->free)
		ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
EXPORT_SYMBOL(dma_free_attrs);
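
/*
 * Usage sketch (illustrative only): a plain coherent allocation paired
 * with its free.  The ring structure and its members are assumptions.
 *
 *	ring->desc = dma_alloc_coherent(dev, ring->size, &ring->dma,
 *					GFP_KERNEL);
 *	if (!ring->desc)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, ring->size, ring->desc, ring->dma);
 */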

static struct page *__dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (WARN_ON_ONCE(!dev->coherent_dma_mask))
		return NULL;
	if (WARN_ON_ONCE(gfp & (__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM)))
		return NULL;

	size = PAGE_ALIGN(size);
	if (dma_alloc_direct(dev, ops))
		return dma_direct_alloc_pages(dev, size, dma_handle, dir, gfp);
	if (!ops->alloc_pages)
		return NULL;
	return ops->alloc_pages(dev, size, dma_handle, dir, gfp);
}

struct page *dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	struct page *page = __dma_alloc_pages(dev, size, dma_handle, dir, gfp);

	if (page)
		debug_dma_map_page(dev, page, 0, size, dir, *dma_handle, 0);
	return page;
}
EXPORT_SYMBOL_GPL(dma_alloc_pages);

static void __dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	size = PAGE_ALIGN(size);
	if (dma_alloc_direct(dev, ops))
		dma_direct_free_pages(dev, size, page, dma_handle, dir);
	else if (ops->free_pages)
		ops->free_pages(dev, size, page, dma_handle, dir);
}

void dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir)
{
	debug_dma_unmap_page(dev, dma_handle, size, dir);
	__dma_free_pages(dev, size, page, dma_handle, dir);
}
EXPORT_SYMBOL_GPL(dma_free_pages);

int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct page *page)
{
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	if (vma->vm_pgoff >= count || vma_pages(vma) > count - vma->vm_pgoff)
		return -ENXIO;
	return remap_pfn_range(vma, vma->vm_start,
			page_to_pfn(page) + vma->vm_pgoff,
			vma_pages(vma) << PAGE_SHIFT, vma->vm_page_prot);
}
EXPORT_SYMBOL_GPL(dma_mmap_pages);
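
/*
 * Usage sketch (illustrative only): the page-based allocator returns a
 * struct page that stays mapped for the given direction until
 * dma_free_pages().  The size and direction are assumptions.
 *
 *	struct page *page;
 *	dma_addr_t dma;
 *
 *	page = dma_alloc_pages(dev, SZ_64K, &dma, DMA_TO_DEVICE, GFP_KERNEL);
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	dma_free_pages(dev, SZ_64K, page, dma, DMA_TO_DEVICE);
 */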

static struct sg_table *alloc_single_sgt(struct device *dev, size_t size,
		enum dma_data_direction dir, gfp_t gfp)
{
	struct sg_table *sgt;
	struct page *page;

	sgt = kmalloc(sizeof(*sgt), gfp);
	if (!sgt)
		return NULL;
	if (sg_alloc_table(sgt, 1, gfp))
		goto out_free_sgt;
	page = __dma_alloc_pages(dev, size, &sgt->sgl->dma_address, dir, gfp);
	if (!page)
		goto out_free_table;
	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	sg_dma_len(sgt->sgl) = sgt->sgl->length;
	return sgt;
out_free_table:
	sg_free_table(sgt);
out_free_sgt:
	kfree(sgt);
	return NULL;
}

struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	struct sg_table *sgt;

	if (WARN_ON_ONCE(attrs & ~DMA_ATTR_ALLOC_SINGLE_PAGES))
		return NULL;

	if (ops && ops->alloc_noncontiguous)
		sgt = ops->alloc_noncontiguous(dev, size, dir, gfp, attrs);
	else
		sgt = alloc_single_sgt(dev, size, dir, gfp);

	if (sgt) {
		sgt->nents = 1;
		debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir, attrs);
	}
	return sgt;
}
EXPORT_SYMBOL_GPL(dma_alloc_noncontiguous);

static void free_single_sgt(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	__dma_free_pages(dev, size, sg_page(sgt->sgl), sgt->sgl->dma_address,
			dir);
	sg_free_table(sgt);
	kfree(sgt);
}

void dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	debug_dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
	if (ops && ops->free_noncontiguous)
		ops->free_noncontiguous(dev, size, sgt, dir);
	else
		free_single_sgt(dev, size, sgt, dir);
}
EXPORT_SYMBOL_GPL(dma_free_noncontiguous);

void *dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	if (ops && ops->alloc_noncontiguous)
		return vmap(sgt_handle(sgt)->pages, count, VM_MAP, PAGE_KERNEL);
	return page_address(sg_page(sgt->sgl));
}
EXPORT_SYMBOL_GPL(dma_vmap_noncontiguous);

void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops && ops->alloc_noncontiguous)
		vunmap(vaddr);
}
EXPORT_SYMBOL_GPL(dma_vunmap_noncontiguous);

int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct sg_table *sgt)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops && ops->alloc_noncontiguous) {
		unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;

		if (vma->vm_pgoff >= count ||
		    vma_pages(vma) > count - vma->vm_pgoff)
			return -ENXIO;
		return vm_map_pages(vma, sgt_handle(sgt)->pages, count);
	}
	return dma_mmap_pages(dev, vma, size, sg_page(sgt->sgl));
}
EXPORT_SYMBOL_GPL(dma_mmap_noncontiguous);
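
/*
 * Usage sketch (illustrative only): a non-contiguous allocation that the
 * driver also wants to access through a contiguous kernel mapping.  The
 * size and direction are assumptions.
 *
 *	struct sg_table *sgt;
 *	void *vaddr;
 *
 *	sgt = dma_alloc_noncontiguous(dev, SZ_1M, DMA_BIDIRECTIONAL,
 *				      GFP_KERNEL, 0);
 *	if (!sgt)
 *		return -ENOMEM;
 *	vaddr = dma_vmap_noncontiguous(dev, SZ_1M, sgt);
 *	... CPU access through vaddr, device access through sgt ...
 *	dma_vunmap_noncontiguous(dev, vaddr);
 *	dma_free_noncontiguous(dev, SZ_1M, sgt, DMA_BIDIRECTIONAL);
 */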

int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	/*
	 * ->dma_supported sets the bypass flag, so we must always call
	 * into the method here unless the device is truly direct mapped.
	 */
	if (!ops)
		return dma_direct_supported(dev, mask);
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}
EXPORT_SYMBOL(dma_supported);

#ifdef CONFIG_ARCH_HAS_DMA_SET_MASK
void arch_dma_set_mask(struct device *dev, u64 mask);
#else
#define arch_dma_set_mask(dev, mask)	do { } while (0)
#endif

int dma_set_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	arch_dma_set_mask(dev, mask);
	*dev->dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_mask);

#ifndef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dma_supported(dev, mask))
		return -EIO;

	dev->coherent_dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);
#endif
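
/*
 * Usage sketch (illustrative only): drivers typically set both masks at
 * once during probe and fall back to 32 bits if the wider mask is not
 * supported.
 *
 *	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
 *	if (ret)
 *		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
 *	if (ret)
 *		return ret;
 */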

size_t dma_max_mapping_size(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	size_t size = SIZE_MAX;

	if (dma_map_direct(dev, ops))
		size = dma_direct_max_mapping_size(dev);
	else if (ops && ops->max_mapping_size)
		size = ops->max_mapping_size(dev);

	return size;
}
EXPORT_SYMBOL_GPL(dma_max_mapping_size);

bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_map_direct(dev, ops))
		return dma_direct_need_sync(dev, dma_addr);
	return ops->sync_single_for_cpu || ops->sync_single_for_device;
}
EXPORT_SYMBOL_GPL(dma_need_sync);

unsigned long dma_get_merge_boundary(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops || !ops->get_merge_boundary)
		return 0;	/* can't merge */

	return ops->get_merge_boundary(dev);
}
EXPORT_SYMBOL_GPL(dma_get_merge_boundary);