
Lines Matching refs:dev

40 static void dmam_release(struct device *dev, void *res)  in dmam_release()  argument
44 dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle, in dmam_release()
48 static int dmam_match(struct device *dev, void *res, void *match_data) in dmam_match() argument
69 void dmam_free_coherent(struct device *dev, size_t size, void *vaddr, in dmam_free_coherent() argument
74 WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data)); in dmam_free_coherent()
75 dma_free_coherent(dev, size, vaddr, dma_handle); in dmam_free_coherent()
93 void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, in dmam_alloc_attrs() argument
103 vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs); in dmam_alloc_attrs()
114 devres_add(dev, dr); in dmam_alloc_attrs()
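
The dmam_* helpers above tie a coherent allocation to devres (via dmam_release()/dmam_match()), so the buffer is released automatically when the driver is unbound. A minimal caller-side sketch, assuming a hypothetical probe path and illustrative buffer size:

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Hypothetical probe-time allocation: devres frees it through
 * dmam_release() on driver detach, so no explicit
 * dmam_free_coherent() is needed on the error or remove paths.
 */
static int example_probe_alloc(struct device *dev, size_t buf_size,
			       void **cpu_addr, dma_addr_t *dma_handle)
{
	*cpu_addr = dmam_alloc_coherent(dev, buf_size, dma_handle, GFP_KERNEL);
	if (!*cpu_addr)
		return -ENOMEM;
	return 0;
}
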
120 static bool dma_go_direct(struct device *dev, dma_addr_t mask, in dma_go_direct() argument
123 if (use_dma_iommu(dev)) in dma_go_direct()
130 if (dev->dma_ops_bypass) in dma_go_direct()
131 return min_not_zero(mask, dev->bus_dma_limit) >= in dma_go_direct()
132 dma_direct_get_required_mask(dev); in dma_go_direct()
143 static inline bool dma_alloc_direct(struct device *dev, in dma_alloc_direct() argument
146 return dma_go_direct(dev, dev->coherent_dma_mask, ops); in dma_alloc_direct()
149 static inline bool dma_map_direct(struct device *dev, in dma_map_direct() argument
152 return dma_go_direct(dev, *dev->dma_mask, ops); in dma_map_direct()
155 dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page, in dma_map_page_attrs() argument
159 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_map_page_attrs()
164 if (WARN_ON_ONCE(!dev->dma_mask)) in dma_map_page_attrs()
167 if (dma_map_direct(dev, ops) || in dma_map_page_attrs()
168 arch_dma_map_page_direct(dev, page_to_phys(page) + offset + size)) in dma_map_page_attrs()
169 addr = dma_direct_map_page(dev, page, offset, size, dir, attrs); in dma_map_page_attrs()
170 else if (use_dma_iommu(dev)) in dma_map_page_attrs()
171 addr = iommu_dma_map_page(dev, page, offset, size, dir, attrs); in dma_map_page_attrs()
173 addr = ops->map_page(dev, page, offset, size, dir, attrs); in dma_map_page_attrs()
175 trace_dma_map_page(dev, page_to_phys(page) + offset, addr, size, dir, in dma_map_page_attrs()
177 debug_dma_map_page(dev, page, offset, size, dir, addr, attrs); in dma_map_page_attrs()
183 void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size, in dma_unmap_page_attrs() argument
186 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_unmap_page_attrs()
189 if (dma_map_direct(dev, ops) || in dma_unmap_page_attrs()
190 arch_dma_unmap_page_direct(dev, addr + size)) in dma_unmap_page_attrs()
191 dma_direct_unmap_page(dev, addr, size, dir, attrs); in dma_unmap_page_attrs()
192 else if (use_dma_iommu(dev)) in dma_unmap_page_attrs()
193 iommu_dma_unmap_page(dev, addr, size, dir, attrs); in dma_unmap_page_attrs()
195 ops->unmap_page(dev, addr, size, dir, attrs); in dma_unmap_page_attrs()
196 trace_dma_unmap_page(dev, addr, size, dir, attrs); in dma_unmap_page_attrs()
197 debug_dma_unmap_page(dev, addr, size, dir); in dma_unmap_page_attrs()
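
dma_map_page_attrs()/dma_unmap_page_attrs() above are the backends for the ordinary streaming helpers. A caller-side sketch of one transfer, assuming a hypothetical device that writes into the page (names are illustrative, not from the file):

#include <linux/dma-mapping.h>

/* Hypothetical streaming map of a single page for a device->memory
 * transfer; dma_map_page()/dma_unmap_page() expand to the *_attrs
 * variants shown above with attrs == 0.
 */
static int example_dma_read_page(struct device *dev, struct page *page,
				 size_t len)
{
	dma_addr_t addr = dma_map_page(dev, page, 0, len, DMA_FROM_DEVICE);

	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	/* ... program the device with 'addr' and wait for completion ... */

	dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
	return 0;
}
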
201 static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, in __dma_map_sg_attrs() argument
204 const struct dma_map_ops *ops = get_dma_ops(dev); in __dma_map_sg_attrs()
209 if (WARN_ON_ONCE(!dev->dma_mask)) in __dma_map_sg_attrs()
212 if (dma_map_direct(dev, ops) || in __dma_map_sg_attrs()
213 arch_dma_map_sg_direct(dev, sg, nents)) in __dma_map_sg_attrs()
214 ents = dma_direct_map_sg(dev, sg, nents, dir, attrs); in __dma_map_sg_attrs()
215 else if (use_dma_iommu(dev)) in __dma_map_sg_attrs()
216 ents = iommu_dma_map_sg(dev, sg, nents, dir, attrs); in __dma_map_sg_attrs()
218 ents = ops->map_sg(dev, sg, nents, dir, attrs); in __dma_map_sg_attrs()
222 trace_dma_map_sg(dev, sg, nents, ents, dir, attrs); in __dma_map_sg_attrs()
223 debug_dma_map_sg(dev, sg, nents, ents, dir, attrs); in __dma_map_sg_attrs()
226 trace_dma_map_sg_err(dev, sg, nents, ents, dir, attrs); in __dma_map_sg_attrs()
250 unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, in dma_map_sg_attrs() argument
255 ret = __dma_map_sg_attrs(dev, sg, nents, dir, attrs); in dma_map_sg_attrs()
289 int dma_map_sgtable(struct device *dev, struct sg_table *sgt, in dma_map_sgtable() argument
294 nents = __dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs); in dma_map_sgtable()
302 void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, in dma_unmap_sg_attrs() argument
306 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_unmap_sg_attrs()
309 trace_dma_unmap_sg(dev, sg, nents, dir, attrs); in dma_unmap_sg_attrs()
310 debug_dma_unmap_sg(dev, sg, nents, dir); in dma_unmap_sg_attrs()
311 if (dma_map_direct(dev, ops) || in dma_unmap_sg_attrs()
312 arch_dma_unmap_sg_direct(dev, sg, nents)) in dma_unmap_sg_attrs()
313 dma_direct_unmap_sg(dev, sg, nents, dir, attrs); in dma_unmap_sg_attrs()
314 else if (use_dma_iommu(dev)) in dma_unmap_sg_attrs()
315 iommu_dma_unmap_sg(dev, sg, nents, dir, attrs); in dma_unmap_sg_attrs()
317 ops->unmap_sg(dev, sg, nents, dir, attrs); in dma_unmap_sg_attrs()
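
__dma_map_sg_attrs() above is shared by dma_map_sg_attrs() and dma_map_sgtable(). A sketch of the sg_table form, assuming the table was already populated by the caller:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical scatter/gather mapping: dma_map_sgtable() wraps
 * __dma_map_sg_attrs() above and records the mapped entry count in
 * sgt->nents, converting a zero count into a negative error code.
 */
static int example_map_sgtable(struct device *dev, struct sg_table *sgt)
{
	int ret;

	ret = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
	if (ret)
		return ret;

	/* ... hand sgt->sgl / sgt->nents to the device ... */

	dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
	return 0;
}
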
321 dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr, in dma_map_resource() argument
324 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_map_resource()
329 if (WARN_ON_ONCE(!dev->dma_mask)) in dma_map_resource()
332 if (dma_map_direct(dev, ops)) in dma_map_resource()
333 addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs); in dma_map_resource()
334 else if (use_dma_iommu(dev)) in dma_map_resource()
335 addr = iommu_dma_map_resource(dev, phys_addr, size, dir, attrs); in dma_map_resource()
337 addr = ops->map_resource(dev, phys_addr, size, dir, attrs); in dma_map_resource()
339 trace_dma_map_resource(dev, phys_addr, addr, size, dir, attrs); in dma_map_resource()
340 debug_dma_map_resource(dev, phys_addr, size, dir, addr, attrs); in dma_map_resource()
345 void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size, in dma_unmap_resource() argument
348 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_unmap_resource()
351 if (dma_map_direct(dev, ops)) in dma_unmap_resource()
353 else if (use_dma_iommu(dev)) in dma_unmap_resource()
354 iommu_dma_unmap_resource(dev, addr, size, dir, attrs); in dma_unmap_resource()
356 ops->unmap_resource(dev, addr, size, dir, attrs); in dma_unmap_resource()
357 trace_dma_unmap_resource(dev, addr, size, dir, attrs); in dma_unmap_resource()
358 debug_dma_unmap_resource(dev, addr, size, dir); in dma_unmap_resource()
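
dma_map_resource() above maps a physical MMIO range rather than RAM; as the dispatch shows, only the direct and IOMMU paths (or an explicit ops->map_resource) handle it. A hedged sketch, with the FIFO address purely illustrative:

#include <linux/dma-mapping.h>

/* Hypothetical use: make a peripheral FIFO register window addressable
 * by a separate DMA engine. The mapping is later released with
 * dma_unmap_resource().
 */
static dma_addr_t example_map_fifo(struct device *dma_dev,
				   phys_addr_t fifo_phys, size_t len)
{
	dma_addr_t addr = dma_map_resource(dma_dev, fifo_phys, len,
					   DMA_BIDIRECTIONAL, 0);

	if (dma_mapping_error(dma_dev, addr))
		return 0;
	return addr;
}
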
363 void __dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size, in __dma_sync_single_for_cpu() argument
366 const struct dma_map_ops *ops = get_dma_ops(dev); in __dma_sync_single_for_cpu()
369 if (dma_map_direct(dev, ops)) in __dma_sync_single_for_cpu()
370 dma_direct_sync_single_for_cpu(dev, addr, size, dir); in __dma_sync_single_for_cpu()
371 else if (use_dma_iommu(dev)) in __dma_sync_single_for_cpu()
372 iommu_dma_sync_single_for_cpu(dev, addr, size, dir); in __dma_sync_single_for_cpu()
374 ops->sync_single_for_cpu(dev, addr, size, dir); in __dma_sync_single_for_cpu()
375 trace_dma_sync_single_for_cpu(dev, addr, size, dir); in __dma_sync_single_for_cpu()
376 debug_dma_sync_single_for_cpu(dev, addr, size, dir); in __dma_sync_single_for_cpu()
380 void __dma_sync_single_for_device(struct device *dev, dma_addr_t addr, in __dma_sync_single_for_device() argument
383 const struct dma_map_ops *ops = get_dma_ops(dev); in __dma_sync_single_for_device()
386 if (dma_map_direct(dev, ops)) in __dma_sync_single_for_device()
387 dma_direct_sync_single_for_device(dev, addr, size, dir); in __dma_sync_single_for_device()
388 else if (use_dma_iommu(dev)) in __dma_sync_single_for_device()
389 iommu_dma_sync_single_for_device(dev, addr, size, dir); in __dma_sync_single_for_device()
391 ops->sync_single_for_device(dev, addr, size, dir); in __dma_sync_single_for_device()
392 trace_dma_sync_single_for_device(dev, addr, size, dir); in __dma_sync_single_for_device()
393 debug_dma_sync_single_for_device(dev, addr, size, dir); in __dma_sync_single_for_device()
397 void __dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, in __dma_sync_sg_for_cpu() argument
400 const struct dma_map_ops *ops = get_dma_ops(dev); in __dma_sync_sg_for_cpu()
403 if (dma_map_direct(dev, ops)) in __dma_sync_sg_for_cpu()
404 dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir); in __dma_sync_sg_for_cpu()
405 else if (use_dma_iommu(dev)) in __dma_sync_sg_for_cpu()
406 iommu_dma_sync_sg_for_cpu(dev, sg, nelems, dir); in __dma_sync_sg_for_cpu()
408 ops->sync_sg_for_cpu(dev, sg, nelems, dir); in __dma_sync_sg_for_cpu()
409 trace_dma_sync_sg_for_cpu(dev, sg, nelems, dir); in __dma_sync_sg_for_cpu()
410 debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir); in __dma_sync_sg_for_cpu()
414 void __dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, in __dma_sync_sg_for_device() argument
417 const struct dma_map_ops *ops = get_dma_ops(dev); in __dma_sync_sg_for_device()
420 if (dma_map_direct(dev, ops)) in __dma_sync_sg_for_device()
421 dma_direct_sync_sg_for_device(dev, sg, nelems, dir); in __dma_sync_sg_for_device()
422 else if (use_dma_iommu(dev)) in __dma_sync_sg_for_device()
423 iommu_dma_sync_sg_for_device(dev, sg, nelems, dir); in __dma_sync_sg_for_device()
425 ops->sync_sg_for_device(dev, sg, nelems, dir); in __dma_sync_sg_for_device()
426 trace_dma_sync_sg_for_device(dev, sg, nelems, dir); in __dma_sync_sg_for_device()
427 debug_dma_sync_sg_for_device(dev, sg, nelems, dir); in __dma_sync_sg_for_device()
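
The __dma_sync_* routines above implement the CPU/device ownership hand-off for streaming buffers, and only run when dev->dma_skip_sync is false (see dma_setup_need_sync() further down). A caller-side sketch for a buffer that stays mapped across transfers; the receive-buffer scenario is assumed, not taken from the file:

#include <linux/dma-mapping.h>

/* Hypothetical reuse of a long-lived RX buffer: sync to the CPU before
 * parsing what the device wrote, then back to the device before the
 * next transfer.
 */
static void example_reuse_rx_buffer(struct device *dev, dma_addr_t addr,
				    size_t len)
{
	dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);
	/* ... CPU reads/parses the received data here ... */
	dma_sync_single_for_device(dev, addr, len, DMA_FROM_DEVICE);
}
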
431 bool __dma_need_sync(struct device *dev, dma_addr_t dma_addr) in __dma_need_sync() argument
433 const struct dma_map_ops *ops = get_dma_ops(dev); in __dma_need_sync()
435 if (dma_map_direct(dev, ops)) in __dma_need_sync()
441 return dma_direct_need_sync(dev, dma_addr); in __dma_need_sync()
454 bool dma_need_unmap(struct device *dev) in dma_need_unmap() argument
456 if (!dma_map_direct(dev, get_dma_ops(dev))) in dma_need_unmap()
458 if (!dev->dma_skip_sync) in dma_need_unmap()
464 static void dma_setup_need_sync(struct device *dev) in dma_setup_need_sync() argument
466 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_setup_need_sync()
468 if (dma_map_direct(dev, ops) || use_dma_iommu(dev)) in dma_setup_need_sync()
474 dev->dma_skip_sync = dev_is_dma_coherent(dev); in dma_setup_need_sync()
481 dev->dma_skip_sync = true; in dma_setup_need_sync()
483 dev->dma_skip_sync = false; in dma_setup_need_sync()
486 static inline void dma_setup_need_sync(struct device *dev) { } in dma_setup_need_sync() argument
500 int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, in dma_get_sgtable_attrs() argument
504 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_get_sgtable_attrs()
506 if (dma_alloc_direct(dev, ops)) in dma_get_sgtable_attrs()
507 return dma_direct_get_sgtable(dev, sgt, cpu_addr, dma_addr, in dma_get_sgtable_attrs()
509 if (use_dma_iommu(dev)) in dma_get_sgtable_attrs()
510 return iommu_dma_get_sgtable(dev, sgt, cpu_addr, dma_addr, in dma_get_sgtable_attrs()
514 return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs); in dma_get_sgtable_attrs()
523 pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs) in dma_pgprot() argument
525 if (dev_is_dma_coherent(dev)) in dma_pgprot()
542 bool dma_can_mmap(struct device *dev) in dma_can_mmap() argument
544 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_can_mmap()
546 if (dma_alloc_direct(dev, ops)) in dma_can_mmap()
547 return dma_direct_can_mmap(dev); in dma_can_mmap()
548 if (use_dma_iommu(dev)) in dma_can_mmap()
567 int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, in dma_mmap_attrs() argument
571 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_mmap_attrs()
573 if (dma_alloc_direct(dev, ops)) in dma_mmap_attrs()
574 return dma_direct_mmap(dev, vma, cpu_addr, dma_addr, size, in dma_mmap_attrs()
576 if (use_dma_iommu(dev)) in dma_mmap_attrs()
577 return iommu_dma_mmap(dev, vma, cpu_addr, dma_addr, size, in dma_mmap_attrs()
581 return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs); in dma_mmap_attrs()
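
dma_mmap_attrs() above backs the dma_mmap_coherent() convenience wrapper used by drivers that export a coherent buffer to userspace. A sketch of an mmap file operation, assuming buf/handle came from an earlier dma_alloc_coherent():

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Hypothetical mmap helper: dma_can_mmap() mirrors the dispatch above,
 * and dma_mmap_coherent() ends up in dma_mmap_attrs() with attrs == 0.
 */
static int example_mmap(struct device *dev, struct vm_area_struct *vma,
			void *buf, dma_addr_t handle, size_t size)
{
	if (!dma_can_mmap(dev))
		return -ENXIO;
	return dma_mmap_coherent(dev, vma, buf, handle, size);
}
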
585 u64 dma_get_required_mask(struct device *dev) in dma_get_required_mask() argument
587 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_get_required_mask()
589 if (dma_alloc_direct(dev, ops)) in dma_get_required_mask()
590 return dma_direct_get_required_mask(dev); in dma_get_required_mask()
592 if (use_dma_iommu(dev)) in dma_get_required_mask()
596 return ops->get_required_mask(dev); in dma_get_required_mask()
610 void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, in dma_alloc_attrs() argument
613 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_alloc_attrs()
616 WARN_ON_ONCE(!dev->coherent_dma_mask); in dma_alloc_attrs()
626 if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr)) { in dma_alloc_attrs()
627 trace_dma_alloc(dev, cpu_addr, *dma_handle, size, in dma_alloc_attrs()
635 if (dma_alloc_direct(dev, ops)) { in dma_alloc_attrs()
636 cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs); in dma_alloc_attrs()
637 } else if (use_dma_iommu(dev)) { in dma_alloc_attrs()
638 cpu_addr = iommu_dma_alloc(dev, size, dma_handle, flag, attrs); in dma_alloc_attrs()
640 cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs); in dma_alloc_attrs()
642 trace_dma_alloc(dev, NULL, 0, size, DMA_BIDIRECTIONAL, flag, in dma_alloc_attrs()
647 trace_dma_alloc(dev, cpu_addr, *dma_handle, size, DMA_BIDIRECTIONAL, in dma_alloc_attrs()
649 debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr, attrs); in dma_alloc_attrs()
654 void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr, in dma_free_attrs() argument
657 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_free_attrs()
659 if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr)) in dma_free_attrs()
670 trace_dma_free(dev, cpu_addr, dma_handle, size, DMA_BIDIRECTIONAL, in dma_free_attrs()
675 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); in dma_free_attrs()
676 if (dma_alloc_direct(dev, ops)) in dma_free_attrs()
677 dma_direct_free(dev, size, cpu_addr, dma_handle, attrs); in dma_free_attrs()
678 else if (use_dma_iommu(dev)) in dma_free_attrs()
679 iommu_dma_free(dev, size, cpu_addr, dma_handle, attrs); in dma_free_attrs()
681 ops->free(dev, size, cpu_addr, dma_handle, attrs); in dma_free_attrs()
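
dma_alloc_attrs()/dma_free_attrs() above are the unmanaged counterparts of the dmam_* helpers earlier in the file: the caller owns the lifetime. A minimal sketch; DMA_ATTR_NO_WARN is just one example of what the attrs argument carries:

#include <linux/dma-mapping.h>

/* Hypothetical ring-buffer allocation that the driver frees itself,
 * suppressing allocation-failure warnings via DMA_ATTR_NO_WARN.
 */
static void *example_alloc_ring(struct device *dev, size_t size,
				dma_addr_t *handle)
{
	return dma_alloc_attrs(dev, size, handle, GFP_KERNEL,
			       DMA_ATTR_NO_WARN);
}

static void example_free_ring(struct device *dev, size_t size, void *ring,
			      dma_addr_t handle)
{
	dma_free_attrs(dev, size, ring, handle, DMA_ATTR_NO_WARN);
}
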
685 static struct page *__dma_alloc_pages(struct device *dev, size_t size, in __dma_alloc_pages() argument
688 const struct dma_map_ops *ops = get_dma_ops(dev); in __dma_alloc_pages()
690 if (WARN_ON_ONCE(!dev->coherent_dma_mask)) in __dma_alloc_pages()
698 if (dma_alloc_direct(dev, ops)) in __dma_alloc_pages()
699 return dma_direct_alloc_pages(dev, size, dma_handle, dir, gfp); in __dma_alloc_pages()
700 if (use_dma_iommu(dev)) in __dma_alloc_pages()
701 return dma_common_alloc_pages(dev, size, dma_handle, dir, gfp); in __dma_alloc_pages()
704 return ops->alloc_pages_op(dev, size, dma_handle, dir, gfp); in __dma_alloc_pages()
707 struct page *dma_alloc_pages(struct device *dev, size_t size, in dma_alloc_pages() argument
710 struct page *page = __dma_alloc_pages(dev, size, dma_handle, dir, gfp); in dma_alloc_pages()
713 trace_dma_alloc_pages(dev, page_to_virt(page), *dma_handle, in dma_alloc_pages()
715 debug_dma_alloc_pages(dev, page, size, dir, *dma_handle, 0); in dma_alloc_pages()
717 trace_dma_alloc_pages(dev, NULL, 0, size, dir, gfp, 0); in dma_alloc_pages()
723 static void __dma_free_pages(struct device *dev, size_t size, struct page *page, in __dma_free_pages() argument
726 const struct dma_map_ops *ops = get_dma_ops(dev); in __dma_free_pages()
729 if (dma_alloc_direct(dev, ops)) in __dma_free_pages()
730 dma_direct_free_pages(dev, size, page, dma_handle, dir); in __dma_free_pages()
731 else if (use_dma_iommu(dev)) in __dma_free_pages()
732 dma_common_free_pages(dev, size, page, dma_handle, dir); in __dma_free_pages()
734 ops->free_pages(dev, size, page, dma_handle, dir); in __dma_free_pages()
737 void dma_free_pages(struct device *dev, size_t size, struct page *page, in dma_free_pages() argument
740 trace_dma_free_pages(dev, page_to_virt(page), dma_handle, size, dir, 0); in dma_free_pages()
741 debug_dma_free_pages(dev, page, size, dir, dma_handle); in dma_free_pages()
742 __dma_free_pages(dev, size, page, dma_handle, dir); in dma_free_pages()
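
dma_alloc_pages()/dma_free_pages() above return pages that are already mapped (the handle is valid on return) but not necessarily coherent. A hedged sketch of a caller; the "bounce" use case is assumed for illustration:

#include <linux/dma-mapping.h>

/* Hypothetical non-coherent page allocation: the caller may still need
 * dma_sync_single_for_{cpu,device}() around each transfer, and later
 * releases the pages with dma_free_pages(dev, size, page, *handle,
 * DMA_BIDIRECTIONAL).
 */
static struct page *example_alloc_bounce(struct device *dev, size_t size,
					 dma_addr_t *handle)
{
	return dma_alloc_pages(dev, size, handle, DMA_BIDIRECTIONAL,
			       GFP_KERNEL);
}
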
746 int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma, in dma_mmap_pages() argument
759 static struct sg_table *alloc_single_sgt(struct device *dev, size_t size, in alloc_single_sgt() argument
770 page = __dma_alloc_pages(dev, size, &sgt->sgl->dma_address, dir, gfp); in alloc_single_sgt()
783 struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size, in dma_alloc_noncontiguous() argument
793 if (use_dma_iommu(dev)) in dma_alloc_noncontiguous()
794 sgt = iommu_dma_alloc_noncontiguous(dev, size, dir, gfp, attrs); in dma_alloc_noncontiguous()
796 sgt = alloc_single_sgt(dev, size, dir, gfp); in dma_alloc_noncontiguous()
800 trace_dma_alloc_sgt(dev, sgt, size, dir, gfp, attrs); in dma_alloc_noncontiguous()
801 debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir, attrs); in dma_alloc_noncontiguous()
803 trace_dma_alloc_sgt_err(dev, NULL, 0, size, dir, gfp, attrs); in dma_alloc_noncontiguous()
809 static void free_single_sgt(struct device *dev, size_t size, in free_single_sgt() argument
812 __dma_free_pages(dev, size, sg_page(sgt->sgl), sgt->sgl->dma_address, in free_single_sgt()
818 void dma_free_noncontiguous(struct device *dev, size_t size, in dma_free_noncontiguous() argument
821 trace_dma_free_sgt(dev, sgt, size, dir); in dma_free_noncontiguous()
822 debug_dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir); in dma_free_noncontiguous()
824 if (use_dma_iommu(dev)) in dma_free_noncontiguous()
825 iommu_dma_free_noncontiguous(dev, size, sgt, dir); in dma_free_noncontiguous()
827 free_single_sgt(dev, size, sgt, dir); in dma_free_noncontiguous()
831 void *dma_vmap_noncontiguous(struct device *dev, size_t size, in dma_vmap_noncontiguous() argument
835 if (use_dma_iommu(dev)) in dma_vmap_noncontiguous()
836 return iommu_dma_vmap_noncontiguous(dev, size, sgt); in dma_vmap_noncontiguous()
842 void dma_vunmap_noncontiguous(struct device *dev, void *vaddr) in dma_vunmap_noncontiguous() argument
844 if (use_dma_iommu(dev)) in dma_vunmap_noncontiguous()
845 iommu_dma_vunmap_noncontiguous(dev, vaddr); in dma_vunmap_noncontiguous()
849 int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma, in dma_mmap_noncontiguous() argument
852 if (use_dma_iommu(dev)) in dma_mmap_noncontiguous()
853 return iommu_dma_mmap_noncontiguous(dev, vma, size, sgt); in dma_mmap_noncontiguous()
854 return dma_mmap_pages(dev, vma, size, sg_page(sgt->sgl)); in dma_mmap_noncontiguous()
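
The noncontiguous API above yields an sg_table whose pages are contiguous only in DMA address space on IOMMU setups (and collapse to a single allocation otherwise, per alloc_single_sgt()). A sketch of a caller that also wants a CPU view; the framebuffer use case is illustrative:

#include <linux/dma-mapping.h>

/* Hypothetical noncontiguous allocation plus vmap for CPU access.
 * Teardown is dma_vunmap_noncontiguous() followed by
 * dma_free_noncontiguous().
 */
static void *example_alloc_framebuf(struct device *dev, size_t size,
				    struct sg_table **out_sgt)
{
	struct sg_table *sgt;
	void *vaddr;

	sgt = dma_alloc_noncontiguous(dev, size, DMA_BIDIRECTIONAL,
				      GFP_KERNEL, 0);
	if (!sgt)
		return NULL;

	vaddr = dma_vmap_noncontiguous(dev, size, sgt);
	if (!vaddr) {
		dma_free_noncontiguous(dev, size, sgt, DMA_BIDIRECTIONAL);
		return NULL;
	}
	*out_sgt = sgt;
	return vaddr;
}
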
858 static int dma_supported(struct device *dev, u64 mask) in dma_supported() argument
860 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_supported()
862 if (use_dma_iommu(dev)) { in dma_supported()
875 return ops->dma_supported(dev, mask); in dma_supported()
878 return dma_direct_supported(dev, mask); in dma_supported()
881 bool dma_pci_p2pdma_supported(struct device *dev) in dma_pci_p2pdma_supported() argument
883 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_pci_p2pdma_supported()
896 int dma_set_mask(struct device *dev, u64 mask) in dma_set_mask() argument
904 if (!dev->dma_mask || !dma_supported(dev, mask)) in dma_set_mask()
907 arch_dma_set_mask(dev, mask); in dma_set_mask()
908 *dev->dma_mask = mask; in dma_set_mask()
909 dma_setup_need_sync(dev); in dma_set_mask()
915 int dma_set_coherent_mask(struct device *dev, u64 mask) in dma_set_coherent_mask() argument
923 if (!dma_supported(dev, mask)) in dma_set_coherent_mask()
926 dev->coherent_dma_mask = mask; in dma_set_coherent_mask()
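
dma_set_mask() and dma_set_coherent_mask() above are usually driven together from probe via dma_set_mask_and_coherent(). A minimal sketch of the common try-64-then-32 pattern:

#include <linux/dma-mapping.h>

/* Hypothetical probe-time mask setup: prefer 64-bit addressing, fall
 * back to 32-bit if dma_supported() rejects the wider mask.
 */
static int example_set_masks(struct device *dev)
{
	int ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));

	if (ret)
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	return ret;
}
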
931 static bool __dma_addressing_limited(struct device *dev) in __dma_addressing_limited() argument
933 const struct dma_map_ops *ops = get_dma_ops(dev); in __dma_addressing_limited()
935 if (min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) < in __dma_addressing_limited()
936 dma_get_required_mask(dev)) in __dma_addressing_limited()
939 if (unlikely(ops) || use_dma_iommu(dev)) in __dma_addressing_limited()
941 return !dma_direct_all_ram_mapped(dev); in __dma_addressing_limited()
952 bool dma_addressing_limited(struct device *dev) in dma_addressing_limited() argument
954 if (!__dma_addressing_limited(dev)) in dma_addressing_limited()
957 dev_dbg(dev, "device is DMA addressing limited\n"); in dma_addressing_limited()
962 size_t dma_max_mapping_size(struct device *dev) in dma_max_mapping_size() argument
964 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_max_mapping_size()
967 if (dma_map_direct(dev, ops)) in dma_max_mapping_size()
968 size = dma_direct_max_mapping_size(dev); in dma_max_mapping_size()
969 else if (use_dma_iommu(dev)) in dma_max_mapping_size()
970 size = iommu_dma_max_mapping_size(dev); in dma_max_mapping_size()
972 size = ops->max_mapping_size(dev); in dma_max_mapping_size()
978 size_t dma_opt_mapping_size(struct device *dev) in dma_opt_mapping_size() argument
980 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_opt_mapping_size()
983 if (use_dma_iommu(dev)) in dma_opt_mapping_size()
988 return min(dma_max_mapping_size(dev), size); in dma_opt_mapping_size()
992 unsigned long dma_get_merge_boundary(struct device *dev) in dma_get_merge_boundary() argument
994 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_get_merge_boundary()
996 if (use_dma_iommu(dev)) in dma_get_merge_boundary()
997 return iommu_dma_get_merge_boundary(dev); in dma_get_merge_boundary()
1002 return ops->get_merge_boundary(dev); in dma_get_merge_boundary()
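
The query helpers above (dma_addressing_limited(), dma_max_mapping_size(), dma_opt_mapping_size(), dma_get_merge_boundary()) let a driver size its transfers to what the mapping layer can handle. A hedged setup-time sketch; the helper name and clamping policy are illustrative:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/minmax.h>

/* Hypothetical capability query: clamp the driver's segment size to
 * the efficient and absolute limits reported by the DMA layer, and
 * log when the device is addressing-limited (likely to bounce).
 */
static size_t example_pick_seg_size(struct device *dev, size_t wanted)
{
	if (dma_addressing_limited(dev))
		dev_info(dev, "DMA addressing limited, expect bouncing\n");

	return min3(wanted, dma_opt_mapping_size(dev),
		    dma_max_mapping_size(dev));
}
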