Lines matching refs:dev (references to the identifier dev in the Linux kernel DMA mapping core, kernel/dma/mapping.c)

32 static void dmam_release(struct device *dev, void *res)  in dmam_release()  argument
36 dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle, in dmam_release()
40 static int dmam_match(struct device *dev, void *res, void *match_data) in dmam_match() argument
61 void dmam_free_coherent(struct device *dev, size_t size, void *vaddr, in dmam_free_coherent() argument
66 dma_free_coherent(dev, size, vaddr, dma_handle); in dmam_free_coherent()
67 WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data)); in dmam_free_coherent()
85 void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, in dmam_alloc_attrs() argument
95 vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs); in dmam_alloc_attrs()
106 devres_add(dev, dr); in dmam_alloc_attrs()
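
The dmam_* lines above are the device-managed (devres) variants of the coherent allocator. A minimal sketch of how a driver's probe path might use them follows; the function name, buffer size, and hardware programming step are hypothetical, and dmam_free_coherent() is only needed for releasing the buffer early, since devres frees it automatically on driver detach.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/sizes.h>

/* Hypothetical probe-path allocation: the buffer's lifetime is tied to
 * the device via devres, so nothing needs freeing on the error or
 * remove paths. */
static int example_probe_alloc(struct device *dev)
{
	dma_addr_t ring_dma;
	void *ring;

	ring = dmam_alloc_attrs(dev, SZ_4K, &ring_dma, GFP_KERNEL, 0);
	if (!ring)
		return -ENOMEM;

	/* ... program ring_dma into the (hypothetical) hardware ... */
	return 0;
}
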
112 static bool dma_go_direct(struct device *dev, dma_addr_t mask, in dma_go_direct() argument
118 if (dev->dma_ops_bypass) in dma_go_direct()
119 return min_not_zero(mask, dev->bus_dma_limit) >= in dma_go_direct()
120 dma_direct_get_required_mask(dev); in dma_go_direct()
131 static inline bool dma_alloc_direct(struct device *dev, in dma_alloc_direct() argument
134 return dma_go_direct(dev, dev->coherent_dma_mask, ops); in dma_alloc_direct()
137 static inline bool dma_map_direct(struct device *dev, in dma_map_direct() argument
140 return dma_go_direct(dev, *dev->dma_mask, ops); in dma_map_direct()
143 dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page, in dma_map_page_attrs() argument
147 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_map_page_attrs()
152 if (WARN_ON_ONCE(!dev->dma_mask)) in dma_map_page_attrs()
155 if (dma_map_direct(dev, ops) || in dma_map_page_attrs()
156 arch_dma_map_page_direct(dev, page_to_phys(page) + offset + size)) in dma_map_page_attrs()
157 addr = dma_direct_map_page(dev, page, offset, size, dir, attrs); in dma_map_page_attrs()
159 addr = ops->map_page(dev, page, offset, size, dir, attrs); in dma_map_page_attrs()
161 debug_dma_map_page(dev, page, offset, size, dir, addr, attrs); in dma_map_page_attrs()
167 void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size, in dma_unmap_page_attrs() argument
170 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_unmap_page_attrs()
173 if (dma_map_direct(dev, ops) || in dma_unmap_page_attrs()
174 arch_dma_unmap_page_direct(dev, addr + size)) in dma_unmap_page_attrs()
175 dma_direct_unmap_page(dev, addr, size, dir, attrs); in dma_unmap_page_attrs()
177 ops->unmap_page(dev, addr, size, dir, attrs); in dma_unmap_page_attrs()
178 debug_dma_unmap_page(dev, addr, size, dir); in dma_unmap_page_attrs()
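
dma_map_page_attrs()/dma_unmap_page_attrs() above are the streaming-mapping entry points behind dma_map_page() and dma_map_single(). A hedged sketch of a single-page transfer to the device follows; the page, length, and completion handshake are placeholders.

#include <linux/dma-mapping.h>
#include <linux/mm_types.h>

/* Hypothetical one-shot transmit of a page to the device. */
static int example_send_page(struct device *dev, struct page *page,
			     size_t len)
{
	dma_addr_t addr;

	addr = dma_map_page_attrs(dev, page, 0, len, DMA_TO_DEVICE, 0);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	/* ... hand addr to the hardware and wait for completion ... */

	dma_unmap_page_attrs(dev, addr, len, DMA_TO_DEVICE, 0);
	return 0;
}
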
182 static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, in __dma_map_sg_attrs() argument
185 const struct dma_map_ops *ops = get_dma_ops(dev); in __dma_map_sg_attrs()
190 if (WARN_ON_ONCE(!dev->dma_mask)) in __dma_map_sg_attrs()
193 if (dma_map_direct(dev, ops) || in __dma_map_sg_attrs()
194 arch_dma_map_sg_direct(dev, sg, nents)) in __dma_map_sg_attrs()
195 ents = dma_direct_map_sg(dev, sg, nents, dir, attrs); in __dma_map_sg_attrs()
197 ents = ops->map_sg(dev, sg, nents, dir, attrs); in __dma_map_sg_attrs()
201 debug_dma_map_sg(dev, sg, nents, ents, dir, attrs); in __dma_map_sg_attrs()
227 unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, in dma_map_sg_attrs() argument
232 ret = __dma_map_sg_attrs(dev, sg, nents, dir, attrs); in dma_map_sg_attrs()
266 int dma_map_sgtable(struct device *dev, struct sg_table *sgt, in dma_map_sgtable() argument
271 nents = __dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs); in dma_map_sgtable()
279 void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, in dma_unmap_sg_attrs() argument
283 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_unmap_sg_attrs()
286 debug_dma_unmap_sg(dev, sg, nents, dir); in dma_unmap_sg_attrs()
287 if (dma_map_direct(dev, ops) || in dma_unmap_sg_attrs()
288 arch_dma_unmap_sg_direct(dev, sg, nents)) in dma_unmap_sg_attrs()
289 dma_direct_unmap_sg(dev, sg, nents, dir, attrs); in dma_unmap_sg_attrs()
291 ops->unmap_sg(dev, sg, nents, dir, attrs); in dma_unmap_sg_attrs()
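
For scatter/gather I/O, the lines above show both the legacy nents-returning dma_map_sg_attrs() and the newer dma_map_sgtable(), which returns 0 or a negative errno. A sketch using the sg_table form, with a hypothetical already-populated table:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical mapping of an sg_table for a read from the device;
 * entries may be merged by an IOMMU, so iterate the DMA side. */
static int example_map_table(struct device *dev, struct sg_table *sgt)
{
	struct scatterlist *sg;
	int i, ret;

	ret = dma_map_sgtable(dev, sgt, DMA_FROM_DEVICE, 0);
	if (ret)
		return ret;

	for_each_sgtable_dma_sg(sgt, sg, i) {
		/* ... program sg_dma_address(sg) / sg_dma_len(sg) ... */
	}

	/* ... after the transfer completes ... */
	dma_unmap_sgtable(dev, sgt, DMA_FROM_DEVICE, 0);
	return 0;
}
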
295 dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr, in dma_map_resource() argument
298 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_map_resource()
303 if (WARN_ON_ONCE(!dev->dma_mask)) in dma_map_resource()
306 if (dma_map_direct(dev, ops)) in dma_map_resource()
307 addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs); in dma_map_resource()
309 addr = ops->map_resource(dev, phys_addr, size, dir, attrs); in dma_map_resource()
311 debug_dma_map_resource(dev, phys_addr, size, dir, addr, attrs); in dma_map_resource()
316 void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size, in dma_unmap_resource() argument
319 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_unmap_resource()
322 if (!dma_map_direct(dev, ops) && ops->unmap_resource) in dma_unmap_resource()
323 ops->unmap_resource(dev, addr, size, dir, attrs); in dma_unmap_resource()
324 debug_dma_unmap_resource(dev, addr, size, dir); in dma_unmap_resource()
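
dma_map_resource() above maps a raw physical (typically MMIO) address rather than RAM, for example so a DMA engine can write into another device's FIFO. A hedged sketch, with the FIFO address and window size as placeholders:

#include <linux/dma-mapping.h>
#include <linux/sizes.h>

/* Hypothetical mapping of a peripheral FIFO register window for
 * device-to-device DMA. */
static dma_addr_t example_map_fifo(struct device *dma_dev,
				   phys_addr_t fifo_phys)
{
	dma_addr_t addr;

	addr = dma_map_resource(dma_dev, fifo_phys, SZ_4K,
				DMA_BIDIRECTIONAL, 0);
	if (dma_mapping_error(dma_dev, addr))
		return DMA_MAPPING_ERROR;

	/* ... later: dma_unmap_resource(dma_dev, addr, SZ_4K,
	 *            DMA_BIDIRECTIONAL, 0); */
	return addr;
}
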
328 void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size, in dma_sync_single_for_cpu() argument
331 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_sync_single_for_cpu()
334 if (dma_map_direct(dev, ops)) in dma_sync_single_for_cpu()
335 dma_direct_sync_single_for_cpu(dev, addr, size, dir); in dma_sync_single_for_cpu()
337 ops->sync_single_for_cpu(dev, addr, size, dir); in dma_sync_single_for_cpu()
338 debug_dma_sync_single_for_cpu(dev, addr, size, dir); in dma_sync_single_for_cpu()
342 void dma_sync_single_for_device(struct device *dev, dma_addr_t addr, in dma_sync_single_for_device() argument
345 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_sync_single_for_device()
348 if (dma_map_direct(dev, ops)) in dma_sync_single_for_device()
349 dma_direct_sync_single_for_device(dev, addr, size, dir); in dma_sync_single_for_device()
351 ops->sync_single_for_device(dev, addr, size, dir); in dma_sync_single_for_device()
352 debug_dma_sync_single_for_device(dev, addr, size, dir); in dma_sync_single_for_device()
356 void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, in dma_sync_sg_for_cpu() argument
359 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_sync_sg_for_cpu()
362 if (dma_map_direct(dev, ops)) in dma_sync_sg_for_cpu()
363 dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir); in dma_sync_sg_for_cpu()
365 ops->sync_sg_for_cpu(dev, sg, nelems, dir); in dma_sync_sg_for_cpu()
366 debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir); in dma_sync_sg_for_cpu()
370 void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, in dma_sync_sg_for_device() argument
373 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_sync_sg_for_device()
376 if (dma_map_direct(dev, ops)) in dma_sync_sg_for_device()
377 dma_direct_sync_sg_for_device(dev, sg, nelems, dir); in dma_sync_sg_for_device()
379 ops->sync_sg_for_device(dev, sg, nelems, dir); in dma_sync_sg_for_device()
380 debug_dma_sync_sg_for_device(dev, sg, nelems, dir); in dma_sync_sg_for_device()
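
The four sync helpers above transfer ownership of a long-lived streaming mapping between CPU and device without remapping it for every transfer. A sketch of a hypothetical receive path using the single-buffer pair:

#include <linux/dma-mapping.h>

/* Hypothetical RX processing for a buffer that stays mapped
 * DMA_FROM_DEVICE across many transfers. */
static void example_process_rx(struct device *dev, dma_addr_t buf_dma,
			       void *buf_cpu, size_t len)
{
	/* Make the device's writes visible before the CPU reads them. */
	dma_sync_single_for_cpu(dev, buf_dma, len, DMA_FROM_DEVICE);

	/* ... parse buf_cpu here ... */

	/* Hand the buffer back to the device for the next transfer. */
	dma_sync_single_for_device(dev, buf_dma, len, DMA_FROM_DEVICE);
}
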
395 int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, in dma_get_sgtable_attrs() argument
399 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_get_sgtable_attrs()
401 if (dma_alloc_direct(dev, ops)) in dma_get_sgtable_attrs()
402 return dma_direct_get_sgtable(dev, sgt, cpu_addr, dma_addr, in dma_get_sgtable_attrs()
406 return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs); in dma_get_sgtable_attrs()
415 pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs) in dma_pgprot() argument
417 if (dev_is_dma_coherent(dev)) in dma_pgprot()
437 bool dma_can_mmap(struct device *dev) in dma_can_mmap() argument
439 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_can_mmap()
441 if (dma_alloc_direct(dev, ops)) in dma_can_mmap()
442 return dma_direct_can_mmap(dev); in dma_can_mmap()
460 int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, in dma_mmap_attrs() argument
464 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_mmap_attrs()
466 if (dma_alloc_direct(dev, ops)) in dma_mmap_attrs()
467 return dma_direct_mmap(dev, vma, cpu_addr, dma_addr, size, in dma_mmap_attrs()
471 return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs); in dma_mmap_attrs()
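
dma_get_sgtable_attrs(), dma_can_mmap() and dma_mmap_attrs() above exist so a driver can export a coherent buffer to user space. A hedged sketch of an mmap helper; the buffer fields stand in for hypothetical driver state:

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Hypothetical mmap helper for a buffer previously allocated with
 * dma_alloc_attrs()/dma_alloc_coherent(). */
static int example_mmap_buf(struct device *dev, struct vm_area_struct *vma,
			    void *buf_cpu, dma_addr_t buf_dma, size_t size)
{
	if (!dma_can_mmap(dev))
		return -ENXIO;

	return dma_mmap_attrs(dev, vma, buf_cpu, buf_dma, size, 0);
}
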
475 u64 dma_get_required_mask(struct device *dev) in dma_get_required_mask() argument
477 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_get_required_mask()
479 if (dma_alloc_direct(dev, ops)) in dma_get_required_mask()
480 return dma_direct_get_required_mask(dev); in dma_get_required_mask()
482 return ops->get_required_mask(dev); in dma_get_required_mask()
496 void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, in dma_alloc_attrs() argument
499 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_alloc_attrs()
502 WARN_ON_ONCE(!dev->coherent_dma_mask); in dma_alloc_attrs()
504 if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr)) in dma_alloc_attrs()
510 if (dma_alloc_direct(dev, ops)) in dma_alloc_attrs()
511 cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs); in dma_alloc_attrs()
513 cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs); in dma_alloc_attrs()
517 debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr, attrs); in dma_alloc_attrs()
522 void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr, in dma_free_attrs() argument
525 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_free_attrs()
527 if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr)) in dma_free_attrs()
541 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); in dma_free_attrs()
542 if (dma_alloc_direct(dev, ops)) in dma_free_attrs()
543 dma_direct_free(dev, size, cpu_addr, dma_handle, attrs); in dma_free_attrs()
545 ops->free(dev, size, cpu_addr, dma_handle, attrs); in dma_free_attrs()
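
dma_alloc_attrs()/dma_free_attrs() above are the generic coherent allocator underneath dma_alloc_coherent(). A minimal sketch with one hypothetical attribute choice (DMA_ATTR_NO_WARN merely suppresses allocation-failure warnings):

#include <linux/dma-mapping.h>

/* Hypothetical descriptor-ring allocation and teardown. */
static void *example_alloc_ring(struct device *dev, size_t size,
				dma_addr_t *ring_dma)
{
	return dma_alloc_attrs(dev, size, ring_dma, GFP_KERNEL,
			       DMA_ATTR_NO_WARN);
}

static void example_free_ring(struct device *dev, size_t size,
			      void *ring, dma_addr_t ring_dma)
{
	dma_free_attrs(dev, size, ring, ring_dma, DMA_ATTR_NO_WARN);
}
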
549 static struct page *__dma_alloc_pages(struct device *dev, size_t size, in __dma_alloc_pages() argument
552 const struct dma_map_ops *ops = get_dma_ops(dev); in __dma_alloc_pages()
554 if (WARN_ON_ONCE(!dev->coherent_dma_mask)) in __dma_alloc_pages()
560 if (dma_alloc_direct(dev, ops)) in __dma_alloc_pages()
561 return dma_direct_alloc_pages(dev, size, dma_handle, dir, gfp); in __dma_alloc_pages()
564 return ops->alloc_pages(dev, size, dma_handle, dir, gfp); in __dma_alloc_pages()
567 struct page *dma_alloc_pages(struct device *dev, size_t size, in dma_alloc_pages() argument
570 struct page *page = __dma_alloc_pages(dev, size, dma_handle, dir, gfp); in dma_alloc_pages()
573 debug_dma_map_page(dev, page, 0, size, dir, *dma_handle, 0); in dma_alloc_pages()
578 static void __dma_free_pages(struct device *dev, size_t size, struct page *page, in __dma_free_pages() argument
581 const struct dma_map_ops *ops = get_dma_ops(dev); in __dma_free_pages()
584 if (dma_alloc_direct(dev, ops)) in __dma_free_pages()
585 dma_direct_free_pages(dev, size, page, dma_handle, dir); in __dma_free_pages()
587 ops->free_pages(dev, size, page, dma_handle, dir); in __dma_free_pages()
590 void dma_free_pages(struct device *dev, size_t size, struct page *page, in dma_free_pages() argument
593 debug_dma_unmap_page(dev, dma_handle, size, dir); in dma_free_pages()
594 __dma_free_pages(dev, size, page, dma_handle, dir); in dma_free_pages()
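
dma_alloc_pages()/dma_free_pages() above return page-backed, device-addressable but non-coherent memory, so the caller owns cache maintenance via the sync helpers. A hedged sketch with hypothetical names:

#include <linux/dma-mapping.h>

/* Hypothetical allocation of a non-coherent staging buffer. */
static struct page *example_alloc_staging(struct device *dev, size_t size,
					  dma_addr_t *dma)
{
	return dma_alloc_pages(dev, size, dma, DMA_BIDIRECTIONAL,
			       GFP_KERNEL);
}

static void example_free_staging(struct device *dev, size_t size,
				 struct page *page, dma_addr_t dma)
{
	dma_free_pages(dev, size, page, dma, DMA_BIDIRECTIONAL);
}
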
598 int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma, in dma_mmap_pages() argument
611 static struct sg_table *alloc_single_sgt(struct device *dev, size_t size, in alloc_single_sgt() argument
622 page = __dma_alloc_pages(dev, size, &sgt->sgl->dma_address, dir, gfp); in alloc_single_sgt()
635 struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size, in dma_alloc_noncontiguous() argument
638 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_alloc_noncontiguous()
645 sgt = ops->alloc_noncontiguous(dev, size, dir, gfp, attrs); in dma_alloc_noncontiguous()
647 sgt = alloc_single_sgt(dev, size, dir, gfp); in dma_alloc_noncontiguous()
651 debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir, attrs); in dma_alloc_noncontiguous()
657 static void free_single_sgt(struct device *dev, size_t size, in free_single_sgt() argument
660 __dma_free_pages(dev, size, sg_page(sgt->sgl), sgt->sgl->dma_address, in free_single_sgt()
666 void dma_free_noncontiguous(struct device *dev, size_t size, in dma_free_noncontiguous() argument
669 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_free_noncontiguous()
671 debug_dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir); in dma_free_noncontiguous()
673 ops->free_noncontiguous(dev, size, sgt, dir); in dma_free_noncontiguous()
675 free_single_sgt(dev, size, sgt, dir); in dma_free_noncontiguous()
679 void *dma_vmap_noncontiguous(struct device *dev, size_t size, in dma_vmap_noncontiguous() argument
682 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_vmap_noncontiguous()
691 void dma_vunmap_noncontiguous(struct device *dev, void *vaddr) in dma_vunmap_noncontiguous() argument
693 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_vunmap_noncontiguous()
700 int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma, in dma_mmap_noncontiguous() argument
703 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_mmap_noncontiguous()
713 return dma_mmap_pages(dev, vma, size, sg_page(sgt->sgl)); in dma_mmap_noncontiguous()
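
The noncontiguous allocator above hands back DMA-mapped memory as an sg_table that need not be physically contiguous; dma_vmap_noncontiguous() adds an optional linear CPU view. A hedged sketch of allocation plus vmap, with hypothetical names:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical allocation of a large, possibly non-contiguous buffer
 * with a kernel virtual mapping for CPU access. */
static void *example_alloc_noncontig(struct device *dev, size_t size,
				     struct sg_table **out_sgt)
{
	struct sg_table *sgt;
	void *vaddr;

	sgt = dma_alloc_noncontiguous(dev, size, DMA_BIDIRECTIONAL,
				      GFP_KERNEL, 0);
	if (!sgt)
		return NULL;

	vaddr = dma_vmap_noncontiguous(dev, size, sgt);
	if (!vaddr) {
		dma_free_noncontiguous(dev, size, sgt, DMA_BIDIRECTIONAL);
		return NULL;
	}

	*out_sgt = sgt;
	/* Teardown order: dma_vunmap_noncontiguous(), then
	 * dma_free_noncontiguous(). */
	return vaddr;
}
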
717 static int dma_supported(struct device *dev, u64 mask) in dma_supported() argument
719 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_supported()
726 return dma_direct_supported(dev, mask); in dma_supported()
729 return ops->dma_supported(dev, mask); in dma_supported()
732 bool dma_pci_p2pdma_supported(struct device *dev) in dma_pci_p2pdma_supported() argument
734 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_pci_p2pdma_supported()
751 void arch_dma_set_mask(struct device *dev, u64 mask);
753 #define arch_dma_set_mask(dev, mask) do { } while (0) argument
756 int dma_set_mask(struct device *dev, u64 mask) in dma_set_mask() argument
764 if (!dev->dma_mask || !dma_supported(dev, mask)) in dma_set_mask()
767 arch_dma_set_mask(dev, mask); in dma_set_mask()
768 *dev->dma_mask = mask; in dma_set_mask()
773 int dma_set_coherent_mask(struct device *dev, u64 mask) in dma_set_coherent_mask() argument
781 if (!dma_supported(dev, mask)) in dma_set_coherent_mask()
784 dev->coherent_dma_mask = mask; in dma_set_coherent_mask()
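
dma_set_mask() and dma_set_coherent_mask() above validate a requested addressing capability against what the platform supports. Probe code usually sets both via the combined helper; a sketch with a hypothetical 64-bit-to-32-bit fallback:

#include <linux/dma-mapping.h>

/* Hypothetical probe-time addressing setup. */
static int example_set_masks(struct device *dev)
{
	int ret;

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret)
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	return ret;
}
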
789 size_t dma_max_mapping_size(struct device *dev) in dma_max_mapping_size() argument
791 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_max_mapping_size()
794 if (dma_map_direct(dev, ops)) in dma_max_mapping_size()
795 size = dma_direct_max_mapping_size(dev); in dma_max_mapping_size()
797 size = ops->max_mapping_size(dev); in dma_max_mapping_size()
803 size_t dma_opt_mapping_size(struct device *dev) in dma_opt_mapping_size() argument
805 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_opt_mapping_size()
811 return min(dma_max_mapping_size(dev), size); in dma_opt_mapping_size()
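
dma_max_mapping_size() and dma_opt_mapping_size() above report the hard and preferred per-mapping limits (for example when swiotlb bounce buffering constrains mappings). A hedged one-liner showing how a driver might cap a request size:

#include <linux/dma-mapping.h>
#include <linux/minmax.h>

/* Hypothetical cap on a single transfer's size. */
static size_t example_cap_xfer(struct device *dev, size_t want)
{
	return min(want, dma_opt_mapping_size(dev));
}
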
815 bool dma_need_sync(struct device *dev, dma_addr_t dma_addr) in dma_need_sync() argument
817 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_need_sync()
819 if (dma_map_direct(dev, ops)) in dma_need_sync()
820 return dma_direct_need_sync(dev, dma_addr); in dma_need_sync()
825 unsigned long dma_get_merge_boundary(struct device *dev) in dma_get_merge_boundary() argument
827 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_get_merge_boundary()
832 return ops->get_merge_boundary(dev); in dma_get_merge_boundary()