
Lines Matching refs:ops (Linux DMA mapping core, kernel/dma/mapping.c)

113 		const struct dma_map_ops *ops)  in dma_go_direct()  argument
115 if (likely(!ops)) in dma_go_direct()
132 const struct dma_map_ops *ops) in dma_alloc_direct() argument
134 return dma_go_direct(dev, dev->coherent_dma_mask, ops); in dma_alloc_direct()
138 const struct dma_map_ops *ops) in dma_map_direct() argument
140 return dma_go_direct(dev, *dev->dma_mask, ops); in dma_map_direct()
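
The helpers above set up the dispatch pattern every entry point in this file follows: ask dma_map_direct()/dma_alloc_direct() whether the device can take the direct-mapping path (ops is NULL, per dma_go_direct()), and only otherwise call into the device's dma_map_ops. A condensed paraphrase of that shape, not the actual kernel code:

/* Condensed paraphrase of the dispatch pattern visible in the listing. */
static dma_addr_t example_dispatch_map_page(struct device *dev, struct page *page,
					    unsigned long offset, size_t size,
					    enum dma_data_direction dir,
					    unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_map_direct(dev, ops))	/* no ops installed: direct mapping */
		return dma_direct_map_page(dev, page, offset, size, dir, attrs);
	return ops->map_page(dev, page, offset, size, dir, attrs);
}
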
147 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_map_page_attrs() local
155 if (dma_map_direct(dev, ops) || in dma_map_page_attrs()
159 addr = ops->map_page(dev, page, offset, size, dir, attrs); in dma_map_page_attrs()
170 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_unmap_page_attrs() local
173 if (dma_map_direct(dev, ops) || in dma_unmap_page_attrs()
176 else if (ops->unmap_page) in dma_unmap_page_attrs()
177 ops->unmap_page(dev, addr, size, dir, attrs); in dma_unmap_page_attrs()
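
For context, a minimal driver-side use of the streaming page API whose internals are listed above; the device pointer, page, and error value are placeholders, not taken from this file:

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Map one page for a device read, check for failure, then unmap. */
static int example_dma_one_page(struct device *dev, struct page *page)
{
	dma_addr_t addr;

	addr = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE, 0);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	/* ... program the hardware with 'addr' and wait for completion ... */

	dma_unmap_page_attrs(dev, addr, PAGE_SIZE, DMA_TO_DEVICE, 0);
	return 0;
}
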
185 const struct dma_map_ops *ops = get_dma_ops(dev); in __dma_map_sg_attrs() local
193 if (dma_map_direct(dev, ops) || in __dma_map_sg_attrs()
197 ents = ops->map_sg(dev, sg, nents, dir, attrs); in __dma_map_sg_attrs()
283 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_unmap_sg_attrs() local
287 if (dma_map_direct(dev, ops) || in dma_unmap_sg_attrs()
290 else if (ops->unmap_sg) in dma_unmap_sg_attrs()
291 ops->unmap_sg(dev, sg, nents, dir, attrs); in dma_unmap_sg_attrs()
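
The scatter-gather entries above are usually reached through dma_map_sg()/dma_unmap_sg(); a sketch of the expected call sequence (list setup and hardware programming omitted, names hypothetical):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Map an sg list; 'nents' entries go in, 'ents' mapped entries come out. */
static int example_map_sgl(struct device *dev, struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int ents, i;

	ents = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
	if (!ents)
		return -EIO;

	for_each_sg(sgl, sg, ents, i) {
		/* hand sg_dma_address(sg) / sg_dma_len(sg) to the device */
	}

	/* unmap with the original nents, not the returned count */
	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
	return 0;
}
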
298 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_map_resource() local
306 if (dma_map_direct(dev, ops)) in dma_map_resource()
308 else if (ops->map_resource) in dma_map_resource()
309 addr = ops->map_resource(dev, phys_addr, size, dir, attrs); in dma_map_resource()
319 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_unmap_resource() local
322 if (!dma_map_direct(dev, ops) && ops->unmap_resource) in dma_unmap_resource()
323 ops->unmap_resource(dev, addr, size, dir, attrs); in dma_unmap_resource()
331 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_sync_single_for_cpu() local
334 if (dma_map_direct(dev, ops)) in dma_sync_single_for_cpu()
336 else if (ops->sync_single_for_cpu) in dma_sync_single_for_cpu()
337 ops->sync_single_for_cpu(dev, addr, size, dir); in dma_sync_single_for_cpu()
345 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_sync_single_for_device() local
348 if (dma_map_direct(dev, ops)) in dma_sync_single_for_device()
350 else if (ops->sync_single_for_device) in dma_sync_single_for_device()
351 ops->sync_single_for_device(dev, addr, size, dir); in dma_sync_single_for_device()
359 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_sync_sg_for_cpu() local
362 if (dma_map_direct(dev, ops)) in dma_sync_sg_for_cpu()
364 else if (ops->sync_sg_for_cpu) in dma_sync_sg_for_cpu()
365 ops->sync_sg_for_cpu(dev, sg, nelems, dir); in dma_sync_sg_for_cpu()
373 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_sync_sg_for_device() local
376 if (dma_map_direct(dev, ops)) in dma_sync_sg_for_device()
378 else if (ops->sync_sg_for_device) in dma_sync_sg_for_device()
379 ops->sync_sg_for_device(dev, sg, nelems, dir); in dma_sync_sg_for_device()
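
The sync entry points above come into play when a streaming mapping is kept and reused; a small sketch of the usual CPU/device ownership hand-off (buffer, mapping, and transfer handling are assumptions):

#include <linux/dma-mapping.h>
#include <linux/string.h>

/* Reuse one streaming mapping across transfers by handing ownership
 * back and forth instead of remapping every time. */
static void example_reuse_mapping(struct device *dev, dma_addr_t addr,
				  void *buf, size_t len)
{
	/* CPU fills the buffer, then hands it to the device */
	memset(buf, 0, len);
	dma_sync_single_for_device(dev, addr, len, DMA_TO_DEVICE);

	/* ... start the transfer and wait for it to complete ... */

	/* take the buffer back before the CPU touches it again */
	dma_sync_single_for_cpu(dev, addr, len, DMA_TO_DEVICE);
}
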
399 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_get_sgtable_attrs() local
401 if (dma_alloc_direct(dev, ops)) in dma_get_sgtable_attrs()
404 if (!ops->get_sgtable) in dma_get_sgtable_attrs()
406 return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs); in dma_get_sgtable_attrs()
439 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_can_mmap() local
441 if (dma_alloc_direct(dev, ops)) in dma_can_mmap()
443 return ops->mmap != NULL; in dma_can_mmap()
464 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_mmap_attrs() local
466 if (dma_alloc_direct(dev, ops)) in dma_mmap_attrs()
469 if (!ops->mmap) in dma_mmap_attrs()
471 return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs); in dma_mmap_attrs()
477 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_get_required_mask() local
479 if (dma_alloc_direct(dev, ops)) in dma_get_required_mask()
481 if (ops->get_required_mask) in dma_get_required_mask()
482 return ops->get_required_mask(dev); in dma_get_required_mask()
499 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_alloc_attrs() local
510 if (dma_alloc_direct(dev, ops)) in dma_alloc_attrs()
512 else if (ops->alloc) in dma_alloc_attrs()
513 cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs); in dma_alloc_attrs()
525 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_free_attrs() local
542 if (dma_alloc_direct(dev, ops)) in dma_free_attrs()
544 else if (ops->free) in dma_free_attrs()
545 ops->free(dev, size, cpu_addr, dma_handle, attrs); in dma_free_attrs()
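
Most drivers reach dma_alloc_attrs()/dma_free_attrs() indirectly through dma_alloc_coherent(); a minimal sketch (sizes and the device pointer are placeholders):

#include <linux/dma-mapping.h>

/* Allocate a coherent buffer shared between CPU and device. */
static void *example_alloc_ring(struct device *dev, size_t size,
				dma_addr_t *dma_handle)
{
	void *ring = dma_alloc_coherent(dev, size, dma_handle, GFP_KERNEL);

	if (!ring)
		return NULL;

	/* ... tell the hardware about *dma_handle ... */
	return ring;
}

static void example_free_ring(struct device *dev, size_t size,
			      void *ring, dma_addr_t dma_handle)
{
	dma_free_coherent(dev, size, ring, dma_handle);
}
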
552 const struct dma_map_ops *ops = get_dma_ops(dev); in __dma_alloc_pages() local
560 if (dma_alloc_direct(dev, ops)) in __dma_alloc_pages()
562 if (!ops->alloc_pages) in __dma_alloc_pages()
564 return ops->alloc_pages(dev, size, dma_handle, dir, gfp); in __dma_alloc_pages()
581 const struct dma_map_ops *ops = get_dma_ops(dev); in __dma_free_pages() local
584 if (dma_alloc_direct(dev, ops)) in __dma_free_pages()
586 else if (ops->free_pages) in __dma_free_pages()
587 ops->free_pages(dev, size, page, dma_handle, dir); in __dma_free_pages()
638 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_alloc_noncontiguous() local
644 if (ops && ops->alloc_noncontiguous) in dma_alloc_noncontiguous()
645 sgt = ops->alloc_noncontiguous(dev, size, dir, gfp, attrs); in dma_alloc_noncontiguous()
669 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_free_noncontiguous() local
672 if (ops && ops->free_noncontiguous) in dma_free_noncontiguous()
673 ops->free_noncontiguous(dev, size, sgt, dir); in dma_free_noncontiguous()
682 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_vmap_noncontiguous() local
685 if (ops && ops->alloc_noncontiguous) in dma_vmap_noncontiguous()
693 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_vunmap_noncontiguous() local
695 if (ops && ops->alloc_noncontiguous) in dma_vunmap_noncontiguous()
703 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_mmap_noncontiguous() local
705 if (ops && ops->alloc_noncontiguous) { in dma_mmap_noncontiguous()
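
The noncontiguous allocator referenced above hands back an sg_table rather than a kernel address, with dma_vmap_noncontiguous() as an optional CPU mapping on top. A sketch of the expected sequence (error handling trimmed, all names hypothetical):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int example_noncontig(struct device *dev, size_t size)
{
	struct sg_table *sgt;
	void *vaddr;

	sgt = dma_alloc_noncontiguous(dev, size, DMA_BIDIRECTIONAL,
				      GFP_KERNEL, 0);
	if (!sgt)
		return -ENOMEM;

	/* optional kernel mapping of the (possibly non-contiguous) pages */
	vaddr = dma_vmap_noncontiguous(dev, size, sgt);
	if (!vaddr) {
		dma_free_noncontiguous(dev, size, sgt, DMA_BIDIRECTIONAL);
		return -ENOMEM;
	}

	/* ... use vaddr on the CPU side, sgt->sgl for the device ... */

	dma_vunmap_noncontiguous(dev, vaddr);
	dma_free_noncontiguous(dev, size, sgt, DMA_BIDIRECTIONAL);
	return 0;
}
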
719 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_supported() local
725 if (!ops) in dma_supported()
727 if (!ops->dma_supported) in dma_supported()
729 return ops->dma_supported(dev, mask); in dma_supported()
734 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_pci_p2pdma_supported() local
737 if (!ops) in dma_pci_p2pdma_supported()
746 return ops->flags & DMA_F_PCI_P2PDMA_SUPPORTED; in dma_pci_p2pdma_supported()
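
dma_supported() is normally exercised indirectly when a driver sets its addressing mask at probe time; a typical sketch (the fallback width is an assumption, not from this file):

#include <linux/dma-mapping.h>

/* In probe: claim 64-bit addressing, fall back to 32-bit if refused. */
static int example_set_masks(struct device *dev)
{
	int ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));

	if (ret)
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	return ret;
}
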
791 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_max_mapping_size() local
794 if (dma_map_direct(dev, ops)) in dma_max_mapping_size()
796 else if (ops && ops->max_mapping_size) in dma_max_mapping_size()
797 size = ops->max_mapping_size(dev); in dma_max_mapping_size()
805 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_opt_mapping_size() local
808 if (ops && ops->opt_mapping_size) in dma_opt_mapping_size()
809 size = ops->opt_mapping_size(); in dma_opt_mapping_size()
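
dma_max_mapping_size() and dma_opt_mapping_size() are mainly of interest when sizing transfers (block-layer drivers use them to cap per-command limits); a hedged sketch of that kind of clamping, with hypothetical names:

#include <linux/dma-mapping.h>
#include <linux/minmax.h>

/* Cap a driver's per-transfer limit by what the DMA layer can map well. */
static size_t example_transfer_limit(struct device *dev, size_t driver_max)
{
	return min3(driver_max, dma_opt_mapping_size(dev),
		    dma_max_mapping_size(dev));
}
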
817 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_need_sync() local
819 if (dma_map_direct(dev, ops)) in dma_need_sync()
821 return ops->sync_single_for_cpu || ops->sync_single_for_device; in dma_need_sync()
827 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_get_merge_boundary() local
829 if (!ops || !ops->get_merge_boundary) in dma_get_merge_boundary()
832 return ops->get_merge_boundary(dev); in dma_get_merge_boundary()