Lines matching refs: ops (kernel/dma/mapping.c)

110 		const struct dma_map_ops *ops) in dma_go_direct() argument
112 if (likely(!ops)) in dma_go_direct()
129 const struct dma_map_ops *ops) in dma_alloc_direct() argument
131 return dma_go_direct(dev, dev->coherent_dma_mask, ops); in dma_alloc_direct()
135 const struct dma_map_ops *ops) in dma_map_direct() argument
137 return dma_go_direct(dev, *dev->dma_mask, ops); in dma_map_direct()
144 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_map_page_attrs() local
152 if (dma_map_direct(dev, ops)) in dma_map_page_attrs()
155 addr = ops->map_page(dev, page, offset, size, dir, attrs); in dma_map_page_attrs()
165 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_unmap_page_attrs() local
168 if (dma_map_direct(dev, ops)) in dma_unmap_page_attrs()
170 else if (ops->unmap_page) in dma_unmap_page_attrs()
171 ops->unmap_page(dev, addr, size, dir, attrs); in dma_unmap_page_attrs()
183 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_map_sg_attrs() local
191 if (dma_map_direct(dev, ops)) in dma_map_sg_attrs()
194 ents = ops->map_sg(dev, sg, nents, dir, attrs); in dma_map_sg_attrs()
206 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_unmap_sg_attrs() local
210 if (dma_map_direct(dev, ops)) in dma_unmap_sg_attrs()
212 else if (ops->unmap_sg) in dma_unmap_sg_attrs()
213 ops->unmap_sg(dev, sg, nents, dir, attrs); in dma_unmap_sg_attrs()
220 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_map_resource() local
232 if (dma_map_direct(dev, ops)) in dma_map_resource()
234 else if (ops->map_resource) in dma_map_resource()
235 addr = ops->map_resource(dev, phys_addr, size, dir, attrs); in dma_map_resource()
245 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_unmap_resource() local
248 if (!dma_map_direct(dev, ops) && ops->unmap_resource) in dma_unmap_resource()
249 ops->unmap_resource(dev, addr, size, dir, attrs); in dma_unmap_resource()
257 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_sync_single_for_cpu() local
260 if (dma_map_direct(dev, ops)) in dma_sync_single_for_cpu()
262 else if (ops->sync_single_for_cpu) in dma_sync_single_for_cpu()
263 ops->sync_single_for_cpu(dev, addr, size, dir); in dma_sync_single_for_cpu()
271 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_sync_single_for_device() local
274 if (dma_map_direct(dev, ops)) in dma_sync_single_for_device()
276 else if (ops->sync_single_for_device) in dma_sync_single_for_device()
277 ops->sync_single_for_device(dev, addr, size, dir); in dma_sync_single_for_device()
285 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_sync_sg_for_cpu() local
288 if (dma_map_direct(dev, ops)) in dma_sync_sg_for_cpu()
290 else if (ops->sync_sg_for_cpu) in dma_sync_sg_for_cpu()
291 ops->sync_sg_for_cpu(dev, sg, nelems, dir); in dma_sync_sg_for_cpu()
299 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_sync_sg_for_device() local
302 if (dma_map_direct(dev, ops)) in dma_sync_sg_for_device()
304 else if (ops->sync_sg_for_device) in dma_sync_sg_for_device()
305 ops->sync_sg_for_device(dev, sg, nelems, dir); in dma_sync_sg_for_device()
325 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_get_sgtable_attrs() local
327 if (dma_alloc_direct(dev, ops)) in dma_get_sgtable_attrs()
330 if (!ops->get_sgtable) in dma_get_sgtable_attrs()
332 return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs); in dma_get_sgtable_attrs()
367 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_can_mmap() local
369 if (dma_alloc_direct(dev, ops)) in dma_can_mmap()
371 return ops->mmap != NULL; in dma_can_mmap()
392 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_mmap_attrs() local
394 if (dma_alloc_direct(dev, ops)) in dma_mmap_attrs()
397 if (!ops->mmap) in dma_mmap_attrs()
399 return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs); in dma_mmap_attrs()
405 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_get_required_mask() local
407 if (dma_alloc_direct(dev, ops)) in dma_get_required_mask()
409 if (ops->get_required_mask) in dma_get_required_mask()
410 return ops->get_required_mask(dev); in dma_get_required_mask()
427 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_alloc_attrs() local
438 if (dma_alloc_direct(dev, ops)) in dma_alloc_attrs()
440 else if (ops->alloc) in dma_alloc_attrs()
441 cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs); in dma_alloc_attrs()
453 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_free_attrs() local
470 if (dma_alloc_direct(dev, ops)) in dma_free_attrs()
472 else if (ops->free) in dma_free_attrs()
473 ops->free(dev, size, cpu_addr, dma_handle, attrs); in dma_free_attrs()
480 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_alloc_pages() local
489 if (dma_alloc_direct(dev, ops)) in dma_alloc_pages()
491 else if (ops->alloc_pages) in dma_alloc_pages()
492 page = ops->alloc_pages(dev, size, dma_handle, dir, gfp); in dma_alloc_pages()
505 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_free_pages() local
510 if (dma_alloc_direct(dev, ops)) in dma_free_pages()
512 else if (ops->free_pages) in dma_free_pages()
513 ops->free_pages(dev, size, page, dma_handle, dir); in dma_free_pages()
520 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_alloc_noncoherent() local
523 if (!ops || !ops->alloc_noncoherent) { in dma_alloc_noncoherent()
533 vaddr = ops->alloc_noncoherent(dev, size, dma_handle, dir, gfp); in dma_alloc_noncoherent()
544 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_free_noncoherent() local
546 if (!ops || !ops->free_noncoherent) { in dma_free_noncoherent()
553 ops->free_noncoherent(dev, size, vaddr, dma_handle, dir); in dma_free_noncoherent()
559 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_supported() local
565 if (!ops) in dma_supported()
567 if (!ops->dma_supported) in dma_supported()
569 return ops->dma_supported(dev, mask); in dma_supported()
616 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_max_mapping_size() local
619 if (dma_map_direct(dev, ops)) in dma_max_mapping_size()
621 else if (ops && ops->max_mapping_size) in dma_max_mapping_size()
622 size = ops->max_mapping_size(dev); in dma_max_mapping_size()
630 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_need_sync() local
632 if (dma_map_direct(dev, ops)) in dma_need_sync()
634 return ops->sync_single_for_cpu || ops->sync_single_for_device; in dma_need_sync()
640 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_get_merge_boundary() local
642 if (!ops || !ops->get_merge_boundary) in dma_get_merge_boundary()
645 return ops->get_merge_boundary(dev); in dma_get_merge_boundary()
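
Taken together, the references above show one dispatch pattern repeated across the DMA mapping API: each entry point fetches the device's ops with get_dma_ops(), takes the dma-direct fast path when dma_map_direct() / dma_alloc_direct() says so, and otherwise calls the corresponding ops-> callback if it is non-NULL. The sketch below is a minimal, self-contained userspace model of that pattern, not the kernel code: struct device, struct dma_map_ops, and fake_iommu_map_page() here are simplified stand-ins introduced only for illustration, and the stand-in dma_map_direct() reduces to "no ops installed", whereas the real helper also consults the device's DMA mask via dma_go_direct() in some configurations.

/* Simplified userspace model of the ops-dispatch pattern listed above.
 * All types and helpers are stand-ins, not the kernel definitions. */
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

typedef uint64_t dma_addr_t;

struct device;

/* Trimmed-down analogue of struct dma_map_ops: a single callback. */
struct dma_map_ops {
	dma_addr_t (*map_page)(struct device *dev, void *page,
			       size_t offset, size_t size);
};

struct device {
	uint64_t dma_mask;                  /* streaming DMA addressing limit */
	const struct dma_map_ops *dma_ops;  /* NULL means "use dma-direct"    */
};

static const struct dma_map_ops *get_dma_ops(const struct device *dev)
{
	return dev->dma_ops;
}

/* Mirrors the "no ops installed" case of dma_map_direct(). */
static int dma_map_direct(const struct device *dev,
			  const struct dma_map_ops *ops)
{
	(void)dev;
	return ops == NULL;
}

/* Stand-in for the dma-direct path: DMA address == CPU address. */
static dma_addr_t dma_direct_map_page(struct device *dev, void *page,
				      size_t offset, size_t size)
{
	(void)dev; (void)size;
	return (dma_addr_t)(uintptr_t)page + offset;
}

/* The shape of dma_map_page_attrs(): direct path first, ops second. */
static dma_addr_t dma_map_page_attrs(struct device *dev, void *page,
				     size_t offset, size_t size)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_map_direct(dev, ops))
		return dma_direct_map_page(dev, page, offset, size);
	return ops->map_page(dev, page, offset, size);
}

/* Hypothetical IOMMU-style callback, only to exercise the ops path. */
static dma_addr_t fake_iommu_map_page(struct device *dev, void *page,
				      size_t offset, size_t size)
{
	(void)dev; (void)page; (void)size;
	return 0x80000000ull + offset;	/* pretend IOVA */
}

static const struct dma_map_ops fake_iommu_ops = {
	.map_page = fake_iommu_map_page,
};

int main(void)
{
	char buf[64];
	struct device direct_dev = { .dma_mask = ~0ull, .dma_ops = NULL };
	struct device iommu_dev  = { .dma_mask = ~0ull, .dma_ops = &fake_iommu_ops };

	printf("direct: %#llx\n",
	       (unsigned long long)dma_map_page_attrs(&direct_dev, buf, 8, 16));
	printf("ops:    %#llx\n",
	       (unsigned long long)dma_map_page_attrs(&iommu_dev, buf, 8, 16));
	return 0;
}

The same shape recurs in the unmap, sync, mmap, and allocation helpers referenced above: the unmap/sync paths additionally check that the callback pointer is non-NULL before calling it, and the allocation-side helpers gate on dma_alloc_direct(), which tests dev->coherent_dma_mask rather than *dev->dma_mask.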