Lines matching "dma-coherent" in linux/arch/arm/mm/dma-mapping.c
2 * linux/arch/arm/mm/dma-mapping.c
4 * Copyright (C) 2000-2004 Russell King
10 * DMA uncached mapping support.
21 #include <linux/dma-mapping.h>
22 #include <linux/dma-contiguous.h>
37 #include <asm/dma-iommu.h>
40 #include <asm/dma-contiguous.h>
42 #include "dma.h"
64 #define COHERENT 1 macro
88 if (buf->virt == virt) { in arm_dma_buffer_find()
89 list_del(&buf->list); in arm_dma_buffer_find()
99 * The DMA API is built upon the notion of "buffer ownership". A buffer
101 * by it) or exclusively owned by the DMA device. These helper functions
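The ownership hand-off described in this comment is what the dma_sync_* helpers implement. A minimal driver-side sketch, assuming a hypothetical streaming RX buffer (the function and its arguments are illustrative, not from this file):

#include <linux/dma-mapping.h>

static void example_rx_poll(struct device *dev, dma_addr_t handle,
			    void *buf, size_t len)
{
	/* Take ownership back from the device before the CPU reads. */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

	/* ... CPU may now inspect buf ... */

	/* Return ownership to the device before it writes again. */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
}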
116 * arm_dma_map_page - map a portion of a page for streaming DMA
117 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
121 * @dir: DMA transfer direction
146 * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
147 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
148 * @handle: DMA address of buffer
150 * @dir: DMA transfer direction (same as passed to dma_map_page)
152 * Unmap a page streaming mode DMA translation. The handle and size
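The two kerneldoc fragments above pair up: every successful dma_map_page() must eventually be matched by a dma_unmap_page() with the same size and direction. A hedged driver-side sketch (only the mapping bookkeeping, transfer setup omitted):

dma_addr_t handle;

handle = dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
if (dma_mapping_error(dev, handle))
	return -ENOMEM;
/* ... hand 'handle' to the hardware and wait for completion ... */
dma_unmap_page(dev, handle, len, DMA_TO_DEVICE);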
170 unsigned int offset = handle & (PAGE_SIZE - 1); in arm_dma_sync_single_for_cpu()
171 struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset)); in arm_dma_sync_single_for_cpu()
178 unsigned int offset = handle & (PAGE_SIZE - 1); in arm_dma_sync_single_for_device()
179 struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset)); in arm_dma_sync_single_for_device()
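Worked example of the arithmetic above, assuming 4 KiB pages: a handle of 0xc0a81234 splits into an in-page offset of 0x234 and a page-aligned bus address of 0xc0a81000, whose PFN then selects the struct page that the cache maintenance routines operate on.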
233 * indicate that DMA to this device is not supported. in __dma_supported()
237 dma_to_pfn(dev, ~0) < max_pfn - 1) { in __dma_supported()
239 dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n", in __dma_supported()
249 * Translate the device's DMA mask to a PFN limit. This in __dma_supported()
250 * PFN number includes the page which we can DMA to. in __dma_supported()
254 …dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory tha… in __dma_supported()
269 mask = dev->coherent_dma_mask; in get_coherent_dma_mask()
272 * Sanity check the DMA mask - it must be non-zero, and in get_coherent_dma_mask()
273 * must be able to be satisfied by a DMA allocation. in get_coherent_dma_mask()
276 dev_warn(dev, "coherent DMA mask is unset\n"); in get_coherent_dma_mask()
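A driver normally avoids both warnings above by declaring its addressing capability before the first allocation; a minimal sketch using the generic API (the 32-bit mask is just an example value):

if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
	return -EIO;	/* device cannot do DMA that this system can satisfy */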
291 * lurking in the kernel direct-mapped region is invalidated. in __dma_clear_buffer()
299 if (coherent_flag != COHERENT) in __dma_clear_buffer()
303 size -= PAGE_SIZE; in __dma_clear_buffer()
305 if (coherent_flag != COHERENT) in __dma_clear_buffer()
310 if (coherent_flag != COHERENT) { in __dma_clear_buffer()
318 * Allocate a DMA buffer for 'dev' of size 'size' using the
344 * Free a DMA buffer. 'size' must be page aligned.
370 * DMA allocation can be mapped to user space, so let's in __dma_alloc_remap()
397 * Initialise the coherent pool for atomic allocations.
406 atomic_pool = gen_pool_create(PAGE_SHIFT, -1); in atomic_pool_init()
410 * The atomic pool is only used for non-coherent allocations in atomic_pool_init()
425 atomic_pool_size, -1); in atomic_pool_init()
432 pr_info("DMA: preallocated %zu KiB pool for atomic coherent allocations\n", in atomic_pool_init()
441 pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n", in atomic_pool_init()
443 return -ENOMEM; in atomic_pool_init()
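Atomic, non-blocking requests are later carved out of this pool with the genalloc API. A simplified reconstruction of how __alloc_from_pool() (shown only in fragments below) serves such a request; 'size' and 'ret_page' stand for its parameters:

void *ptr = NULL;
unsigned long val;

val = gen_pool_alloc(atomic_pool, size);
if (val) {
	phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

	*ret_page = phys_to_page(phys);
	ptr = (void *)val;
}
return ptr;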
482 map.length = end - start; in dma_contiguous_remap()
486 * Clear previous low-memory mapping to ensure that the in dma_contiguous_remap()
532 * non-coherent in __alloc_remap_buffer()
557 WARN(1, "coherent pool not initialised!\n"); in __alloc_from_pool()
646 /* __alloc_simple_buffer is only called when the device is coherent */ in __alloc_simple_buffer()
647 page = __dma_alloc_buffer(dev, size, gfp, COHERENT); in __alloc_simple_buffer()
658 return __alloc_simple_buffer(args->dev, args->size, args->gfp, in simple_allocator_alloc()
664 __dma_free_buffer(args->page, args->size); in simple_allocator_free()
675 return __alloc_from_contiguous(args->dev, args->size, args->prot, in cma_allocator_alloc()
676 ret_page, args->caller, in cma_allocator_alloc()
677 args->want_vaddr, args->coherent_flag, in cma_allocator_alloc()
678 args->gfp); in cma_allocator_alloc()
683 __free_from_contiguous(args->dev, args->page, args->cpu_addr, in cma_allocator_free()
684 args->size, args->want_vaddr); in cma_allocator_free()
695 return __alloc_from_pool(args->size, ret_page); in pool_allocator_alloc()
700 __free_from_pool(args->cpu_addr, args->size); in pool_allocator_free()
711 return __alloc_remap_buffer(args->dev, args->size, args->gfp, in remap_allocator_alloc()
712 args->prot, ret_page, args->caller, in remap_allocator_alloc()
713 args->want_vaddr); in remap_allocator_alloc()
718 if (args->want_vaddr) in remap_allocator_free()
719 __dma_free_remap(args->cpu_addr, args->size); in remap_allocator_free()
721 __dma_free_buffer(args->page, args->size); in remap_allocator_free()
745 .coherent_flag = is_coherent ? COHERENT : NORMAL, in __dma_alloc()
751 dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n", in __dma_alloc()
769 * Following is a work-around (a.k.a. hack) to prevent pages in __dma_alloc()
783 buf->allocator = &cma_allocator; in __dma_alloc()
785 buf->allocator = &simple_allocator; in __dma_alloc()
787 buf->allocator = &remap_allocator; in __dma_alloc()
789 buf->allocator = &pool_allocator; in __dma_alloc()
791 addr = buf->allocator->alloc(&args, &page); in __dma_alloc()
797 buf->virt = args.want_vaddr ? addr : page; in __dma_alloc()
800 list_add(&buf->list, &arm_dma_bufs); in __dma_alloc()
810 * Allocate DMA-coherent memory space and return both the kernel remapped
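From a driver's point of view, all of the allocator plumbing above sits behind dma_alloc_coherent(); a hedged usage sketch (size and error handling are illustrative):

void *cpu;
dma_addr_t dma;

cpu = dma_alloc_coherent(dev, SZ_4K, &dma, GFP_KERNEL);
if (!cpu)
	return -ENOMEM;
/* ... give 'dma' to the device, use 'cpu' from the kernel ... */
dma_free_coherent(dev, SZ_4K, cpu, dma);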
833 int ret = -ENXIO; in __arm_dma_mmap()
837 unsigned long off = vma->vm_pgoff; in __arm_dma_mmap()
842 if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) { in __arm_dma_mmap()
843 ret = remap_pfn_range(vma, vma->vm_start, in __arm_dma_mmap()
845 vma->vm_end - vma->vm_start, in __arm_dma_mmap()
846 vma->vm_page_prot); in __arm_dma_mmap()
853 * Create userspace mapping for the DMA-coherent memory.
866 vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot); in arm_dma_mmap()
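The usual consumer of this path is a driver mmap() file operation built on dma_mmap_coherent(); a hedged sketch in which struct example_priv and its fields are hypothetical:

#include <linux/dma-mapping.h>
#include <linux/fs.h>

struct example_priv {
	struct device *dev;
	void *cpu_addr;
	dma_addr_t dma_handle;
	size_t size;
};

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct example_priv *p = file->private_data;

	/* Delegates offset/size validation and remapping to the DMA layer. */
	return dma_mmap_coherent(p->dev, vma, p->cpu_addr,
				 p->dma_handle, p->size);
}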
891 buf->allocator->free(&args); in __arm_dma_free()
908 * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
910 * coherent DMA APIs through the dma_buf API, which only accepts a
912 * 1. Not all memory allocated via the coherent DMA APIs is backed by
914 * 2. Passing coherent DMA memory into the streaming APIs is not allowed
928 return -ENXIO; in arm_dma_get_sgtable()
936 sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); in arm_dma_get_sgtable()
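The intended caller is a dma-buf style exporter that needs a scatterlist describing an already-allocated coherent buffer; a hedged sketch of that usage:

struct sg_table sgt;
int ret;

ret = dma_get_sgtable(dev, &sgt, cpu_addr, dma_handle, size);
if (ret)
	return ret;
/* ... hand &sgt to the importer ... */
sg_free_table(&sgt);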
964 len = PAGE_SIZE - offset; in dma_cache_maint_page()
983 left -= len; in dma_cache_maint_page()
991 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
1006 /* FIXME: non-speculating: flush on bidirectional mappings? */ in __dma_page_cpu_to_dev()
1014 /* FIXME: non-speculating: not required */ in __dma_page_dev_to_cpu()
1015 /* in any case, don't bother invalidating if DMA to device */ in __dma_page_dev_to_cpu()
1023 * Mark the D-cache clean for these pages to avoid extra flushing. in __dma_page_dev_to_cpu()
1033 left -= PAGE_SIZE - off; in __dma_page_dev_to_cpu()
1037 set_bit(PG_dcache_clean, &page->flags); in __dma_page_dev_to_cpu()
1038 left -= PAGE_SIZE; in __dma_page_dev_to_cpu()
1044 * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
1045 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
1048 * @dir: DMA transfer direction
1050 * Map a set of buffers described by scatterlist in streaming mode for DMA.
1051 * This is the scatter-gather version of the dma_map_single interface.
1053 * appropriate dma address and length. They are obtained via
1068 s->dma_length = s->length; in arm_dma_map_sg()
1070 s->dma_address = ops->map_page(dev, sg_page(s), s->offset, in arm_dma_map_sg()
1071 s->length, dir, attrs); in arm_dma_map_sg()
1072 if (dma_mapping_error(dev, s->dma_address)) in arm_dma_map_sg()
1079 ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs); in arm_dma_map_sg()
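A hedged driver-side counterpart of the scatter-gather mapping above; program_desc() is a made-up stand-in for whatever writes the device's descriptors:

struct scatterlist *sg;
int i, count;

count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
if (!count)
	return -EIO;
for_each_sg(sgl, sg, count, i)
	program_desc(sg_dma_address(sg), sg_dma_len(sg));
/* ... after the transfer completes, unmap with the original nents ... */
dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);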
1084 * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
1085 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
1088 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1090 * Unmap a set of streaming mode DMA translations. Again, CPU access
1102 ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs); in arm_dma_unmap_sg()
1107 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
1110 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1120 ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length, in arm_dma_sync_sg_for_cpu()
1126 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
1129 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1139 ops->sync_single_for_device(dev, sg_dma_address(s), s->length, in arm_dma_sync_sg_for_device()
1144 * Return whether the given device DMA address mask can be supported
1145 * properly. For example, if your device can only drive the low 24-bits
1154 static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent) in arm_get_dma_map_ops() argument
1156 return coherent ? &arm_coherent_dma_ops : &arm_dma_ops; in arm_get_dma_map_ops()
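For the 24-bit case mentioned in the comment above, a driver would express the limitation like this (hedged sketch):

if (dma_set_mask(dev, DMA_BIT_MASK(24)))
	return -EIO;	/* system memory is not reachable by this device */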
1190 size_t mapping_size = mapping->bits << PAGE_SHIFT; in __alloc_iova()
1199 align = (1 << order) - 1; in __alloc_iova()
1201 spin_lock_irqsave(&mapping->lock, flags); in __alloc_iova()
1202 for (i = 0; i < mapping->nr_bitmaps; i++) { in __alloc_iova()
1203 start = bitmap_find_next_zero_area(mapping->bitmaps[i], in __alloc_iova()
1204 mapping->bits, 0, count, align); in __alloc_iova()
1206 if (start > mapping->bits) in __alloc_iova()
1209 bitmap_set(mapping->bitmaps[i], start, count); in __alloc_iova()
1218 if (i == mapping->nr_bitmaps) { in __alloc_iova()
1220 spin_unlock_irqrestore(&mapping->lock, flags); in __alloc_iova()
1224 start = bitmap_find_next_zero_area(mapping->bitmaps[i], in __alloc_iova()
1225 mapping->bits, 0, count, align); in __alloc_iova()
1227 if (start > mapping->bits) { in __alloc_iova()
1228 spin_unlock_irqrestore(&mapping->lock, flags); in __alloc_iova()
1232 bitmap_set(mapping->bitmaps[i], start, count); in __alloc_iova()
1234 spin_unlock_irqrestore(&mapping->lock, flags); in __alloc_iova()
1236 iova = mapping->base + (mapping_size * i); in __alloc_iova()
1246 size_t mapping_size = mapping->bits << PAGE_SHIFT; in __free_iova()
1254 bitmap_index = (u32) (addr - mapping->base) / (u32) mapping_size; in __free_iova()
1255 BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions); in __free_iova()
1257 bitmap_base = mapping->base + mapping_size * bitmap_index; in __free_iova()
1259 start = (addr - bitmap_base) >> PAGE_SHIFT; in __free_iova()
1272 spin_lock_irqsave(&mapping->lock, flags); in __free_iova()
1273 bitmap_clear(mapping->bitmaps[bitmap_index], start, count); in __free_iova()
1274 spin_unlock_irqrestore(&mapping->lock, flags); in __free_iova()
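The IOVA allocator above is an ordinary first-fit bitmap with one bit per page; the real code additionally holds mapping->lock and can grow by appending extension bitmaps. A self-contained sketch of the same technique outside this file (EX_BITS, ex_alloc_iova and the base address are invented for illustration):

#include <linux/bitmap.h>

#define EX_BITS	1024			/* track 1024 pages of IOVA space */
static DECLARE_BITMAP(ex_bitmap, EX_BITS);

static dma_addr_t ex_alloc_iova(dma_addr_t base, unsigned int count)
{
	unsigned long start;

	/* Find 'count' free pages, first fit, no extra alignment. */
	start = bitmap_find_next_zero_area(ex_bitmap, EX_BITS, 0, count, 0);
	if (start >= EX_BITS)
		return 0;			/* space exhausted */
	bitmap_set(ex_bitmap, start, count);
	return base + ((dma_addr_t)start << PAGE_SHIFT);
}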
1317 order_idx = ARRAY_SIZE(iommu_order_array) - 1; in __iommu_alloc_buffer()
1336 /* See if it's easy to allocate a high-order chunk */ in __iommu_alloc_buffer()
1353 while (--j) in __iommu_alloc_buffer()
1359 count -= 1 << order; in __iommu_alloc_buffer()
1364 while (i--) in __iommu_alloc_buffer()
1428 len = (j - i) << PAGE_SHIFT; in __iommu_create_mapping()
1429 ret = iommu_map(mapping->domain, iova, phys, len, in __iommu_create_mapping()
1438 iommu_unmap(mapping->domain, dma_addr, iova-dma_addr); in __iommu_create_mapping()
1448 * add optional in-page offset from iova to size and align in __iommu_remove_mapping()
1454 iommu_unmap(mapping->domain, iova, size); in __iommu_remove_mapping()
1481 if (area && (area->flags & VM_ARM_DMA_CONSISTENT)) in __iommu_get_pages()
1482 return area->pages; in __iommu_get_pages()
1493 if (coherent_flag == COHERENT) in __iommu_alloc_simple()
1515 if (coherent_flag == COHERENT) in __iommu_free_atomic()
1532 if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp)) in __arm_iommu_alloc_attrs()
1537 * Following is a work-around (a.k.a. hack) to prevent pages in __arm_iommu_alloc_attrs()
1579 return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, COHERENT); in arm_coherent_iommu_alloc_attrs()
1586 unsigned long uaddr = vma->vm_start; in __arm_iommu_mmap_attrs()
1587 unsigned long usize = vma->vm_end - vma->vm_start; in __arm_iommu_mmap_attrs()
1590 unsigned long off = vma->vm_pgoff; in __arm_iommu_mmap_attrs()
1593 return -ENXIO; in __arm_iommu_mmap_attrs()
1595 if (off >= nr_pages || (usize >> PAGE_SHIFT) > nr_pages - off) in __arm_iommu_mmap_attrs()
1596 return -ENXIO; in __arm_iommu_mmap_attrs()
1607 usize -= PAGE_SIZE; in __arm_iommu_mmap_attrs()
1616 vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot); in arm_iommu_mmap_attrs()
1638 if (coherent_flag == COHERENT || __in_atomic_pool(cpu_addr, size)) { in __arm_iommu_free_attrs()
1645 WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr); in __arm_iommu_free_attrs()
1667 __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, COHERENT); in arm_coherent_iommu_free_attrs()
1678 return -ENXIO; in arm_iommu_get_sgtable()
1685 * Map a part of the scatter-gather list into contiguous io address space
1704 return -ENOMEM; in __map_sg_chunk()
1708 unsigned int len = PAGE_ALIGN(s->offset + s->length); in __map_sg_chunk()
1711 __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir); in __map_sg_chunk()
1715 ret = iommu_map(mapping->domain, iova, phys, len, prot); in __map_sg_chunk()
1725 iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE); in __map_sg_chunk()
1734 struct scatterlist *s = sg, *dma = sg, *start = sg; in __iommu_map_sg() local
1736 unsigned int offset = s->offset; in __iommu_map_sg()
1737 unsigned int size = s->offset + s->length; in __iommu_map_sg()
1743 s->dma_address = ARM_MAPPING_ERROR; in __iommu_map_sg()
1744 s->dma_length = 0; in __iommu_map_sg()
1746 if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) { in __iommu_map_sg()
1747 if (__map_sg_chunk(dev, start, size, &dma->dma_address, in __iommu_map_sg()
1751 dma->dma_address += offset; in __iommu_map_sg()
1752 dma->dma_length = size - offset; in __iommu_map_sg()
1754 size = offset = s->offset; in __iommu_map_sg()
1756 dma = sg_next(dma); in __iommu_map_sg()
1759 size += s->length; in __iommu_map_sg()
1761 if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs, in __iommu_map_sg()
1765 dma->dma_address += offset; in __iommu_map_sg()
1766 dma->dma_length = size - offset; in __iommu_map_sg()
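For example, four physically scattered 4 KiB pages are given consecutive IO virtual addresses here, so they can come back as one 16 KiB DMA segment provided the device's maximum segment size allows it; otherwise the current chunk is closed and the next dma entry is started.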
1777 * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA
1781 * @dir: DMA transfer direction
1783 * Map a set of i/o coherent buffers described by scatterlist in streaming
1784 * mode for DMA. The scatter gather list elements are merged together (if
1785 * possible) and tagged with the appropriate dma address and length. They are
1795 * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
1799 * @dir: DMA transfer direction
1801 * Map a set of buffers described by scatterlist in streaming mode for DMA.
1803 * tagged with the appropriate dma address and length. They are obtained via
1824 __dma_page_dev_to_cpu(sg_page(s), s->offset, in __iommu_unmap_sg()
1825 s->length, dir); in __iommu_unmap_sg()
1830 * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
1834 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1836 * Unmap a set of streaming mode DMA translations. Again, CPU access
1847 * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
1851 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1853 * Unmap a set of streaming mode DMA translations. Again, CPU access
1868 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1877 __dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir); in arm_iommu_sync_sg_for_cpu()
1886 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1895 __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir); in arm_iommu_sync_sg_for_device()
1905 * @dir: DMA transfer direction
1907 * Coherent IOMMU aware version of arm_dma_map_page()
1923 ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot); in arm_coherent_iommu_map_page()
1939 * @dir: DMA transfer direction
1956 * @handle: DMA address of buffer
1958 * @dir: DMA transfer direction (same as passed to dma_map_page)
1960 * Coherent IOMMU aware version of arm_dma_unmap_page()
1973 iommu_unmap(mapping->domain, iova, len); in arm_coherent_iommu_unmap_page()
1980 * @handle: DMA address of buffer
1982 * @dir: DMA transfer direction (same as passed to dma_map_page)
1991 struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); in arm_iommu_unmap_page()
2001 iommu_unmap(mapping->domain, iova, len); in arm_iommu_unmap_page()
2006 * arm_iommu_map_resource - map a device resource for DMA
2010 * @dir: DMA transfer direction
2029 ret = iommu_map(mapping->domain, dma_addr, addr, len, prot); in arm_iommu_map_resource()
2040 * arm_iommu_unmap_resource - unmap a device DMA resource
2042 * @dma_handle: DMA address to resource
2044 * @dir: DMA transfer direction
2058 iommu_unmap(mapping->domain, iova, len); in arm_iommu_unmap_resource()
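Callers reach the resource mapping pair through dma_map_resource(); a hedged sketch of mapping another device's MMIO region (for example a peripheral FIFO), where 'res' is a hypothetical struct resource pointer:

dma_addr_t dma;

dma = dma_map_resource(dev, res->start, resource_size(res),
		       DMA_BIDIRECTIONAL, 0);
if (dma_mapping_error(dev, dma))
	return -ENOMEM;
/* ... program 'dma' into the DMA master ... */
dma_unmap_resource(dev, dma, resource_size(res), DMA_BIDIRECTIONAL, 0);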
2067 struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); in arm_iommu_sync_single_for_cpu()
2081 struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); in arm_iommu_sync_single_for_device()
2152 int err = -ENOMEM; in arm_iommu_create_mapping()
2154 /* currently only 32-bit DMA address space is supported */ in arm_iommu_create_mapping()
2156 return ERR_PTR(-ERANGE); in arm_iommu_create_mapping()
2159 return ERR_PTR(-EINVAL); in arm_iommu_create_mapping()
2170 mapping->bitmap_size = bitmap_size; in arm_iommu_create_mapping()
2171 mapping->bitmaps = kcalloc(extensions, sizeof(unsigned long *), in arm_iommu_create_mapping()
2173 if (!mapping->bitmaps) in arm_iommu_create_mapping()
2176 mapping->bitmaps[0] = kzalloc(bitmap_size, GFP_KERNEL); in arm_iommu_create_mapping()
2177 if (!mapping->bitmaps[0]) in arm_iommu_create_mapping()
2180 mapping->nr_bitmaps = 1; in arm_iommu_create_mapping()
2181 mapping->extensions = extensions; in arm_iommu_create_mapping()
2182 mapping->base = base; in arm_iommu_create_mapping()
2183 mapping->bits = BITS_PER_BYTE * bitmap_size; in arm_iommu_create_mapping()
2185 spin_lock_init(&mapping->lock); in arm_iommu_create_mapping()
2187 mapping->domain = iommu_domain_alloc(bus); in arm_iommu_create_mapping()
2188 if (!mapping->domain) in arm_iommu_create_mapping()
2191 kref_init(&mapping->kref); in arm_iommu_create_mapping()
2194 kfree(mapping->bitmaps[0]); in arm_iommu_create_mapping()
2196 kfree(mapping->bitmaps); in arm_iommu_create_mapping()
2210 iommu_domain_free(mapping->domain); in release_iommu_mapping()
2211 for (i = 0; i < mapping->nr_bitmaps; i++) in release_iommu_mapping()
2212 kfree(mapping->bitmaps[i]); in release_iommu_mapping()
2213 kfree(mapping->bitmaps); in release_iommu_mapping()
2221 if (mapping->nr_bitmaps >= mapping->extensions) in extend_iommu_mapping()
2222 return -EINVAL; in extend_iommu_mapping()
2224 next_bitmap = mapping->nr_bitmaps; in extend_iommu_mapping()
2225 mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size, in extend_iommu_mapping()
2227 if (!mapping->bitmaps[next_bitmap]) in extend_iommu_mapping()
2228 return -ENOMEM; in extend_iommu_mapping()
2230 mapping->nr_bitmaps++; in extend_iommu_mapping()
2238 kref_put(&mapping->kref, release_iommu_mapping); in arm_iommu_release_mapping()
2247 err = iommu_attach_device(mapping->domain, dev); in __arm_iommu_attach_device()
2251 kref_get(&mapping->kref); in __arm_iommu_attach_device()
2265 * This replaces the dma operations (dma_map_ops pointer) with the
2290 * This voids the dma operations (dma_map_ops pointer)
2302 iommu_detach_device(mapping->domain, dev); in arm_iommu_detach_device()
2303 kref_put(&mapping->kref, release_iommu_mapping); in arm_iommu_detach_device()
2305 set_dma_ops(dev, arm_get_dma_map_ops(dev->archdata.dma_coherent)); in arm_iommu_detach_device()
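The exported attach/detach pair above is what an IOMMU or platform driver uses to give a device its own IO virtual address space; a hedged sketch with an invented base address and window size (assumes <linux/platform_device.h> and <linux/sizes.h>):

struct dma_iommu_mapping *mapping;

mapping = arm_iommu_create_mapping(&platform_bus_type, 0x80000000, SZ_128M);
if (IS_ERR(mapping))
	return PTR_ERR(mapping);

if (arm_iommu_attach_device(dev, mapping)) {
	arm_iommu_release_mapping(mapping);
	return -ENODEV;
}

/* ... and on teardown ... */
arm_iommu_detach_device(dev);
arm_iommu_release_mapping(mapping);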
2311 static const struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent) in arm_get_iommu_dma_map_ops() argument
2313 return coherent ? &iommu_coherent_ops : &iommu_ops; in arm_get_iommu_dma_map_ops()
2324 mapping = arm_iommu_create_mapping(dev->bus, dma_base, size); in arm_setup_iommu_dma_ops()
2326 pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n", in arm_setup_iommu_dma_ops()
2367 const struct iommu_ops *iommu, bool coherent) in arch_setup_dma_ops() argument
2371 dev->archdata.dma_coherent = coherent; in arch_setup_dma_ops()
2378 if (dev->dma_ops) in arch_setup_dma_ops()
2382 dma_ops = arm_get_iommu_dma_map_ops(coherent); in arch_setup_dma_ops()
2384 dma_ops = arm_get_dma_map_ops(coherent); in arch_setup_dma_ops()
2390 dev->archdata.dev_dma_ops = dev->dma_ops; in arch_setup_dma_ops()
2391 dev->dma_ops = xen_dma_ops; in arch_setup_dma_ops()
2394 dev->archdata.dma_ops_setup = true; in arch_setup_dma_ops()
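So, for example, a cache-coherent device behind an IOMMU ends up using iommu_coherent_ops, a non-coherent device without one falls back to arm_dma_ops, and under Xen the chosen ops are saved and replaced by xen_dma_ops.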
2399 if (!dev->archdata.dma_ops_setup) in arch_teardown_dma_ops()
2403 /* Let arch_setup_dma_ops() start again from scratch upon re-probe */ in arch_teardown_dma_ops()