Lines Matching +full:dma +full:-coherent
2 * SWIOTLB-based DMA API implementation
27 #include <linux/dma-direct.h>
28 #include <linux/dma-contiguous.h>
38 bool coherent) in __get_dma_pgprot() argument
40 if (!coherent || (attrs & DMA_ATTR_WRITE_COMBINE)) in __get_dma_pgprot()
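The condition at lines 38-40 is only part of __get_dma_pgprot(); a plausible completion, hedged as a sketch (the pgprot_writecombine() branch is inferred from the condition, not quoted from the file):

static pgprot_t __get_dma_pgprot_sketch(unsigned long attrs, pgprot_t prot,
                                        bool coherent)
{
        /*
         * Non-coherent devices, or callers passing DMA_ATTR_WRITE_COMBINE,
         * get a write-combined (non-cacheable) protection; coherent devices
         * keep the cacheable protection bits unchanged.
         */
        if (!coherent || (attrs & DMA_ATTR_WRITE_COMBINE))
                return pgprot_writecombine(prot);
        return prot;
}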
63 WARN(1, "coherent pool not initialised!\n"); in __alloc_from_pool()
100 bool coherent = is_device_dma_coherent(dev); in __dma_alloc() local
105 if (!coherent && !gfpflags_allow_blocking(flags)) { in __dma_alloc()
119 /* no need for non-cacheable mapping if coherent */ in __dma_alloc()
120 if (coherent) in __dma_alloc()
126 /* create a coherent mapping */ in __dma_alloc()
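The __dma_alloc() matches above show the two coherence checks on this path: atomic non-coherent requests are diverted to the pre-mapped atomic pool (line 105), and a separate non-cacheable CPU alias is created only for non-coherent devices. A minimal sketch of the remap decision, assuming the era's helpers __dma_flush_area() and dma_common_contiguous_remap() (their exact signatures varied across releases, so treat the calls as illustrative):

/*
 * Sketch, not the kernel's function: given a freshly allocated, cacheable
 * kernel address 'ptr', decide whether a non-cacheable alias is needed.
 */
static void *__dma_remap_if_needed_sketch(struct device *dev, void *ptr,
                                          size_t size, unsigned long attrs)
{
        pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, false);

        /* no need for non-cacheable mapping if coherent */
        if (is_device_dma_coherent(dev))
                return ptr;

        /* flush any dirty lines on the cacheable (linear map) alias ... */
        __dma_flush_area(ptr, size);

        /* ... then create the coherent, write-combined mapping for the CPU */
        return dma_common_contiguous_remap(virt_to_page(ptr), size, VM_USERMAP,
                                           prot, __builtin_return_address(0));
}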
194 __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)), in __swiotlb_map_sg_attrs()
195 sg->length, dir); in __swiotlb_map_sg_attrs()
211 __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)), in __swiotlb_unmap_sg_attrs()
212 sg->length, dir); in __swiotlb_unmap_sg_attrs()
243 __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)), in __swiotlb_sync_sg_for_cpu()
244 sg->length, dir); in __swiotlb_sync_sg_for_cpu()
258 __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)), in __swiotlb_sync_sg_for_device()
259 sg->length, dir); in __swiotlb_sync_sg_for_device()
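All four scatterlist hooks above repeat the same per-entry cache maintenance around swiotlb's own bookkeeping; the dma_to_phys()/phys_to_virt() round trip makes the maintenance land on the bounce buffer when one is in use. A sketch of that loop (only the non-coherent case needs it):

#include <linux/scatterlist.h>

static void __sg_cache_maint_sketch(struct device *dev,
                                    struct scatterlist *sgl, int nelems,
                                    enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        if (is_device_dma_coherent(dev))
                return;

        for_each_sg(sgl, sg, nelems, i)
                /* dma_address may point at a swiotlb bounce buffer */
                __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                               sg->length, dir);
}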
265 int ret = -ENXIO; in __swiotlb_mmap_pfn()
268 unsigned long off = vma->vm_pgoff; in __swiotlb_mmap_pfn()
270 if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) { in __swiotlb_mmap_pfn()
271 ret = remap_pfn_range(vma, vma->vm_start, in __swiotlb_mmap_pfn()
273 vma->vm_end - vma->vm_start, in __swiotlb_mmap_pfn()
274 vma->vm_page_prot); in __swiotlb_mmap_pfn()
288 vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot, in __swiotlb_mmap()
303 sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); in __swiotlb_get_sgtable_page()
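The __swiotlb_mmap_pfn() matches show the bounds check and the remap_pfn_range() call with some arguments elided; a completed sketch (the pfn + off argument and the vma_pages() computation are filled in by inference):

static int __swiotlb_mmap_pfn_sketch(struct vm_area_struct *vma,
                                     unsigned long pfn, size_t size)
{
        int ret = -ENXIO;
        unsigned long nr_vma_pages = vma_pages(vma);
        unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long off = vma->vm_pgoff;

        /* only map if the requested window lies inside the buffer */
        if (off < nr_pages && nr_vma_pages <= (nr_pages - off))
                ret = remap_pfn_range(vma, vma->vm_start, pfn + off,
                                      vma->vm_end - vma->vm_start,
                                      vma->vm_page_prot);

        return ret;
}

The caller (__swiotlb_mmap(), line 288) first rewrites vma->vm_page_prot with __get_dma_pgprot(), so user space gets the same memory type as the kernel-side mapping.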
369 atomic_pool = gen_pool_create(PAGE_SHIFT, -1); in atomic_pool_init()
381 atomic_pool_size, -1); in atomic_pool_init()
389 pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n", in atomic_pool_init()
404 pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n", in atomic_pool_init()
406 return -ENOMEM; in atomic_pool_init()
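The atomic_pool_init() matches show a genalloc pool being created and seeded at boot so that later GFP_ATOMIC, non-coherent allocations (the __alloc_from_pool() path at line 63) never have to remap memory. A sketch of that setup, assuming 'addr' and 'page' come from the elided CMA-or-buddy allocation and non-cacheable remap steps:

#include <linux/genalloc.h>

static struct gen_pool *atomic_pool_sketch;

static int __init atomic_pool_init_sketch(void *addr, struct page *page,
                                          size_t atomic_pool_size)
{
        atomic_pool_sketch = gen_pool_create(PAGE_SHIFT, -1);
        if (!atomic_pool_sketch)
                goto out;

        /* hand the remapped chunk to genalloc, keeping virt<->phys info */
        if (gen_pool_add_virt(atomic_pool_sketch, (unsigned long)addr,
                              page_to_phys(page), atomic_pool_size, -1))
                goto destroy_genpool;

        pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
                atomic_pool_size / 1024);
        return 0;

destroy_genpool:
        gen_pool_destroy(atomic_pool_sketch);
        atomic_pool_sketch = NULL;
out:
        pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
               atomic_pool_size / 1024);
        return -ENOMEM;
}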
410 * The following APIs are for dummy DMA ops *
431 return -ENXIO; in __dummy_mmap()
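The dummy ops exist so that a device whose DMA configuration failed still has a well-defined set of operations: every one of them fails or does nothing. A representative member, sketched with the 4.x-era dma_map_ops mmap signature:

static int __dummy_mmap_sketch(struct device *dev, struct vm_area_struct *vma,
                               void *cpu_addr, dma_addr_t dma_addr,
                               size_t size, unsigned long attrs)
{
        return -ENXIO;          /* the device must not be used for DMA */
}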
517 #include <linux/dma-iommu.h>
531 bool coherent = is_device_dma_coherent(dev); in __iommu_alloc_attrs() local
532 int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs); in __iommu_alloc_attrs()
554 if (coherent) { in __iommu_alloc_attrs()
565 if (coherent) in __iommu_alloc_attrs()
572 pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent); in __iommu_alloc_attrs()
590 if (!coherent) in __iommu_alloc_attrs()
599 pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent); in __iommu_alloc_attrs()
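Two distinct protections are derived from the same coherence flag in the __iommu_alloc_attrs() matches above: dma_info_to_prot() picks the IOMMU mapping attributes (cacheable transactions only for coherent devices) and __get_dma_pgprot() picks the CPU page protection for the remapped buffer. A sketch isolating just that pairing:

static void __iommu_alloc_prot_sketch(struct device *dev, unsigned long attrs,
                                      int *ioprot, pgprot_t *cpu_prot)
{
        bool coherent = is_device_dma_coherent(dev);

        /* what the IOMMU will enforce on the device's transactions */
        *ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);

        /* what the CPU-side (kernel or user) mapping will use */
        *cpu_prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
}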
623 * - A remapped array of pages for contiguous allocations. in __iommu_free_attrs()
624 * - A remapped array of pages from iommu_dma_alloc(), for all in __iommu_free_attrs()
625 * non-atomic allocations. in __iommu_free_attrs()
626 * - A non-cacheable alias from the atomic pool, for atomic in __iommu_free_attrs()
627 * allocations by non-coherent devices. in __iommu_free_attrs()
628 * - A normal lowmem address, for atomic allocations by in __iommu_free_attrs()
629 * coherent devices. in __iommu_free_attrs()
644 if (WARN_ON(!area || !area->pages)) in __iommu_free_attrs()
646 iommu_dma_free(dev, area->pages, iosize, &handle); in __iommu_free_attrs()
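The comment block above enumerates the four kinds of cpu_addr the IOMMU free path may be handed; the interesting part is how they are told apart. A sketch of that case analysis (the actual unmap/free calls are reduced to comments, and __in_atomic_pool() is the file's internal helper, assumed here as a stand-in prototype):

#include <linux/vmalloc.h>

bool __in_atomic_pool(void *start, size_t size);    /* stand-in prototype */

static void __iommu_free_case_sketch(struct device *dev, void *cpu_addr,
                                     size_t size, unsigned long attrs)
{
        if (__in_atomic_pool(cpu_addr, size)) {
                /* non-cacheable alias from the atomic pool */
        } else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
                /* remapped array of pages backing a contiguous (CMA) buffer */
        } else if (is_vmalloc_addr(cpu_addr)) {
                /* remapped pages from iommu_dma_alloc() */
                struct vm_struct *area = find_vm_area(cpu_addr);

                if (WARN_ON(!area || !area->pages))
                        return;
                /* ... iommu_dma_free(dev, area->pages, iosize, &handle) ... */
        } else {
                /* plain lowmem address: atomic allocation, coherent device */
        }
}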
661 vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot, in __iommu_mmap_attrs()
682 if (WARN_ON(!area || !area->pages)) in __iommu_mmap_attrs()
683 return -ENXIO; in __iommu_mmap_attrs()
685 return iommu_dma_mmap(area->pages, size, vma); in __iommu_mmap_attrs()
709 if (WARN_ON(!area || !area->pages)) in __iommu_get_sgtable()
710 return -ENXIO; in __iommu_get_sgtable()
712 return sg_alloc_table_from_pages(sgt, area->pages, count, 0, size, in __iommu_get_sgtable()
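Both the IOMMU mmap and get_sgtable matches recover the backing page array from the vmalloc area that the allocation created, then defer to a generic helper (iommu_dma_mmap() at line 685, sg_alloc_table_from_pages() at line 712). A sketch of the sgtable side, with the page count computation and GFP flag filled in by inference:

#include <linux/vmalloc.h>

static int __iommu_get_sgtable_sketch(struct device *dev, struct sg_table *sgt,
                                      void *cpu_addr, size_t size)
{
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        struct vm_struct *area = find_vm_area(cpu_addr);

        if (WARN_ON(!area || !area->pages))
                return -ENXIO;

        /* build a scatterlist over the pages that back the CPU mapping */
        return sg_alloc_table_from_pages(sgt, area->pages, count, 0, size,
                                         GFP_KERNEL);
}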
747 bool coherent = is_device_dma_coherent(dev); in __iommu_map_page() local
748 int prot = dma_info_to_prot(dir, coherent, attrs); in __iommu_map_page()
779 __dma_unmap_area(sg_virt(sg), sg->length, dir); in __iommu_sync_sg_for_cpu()
793 __dma_map_area(sg_virt(sg), sg->length, dir); in __iommu_sync_sg_for_device()
800 bool coherent = is_device_dma_coherent(dev); in __iommu_map_sg_attrs() local
806 dma_info_to_prot(dir, coherent, attrs)); in __iommu_map_sg_attrs()
853 * The IOMMU core code allocates the default DMA domain, which the in __iommu_setup_dma_ops()
854 * underlying IOMMU driver needs to support via the dma-iommu layer. in __iommu_setup_dma_ops()
861 if (domain->type == IOMMU_DOMAIN_DMA) { in __iommu_setup_dma_ops()
865 dev->dma_ops = &iommu_dma_ops; in __iommu_setup_dma_ops()
871 pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n", in __iommu_setup_dma_ops()
877 dev->dma_ops = NULL; in arch_teardown_dma_ops()
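The __iommu_setup_dma_ops() matches show the gatekeeping: only a device attached to a default DMA domain (the type the dma-iommu layer manages) gets the IOMMU DMA ops; anything else keeps the platform swiotlb ops, with the pr_warn() above as the fallback message, and arch_teardown_dma_ops() simply clears the pointer. A sketch of that selection; iommu_dma_init_domain() is used here as the era's dma-iommu init hook and should be treated as an assumption:

static void __iommu_setup_dma_ops_sketch(struct device *dev, u64 dma_base,
                                         u64 size)
{
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

        if (!domain)
                goto out_err;

        if (domain->type == IOMMU_DOMAIN_DMA) {
                if (iommu_dma_init_domain(domain, dma_base, size, dev))
                        goto out_err;
                dev->dma_ops = &iommu_dma_ops;
        }
        return;

out_err:
        pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
                dev_name(dev));
}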
889 const struct iommu_ops *iommu, bool coherent) in arch_setup_dma_ops() argument
891 if (!dev->dma_ops) in arch_setup_dma_ops()
892 dev->dma_ops = &arm64_swiotlb_dma_ops; in arch_setup_dma_ops()
894 dev->archdata.dma_coherent = coherent; in arch_setup_dma_ops()
899 dev->archdata.dev_dma_ops = dev->dma_ops; in arch_setup_dma_ops()
900 dev->dma_ops = xen_dma_ops; in arch_setup_dma_ops()
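Putting the tail of the listing together, arch_setup_dma_ops() installs the swiotlb-backed default ops unless a bus already set some, records the firmware-described coherence in archdata, optionally switches to the IOMMU ops, and finally lets a Xen initial domain interpose xen_dma_ops while stashing the original ops. A sketch of that flow; the xen_initial_domain() guard and CONFIG_XEN conditional are inferred from context rather than quoted:

void arch_setup_dma_ops_sketch(struct device *dev, u64 dma_base, u64 size,
                               const struct iommu_ops *iommu, bool coherent)
{
        if (!dev->dma_ops)
                dev->dma_ops = &arm64_swiotlb_dma_ops;

        dev->archdata.dma_coherent = coherent;

        /* __iommu_setup_dma_ops(dev, dma_base, size, iommu) would run here */

#ifdef CONFIG_XEN
        if (xen_initial_domain()) {
                /* keep the native ops so the Xen layer can fall back to them */
                dev->archdata.dev_dma_ops = dev->dma_ops;
                dev->dma_ops = xen_dma_ops;
        }
#endif
}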