Lines matching refs: dev
26 static inline dma_addr_t phys_to_dma_direct(struct device *dev, in phys_to_dma_direct() argument
29 if (force_dma_unencrypted(dev)) in phys_to_dma_direct()
30 return phys_to_dma_unencrypted(dev, phys); in phys_to_dma_direct()
31 return phys_to_dma(dev, phys); in phys_to_dma_direct()
34 static inline struct page *dma_direct_to_page(struct device *dev, in dma_direct_to_page() argument
37 return pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_addr))); in dma_direct_to_page()
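The two inline helpers above translate between CPU physical addresses and device (DMA) addresses. A minimal reconstruction from the fragments shown, assuming the surrounding lines follow mainline kernel/dma/direct.c (only the fragments listed above are confirmed here):

/* Reconstruction sketch of the file-internal helpers; not verified
 * against the exact tree this listing was generated from. */
static inline dma_addr_t phys_to_dma_direct(struct device *dev,
                phys_addr_t phys)
{
        /* Memory-encryption setups need the unencrypted translation. */
        if (force_dma_unencrypted(dev))
                return phys_to_dma_unencrypted(dev, phys);
        return phys_to_dma(dev, phys);
}

static inline struct page *dma_direct_to_page(struct device *dev,
                dma_addr_t dma_addr)
{
        /* Reverse mapping: DMA address -> physical address -> struct page. */
        return pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_addr)));
}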
40 u64 dma_direct_get_required_mask(struct device *dev) in dma_direct_get_required_mask() argument
43 u64 max_dma = phys_to_dma_direct(dev, phys); in dma_direct_get_required_mask()
48 static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 *phys_limit) in dma_direct_optimal_gfp_mask() argument
51 dev->coherent_dma_mask, in dma_direct_optimal_gfp_mask()
52 dev->bus_dma_limit); in dma_direct_optimal_gfp_mask()
62 *phys_limit = dma_to_phys(dev, dma_limit); in dma_direct_optimal_gfp_mask()
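dma_direct_optimal_gfp_mask() turns the device's addressing limits into a GFP zone hint. A hedged sketch: only the min_not_zero() and dma_to_phys() lines are confirmed above, and the zone cutoffs (zone_dma_bits, 32 bits) are assumed from mainline.

static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 *phys_limit)
{
        u64 dma_limit = min_not_zero(dev->coherent_dma_mask,
                                     dev->bus_dma_limit);

        /* Report the physical limit back to the caller ... */
        *phys_limit = dma_to_phys(dev, dma_limit);

        /* ... and optimistically pick the zone that limit falls into;
         * callers retry with more restrictive zones if the allocation
         * turns out not to be addressable. */
        if (*phys_limit <= DMA_BIT_MASK(zone_dma_bits))
                return GFP_DMA;
        if (*phys_limit <= DMA_BIT_MASK(32))
                return GFP_DMA32;
        return 0;
}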
71 bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size) in dma_coherent_ok() argument
73 dma_addr_t dma_addr = phys_to_dma_direct(dev, phys); in dma_coherent_ok()
78 min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit); in dma_coherent_ok()
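dma_coherent_ok() is the addressability check used throughout the allocators: the candidate range, translated to a DMA address, must fit below both the coherent DMA mask and any bus limit. Sketch consistent with the fragments above; the DMA_MAPPING_ERROR check is assumed from mainline.

bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
        dma_addr_t dma_addr = phys_to_dma_direct(dev, phys);

        if (dma_addr == DMA_MAPPING_ERROR)
                return false;
        return dma_addr + size - 1 <=
                min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
}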
81 static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size) in dma_set_decrypted() argument
83 if (!force_dma_unencrypted(dev)) in dma_set_decrypted()
88 static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size) in dma_set_encrypted() argument
92 if (!force_dma_unencrypted(dev)) in dma_set_encrypted()
100 static void __dma_direct_free_pages(struct device *dev, struct page *page, in __dma_direct_free_pages() argument
103 if (swiotlb_free(dev, page, size)) in __dma_direct_free_pages()
105 dma_free_contiguous(dev, page, size); in __dma_direct_free_pages()
108 static struct page *dma_direct_alloc_swiotlb(struct device *dev, size_t size) in dma_direct_alloc_swiotlb() argument
110 struct page *page = swiotlb_alloc(dev, size); in dma_direct_alloc_swiotlb()
112 if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) { in dma_direct_alloc_swiotlb()
113 swiotlb_free(dev, page, size); in dma_direct_alloc_swiotlb()
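Devices restricted to a software-IO-TLB pool (is_swiotlb_for_alloc()) get their pages from swiotlb_alloc() and return them with swiotlb_free(); an allocation that still is not addressable is released on the spot. Sketch, assuming the mainline shape of these helpers:

static void __dma_direct_free_pages(struct device *dev, struct page *page,
                size_t size)
{
        /* Pages that came from the swiotlb pool go back there ... */
        if (swiotlb_free(dev, page, size))
                return;
        /* ... everything else goes back to CMA or the page allocator. */
        dma_free_contiguous(dev, page, size);
}

static struct page *dma_direct_alloc_swiotlb(struct device *dev, size_t size)
{
        struct page *page = swiotlb_alloc(dev, size);

        if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
                swiotlb_free(dev, page, size);
                return NULL;
        }
        return page;
}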
120 static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size, in __dma_direct_alloc_pages() argument
123 int node = dev_to_node(dev); in __dma_direct_alloc_pages()
129 if (is_swiotlb_for_alloc(dev)) in __dma_direct_alloc_pages()
130 return dma_direct_alloc_swiotlb(dev, size); in __dma_direct_alloc_pages()
132 gfp |= dma_direct_optimal_gfp_mask(dev, &phys_limit); in __dma_direct_alloc_pages()
133 page = dma_alloc_contiguous(dev, size, gfp); in __dma_direct_alloc_pages()
135 if (!dma_coherent_ok(dev, page_to_phys(page), size) || in __dma_direct_alloc_pages()
137 dma_free_contiguous(dev, page, size); in __dma_direct_alloc_pages()
144 if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) { in __dma_direct_alloc_pages()
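__dma_direct_alloc_pages() combines the helpers above into a retry cascade: try the contiguous/CMA allocator first, then the page allocator on the device's NUMA node, and if the result is not addressable retry with progressively more restrictive zones. A condensed sketch (error paths and sanity checks abbreviated; not the verbatim body):

static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
                gfp_t gfp, bool allow_highmem)
{
        int node = dev_to_node(dev);
        struct page *page = NULL;
        u64 phys_limit;

        if (is_swiotlb_for_alloc(dev))
                return dma_direct_alloc_swiotlb(dev, size);

        gfp |= dma_direct_optimal_gfp_mask(dev, &phys_limit);
        page = dma_alloc_contiguous(dev, size, gfp);
        if (page && (!dma_coherent_ok(dev, page_to_phys(page), size) ||
                     (!allow_highmem && PageHighMem(page)))) {
                dma_free_contiguous(dev, page, size);
                page = NULL;
        }
again:
        if (!page)
                page = alloc_pages_node(node, gfp, get_order(size));
        if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
                __free_pages(page, get_order(size));
                page = NULL;
                /* Fall back to lower zones until the address fits. */
                if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
                    phys_limit < DMA_BIT_MASK(64) &&
                    !(gfp & (GFP_DMA32 | GFP_DMA))) {
                        gfp |= GFP_DMA32;
                        goto again;
                }
                if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
                        gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
                        goto again;
                }
        }
        return page;
}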
169 static bool dma_direct_use_pool(struct device *dev, gfp_t gfp) in dma_direct_use_pool() argument
171 return !gfpflags_allow_blocking(gfp) && !is_swiotlb_for_alloc(dev); in dma_direct_use_pool()
174 static void *dma_direct_alloc_from_pool(struct device *dev, size_t size, in dma_direct_alloc_from_pool() argument
184 gfp |= dma_direct_optimal_gfp_mask(dev, &phys_limit); in dma_direct_alloc_from_pool()
185 page = dma_alloc_from_pool(dev, size, &ret, gfp, dma_coherent_ok); in dma_direct_alloc_from_pool()
188 *dma_handle = phys_to_dma_direct(dev, page_to_phys(page)); in dma_direct_alloc_from_pool()
192 static void *dma_direct_alloc_no_mapping(struct device *dev, size_t size, in dma_direct_alloc_no_mapping() argument
197 page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true); in dma_direct_alloc_no_mapping()
206 *dma_handle = phys_to_dma_direct(dev, page_to_phys(page)); in dma_direct_alloc_no_mapping()
210 void *dma_direct_alloc(struct device *dev, size_t size, in dma_direct_alloc() argument
222 !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev)) in dma_direct_alloc()
223 return dma_direct_alloc_no_mapping(dev, size, dma_handle, gfp); in dma_direct_alloc()
225 if (!dev_is_dma_coherent(dev)) { in dma_direct_alloc()
227 !is_swiotlb_for_alloc(dev)) in dma_direct_alloc()
228 return arch_dma_alloc(dev, size, dma_handle, gfp, in dma_direct_alloc()
236 return dma_alloc_from_global_coherent(dev, size, in dma_direct_alloc()
256 if ((remap || force_dma_unencrypted(dev)) && in dma_direct_alloc()
257 dma_direct_use_pool(dev, gfp)) in dma_direct_alloc()
258 return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp); in dma_direct_alloc()
261 page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true); in dma_direct_alloc()
276 pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs); in dma_direct_alloc()
278 if (force_dma_unencrypted(dev)) in dma_direct_alloc()
291 if (dma_set_decrypted(dev, ret, size)) in dma_direct_alloc()
304 *dma_handle = phys_to_dma_direct(dev, page_to_phys(page)); in dma_direct_alloc()
308 if (dma_set_encrypted(dev, page_address(page), size)) in dma_direct_alloc()
311 __dma_direct_free_pages(dev, page, size); in dma_direct_alloc()
318 void dma_direct_free(struct device *dev, size_t size, in dma_direct_free() argument
324 !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev)) { in dma_direct_free()
326 dma_free_contiguous(dev, cpu_addr, size); in dma_direct_free()
331 !dev_is_dma_coherent(dev) && in dma_direct_free()
332 !is_swiotlb_for_alloc(dev)) { in dma_direct_free()
333 arch_dma_free(dev, size, cpu_addr, dma_addr, attrs); in dma_direct_free()
338 !dev_is_dma_coherent(dev)) { in dma_direct_free()
346 dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size))) in dma_direct_free()
354 if (dma_set_encrypted(dev, cpu_addr, size)) in dma_direct_free()
358 __dma_direct_free_pages(dev, dma_direct_to_page(dev, dma_addr), size); in dma_direct_free()
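dma_direct_alloc()/dma_direct_free() are what a driver's dma_alloc_coherent()/dma_free_coherent() calls resolve to when the device uses the direct mapping (no IOMMU). A hypothetical driver-side sketch; the function and variable names are illustrative only:

#include <linux/dma-mapping.h>

/* Hypothetical helper: allocate a coherent ring buffer and free it again. */
static int example_setup_ring(struct device *dev)
{
        dma_addr_t ring_dma;
        void *ring;

        /* Reaches dma_direct_alloc() for direct-mapped devices. */
        ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
        if (!ring)
                return -ENOMEM;

        /* ... program ring_dma into the hardware, use 'ring' from the CPU ... */

        dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
        return 0;
}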
362 struct page *dma_direct_alloc_pages(struct device *dev, size_t size, in dma_direct_alloc_pages() argument
368 if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp)) in dma_direct_alloc_pages()
369 return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp); in dma_direct_alloc_pages()
371 page = __dma_direct_alloc_pages(dev, size, gfp, false); in dma_direct_alloc_pages()
376 if (dma_set_decrypted(dev, ret, size)) in dma_direct_alloc_pages()
379 *dma_handle = phys_to_dma_direct(dev, page_to_phys(page)); in dma_direct_alloc_pages()
385 void dma_direct_free_pages(struct device *dev, size_t size, in dma_direct_free_pages() argument
393 dma_free_from_pool(dev, vaddr, size)) in dma_direct_free_pages()
396 if (dma_set_encrypted(dev, vaddr, size)) in dma_direct_free_pages()
398 __dma_direct_free_pages(dev, page, size); in dma_direct_free_pages()
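dma_direct_alloc_pages()/dma_direct_free_pages() back the non-coherent page allocation API; the returned pages may need explicit dma_sync_single_for_*() calls around device access. Hypothetical usage sketch (names are illustrative):

#include <linux/dma-mapping.h>

/* Hypothetical: allocate a page the device will read from. */
static struct page *example_get_dma_page(struct device *dev, dma_addr_t *dma)
{
        return dma_alloc_pages(dev, PAGE_SIZE, dma, DMA_TO_DEVICE, GFP_KERNEL);
}

static void example_put_dma_page(struct device *dev, struct page *page,
                                 dma_addr_t dma)
{
        dma_free_pages(dev, PAGE_SIZE, page, dma, DMA_TO_DEVICE);
}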
403 void dma_direct_sync_sg_for_device(struct device *dev, in dma_direct_sync_sg_for_device() argument
410 phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg)); in dma_direct_sync_sg_for_device()
412 swiotlb_sync_single_for_device(dev, paddr, sg->length, dir); in dma_direct_sync_sg_for_device()
414 if (!dev_is_dma_coherent(dev)) in dma_direct_sync_sg_for_device()
424 void dma_direct_sync_sg_for_cpu(struct device *dev, in dma_direct_sync_sg_for_cpu() argument
431 phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg)); in dma_direct_sync_sg_for_cpu()
433 if (!dev_is_dma_coherent(dev)) in dma_direct_sync_sg_for_cpu()
436 swiotlb_sync_single_for_cpu(dev, paddr, sg->length, dir); in dma_direct_sync_sg_for_cpu()
442 if (!dev_is_dma_coherent(dev)) in dma_direct_sync_sg_for_cpu()
450 void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl, in dma_direct_unmap_sg() argument
460 dma_direct_unmap_page(dev, sg->dma_address, in dma_direct_unmap_sg()
466 int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents, in dma_direct_map_sg() argument
474 switch (pci_p2pdma_state(&p2pdma_state, dev, sg_page(sg))) { in dma_direct_map_sg()
483 sg->dma_address = dma_direct_map_page(dev, sg_page(sg), in dma_direct_map_sg()
505 dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC); in dma_direct_map_sg()
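The scatterlist paths above implement the streaming API for direct-mapped devices: dma_direct_map_sg() translates (and, via swiotlb, possibly bounces) each segment, and the sync helpers handle cache maintenance and bounce-buffer copy-back. A hypothetical sketch of the usual map-once, sync-per-transfer pattern; all example_* names are made up:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int example_map_once(struct device *dev, struct scatterlist *sgl,
                            int count)
{
        int nents = dma_map_sg(dev, sgl, count, DMA_BIDIRECTIONAL);

        return nents ? nents : -EIO;    /* 0 means the mapping failed */
}

static void example_one_transfer(struct device *dev, struct scatterlist *sgl,
                                 int count)
{
        /* CPU writes are done; hand the buffers to the device. */
        dma_sync_sg_for_device(dev, sgl, count, DMA_BIDIRECTIONAL);

        /* ... start the hardware and wait for completion ... */

        /* Make the device's writes visible to the CPU again. */
        dma_sync_sg_for_cpu(dev, sgl, count, DMA_BIDIRECTIONAL);
}

static void example_teardown(struct device *dev, struct scatterlist *sgl,
                             int count)
{
        dma_unmap_sg(dev, sgl, count, DMA_BIDIRECTIONAL);
}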
509 dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr, in dma_direct_map_resource() argument
514 if (unlikely(!dma_capable(dev, dma_addr, size, false))) { in dma_direct_map_resource()
515 dev_err_once(dev, in dma_direct_map_resource()
517 &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit); in dma_direct_map_resource()
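dma_direct_map_resource() backs dma_map_resource(), which maps a physical (typically MMIO) address rather than RAM, for example to expose one device's BAR to another device's DMA engine. Hypothetical sketch; the helper name and parameters are illustrative:

#include <linux/dma-mapping.h>

static int example_map_bar(struct device *dev, phys_addr_t phys, size_t len)
{
        dma_addr_t bus_addr;

        bus_addr = dma_map_resource(dev, phys, len, DMA_BIDIRECTIONAL, 0);
        if (dma_mapping_error(dev, bus_addr))
                return -EIO;

        /* ... program bus_addr into the peer's DMA engine ... */

        dma_unmap_resource(dev, bus_addr, len, DMA_BIDIRECTIONAL, 0);
        return 0;
}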
525 int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt, in dma_direct_get_sgtable() argument
529 struct page *page = dma_direct_to_page(dev, dma_addr); in dma_direct_get_sgtable()
538 bool dma_direct_can_mmap(struct device *dev) in dma_direct_can_mmap() argument
540 return dev_is_dma_coherent(dev) || in dma_direct_can_mmap()
544 int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma, in dma_direct_mmap() argument
550 unsigned long pfn = PHYS_PFN(dma_to_phys(dev, dma_addr)); in dma_direct_mmap()
553 vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs); in dma_direct_mmap()
554 if (force_dma_unencrypted(dev)) in dma_direct_mmap()
557 if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) in dma_direct_mmap()
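dma_direct_mmap()/dma_direct_get_sgtable() support exporting a coherent buffer to user space or to other kernel users; drivers reach them through dma_mmap_coherent()/dma_get_sgtable(). A hypothetical mmap sketch, assuming my_buf holds the results of an earlier dma_alloc_coherent():

#include <linux/dma-mapping.h>

/* Hypothetical container for a previously allocated coherent buffer. */
struct my_buf {
        struct device   *dev;
        void            *cpu_addr;
        dma_addr_t      dma_addr;
        size_t          size;
};

static int example_mmap(struct my_buf *buf, struct vm_area_struct *vma)
{
        return dma_mmap_coherent(buf->dev, vma, buf->cpu_addr,
                                 buf->dma_addr, buf->size);
}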
568 int dma_direct_supported(struct device *dev, u64 mask) in dma_direct_supported() argument
588 return mask >= phys_to_dma_unencrypted(dev, min_mask); in dma_direct_supported()
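dma_direct_supported() is consulted when a driver negotiates its DMA mask; dma_set_mask()/dma_set_mask_and_coherent() fail if the requested mask cannot cover the memory the direct mapping may hand out. Hypothetical probe-time sketch:

#include <linux/dma-mapping.h>

/* Hypothetical probe-time mask negotiation: prefer 64-bit, fall back to
 * 32-bit addressing if the wider mask is rejected. */
static int example_set_masks(struct device *dev)
{
        if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
                return 0;
        return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}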
591 static const struct bus_dma_region *dma_find_range(struct device *dev, in dma_find_range() argument
596 for (m = dev->dma_range_map; PFN_DOWN(m->size); m++) { in dma_find_range()
616 struct device *dev = data; in check_ram_in_range_map() local
621 bdr = dma_find_range(dev, start_pfn); in check_ram_in_range_map()
631 bool dma_direct_all_ram_mapped(struct device *dev) in dma_direct_all_ram_mapped() argument
633 if (!dev->dma_range_map) in dma_direct_all_ram_mapped()
635 return !walk_system_ram_range(0, PFN_DOWN(ULONG_MAX) + 1, dev, in dma_direct_all_ram_mapped()
639 size_t dma_direct_max_mapping_size(struct device *dev) in dma_direct_max_mapping_size() argument
642 if (is_swiotlb_active(dev) && in dma_direct_max_mapping_size()
643 (dma_addressing_limited(dev) || is_swiotlb_force_bounce(dev))) in dma_direct_max_mapping_size()
644 return swiotlb_max_mapping_size(dev); in dma_direct_max_mapping_size()
648 bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr) in dma_direct_need_sync() argument
650 return !dev_is_dma_coherent(dev) || in dma_direct_need_sync()
651 swiotlb_find_pool(dev, dma_to_phys(dev, dma_addr)); in dma_direct_need_sync()
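dma_direct_max_mapping_size() caps a single mapping at the swiotlb limit when bouncing may be required, and dma_direct_need_sync() lets fast paths skip syncs for coherent, non-bounced addresses. A hypothetical sketch of how a driver might consume the exported wrapper:

#include <linux/dma-mapping.h>
#include <linux/minmax.h>

/* Hypothetical: size one hardware request to what the DMA layer can map. */
static size_t example_max_request(struct device *dev, size_t want)
{
        return min(want, dma_max_mapping_size(dev));
}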
670 int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start, in dma_direct_set_offset() argument
676 if (dev->dma_range_map) { in dma_direct_set_offset()
677 dev_err(dev, "attempt to add DMA range to existing map\n"); in dma_direct_set_offset()
690 dev->dma_range_map = map; in dma_direct_set_offset()
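dma_direct_set_offset() lets platform code describe a constant bus offset (a single bus_dma_region) when there is no dma-ranges property to parse; as the lines above show, it refuses to overwrite an existing dma_range_map. A hypothetical platform-setup sketch with made-up addresses; the declaration is assumed to live in dma-map-ops.h as in recent mainline:

#include <linux/dma-map-ops.h>
#include <linux/sizes.h>

/* Hypothetical fixup: RAM at CPU address 0x80000000 appears to this
 * device at bus address 0x0, for a 1 GiB window. */
static int example_fixup_dma_offset(struct device *dev)
{
        return dma_direct_set_offset(dev, 0x80000000, 0x0, SZ_1G);
}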