Lines matching references to dev in the DMA direct-mapping code (kernel/dma/direct.c). Each entry shows the source line number, the matching line, and the enclosing function.

25 static inline dma_addr_t phys_to_dma_direct(struct device *dev,  in phys_to_dma_direct()  argument
28 if (force_dma_unencrypted(dev)) in phys_to_dma_direct()
29 return phys_to_dma_unencrypted(dev, phys); in phys_to_dma_direct()
30 return phys_to_dma(dev, phys); in phys_to_dma_direct()
33 static inline struct page *dma_direct_to_page(struct device *dev, in dma_direct_to_page() argument
36 return pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_addr))); in dma_direct_to_page()
39 u64 dma_direct_get_required_mask(struct device *dev) in dma_direct_get_required_mask() argument
42 u64 max_dma = phys_to_dma_direct(dev, phys); in dma_direct_get_required_mask()
47 static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask, in dma_direct_optimal_gfp_mask() argument
50 u64 dma_limit = min_not_zero(dma_mask, dev->bus_dma_limit); in dma_direct_optimal_gfp_mask()
60 *phys_limit = dma_to_phys(dev, dma_limit); in dma_direct_optimal_gfp_mask()
69 static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size) in dma_coherent_ok() argument
71 dma_addr_t dma_addr = phys_to_dma_direct(dev, phys); in dma_coherent_ok()
76 min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit); in dma_coherent_ok()
79 static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size) in dma_set_decrypted() argument
81 if (!force_dma_unencrypted(dev)) in dma_set_decrypted()
86 static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size) in dma_set_encrypted() argument
90 if (!force_dma_unencrypted(dev)) in dma_set_encrypted()
98 static void __dma_direct_free_pages(struct device *dev, struct page *page, in __dma_direct_free_pages() argument
102 swiotlb_free(dev, page, size)) in __dma_direct_free_pages()
104 dma_free_contiguous(dev, page, size); in __dma_direct_free_pages()
107 static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size, in __dma_direct_alloc_pages() argument
110 int node = dev_to_node(dev); in __dma_direct_alloc_pages()
116 gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask, in __dma_direct_alloc_pages()
119 is_swiotlb_for_alloc(dev)) { in __dma_direct_alloc_pages()
120 page = swiotlb_alloc(dev, size); in __dma_direct_alloc_pages()
121 if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) { in __dma_direct_alloc_pages()
122 __dma_direct_free_pages(dev, page, size); in __dma_direct_alloc_pages()
128 page = dma_alloc_contiguous(dev, size, gfp); in __dma_direct_alloc_pages()
130 if (!dma_coherent_ok(dev, page_to_phys(page), size) || in __dma_direct_alloc_pages()
132 dma_free_contiguous(dev, page, size); in __dma_direct_alloc_pages()
139 if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) { in __dma_direct_alloc_pages()
140 dma_free_contiguous(dev, page, size); in __dma_direct_alloc_pages()
160 static void *dma_direct_alloc_from_pool(struct device *dev, size_t size, in dma_direct_alloc_from_pool() argument
167 gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask, in dma_direct_alloc_from_pool()
169 page = dma_alloc_from_pool(dev, size, &ret, gfp, dma_coherent_ok); in dma_direct_alloc_from_pool()
172 *dma_handle = phys_to_dma_direct(dev, page_to_phys(page)); in dma_direct_alloc_from_pool()
176 static void *dma_direct_alloc_no_mapping(struct device *dev, size_t size, in dma_direct_alloc_no_mapping() argument
181 page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true); in dma_direct_alloc_no_mapping()
190 *dma_handle = phys_to_dma_direct(dev, page_to_phys(page)); in dma_direct_alloc_no_mapping()
194 void *dma_direct_alloc(struct device *dev, size_t size, in dma_direct_alloc() argument
205 !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev)) in dma_direct_alloc()
206 return dma_direct_alloc_no_mapping(dev, size, dma_handle, gfp); in dma_direct_alloc()
211 !dev_is_dma_coherent(dev) && in dma_direct_alloc()
212 !is_swiotlb_for_alloc(dev)) in dma_direct_alloc()
213 return arch_dma_alloc(dev, size, dma_handle, gfp, attrs); in dma_direct_alloc()
216 !dev_is_dma_coherent(dev)) in dma_direct_alloc()
217 return dma_alloc_from_global_coherent(dev, size, dma_handle); in dma_direct_alloc()
228 (force_dma_unencrypted(dev) || in dma_direct_alloc()
230 !dev_is_dma_coherent(dev))) && in dma_direct_alloc()
231 !is_swiotlb_for_alloc(dev)) in dma_direct_alloc()
232 return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp); in dma_direct_alloc()
235 page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true); in dma_direct_alloc()
240 !dev_is_dma_coherent(dev)) || in dma_direct_alloc()
247 dma_pgprot(dev, PAGE_KERNEL, attrs), in dma_direct_alloc()
262 dev_info(dev, "Rejecting highmem page from CMA.\n"); in dma_direct_alloc()
267 if (dma_set_decrypted(dev, ret, size)) in dma_direct_alloc()
272 !dev_is_dma_coherent(dev)) { in dma_direct_alloc()
279 *dma_handle = phys_to_dma_direct(dev, page_to_phys(page)); in dma_direct_alloc()
283 if (dma_set_encrypted(dev, page_address(page), size)) in dma_direct_alloc()
286 __dma_direct_free_pages(dev, page, size); in dma_direct_alloc()
290 void dma_direct_free(struct device *dev, size_t size, in dma_direct_free() argument
296 !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev)) { in dma_direct_free()
298 dma_free_contiguous(dev, cpu_addr, size); in dma_direct_free()
305 !dev_is_dma_coherent(dev) && in dma_direct_free()
306 !is_swiotlb_for_alloc(dev)) { in dma_direct_free()
307 arch_dma_free(dev, size, cpu_addr, dma_addr, attrs); in dma_direct_free()
312 !dev_is_dma_coherent(dev)) { in dma_direct_free()
320 dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size))) in dma_direct_free()
328 if (dma_set_encrypted(dev, cpu_addr, size)) in dma_direct_free()
332 __dma_direct_free_pages(dev, dma_direct_to_page(dev, dma_addr), size); in dma_direct_free()
335 struct page *dma_direct_alloc_pages(struct device *dev, size_t size, in dma_direct_alloc_pages() argument
342 force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp) && in dma_direct_alloc_pages()
343 !is_swiotlb_for_alloc(dev)) in dma_direct_alloc_pages()
344 return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp); in dma_direct_alloc_pages()
346 page = __dma_direct_alloc_pages(dev, size, gfp, false); in dma_direct_alloc_pages()
351 if (dma_set_decrypted(dev, ret, size)) in dma_direct_alloc_pages()
354 *dma_handle = phys_to_dma_direct(dev, page_to_phys(page)); in dma_direct_alloc_pages()
357 __dma_direct_free_pages(dev, page, size); in dma_direct_alloc_pages()
361 void dma_direct_free_pages(struct device *dev, size_t size, in dma_direct_free_pages() argument
369 dma_free_from_pool(dev, vaddr, size)) in dma_direct_free_pages()
372 if (dma_set_encrypted(dev, vaddr, size)) in dma_direct_free_pages()
374 __dma_direct_free_pages(dev, page, size); in dma_direct_free_pages()
379 void dma_direct_sync_sg_for_device(struct device *dev, in dma_direct_sync_sg_for_device() argument
386 phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg)); in dma_direct_sync_sg_for_device()
388 if (unlikely(is_swiotlb_buffer(dev, paddr))) in dma_direct_sync_sg_for_device()
389 swiotlb_sync_single_for_device(dev, paddr, sg->length, in dma_direct_sync_sg_for_device()
392 if (!dev_is_dma_coherent(dev)) in dma_direct_sync_sg_for_device()
402 void dma_direct_sync_sg_for_cpu(struct device *dev, in dma_direct_sync_sg_for_cpu() argument
409 phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg)); in dma_direct_sync_sg_for_cpu()
411 if (!dev_is_dma_coherent(dev)) in dma_direct_sync_sg_for_cpu()
414 if (unlikely(is_swiotlb_buffer(dev, paddr))) in dma_direct_sync_sg_for_cpu()
415 swiotlb_sync_single_for_cpu(dev, paddr, sg->length, in dma_direct_sync_sg_for_cpu()
422 if (!dev_is_dma_coherent(dev)) in dma_direct_sync_sg_for_cpu()
426 void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl, in dma_direct_unmap_sg() argument
433 dma_direct_unmap_page(dev, sg->dma_address, sg_dma_len(sg), dir, in dma_direct_unmap_sg()
438 int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents, in dma_direct_map_sg() argument
445 sg->dma_address = dma_direct_map_page(dev, sg_page(sg), in dma_direct_map_sg()
455 dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC); in dma_direct_map_sg()
459 dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr, in dma_direct_map_resource() argument
464 if (unlikely(!dma_capable(dev, dma_addr, size, false))) { in dma_direct_map_resource()
465 dev_err_once(dev, in dma_direct_map_resource()
467 &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit); in dma_direct_map_resource()
475 int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt, in dma_direct_get_sgtable() argument
479 struct page *page = dma_direct_to_page(dev, dma_addr); in dma_direct_get_sgtable()
488 bool dma_direct_can_mmap(struct device *dev) in dma_direct_can_mmap() argument
490 return dev_is_dma_coherent(dev) || in dma_direct_can_mmap()
494 int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma, in dma_direct_mmap() argument
500 unsigned long pfn = PHYS_PFN(dma_to_phys(dev, dma_addr)); in dma_direct_mmap()
503 vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs); in dma_direct_mmap()
505 if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) in dma_direct_mmap()
516 int dma_direct_supported(struct device *dev, u64 mask) in dma_direct_supported() argument
536 return mask >= phys_to_dma_unencrypted(dev, min_mask); in dma_direct_supported()
539 size_t dma_direct_max_mapping_size(struct device *dev) in dma_direct_max_mapping_size() argument
542 if (is_swiotlb_active(dev) && in dma_direct_max_mapping_size()
543 (dma_addressing_limited(dev) || is_swiotlb_force_bounce(dev))) in dma_direct_max_mapping_size()
544 return swiotlb_max_mapping_size(dev); in dma_direct_max_mapping_size()
548 bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr) in dma_direct_need_sync() argument
550 return !dev_is_dma_coherent(dev) || in dma_direct_need_sync()
551 is_swiotlb_buffer(dev, dma_to_phys(dev, dma_addr)); in dma_direct_need_sync()
570 int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start, in dma_direct_set_offset() argument
576 if (dev->dma_range_map) { in dma_direct_set_offset()
577 dev_err(dev, "attempt to add DMA range to existing map\n"); in dma_direct_set_offset()
591 dev->dma_range_map = map; in dma_direct_set_offset()
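
For context, the dma_direct_* helpers listed above are not called by drivers directly; they are reached through the generic DMA API when a device uses direct mapping (no IOMMU). The sketch below is a minimal, hypothetical driver-side example of how that path is typically exercised. The names my_dev_prepare and MY_BUF_SIZE are illustrative, not kernel APIs, and the code assumes a valid struct device *dev with a suitable DMA mask already configured.

/*
 * Hypothetical sketch: my_dev_prepare() and MY_BUF_SIZE are made-up names.
 * The DMA API calls themselves (dma_alloc_coherent, dma_map_single, ...)
 * are the real entry points that dispatch into the dma_direct_* code above
 * on a direct-mapped device.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/slab.h>

#define MY_BUF_SIZE 4096

static int my_dev_prepare(struct device *dev)
{
	void *cpu_addr, *streaming;
	dma_addr_t dma_handle, stream_addr;

	/*
	 * Coherent allocation: with direct mapping this ends up in
	 * dma_direct_alloc(), and the matching free in dma_direct_free().
	 */
	cpu_addr = dma_alloc_coherent(dev, MY_BUF_SIZE, &dma_handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/* Streaming buffer mapped for a single DMA transfer. */
	streaming = kmalloc(MY_BUF_SIZE, GFP_KERNEL);
	if (!streaming)
		goto err_free_coherent;

	stream_addr = dma_map_single(dev, streaming, MY_BUF_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, stream_addr))
		goto err_free_buf;

	/* ... program the device with dma_handle / stream_addr here ... */

	dma_unmap_single(dev, stream_addr, MY_BUF_SIZE, DMA_TO_DEVICE);
	kfree(streaming);
	dma_free_coherent(dev, MY_BUF_SIZE, cpu_addr, dma_handle);
	return 0;

err_free_buf:
	kfree(streaming);
err_free_coherent:
	dma_free_coherent(dev, MY_BUF_SIZE, cpu_addr, dma_handle);
	return -ENOMEM;
}

On a direct-mapped device, dma_alloc_coherent()/dma_free_coherent() reach dma_direct_alloc()/dma_direct_free(), and dma_map_single()/dma_unmap_single() reach dma_direct_map_page()/dma_direct_unmap_page(). That is where the dev references listed above come into play: addressability checks against dev->coherent_dma_mask and dev->bus_dma_limit, optional swiotlb bouncing, and phys-to-DMA translation via dev->dma_range_map.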