
Lines Matching refs:size

71 bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)  in dma_coherent_ok()  argument
77 return dma_addr + size - 1 <= in dma_coherent_ok()
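The two matches above are the opening and the closing comparison of the reachability check. A hedged reconstruction of the whole helper, filling the gap between lines 71 and 77 with the usual translation step (phys_to_dma_direct() and the DMA_MAPPING_ERROR guard are inferred from context, not shown in the matches):

static bool dma_coherent_ok_sketch(struct device *dev, phys_addr_t phys,
				   size_t size)
{
	/* translate the CPU physical address to a device-visible address */
	dma_addr_t dma_addr = phys_to_dma_direct(dev, phys);

	if (dma_addr == DMA_MAPPING_ERROR)
		return false;
	/* the whole buffer [dma_addr, dma_addr + size - 1] must sit below the
	 * tighter of the coherent DMA mask and the bus limit */
	return dma_addr + size - 1 <=
		min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
}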
81 static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size) in dma_set_decrypted() argument
85 return set_memory_decrypted((unsigned long)vaddr, PFN_UP(size)); in dma_set_decrypted()
88 static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size) in dma_set_encrypted() argument
94 ret = set_memory_encrypted((unsigned long)vaddr, PFN_UP(size)); in dma_set_encrypted()
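Both helpers convert the byte count to whole pages before toggling memory encryption: PFN_UP(size) rounds up, so a partially used trailing page is still covered. A small sketch of how the encrypt side likely wraps the call; only the set_memory_encrypted() line appears in the matches, the force_dma_unencrypted() guard and the warning on failure are assumptions:

static int dma_set_encrypted_sketch(struct device *dev, void *vaddr, size_t size)
{
	int ret;

	/* nothing to do unless the buffer was shared (decrypted) earlier */
	if (!force_dma_unencrypted(dev))
		return 0;
	/* PFN_UP(size) == number of pages needed to cover 'size' bytes */
	ret = set_memory_encrypted((unsigned long)vaddr, PFN_UP(size));
	if (ret)
		pr_warn_ratelimited("re-encrypting DMA memory failed, leaking it\n");
	return ret;
}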
101 size_t size) in __dma_direct_free_pages() argument
103 if (swiotlb_free(dev, page, size)) in __dma_direct_free_pages()
105 dma_free_contiguous(dev, page, size); in __dma_direct_free_pages()
108 static struct page *dma_direct_alloc_swiotlb(struct device *dev, size_t size) in dma_direct_alloc_swiotlb() argument
110 struct page *page = swiotlb_alloc(dev, size); in dma_direct_alloc_swiotlb()
112 if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) { in dma_direct_alloc_swiotlb()
113 swiotlb_free(dev, page, size); in dma_direct_alloc_swiotlb()
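Lines 108-113 show the complete pattern for restricted-DMA pools: allocate from swiotlb, re-verify reachability, and undo the allocation on failure. Reconstructed (the NULL return on the failure path is inferred):

static struct page *dma_direct_alloc_swiotlb_sketch(struct device *dev, size_t size)
{
	struct page *page = swiotlb_alloc(dev, size);

	/* the restricted pool may still hand back memory the device cannot
	 * reach coherently; give it back and fail the allocation */
	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		swiotlb_free(dev, page, size);
		return NULL;
	}
	return page;
}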
120 static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size, in __dma_direct_alloc_pages() argument
127 WARN_ON_ONCE(!PAGE_ALIGNED(size)); in __dma_direct_alloc_pages()
130 return dma_direct_alloc_swiotlb(dev, size); in __dma_direct_alloc_pages()
133 page = dma_alloc_contiguous(dev, size, gfp); in __dma_direct_alloc_pages()
135 if (!dma_coherent_ok(dev, page_to_phys(page), size) || in __dma_direct_alloc_pages()
137 dma_free_contiguous(dev, page, size); in __dma_direct_alloc_pages()
143 page = alloc_pages_node(node, gfp, get_order(size)); in __dma_direct_alloc_pages()
144 if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) { in __dma_direct_alloc_pages()
145 __free_pages(page, get_order(size)); in __dma_direct_alloc_pages()
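Lines 120-145 trace the allocation fallback chain: the caller must pass a page-aligned size (the WARN_ON_ONCE on line 127), restricted devices go straight to the swiotlb pool, otherwise CMA is tried via dma_alloc_contiguous() and finally the plain page allocator; every step re-checks dma_coherent_ok() and releases the pages with the matching free routine. A condensed sketch of that control flow (the swiotlb guard and the lower-zone retries of the real function are simplified away):

static struct page *__dma_direct_alloc_pages_sketch(struct device *dev,
						    size_t size, gfp_t gfp)
{
	int node = dev_to_node(dev);
	struct page *page;

	WARN_ON_ONCE(!PAGE_ALIGNED(size));

	if (is_swiotlb_for_alloc(dev))		/* assumed guard for restricted pools */
		return dma_direct_alloc_swiotlb(dev, size);

	page = dma_alloc_contiguous(dev, size, gfp);	/* CMA / per-device area */
	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		dma_free_contiguous(dev, page, size);
		page = NULL;
	}
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(size));
	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		__free_pages(page, get_order(size));
		page = NULL;	/* the real code retries in ZONE_DMA32/ZONE_DMA here */
	}
	return page;
}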
174 static void *dma_direct_alloc_from_pool(struct device *dev, size_t size, in dma_direct_alloc_from_pool() argument
185 page = dma_alloc_from_pool(dev, size, &ret, gfp, dma_coherent_ok); in dma_direct_alloc_from_pool()
192 static void *dma_direct_alloc_no_mapping(struct device *dev, size_t size, in dma_direct_alloc_no_mapping() argument
197 page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true); in dma_direct_alloc_no_mapping()
203 arch_dma_prep_coherent(page, size); in dma_direct_alloc_no_mapping()
210 void *dma_direct_alloc(struct device *dev, size_t size, in dma_direct_alloc() argument
217 size = PAGE_ALIGN(size); in dma_direct_alloc()
223 return dma_direct_alloc_no_mapping(dev, size, dma_handle, gfp); in dma_direct_alloc()
228 return arch_dma_alloc(dev, size, dma_handle, gfp, in dma_direct_alloc()
236 return dma_alloc_from_global_coherent(dev, size, in dma_direct_alloc()
258 return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp); in dma_direct_alloc()
261 page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true); in dma_direct_alloc()
282 arch_dma_prep_coherent(page, size); in dma_direct_alloc()
285 ret = dma_common_contiguous_remap(page, size, prot, in dma_direct_alloc()
291 if (dma_set_decrypted(dev, ret, size)) in dma_direct_alloc()
295 memset(ret, 0, size); in dma_direct_alloc()
298 arch_dma_prep_coherent(page, size); in dma_direct_alloc()
299 ret = arch_dma_set_uncached(ret, size); in dma_direct_alloc()
308 if (dma_set_encrypted(dev, page_address(page), size)) in dma_direct_alloc()
311 __dma_direct_free_pages(dev, page, size); in dma_direct_alloc()
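dma_direct_alloc() (lines 210-311) is the backend for the ordinary coherent-allocation API when the device is directly mapped: the size is page-aligned up front, the special cases (no-mapping, arch_dma_alloc(), global coherent pool, atomic pool) are dispatched, and on the normal path the pages are remapped or decrypted, zeroed, prepared for coherent use, and unwound in reverse order on error. From a driver it is reached through the usual calls, roughly as below (a hypothetical probe; the 4 KiB size and the immediate free are for illustration only, and dma_free_coherent() lands in dma_direct_free(), lines 318-358):

#include <linux/dma-mapping.h>
#include <linux/sizes.h>

static int example_probe(struct device *dev)
{
	dma_addr_t dma_handle;
	void *cpu_addr;

	/* with direct mapping this ends up in dma_direct_alloc() above */
	cpu_addr = dma_alloc_coherent(dev, SZ_4K, &dma_handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/* ... program dma_handle into the device, use cpu_addr from the CPU ... */

	dma_free_coherent(dev, SZ_4K, cpu_addr, dma_handle);
	return 0;
}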
318 void dma_direct_free(struct device *dev, size_t size, in dma_direct_free() argument
321 unsigned int page_order = get_order(size); in dma_direct_free()
326 dma_free_contiguous(dev, cpu_addr, size); in dma_direct_free()
333 arch_dma_free(dev, size, cpu_addr, dma_addr, attrs); in dma_direct_free()
346 dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size))) in dma_direct_free()
353 arch_dma_clear_uncached(cpu_addr, size); in dma_direct_free()
354 if (dma_set_encrypted(dev, cpu_addr, size)) in dma_direct_free()
358 __dma_direct_free_pages(dev, dma_direct_to_page(dev, dma_addr), size); in dma_direct_free()
362 struct page *dma_direct_alloc_pages(struct device *dev, size_t size, in dma_direct_alloc_pages() argument
369 return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp); in dma_direct_alloc_pages()
371 page = __dma_direct_alloc_pages(dev, size, gfp, false); in dma_direct_alloc_pages()
376 if (dma_set_decrypted(dev, ret, size)) in dma_direct_alloc_pages()
378 memset(ret, 0, size); in dma_direct_alloc_pages()
385 void dma_direct_free_pages(struct device *dev, size_t size, in dma_direct_free_pages() argument
393 dma_free_from_pool(dev, vaddr, size)) in dma_direct_free_pages()
396 if (dma_set_encrypted(dev, vaddr, size)) in dma_direct_free_pages()
398 __dma_direct_free_pages(dev, page, size); in dma_direct_free_pages()
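dma_direct_alloc_pages()/dma_direct_free_pages() (lines 362-398) back the struct-page based API, with the same decrypt-on-alloc / re-encrypt-on-free pairing seen above. On the driver side it is reached through dma_alloc_pages()/dma_free_pages(), roughly (the buffer size and direction are illustrative):

static int example_alloc_pages(struct device *dev)
{
	struct page *page;
	dma_addr_t dma_handle;

	page = dma_alloc_pages(dev, SZ_64K, &dma_handle, DMA_BIDIRECTIONAL,
			       GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	/* ... use page_address(page) on the CPU and dma_handle on the device ... */

	dma_free_pages(dev, SZ_64K, page, dma_handle, DMA_BIDIRECTIONAL);
	return 0;
}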
510 size_t size, enum dma_data_direction dir, unsigned long attrs) in dma_direct_map_resource() argument
514 if (unlikely(!dma_capable(dev, dma_addr, size, false))) { in dma_direct_map_resource()
517 &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit); in dma_direct_map_resource()
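dma_direct_map_resource() (lines 510-517) maps a physical MMIO range rather than RAM, and fails with a report (the message arguments on line 517) when dma_capable() says the range is outside the device's reach. The driver-facing entry point is dma_map_resource(), e.g. for mapping another device's BAR (phys_addr is a placeholder here):

static int example_map_bar(struct device *dev, phys_addr_t phys_addr)
{
	dma_addr_t bus_addr;

	bus_addr = dma_map_resource(dev, phys_addr, SZ_4K, DMA_TO_DEVICE, 0);
	if (dma_mapping_error(dev, bus_addr))
		return -EIO;

	/* ... let the device access the resource through bus_addr ... */

	dma_unmap_resource(dev, bus_addr, SZ_4K, DMA_TO_DEVICE, 0);
	return 0;
}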
526 void *cpu_addr, dma_addr_t dma_addr, size_t size, in dma_direct_get_sgtable() argument
534 sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); in dma_direct_get_sgtable()
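dma_direct_get_sgtable() (lines 526-534) wraps a coherent allocation in a single-entry scatterlist, page-aligning the size for the sg entry. Drivers normally reach it through dma_get_sgtable(), e.g. when exporting a buffer (cpu_addr and dma_handle are assumed to come from an earlier dma_alloc_coherent()):

static int example_export(struct device *dev, void *cpu_addr,
			  dma_addr_t dma_handle, size_t size)
{
	struct sg_table sgt;
	int ret;

	ret = dma_get_sgtable(dev, &sgt, cpu_addr, dma_handle, size);
	if (ret)
		return ret;

	/* ... hand the single-entry table to an importer ... */

	sg_free_table(&sgt);
	return 0;
}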
545 void *cpu_addr, dma_addr_t dma_addr, size_t size, in dma_direct_mmap() argument
549 unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; in dma_direct_mmap()
557 if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) in dma_direct_mmap()
559 if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret)) in dma_direct_mmap()
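dma_direct_mmap() (lines 545-559) maps a coherent buffer into user space, converting the byte size to a page count and first letting the per-device and global coherent pools claim the request. A driver usually reaches it from its mmap file operation via dma_mmap_coherent(); the private struct holding the buffer below is hypothetical:

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct example_priv *priv = file->private_data;	/* hypothetical driver state */

	/* ends up in dma_direct_mmap() for directly-mapped devices */
	return dma_mmap_coherent(priv->dev, vma, priv->cpu_addr,
				 priv->dma_handle, priv->size);
}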
596 for (m = dev->dma_range_map; PFN_DOWN(m->size); m++) { in dma_find_range()
600 start_pfn - cpu_start_pfn < PFN_DOWN(m->size)) in dma_find_range()
625 start_pfn = PFN_DOWN(bdr->cpu_start) + PFN_DOWN(bdr->size); in check_ram_in_range_map()
671 dma_addr_t dma_start, u64 size) in dma_direct_set_offset() argument
689 map[0].size = size; in dma_direct_set_offset()
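dma_find_range() and check_ram_in_range_map() (lines 596-625) walk dev->dma_range_map, whose entries pair a CPU range with a bus address and a size, and dma_direct_set_offset() (lines 671-689) installs a single such entry for platforms with a fixed CPU-to-bus offset. A bus/platform-code usage sketch (all addresses and the 1 GiB size are illustrative only):

static int example_declare_offset(struct device *dev)
{
	/* RAM starting at CPU physical 0x80000000 is seen by the device at
	 * bus address 0x0; install a one-entry dma_range_map covering 1 GiB */
	return dma_direct_set_offset(dev, 0x80000000, 0x0, SZ_1G);
}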