Lines matching refs: size
69 static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size) in dma_coherent_ok() argument
75 return dma_addr + size - 1 <= in dma_coherent_ok()
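The two fragments above (lines 69 and 75) are the coherent-allocation capability check from the DMA direct-mapping code (kernel/dma/direct.c in mainline Linux). A minimal reconstruction, assuming the mainline definition around v5.10; phys_to_dma_direct() and DMA_MAPPING_ERROR come from the kernel's dma-direct/dma-mapping headers:

    #include <linux/dma-direct.h>
    #include <linux/dma-mapping.h>

    /*
     * Translate the physical address to a device/bus address, then check
     * that the *last* byte of the buffer (dma_addr + size - 1) still fits
     * under both the coherent DMA mask and any bus limit. min_not_zero()
     * ignores a bus_dma_limit of 0, which means "no limit set".
     */
    static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
    {
            dma_addr_t dma_addr = phys_to_dma_direct(dev, phys);

            if (dma_addr == DMA_MAPPING_ERROR)
                    return false;
            return dma_addr + size - 1 <=
                    min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
    }

The "- 1" matters: comparing dma_addr + size instead would spuriously reject a buffer whose last byte lands exactly on the mask boundary.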
79 static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size, in __dma_direct_alloc_pages() argument
86 WARN_ON_ONCE(!PAGE_ALIGNED(size)); in __dma_direct_alloc_pages()
90 page = dma_alloc_contiguous(dev, size, gfp); in __dma_direct_alloc_pages()
91 if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) { in __dma_direct_alloc_pages()
92 dma_free_contiguous(dev, page, size); in __dma_direct_alloc_pages()
97 page = alloc_pages_node(node, gfp, get_order(size)); in __dma_direct_alloc_pages()
98 if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) { in __dma_direct_alloc_pages()
99 dma_free_contiguous(dev, page, size); in __dma_direct_alloc_pages()
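Lines 79-99 are from the page-allocation helper: it tries the contiguous (CMA) allocator first, re-validates the result against dma_coherent_ok(), then falls back to the buddy allocator and validates again. A simplified sketch; mainline additionally retries in the GFP_DMA32/GFP_DMA zones when the check fails, which is elided here:

    static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
                    gfp_t gfp)
    {
            struct page *page;

            WARN_ON_ONCE(!PAGE_ALIGNED(size));

            /* First choice: the contiguous (CMA) allocator. */
            page = dma_alloc_contiguous(dev, size, gfp);
            if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
                    dma_free_contiguous(dev, page, size);
                    page = NULL;
            }

            /* Fallback: plain buddy-allocator pages on the device's node. */
            if (!page)
                    page = alloc_pages_node(dev_to_node(dev), gfp,
                                            get_order(size));
            if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
                    dma_free_contiguous(dev, page, size);
                    page = NULL;    /* mainline retries with zone flags here */
            }
            return page;
    }

Note that both failure paths free through dma_free_contiguous() (lines 92 and 99): it returns CMA pages to CMA and falls back to the ordinary page free for buddy pages, so one call covers either origin.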
119 static void *dma_direct_alloc_from_pool(struct device *dev, size_t size, in dma_direct_alloc_from_pool() argument
128 page = dma_alloc_from_pool(dev, size, &ret, gfp, dma_coherent_ok); in dma_direct_alloc_from_pool()
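Line 128 is the atomic-pool path used when the caller cannot block: dma_alloc_from_pool() takes dma_coherent_ok as a callback so the pool can reject memory the device cannot address. A hedged reconstruction (mainline also tunes gfp to pick a suitable pool, elided here):

    static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
                    dma_addr_t *dma_handle, gfp_t gfp)
    {
            struct page *page;
            void *ret;

            /* The pool validates every candidate page with the predicate. */
            page = dma_alloc_from_pool(dev, size, &ret, gfp, dma_coherent_ok);
            if (!page)
                    return NULL;
            *dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
            return ret;
    }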
135 void *dma_direct_alloc(struct device *dev, size_t size, in dma_direct_alloc() argument
142 size = PAGE_ALIGN(size); in dma_direct_alloc()
148 page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO); in dma_direct_alloc()
153 arch_dma_prep_coherent(page, size); in dma_direct_alloc()
162 return arch_dma_alloc(dev, size, dma_handle, gfp, attrs); in dma_direct_alloc()
172 return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp); in dma_direct_alloc()
175 page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO); in dma_direct_alloc()
183 arch_dma_prep_coherent(page, size); in dma_direct_alloc()
186 ret = dma_common_contiguous_remap(page, size, in dma_direct_alloc()
193 PFN_UP(size)); in dma_direct_alloc()
197 memset(ret, 0, size); in dma_direct_alloc()
215 PFN_UP(size)); in dma_direct_alloc()
220 memset(ret, 0, size); in dma_direct_alloc()
224 arch_dma_prep_coherent(page, size); in dma_direct_alloc()
225 ret = arch_dma_set_uncached(ret, size); in dma_direct_alloc()
236 PFN_UP(size)); in dma_direct_alloc()
242 dma_free_contiguous(dev, page, size); in dma_direct_alloc()
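Lines 135-242 all fall inside dma_direct_alloc(), which strings the helpers together. A heavily condensed outline; the real function has more branches (arch_dma_alloc() at line 162, the atomic pool at 172, arch_dma_set_uncached() at 225), and the exact conditions vary between kernel versions:

    void *dma_direct_alloc(struct device *dev, size_t size,
                    dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
    {
            struct page *page;
            void *ret;

            size = PAGE_ALIGN(size);                        /* line 142 */

            /* (Mainline may divert to arch_dma_alloc() or the atomic pool
             *  before getting here -- lines 162 and 172.) */

            page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
            if (!page)
                    return NULL;

            arch_dma_prep_coherent(page, size);     /* flush stale CPU lines */

            if (!dev_is_dma_coherent(dev)) {
                    /* Remap branch, line 186: build an uncached mapping. */
                    ret = dma_common_contiguous_remap(page, size,
                                    dma_pgprot(dev, PAGE_KERNEL, attrs),
                                    __builtin_return_address(0));
                    if (!ret)
                            goto out_free_pages;
            } else {
                    ret = page_address(page);
                    /* Memory-encryption case (lines 193/215): make the
                     * pages shared with (decrypted for) the device. */
                    if (force_dma_unencrypted(dev) &&
                        set_memory_decrypted((unsigned long)ret, PFN_UP(size)))
                            goto out_free_pages;
            }

            memset(ret, 0, size);                           /* lines 197/220 */
            *dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
            return ret;

    out_free_pages:
            dma_free_contiguous(dev, page, size);           /* line 242 */
            return NULL;
    }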
246 void dma_direct_free(struct device *dev, size_t size, in dma_direct_free() argument
252 dma_free_contiguous(dev, cpu_addr, size); in dma_direct_free()
259 arch_dma_free(dev, size, cpu_addr, dma_addr, attrs); in dma_direct_free()
265 dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size))) in dma_direct_free()
269 set_memory_encrypted((unsigned long)cpu_addr, PFN_UP(size)); in dma_direct_free()
274 arch_dma_clear_uncached(cpu_addr, size); in dma_direct_free()
276 dma_free_contiguous(dev, dma_direct_to_page(dev, dma_addr), size); in dma_direct_free()
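Lines 246-276 are the matching release path, dma_direct_free(), which must undo whichever strategy the allocation took. A condensed sketch with the early-return branches summarised in comments:

    void dma_direct_free(struct device *dev, size_t size,
                    void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
    {
            /* (Mainline first handles the DMA_ATTR_NO_KERNEL_MAPPING cookie,
             *  the arch_dma_free() fallback and the atomic pool --
             *  lines 252, 259 and 265.) */

            if (force_dma_unencrypted(dev))
                    set_memory_encrypted((unsigned long)cpu_addr, PFN_UP(size));

            if (is_vmalloc_addr(cpu_addr))
                    vunmap(cpu_addr);   /* undo dma_common_contiguous_remap() */
            else if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
                    arch_dma_clear_uncached(cpu_addr, size);

            dma_free_contiguous(dev, dma_direct_to_page(dev, dma_addr), size);
    }

Note the page is recovered from the DMA address via dma_direct_to_page() (line 276), since cpu_addr may be a vmap address with no direct physical correspondence.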
279 struct page *dma_direct_alloc_pages(struct device *dev, size_t size, in dma_direct_alloc_pages() argument
287 return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp); in dma_direct_alloc_pages()
289 page = __dma_direct_alloc_pages(dev, size, gfp); in dma_direct_alloc_pages()
305 if (set_memory_decrypted((unsigned long)ret, PFN_UP(size))) in dma_direct_alloc_pages()
308 memset(ret, 0, size); in dma_direct_alloc_pages()
312 dma_free_contiguous(dev, page, size); in dma_direct_alloc_pages()
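Lines 279-312 come from dma_direct_alloc_pages(), the struct-page variant behind dma_alloc_pages(). A hedged reconstruction, assuming roughly the mainline shape:

    struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
                    dma_addr_t *dma_handle, enum dma_data_direction dir,
                    gfp_t gfp)
    {
            struct page *page;
            void *ret;

            /* (Mainline diverts to the atomic pool first -- line 287.) */

            page = __dma_direct_alloc_pages(dev, size, gfp);
            if (!page)
                    return NULL;

            ret = page_address(page);
            if (force_dma_unencrypted(dev) &&
                set_memory_decrypted((unsigned long)ret, PFN_UP(size)))
                    goto out_free_pages;

            memset(ret, 0, size);
            *dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
            return page;

    out_free_pages:
            dma_free_contiguous(dev, page, size);
            return NULL;
    }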
316 void dma_direct_free_pages(struct device *dev, size_t size, in dma_direct_free_pages() argument
324 dma_free_from_pool(dev, vaddr, size)) in dma_direct_free_pages()
328 set_memory_encrypted((unsigned long)vaddr, PFN_UP(size)); in dma_direct_free_pages()
330 dma_free_contiguous(dev, page, size); in dma_direct_free_pages()
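And its counterpart from lines 316-330; dma_free_from_pool() returns false when vaddr is not pool memory, so the ordinary path runs only for non-pool buffers:

    void dma_direct_free_pages(struct device *dev, size_t size,
                    struct page *page, dma_addr_t dma_addr,
                    enum dma_data_direction dir)
    {
            void *vaddr = page_address(page);

            /* Atomic-pool buffers go back to the pool and we are done. */
            if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
                dma_free_from_pool(dev, vaddr, size))
                    return;

            if (force_dma_unencrypted(dev))
                    set_memory_encrypted((unsigned long)vaddr, PFN_UP(size));

            dma_free_contiguous(dev, page, size);
    }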
416 size_t size, enum dma_data_direction dir, unsigned long attrs) in dma_direct_map_resource() argument
420 if (unlikely(!dma_capable(dev, dma_addr, size, false))) { in dma_direct_map_resource()
423 &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit); in dma_direct_map_resource()
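Lines 416-423 are from dma_direct_map_resource(), which maps MMIO (a physical resource, not host RAM) and therefore applies no address translation; it only verifies the address is reachable. Close to the mainline body:

    dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
                    size_t size, enum dma_data_direction dir,
                    unsigned long attrs)
    {
            dma_addr_t dma_addr = paddr;

            /* The third argument (false) says this is not system RAM; MMIO
             * cannot be bounce-buffered, so an unreachable address is a
             * hard error rather than a SWIOTLB candidate. */
            if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
                    dev_err_once(dev,
                                 "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
                                 &dma_addr, size, *dev->dma_mask,
                                 dev->bus_dma_limit);
                    WARN_ON_ONCE(1);
                    return DMA_MAPPING_ERROR;
            }
            return dma_addr;
    }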
432 void *cpu_addr, dma_addr_t dma_addr, size_t size, in dma_direct_get_sgtable() argument
440 sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); in dma_direct_get_sgtable()
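Lines 432-440 are dma_direct_get_sgtable(): a direct-mapped coherent buffer is physically contiguous, so a single scatterlist entry describes all of it. Essentially the mainline body:

    int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
                    void *cpu_addr, dma_addr_t dma_addr, size_t size,
                    unsigned long attrs)
    {
            struct page *page = dma_direct_to_page(dev, dma_addr);
            int ret;

            ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
            if (!ret)       /* one entry covers the whole contiguous buffer */
                    sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
            return ret;
    }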
451 void *cpu_addr, dma_addr_t dma_addr, size_t size, in dma_direct_mmap() argument
455 unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; in dma_direct_mmap()
461 if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) in dma_direct_mmap()
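Lines 451-461 are from dma_direct_mmap(), which exposes a coherent buffer to user space; the bounds check refuses a vm_pgoff/length combination that would map past the buffer. Assuming roughly the v5.10 body:

    int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
                    void *cpu_addr, dma_addr_t dma_addr, size_t size,
                    unsigned long attrs)
    {
            unsigned long user_count = vma_pages(vma);
            unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
            unsigned long pfn = PHYS_PFN(dma_to_phys(dev, dma_addr));
            int ret = -ENXIO;

            vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

            /* Per-device coherent pools are handled by their own helper. */
            if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
                    return ret;

            /* Refuse offsets or lengths reaching past the buffer. */
            if (vma->vm_pgoff >= count || user_count > count - vma->vm_pgoff)
                    return -ENXIO;
            return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
                            user_count << PAGE_SHIFT, vma->vm_page_prot);
    }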
525 dma_addr_t dma_start, u64 size) in dma_direct_set_offset() argument
544 map[0].size = size; in dma_direct_set_offset()
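Finally, lines 525-544 belong to dma_direct_set_offset(), which records a constant CPU-to-bus address offset for a device; map[0].size bounds the region the offset applies to. A reconstruction assuming the mainline definition:

    int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
                    dma_addr_t dma_start, u64 size)
    {
            struct bus_dma_region *map;
            u64 offset = (u64)cpu_start - (u64)dma_start;

            if (dev->dma_range_map) {
                    dev_err(dev, "attempt to add DMA range to existing map\n");
                    return -EINVAL;
            }

            if (!offset)
                    return 0;       /* identity mapping needs no range map */

            /* kcalloc(2, ...): entry [1] stays all-zero and serves as the
             * terminating sentinel of the range map. */
            map = kcalloc(2, sizeof(*map), GFP_KERNEL);
            if (!map)
                    return -ENOMEM;
            map[0].cpu_start = cpu_start;
            map[0].dma_start = dma_start;
            map[0].offset = offset;
            map[0].size = size;
            dev->dma_range_map = map;
            return 0;
    }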