/*
 * drivers/base/dma-mapping.c - arch-independent dma-mapping routines
 *
 * Copyright (c) 2006  SUSE Linux Products GmbH
 * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
 *
 * This file is released under the GPLv2.
 */

#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm-generic/dma-coherent.h>

/*
 * Managed DMA API
 */
struct dma_devres {
	size_t		size;
	void		*vaddr;
	dma_addr_t	dma_handle;
};

static void dmam_coherent_release(struct device *dev, void *res)
{
	struct dma_devres *this = res;

	dma_free_coherent(dev, this->size, this->vaddr, this->dma_handle);
}

static void dmam_noncoherent_release(struct device *dev, void *res)
{
	struct dma_devres *this = res;

	dma_free_noncoherent(dev, this->size, this->vaddr, this->dma_handle);
}

static int dmam_match(struct device *dev, void *res, void *match_data)
{
	struct dma_devres *this = res, *match = match_data;

	if (this->vaddr == match->vaddr) {
		WARN_ON(this->size != match->size ||
			this->dma_handle != match->dma_handle);
		return 1;
	}
	return 0;
}

/**
 * dmam_alloc_coherent - Managed dma_alloc_coherent()
 * @dev: Device to allocate coherent memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 *
 * Managed dma_alloc_coherent().  Memory allocated using this function
 * will be automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_coherent(struct device *dev, size_t size,
			  dma_addr_t *dma_handle, gfp_t gfp)
{
	struct dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(dmam_coherent_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;

	vaddr = dma_alloc_coherent(dev, size, dma_handle, gfp);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}

	dr->vaddr = vaddr;
	dr->dma_handle = *dma_handle;
	dr->size = size;

	devres_add(dev, dr);

	return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_coherent);
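
/*
 * Example (a minimal, hypothetical probe sketch; foo_dev and
 * FOO_BUF_SIZE are made-up names): because the allocation is
 * devres-managed, the error path needs no unwinding and no remove()
 * counterpart is required; the buffer is freed on driver detach.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo_dev *foo;
 *
 *		foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
 *		if (!foo)
 *			return -ENOMEM;
 *
 *		foo->buf = dmam_alloc_coherent(&pdev->dev, FOO_BUF_SIZE,
 *					       &foo->buf_dma, GFP_KERNEL);
 *		if (!foo->buf)
 *			return -ENOMEM;
 *		return 0;
 *	}
 */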

/**
 * dmam_free_coherent - Managed dma_free_coherent()
 * @dev: Device to free coherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed dma_free_coherent().
 */
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle)
{
	struct dma_devres match_data = { size, vaddr, dma_handle };

	dma_free_coherent(dev, size, vaddr, dma_handle);
	WARN_ON(devres_destroy(dev, dmam_coherent_release, dmam_match,
			       &match_data));
}
EXPORT_SYMBOL(dmam_free_coherent);
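
/*
 * Example (hypothetical, continuing the foo_dev sketch above): an
 * explicit dmam_free_coherent() is only needed when a managed buffer
 * must go away before driver detach, e.g. when reallocating at a
 * different size:
 *
 *	dmam_free_coherent(dev, old_size, foo->buf, foo->buf_dma);
 *	foo->buf = dmam_alloc_coherent(dev, new_size, &foo->buf_dma,
 *				       GFP_KERNEL);
 */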

/**
 * dmam_alloc_noncoherent - Managed dma_alloc_noncoherent()
 * @dev: Device to allocate non-coherent memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 *
 * Managed dma_alloc_noncoherent().  Memory allocated using this
 * function will be automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_noncoherent(struct device *dev, size_t size,
			     dma_addr_t *dma_handle, gfp_t gfp)
{
	struct dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(dmam_noncoherent_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;

	vaddr = dma_alloc_noncoherent(dev, size, dma_handle, gfp);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}

	dr->vaddr = vaddr;
	dr->dma_handle = *dma_handle;
	dr->size = size;

	devres_add(dev, dr);

	return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_noncoherent);
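
/*
 * Example (hypothetical): unlike its coherent counterpart, memory from
 * dmam_alloc_noncoherent() must be flushed/invalidated explicitly
 * around DMA, typically with dma_cache_sync():
 *
 *	buf = dmam_alloc_noncoherent(dev, len, &dma, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	memcpy(buf, tx_data, len);
 *	dma_cache_sync(dev, buf, len, DMA_TO_DEVICE);
 *	(then start the device on the handle in @dma)
 */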

/**
 * dmam_free_noncoherent - Managed dma_free_noncoherent()
 * @dev: Device to free noncoherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed dma_free_noncoherent().
 */
void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
			   dma_addr_t dma_handle)
{
	struct dma_devres match_data = { size, vaddr, dma_handle };

	dma_free_noncoherent(dev, size, vaddr, dma_handle);
	WARN_ON(devres_destroy(dev, dmam_noncoherent_release, dmam_match,
			       &match_data));
}
EXPORT_SYMBOL(dmam_free_noncoherent);

#ifdef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY

static void dmam_coherent_decl_release(struct device *dev, void *res)
{
	dma_release_declared_memory(dev);
}

/**
 * dmam_declare_coherent_memory - Managed dma_declare_coherent_memory()
 * @dev: Device to declare coherent memory for
 * @phys_addr: Physical address of coherent memory to be declared
 * @device_addr: Device address of coherent memory to be declared
 * @size: Size of coherent memory to be declared
 * @flags: Flags
 *
 * Managed dma_declare_coherent_memory().
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int dmam_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				 dma_addr_t device_addr, size_t size, int flags)
{
	void *res;
	int rc;

	res = devres_alloc(dmam_coherent_decl_release, 0, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	rc = dma_declare_coherent_memory(dev, phys_addr, device_addr, size,
					 flags);
	if (rc == 0)
		devres_add(dev, res);
	else
		devres_free(res);

	return rc;
}
EXPORT_SYMBOL(dmam_declare_coherent_memory);
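
/*
 * Example (hypothetical): a driver with device-local memory, e.g. an
 * on-board SRAM window described by a struct resource, can hand it to
 * the per-device coherent pool for the lifetime of the binding; the
 * declaration is torn down automatically on detach.  Following the
 * 0/-errno convention documented above:
 *
 *	rc = dmam_declare_coherent_memory(&pdev->dev, res->start,
 *					  res->start, resource_size(res),
 *					  DMA_MEMORY_MAP);
 *	if (rc)
 *		return rc;
 */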

/**
 * dmam_release_declared_memory - Managed dma_release_declared_memory().
 * @dev: Device to release declared coherent memory for
 *
 * Managed dma_release_declared_memory().
 */
void dmam_release_declared_memory(struct device *dev)
{
	WARN_ON(devres_destroy(dev, dmam_coherent_decl_release, NULL, NULL));
}
EXPORT_SYMBOL(dmam_release_declared_memory);

#endif

/*
 * Create scatter-list for the already allocated DMA buffer.
 */
int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
			   void *cpu_addr, dma_addr_t handle, size_t size)
{
	struct page *page = virt_to_page(cpu_addr);
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return 0;
}
EXPORT_SYMBOL(dma_common_get_sgtable);
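
/*
 * Example (hypothetical): architectures typically wire this up as the
 * ->get_sgtable() dma_map_ops method; drivers then reach it through
 * dma_get_sgtable(), e.g. to export a coherent buffer:
 *
 *	struct sg_table sgt;
 *	int rc = dma_get_sgtable(dev, &sgt, cpu_addr, dma_addr, size);
 *	if (rc)
 *		return rc;
 *	(hand sgt.sgl to the importer, then sg_free_table(&sgt))
 */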

/*
 * Create userspace mapping for the DMA-coherent memory.
 */
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	int ret = -ENXIO;
#ifdef CONFIG_MMU
	unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
	unsigned long off = vma->vm_pgoff;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < count && user_count <= (count - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      user_count << PAGE_SHIFT,
				      vma->vm_page_prot);
	}
#endif	/* CONFIG_MMU */

	return ret;
}
EXPORT_SYMBOL(dma_common_mmap);
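
/*
 * Example (hypothetical): with the arch's ->mmap dma_map_ops method
 * pointing here, a driver can hand a coherent buffer to userspace from
 * its mmap file operation via dma_mmap_coherent():
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *foo = file->private_data;
 *
 *		return dma_mmap_coherent(foo->dev, vma, foo->buf,
 *					 foo->buf_dma, FOO_BUF_SIZE);
 *	}
 */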

#ifdef CONFIG_MMU
/*
 * Remaps an array of PAGE_SIZE pages into another vm_area.
 * Cannot be used in non-sleeping contexts.
 */
void *dma_common_pages_remap(struct page **pages, size_t size,
			     unsigned long vm_flags, pgprot_t prot,
			     const void *caller)
{
	struct vm_struct *area;

	area = get_vm_area_caller(size, vm_flags, caller);
	if (!area)
		return NULL;

	area->pages = pages;

	if (map_vm_area(area, prot, pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}

/*
 * Remaps an allocated contiguous region into another vm_area.
 * Cannot be used in non-sleeping contexts.
 */
void *dma_common_contiguous_remap(struct page *page, size_t size,
				  unsigned long vm_flags,
				  pgprot_t prot, const void *caller)
{
	int i;
	struct page **pages;
	void *ptr;
	unsigned long pfn;

	pages = kmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL);
	if (!pages)
		return NULL;

	for (i = 0, pfn = page_to_pfn(page); i < (size >> PAGE_SHIFT); i++)
		pages[i] = pfn_to_page(pfn + i);

	ptr = dma_common_pages_remap(pages, size, vm_flags, prot, caller);

	kfree(pages);

	return ptr;
}

/*
 * Unmaps a range previously mapped by dma_common_*_remap.
 */
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags)
{
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (!area || (area->flags & vm_flags) != vm_flags) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}

	unmap_kernel_range((unsigned long)cpu_addr, size);
	vunmap(cpu_addr);
}
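
/*
 * Example (hypothetical): arch code pairs the remap helpers, mapping a
 * contiguous allocation uncached and tearing the mapping down again
 * with matching vm_flags (VM_USERMAP stands in for whatever flag the
 * architecture reserves for these mappings):
 *
 *	void *va = dma_common_contiguous_remap(page, size, VM_USERMAP,
 *					pgprot_noncached(PAGE_KERNEL),
 *					__builtin_return_address(0));
 *	if (!va)
 *		return NULL;
 *	(use the uncached mapping, then)
 *	dma_common_free_remap(va, size, VM_USERMAP);
 */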
#endif