/*
 * drivers/base/dma-mapping.c - arch-independent dma-mapping routines
 *
 * Copyright (c) 2006  SUSE Linux Products GmbH
 * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
 *
 * This file is released under the GPLv2.
 */

#include <linux/acpi.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/*
 * Managed DMA API
 */
struct dma_devres {
	size_t		size;
	void		*vaddr;
	dma_addr_t	dma_handle;
	unsigned long	attrs;
};

static void dmam_release(struct device *dev, void *res)
{
	struct dma_devres *this = res;

	dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
			this->attrs);
}

static int dmam_match(struct device *dev, void *res, void *match_data)
{
	struct dma_devres *this = res, *match = match_data;

	if (this->vaddr == match->vaddr) {
		WARN_ON(this->size != match->size ||
			this->dma_handle != match->dma_handle);
		return 1;
	}
	return 0;
}

/**
 * dmam_alloc_coherent - Managed dma_alloc_coherent()
 * @dev: Device to allocate coherent memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 *
 * Managed dma_alloc_coherent().  Memory allocated using this function
 * will be automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_coherent(struct device *dev, size_t size,
			   dma_addr_t *dma_handle, gfp_t gfp)
{
	struct dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;

	vaddr = dma_alloc_coherent(dev, size, dma_handle, gfp);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}

	dr->vaddr = vaddr;
	dr->dma_handle = *dma_handle;
	dr->size = size;

	devres_add(dev, dr);

	return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_coherent);
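
/*
 * Usage sketch (illustrative only; the "foo" driver and its members are
 * hypothetical): a driver can allocate a coherent buffer in probe() and
 * rely on devres to free it on driver detach, so no explicit
 * dma_free_coherent() is needed on the error or remove paths.
 *
 *	struct foo_priv {
 *		void		*ring;
 *		dma_addr_t	ring_dma;
 *	};
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo_priv *priv;
 *
 *		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
 *		if (!priv)
 *			return -ENOMEM;
 *
 *		priv->ring = dmam_alloc_coherent(&pdev->dev, SZ_4K,
 *						 &priv->ring_dma, GFP_KERNEL);
 *		if (!priv->ring)
 *			return -ENOMEM;
 *
 *		platform_set_drvdata(pdev, priv);
 *		return 0;
 *	}
 */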

/**
 * dmam_free_coherent - Managed dma_free_coherent()
 * @dev: Device to free coherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed dma_free_coherent().
 */
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle)
{
	struct dma_devres match_data = { size, vaddr, dma_handle };

	dma_free_coherent(dev, size, vaddr, dma_handle);
	WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
}
EXPORT_SYMBOL(dmam_free_coherent);
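
/*
 * Usage sketch (hypothetical driver code and variable names):
 * dmam_free_coherent() is only needed when a managed buffer must be
 * released before driver detach, e.g. when resizing a ring at runtime;
 * otherwise devres frees it automatically.
 *
 *	dmam_free_coherent(dev, old_size, old_ring, old_ring_dma);
 *	new_ring = dmam_alloc_coherent(dev, new_size, &new_ring_dma,
 *				       GFP_KERNEL);
 */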

/**
 * dmam_alloc_attrs - Managed dma_alloc_attrs()
 * @dev: Device to allocate memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: Flags in the DMA_ATTR_* namespace.
 *
 * Managed dma_alloc_attrs().  Memory allocated using this function will be
 * automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	struct dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;

	vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}

	dr->vaddr = vaddr;
	dr->dma_handle = *dma_handle;
	dr->size = size;
	dr->attrs = attrs;

	devres_add(dev, dr);

	return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_attrs);
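
/*
 * Usage sketch (hypothetical driver code): same lifetime rules as
 * dmam_alloc_coherent(), but DMA_ATTR_* flags are passed through, e.g.
 * to request a write-combining mapping for a streaming buffer:
 *
 *	buf = dmam_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL,
 *			       DMA_ATTR_WRITE_COMBINE);
 *	if (!buf)
 *		return -ENOMEM;
 */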

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT

static void dmam_coherent_decl_release(struct device *dev, void *res)
{
	dma_release_declared_memory(dev);
}

/**
 * dmam_declare_coherent_memory - Managed dma_declare_coherent_memory()
 * @dev: Device to declare coherent memory for
 * @phys_addr: Physical address of coherent memory to be declared
 * @device_addr: Device address of coherent memory to be declared
 * @size: Size of coherent memory to be declared
 * @flags: Flags
 *
 * Managed dma_declare_coherent_memory().
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int dmam_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				 dma_addr_t device_addr, size_t size, int flags)
{
	void *res;
	int rc;

	res = devres_alloc(dmam_coherent_decl_release, 0, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	rc = dma_declare_coherent_memory(dev, phys_addr, device_addr, size,
					 flags);
	if (!rc)
		devres_add(dev, res);
	else
		devres_free(res);

	return rc;
}
EXPORT_SYMBOL(dmam_declare_coherent_memory);
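
/*
 * Usage sketch (hypothetical resource; the exact DMA_MEMORY_* flags
 * available depend on the kernel version): a driver with dedicated
 * device-local memory described by a struct resource can declare it
 * once in probe(); devres then calls dma_release_declared_memory()
 * automatically on detach.
 *
 *	ret = dmam_declare_coherent_memory(dev, res->start, res->start,
 *					   resource_size(res),
 *					   DMA_MEMORY_EXCLUSIVE);
 *	if (ret)
 *		return ret;
 */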

/**
 * dmam_release_declared_memory - Managed dma_release_declared_memory().
 * @dev: Device to release declared coherent memory for
 *
 * Managed dma_release_declared_memory().
 */
void dmam_release_declared_memory(struct device *dev)
{
	WARN_ON(devres_destroy(dev, dmam_coherent_decl_release, NULL, NULL));
}
EXPORT_SYMBOL(dmam_release_declared_memory);

#endif

/*
 * Create scatter-list for the already allocated DMA buffer.
 */
int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		 void *cpu_addr, dma_addr_t handle, size_t size)
{
	struct page *page = virt_to_page(cpu_addr);
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return 0;
}
EXPORT_SYMBOL(dma_common_get_sgtable);
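
/*
 * Usage sketch (hypothetical arch glue): architectures commonly call
 * this helper from their struct dma_map_ops ->get_sgtable() callback
 * for buffers that came from the kernel linear mapping:
 *
 *	static int foo_get_sgtable(struct device *dev, struct sg_table *sgt,
 *				   void *cpu_addr, dma_addr_t dma_addr,
 *				   size_t size, unsigned long attrs)
 *	{
 *		return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr,
 *					      size);
 *	}
 */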

/*
 * Create userspace mapping for the DMA-coherent memory.
 */
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	int ret = -ENXIO;
#ifndef CONFIG_ARCH_NO_COHERENT_DMA_MMAP
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
	unsigned long off = vma->vm_pgoff;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < count && user_count <= (count - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      user_count << PAGE_SHIFT,
				      vma->vm_page_prot);
	}
#endif	/* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */

	return ret;
}
EXPORT_SYMBOL(dma_common_mmap);
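
/*
 * Usage sketch (hypothetical arch glue): the typical caller is an
 * architecture's struct dma_map_ops ->mmap() callback, which forwards
 * to this generic implementation:
 *
 *	static int foo_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 *				void *cpu_addr, dma_addr_t dma_addr,
 *				size_t size, unsigned long attrs)
 *	{
 *		return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
 *	}
 */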

#ifdef CONFIG_MMU
static struct vm_struct *__dma_common_pages_remap(struct page **pages,
			size_t size, unsigned long vm_flags, pgprot_t prot,
			const void *caller)
{
	struct vm_struct *area;

	area = get_vm_area_caller(size, vm_flags, caller);
	if (!area)
		return NULL;

	if (map_vm_area(area, prot, pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area;
}

/*
 * remaps an array of PAGE_SIZE pages into another vm_area
 * Cannot be used in non-sleeping contexts
 */
void *dma_common_pages_remap(struct page **pages, size_t size,
			unsigned long vm_flags, pgprot_t prot,
			const void *caller)
{
	struct vm_struct *area;

	area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller);
	if (!area)
		return NULL;

	area->pages = pages;

	return area->addr;
}

/*
 * remaps an allocated contiguous region into another vm_area.
 * Cannot be used in non-sleeping contexts
 */

void *dma_common_contiguous_remap(struct page *page, size_t size,
			unsigned long vm_flags,
			pgprot_t prot, const void *caller)
{
	int i;
	struct page **pages;
	struct vm_struct *area;

	pages = kmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL);
	if (!pages)
		return NULL;

	for (i = 0; i < (size >> PAGE_SHIFT); i++)
		pages[i] = nth_page(page, i);

	area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller);

	kfree(pages);

	if (!area)
		return NULL;
	return area->addr;
}

/*
 * unmaps a range previously mapped by dma_common_*_remap
 */
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags)
{
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (!area || (area->flags & vm_flags) != vm_flags) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}

	unmap_kernel_range((unsigned long)cpu_addr, PAGE_ALIGN(size));
	vunmap(cpu_addr);
}
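
/*
 * Usage sketch (hypothetical caller; the vm_flags and page protection
 * chosen here are illustrative): an arch DMA backend that allocated
 * "count" non-contiguous pages can map them into a contiguous kernel
 * virtual range and later tear the mapping down with the matching helper.
 *
 *	addr = dma_common_pages_remap(pages, count << PAGE_SHIFT,
 *				      VM_USERMAP, PAGE_KERNEL,
 *				      __builtin_return_address(0));
 *	if (!addr)
 *		return NULL;
 *
 *	(and on the free path:)
 *	dma_common_free_remap(addr, count << PAGE_SHIFT, VM_USERMAP);
 */
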
#endif

/*
 * Common configuration to enable DMA API use for a device
 */
#include <linux/pci.h>

int dma_configure(struct device *dev)
{
	struct device *bridge = NULL, *dma_dev = dev;
	enum dev_dma_attr attr;
	int ret = 0;

	if (dev_is_pci(dev)) {
		bridge = pci_get_host_bridge_device(to_pci_dev(dev));
		dma_dev = bridge;
		if (IS_ENABLED(CONFIG_OF) && dma_dev->parent &&
		    dma_dev->parent->of_node)
			dma_dev = dma_dev->parent;
	}

	if (dma_dev->of_node) {
		ret = of_dma_configure(dev, dma_dev->of_node);
	} else if (has_acpi_companion(dma_dev)) {
		attr = acpi_get_dma_attr(to_acpi_device_node(dma_dev->fwnode));
		if (attr != DEV_DMA_NOT_SUPPORTED)
			ret = acpi_dma_configure(dev, attr);
	}

	if (bridge)
		pci_put_host_bridge_device(bridge);

	return ret;
}

void dma_deconfigure(struct device *dev)
{
	of_dma_deconfigure(dev);
	acpi_dma_deconfigure(dev);
}
372