// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *                   Takashi Iwai <tiwai@suse.de>
 *
 *  Generic memory allocators
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/dma-map-ops.h>
#include <linux/genalloc.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
#include <sound/memalloc.h>
#include "memalloc_local.h"

#define DEFAULT_GFP \
	(GFP_KERNEL | \
	 __GFP_COMP |    /* compound page lets parts be mapped */ \
	 __GFP_RETRY_MAYFAIL | /* don't trigger OOM-killer */ \
	 __GFP_NOWARN)   /* no stack trace print - this call is non-critical */

static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab);

#ifdef CONFIG_SND_DMA_SGBUF
static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size);
#endif

static void *__snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (WARN_ON_ONCE(!ops || !ops->alloc))
		return NULL;
	return ops->alloc(dmab, size);
}

/**
 * snd_dma_alloc_dir_pages - allocate the buffer area according to the given
 *	type and direction
 * @type: the DMA buffer type
 * @device: the device pointer
 * @dir: DMA direction
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 * otherwise a negative value on error.
 */
int snd_dma_alloc_dir_pages(int type, struct device *device,
			    enum dma_data_direction dir, size_t size,
			    struct snd_dma_buffer *dmab)
{
	if (WARN_ON(!size))
		return -ENXIO;
	if (WARN_ON(!dmab))
		return -ENXIO;

	size = PAGE_ALIGN(size);
	dmab->dev.type = type;
	dmab->dev.dev = device;
	dmab->dev.dir = dir;
	dmab->bytes = 0;
	dmab->addr = 0;
	dmab->private_data = NULL;
	dmab->area = __snd_dma_alloc_pages(dmab, size);
	if (!dmab->area)
		return -ENOMEM;
	dmab->bytes = size;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_dir_pages);
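
/*
 * Example usage (a hypothetical driver snippet, not part of this file;
 * it assumes a valid struct device *dev obtained from the bus):
 *
 *	struct snd_dma_buffer buf;
 *	int err;
 *
 *	err = snd_dma_alloc_dir_pages(SNDRV_DMA_TYPE_DEV, dev,
 *				      DMA_BIDIRECTIONAL, 8192, &buf);
 *	if (err < 0)
 *		return err;
 *	(program buf.addr into the device, access buf.area from the CPU)
 *	snd_dma_free_pages(&buf);
 */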

/**
 * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.  When no space is left, this function reduces the size and
 * tries to allocate again.  The size actually allocated is stored in
 * dmab->bytes.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 * otherwise a negative value on error.
 */
int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
				 struct snd_dma_buffer *dmab)
{
	int err;

	while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
		if (err != -ENOMEM)
			return err;
		if (size <= PAGE_SIZE)
			return -ENOMEM;
		size >>= 1;
		size = PAGE_SIZE << get_order(size);
	}
	if (!dmab->area)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
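
/*
 * Example: a hypothetical caller that accepts a shrunken buffer; the size
 * actually obtained must be read back from dmab->bytes afterwards:
 *
 *	struct snd_dma_buffer buf;
 *
 *	if (snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, dev,
 *					 256 * 1024, &buf) < 0)
 *		return -ENOMEM;
 *	pr_debug("got %zu bytes\n", buf.bytes);
 */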

/**
 * snd_dma_free_pages - release the allocated buffer
 * @dmab: the buffer allocation record to release
 *
 * Releases the buffer allocated via snd_dma_alloc_pages().
 */
void snd_dma_free_pages(struct snd_dma_buffer *dmab)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->free)
		ops->free(dmab);
}
EXPORT_SYMBOL(snd_dma_free_pages);

/* called by devres */
static void __snd_release_pages(struct device *dev, void *res)
{
	snd_dma_free_pages(res);
}

/**
 * snd_devm_alloc_dir_pages - allocate the buffer and manage with devres
 * @dev: the device pointer
 * @type: the DMA buffer type
 * @dir: DMA direction
 * @size: the buffer size to allocate
 *
 * Allocate buffer pages depending on the given type and manage using devres.
 * The pages will be released automatically when the device is removed.
 *
 * Unlike snd_dma_alloc_pages(), this function requires the real device pointer,
 * hence it can't work with the SNDRV_DMA_TYPE_CONTINUOUS or
 * SNDRV_DMA_TYPE_VMALLOC types.
 *
 * Return: the snd_dma_buffer object on success, or NULL on failure
 */
struct snd_dma_buffer *
snd_devm_alloc_dir_pages(struct device *dev, int type,
			 enum dma_data_direction dir, size_t size)
{
	struct snd_dma_buffer *dmab;
	int err;

	if (WARN_ON(type == SNDRV_DMA_TYPE_CONTINUOUS ||
		    type == SNDRV_DMA_TYPE_VMALLOC))
		return NULL;

	dmab = devres_alloc(__snd_release_pages, sizeof(*dmab), GFP_KERNEL);
	if (!dmab)
		return NULL;

	err = snd_dma_alloc_dir_pages(type, dev, dir, size, dmab);
	if (err < 0) {
		devres_free(dmab);
		return NULL;
	}

	devres_add(dev, dmab);
	return dmab;
}
EXPORT_SYMBOL_GPL(snd_devm_alloc_dir_pages);
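
/*
 * Example: a hypothetical probe routine relying on devres, so no explicit
 * free is needed on the error or removal paths:
 *
 *	struct snd_dma_buffer *buf;
 *
 *	buf = snd_devm_alloc_dir_pages(dev, SNDRV_DMA_TYPE_DEV,
 *				       DMA_FROM_DEVICE, 64 * 1024);
 *	if (!buf)
 *		return -ENOMEM;
 *	(buf is freed automatically when dev is unbound)
 */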

/**
 * snd_dma_buffer_mmap - perform mmap of the given DMA buffer
 * @dmab: buffer allocation information
 * @area: VM area information
 *
 * Return: zero if successful, or a negative error code
 */
int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab,
			struct vm_area_struct *area)
{
	const struct snd_malloc_ops *ops;

	if (!dmab)
		return -ENOENT;
	ops = snd_dma_get_ops(dmab);
	if (ops && ops->mmap)
		return ops->mmap(dmab, area);
	else
		return -ENOENT;
}
EXPORT_SYMBOL(snd_dma_buffer_mmap);
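
/*
 * Example: a hypothetical PCM .mmap callback could delegate to this helper
 * (assuming substream->dma_buffer was set up by one of the allocators above):
 *
 *	static int my_pcm_mmap(struct snd_pcm_substream *substream,
 *			       struct vm_area_struct *area)
 *	{
 *		return snd_dma_buffer_mmap(&substream->dma_buffer, area);
 *	}
 */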

#ifdef CONFIG_HAS_DMA
/**
 * snd_dma_buffer_sync - sync DMA buffer between CPU and device
 * @dmab: buffer allocation information
 * @mode: sync mode
 */
void snd_dma_buffer_sync(struct snd_dma_buffer *dmab,
			 enum snd_dma_sync_mode mode)
{
	const struct snd_malloc_ops *ops;

	if (!dmab || !dmab->dev.need_sync)
		return;
	ops = snd_dma_get_ops(dmab);
	if (ops && ops->sync)
		ops->sync(dmab, mode);
}
EXPORT_SYMBOL_GPL(snd_dma_buffer_sync);
#endif /* CONFIG_HAS_DMA */
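
/*
 * Example: hypothetical driver code bracketing CPU access to a (possibly
 * non-coherent) capture buffer with the two sync directions:
 *
 *	snd_dma_buffer_sync(dmab, SNDRV_DMA_SYNC_CPU);
 *	memcpy(dst, dmab->area + pos, bytes);
 *	snd_dma_buffer_sync(dmab, SNDRV_DMA_SYNC_DEVICE);
 */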

/**
 * snd_sgbuf_get_addr - return the physical address at the corresponding offset
 * @dmab: buffer allocation information
 * @offset: offset in the ring buffer
 *
 * Return: the physical address
 */
dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_addr)
		return ops->get_addr(dmab, offset);
	else
		return dmab->addr + offset;
}
EXPORT_SYMBOL(snd_sgbuf_get_addr);

/**
 * snd_sgbuf_get_page - return the physical page at the corresponding offset
 * @dmab: buffer allocation information
 * @offset: offset in the ring buffer
 *
 * Return: the page pointer
 */
struct page *snd_sgbuf_get_page(struct snd_dma_buffer *dmab, size_t offset)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_page)
		return ops->get_page(dmab, offset);
	else
		return virt_to_page(dmab->area + offset);
}
EXPORT_SYMBOL(snd_sgbuf_get_page);

/**
 * snd_sgbuf_get_chunk_size - compute the max chunk size with contiguous pages
 *	on sg-buffer
 * @dmab: buffer allocation information
 * @ofs: offset in the ring buffer
 * @size: the requested size
 *
 * Return: the chunk size
 */
unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
				      unsigned int ofs, unsigned int size)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_chunk_size)
		return ops->get_chunk_size(dmab, ofs, size);
	else
		return size;
}
EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);
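
/*
 * Example: a hypothetical walk over an SG buffer in DMA-contiguous chunks,
 * e.g. to build a device's scatter descriptor list (program_descriptor()
 * stands in for driver-specific code):
 *
 *	unsigned int ofs = 0, rest = dmab->bytes, chunk;
 *
 *	while (rest) {
 *		chunk = snd_sgbuf_get_chunk_size(dmab, ofs, rest);
 *		program_descriptor(snd_sgbuf_get_addr(dmab, ofs), chunk);
 *		ofs += chunk;
 *		rest -= chunk;
 *	}
 */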

/*
 * Continuous pages allocator
 */
static void *do_alloc_pages(struct device *dev, size_t size, dma_addr_t *addr,
			    bool wc)
{
	void *p;
	gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;

 again:
	p = alloc_pages_exact(size, gfp);
	if (!p)
		return NULL;
	*addr = page_to_phys(virt_to_page(p));
	if (!dev)
		return p;
	if ((*addr + size - 1) & ~dev->coherent_dma_mask) {
		if (IS_ENABLED(CONFIG_ZONE_DMA32) && !(gfp & GFP_DMA32)) {
			/* drop the unsuitable pages before retrying from ZONE_DMA32 */
			free_pages_exact(p, size);
			gfp |= GFP_DMA32;
			goto again;
		}
		if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
			/* still out of the mask: retry from ZONE_DMA */
			free_pages_exact(p, size);
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}
#ifdef CONFIG_X86
	if (wc)
		set_memory_wc((unsigned long)(p), size >> PAGE_SHIFT);
#endif
	return p;
}

static void do_free_pages(void *p, size_t size, bool wc)
{
#ifdef CONFIG_X86
	if (wc)
		set_memory_wb((unsigned long)(p), size >> PAGE_SHIFT);
#endif
	free_pages_exact(p, size);
}

static void *snd_dma_continuous_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return do_alloc_pages(dmab->dev.dev, size, &dmab->addr, false);
}

static void snd_dma_continuous_free(struct snd_dma_buffer *dmab)
{
	do_free_pages(dmab->area, dmab->bytes, false);
}

static int snd_dma_continuous_mmap(struct snd_dma_buffer *dmab,
				   struct vm_area_struct *area)
{
	return remap_pfn_range(area, area->vm_start,
			       dmab->addr >> PAGE_SHIFT,
			       area->vm_end - area->vm_start,
			       area->vm_page_prot);
}

static const struct snd_malloc_ops snd_dma_continuous_ops = {
	.alloc = snd_dma_continuous_alloc,
	.free = snd_dma_continuous_free,
	.mmap = snd_dma_continuous_mmap,
};

/*
 * VMALLOC allocator
 */
static void *snd_dma_vmalloc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return vmalloc(size);
}

static void snd_dma_vmalloc_free(struct snd_dma_buffer *dmab)
{
	vfree(dmab->area);
}

static int snd_dma_vmalloc_mmap(struct snd_dma_buffer *dmab,
				struct vm_area_struct *area)
{
	return remap_vmalloc_range(area, dmab->area, 0);
}

#define get_vmalloc_page_addr(dmab, offset) \
	page_to_phys(vmalloc_to_page((dmab)->area + (offset)))

static dma_addr_t snd_dma_vmalloc_get_addr(struct snd_dma_buffer *dmab,
					   size_t offset)
{
	return get_vmalloc_page_addr(dmab, offset) + offset % PAGE_SIZE;
}

static struct page *snd_dma_vmalloc_get_page(struct snd_dma_buffer *dmab,
					     size_t offset)
{
	return vmalloc_to_page(dmab->area + offset);
}

static unsigned int
snd_dma_vmalloc_get_chunk_size(struct snd_dma_buffer *dmab,
			       unsigned int ofs, unsigned int size)
{
	unsigned int start, end;
	unsigned long addr;

	start = ALIGN_DOWN(ofs, PAGE_SIZE);
	end = ofs + size - 1; /* the last byte address */
	/* check page continuity */
	addr = get_vmalloc_page_addr(dmab, start);
	for (;;) {
		start += PAGE_SIZE;
		if (start > end)
			break;
		addr += PAGE_SIZE;
		if (get_vmalloc_page_addr(dmab, start) != addr)
			return start - ofs;
	}
	/* ok, all on contiguous pages */
	return size;
}

static const struct snd_malloc_ops snd_dma_vmalloc_ops = {
	.alloc = snd_dma_vmalloc_alloc,
	.free = snd_dma_vmalloc_free,
	.mmap = snd_dma_vmalloc_mmap,
	.get_addr = snd_dma_vmalloc_get_addr,
	.get_page = snd_dma_vmalloc_get_page,
	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
};

#ifdef CONFIG_HAS_DMA
/*
 * IRAM allocator
 */
#ifdef CONFIG_GENERIC_ALLOCATOR
static void *snd_dma_iram_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	struct device *dev = dmab->dev.dev;
	struct gen_pool *pool;
	void *p;

	if (dev->of_node) {
		pool = of_gen_pool_get(dev->of_node, "iram", 0);
		/* Assign the pool into private_data field */
		dmab->private_data = pool;

		p = gen_pool_dma_alloc_align(pool, size, &dmab->addr, PAGE_SIZE);
		if (p)
			return p;
	}

	/* Internal memory might have limited size and not enough space,
	 * so if we fail to allocate from it, fall back to fetching memory
	 * through the normal allocator.
	 */
	dmab->dev.type = SNDRV_DMA_TYPE_DEV;
	return __snd_dma_alloc_pages(dmab, size);
}

static void snd_dma_iram_free(struct snd_dma_buffer *dmab)
{
	struct gen_pool *pool = dmab->private_data;

	if (pool && dmab->area)
		gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);
}

static int snd_dma_iram_mmap(struct snd_dma_buffer *dmab,
			     struct vm_area_struct *area)
{
	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return remap_pfn_range(area, area->vm_start,
			       dmab->addr >> PAGE_SHIFT,
			       area->vm_end - area->vm_start,
			       area->vm_page_prot);
}

static const struct snd_malloc_ops snd_dma_iram_ops = {
	.alloc = snd_dma_iram_alloc,
	.free = snd_dma_iram_free,
	.mmap = snd_dma_iram_mmap,
};
#endif /* CONFIG_GENERIC_ALLOCATOR */

/*
 * Coherent device pages allocator
 */
static void *snd_dma_dev_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
}

static void snd_dma_dev_free(struct snd_dma_buffer *dmab)
{
	dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
}

static int snd_dma_dev_mmap(struct snd_dma_buffer *dmab,
			    struct vm_area_struct *area)
{
	return dma_mmap_coherent(dmab->dev.dev, area,
				 dmab->area, dmab->addr, dmab->bytes);
}

static const struct snd_malloc_ops snd_dma_dev_ops = {
	.alloc = snd_dma_dev_alloc,
	.free = snd_dma_dev_free,
	.mmap = snd_dma_dev_mmap,
};

/*
 * Write-combined pages
 */
/* x86-specific allocations */
#ifdef CONFIG_SND_DMA_SGBUF
static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return do_alloc_pages(dmab->dev.dev, size, &dmab->addr, true);
}

static void snd_dma_wc_free(struct snd_dma_buffer *dmab)
{
	do_free_pages(dmab->area, dmab->bytes, true);
}

static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
			   struct vm_area_struct *area)
{
	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return snd_dma_continuous_mmap(dmab, area);
}
#else
static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return dma_alloc_wc(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
}

static void snd_dma_wc_free(struct snd_dma_buffer *dmab)
{
	dma_free_wc(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
}

static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
			   struct vm_area_struct *area)
{
	return dma_mmap_wc(dmab->dev.dev, area,
			   dmab->area, dmab->addr, dmab->bytes);
}
#endif /* CONFIG_SND_DMA_SGBUF */

static const struct snd_malloc_ops snd_dma_wc_ops = {
	.alloc = snd_dma_wc_alloc,
	.free = snd_dma_wc_free,
	.mmap = snd_dma_wc_mmap,
};

/*
 * Non-contiguous pages allocator
 */
static void *snd_dma_noncontig_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	struct sg_table *sgt;
	void *p;

#ifdef CONFIG_SND_DMA_SGBUF
	if (cpu_feature_enabled(X86_FEATURE_XENPV))
		return snd_dma_sg_fallback_alloc(dmab, size);
#endif
	sgt = dma_alloc_noncontiguous(dmab->dev.dev, size, dmab->dev.dir,
				      DEFAULT_GFP, 0);
#ifdef CONFIG_SND_DMA_SGBUF
	if (!sgt && !get_dma_ops(dmab->dev.dev))
		return snd_dma_sg_fallback_alloc(dmab, size);
#endif
	if (!sgt)
		return NULL;

	dmab->dev.need_sync = dma_need_sync(dmab->dev.dev,
					    sg_dma_address(sgt->sgl));
	p = dma_vmap_noncontiguous(dmab->dev.dev, size, sgt);
	if (p) {
		dmab->private_data = sgt;
		/* store the first page address for convenience */
		dmab->addr = snd_sgbuf_get_addr(dmab, 0);
	} else {
		dma_free_noncontiguous(dmab->dev.dev, size, sgt, dmab->dev.dir);
	}
	return p;
}

static void snd_dma_noncontig_free(struct snd_dma_buffer *dmab)
{
	dma_vunmap_noncontiguous(dmab->dev.dev, dmab->area);
	dma_free_noncontiguous(dmab->dev.dev, dmab->bytes, dmab->private_data,
			       dmab->dev.dir);
}

static int snd_dma_noncontig_mmap(struct snd_dma_buffer *dmab,
				  struct vm_area_struct *area)
{
	return dma_mmap_noncontiguous(dmab->dev.dev, area,
				      dmab->bytes, dmab->private_data);
}

static void snd_dma_noncontig_sync(struct snd_dma_buffer *dmab,
				   enum snd_dma_sync_mode mode)
{
	if (mode == SNDRV_DMA_SYNC_CPU) {
		if (dmab->dev.dir == DMA_TO_DEVICE)
			return;
		invalidate_kernel_vmap_range(dmab->area, dmab->bytes);
		dma_sync_sgtable_for_cpu(dmab->dev.dev, dmab->private_data,
					 dmab->dev.dir);
	} else {
		if (dmab->dev.dir == DMA_FROM_DEVICE)
			return;
		flush_kernel_vmap_range(dmab->area, dmab->bytes);
		dma_sync_sgtable_for_device(dmab->dev.dev, dmab->private_data,
					    dmab->dev.dir);
	}
}

static inline void snd_dma_noncontig_iter_set(struct snd_dma_buffer *dmab,
					      struct sg_page_iter *piter,
					      size_t offset)
{
	struct sg_table *sgt = dmab->private_data;

	__sg_page_iter_start(piter, sgt->sgl, sgt->orig_nents,
			     offset >> PAGE_SHIFT);
}

static dma_addr_t snd_dma_noncontig_get_addr(struct snd_dma_buffer *dmab,
					     size_t offset)
{
	struct sg_dma_page_iter iter;

	snd_dma_noncontig_iter_set(dmab, &iter.base, offset);
	__sg_page_iter_dma_next(&iter);
	return sg_page_iter_dma_address(&iter) + offset % PAGE_SIZE;
}

static struct page *snd_dma_noncontig_get_page(struct snd_dma_buffer *dmab,
					       size_t offset)
{
	struct sg_page_iter iter;

	snd_dma_noncontig_iter_set(dmab, &iter, offset);
	__sg_page_iter_next(&iter);
	return sg_page_iter_page(&iter);
}

static unsigned int
snd_dma_noncontig_get_chunk_size(struct snd_dma_buffer *dmab,
				 unsigned int ofs, unsigned int size)
{
	struct sg_dma_page_iter iter;
	unsigned int start, end;
	unsigned long addr;

	start = ALIGN_DOWN(ofs, PAGE_SIZE);
	end = ofs + size - 1; /* the last byte address */
	snd_dma_noncontig_iter_set(dmab, &iter.base, start);
	if (!__sg_page_iter_dma_next(&iter))
		return 0;
	/* check page continuity */
	addr = sg_page_iter_dma_address(&iter);
	for (;;) {
		start += PAGE_SIZE;
		if (start > end)
			break;
		addr += PAGE_SIZE;
		if (!__sg_page_iter_dma_next(&iter) ||
		    sg_page_iter_dma_address(&iter) != addr)
			return start - ofs;
	}
	/* ok, all on contiguous pages */
	return size;
}

static const struct snd_malloc_ops snd_dma_noncontig_ops = {
	.alloc = snd_dma_noncontig_alloc,
	.free = snd_dma_noncontig_free,
	.mmap = snd_dma_noncontig_mmap,
	.sync = snd_dma_noncontig_sync,
	.get_addr = snd_dma_noncontig_get_addr,
	.get_page = snd_dma_noncontig_get_page,
	.get_chunk_size = snd_dma_noncontig_get_chunk_size,
};

/* x86-specific SG-buffer with WC pages */
#ifdef CONFIG_SND_DMA_SGBUF
#define sg_wc_address(it) ((unsigned long)page_address(sg_page_iter_page(it)))

static void *snd_dma_sg_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	void *p = snd_dma_noncontig_alloc(dmab, size);
	struct sg_table *sgt = dmab->private_data;
	struct sg_page_iter iter;

	if (!p)
		return NULL;
	if (dmab->dev.type != SNDRV_DMA_TYPE_DEV_WC_SG)
		return p;
	for_each_sgtable_page(sgt, &iter, 0)
		set_memory_wc(sg_wc_address(&iter), 1);
	return p;
}

static void snd_dma_sg_wc_free(struct snd_dma_buffer *dmab)
{
	struct sg_table *sgt = dmab->private_data;
	struct sg_page_iter iter;

	for_each_sgtable_page(sgt, &iter, 0)
		set_memory_wb(sg_wc_address(&iter), 1);
	snd_dma_noncontig_free(dmab);
}

static int snd_dma_sg_wc_mmap(struct snd_dma_buffer *dmab,
			      struct vm_area_struct *area)
{
	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return dma_mmap_noncontiguous(dmab->dev.dev, area,
				      dmab->bytes, dmab->private_data);
}

static const struct snd_malloc_ops snd_dma_sg_wc_ops = {
	.alloc = snd_dma_sg_wc_alloc,
	.free = snd_dma_sg_wc_free,
	.mmap = snd_dma_sg_wc_mmap,
	.sync = snd_dma_noncontig_sync,
	.get_addr = snd_dma_noncontig_get_addr,
	.get_page = snd_dma_noncontig_get_page,
	.get_chunk_size = snd_dma_noncontig_get_chunk_size,
};

/* Fallback SG-buffer allocations for x86 */
struct snd_dma_sg_fallback {
	bool use_dma_alloc_coherent;
	size_t count;
	struct page **pages;
	/*
	 * DMA address array; the entry for the first page of each chunk
	 * also carries the chunk's page count in the low (~PAGE_MASK) bits
	 */
	dma_addr_t *addrs;
};

static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab,
				       struct snd_dma_sg_fallback *sgbuf)
{
	size_t i, size;

	if (sgbuf->pages && sgbuf->addrs) {
		i = 0;
		while (i < sgbuf->count) {
			if (!sgbuf->pages[i] || !sgbuf->addrs[i])
				break;
			size = sgbuf->addrs[i] & ~PAGE_MASK;
			if (WARN_ON(!size))
				break;
			if (sgbuf->use_dma_alloc_coherent)
				dma_free_coherent(dmab->dev.dev, size << PAGE_SHIFT,
						  page_address(sgbuf->pages[i]),
						  sgbuf->addrs[i] & PAGE_MASK);
			else
				do_free_pages(page_address(sgbuf->pages[i]),
					      size << PAGE_SHIFT, false);
			i += size;
		}
	}
	kvfree(sgbuf->pages);
	kvfree(sgbuf->addrs);
	kfree(sgbuf);
}

static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	struct snd_dma_sg_fallback *sgbuf;
	struct page **pagep, *curp;
	size_t chunk, npages;
	dma_addr_t *addrp;
	dma_addr_t addr;
	void *p;

	/* correct the type */
	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_SG)
		dmab->dev.type = SNDRV_DMA_TYPE_DEV_SG_FALLBACK;
	else if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
		dmab->dev.type = SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK;

	sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
	if (!sgbuf)
		return NULL;
	sgbuf->use_dma_alloc_coherent = cpu_feature_enabled(X86_FEATURE_XENPV);
	size = PAGE_ALIGN(size);
	sgbuf->count = size >> PAGE_SHIFT;
	sgbuf->pages = kvcalloc(sgbuf->count, sizeof(*sgbuf->pages), GFP_KERNEL);
	sgbuf->addrs = kvcalloc(sgbuf->count, sizeof(*sgbuf->addrs), GFP_KERNEL);
	if (!sgbuf->pages || !sgbuf->addrs)
		goto error;

	pagep = sgbuf->pages;
	addrp = sgbuf->addrs;
	chunk = (PAGE_SIZE - 1) << PAGE_SHIFT; /* page count must fit in the low bits of addrs */
	while (size > 0) {
		chunk = min(size, chunk);
		if (sgbuf->use_dma_alloc_coherent)
			p = dma_alloc_coherent(dmab->dev.dev, chunk, &addr, DEFAULT_GFP);
		else
			p = do_alloc_pages(dmab->dev.dev, chunk, &addr, false);
		if (!p) {
			if (chunk <= PAGE_SIZE)
				goto error;
			chunk >>= 1;
			chunk = PAGE_SIZE << get_order(chunk);
			continue;
		}

		size -= chunk;
		/* fill pages */
		npages = chunk >> PAGE_SHIFT;
		*addrp = npages; /* store in lower bits */
		curp = virt_to_page(p);
		while (npages--) {
			*pagep++ = curp++;
			*addrp++ |= addr;
			addr += PAGE_SIZE;
		}
	}

	p = vmap(sgbuf->pages, sgbuf->count, VM_MAP, PAGE_KERNEL);
	if (!p)
		goto error;

	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
		set_pages_array_wc(sgbuf->pages, sgbuf->count);

	dmab->private_data = sgbuf;
	/* store the first page address for convenience */
	dmab->addr = sgbuf->addrs[0] & PAGE_MASK;
	return p;

 error:
	__snd_dma_sg_fallback_free(dmab, sgbuf);
	return NULL;
}

static void snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab)
{
	struct snd_dma_sg_fallback *sgbuf = dmab->private_data;

	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
		set_pages_array_wb(sgbuf->pages, sgbuf->count);
	vunmap(dmab->area);
	__snd_dma_sg_fallback_free(dmab, dmab->private_data);
}

static dma_addr_t snd_dma_sg_fallback_get_addr(struct snd_dma_buffer *dmab,
					       size_t offset)
{
	struct snd_dma_sg_fallback *sgbuf = dmab->private_data;
	size_t index = offset >> PAGE_SHIFT;

	return (sgbuf->addrs[index] & PAGE_MASK) | (offset & ~PAGE_MASK);
}

static int snd_dma_sg_fallback_mmap(struct snd_dma_buffer *dmab,
				    struct vm_area_struct *area)
{
	struct snd_dma_sg_fallback *sgbuf = dmab->private_data;

	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
		area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return vm_map_pages(area, sgbuf->pages, sgbuf->count);
}

static const struct snd_malloc_ops snd_dma_sg_fallback_ops = {
	.alloc = snd_dma_sg_fallback_alloc,
	.free = snd_dma_sg_fallback_free,
	.mmap = snd_dma_sg_fallback_mmap,
	.get_addr = snd_dma_sg_fallback_get_addr,
	/* reuse vmalloc helpers */
	.get_page = snd_dma_vmalloc_get_page,
	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
};
#endif /* CONFIG_SND_DMA_SGBUF */

/*
 * Non-coherent pages allocator
 */
static void *snd_dma_noncoherent_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	void *p;

	p = dma_alloc_noncoherent(dmab->dev.dev, size, &dmab->addr,
				  dmab->dev.dir, DEFAULT_GFP);
	if (p)
		dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->addr);
	return p;
}

static void snd_dma_noncoherent_free(struct snd_dma_buffer *dmab)
{
	dma_free_noncoherent(dmab->dev.dev, dmab->bytes, dmab->area,
			     dmab->addr, dmab->dev.dir);
}

static int snd_dma_noncoherent_mmap(struct snd_dma_buffer *dmab,
				    struct vm_area_struct *area)
{
	area->vm_page_prot = vm_get_page_prot(area->vm_flags);
	return dma_mmap_pages(dmab->dev.dev, area,
			      area->vm_end - area->vm_start,
			      virt_to_page(dmab->area));
}

static void snd_dma_noncoherent_sync(struct snd_dma_buffer *dmab,
				     enum snd_dma_sync_mode mode)
{
	if (mode == SNDRV_DMA_SYNC_CPU) {
		if (dmab->dev.dir != DMA_TO_DEVICE)
			dma_sync_single_for_cpu(dmab->dev.dev, dmab->addr,
						dmab->bytes, dmab->dev.dir);
	} else {
		if (dmab->dev.dir != DMA_FROM_DEVICE)
			dma_sync_single_for_device(dmab->dev.dev, dmab->addr,
						   dmab->bytes, dmab->dev.dir);
	}
}

static const struct snd_malloc_ops snd_dma_noncoherent_ops = {
	.alloc = snd_dma_noncoherent_alloc,
	.free = snd_dma_noncoherent_free,
	.mmap = snd_dma_noncoherent_mmap,
	.sync = snd_dma_noncoherent_sync,
};

#endif /* CONFIG_HAS_DMA */

/*
 * Entry points
 */
static const struct snd_malloc_ops *snd_dma_ops[] = {
	[SNDRV_DMA_TYPE_CONTINUOUS] = &snd_dma_continuous_ops,
	[SNDRV_DMA_TYPE_VMALLOC] = &snd_dma_vmalloc_ops,
#ifdef CONFIG_HAS_DMA
	[SNDRV_DMA_TYPE_DEV] = &snd_dma_dev_ops,
	[SNDRV_DMA_TYPE_DEV_WC] = &snd_dma_wc_ops,
	[SNDRV_DMA_TYPE_NONCONTIG] = &snd_dma_noncontig_ops,
	[SNDRV_DMA_TYPE_NONCOHERENT] = &snd_dma_noncoherent_ops,
#ifdef CONFIG_SND_DMA_SGBUF
	[SNDRV_DMA_TYPE_DEV_WC_SG] = &snd_dma_sg_wc_ops,
#endif
#ifdef CONFIG_GENERIC_ALLOCATOR
	[SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
#endif /* CONFIG_GENERIC_ALLOCATOR */
#ifdef CONFIG_SND_DMA_SGBUF
	[SNDRV_DMA_TYPE_DEV_SG_FALLBACK] = &snd_dma_sg_fallback_ops,
	[SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK] = &snd_dma_sg_fallback_ops,
#endif
#endif /* CONFIG_HAS_DMA */
};

static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab)
{
	if (WARN_ON_ONCE(!dmab))
		return NULL;
	if (WARN_ON_ONCE(dmab->dev.type <= SNDRV_DMA_TYPE_UNKNOWN ||
			 dmab->dev.type >= ARRAY_SIZE(snd_dma_ops)))
		return NULL;
	return snd_dma_ops[dmab->dev.type];
}
952