// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Scatter-Gather buffer
 *
 *  Copyright (c) by Takashi Iwai <tiwai@suse.de>
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <sound/memalloc.h>

/* table entries are aligned to 32 */
#define SGBUF_TBL_ALIGN		32
#define sgbuf_align_table(tbl)	ALIGN((tbl), SGBUF_TBL_ALIGN)

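/*
 * Layout note: sgbuf->table holds one entry per page.  The first page
 * of each allocated chunk (its "head") has the chunk size in pages
 * OR'ed into the low bits of its DMA address; chunks are page-aligned,
 * so those bits are otherwise zero.  Non-head entries keep the low
 * bits clear, which is how the free path below tells them apart.
 */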
int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab)
{
	struct snd_sg_buf *sgbuf = dmab->private_data;
	struct snd_dma_buffer tmpb;
	int i;

	if (!sgbuf)
		return -EINVAL;

	vunmap(dmab->area);
	dmab->area = NULL;

	tmpb.dev.type = SNDRV_DMA_TYPE_DEV;
	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC_SG)
		tmpb.dev.type = SNDRV_DMA_TYPE_DEV_UC;
	tmpb.dev.dev = sgbuf->dev;
	for (i = 0; i < sgbuf->pages; i++) {
		if (!(sgbuf->table[i].addr & ~PAGE_MASK))
			continue; /* continuation page, freed with its chunk head */
		tmpb.area = sgbuf->table[i].buf;
		tmpb.addr = sgbuf->table[i].addr & PAGE_MASK;
		tmpb.bytes = (sgbuf->table[i].addr & ~PAGE_MASK) << PAGE_SHIFT;
		snd_dma_free_pages(&tmpb);
	}

	kfree(sgbuf->table);
	kfree(sgbuf->page_table);
	kfree(sgbuf);
	dmab->private_data = NULL;

	return 0;
}
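/*
 * Note: tmpb carries a non-SG type (SNDRV_DMA_TYPE_DEV or _DEV_UC), so
 * the snd_dma_free_pages() calls above release each chunk directly and
 * cannot recurse back into this function.  As far as the memalloc
 * dispatch goes, this function is normally reached via
 * snd_dma_free_pages() on a buffer of one of the *_SG types.
 */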

#define MAX_ALLOC_PAGES		32

void *snd_malloc_sgbuf_pages(struct device *device,
			     size_t size, struct snd_dma_buffer *dmab,
			     size_t *res_size)
{
	struct snd_sg_buf *sgbuf;
	unsigned int i, pages, chunk, maxpages;
	struct snd_dma_buffer tmpb;
	struct snd_sg_page *table;
	struct page **pgtable;
	int type = SNDRV_DMA_TYPE_DEV;
	pgprot_t prot = PAGE_KERNEL;

	dmab->area = NULL;
	dmab->addr = 0;
	dmab->private_data = sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
	if (!sgbuf)
		return NULL;
	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC_SG) {
		type = SNDRV_DMA_TYPE_DEV_UC;
#ifdef pgprot_noncached
		/* map the uncached variant with non-cached page protection */
		prot = pgprot_noncached(PAGE_KERNEL);
#endif
	}
	sgbuf->dev = device;
	pages = snd_sgbuf_aligned_pages(size);
	sgbuf->tblsize = sgbuf_align_table(pages);
	table = kcalloc(sgbuf->tblsize, sizeof(*table), GFP_KERNEL);
	if (!table)
		goto _failed;
	sgbuf->table = table;
	pgtable = kcalloc(sgbuf->tblsize, sizeof(*pgtable), GFP_KERNEL);
	if (!pgtable)
		goto _failed;
	sgbuf->page_table = pgtable;

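	/*
	 * Allocate the body in chunks of at most MAX_ALLOC_PAGES pages
	 * each.  If a chunk allocation fails midway (even after the
	 * fallback allocator has retried with smaller sizes), keep what
	 * was already allocated as a shorter buffer, but only when the
	 * caller passed res_size to signal that truncation is
	 * acceptable.
	 */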
	maxpages = MAX_ALLOC_PAGES;
	while (pages > 0) {
		chunk = pages;
		/* don't be too eager to take a huge chunk */
		if (chunk > maxpages)
			chunk = maxpages;
		chunk <<= PAGE_SHIFT;
		if (snd_dma_alloc_pages_fallback(type, device,
						 chunk, &tmpb) < 0) {
			if (!sgbuf->pages)
				goto _failed;
			if (!res_size)
				goto _failed;
			size = sgbuf->pages * PAGE_SIZE;
			break;
		}
		chunk = tmpb.bytes >> PAGE_SHIFT;
		for (i = 0; i < chunk; i++) {
			table->buf = tmpb.area;
			table->addr = tmpb.addr;
			if (!i)
				table->addr |= chunk; /* mark head */
			table++;
			*pgtable++ = virt_to_page(tmpb.area);
			tmpb.area += PAGE_SIZE;
			tmpb.addr += PAGE_SIZE;
		}
		sgbuf->pages += chunk;
		pages -= chunk;
		/* don't request more than the last successful chunk */
		if (chunk < maxpages)
			maxpages = chunk;
	}

	sgbuf->size = size;
	dmab->area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP, prot);
	if (!dmab->area)
		goto _failed;
	if (res_size)
		*res_size = sgbuf->size;
	return dmab->area;

 _failed:
	snd_free_sgbuf_pages(dmab); /* free everything allocated so far */
	return NULL;
}
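/*
 * Usage sketch: drivers normally do not call snd_malloc_sgbuf_pages()
 * directly; it is reached via the generic allocator entry point with
 * an SG buffer type, roughly as follows (buffer_bytes and dev stand in
 * for the caller's size and struct device; the exact dispatch lives in
 * sound/core/memalloc.c):
 *
 *	struct snd_dma_buffer dmab;
 *
 *	if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, dev,
 *				buffer_bytes, &dmab) < 0)
 *		return -ENOMEM;
 *	...
 *	snd_dma_free_pages(&dmab);
 */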

/*
 * compute the max chunk size of physically continuous pages in the
 * sg-buffer, starting at offset ofs and capped at size bytes
 */
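/*
 * Worked example (illustrative, assuming 4 KiB pages): suppose pages
 * 0-3 of the buffer are physically contiguous but page 4 is not.  A
 * query with ofs = 0x1800 and size = 0x4000 spans pages 1-5; the PFN
 * walk below detects the break at page 4 and returns
 * (4 << PAGE_SHIFT) - 0x1800 = 0x2800 bytes, i.e. everything up to the
 * end of page 3.  The chunk-size marker kept in the low address bits
 * is discarded by the ">> PAGE_SHIFT" shift, so only page frame
 * numbers are compared.
 */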
unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
				      unsigned int ofs, unsigned int size)
{
	struct snd_sg_buf *sg = dmab->private_data;
	unsigned int start, end, pg;

	if (!sg)
		return size;

	start = ofs >> PAGE_SHIFT;
	end = (ofs + size - 1) >> PAGE_SHIFT;
	/* check page continuity */
	pg = sg->table[start].addr >> PAGE_SHIFT;
	for (;;) {
		start++;
		if (start > end)
			break;
		pg++;
		if ((sg->table[start].addr >> PAGE_SHIFT) != pg)
			return (start << PAGE_SHIFT) - ofs;
	}
	/* ok, all on continuous pages */
	return size;
}
EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);