// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Scatter-Gather buffer
 *
 *  Copyright (c) by Takashi Iwai <tiwai@suse.de>
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <sound/memalloc.h>
#include "memalloc_local.h"

struct snd_sg_page {
	void *buf;		/* virtual address of the page */
	dma_addr_t addr;	/* DMA address (chunk heads carry a size marker in the low bits) */
};

struct snd_sg_buf {
	int size;	/* allocated byte size */
	int pages;	/* allocated pages */
	int tblsize;	/* allocated table size */
	struct snd_sg_page *table;	/* address table */
	struct page **page_table;	/* page table (for vmap/vunmap) */
	struct device *dev;
};

/* table entries are aligned to 32 */
#define SGBUF_TBL_ALIGN		32
#define sgbuf_align_table(tbl)	ALIGN((tbl), SGBUF_TBL_ALIGN)

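/*
 * How it works: the buffer is assembled from several physically
 * contiguous chunks; each page's virtual and DMA address is recorded
 * in the table, and vmap() finally glues the pages into one
 * contiguous kernel virtual mapping.
 */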
static void snd_dma_sg_free(struct snd_dma_buffer *dmab)
{
	struct snd_sg_buf *sgbuf = dmab->private_data;
	struct snd_dma_buffer tmpb;
	int i;

	if (!sgbuf)
		return;

	vunmap(dmab->area);
	dmab->area = NULL;

	tmpb.dev.type = SNDRV_DMA_TYPE_DEV;
	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
		tmpb.dev.type = SNDRV_DMA_TYPE_DEV_WC;
	tmpb.dev.dev = sgbuf->dev;
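	/*
	 * Only the head page of each chunk carries the chunk size (in
	 * pages) in the low bits of its DMA address; rebuild a temporary
	 * buffer from each head entry and free the whole chunk at once.
	 */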
	for (i = 0; i < sgbuf->pages; i++) {
		if (!(sgbuf->table[i].addr & ~PAGE_MASK))
			continue; /* continuation page within a chunk */
		tmpb.area = sgbuf->table[i].buf;
		tmpb.addr = sgbuf->table[i].addr & PAGE_MASK;
		tmpb.bytes = (sgbuf->table[i].addr & ~PAGE_MASK) << PAGE_SHIFT;
		snd_dma_free_pages(&tmpb);
	}

	kfree(sgbuf->table);
	kfree(sgbuf->page_table);
	kfree(sgbuf);
	dmab->private_data = NULL;
}

#define MAX_ALLOC_PAGES		32

static void *snd_dma_sg_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	struct snd_sg_buf *sgbuf;
	unsigned int i, pages, chunk, maxpages;
	struct snd_dma_buffer tmpb;
	struct snd_sg_page *table;
	struct page **pgtable;
	int type = SNDRV_DMA_TYPE_DEV;
	pgprot_t prot = PAGE_KERNEL;
	void *area;

	dmab->private_data = sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
	if (!sgbuf)
		return NULL;
	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG) {
		type = SNDRV_DMA_TYPE_DEV_WC;
#ifdef pgprot_noncached
		prot = pgprot_noncached(PAGE_KERNEL);
#endif
	}
	sgbuf->dev = dmab->dev.dev;
	pages = snd_sgbuf_aligned_pages(size);
	sgbuf->tblsize = sgbuf_align_table(pages);
	table = kcalloc(sgbuf->tblsize, sizeof(*table), GFP_KERNEL);
	if (!table)
		goto _failed;
	sgbuf->table = table;
	pgtable = kcalloc(sgbuf->tblsize, sizeof(*pgtable), GFP_KERNEL);
	if (!pgtable)
		goto _failed;
	sgbuf->page_table = pgtable;

	/* allocate pages */
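	/*
	 * Up to MAX_ALLOC_PAGES pages are requested per chunk; whenever
	 * the allocator returns less than requested, the cap is lowered
	 * so later rounds don't retry oversized requests.  A short result
	 * is accepted as long as at least one page was obtained.
	 */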
	maxpages = MAX_ALLOC_PAGES;
	while (pages > 0) {
		chunk = pages;
		/* don't be too eager to take a huge chunk */
		if (chunk > maxpages)
			chunk = maxpages;
		chunk <<= PAGE_SHIFT;
		if (snd_dma_alloc_pages_fallback(type, dmab->dev.dev,
						 chunk, &tmpb) < 0) {
			if (!sgbuf->pages)
				goto _failed;
			size = sgbuf->pages * PAGE_SIZE;
			break;
		}
		chunk = tmpb.bytes >> PAGE_SHIFT;
		for (i = 0; i < chunk; i++) {
			table->buf = tmpb.area;
			table->addr = tmpb.addr;
			if (!i)
				table->addr |= chunk; /* head page: stash chunk size (in pages) in the low bits */
			table++;
			*pgtable++ = virt_to_page(tmpb.area);
			tmpb.area += PAGE_SIZE;
			tmpb.addr += PAGE_SIZE;
		}
		sgbuf->pages += chunk;
		pages -= chunk;
		if (chunk < maxpages)
			maxpages = chunk;
	}

	sgbuf->size = size;
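	/* stitch the scattered pages into one contiguous kernel mapping */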
	area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP, prot);
	if (!area)
		goto _failed;
	return area;

 _failed:
	snd_dma_sg_free(dmab); /* release the tables and any chunks allocated so far */
	return NULL;
}

static dma_addr_t snd_dma_sg_get_addr(struct snd_dma_buffer *dmab,
				      size_t offset)
{
	struct snd_sg_buf *sgbuf = dmab->private_data;
	dma_addr_t addr;

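	/* mask off the chunk-size marker possibly held in the low bits */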
	addr = sgbuf->table[offset >> PAGE_SHIFT].addr;
	addr &= ~((dma_addr_t)PAGE_SIZE - 1);
	return addr + offset % PAGE_SIZE;
}

static struct page *snd_dma_sg_get_page(struct snd_dma_buffer *dmab,
					size_t offset)
{
	struct snd_sg_buf *sgbuf = dmab->private_data;
	unsigned int idx = offset >> PAGE_SHIFT;

	if (idx >= (unsigned int)sgbuf->pages)
		return NULL;
	return sgbuf->page_table[idx];
}

static unsigned int snd_dma_sg_get_chunk_size(struct snd_dma_buffer *dmab,
					      unsigned int ofs,
					      unsigned int size)
{
	struct snd_sg_buf *sg = dmab->private_data;
	unsigned int start, end, pg;

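	/*
	 * Return how many bytes from ofs onward are DMA-contiguous,
	 * capped at size; callers use this to split transfers at
	 * chunk boundaries.
	 */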
	start = ofs >> PAGE_SHIFT;
	end = (ofs + size - 1) >> PAGE_SHIFT;
	/* check page continuity */
	pg = sg->table[start].addr >> PAGE_SHIFT;
	for (;;) {
		start++;
		if (start > end)
			break;
		pg++;
		if ((sg->table[start].addr >> PAGE_SHIFT) != pg)
			return (start << PAGE_SHIFT) - ofs;
	}
	/* ok, all on continuous pages */
	return size;
}

static int snd_dma_sg_mmap(struct snd_dma_buffer *dmab,
			   struct vm_area_struct *area)
{
	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
		area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return -ENOENT; /* continue with the default mmap handler */
}

const struct snd_malloc_ops snd_dma_sg_ops = {
	.alloc = snd_dma_sg_alloc,
	.free = snd_dma_sg_free,
	.get_addr = snd_dma_sg_get_addr,
	.get_page = snd_dma_sg_get_page,
	.get_chunk_size = snd_dma_sg_get_chunk_size,
	.mmap = snd_dma_sg_mmap,
};
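
/*
 * Usage sketch (not part of this file; "card->dev" below stands for
 * whatever struct device the driver allocates against): drivers
 * normally reach these ops through the generic front end,
 * snd_dma_alloc_pages(), with the SNDRV_DMA_TYPE_DEV_SG type:
 *
 *	struct snd_dma_buffer dmab;
 *	int err;
 *
 *	err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, card->dev,
 *				  buffer_size, &dmab);
 *	if (err < 0)
 *		return err;
 *	... use dmab.area (CPU view) and snd_sgbuf_get_addr() (DMA view) ...
 *	snd_dma_free_pages(&dmab);
 */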