// SPDX-License-Identifier: GPL-2.0
/*
 * DMABUF CMA heap exporter
 *
 * Copyright (C) 2012, 2019, 2020 Linaro Ltd.
 * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
 *
 * Also utilizing parts of Andrew Davis' SRAM heap:
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
 *	Andrew F. Davis <afd@ti.com>
 */
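/*
 * The heap registered here appears to userspace as a character device
 * under /dev/dma_heap/, named after the backing CMA area; buffers are
 * allocated from it with DMA_HEAP_IOCTL_ALLOC and handed back as
 * dma-buf file descriptors.
 */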
#include <linux/cma.h>
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/dma-map-ops.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

struct cma_heap {
	struct dma_heap *heap;
	struct cma *cma;
};

struct cma_heap_buffer {
	struct cma_heap *heap;
	struct list_head attachments;
	struct mutex lock;
	unsigned long len;
	struct page *cma_pages;
	struct page **pages;
	pgoff_t pagecount;
	int vmap_cnt;
	void *vaddr;
};

struct dma_heap_attachment {
	struct device *dev;
	struct sg_table table;
	struct list_head list;
	bool mapped;
};

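/*
 * Each attachment gets its own scatterlist, built from the buffer's page
 * array, so it can be DMA-mapped independently for that device.
 */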
static int cma_heap_attach(struct dma_buf *dmabuf,
			   struct dma_buf_attachment *attachment)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;
	int ret;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	ret = sg_alloc_table_from_pages(&a->table, buffer->pages,
					buffer->pagecount, 0,
					buffer->pagecount << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret) {
		kfree(a);
		return ret;
	}

	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->list);
	a->mapped = false;

	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

static void cma_heap_detach(struct dma_buf *dmabuf,
			    struct dma_buf_attachment *attachment)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a = attachment->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->list);
	mutex_unlock(&buffer->lock);

	sg_free_table(&a->table);
	kfree(a);
}

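/*
 * Map/unmap the per-attachment scatterlist for DMA, honouring any
 * dma_map_attrs set on the attachment.
 */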
static struct sg_table *cma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;
	struct sg_table *table = &a->table;
	int attrs = attachment->dma_map_attrs;
	int ret;

	ret = dma_map_sgtable(attachment->dev, table, direction, attrs);
	if (ret)
		return ERR_PTR(-ENOMEM);
	a->mapped = true;
	return table;
}

static void cma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *table,
				   enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;
	int attrs = attachment->dma_map_attrs;

	a->mapped = false;
	dma_unmap_sgtable(attachment->dev, table, direction, attrs);
}

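/*
 * CPU access bracketing: flush/invalidate any live kernel vmap of the
 * buffer and sync every currently mapped attachment for the CPU or the
 * device, as appropriate.
 */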
static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					     enum dma_data_direction direction)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
					   enum dma_data_direction direction)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		flush_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_device(a->dev, &a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

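/*
 * Userspace mappings are VM_PFNMAP: pages are inserted on demand from
 * the fault handler rather than being mapped up front.
 */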
static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct cma_heap_buffer *buffer = vma->vm_private_data;

	if (vmf->pgoff >= buffer->pagecount)
		return VM_FAULT_SIGBUS;

	return vmf_insert_pfn(vma, vmf->address, page_to_pfn(buffer->pages[vmf->pgoff]));
}

static const struct vm_operations_struct dma_heap_vm_ops = {
	.fault = cma_heap_vm_fault,
};

static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;

	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
		return -EINVAL;

	vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);

	vma->vm_ops = &dma_heap_vm_ops;
	vma->vm_private_data = buffer;

	return 0;
}

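/*
 * Kernel mappings: the whole buffer is vmap()ed on first use and
 * refcounted via vmap_cnt, so nested vmap/vunmap calls reuse the same
 * mapping.
 */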
static void *cma_heap_do_vmap(struct cma_heap_buffer *buffer)
{
	void *vaddr;

	vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);
	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

static int cma_heap_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	void *vaddr;
	int ret = 0;

	mutex_lock(&buffer->lock);
	if (buffer->vmap_cnt) {
		buffer->vmap_cnt++;
		iosys_map_set_vaddr(map, buffer->vaddr);
		goto out;
	}

	vaddr = cma_heap_do_vmap(buffer);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto out;
	}
	buffer->vaddr = vaddr;
	buffer->vmap_cnt++;
	iosys_map_set_vaddr(map, buffer->vaddr);
out:
	mutex_unlock(&buffer->lock);

	return ret;
}

static void cma_heap_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	if (!--buffer->vmap_cnt) {
		vunmap(buffer->vaddr);
		buffer->vaddr = NULL;
	}
	mutex_unlock(&buffer->lock);
	iosys_map_clear(map);
}

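/*
 * Final teardown: warn if a kernel mapping is still live, then free the
 * page array and hand the pages back to the CMA region.
 */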
static void cma_heap_dma_buf_release(struct dma_buf *dmabuf)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct cma_heap *cma_heap = buffer->heap;

	if (buffer->vmap_cnt > 0) {
		WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
		vunmap(buffer->vaddr);
		buffer->vaddr = NULL;
	}

	/* free page list */
	kfree(buffer->pages);
	/* release memory */
	cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount);
	kfree(buffer);
}

static const struct dma_buf_ops cma_heap_buf_ops = {
	.attach = cma_heap_attach,
	.detach = cma_heap_detach,
	.map_dma_buf = cma_heap_map_dma_buf,
	.unmap_dma_buf = cma_heap_unmap_dma_buf,
	.begin_cpu_access = cma_heap_dma_buf_begin_cpu_access,
	.end_cpu_access = cma_heap_dma_buf_end_cpu_access,
	.mmap = cma_heap_mmap,
	.vmap = cma_heap_vmap,
	.vunmap = cma_heap_vunmap,
	.release = cma_heap_dma_buf_release,
};

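/*
 * Allocation path: carve a physically contiguous block out of the CMA
 * region, zero it, build the page array used by the sg/vmap/mmap paths
 * above, and wrap the result in an exported dma-buf.
 */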
static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
					 unsigned long len,
					 u32 fd_flags,
					 u64 heap_flags)
{
	struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
	struct cma_heap_buffer *buffer;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	size_t size = PAGE_ALIGN(len);
	pgoff_t pagecount = size >> PAGE_SHIFT;
	unsigned long align = get_order(size);
	struct page *cma_pages;
	struct dma_buf *dmabuf;
	int ret = -ENOMEM;
	pgoff_t pg;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->lock);
	buffer->len = size;

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	cma_pages = cma_alloc(cma_heap->cma, pagecount, align, false);
	if (!cma_pages)
		goto free_buffer;

	/* Clear the cma pages */
	if (PageHighMem(cma_pages)) {
		unsigned long nr_clear_pages = pagecount;
		struct page *page = cma_pages;

		while (nr_clear_pages > 0) {
			void *vaddr = kmap_atomic(page);

			memset(vaddr, 0, PAGE_SIZE);
			kunmap_atomic(vaddr);
			/*
			 * Avoid wasting time zeroing memory if the process
			 * has been killed by SIGKILL.
			 */
			if (fatal_signal_pending(current))
				goto free_cma;
			page++;
			nr_clear_pages--;
		}
	} else {
		memset(page_address(cma_pages), 0, size);
	}

	buffer->pages = kmalloc_array(pagecount, sizeof(*buffer->pages), GFP_KERNEL);
	if (!buffer->pages) {
		ret = -ENOMEM;
		goto free_cma;
	}

	for (pg = 0; pg < pagecount; pg++)
		buffer->pages[pg] = &cma_pages[pg];

	buffer->cma_pages = cma_pages;
	buffer->heap = cma_heap;
	buffer->pagecount = pagecount;

	/* create the dmabuf */
	exp_info.exp_name = dma_heap_get_name(heap);
	exp_info.ops = &cma_heap_buf_ops;
	exp_info.size = buffer->len;
	exp_info.flags = fd_flags;
	exp_info.priv = buffer;
	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto free_pages;
	}
	return dmabuf;

free_pages:
	kfree(buffer->pages);
free_cma:
	cma_release(cma_heap->cma, cma_pages, pagecount);
free_buffer:
	kfree(buffer);

	return ERR_PTR(ret);
}

static const struct dma_heap_ops cma_heap_ops = {
	.allocate = cma_heap_allocate,
};

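/*
 * Register one dma-heap on top of a CMA region; the heap takes its name
 * from the CMA area itself.
 */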
static int __add_cma_heap(struct cma *cma, void *data)
{
	struct cma_heap *cma_heap;
	struct dma_heap_export_info exp_info;

	cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
	if (!cma_heap)
		return -ENOMEM;
	cma_heap->cma = cma;

	exp_info.name = cma_get_name(cma);
	exp_info.ops = &cma_heap_ops;
	exp_info.priv = cma_heap;

	cma_heap->heap = dma_heap_add(&exp_info);
	if (IS_ERR(cma_heap->heap)) {
		int ret = PTR_ERR(cma_heap->heap);

		kfree(cma_heap);
		return ret;
	}

	return 0;
}

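/*
 * At module init, expose only the system-wide default CMA area (if one
 * exists) as a dma-heap; systems without a default CMA region register
 * no heap and init still succeeds.
 */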
static int add_default_cma_heap(void)
{
	struct cma *default_cma = dev_get_cma_area(NULL);
	int ret = 0;

	if (default_cma)
		ret = __add_cma_heap(default_cma, NULL);

	return ret;
}
module_init(add_default_cma_heap);
MODULE_DESCRIPTION("DMA-BUF CMA Heap");
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(DMA_BUF);