/*
 * isp-vb2-cmalloc.c - CMA (contiguous memory) allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */
#include <linux/version.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/dma-mapping.h>
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0))
#include <linux/dma-contiguous.h>
#else
#include <linux/iommu.h>
#include <linux/dma-map-ops.h>
#endif

#include "isp-vb2-cmalloc.h"

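/*
 * Allocate a physically contiguous buffer from the device's CMA region.
 * Note that the returned pointer is the struct page pointer of the first
 * CMA page (cast to void *), not a kernel virtual address; callers store
 * it in buf->vaddr and convert it where needed.
 */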
static void *cma_alloc(struct device *dev, unsigned long size)
{
	struct page *cma_pages = NULL;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
	cma_pages = dma_alloc_from_contiguous(dev,
			size >> PAGE_SHIFT, 0, false);
#else
	cma_pages = dma_alloc_from_contiguous(dev,
			size >> PAGE_SHIFT, 0);
#endif
	if (!cma_pages) {
		pr_err("Failed to alloc cma pages.\n");
		return NULL;
	}

	return (void *)cma_pages;
}

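/*
 * Release the CMA pages backing a buffer.  buf->vaddr holds the struct
 * page pointer returned by cma_alloc() and buf->dbuf holds the struct
 * device pointer stashed by vb2_cmalloc_alloc().
 */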
static void cma_free(void *buf_priv)
{
	struct vb2_cmalloc_buf *buf = buf_priv;
	struct page *cma_pages = NULL;
	struct device *dev = NULL;
	bool rc = false;

	dev = (void *)(buf->dbuf);

	cma_pages = buf->vaddr;

	rc = dma_release_from_contiguous(dev, cma_pages,
				buf->size >> PAGE_SHIFT);
	if (!rc) {
		pr_err("Failed to release cma buffer\n");
		return;
	}

	buf->vaddr = NULL;
}

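/*
 * Drop one reference to the buffer; free the CMA pages and the metadata
 * once the last user is gone.
 */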
static void vb2_cmalloc_put(void *buf_priv)
{
	struct vb2_cmalloc_buf *buf = buf_priv;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
	if (refcount_dec_and_test(&buf->refcount)) {
#else
	if (atomic_dec_and_test(&buf->refcount)) {
#endif
		cma_free(buf_priv);
		kfree(buf);
	}
}

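/*
 * MEM_OPS .alloc hook: allocate a page-aligned CMA buffer, remember the
 * owning device in buf->dbuf and set up the refcount handler used by the
 * common vm_ops once the buffer is mmap()ed.
 */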
static void *vb2_cmalloc_alloc(struct device *dev, unsigned long attrs,
			       unsigned long size, enum dma_data_direction dma_dir,
			       gfp_t gfp_flags)
{
	struct vb2_cmalloc_buf *buf;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL | gfp_flags);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->size = PAGE_ALIGN(size);
	buf->vaddr = cma_alloc(dev, buf->size);
	buf->dma_dir = dma_dir;
	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_cmalloc_put;
	buf->handler.arg = buf;
	buf->dbuf = (void *)dev;

	if (!buf->vaddr) {
		pr_err("cmalloc of size %ld failed\n", buf->size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
	refcount_set(&buf->refcount, 1);
#else
	atomic_inc(&buf->refcount);
#endif

	return buf;
}

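/*
 * USERPTR support only records the caller-supplied virtual address and
 * size; no page pinning or kernel mapping is performed here.
 */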
void *vb2_get_userptr(struct device *dev, unsigned long vaddr,
				unsigned long size,
				enum dma_data_direction dma_dir)
{
	struct vb2_cmalloc_buf *buf;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (buf == NULL) {
		pr_err("%s: Failed to alloc mem\n", __func__);
		return NULL;
	}

	buf->dbuf = (void *)dev;
	buf->vaddr = (void *)vaddr;
	buf->dma_dir = dma_dir;
	buf->size = size;

	return buf;
}

void vb2_put_userptr(void *buf_priv)
{
	if (buf_priv == NULL) {
		pr_err("%s: invalid input param\n", __func__);
		return;
	}

	kfree(buf_priv);
}

static void *vb2_cmalloc_vaddr(void *buf_priv)
{
	struct vb2_cmalloc_buf *buf = buf_priv;

	if (!buf->vaddr) {
		pr_err("Address of an unallocated plane requested or cannot map user pointer\n");
		return NULL;
	}

	return buf->vaddr;
}

static unsigned int vb2_cmalloc_num_users(void *buf_priv)
{
	struct vb2_cmalloc_buf *buf = buf_priv;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
	return refcount_read(&buf->refcount);
#else
	return atomic_read(&buf->refcount);
#endif
}

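/*
 * Map the physically contiguous buffer into user space.  Because the
 * pages come from CMA, a single remap_pfn_range() of the first page's
 * PFN covers the whole VMA.
 */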
static int vb2_cmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_cmalloc_buf *buf = buf_priv;
	unsigned long pfn = 0;
	unsigned long vsize = 0;
	int ret = -1;
	dma_addr_t paddr = 0;
	struct page *cma_pages = NULL;

	if (!buf || !vma) {
		pr_err("No memory to map\n");
		return -EINVAL;
	}

	vsize = vma->vm_end - vma->vm_start;
	cma_pages = buf->vaddr;
	paddr = page_to_phys(cma_pages);
	pfn = paddr >> PAGE_SHIFT;

	ret = remap_pfn_range(vma, vma->vm_start, pfn, vsize, vma->vm_page_prot);
	if (ret) {
		pr_err("Remapping cma memory failed, error: %d\n", ret);
		return ret;
	}

	/*
	 * Make sure that vm_areas for 2 buffers won't be merged together
	 */
	vma->vm_flags |= VM_DONTEXPAND;

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

struct vb2_cmalloc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

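/*
 * DMA-BUF exporter ops: attach builds an sg_table describing the
 * contiguous CMA pages, map/unmap perform the per-attachment DMA
 * mapping, and release drops the buffer reference taken at export time.
 */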
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
static int vb2_cmalloc_dmabuf_ops_attach(struct dma_buf *dbuf,
		struct dma_buf_attachment *dbuf_attach)
#else
static int vb2_cmalloc_dmabuf_ops_attach(struct dma_buf *dbuf,
		struct device *dev, struct dma_buf_attachment *dbuf_attach)
#endif
{
	struct vb2_cmalloc_attachment *attach;
	struct vb2_cmalloc_buf *buf = dbuf->priv;
	int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;
	struct sg_table *sgt;
	struct scatterlist *sg;
	struct page *page;
	int ret;
	int i;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return ret;
	}

	/* buf->vaddr is the struct page pointer of the first CMA page */
	page = buf->vaddr;
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		if (!page) {
			sg_free_table(sgt);
			kfree(attach);
			return -ENOMEM;
		}
		sg_set_page(sg, page, PAGE_SIZE, 0);
		page++;
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;
	return 0;
}

static void vb2_cmalloc_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_cmalloc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

static struct sg_table *vb2_cmalloc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_cmalloc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
				dma_dir);
	if (!sgt->nents) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_cmalloc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_cmalloc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	vb2_cmalloc_put(dbuf->priv);
}

#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0))
static void *vb2_cmalloc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_cmalloc_buf *buf = dbuf->priv;

	return buf->vaddr + pgnum * PAGE_SIZE;
}
#endif

static void *vb2_cmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_cmalloc_buf *buf = dbuf->priv;

#ifdef CONFIG_ANDROID_OS
	return buf->vaddr;
#else
	return page_to_virt(buf->vaddr);
#endif
}

static int vb2_cmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_cmalloc_mmap(dbuf->priv, vma);
}

static struct dma_buf_ops vb2_cmalloc_dmabuf_ops = {
	.attach = vb2_cmalloc_dmabuf_ops_attach,
	.detach = vb2_cmalloc_dmabuf_ops_detach,
	.map_dma_buf = vb2_cmalloc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_cmalloc_dmabuf_ops_unmap,
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0))
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
	.map = vb2_cmalloc_dmabuf_ops_kmap,
#else
	.kmap = vb2_cmalloc_dmabuf_ops_kmap,
	.kmap_atomic = vb2_cmalloc_dmabuf_ops_kmap,
#endif
#endif
	.vmap = vb2_cmalloc_dmabuf_ops_vmap,
	.mmap = vb2_cmalloc_dmabuf_ops_mmap,
	.release = vb2_cmalloc_dmabuf_ops_release,
};

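/*
 * Export the buffer as a dma-buf.  An extra reference is taken here and
 * dropped again by vb2_cmalloc_dmabuf_ops_release() when the dma-buf is
 * closed.
 */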
static struct dma_buf *vb2_cmalloc_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_cmalloc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_cmalloc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (WARN_ON(!buf->vaddr))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
	refcount_inc(&buf->refcount);
#else
	atomic_inc(&buf->refcount);
#endif

	return dbuf;
}

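/*
 * Exported memory-ops table.  The dmabuf importer hooks (attach/detach/
 * map/unmap) are left NULL, so this allocator can export but not import
 * DMA-BUF buffers.
 */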
const struct vb2_mem_ops vb2_cmalloc_memops = {
	.alloc		= vb2_cmalloc_alloc,
	.put		= vb2_cmalloc_put,
	.get_userptr	= vb2_get_userptr,
	.put_userptr	= vb2_put_userptr,
#ifdef CONFIG_HAS_DMA
	.get_dmabuf	= vb2_cmalloc_get_dmabuf,
#endif
	.map_dmabuf	= NULL,
	.unmap_dmabuf	= NULL,
	.attach_dmabuf	= NULL,
	.detach_dmabuf	= NULL,
	.vaddr		= vb2_cmalloc_vaddr,
	.mmap		= vb2_cmalloc_mmap,
	.num_users	= vb2_cmalloc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_cmalloc_memops);

MODULE_DESCRIPTION("cmalloc memory handling routines for videobuf2");
MODULE_AUTHOR("Keke Li <keke.li@amlogic.com>");
MODULE_LICENSE("GPL");