/*
 * videobuf2-vmalloc.c - vmalloc memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-vmalloc.h>
#include <media/videobuf2-memops.h>

struct vb2_vmalloc_buf {
	void				*vaddr;		/* kernel virtual address of the buffer */
	struct page			**pages;	/* page array for USERPTR buffers */
	struct vm_area_struct		*vma;		/* VMA of a contiguous (PFNMAP) userptr buffer */
	int				write;		/* non-zero if the buffer may be written to */
	unsigned long			size;		/* buffer size in bytes */
	unsigned int			n_pages;	/* number of entries in @pages */
	atomic_t			refcount;	/* number of users of this buffer */
	struct vb2_vmarea_handler	handler;	/* mmap refcounting handler */
	struct dma_buf			*dbuf;		/* attached dma-buf, if any */
};

static void vb2_vmalloc_put(void *buf_priv);

static void *vb2_vmalloc_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags)
{
	struct vb2_vmalloc_buf *buf;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL | gfp_flags);
	if (!buf)
		return NULL;

	buf->size = size;
	buf->vaddr = vmalloc_user(buf->size);
	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_vmalloc_put;
	buf->handler.arg = buf;

	if (!buf->vaddr) {
		pr_debug("vmalloc of size %lu failed\n", buf->size);
		kfree(buf);
		return NULL;
	}

	atomic_inc(&buf->refcount);
	return buf;
}

static void vb2_vmalloc_put(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	if (atomic_dec_and_test(&buf->refcount)) {
		vfree(buf->vaddr);
		kfree(buf);
	}
}

static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr,
				     unsigned long size, int write)
{
	struct vb2_vmalloc_buf *buf;
	unsigned long first, last;
	int n_pages, offset;
	struct vm_area_struct *vma;
	dma_addr_t physp;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->write = write;
	offset = vaddr & ~PAGE_MASK;
	buf->size = size;

	vma = find_vma(current->mm, vaddr);
	if (vma && (vma->vm_flags & VM_PFNMAP) && (vma->vm_pgoff)) {
		if (vb2_get_contig_userptr(vaddr, size, &vma, &physp))
			goto fail_pages_array_alloc;
		buf->vma = vma;
		buf->vaddr = ioremap_nocache(physp, size);
		if (!buf->vaddr)
			goto fail_pages_array_alloc;
	} else {
		first = vaddr >> PAGE_SHIFT;
		last  = (vaddr + size - 1) >> PAGE_SHIFT;
		buf->n_pages = last - first + 1;
		buf->pages = kzalloc(buf->n_pages * sizeof(struct page *),
				     GFP_KERNEL);
		if (!buf->pages)
			goto fail_pages_array_alloc;

		/* current->mm->mmap_sem is taken by videobuf2 core */
		n_pages = get_user_pages(current, current->mm,
					 vaddr & PAGE_MASK, buf->n_pages,
					 write, 1, /* force */
					 buf->pages, NULL);
		if (n_pages != buf->n_pages)
			goto fail_get_user_pages;

		buf->vaddr = vm_map_ram(buf->pages, buf->n_pages, -1,
					PAGE_KERNEL);
		if (!buf->vaddr)
			goto fail_get_user_pages;
	}

	buf->vaddr += offset;
	return buf;

fail_get_user_pages:
	pr_debug("get_user_pages requested/got: %u/%d\n", buf->n_pages,
		 n_pages);
	while (--n_pages >= 0)
		put_page(buf->pages[n_pages]);
	kfree(buf->pages);

fail_pages_array_alloc:
	kfree(buf);

	return NULL;
}

static void vb2_vmalloc_put_userptr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
	unsigned int i;

	if (buf->pages) {
		if (vaddr)
			vm_unmap_ram((void *)vaddr, buf->n_pages);
		for (i = 0; i < buf->n_pages; ++i) {
			if (buf->write)
				set_page_dirty_lock(buf->pages[i]);
			put_page(buf->pages[i]);
		}
		kfree(buf->pages);
	} else {
		if (buf->vma)
			vb2_put_vma(buf->vma);
		iounmap(buf->vaddr);
	}
	kfree(buf);
}

static void *vb2_vmalloc_vaddr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	if (!buf->vaddr) {
		pr_err("Address of an unallocated plane requested or cannot map user pointer\n");
		return NULL;
	}

	return buf->vaddr;
}

static unsigned int vb2_vmalloc_num_users(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	return atomic_read(&buf->refcount);
}

static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		pr_err("No memory to map\n");
		return -EINVAL;
	}

	ret = remap_vmalloc_range(vma, buf->vaddr, 0);
	if (ret) {
		pr_err("Remapping vmalloc memory, error: %d\n", ret);
		return ret;
	}

	/*
	 * Make sure that vm_areas for 2 buffers won't be merged together
	 */
	vma->vm_flags		|= VM_DONTEXPAND;

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data	= &buf->handler;
	vma->vm_ops		= &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_vmalloc_map_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;

	buf->vaddr = dma_buf_vmap(buf->dbuf);

	return buf->vaddr ? 0 : -EFAULT;
}

static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;

	dma_buf_vunmap(buf->dbuf, buf->vaddr);
	buf->vaddr = NULL;
}

static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;

	if (buf->vaddr)
		dma_buf_vunmap(buf->dbuf, buf->vaddr);

	kfree(buf);
}

static void *vb2_vmalloc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
	unsigned long size, int write)
{
	struct vb2_vmalloc_buf *buf;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dbuf = dbuf;
	buf->write = write;
	buf->size = size;

	return buf;
}

const struct vb2_mem_ops vb2_vmalloc_memops = {
	.alloc		= vb2_vmalloc_alloc,
	.put		= vb2_vmalloc_put,
	.get_userptr	= vb2_vmalloc_get_userptr,
	.put_userptr	= vb2_vmalloc_put_userptr,
	.map_dmabuf	= vb2_vmalloc_map_dmabuf,
	.unmap_dmabuf	= vb2_vmalloc_unmap_dmabuf,
	.attach_dmabuf	= vb2_vmalloc_attach_dmabuf,
	.detach_dmabuf	= vb2_vmalloc_detach_dmabuf,
	.vaddr		= vb2_vmalloc_vaddr,
	.mmap		= vb2_vmalloc_mmap,
	.num_users	= vb2_vmalloc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_vmalloc_memops);

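/*
 * Usage sketch (illustrative, not part of the original file): a driver
 * selects this allocator by pointing its vb2_queue at vb2_vmalloc_memops
 * before calling vb2_queue_init().  The "my_dev" device, "my_dev_qops"
 * vb2_ops table and "my_dev_buffer" struct below are hypothetical names;
 * unrelated queue setup is omitted.
 *
 *	struct vb2_queue *q = &my_dev->queue;
 *	int ret;
 *
 *	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
 *	q->drv_priv = my_dev;
 *	q->buf_struct_size = sizeof(struct my_dev_buffer);
 *	q->ops = &my_dev_qops;
 *	q->mem_ops = &vb2_vmalloc_memops;
 *	ret = vb2_queue_init(q);
 */
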
MODULE_DESCRIPTION("vmalloc memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");