/*
 * videobuf2-vmalloc.c - vmalloc memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/refcount.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>
#include <media/videobuf2-memops.h>

struct vb2_vmalloc_buf {
	void				*vaddr;
	struct frame_vector		*vec;
	enum dma_data_direction		dma_dir;
	unsigned long			size;
	refcount_t			refcount;
	struct vb2_vmarea_handler	handler;
	struct dma_buf			*dbuf;
};

static void vb2_vmalloc_put(void *buf_priv);

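/*
 * MMAP mode: allocate @size bytes with vmalloc_user() (page-aligned,
 * zeroed memory suitable for mapping to userspace) and set up the
 * refcount handler used by the common vm_area operations.
 */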
static void *vb2_vmalloc_alloc(struct vb2_buffer *vb, struct device *dev,
			       unsigned long size)
{
	struct vb2_vmalloc_buf *buf;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL | vb->vb2_queue->gfp_flags);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->size = size;
	buf->vaddr = vmalloc_user(buf->size);
	if (!buf->vaddr) {
		pr_debug("vmalloc of size %lu failed\n", buf->size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_vmalloc_put;
	buf->handler.arg = buf;

	refcount_set(&buf->refcount, 1);
	return buf;
}

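/*
 * Drop one reference and free the vmalloc area together with the
 * bookkeeping structure once the last user is gone.
 */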
static void vb2_vmalloc_put(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	if (refcount_dec_and_test(&buf->refcount)) {
		vfree(buf->vaddr);
		kfree(buf);
	}
}

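/*
 * USERPTR mode: pin the user pages backing @vaddr. When page structs
 * are available, the pages are mapped into the kernel with
 * vm_map_ram(); otherwise the PFNs must be physically contiguous and
 * are mapped with ioremap() instead.
 */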
static void *vb2_vmalloc_get_userptr(struct vb2_buffer *vb, struct device *dev,
				     unsigned long vaddr, unsigned long size)
{
	struct vb2_vmalloc_buf *buf;
	struct frame_vector *vec;
	int n_pages, offset, i;
	int ret = -ENOMEM;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dma_dir = vb->vb2_queue->dma_dir;
	offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	vec = vb2_create_framevec(vaddr, size);
	if (IS_ERR(vec)) {
		ret = PTR_ERR(vec);
		goto fail_pfnvec_create;
	}
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	if (frame_vector_to_pages(vec) < 0) {
		unsigned long *nums = frame_vector_pfns(vec);

		/*
		 * We cannot get page pointers for these pfns. Check memory is
		 * physically contiguous and use direct mapping.
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i - 1] + 1 != nums[i])
				goto fail_map;
		buf->vaddr = (__force void *)
			ioremap(__pfn_to_phys(nums[0]), size + offset);
	} else {
		buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1);
	}

	if (!buf->vaddr)
		goto fail_map;
	buf->vaddr += offset;
	return buf;

fail_map:
	vb2_destroy_framevec(vec);
fail_pfnvec_create:
	kfree(buf);

	return ERR_PTR(ret);
}

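/*
 * Undo vb2_vmalloc_get_userptr(): drop the kernel mapping, mark pages
 * dirty if the device may have written to them, and release the
 * pinned frame vector.
 */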
static void vb2_vmalloc_put_userptr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
	unsigned int i;
	struct page **pages;
	unsigned int n_pages;

	if (!buf->vec->is_pfns) {
		n_pages = frame_vector_count(buf->vec);
		pages = frame_vector_pages(buf->vec);
		if (vaddr)
			vm_unmap_ram((void *)vaddr, n_pages);
		if (buf->dma_dir == DMA_FROM_DEVICE ||
		    buf->dma_dir == DMA_BIDIRECTIONAL)
			for (i = 0; i < n_pages; i++)
				set_page_dirty_lock(pages[i]);
	} else {
		iounmap((__force void __iomem *)buf->vaddr);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

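/* Return the kernel virtual address of the plane, if one exists. */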
static void *vb2_vmalloc_vaddr(struct vb2_buffer *vb, void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	if (!buf->vaddr) {
		pr_err("Address of an unallocated plane requested or cannot map user pointer\n");
		return NULL;
	}

	return buf->vaddr;
}

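/* Report how many users (driver, mmap mappings, dmabufs) hold the buffer. */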
static unsigned int vb2_vmalloc_num_users(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	return refcount_read(&buf->refcount);
}

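/*
 * Map the whole vmalloc buffer into a userspace VMA and install the
 * common vm_area operations so that the mapping holds a reference on
 * the buffer.
 */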
static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		pr_err("No memory to map\n");
		return -EINVAL;
	}

	ret = remap_vmalloc_range(vma, buf->vaddr, 0);
	if (ret) {
		pr_err("Remapping vmalloc memory, error: %d\n", ret);
		return ret;
	}

	/*
	 * Make sure that vm_areas for 2 buffers won't be merged together
	 */
	vma->vm_flags |= VM_DONTEXPAND;

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

#ifdef CONFIG_HAS_DMA
/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_vmalloc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

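/*
 * Build a scatterlist describing the vmalloc pages for a new dmabuf
 * attachment; the actual DMA mapping is deferred to the map callback.
 */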
static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf,
					 struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_vmalloc_attachment *attach;
	struct vb2_vmalloc_buf *buf = dbuf->priv;
	int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;
	struct sg_table *sgt;
	struct scatterlist *sg;
	void *vaddr = buf->vaddr;
	int ret;
	int i;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return ret;
	}
	for_each_sgtable_sg(sgt, sg, i) {
		struct page *page = vmalloc_to_page(vaddr);

		if (!page) {
			sg_free_table(sgt);
			kfree(attach);
			return -ENOMEM;
		}
		sg_set_page(sg, page, PAGE_SIZE, 0);
		vaddr += PAGE_SIZE;
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;
	return 0;
}

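/* Unmap the scatterlist (if mapped) and free the attachment. */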
static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf,
					  struct dma_buf_attachment *db_attach)
{
	struct vb2_vmalloc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

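/*
 * DMA-map the attachment's scatterlist in the requested direction,
 * reusing the previous mapping when the direction is unchanged.
 */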
static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_vmalloc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
					 struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_vmalloc_get_dmabuf */
	vb2_vmalloc_put(dbuf->priv);
}

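/*
 * The vmalloc buffer is permanently mapped in the kernel, so vmap
 * only has to report its address.
 */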
static int vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf, struct dma_buf_map *map)
{
	struct vb2_vmalloc_buf *buf = dbuf->priv;

	dma_buf_map_set_vaddr(map, buf->vaddr);

	return 0;
}

static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf,
				       struct vm_area_struct *vma)
{
	return vb2_vmalloc_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
	.attach = vb2_vmalloc_dmabuf_ops_attach,
	.detach = vb2_vmalloc_dmabuf_ops_detach,
	.map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
	.vmap = vb2_vmalloc_dmabuf_ops_vmap,
	.mmap = vb2_vmalloc_dmabuf_ops_mmap,
	.release = vb2_vmalloc_dmabuf_ops_release,
};

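/*
 * Export the buffer as a dmabuf. The dmabuf takes its own reference
 * on the buffer, dropped again in the release callback.
 */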
static struct dma_buf *vb2_vmalloc_get_dmabuf(struct vb2_buffer *vb,
					      void *buf_priv,
					      unsigned long flags)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_vmalloc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (WARN_ON(!buf->vaddr))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}
#endif /* CONFIG_HAS_DMA */

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

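/*
 * Importer side: create a kernel mapping of the attached dmabuf so
 * the buffer contents can be reached through vb2_vmalloc_vaddr().
 */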
static int vb2_vmalloc_map_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;
	struct dma_buf_map map;
	int ret;

	ret = dma_buf_vmap(buf->dbuf, &map);
	if (ret)
		return -EFAULT;
	buf->vaddr = map.vaddr;

	return 0;
}

static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;
	struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(buf->vaddr);

	dma_buf_vunmap(buf->dbuf, &map);
	buf->vaddr = NULL;
}

static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;
	struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(buf->vaddr);

	if (buf->vaddr)
		dma_buf_vunmap(buf->dbuf, &map);

	kfree(buf);
}

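/*
 * Importer side: wrap an imported dmabuf; no mapping is created until
 * map_dmabuf is called.
 */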
static void *vb2_vmalloc_attach_dmabuf(struct vb2_buffer *vb,
				       struct device *dev,
				       struct dma_buf *dbuf,
				       unsigned long size)
{
	struct vb2_vmalloc_buf *buf;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dbuf = dbuf;
	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->size = size;

	return buf;
}

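/*
 * A minimal usage sketch (illustrative driver code, not part of this
 * file): a driver selects this allocator by pointing its queue at
 * these ops before vb2_queue_init(), e.g.
 *
 *	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
 *	q->mem_ops = &vb2_vmalloc_memops;
 *	ret = vb2_queue_init(q);
 *
 * This allocator suits devices that do not DMA straight into the
 * buffers (e.g. drivers that copy data in software), since vmalloc
 * memory is not guaranteed to be physically contiguous.
 */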
const struct vb2_mem_ops vb2_vmalloc_memops = {
	.alloc		= vb2_vmalloc_alloc,
	.put		= vb2_vmalloc_put,
	.get_userptr	= vb2_vmalloc_get_userptr,
	.put_userptr	= vb2_vmalloc_put_userptr,
#ifdef CONFIG_HAS_DMA
	.get_dmabuf	= vb2_vmalloc_get_dmabuf,
#endif
	.map_dmabuf	= vb2_vmalloc_map_dmabuf,
	.unmap_dmabuf	= vb2_vmalloc_unmap_dmabuf,
	.attach_dmabuf	= vb2_vmalloc_attach_dmabuf,
	.detach_dmabuf	= vb2_vmalloc_detach_dmabuf,
	.vaddr		= vb2_vmalloc_vaddr,
	.mmap		= vb2_vmalloc_mmap,
	.num_users	= vb2_vmalloc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_vmalloc_memops);

MODULE_DESCRIPTION("vmalloc memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");