/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_buf {
        struct device *dev;
        void *vaddr;
        unsigned long size;
        void *cookie;
        dma_addr_t dma_addr;
        unsigned long attrs;
        enum dma_data_direction dma_dir;
        struct sg_table *dma_sgt;
        struct frame_vector *vec;

        /* MMAP related */
        struct vb2_vmarea_handler handler;
        refcount_t refcount;
        struct sg_table *sgt_base;

        /* DMABUF related */
        struct dma_buf_attachment *db_attach;

        struct vb2_buffer *vb;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/

static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
        struct scatterlist *s;
        dma_addr_t expected = sg_dma_address(sgt->sgl);
        unsigned int i;
        unsigned long size = 0;

        for_each_sgtable_dma_sg(sgt, s, i) {
                if (sg_dma_address(s) != expected)
                        break;
                expected += sg_dma_len(s);
                size += sg_dma_len(s);
        }
        return size;
}

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

static void *vb2_dc_cookie(struct vb2_buffer *vb, void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        return &buf->dma_addr;
}

static void *vb2_dc_vaddr(struct vb2_buffer *vb, void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct dma_buf_map map;
        int ret;

        if (!buf->vaddr && buf->db_attach) {
                ret = dma_buf_vmap(buf->db_attach->dmabuf, &map);
                buf->vaddr = ret ? NULL : map.vaddr;
        }

        return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        return refcount_read(&buf->refcount);
}

static void vb2_dc_prepare(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;

        if (!sgt)
                return;

        dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;

        if (!sgt)
                return;

        dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
}

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

static void vb2_dc_put(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        if (!refcount_dec_and_test(&buf->refcount))
                return;

        if (buf->sgt_base) {
                sg_free_table(buf->sgt_base);
                kfree(buf->sgt_base);
        }
        dma_free_attrs(buf->dev, buf->size, buf->cookie, buf->dma_addr,
                       buf->attrs);
        put_device(buf->dev);
        kfree(buf);
}

static void *vb2_dc_alloc(struct vb2_buffer *vb,
                          struct device *dev,
                          unsigned long size)
{
        struct vb2_dc_buf *buf;

        if (WARN_ON(!dev))
                return ERR_PTR(-EINVAL);

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->attrs = vb->vb2_queue->dma_attrs;
        buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr,
                                      GFP_KERNEL | vb->vb2_queue->gfp_flags,
                                      buf->attrs);
        if (!buf->cookie) {
                dev_err(dev, "dma_alloc_coherent of size %lu failed\n", size);
                kfree(buf);
                return ERR_PTR(-ENOMEM);
        }

        if ((buf->attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
                buf->vaddr = buf->cookie;

        /* Prevent the device from being released while the buffer is used */
        buf->dev = get_device(dev);
        buf->size = size;
        buf->dma_dir = vb->vb2_queue->dma_dir;

        buf->handler.refcount = &buf->refcount;
        buf->handler.put = vb2_dc_put;
        buf->handler.arg = buf;
        buf->vb = vb;

        refcount_set(&buf->refcount, 1);

        return buf;
}

static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
        struct vb2_dc_buf *buf = buf_priv;
        int ret;

        if (!buf) {
                printk(KERN_ERR "No buffer to map\n");
                return -EINVAL;
        }

        ret = dma_mmap_attrs(buf->dev, vma, buf->cookie,
                             buf->dma_addr, buf->size, buf->attrs);

        if (ret) {
                pr_err("Remapping memory failed, error: %d\n", ret);
                return ret;
        }

        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_private_data = &buf->handler;
        vma->vm_ops = &vb2_common_vm_ops;

        vma->vm_ops->open(vma);

        pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %lu\n",
                 __func__, (unsigned long)buf->dma_addr, vma->vm_start,
                 buf->size);

        return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dc_attachment {
        struct sg_table sgt;
        enum dma_data_direction dma_dir;
};

static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf,
                                    struct dma_buf_attachment *dbuf_attach)
{
        struct vb2_dc_attachment *attach;
        unsigned int i;
        struct scatterlist *rd, *wr;
        struct sg_table *sgt;
        struct vb2_dc_buf *buf = dbuf->priv;
        int ret;

        attach = kzalloc(sizeof(*attach), GFP_KERNEL);
        if (!attach)
                return -ENOMEM;

        sgt = &attach->sgt;
        /* Copy the buf->sgt_base scatter list to the attachment, as we can't
         * map the same scatter list to multiple attachments at the same time.
         */
        ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
        if (ret) {
                kfree(attach);
                return -ENOMEM;
        }

        rd = buf->sgt_base->sgl;
        wr = sgt->sgl;
        for (i = 0; i < sgt->orig_nents; ++i) {
                sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
                rd = sg_next(rd);
                wr = sg_next(wr);
        }

        attach->dma_dir = DMA_NONE;
        dbuf_attach->priv = attach;

        return 0;
}

static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
                                     struct dma_buf_attachment *db_attach)
{
        struct vb2_dc_attachment *attach = db_attach->priv;
        struct sg_table *sgt;

        if (!attach)
                return;

        sgt = &attach->sgt;

        /* release the scatterlist cache */
        if (attach->dma_dir != DMA_NONE)
                /*
                 * Cache sync can be skipped here, as the vb2_dc memory is
                 * allocated from device coherent memory, which means the
                 * memory locations do not require any explicit cache
                 * maintenance prior or after being used by the device.
                 */
                dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir,
                                  DMA_ATTR_SKIP_CPU_SYNC);
        sg_free_table(sgt);
        kfree(attach);
        db_attach->priv = NULL;
}

static struct sg_table *vb2_dc_dmabuf_ops_map(
        struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
        struct vb2_dc_attachment *attach = db_attach->priv;
        /* stealing dmabuf mutex to serialize map/unmap operations */
        struct mutex *lock = &db_attach->dmabuf->lock;
        struct sg_table *sgt;

        mutex_lock(lock);

        sgt = &attach->sgt;
        /* return previously mapped sg table */
        if (attach->dma_dir == dma_dir) {
                mutex_unlock(lock);
                return sgt;
        }

        /* release any previous cache */
        if (attach->dma_dir != DMA_NONE) {
                dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir,
                                  DMA_ATTR_SKIP_CPU_SYNC);
                attach->dma_dir = DMA_NONE;
        }

        /*
         * mapping to the client with new direction, no cache sync
         * required see comment in vb2_dc_dmabuf_ops_detach()
         */
        if (dma_map_sgtable(db_attach->dev, sgt, dma_dir,
                            DMA_ATTR_SKIP_CPU_SYNC)) {
                pr_err("failed to map scatterlist\n");
                mutex_unlock(lock);
                return ERR_PTR(-EIO);
        }

        attach->dma_dir = dma_dir;

        mutex_unlock(lock);

        return sgt;
}

static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
                                    struct sg_table *sgt, enum dma_data_direction dma_dir)
{
        /* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
        /* drop reference obtained in vb2_dc_get_dmabuf */
        vb2_dc_put(dbuf->priv);
}

static int
vb2_dc_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
                                   enum dma_data_direction direction)
{
        return 0;
}

static int
vb2_dc_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
                                 enum dma_data_direction direction)
{
        return 0;
}

static int vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf, struct dma_buf_map *map)
{
        struct vb2_dc_buf *buf = dbuf->priv;

        dma_buf_map_set_vaddr(map, buf->vaddr);

        return 0;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
                                  struct vm_area_struct *vma)
{
        return vb2_dc_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_dc_dmabuf_ops = {
        .attach = vb2_dc_dmabuf_ops_attach,
        .detach = vb2_dc_dmabuf_ops_detach,
        .map_dma_buf = vb2_dc_dmabuf_ops_map,
        .unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
        .begin_cpu_access = vb2_dc_dmabuf_ops_begin_cpu_access,
        .end_cpu_access = vb2_dc_dmabuf_ops_end_cpu_access,
        .vmap = vb2_dc_dmabuf_ops_vmap,
        .mmap = vb2_dc_dmabuf_ops_mmap,
        .release = vb2_dc_dmabuf_ops_release,
};

static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
        int ret;
        struct sg_table *sgt;

        sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt) {
                dev_err(buf->dev, "failed to alloc sg table\n");
                return NULL;
        }

        ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr,
                                    buf->size, buf->attrs);
        if (ret < 0) {
                dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
                kfree(sgt);
                return NULL;
        }

        return sgt;
}

static struct dma_buf *vb2_dc_get_dmabuf(struct vb2_buffer *vb,
                                         void *buf_priv,
                                         unsigned long flags)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct dma_buf *dbuf;
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &vb2_dc_dmabuf_ops;
        exp_info.size = buf->size;
        exp_info.flags = flags;
        exp_info.priv = buf;

        if (!buf->sgt_base)
                buf->sgt_base = vb2_dc_get_base_sgt(buf);

        if (WARN_ON(!buf->sgt_base))
                return NULL;

        dbuf = dma_buf_export(&exp_info);
        if (IS_ERR(dbuf))
                return NULL;

        /* dmabuf keeps reference to vb2 buffer */
        refcount_inc(&buf->refcount);

        return dbuf;
}

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

static void vb2_dc_put_userptr(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;
        int i;
        struct page **pages;

        if (sgt) {
                /*
                 * No need to sync to CPU, it's already synced to the CPU
                 * since the finish() memop will have been called before this.
                 */
                dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir,
                                  DMA_ATTR_SKIP_CPU_SYNC);
                pages = frame_vector_pages(buf->vec);
                /* sgt should exist only if vector contains pages... */
                BUG_ON(IS_ERR(pages));
                if (buf->dma_dir == DMA_FROM_DEVICE ||
                    buf->dma_dir == DMA_BIDIRECTIONAL)
                        for (i = 0; i < frame_vector_count(buf->vec); i++)
                                set_page_dirty_lock(pages[i]);
                sg_free_table(sgt);
                kfree(sgt);
        } else {
                dma_unmap_resource(buf->dev, buf->dma_addr, buf->size,
                                   buf->dma_dir, 0);
        }
        vb2_destroy_framevec(buf->vec);
        kfree(buf);
}

static void *vb2_dc_get_userptr(struct vb2_buffer *vb, struct device *dev,
                                unsigned long vaddr, unsigned long size)
{
        struct vb2_dc_buf *buf;
        struct frame_vector *vec;
        unsigned int offset;
        int n_pages, i;
        int ret = 0;
        struct sg_table *sgt;
        unsigned long contig_size;
        unsigned long dma_align = dma_get_cache_alignment();

        /* Only cache aligned DMA transfers are reliable */
        if (!IS_ALIGNED(vaddr | size, dma_align)) {
                pr_debug("user data must be aligned to %lu bytes\n", dma_align);
                return ERR_PTR(-EINVAL);
        }

        if (!size) {
                pr_debug("size is zero\n");
                return ERR_PTR(-EINVAL);
        }

        if (WARN_ON(!dev))
                return ERR_PTR(-EINVAL);

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dev = dev;
        buf->dma_dir = vb->vb2_queue->dma_dir;
        buf->vb = vb;

        offset = lower_32_bits(offset_in_page(vaddr));
        vec = vb2_create_framevec(vaddr, size);
        if (IS_ERR(vec)) {
                ret = PTR_ERR(vec);
                goto fail_buf;
        }
        buf->vec = vec;
        n_pages = frame_vector_count(vec);
        ret = frame_vector_to_pages(vec);
        if (ret < 0) {
                unsigned long *nums = frame_vector_pfns(vec);

                /*
                 * Failed to convert to pages... Check the memory is physically
                 * contiguous and use direct mapping
                 */
                for (i = 1; i < n_pages; i++)
                        if (nums[i-1] + 1 != nums[i])
                                goto fail_pfnvec;
                buf->dma_addr = dma_map_resource(buf->dev,
                                __pfn_to_phys(nums[0]), size, buf->dma_dir, 0);
                if (dma_mapping_error(buf->dev, buf->dma_addr)) {
                        ret = -ENOMEM;
                        goto fail_pfnvec;
                }
                goto out;
        }

        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt) {
                pr_err("failed to allocate sg table\n");
                ret = -ENOMEM;
                goto fail_pfnvec;
        }

        ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), n_pages,
                                        offset, size, GFP_KERNEL);
        if (ret) {
                pr_err("failed to initialize sg table\n");
                goto fail_sgt;
        }

        /*
         * No need to sync to the device, this will happen later when the
         * prepare() memop is called.
         */
        if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
                            DMA_ATTR_SKIP_CPU_SYNC)) {
                pr_err("failed to map scatterlist\n");
                ret = -EIO;
                goto fail_sgt_init;
        }

        contig_size = vb2_dc_get_contiguous_size(sgt);
        if (contig_size < size) {
                pr_err("contiguous mapping is too small %lu/%lu\n",
                       contig_size, size);
                ret = -EFAULT;
                goto fail_map_sg;
        }

        buf->dma_addr = sg_dma_address(sgt->sgl);
        buf->dma_sgt = sgt;
out:
        buf->size = size;

        return buf;

fail_map_sg:
        dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);

fail_sgt_init:
        sg_free_table(sgt);

fail_sgt:
        kfree(sgt);

fail_pfnvec:
        vb2_destroy_framevec(vec);

fail_buf:
        kfree(buf);

        return ERR_PTR(ret);
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_dc_map_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;
        struct sg_table *sgt;
        unsigned long contig_size;

        if (WARN_ON(!buf->db_attach)) {
                pr_err("trying to pin a non attached buffer\n");
                return -EINVAL;
        }

        if (WARN_ON(buf->dma_sgt)) {
                pr_err("dmabuf buffer is already pinned\n");
                return 0;
        }

        /* get the associated scatterlist for this buffer */
        sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
        if (IS_ERR(sgt)) {
                pr_err("Error getting dmabuf scatterlist\n");
                return -EINVAL;
        }

        /* checking if dmabuf is big enough to store contiguous chunk */
        contig_size = vb2_dc_get_contiguous_size(sgt);
        if (contig_size < buf->size) {
                pr_err("contiguous chunk is too small %lu/%lu\n",
                       contig_size, buf->size);
                dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
                return -EFAULT;
        }

        buf->dma_addr = sg_dma_address(sgt->sgl);
        buf->dma_sgt = sgt;
        buf->vaddr = NULL;

        return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;
        struct sg_table *sgt = buf->dma_sgt;
        struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(buf->vaddr);

        if (WARN_ON(!buf->db_attach)) {
                pr_err("trying to unpin a not attached buffer\n");
                return;
        }

        if (WARN_ON(!sgt)) {
                pr_err("dmabuf buffer is already unpinned\n");
                return;
        }

        if (buf->vaddr) {
                dma_buf_vunmap(buf->db_attach->dmabuf, &map);
                buf->vaddr = NULL;
        }
        dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

        buf->dma_addr = 0;
        buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;

        /* if vb2 works correctly you should never detach mapped buffer */
        if (WARN_ON(buf->dma_addr))
                vb2_dc_unmap_dmabuf(buf);

        /* detach this attachment */
        dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
        kfree(buf);
}

static void *vb2_dc_attach_dmabuf(struct vb2_buffer *vb, struct device *dev,
                                  struct dma_buf *dbuf, unsigned long size)
{
        struct vb2_dc_buf *buf;
        struct dma_buf_attachment *dba;

        if (dbuf->size < size)
                return ERR_PTR(-EFAULT);

        if (WARN_ON(!dev))
                return ERR_PTR(-EINVAL);

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dev = dev;
        buf->vb = vb;

        /* create attachment for the dmabuf with the user device */
        dba = dma_buf_attach(dbuf, buf->dev);
        if (IS_ERR(dba)) {
                pr_err("failed to attach dmabuf\n");
                kfree(buf);
                return dba;
        }

        buf->dma_dir = vb->vb2_queue->dma_dir;
        buf->size = size;
        buf->db_attach = dba;

        return buf;
}

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
        .alloc = vb2_dc_alloc,
        .put = vb2_dc_put,
        .get_dmabuf = vb2_dc_get_dmabuf,
        .cookie = vb2_dc_cookie,
        .vaddr = vb2_dc_vaddr,
        .mmap = vb2_dc_mmap,
        .get_userptr = vb2_dc_get_userptr,
        .put_userptr = vb2_dc_put_userptr,
        .prepare = vb2_dc_prepare,
        .finish = vb2_dc_finish,
        .map_dmabuf = vb2_dc_map_dmabuf,
        .unmap_dmabuf = vb2_dc_unmap_dmabuf,
        .attach_dmabuf = vb2_dc_attach_dmabuf,
        .detach_dmabuf = vb2_dc_detach_dmabuf,
        .num_users = vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);
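
/*
 * Illustrative sketch, not part of this allocator: a capture driver hands
 * its queue over to these memops by pointing vb2_queue::mem_ops at
 * vb2_dma_contig_memops before calling vb2_queue_init(). The names
 * "my_dev", "mdev" and "my_qops" below are placeholders, not symbols
 * defined by this file.
 *
 *	static int my_init_queue(struct my_dev *mdev)
 *	{
 *		struct vb2_queue *q = &mdev->queue;
 *
 *		q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *		q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
 *		q->dev = mdev->dev;
 *		q->ops = &my_qops;
 *		q->mem_ops = &vb2_dma_contig_memops;
 *		q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
 *		return vb2_queue_init(q);
 *	}
 */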

/**
 * vb2_dma_contig_set_max_seg_size() - configure DMA max segment size
 * @dev:  device for configuring DMA parameters
 * @size: size of DMA max segment size to set
 *
 * To allow mapping the scatter-list into a single chunk in the DMA
 * address space, the device is required to have the DMA max segment
 * size parameter set to a value larger than the buffer size. Otherwise,
 * the DMA-mapping subsystem will split the mapping into max segment
 * size chunks. This function sets the DMA max segment size
 * parameter to let DMA-mapping map a buffer as a single chunk in DMA
 * address space.
 * This code assumes that the DMA-mapping subsystem will merge all
 * scatterlist segments if this is really possible (for example when
 * an IOMMU is available and enabled).
 * Ideally, this parameter should be set by the generic bus code, but it
 * is left at the default 64KiB value due to historical limitations in
 * other subsystems (like limited USB host drivers) and there is no good
 * place to set it to the proper value.
 * This function should be called from drivers that are known to operate
 * on platforms with an IOMMU and provide access to shared buffers (either
 * USERPTR or DMABUF). This should be done before initializing the
 * videobuf2 queue.
 */
int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size)
{
        if (!dev->dma_parms) {
                dev_err(dev, "Failed to set max_seg_size: dma_parms is NULL\n");
                return -ENODEV;
        }
        if (dma_get_max_seg_size(dev) < size)
                return dma_set_max_seg_size(dev, size);

        return 0;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_set_max_seg_size);
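
/*
 * Illustrative sketch, not part of this allocator: a driver on an
 * IOMMU-backed platform would typically raise the limit from its probe()
 * before the vb2 queue is initialized, e.g.:
 *
 *	ret = vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
 *	if (ret)
 *		return ret;
 *
 * where "pdev" stands for the driver's platform_device and DMA_BIT_MASK(32)
 * is only a conventionally large segment size, not a value mandated here.
 */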

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");