/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_buf {
	struct device *dev;
	void *vaddr;
	unsigned long size;
	void *cookie;
	dma_addr_t dma_addr;
	unsigned long attrs;
	enum dma_data_direction dma_dir;
	struct sg_table *dma_sgt;
	struct frame_vector *vec;

	/* MMAP related */
	struct vb2_vmarea_handler handler;
	refcount_t refcount;
	struct sg_table *sgt_base;

	/* DMABUF related */
	struct dma_buf_attachment *db_attach;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/

static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sg(sgt->sgl, s, sgt->nents, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected = sg_dma_address(s) + sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

static void *vb2_dc_cookie(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return &buf->dma_addr;
}

static void *vb2_dc_vaddr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!buf->vaddr && buf->db_attach)
		buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);

	return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return refcount_read(&buf->refcount);
}

static void vb2_dc_prepare(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
			       buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
}

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

static void vb2_dc_put(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!refcount_dec_and_test(&buf->refcount))
		return;

	if (buf->sgt_base) {
		sg_free_table(buf->sgt_base);
		kfree(buf->sgt_base);
	}
	dma_free_attrs(buf->dev, buf->size, buf->cookie, buf->dma_addr,
		       buf->attrs);
	put_device(buf->dev);
	kfree(buf);
}

static void *vb2_dc_alloc(struct device *dev, unsigned long attrs,
			  unsigned long size, enum dma_data_direction dma_dir,
			  gfp_t gfp_flags)
{
	struct vb2_dc_buf *buf;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	if (attrs)
		buf->attrs = attrs;
	buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr,
				      GFP_KERNEL | gfp_flags, buf->attrs);
	if (!buf->cookie) {
		dev_err(dev, "dma_alloc_attrs of size %lu failed\n", size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	if ((buf->attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
		buf->vaddr = buf->cookie;

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);
	buf->size = size;
	buf->dma_dir = dma_dir;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dc_put;
	buf->handler.arg = buf;

	refcount_set(&buf->refcount, 1);

	return buf;
}

static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		printk(KERN_ERR "No buffer to map\n");
		return -EINVAL;
	}

	ret = dma_mmap_attrs(buf->dev, vma, buf->cookie,
			     buf->dma_addr, buf->size, buf->attrs);

	if (ret) {
		pr_err("Remapping memory failed, error: %d\n", ret);
		return ret;
	}

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
		 __func__, (unsigned long)buf->dma_addr, vma->vm_start,
		 buf->size);

	return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dc_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dc_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->sgt_base scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->sgt_base->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		/*
		 * Cache sync can be skipped here, as the vb2_dc memory is
		 * allocated from device coherent memory, which means the
		 * memory locations do not require any explicit cache
		 * maintenance prior or after being used by the device.
		 */
		dma_unmap_sg_attrs(db_attach->dev, sgt->sgl, sgt->orig_nents,
				   attach->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

static struct sg_table *vb2_dc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sg_attrs(db_attach->dev, sgt->sgl, sgt->orig_nents,
				   attach->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
		attach->dma_dir = DMA_NONE;
	}

	/*
	 * mapping to the client with new direction, no cache sync
	 * required; see comment in vb2_dc_dmabuf_ops_detach()
	 */
	sgt->nents = dma_map_sg_attrs(db_attach->dev, sgt->sgl, sgt->orig_nents,
				      dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (!sgt->nents) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dc_get_dmabuf */
	vb2_dc_put(dbuf->priv);
}

static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr ? buf->vaddr + pgnum * PAGE_SIZE : NULL;
}

static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_dc_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_dc_dmabuf_ops = {
	.attach = vb2_dc_dmabuf_ops_attach,
	.detach = vb2_dc_dmabuf_ops_detach,
	.map_dma_buf = vb2_dc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
	.map = vb2_dc_dmabuf_ops_kmap,
	.vmap = vb2_dc_dmabuf_ops_vmap,
	.mmap = vb2_dc_dmabuf_ops_mmap,
	.release = vb2_dc_dmabuf_ops_release,
};

static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
	int ret;
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		dev_err(buf->dev, "failed to alloc sg table\n");
		return NULL;
	}

	ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr,
				    buf->size, buf->attrs);
	if (ret < 0) {
		dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
		kfree(sgt);
		return NULL;
	}

	return sgt;
}

static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_dc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (!buf->sgt_base)
		buf->sgt_base = vb2_dc_get_base_sgt(buf);

	if (WARN_ON(!buf->sgt_base))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

static void vb2_dc_put_userptr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;
	int i;
	struct page **pages;

	if (sgt) {
		/*
		 * No need to sync to CPU, it's already synced to the CPU
		 * since the finish() memop will have been called before this.
		 */
		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
		pages = frame_vector_pages(buf->vec);
		/* sgt should exist only if vector contains pages... */
		BUG_ON(IS_ERR(pages));
		if (buf->dma_dir == DMA_FROM_DEVICE ||
		    buf->dma_dir == DMA_BIDIRECTIONAL)
			for (i = 0; i < frame_vector_count(buf->vec); i++)
				set_page_dirty_lock(pages[i]);
		sg_free_table(sgt);
		kfree(sgt);
	} else {
		dma_unmap_resource(buf->dev, buf->dma_addr, buf->size,
				   buf->dma_dir, 0);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dc_buf *buf;
	struct frame_vector *vec;
	unsigned int offset;
	int n_pages, i;
	int ret = 0;
	struct sg_table *sgt;
	unsigned long contig_size;
	unsigned long dma_align = dma_get_cache_alignment();

	/* Only cache aligned DMA transfers are reliable */
	if (!IS_ALIGNED(vaddr | size, dma_align)) {
		pr_debug("user data must be aligned to %lu bytes\n", dma_align);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		pr_debug("size is zero\n");
		return ERR_PTR(-EINVAL);
	}

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	buf->dma_dir = dma_dir;

	offset = lower_32_bits(offset_in_page(vaddr));
	vec = vb2_create_framevec(vaddr, size);
	if (IS_ERR(vec)) {
		ret = PTR_ERR(vec);
		goto fail_buf;
	}
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	ret = frame_vector_to_pages(vec);
	if (ret < 0) {
		unsigned long *nums = frame_vector_pfns(vec);

		/*
		 * Failed to convert to pages... Check the memory is physically
		 * contiguous and use direct mapping
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i - 1] + 1 != nums[i])
				goto fail_pfnvec;
		buf->dma_addr = dma_map_resource(buf->dev,
				__pfn_to_phys(nums[0]), size, buf->dma_dir, 0);
		if (dma_mapping_error(buf->dev, buf->dma_addr)) {
			ret = -ENOMEM;
			goto fail_pfnvec;
		}
		goto out;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		pr_err("failed to allocate sg table\n");
		ret = -ENOMEM;
		goto fail_pfnvec;
	}

	ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), n_pages,
		offset, size, GFP_KERNEL);
	if (ret) {
		pr_err("failed to initialize sg table\n");
		goto fail_sgt;
	}

	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (sgt->nents <= 0) {
		pr_err("failed to map scatterlist\n");
		ret = -EIO;
		goto fail_sgt_init;
	}

	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < size) {
		pr_err("contiguous mapping is too small %lu/%lu\n",
		       contig_size, size);
		ret = -EFAULT;
		goto fail_map_sg;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
out:
	buf->size = size;

	return buf;

fail_map_sg:
	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
			   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);

fail_sgt_init:
	sg_free_table(sgt);

fail_sgt:
	kfree(sgt);

fail_pfnvec:
	vb2_destroy_framevec(vec);

fail_buf:
	kfree(buf);

	return ERR_PTR(ret);
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_dc_map_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt;
	unsigned long contig_size;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non-attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	/* checking if dmabuf is big enough to store contiguous chunk */
	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < buf->size) {
		pr_err("contiguous chunk is too small %lu/%lu b\n",
		       contig_size, buf->size);
		dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
		return -EFAULT;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin an unattached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_addr = 0;
	buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach a mapped buffer */
	if (WARN_ON(buf->dma_addr))
		vb2_dc_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

static void *vb2_dc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dc_buf *buf;
	struct dma_buf_attachment *dba;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = dma_dir;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
	.alloc = vb2_dc_alloc,
	.put = vb2_dc_put,
	.get_dmabuf = vb2_dc_get_dmabuf,
	.cookie = vb2_dc_cookie,
	.vaddr = vb2_dc_vaddr,
	.mmap = vb2_dc_mmap,
	.get_userptr = vb2_dc_get_userptr,
	.put_userptr = vb2_dc_put_userptr,
	.prepare = vb2_dc_prepare,
	.finish = vb2_dc_finish,
	.map_dmabuf = vb2_dc_map_dmabuf,
	.unmap_dmabuf = vb2_dc_unmap_dmabuf,
	.attach_dmabuf = vb2_dc_attach_dmabuf,
	.detach_dmabuf = vb2_dc_detach_dmabuf,
	.num_users = vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);
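
/*
 * Usage sketch (hypothetical driver code, not built as part of this file):
 * a driver selects this allocator by pointing its vb2 queue at these memops
 * before calling vb2_queue_init(). The queue and platform-device variables
 * below are assumptions; the vb2_queue fields and vb2_dma_contig_memops
 * itself are real:
 *
 *	q->dev = &pdev->dev;	// device used for DMA mapping
 *	q->mem_ops = &vb2_dma_contig_memops;
 *	ret = vb2_queue_init(q);
 */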

/**
 * vb2_dma_contig_set_max_seg_size() - configure DMA max segment size
 * @dev: device for configuring DMA parameters
 * @size: size of DMA max segment size to set
 *
 * To allow mapping the scatter-list into a single chunk in the DMA
 * address space, the device is required to have the DMA max segment
 * size parameter set to a value larger than the buffer size. Otherwise,
 * the DMA-mapping subsystem will split the mapping into max segment
 * size chunks. This function sets the DMA max segment size
 * parameter to let DMA-mapping map a buffer as a single chunk in DMA
 * address space.
 * This code assumes that the DMA-mapping subsystem will merge all
 * scatterlist segments if this is really possible (for example when
 * an IOMMU is available and enabled).
 * Ideally, this parameter should be set by the generic bus code, but it
 * is left with the default 64KiB value due to historical limitations in
 * other subsystems (like limited USB host drivers), and there is no good
 * place to set it to the proper value.
 * This function should be called from drivers that are known to operate
 * on platforms with an IOMMU and that provide access to shared buffers
 * (either USERPTR or DMABUF). This should be done before initializing
 * the videobuf2 queue.
 */
int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (!dev->dma_parms) {
		dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
		if (!dev->dma_parms)
			return -ENOMEM;
	}
	if (dma_get_max_seg_size(dev) < size)
		return dma_set_max_seg_size(dev, size);

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_set_max_seg_size);
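
/*
 * Usage sketch (hypothetical driver code): call this from the driver's
 * probe routine, before the videobuf2 queue is initialized. A 32-bit
 * mask is a common choice; the right value is platform specific:
 *
 *	ret = vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
 *	if (ret)
 *		return ret;
 */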

/*
 * vb2_dma_contig_clear_max_seg_size() - release resources for DMA parameters
 * @dev: device for configuring DMA parameters
 *
 * This function releases resources allocated to configure DMA parameters
 * (see vb2_dma_contig_set_max_seg_size() function). It should be called from
 * device drivers on driver remove.
 */
void vb2_dma_contig_clear_max_seg_size(struct device *dev)
{
	kfree(dev->dma_parms);
	dev->dma_parms = NULL;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_clear_max_seg_size);
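
/*
 * Usage sketch (hypothetical driver code): pair the probe-time
 * vb2_dma_contig_set_max_seg_size() call with this cleanup on remove:
 *
 *	static int example_remove(struct platform_device *pdev)
 *	{
 *		vb2_dma_contig_clear_max_seg_size(&pdev->dev);
 *		return 0;
 *	}
 */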

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");