/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_buf {
	struct device			*dev;
	void				*vaddr;
	unsigned long			size;
	void				*cookie;
	dma_addr_t			dma_addr;
	unsigned long			attrs;
	enum dma_data_direction		dma_dir;
	struct sg_table			*dma_sgt;
	struct frame_vector		*vec;

	/* MMAP related */
	struct vb2_vmarea_handler	handler;
	atomic_t			refcount;
	struct sg_table			*sgt_base;

	/* DMABUF related */
	struct dma_buf_attachment	*db_attach;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/

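/*
 * vb2_dc_get_contiguous_size() below returns the length of the initial
 * contiguous run of a DMA-mapped scatterlist. As an illustrative worked
 * example (addresses made up): for mapped segments (0x1000, 4096),
 * (0x2000, 4096), (0x4000, 4096), the first two are adjacent
 * (0x1000 + 4096 == 0x2000) but the third is not (0x2000 + 4096 != 0x4000),
 * so the function returns 8192.
 */
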
static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sg(sgt->sgl, s, sgt->nents, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected = sg_dma_address(s) + sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

static void *vb2_dc_cookie(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return &buf->dma_addr;
}

static void *vb2_dc_vaddr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!buf->vaddr && buf->db_attach)
		buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);

	return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}

static void vb2_dc_prepare(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
			       buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
}

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

static void vb2_dc_put(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!atomic_dec_and_test(&buf->refcount))
		return;

	if (buf->sgt_base) {
		sg_free_table(buf->sgt_base);
		kfree(buf->sgt_base);
	}
	dma_free_attrs(buf->dev, buf->size, buf->cookie, buf->dma_addr,
		       buf->attrs);
	put_device(buf->dev);
	kfree(buf);
}

static void *vb2_dc_alloc(struct device *dev, unsigned long attrs,
			  unsigned long size, enum dma_data_direction dma_dir,
			  gfp_t gfp_flags)
{
	struct vb2_dc_buf *buf;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	if (attrs)
		buf->attrs = attrs;
	buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr,
					GFP_KERNEL | gfp_flags, buf->attrs);
	if (!buf->cookie) {
		dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	if ((buf->attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
		buf->vaddr = buf->cookie;

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);
	buf->size = size;
	buf->dma_dir = dma_dir;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dc_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->refcount);

	return buf;
}

static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		printk(KERN_ERR "No buffer to map\n");
		return -EINVAL;
	}

	/*
	 * dma_mmap_* uses vm_pgoff as an in-buffer offset, but we want to
	 * map the whole buffer, so reset it to zero.
	 */
	vma->vm_pgoff = 0;

	ret = dma_mmap_attrs(buf->dev, vma, buf->cookie,
		buf->dma_addr, buf->size, buf->attrs);

	if (ret) {
		pr_err("Remapping memory failed, error: %d\n", ret);
		return ret;
	}

	vma->vm_flags		|= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data	= &buf->handler;
	vma->vm_ops		= &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
		__func__, (unsigned long)buf->dma_addr, vma->vm_start,
		buf->size);

	return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dc_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dc_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/*
	 * Copy the buf->sgt_base scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->sgt_base->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

static struct sg_table *vb2_dc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
				dma_dir);
	if (!sgt->nents) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dc_get_dmabuf */
	vb2_dc_put(dbuf->priv);
}

static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr ? buf->vaddr + pgnum * PAGE_SIZE : NULL;
}

static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_dc_mmap(dbuf->priv, vma);
}

static struct dma_buf_ops vb2_dc_dmabuf_ops = {
	.attach = vb2_dc_dmabuf_ops_attach,
	.detach = vb2_dc_dmabuf_ops_detach,
	.map_dma_buf = vb2_dc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
	.kmap = vb2_dc_dmabuf_ops_kmap,
	.kmap_atomic = vb2_dc_dmabuf_ops_kmap,
	.vmap = vb2_dc_dmabuf_ops_vmap,
	.mmap = vb2_dc_dmabuf_ops_mmap,
	.release = vb2_dc_dmabuf_ops_release,
};

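/*
 * For reference, the importer-side sequence that exercises the ops above is
 * roughly as follows (an illustrative sketch; "importer_dev" and "dbuf" are
 * placeholders and error handling is omitted):
 *
 *	attach = dma_buf_attach(dbuf, importer_dev);
 *	sgt = dma_buf_map_attachment(attach, DMA_FROM_DEVICE);
 *	...
 *	dma_buf_unmap_attachment(attach, sgt, DMA_FROM_DEVICE);
 *	dma_buf_detach(dbuf, attach);
 *
 * This is the same sequence vb2_dc_attach_dmabuf() and vb2_dc_map_dmabuf()
 * below perform when this allocator acts as an importer.
 */
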
static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
	int ret;
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		dev_err(buf->dev, "failed to alloc sg table\n");
		return NULL;
	}

	ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr,
		buf->size, buf->attrs);
	if (ret < 0) {
		dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
		kfree(sgt);
		return NULL;
	}

	return sgt;
}

static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_dc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (!buf->sgt_base)
		buf->sgt_base = vb2_dc_get_base_sgt(buf);

	if (WARN_ON(!buf->sgt_base))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	atomic_inc(&buf->refcount);

	return dbuf;
}

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

static void vb2_dc_put_userptr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;
	int i;
	struct page **pages;

	if (sgt) {
		/*
		 * No need to sync to CPU, it's already synced to the CPU
		 * since the finish() memop will have been called before this.
		 */
		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
		pages = frame_vector_pages(buf->vec);
		/* sgt should exist only if vector contains pages... */
		BUG_ON(IS_ERR(pages));
		for (i = 0; i < frame_vector_count(buf->vec); i++)
			set_page_dirty_lock(pages[i]);
		sg_free_table(sgt);
		kfree(sgt);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

/*
 * For some kinds of reserved memory there might be no struct page available,
 * so all that can be done to support such 'pages' is to try to convert the
 * pfn to a DMA address, or as a last resort just assume that
 * DMA address == physical address (as earlier versions of
 * videobuf2-dma-contig did).
 */

#ifdef __arch_pfn_to_dma
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__arch_pfn_to_dma(dev, pfn);
}
#elif defined(__pfn_to_bus)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_bus(pfn);
}
#elif defined(__pfn_to_phys)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_phys(pfn);
}
#else
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	/* really, we cannot do anything better at this point */
	return (dma_addr_t)(pfn) << PAGE_SHIFT;
}
#endif
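
/*
 * Worked example for the last-resort fallback above: with 4 KiB pages
 * (PAGE_SHIFT == 12), pfn 0x12345 yields a DMA address of
 * 0x12345 << 12 == 0x12345000, i.e. the physical address of the page.
 * This is only correct when the device's DMA address space is
 * identity-mapped to physical memory.
 */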

static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dc_buf *buf;
	struct frame_vector *vec;
	unsigned long offset;
	int n_pages, i;
	int ret = 0;
	struct sg_table *sgt;
	unsigned long contig_size;
	unsigned long dma_align = dma_get_cache_alignment();

	/* Only cache aligned DMA transfers are reliable */
	if (!IS_ALIGNED(vaddr | size, dma_align)) {
		pr_debug("user data must be aligned to %lu bytes\n", dma_align);
		return ERR_PTR(-EINVAL);
	}
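
	/*
	 * Worked example for the check above: OR-ing vaddr and size folds
	 * their low bits together, so with dma_align == 64 a vaddr of 0x1000
	 * and a size of 0x1000 pass, while a vaddr of 0x1010 or a size of
	 * 0x1008 would be rejected, since a bit below bit 6 is then set.
	 */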

	if (!size) {
		pr_debug("size is zero\n");
		return ERR_PTR(-EINVAL);
	}

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	buf->dma_dir = dma_dir;

	offset = vaddr & ~PAGE_MASK;
	vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE);
	if (IS_ERR(vec)) {
		ret = PTR_ERR(vec);
		goto fail_buf;
	}
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	ret = frame_vector_to_pages(vec);
	if (ret < 0) {
		unsigned long *nums = frame_vector_pfns(vec);

		/*
		 * Failed to convert to pages... Check the memory is physically
		 * contiguous and use direct mapping
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i-1] + 1 != nums[i])
				goto fail_pfnvec;
		buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, nums[0]);
		goto out;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		pr_err("failed to allocate sg table\n");
		ret = -ENOMEM;
		goto fail_pfnvec;
	}

	ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), n_pages,
		offset, size, GFP_KERNEL);
	if (ret) {
		pr_err("failed to initialize sg table\n");
		goto fail_sgt;
	}

	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (sgt->nents <= 0) {
		pr_err("failed to map scatterlist\n");
		ret = -EIO;
		goto fail_sgt_init;
	}

	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < size) {
		pr_err("contiguous mapping is too small %lu/%lu\n",
			contig_size, size);
		ret = -EFAULT;
		goto fail_map_sg;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
out:
	buf->size = size;

	return buf;

fail_map_sg:
	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
			   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);

fail_sgt_init:
	sg_free_table(sgt);

fail_sgt:
	kfree(sgt);

fail_pfnvec:
	vb2_destroy_framevec(vec);

fail_buf:
	kfree(buf);

	return ERR_PTR(ret);
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_dc_map_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt;
	unsigned long contig_size;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	/* checking if dmabuf is big enough to store contiguous chunk */
	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < buf->size) {
		pr_err("contiguous chunk is too small %lu/%lu b\n",
			contig_size, buf->size);
		dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
		return -EFAULT;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a not attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_addr = 0;
	buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach mapped buffer */
	if (WARN_ON(buf->dma_addr))
		vb2_dc_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

static void *vb2_dc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dc_buf *buf;
	struct dma_buf_attachment *dba;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = dma_dir;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
	.alloc		= vb2_dc_alloc,
	.put		= vb2_dc_put,
	.get_dmabuf	= vb2_dc_get_dmabuf,
	.cookie		= vb2_dc_cookie,
	.vaddr		= vb2_dc_vaddr,
	.mmap		= vb2_dc_mmap,
	.get_userptr	= vb2_dc_get_userptr,
	.put_userptr	= vb2_dc_put_userptr,
	.prepare	= vb2_dc_prepare,
	.finish		= vb2_dc_finish,
	.map_dmabuf	= vb2_dc_map_dmabuf,
	.unmap_dmabuf	= vb2_dc_unmap_dmabuf,
	.attach_dmabuf	= vb2_dc_attach_dmabuf,
	.detach_dmabuf	= vb2_dc_detach_dmabuf,
	.num_users	= vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);
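
/*
 * Typical use (an illustrative sketch, not taken from this file): a V4L2
 * driver selects this allocator by pointing its vb2_queue at these memops
 * before calling vb2_queue_init(). The surrounding queue setup is
 * driver-specific and only indicative here:
 *
 *	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
 *	q->mem_ops = &vb2_dma_contig_memops;
 *	ret = vb2_queue_init(q);
 */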

/**
 * vb2_dma_contig_set_max_seg_size() - configure DMA max segment size
 * @dev:	device for configuring DMA parameters
 * @size:	DMA max segment size to set
 *
 * To allow mapping the scatter-list into a single chunk in the DMA
 * address space, the device is required to have the DMA max segment
 * size parameter set to a value larger than the buffer size. Otherwise,
 * the DMA-mapping subsystem will split the mapping into max segment
 * size chunks. This function sets the DMA max segment size
 * parameter to let DMA-mapping map a buffer as a single chunk in DMA
 * address space.
 * This code assumes that the DMA-mapping subsystem will merge all
 * scatterlist segments if this is really possible (for example when
 * an IOMMU is available and enabled).
 * Ideally, this parameter should be set by the generic bus code, but it
 * is left at the default 64KiB value due to historical limitations in
 * other subsystems (like limited USB host drivers) and there is no good
 * place to set it to the proper value.
 * This function should be called from drivers known to operate on
 * platforms with an IOMMU and provide access to shared buffers (either
 * USERPTR or DMABUF). This should be done before initializing the
 * videobuf2 queue.
 */
int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (!dev->dma_parms) {
		dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
		if (!dev->dma_parms)
			return -ENOMEM;
	}
	if (dma_get_max_seg_size(dev) < size)
		return dma_set_max_seg_size(dev, size);

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_set_max_seg_size);

/*
 * vb2_dma_contig_clear_max_seg_size() - release resources for DMA parameters
 * @dev:	device for configuring DMA parameters
 *
 * This function releases resources allocated to configure DMA parameters
 * (see vb2_dma_contig_set_max_seg_size() function). It should be called from
 * device drivers on driver remove.
 */
void vb2_dma_contig_clear_max_seg_size(struct device *dev)
{
	kfree(dev->dma_parms);
	dev->dma_parms = NULL;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_clear_max_seg_size);
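
/*
 * A hedged usage sketch (names such as "pdev" and the probe/remove pairing
 * are assumptions for illustration, not part of this file): a driver on an
 * IOMMU-backed platform would raise the segment size limit in probe and
 * release the parameters in remove, e.g.
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		int ret;
 *
 *		ret = vb2_dma_contig_set_max_seg_size(&pdev->dev,
 *						      DMA_BIT_MASK(32));
 *		if (ret)
 *			return ret;
 *		...
 *	}
 *
 *	static int my_remove(struct platform_device *pdev)
 *	{
 *		...
 *		vb2_dma_contig_clear_max_seg_size(&pdev->dev);
 *		return 0;
 *	}
 */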

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");