/*
 * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-memops.h>
#include <media/videobuf2-dma-sg.h>

static int debug;
module_param(debug, int, 0644);

#define dprintk(level, fmt, arg...)					\
	do {								\
		if (debug >= level)					\
			printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg);	\
	} while (0)

struct vb2_dma_sg_buf {
	struct device			*dev;
	void				*vaddr;
	struct page			**pages;
	struct frame_vector		*vec;
	int				offset;
	enum dma_data_direction		dma_dir;
	struct sg_table			sg_table;
	/*
	 * This will point to sg_table when used with the MMAP or USERPTR
	 * memory model, and to the dma_buf sglist when used with the
	 * DMABUF memory model.
	 */
	struct sg_table			*dma_sgt;
	size_t				size;
	unsigned int			num_pages;
	refcount_t			refcount;
	struct vb2_vmarea_handler	handler;

	struct dma_buf_attachment	*db_attach;

	struct vb2_buffer		*vb;
};

static void vb2_dma_sg_put(void *buf_priv);

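/*
 * Allocate the buffer as a set of physically contiguous chunks: try the
 * highest page order that still fits the remaining size and fall back to
 * progressively lower orders (down to single pages) when allocation fails,
 * so the resulting scatterlist is as compact as memory pressure allows.
 */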
static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
				      gfp_t gfp_flags)
{
	unsigned int last_page = 0;
	unsigned long size = buf->size;

	while (size > 0) {
		struct page *pages;
		int order;
		int i;

		order = get_order(size);
		/* Don't over-allocate */
		if ((PAGE_SIZE << order) > size)
			order--;

		pages = NULL;
		while (!pages) {
			pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |
					    __GFP_NOWARN | gfp_flags, order);
			if (pages)
				break;

			if (order == 0) {
				while (last_page--)
					__free_page(buf->pages[last_page]);
				return -ENOMEM;
			}
			order--;
		}

		split_page(pages, order);
		for (i = 0; i < (1 << order); i++)
			buf->pages[last_page++] = &pages[i];

		size -= PAGE_SIZE << order;
	}

	return 0;
}

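/*
 * MMAP memory model: allocate the pages, build an sg_table covering them
 * and map it for DMA on the queue's device. The buffer starts with a
 * refcount of one, dropped through vb2_dma_sg_put().
 */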
static void *vb2_dma_sg_alloc(struct vb2_buffer *vb, struct device *dev,
			      unsigned long size)
{
	struct vb2_dma_sg_buf *buf;
	struct sg_table *sgt;
	int ret;
	int num_pages;

	if (WARN_ON(!dev) || WARN_ON(!size))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->vaddr = NULL;
	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->offset = 0;
	buf->size = size;
	/* size is already page aligned */
	buf->num_pages = size >> PAGE_SHIFT;
	buf->dma_sgt = &buf->sg_table;

	/*
	 * NOTE: dma-sg allocates memory using the page allocator directly, so
	 * there is no memory consistency guarantee, hence dma-sg ignores DMA
	 * attributes passed from the upper layer.
	 */
	buf->pages = kvmalloc_array(buf->num_pages, sizeof(struct page *),
				    GFP_KERNEL | __GFP_ZERO);
	if (!buf->pages)
		goto fail_pages_array_alloc;

	ret = vb2_dma_sg_alloc_compacted(buf, vb->vb2_queue->gfp_flags);
	if (ret)
		goto fail_pages_alloc;

	ret = sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
					buf->num_pages, 0, size, GFP_KERNEL);
	if (ret)
		goto fail_table_alloc;

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);

	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
			    DMA_ATTR_SKIP_CPU_SYNC))
		goto fail_map;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dma_sg_put;
	buf->handler.arg = buf;
	buf->vb = vb;

	refcount_set(&buf->refcount, 1);

	dprintk(1, "%s: Allocated buffer of %d pages\n",
		__func__, buf->num_pages);
	return buf;

fail_map:
	put_device(buf->dev);
	sg_free_table(buf->dma_sgt);
fail_table_alloc:
	num_pages = buf->num_pages;
	while (num_pages--)
		__free_page(buf->pages[num_pages]);
fail_pages_alloc:
	kvfree(buf->pages);
fail_pages_array_alloc:
	kfree(buf);
	return ERR_PTR(-ENOMEM);
}

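/*
 * Drop one reference; on the last put, unmap the buffer from the device,
 * tear down the kernel mapping and the sg_table, free the pages and
 * release the device reference taken in vb2_dma_sg_alloc().
 */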
static void vb2_dma_sg_put(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = &buf->sg_table;
	int i = buf->num_pages;

	if (refcount_dec_and_test(&buf->refcount)) {
		dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
			buf->num_pages);
		dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir,
				  DMA_ATTR_SKIP_CPU_SYNC);
		if (buf->vaddr)
			vm_unmap_ram(buf->vaddr, buf->num_pages);
		sg_free_table(buf->dma_sgt);
		while (--i >= 0)
			__free_page(buf->pages[i]);
		kvfree(buf->pages);
		put_device(buf->dev);
		kfree(buf);
	}
}

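/*
 * prepare()/finish() memops: hand cache ownership of the buffer to the
 * device before DMA and back to the CPU afterwards. These perform the
 * sync that was skipped with DMA_ATTR_SKIP_CPU_SYNC at map time.
 */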
static void vb2_dma_sg_prepare(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
}

static void vb2_dma_sg_finish(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
}

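/*
 * USERPTR memory model: pin the user pages with a frame vector, wrap them
 * in an sg_table and map that for DMA. The offset of the user address
 * within the first page is kept so that vaddr() can return an exact
 * pointer for non-page-aligned buffers.
 */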
static void *vb2_dma_sg_get_userptr(struct vb2_buffer *vb, struct device *dev,
				    unsigned long vaddr, unsigned long size)
{
	struct vb2_dma_sg_buf *buf;
	struct sg_table *sgt;
	struct frame_vector *vec;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->vaddr = NULL;
	buf->dev = dev;
	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	buf->dma_sgt = &buf->sg_table;
	buf->vb = vb;
	vec = vb2_create_framevec(vaddr, size);
	if (IS_ERR(vec))
		goto userptr_fail_pfnvec;
	buf->vec = vec;

	buf->pages = frame_vector_pages(vec);
	if (IS_ERR(buf->pages))
		goto userptr_fail_sgtable;
	buf->num_pages = frame_vector_count(vec);

	if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
				      buf->num_pages, buf->offset, size, 0))
		goto userptr_fail_sgtable;

	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
			    DMA_ATTR_SKIP_CPU_SYNC))
		goto userptr_fail_map;

	return buf;

userptr_fail_map:
	sg_free_table(&buf->sg_table);
userptr_fail_sgtable:
	vb2_destroy_framevec(vec);
userptr_fail_pfnvec:
	kfree(buf);
	return ERR_PTR(-ENOMEM);
}

/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 *		 be used
 */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = &buf->sg_table;
	int i = buf->num_pages;

	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
		__func__, buf->num_pages);
	dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (buf->vaddr)
		vm_unmap_ram(buf->vaddr, buf->num_pages);
	sg_free_table(buf->dma_sgt);
	/* pages the device may have written to must be marked dirty */
	if (buf->dma_dir == DMA_FROM_DEVICE ||
	    buf->dma_dir == DMA_BIDIRECTIONAL)
		while (--i >= 0)
			set_page_dirty_lock(buf->pages[i]);
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

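/*
 * Return a kernel virtual address for the buffer, creating the mapping on
 * first use: via dma_buf_vmap() for imported DMABUFs, via vm_map_ram()
 * otherwise.
 */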
static void *vb2_dma_sg_vaddr(struct vb2_buffer *vb, void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct dma_buf_map map;
	int ret;

	BUG_ON(!buf);

	if (!buf->vaddr) {
		if (buf->db_attach) {
			ret = dma_buf_vmap(buf->db_attach->dmabuf, &map);
			buf->vaddr = ret ? NULL : map.vaddr;
		} else {
			buf->vaddr = vm_map_ram(buf->pages, buf->num_pages, -1);
		}
	}

	/* add offset in case userptr is not page-aligned */
	return buf->vaddr ? buf->vaddr + buf->offset : NULL;
}

static unsigned int vb2_dma_sg_num_users(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return refcount_read(&buf->refcount);
}

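/*
 * Map the buffer's pages into a userspace VMA and hook up the common
 * vm_operations so the mapping holds a reference on the buffer for its
 * lifetime.
 */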
static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	int err;

	if (!buf) {
		printk(KERN_ERR "No memory to map\n");
		return -EINVAL;
	}

	err = vm_map_pages(vma, buf->pages, buf->num_pages);
	if (err) {
		printk(KERN_ERR "Remapping memory, error: %d\n", err);
		return err;
	}

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dma_sg_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

static int vb2_dma_sg_dmabuf_ops_attach(struct dma_buf *dbuf,
					struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dma_sg_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dma_sg_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/*
	 * Copy the buffer's sg_table scatter list to the attachment, as we
	 * can't map the same scatter list to multiple attachments at the
	 * same time.
	 */
	ret = sg_alloc_table(sgt, buf->dma_sgt->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->dma_sgt->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_dma_sg_dmabuf_ops_detach(struct dma_buf *dbuf,
					 struct dma_buf_attachment *db_attach)
{
	struct vb2_dma_sg_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

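/*
 * Map the attachment's private scatterlist for the importing device. The
 * mapping is cached: a request in the direction already mapped returns
 * the existing sg_table, while a direction change unmaps and remaps.
 */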
static struct sg_table *vb2_dma_sg_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_dma_sg_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_dma_sg_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
					struct sg_table *sgt,
					enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_dma_sg_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dma_sg_get_dmabuf */
	vb2_dma_sg_put(dbuf->priv);
}

static int
vb2_dma_sg_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
				       enum dma_data_direction direction)
{
	struct vb2_dma_sg_buf *buf = dbuf->priv;
	struct sg_table *sgt = buf->dma_sgt;

	dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
	return 0;
}

static int
vb2_dma_sg_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
				     enum dma_data_direction direction)
{
	struct vb2_dma_sg_buf *buf = dbuf->priv;
	struct sg_table *sgt = buf->dma_sgt;

	dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
	return 0;
}

static int vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf,
				      struct dma_buf_map *map)
{
	struct vb2_dma_sg_buf *buf = dbuf->priv;

	dma_buf_map_set_vaddr(map, buf->vaddr);

	return 0;
}

static int vb2_dma_sg_dmabuf_ops_mmap(struct dma_buf *dbuf,
				      struct vm_area_struct *vma)
{
	return vb2_dma_sg_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
	.attach = vb2_dma_sg_dmabuf_ops_attach,
	.detach = vb2_dma_sg_dmabuf_ops_detach,
	.map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
	.begin_cpu_access = vb2_dma_sg_dmabuf_ops_begin_cpu_access,
	.end_cpu_access = vb2_dma_sg_dmabuf_ops_end_cpu_access,
	.vmap = vb2_dma_sg_dmabuf_ops_vmap,
	.mmap = vb2_dma_sg_dmabuf_ops_mmap,
	.release = vb2_dma_sg_dmabuf_ops_release,
};

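/*
 * Export the buffer as a dma_buf. The exported dma_buf holds a reference
 * on the vb2 buffer, dropped in vb2_dma_sg_dmabuf_ops_release().
 */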
static struct dma_buf *vb2_dma_sg_get_dmabuf(struct vb2_buffer *vb,
					     void *buf_priv,
					     unsigned long flags)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_dma_sg_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (WARN_ON(!buf->dma_sgt))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

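/*
 * Importer side: pin the attached dma_buf and borrow its scatterlist.
 * While mapped, buf->dma_sgt points at the exporter's sg_table rather
 * than the local sg_table embedded in the buffer.
 */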
static int vb2_dma_sg_map_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;
	struct sg_table *sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non-attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}

static void vb2_dma_sg_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;
	struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(buf->vaddr);

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a non-attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap(buf->db_attach->dmabuf, &map);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_sgt = NULL;
}

static void vb2_dma_sg_detach_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach a mapped buffer */
	if (WARN_ON(buf->dma_sgt))
		vb2_dma_sg_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

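/*
 * Create an attachment between an imported dma_buf and the queue's
 * device. The scatterlist is not fetched here; that is deferred to
 * vb2_dma_sg_map_dmabuf().
 */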
static void *vb2_dma_sg_attach_dmabuf(struct vb2_buffer *vb, struct device *dev,
				      struct dma_buf *dbuf, unsigned long size)
{
	struct vb2_dma_sg_buf *buf;
	struct dma_buf_attachment *dba;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->size = size;
	buf->db_attach = dba;
	buf->vb = vb;

	return buf;
}

static void *vb2_dma_sg_cookie(struct vb2_buffer *vb, void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return buf->dma_sgt;
}

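/*
 * A driver selects this allocator by pointing its vb2_queue at these
 * memops before vb2_queue_init(), roughly (a sketch, not taken from this
 * file):
 *
 *	q->mem_ops = &vb2_dma_sg_memops;
 *
 * The cookie() memop then exposes the buffer's sg_table to the driver;
 * the vb2_dma_sg_plane_desc() helper in videobuf2-dma-sg.h wraps it.
 */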
const struct vb2_mem_ops vb2_dma_sg_memops = {
	.alloc		= vb2_dma_sg_alloc,
	.put		= vb2_dma_sg_put,
	.get_userptr	= vb2_dma_sg_get_userptr,
	.put_userptr	= vb2_dma_sg_put_userptr,
	.prepare	= vb2_dma_sg_prepare,
	.finish		= vb2_dma_sg_finish,
	.vaddr		= vb2_dma_sg_vaddr,
	.mmap		= vb2_dma_sg_mmap,
	.num_users	= vb2_dma_sg_num_users,
	.get_dmabuf	= vb2_dma_sg_get_dmabuf,
	.map_dmabuf	= vb2_dma_sg_map_dmabuf,
	.unmap_dmabuf	= vb2_dma_sg_unmap_dmabuf,
	.attach_dmabuf	= vb2_dma_sg_attach_dmabuf,
	.detach_dmabuf	= vb2_dma_sg_detach_dmabuf,
	.cookie		= vb2_dma_sg_cookie,
};
EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);

MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
MODULE_AUTHOR("Andrzej Pietrasiewicz");
MODULE_LICENSE("GPL");