// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2020 Rockchip Electronics Co., Ltd */

#include <linux/cma.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-memops.h>

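/*
 * Per-buffer bookkeeping for this allocator. MMAP buffers are backed by one
 * physically contiguous block taken from the device's CMA area; USERPTR and
 * DMABUF buffers reuse the same structure, with pages pinned from user space
 * or provided by the exporting driver, respectively.
 */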
struct vb2_dma_sg_buf {
	struct device *dev;
	void *vaddr;
	struct page **pages;
	struct frame_vector *vec;
	int offset;
	unsigned long dma_attrs;
	enum dma_data_direction dma_dir;
	struct sg_table sg_table;
	/*
	 * This will point to sg_table when used with the MMAP or USERPTR
	 * memory model, and to the dma_buf sglist when used with the
	 * DMABUF memory model.
	 */
	struct sg_table *dma_sgt;
	size_t size;
	unsigned int num_pages;
	refcount_t refcount;
	struct vb2_vmarea_handler handler;

	struct dma_buf_attachment *db_attach;
};

static void vb2_dma_sg_put(void *buf_priv);

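/*
 * Allocate the whole buffer as a single physically contiguous block from the
 * device's CMA area and fill buf->pages with pointers to the individual
 * pages of that block.
 */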
static int vb2_dma_sg_alloc_contiguous(struct device *dev,
				       struct vb2_dma_sg_buf *buf,
				       gfp_t gfp_flags)
{
	struct page *page = NULL;
	unsigned int i, align = get_order(buf->size);

	/* cma reserved area */
	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;
	if (dev && dev->cma_area)
		page = cma_alloc(dev->cma_area, buf->num_pages,
				 align, gfp_flags & __GFP_NOWARN);
	if (!page)
		return -ENOMEM;

	for (i = 0; i < buf->num_pages; i++)
		buf->pages[i] = page + i;

	return 0;
}

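/*
 * MMAP allocation: reserve the page pointer array, grab a contiguous CMA
 * block, build an sg_table over it and map it for the device. The CPU sync
 * is skipped here; it is done by the prepare()/finish() memops around each
 * frame.
 */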
static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs,
			      unsigned long size,
			      enum dma_data_direction dma_dir,
			      gfp_t gfp_flags)
{
	struct vb2_dma_sg_buf *buf;
	struct sg_table *sgt;
	int ret;
	int num_pages;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->vaddr = NULL;
	buf->dma_attrs = dma_attrs;
	buf->dma_dir = dma_dir;
	buf->offset = 0;
	buf->size = size;
	/* size is already page aligned */
	buf->num_pages = size >> PAGE_SHIFT;
	buf->dma_sgt = &buf->sg_table;

	buf->pages = kvmalloc_array(buf->num_pages, sizeof(struct page *),
				    GFP_KERNEL | __GFP_ZERO);
	if (!buf->pages)
		goto fail_pages_array_alloc;

	ret = vb2_dma_sg_alloc_contiguous(dev, buf, gfp_flags);
	if (ret)
		goto fail_pages_alloc;

	ret = sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
					buf->num_pages, 0, size, GFP_KERNEL);
	if (ret)
		goto fail_table_alloc;

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);

	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
			    DMA_ATTR_SKIP_CPU_SYNC))
		goto fail_map;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dma_sg_put;
	buf->handler.arg = buf;

	refcount_set(&buf->refcount, 1);

	return buf;

fail_map:
	put_device(buf->dev);
	sg_free_table(buf->dma_sgt);
fail_table_alloc:
	num_pages = buf->num_pages;
	cma_release(dev->cma_area, buf->pages[0], num_pages);
fail_pages_alloc:
	kvfree(buf->pages);
fail_pages_array_alloc:
	kfree(buf);
	return ERR_PTR(-ENOMEM);
}

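/*
 * Drop one reference to an MMAP buffer and, on the last one, undo everything
 * vb2_dma_sg_alloc() did: unmap, free the sg_table, return the block to CMA
 * and release the device reference.
 */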
static void vb2_dma_sg_put(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = &buf->sg_table;
	int i = buf->num_pages;

	if (refcount_dec_and_test(&buf->refcount)) {
		dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir,
				  DMA_ATTR_SKIP_CPU_SYNC);
		if (buf->vaddr)
			vm_unmap_ram(buf->vaddr, buf->num_pages);
		sg_free_table(buf->dma_sgt);
		cma_release(buf->dev->cma_area, buf->pages[0], i);
		kvfree(buf->pages);
		put_device(buf->dev);
		kfree(buf);
	}
}

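/*
 * Cache maintenance around each buffer use: prepare() hands ownership of the
 * buffer to the device before DMA, finish() hands it back to the CPU
 * afterwards. This pairs with DMA_ATTR_SKIP_CPU_SYNC used at map time.
 */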
static void vb2_dma_sg_prepare(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
}

static void vb2_dma_sg_finish(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
}

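/*
 * USERPTR support: pin the user pages with a frame vector, wrap them in an
 * sg_table and map that for the device. The page offset of the user address
 * is remembered so vaddr() can return a pointer to the exact start of the
 * buffer.
 */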
static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr,
				    unsigned long size,
				    enum dma_data_direction dma_dir)
{
	struct vb2_dma_sg_buf *buf;
	struct sg_table *sgt;
	struct frame_vector *vec;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->vaddr = NULL;
	buf->dev = dev;
	buf->dma_dir = dma_dir;
	buf->offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	buf->dma_sgt = &buf->sg_table;
	vec = vb2_create_framevec(vaddr, size);
	if (IS_ERR(vec))
		goto userptr_fail_pfnvec;
	buf->vec = vec;

	buf->pages = frame_vector_pages(vec);
	if (IS_ERR(buf->pages))
		goto userptr_fail_sgtable;
	buf->num_pages = frame_vector_count(vec);

	if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
				      buf->num_pages, buf->offset, size, 0))
		goto userptr_fail_sgtable;

	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
			    DMA_ATTR_SKIP_CPU_SYNC))
		goto userptr_fail_map;

	return buf;

userptr_fail_map:
	sg_free_table(&buf->sg_table);
userptr_fail_sgtable:
	vb2_destroy_framevec(vec);
userptr_fail_pfnvec:
	kfree(buf);
	return ERR_PTR(-ENOMEM);
}

/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 * be used
 */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = &buf->sg_table;
	int i = buf->num_pages;

	dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (buf->vaddr)
		vm_unmap_ram(buf->vaddr, buf->num_pages);
	sg_free_table(buf->dma_sgt);
	if (buf->dma_dir == DMA_FROM_DEVICE ||
	    buf->dma_dir == DMA_BIDIRECTIONAL)
		while (--i >= 0)
			set_page_dirty_lock(buf->pages[i]);
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

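/*
 * Return a kernel virtual address for the buffer, creating the mapping
 * lazily: DMABUF buffers are vmap'ed through the exporter, everything else
 * is mapped with vm_map_ram().
 */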
static void *vb2_dma_sg_vaddr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	WARN_ON(!buf);

	if (!buf->vaddr) {
		if (buf->db_attach)
			buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);
		else
			buf->vaddr = vm_map_ram(buf->pages, buf->num_pages, -1);
	}

	/* add offset in case userptr is not page-aligned */
	return buf->vaddr ? buf->vaddr + buf->offset : NULL;
}

static unsigned int vb2_dma_sg_num_users(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return refcount_read(&buf->refcount);
}

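/*
 * mmap() an MMAP buffer into user space and hook up the common vb2 vm_ops so
 * the mapping holds a reference on the buffer for its whole lifetime.
 */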
static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	int err;

	if (!buf) {
		pr_err("No memory to map\n");
		return -EINVAL;
	}

	err = vm_map_pages(vma, buf->pages, buf->num_pages);
	if (err) {
		pr_err("Remapping memory, error: %d\n", err);
		return err;
	}

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dma_sg_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

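/*
 * Exporter-side attach: each importer gets its own copy of the scatter list,
 * because one sg_table cannot be DMA-mapped for several devices at once. The
 * copy starts out unmapped (DMA_NONE) until the importer calls map_dma_buf.
 */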
static int vb2_dma_sg_dmabuf_ops_attach(struct dma_buf *dbuf,
					struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dma_sg_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dma_sg_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->dma_sgt scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->dma_sgt->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->dma_sgt->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_dma_sg_dmabuf_ops_detach(struct dma_buf *dbuf,
					 struct dma_buf_attachment *db_attach)
{
	struct vb2_dma_sg_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

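/*
 * Map the per-attachment scatter list for the importing device. The mapping
 * is cached: if the requested direction matches the current one the existing
 * sg_table is returned, otherwise it is unmapped and remapped with the new
 * direction.
 */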
static struct sg_table *vb2_dma_sg_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_dma_sg_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_dma_sg_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
					struct sg_table *sgt,
					enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_dma_sg_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dma_sg_get_dmabuf */
	vb2_dma_sg_put(dbuf->priv);
}

static int
vb2_dma_sg_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
				       enum dma_data_direction direction)
{
	struct vb2_dma_sg_buf *buf = dbuf->priv;
	struct sg_table *sgt = buf->dma_sgt;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
	return 0;
}

static int
vb2_dma_sg_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
				     enum dma_data_direction direction)
{
	struct vb2_dma_sg_buf *buf = dbuf->priv;
	struct sg_table *sgt = buf->dma_sgt;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
	return 0;
}

static void *vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_dma_sg_buf *buf = dbuf->priv;

	return vb2_dma_sg_vaddr(buf);
}

static int vb2_dma_sg_dmabuf_ops_mmap(struct dma_buf *dbuf,
				      struct vm_area_struct *vma)
{
	return vb2_dma_sg_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
	.attach = vb2_dma_sg_dmabuf_ops_attach,
	.detach = vb2_dma_sg_dmabuf_ops_detach,
	.map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
	.begin_cpu_access = vb2_dma_sg_dmabuf_ops_begin_cpu_access,
	.end_cpu_access = vb2_dma_sg_dmabuf_ops_end_cpu_access,
	.vmap = vb2_dma_sg_dmabuf_ops_vmap,
	.mmap = vb2_dma_sg_dmabuf_ops_mmap,
	.release = vb2_dma_sg_dmabuf_ops_release,
};

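/*
 * Export an MMAP buffer as a dma-buf. The exported dma-buf takes one
 * reference on the vb2 buffer, dropped again from the dma-buf release op.
 */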
static struct dma_buf *vb2_dma_sg_get_dmabuf(void *buf_priv,
					     unsigned long flags)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_dma_sg_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (WARN_ON(!buf->dma_sgt))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

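/*
 * Importer side: pin the dma-buf by mapping the attachment and use the
 * exporter's scatter list as this buffer's dma_sgt.
 */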
static int vb2_dma_sg_map_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;
	struct sg_table *sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non-attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}

static void vb2_dma_sg_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a non-attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_sgt = NULL;
}

static void vb2_dma_sg_detach_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach a mapped buffer */
	if (WARN_ON(buf->dma_sgt))
		vb2_dma_sg_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

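/*
 * Importer-side attach: validate the dma-buf size, attach the consumer
 * device to it and remember the attachment. The actual mapping is deferred
 * to map_dmabuf().
 */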
static void *vb2_dma_sg_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
				      unsigned long size,
				      enum dma_data_direction dma_dir)
{
	struct vb2_dma_sg_buf *buf;
	struct dma_buf_attachment *dba;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = dma_dir;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}

static void *vb2_dma_sg_cookie(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return buf->dma_sgt;
}

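/*
 * vb2 memory ops table exported for drivers that want CMA-backed
 * scatter/gather buffers.
 */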
const struct vb2_mem_ops vb2_rdma_sg_memops = {
	.alloc = vb2_dma_sg_alloc,
	.put = vb2_dma_sg_put,
	.get_userptr = vb2_dma_sg_get_userptr,
	.put_userptr = vb2_dma_sg_put_userptr,
	.prepare = vb2_dma_sg_prepare,
	.finish = vb2_dma_sg_finish,
	.vaddr = vb2_dma_sg_vaddr,
	.mmap = vb2_dma_sg_mmap,
	.num_users = vb2_dma_sg_num_users,
	.get_dmabuf = vb2_dma_sg_get_dmabuf,
	.map_dmabuf = vb2_dma_sg_map_dmabuf,
	.unmap_dmabuf = vb2_dma_sg_unmap_dmabuf,
	.attach_dmabuf = vb2_dma_sg_attach_dmabuf,
	.detach_dmabuf = vb2_dma_sg_detach_dmabuf,
	.cookie = vb2_dma_sg_cookie,
};
EXPORT_SYMBOL_GPL(vb2_rdma_sg_memops);

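/*
 * Illustrative usage (not part of this file): a driver would typically hook
 * this allocator into its vb2 queue setup roughly as sketched below. The
 * queue fields shown are standard struct vb2_queue members;
 * "my_driver_buffer" is a placeholder for the driver's own per-buffer state.
 *
 *	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
 *	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
 *	q->mem_ops = &vb2_rdma_sg_memops;
 *	q->buf_struct_size = sizeof(struct my_driver_buffer);
 *	ret = vb2_queue_init(q);
 */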