/*
 * vendor/amlogic/media/common/ge2d/ge2d_dmabuf.c
 *
 * Copyright (C) 2017 Amlogic, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */

#include <linux/version.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/mm_types.h>
#include <linux/mutex.h>
#include <linux/dma-buf.h>
#include <linux/scatterlist.h>
#include <linux/pagemap.h>
#include <linux/dma-mapping.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
#include <linux/dma-contiguous.h>
#else
#include <linux/iommu.h>
#include <linux/dma-map-ops.h>
#endif

#include "ge2d_log.h"
#include "ge2d_dmabuf.h"

static void clear_dma_buffer(struct aml_dma_buffer *buffer, int index);

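/*
 * aml_mm_vmap() - map a physically contiguous region into kernel space.
 *
 * Builds a temporary page array covering @phys..@phys + @size and vmap()s
 * it, so highmem CMA pages (which have no permanent kernel mapping) can
 * still be accessed through a kernel virtual address.
 */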
static void *aml_mm_vmap(phys_addr_t phys, unsigned long size)
{
    u32 offset, npages;
    struct page **pages = NULL;
    pgprot_t pgprot = PAGE_KERNEL;
    void *vaddr;
    int i;

    offset = offset_in_page(phys);
    npages = DIV_ROUND_UP(size + offset, PAGE_SIZE);

    pages = vmalloc(sizeof(struct page *) * npages);
    if (!pages) {
        return NULL;
    }
    for (i = 0; i < npages; i++) {
        pages[i] = phys_to_page(phys);
        phys += PAGE_SIZE;
    }

    vaddr = vmap(pages, npages, VM_MAP, pgprot);
    if (!vaddr) {
        pr_err("vmap failed, size: %d\n", npages << PAGE_SHIFT);
        vfree(pages);
        return NULL;
    }
    /* the page array is only needed while building the mapping */
    vfree(pages);
    ge2d_log_dbg("[HIGH-MEM-MAP] pa(%lx) to va(%p), size: %d\n", (unsigned long)phys, vaddr, npages << PAGE_SHIFT);
    return vaddr;
}

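/*
 * Return a kernel virtual address for @phys: lowmem pages already have a
 * linear mapping (phys_to_virt), highmem pages go through aml_mm_vmap().
 */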
static void *aml_map_phyaddr_to_virt(dma_addr_t phys, unsigned long size)
{
    void *vaddr = NULL;

    if (!PageHighMem(phys_to_page(phys))) {
        return phys_to_virt(phys);
    }
    vaddr = aml_mm_vmap(phys, size);
    return vaddr;
}

/* dma free */
static void aml_dma_put(void *buf_priv)
{
    struct aml_dma_buf *buf = buf_priv;
    struct page *cma_pages = NULL;
    void *vaddr = (void *)(PAGE_MASK & (ulong)buf->vaddr);

    if (!atomic_dec_and_test(&buf->refcount)) {
        ge2d_log_dbg("ge2d aml_dma_put, refcount=%d\n", atomic_read(&buf->refcount));
        return;
    }
    cma_pages = phys_to_page(buf->dma_addr);
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
    if (is_vmalloc_or_module_addr(vaddr)) {
#else
    if (is_vmalloc_addr(vaddr)) {
#endif
        vunmap(vaddr);
    }

    if (!dma_release_from_contiguous(buf->dev, cma_pages, buf->size >> PAGE_SHIFT)) {
        pr_err("failed to release cma buffer\n");
    }
    buf->vaddr = NULL;
    clear_dma_buffer((struct aml_dma_buffer *)buf->priv, buf->index);
    put_device(buf->dev);
    ge2d_log_dbg("ge2d free:aml_dma_buf=0x%p,buf->index=%d\n", buf, buf->index);
    kfree(buf);
}

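/*
 * Allocate a physically contiguous buffer from the device's CMA region,
 * map it into kernel space and initialise the refcount to 1.
 */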
static void *aml_dma_alloc(struct device *dev, unsigned long attrs, unsigned long size, enum dma_data_direction dma_dir,
                           gfp_t gfp_flags)
{
    struct aml_dma_buf *buf;
    struct page *cma_pages = NULL;
    dma_addr_t paddr = 0;

    if (WARN_ON(!dev)) {
        return NULL; /* callers check for NULL, not ERR_PTR */
    }

    buf = kzalloc(sizeof(struct aml_dma_buf), GFP_KERNEL | gfp_flags);
    if (!buf) {
        return NULL;
    }

    if (attrs) {
        buf->attrs = attrs;
    }
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
    cma_pages = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT, 0);
#else
    cma_pages = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT, 0, false);
#endif
    if (cma_pages) {
        paddr = page_to_phys(cma_pages);
    } else {
        kfree(buf);
        pr_err("failed to alloc cma pages.\n");
        return NULL;
    }
    buf->vaddr = aml_map_phyaddr_to_virt(paddr, size);
    buf->dev = get_device(dev);
    buf->size = size;
    buf->dma_dir = dma_dir;
    buf->dma_addr = paddr;
    atomic_inc(&buf->refcount);
    ge2d_log_dbg("aml_dma_buf=0x%p, refcount=%d\n", buf, atomic_read(&buf->refcount));

    return buf;
}

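/*
 * Back a userspace mapping with the CMA buffer via remap_pfn_range().
 * The requested VMA must not be larger than the allocated buffer.
 */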
static int aml_dma_mmap(void *buf_priv, struct vm_area_struct *vma)
{
    struct aml_dma_buf *buf = buf_priv;
    unsigned long pfn = 0;
    unsigned long vsize;
    int ret = -1;

    if (!buf || !vma) {
        pr_err("No memory to map\n");
        return -EINVAL;
    }

    vsize = vma->vm_end - vma->vm_start;
    if (vsize > PAGE_ALIGN(buf->size)) {
        pr_err("vma size too large to map\n");
        return -EINVAL;
    }

    pfn = buf->dma_addr >> PAGE_SHIFT;
    ret = remap_pfn_range(vma, vma->vm_start, pfn, vsize, vma->vm_page_prot);
    if (ret) {
        pr_err("Remapping memory, error: %d\n", ret);
        return ret;
    }
    vma->vm_flags |= VM_DONTEXPAND;
    ge2d_log_dbg("mapped dma addr 0x%08lx at 0x%08lx, size %d\n", (unsigned long)buf->dma_addr, vma->vm_start,
                 buf->size);
    return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/
struct aml_attachment {
    struct sg_table sgt;
    enum dma_data_direction dma_dir;
};

#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
static int aml_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev, struct dma_buf_attachment *dbuf_attach)
#else
static int aml_dmabuf_ops_attach(struct dma_buf *dbuf, struct dma_buf_attachment *dbuf_attach)
#endif
{
    struct aml_attachment *attach;
    struct aml_dma_buf *buf = dbuf->priv;
    int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;
    struct sg_table *sgt;
    struct scatterlist *sg;
    phys_addr_t phys = buf->dma_addr;
    unsigned int i;
    int ret;

    attach = kzalloc(sizeof(*attach), GFP_KERNEL);
    if (!attach) {
        return -ENOMEM;
    }

    sgt = &attach->sgt;
    /* Build a private scatter list for this attachment, as the same
     * scatter list cannot be mapped to multiple attachments at the
     * same time.
     */
    ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
    if (ret) {
        kfree(attach);
        return -ENOMEM;
    }
    for_each_sg(sgt->sgl, sg, sgt->nents, i) {
        struct page *page = phys_to_page(phys);

        sg_set_page(sg, page, PAGE_SIZE, 0);
        phys += PAGE_SIZE;
    }

    attach->dma_dir = DMA_NONE;
    dbuf_attach->priv = attach;

    return 0;
}

static void aml_dmabuf_ops_detach(struct dma_buf *dbuf, struct dma_buf_attachment *db_attach)
{
    struct aml_attachment *attach = db_attach->priv;
    struct sg_table *sgt;

    if (!attach) {
        return;
    }

    sgt = &attach->sgt;

    /* release the scatterlist cache */
    if (attach->dma_dir != DMA_NONE) {
        dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, attach->dma_dir);
    }
    sg_free_table(sgt);
    kfree(attach);
    db_attach->priv = NULL;
}

static struct sg_table *aml_dmabuf_ops_map(struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
    struct aml_attachment *attach = db_attach->priv;
    /* stealing dmabuf mutex to serialize map/unmap operations */
    struct mutex *lock = &db_attach->dmabuf->lock;
    struct sg_table *sgt;

    mutex_lock(lock);

    sgt = &attach->sgt;
    /* return previously mapped sg table */
    if (attach->dma_dir == dma_dir) {
        mutex_unlock(lock);
        return sgt;
    }

    /* release any previous cache */
    if (attach->dma_dir != DMA_NONE) {
        dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, attach->dma_dir);
        attach->dma_dir = DMA_NONE;
    }
    /* mapping to the client with new direction */
    sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dma_dir);
    if (!sgt->nents) {
        pr_err("failed to map scatterlist\n");
        mutex_unlock(lock);
        return ERR_PTR(-EIO);
    }

    attach->dma_dir = dma_dir;

    mutex_unlock(lock);
    return sgt;
}

static void aml_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach, struct sg_table *sgt,
                                 enum dma_data_direction dma_dir)
{
    /* nothing to be done here */
}

static void aml_dmabuf_ops_release(struct dma_buf *dbuf)
{
    /* drop the reference obtained in get_dmabuf() */
    aml_dma_put(dbuf->priv);
}

static void *aml_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
    struct aml_dma_buf *buf = dbuf->priv;

    return buf->vaddr ? buf->vaddr + pgnum * PAGE_SIZE : NULL;
}

static void *aml_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
    struct aml_dma_buf *buf = dbuf->priv;

    return buf->vaddr;
}

static int aml_dmabuf_ops_mmap(struct dma_buf *dbuf, struct vm_area_struct *vma)
{
    return aml_dma_mmap(dbuf->priv, vma);
}

static struct dma_buf_ops ge2d_dmabuf_ops = {
    .attach = aml_dmabuf_ops_attach,
    .detach = aml_dmabuf_ops_detach,
    .map_dma_buf = aml_dmabuf_ops_map,
    .unmap_dma_buf = aml_dmabuf_ops_unmap,
#if LINUX_VERSION_CODE <= KERNEL_VERSION(5, 4, 125)
    .kmap = aml_dmabuf_ops_kmap,
    .kmap_atomic = aml_dmabuf_ops_kmap,
#endif
    .vmap = aml_dmabuf_ops_vmap,
    .mmap = aml_dmabuf_ops_mmap,
    .release = aml_dmabuf_ops_release,
};

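/*
 * Wrap an aml_dma_buf in a new dma-buf. Takes an extra reference on the
 * buffer, which aml_dmabuf_ops_release() drops when the dma-buf dies.
 */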
static struct dma_buf *get_dmabuf(void *buf_priv, unsigned long flags)
{
    struct aml_dma_buf *buf = buf_priv;
    struct dma_buf *dbuf;
    DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

    exp_info.ops = &ge2d_dmabuf_ops;
    exp_info.size = buf->size;
    exp_info.flags = flags;
    exp_info.priv = buf;
    if (WARN_ON(!buf->vaddr)) {
        return NULL;
    }

    dbuf = dma_buf_export(&exp_info);
    if (IS_ERR(dbuf)) {
        return NULL;
    }

    /* the dmabuf keeps a reference to the underlying buffer */
    atomic_inc(&buf->refcount);
    ge2d_log_dbg("get_dmabuf, refcount=%d\n", atomic_read(&buf->refcount));
    return dbuf;
}

/* ge2d dma-buf api.h */
static int find_empty_dma_buffer(struct aml_dma_buffer *buffer)
{
    int i;

    for (i = 0; i < AML_MAX_DMABUF; i++) {
        if (!buffer->gd_buffer[i].alloc) {
            ge2d_log_dbg("find_empty_dma_buffer i=%d\n", i);
            return i;
        }
    }
    return -1;
}

static void clear_dma_buffer(struct aml_dma_buffer *buffer, int index)
{
    mutex_lock(&buffer->lock);
    buffer->gd_buffer[index].mem_priv = NULL;
    buffer->gd_buffer[index].index = 0;
    buffer->gd_buffer[index].alloc = 0;
    mutex_unlock(&buffer->lock);
}

void *ge2d_dma_buffer_create(void)
{
    int i;
    struct aml_dma_buffer *buffer;

    buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
    if (!buffer) {
        return NULL;
    }

    mutex_init(&buffer->lock);
    for (i = 0; i < AML_MAX_DMABUF; i++) {
        buffer->gd_buffer[i].mem_priv = NULL;
        buffer->gd_buffer[i].index = 0;
        buffer->gd_buffer[i].alloc = 0;
    }
    return buffer;
}

void ge2d_dma_buffer_destroy(struct aml_dma_buffer *buffer)
{
    kfree(buffer);
}

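/*
 * Allocate a CMA buffer of ge2d_req_buf->len bytes (page aligned) and
 * park it in the first free gd_buffer[] slot; the slot index is handed
 * back to the caller through ge2d_req_buf->index.
 */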
int ge2d_dma_buffer_alloc(struct aml_dma_buffer *buffer, struct device *dev, struct ge2d_dmabuf_req_s *ge2d_req_buf)
{
    void *buf;
    struct aml_dma_buf *dma_buf;
    unsigned int size;
    int index;

    if (WARN_ON(!dev)) {
        return -EINVAL;
    }
    if (!ge2d_req_buf) {
        return -EINVAL;
    }
    if (!buffer) {
        return -EINVAL;
    }

    size = PAGE_ALIGN(ge2d_req_buf->len);
    if (size == 0) {
        return -EINVAL;
    }
    buf = aml_dma_alloc(dev, 0, size, ge2d_req_buf->dma_dir, GFP_HIGHUSER | __GFP_ZERO);
    if (!buf) {
        return -ENOMEM;
    }
    mutex_lock(&buffer->lock);
    index = find_empty_dma_buffer(buffer);
    if ((index < 0) || (index >= AML_MAX_DMABUF)) {
        pr_err("no empty buffer found\n");
        mutex_unlock(&buffer->lock);
        aml_dma_put(buf);
        return -ENOMEM;
    }
    ((struct aml_dma_buf *)buf)->priv = buffer;
    ((struct aml_dma_buf *)buf)->index = index;
    buffer->gd_buffer[index].mem_priv = buf;
    buffer->gd_buffer[index].index = index;
    buffer->gd_buffer[index].alloc = 1;
    mutex_unlock(&buffer->lock);
    ge2d_req_buf->index = index;
    dma_buf = (struct aml_dma_buf *)buf;
    if (dma_buf->dma_dir == DMA_FROM_DEVICE) {
        dma_sync_single_for_cpu(dma_buf->dev, dma_buf->dma_addr, dma_buf->size, DMA_FROM_DEVICE);
    }
    return 0;
}

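/*
 * Drop the allocation reference on slot @index. The slot itself is
 * cleared by clear_dma_buffer() once the last reference goes away.
 */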
int ge2d_dma_buffer_free(struct aml_dma_buffer *buffer, int index)
{
    struct aml_dma_buf *buf;

    if (!buffer) {
        return -EINVAL;
    }
    if ((index < 0) || (index >= AML_MAX_DMABUF)) {
        return -EINVAL;
    }

    buf = buffer->gd_buffer[index].mem_priv;
    if (!buf) {
        pr_err("aml_dma_buf is null\n");
        return -EINVAL;
    }
    aml_dma_put(buf);
    return 0;
}

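/*
 * Export slot @index as a dma-buf fd. The access-mode bits of
 * ge2d_exp_buf->flags select the dma-buf's O_ACCMODE; the remaining
 * bits (e.g. O_CLOEXEC) are passed to dma_buf_fd().
 */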
int ge2d_dma_buffer_export(struct aml_dma_buffer *buffer, struct ge2d_dmabuf_exp_s *ge2d_exp_buf)
{
    struct aml_dma_buf *buf;
    struct dma_buf *dbuf;
    int ret, index;
    unsigned int flags;

    if (!ge2d_exp_buf) {
        return -EINVAL;
    }
    if (!buffer) {
        return -EINVAL;
    }

    index = ge2d_exp_buf->index;
    if ((index < 0) || (index >= AML_MAX_DMABUF)) {
        return -EINVAL;
    }

    flags = ge2d_exp_buf->flags;
    buf = buffer->gd_buffer[index].mem_priv;
    if (!buf) {
        pr_err("aml_dma_buf is null\n");
        return -EINVAL;
    }

    dbuf = get_dmabuf(buf, flags & O_ACCMODE);
    if (IS_ERR_OR_NULL(dbuf)) {
        pr_err("failed to export buffer %d\n", index);
        return -EINVAL;
    }
    ret = dma_buf_fd(dbuf, flags & ~O_ACCMODE);
    if (ret < 0) {
        pr_err("buffer %d, failed to export (%d)\n", index, ret);
        dma_buf_put(dbuf);
        return ret;
    }

    ge2d_log_dbg("buffer %d, exported as fd %d\n", index, ret);
    buffer->gd_buffer[index].fd = ret;
    buffer->gd_buffer[index].dbuf = dbuf;
    ge2d_exp_buf->fd = ret;
    return 0;
}

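/*
 * Import an external dma-buf fd for GE2D use: get the dma-buf, attach
 * the device, map the attachment, begin CPU access and vmap it. On
 * success the dbuf/attach/sg/vaddr handles are stored in @cfg for the
 * matching ge2d_dma_buffer_unmap() call.
 */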
int ge2d_dma_buffer_map(struct aml_dma_cfg *cfg)
{
    int ret = -1;
    int fd = -1;
    struct dma_buf *dbuf = NULL;
    struct dma_buf_attachment *d_att = NULL;
    struct sg_table *sg = NULL;
    void *vaddr = NULL;
    struct device *dev = NULL;
    enum dma_data_direction dir;

    if (cfg == NULL || (cfg->fd < 0) || cfg->dev == NULL) {
        pr_err("error input param\n");
        return -EINVAL;
    }
    fd = cfg->fd;
    dev = cfg->dev;
    dir = cfg->dir;

    dbuf = dma_buf_get(fd);
    if (IS_ERR(dbuf)) {
        pr_err("failed to get dma buffer\n");
        return -EINVAL;
    }

    d_att = dma_buf_attach(dbuf, dev);
    if (IS_ERR(d_att)) {
        pr_err("failed to set dma attach\n");
        ret = PTR_ERR(d_att);
        goto attach_err;
    }

    sg = dma_buf_map_attachment(d_att, dir);
    if (IS_ERR(sg)) {
        pr_err("failed to get dma sg\n");
        ret = PTR_ERR(sg);
        goto map_attach_err;
    }

    ret = dma_buf_begin_cpu_access(dbuf, dir);
    if (ret != 0) {
        pr_err("failed to access dma buff\n");
        goto access_err;
    }

    vaddr = dma_buf_vmap(dbuf);
    if (vaddr == NULL) {
        pr_err("failed to vmap dma buf\n");
        ret = -ENOMEM; /* don't return the 0 left over from begin_cpu_access */
        goto vmap_err;
    }
    cfg->dbuf = dbuf;
    cfg->attach = d_att;
    cfg->vaddr = vaddr;
    cfg->sg = sg;
    ge2d_log_dbg("%s, dbuf=0x%p\n", __func__, dbuf);
    return 0;

vmap_err:
    dma_buf_end_cpu_access(dbuf, dir);

access_err:
    dma_buf_unmap_attachment(d_att, sg, dir);

map_attach_err:
    dma_buf_detach(dbuf, d_att);

attach_err:
    dma_buf_put(dbuf);

    return ret;
}

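/*
 * Look up an fd that refers to one of our own exported buffers; if it
 * matches, return its physical address without creating a new mapping.
 */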
static int ge2d_dma_buffer_get_phys_internal(struct aml_dma_buffer *buffer, struct aml_dma_cfg *cfg,
                                             unsigned long *addr)
{
    int i = 0, ret = -1;
    struct aml_dma_buf *dma_buf;
    struct dma_buf *dbuf = NULL;

    for (i = 0; i < AML_MAX_DMABUF; i++) {
        if (buffer->gd_buffer[i].alloc) {
            dbuf = dma_buf_get(cfg->fd);
            if (IS_ERR(dbuf)) {
                pr_err("%s: failed to get dma buffer,fd=%d, dbuf=%p\n", __func__, cfg->fd, dbuf);
                return -EINVAL;
            }
            dma_buf_put(dbuf);
            if (dbuf == buffer->gd_buffer[i].dbuf) {
                cfg->dbuf = dbuf;
                dma_buf = buffer->gd_buffer[i].mem_priv;
                *addr = dma_buf->dma_addr;
                ret = 0;
                break;
            }
        }
    }
    return ret;
}

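/*
 * Resolve @cfg->fd to a physical address: first try the buffers we
 * exported ourselves, otherwise import the foreign dma-buf via
 * ge2d_dma_buffer_map() and take the address of its first sg entry.
 */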
int ge2d_dma_buffer_get_phys(struct aml_dma_buffer *buffer, struct aml_dma_cfg *cfg, unsigned long *addr)
{
    struct sg_table *sg_table;
    struct page *page;
    int ret = -1;

    if (cfg == NULL || (cfg->fd < 0)) {
        pr_err("error input param\n");
        return -EINVAL;
    }
    ret = ge2d_dma_buffer_get_phys_internal(buffer, cfg, addr);
    if (ret < 0) {
        ret = ge2d_dma_buffer_map(cfg);
        if (ret < 0) {
            pr_err("ge2d_dma_buffer_map failed\n");
            return ret;
        }
        if (cfg->sg) {
            sg_table = cfg->sg;
            page = sg_page(sg_table->sgl);
            *addr = PFN_PHYS(page_to_pfn(page));
            ret = 0;
        }
    }
    return ret;
}

int ge2d_dma_buffer_unmap_info(struct aml_dma_buffer *buffer, struct aml_dma_cfg *cfg)
{
    int i, found = 0;

    if (cfg == NULL || (cfg->fd < 0)) {
        pr_err("error input param\n");
        return -EINVAL;
    }
    for (i = 0; i < AML_MAX_DMABUF; i++) {
        if (buffer->gd_buffer[i].alloc) {
            if (cfg->dbuf == buffer->gd_buffer[i].dbuf) {
                found = 1;
                break;
            }
        }
    }
    /* only unmap buffers that were imported, not our own exports */
    if (!found) {
        ge2d_dma_buffer_unmap(cfg);
    }
    return 0;
}

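/*
 * Tear down everything ge2d_dma_buffer_map() set up, in reverse order:
 * vunmap, end CPU access, unmap the attachment, detach and put the
 * dma-buf.
 */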
void ge2d_dma_buffer_unmap(struct aml_dma_cfg *cfg)
{
    struct dma_buf *dbuf = NULL;
    struct dma_buf_attachment *d_att = NULL;
    struct sg_table *sg = NULL;
    void *vaddr = NULL;
    enum dma_data_direction dir;

    if (cfg == NULL || (cfg->fd < 0) || cfg->dev == NULL || cfg->dbuf == NULL || cfg->vaddr == NULL ||
        cfg->attach == NULL || cfg->sg == NULL) {
        pr_err("Error input param\n");
        return;
    }
    dir = cfg->dir;
    dbuf = cfg->dbuf;
    vaddr = cfg->vaddr;
    d_att = cfg->attach;
    sg = cfg->sg;

    dma_buf_vunmap(dbuf, vaddr);

    dma_buf_end_cpu_access(dbuf, dir);

    dma_buf_unmap_attachment(d_att, sg, dir);

    dma_buf_detach(dbuf, d_att);

    dma_buf_put(dbuf);

    ge2d_log_dbg("%s, dbuf=0x%p\n", __func__, dbuf);
}

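/*
 * Cache maintenance for buffers we allocated: dma_flush cleans the CPU
 * cache before the device reads, cache_flush invalidates it before the
 * CPU reads back what the device wrote.
 */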
void ge2d_dma_buffer_dma_flush(struct device *dev, int fd)
{
    struct dma_buf *dmabuf;
    struct aml_dma_buf *buf;

    ge2d_log_dbg("ge2d_dma_buffer_dma_flush fd=%d\n", fd);
    dmabuf = dma_buf_get(fd);
    if (IS_ERR(dmabuf)) {
        pr_err("dma_buf_get failed\n");
        return;
    }
    buf = dmabuf->priv;
    if (!buf) {
        pr_err("error input param\n");
        dma_buf_put(dmabuf); /* don't leak the reference on early return */
        return;
    }
    if ((buf->size > 0) && (buf->dev == dev)) {
        dma_sync_single_for_device(buf->dev, buf->dma_addr, buf->size, DMA_TO_DEVICE);
    }
    dma_buf_put(dmabuf);
}

void ge2d_dma_buffer_cache_flush(struct device *dev, int fd)
{
    struct dma_buf *dmabuf;
    struct aml_dma_buf *buf;

    ge2d_log_dbg("ge2d_dma_buffer_cache_flush fd=%d\n", fd);
    dmabuf = dma_buf_get(fd);
    if (IS_ERR(dmabuf)) {
        pr_err("dma_buf_get failed\n");
        return;
    }
    buf = dmabuf->priv;
    if (!buf) {
        pr_err("error input param\n");
        dma_buf_put(dmabuf); /* don't leak the reference on early return */
        return;
    }
    if ((buf->size > 0) && (buf->dev == dev)) {
        dma_sync_single_for_cpu(buf->dev, buf->dma_addr, buf->size, DMA_FROM_DEVICE);
    }
    dma_buf_put(dmabuf);
}