// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) Rockchip Electronics Co., Ltd.
 *
 * Author: Huang Lee <Putin.li@rock-chips.com>
 */

#define pr_fmt(fmt) "rga_dma_buf: " fmt

#include "rga_dma_buf.h"
#include "rga.h"
#include "rga_hw_config.h"
#include "rga_job.h"
15 /**
16 * rga_dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
17 * page flags.
18 * @dir: Direction of DMA transfer
19 * @coherent: Is the DMA master cache-coherent?
20 * @attrs: DMA attributes for the mapping
21 *
22 * Return: corresponding IOMMU API page protection flags
23 */
rga_dma_info_to_prot(enum dma_data_direction dir,bool coherent,unsigned long attrs)24 static int rga_dma_info_to_prot(enum dma_data_direction dir, bool coherent,
25 unsigned long attrs)
26 {
27 int prot = coherent ? IOMMU_CACHE : 0;
28
29 if (attrs & DMA_ATTR_PRIVILEGED)
30 prot |= IOMMU_PRIV;
31 if (attrs & DMA_ATTR_SYS_CACHE_ONLY)
32 prot |= IOMMU_SYS_CACHE;
33 if (attrs & DMA_ATTR_SYS_CACHE_ONLY_NWA)
34 prot |= IOMMU_SYS_CACHE_NWA;
35
36 switch (dir) {
37 case DMA_BIDIRECTIONAL:
38 return prot | IOMMU_READ | IOMMU_WRITE;
39 case DMA_TO_DEVICE:
40 return prot | IOMMU_READ;
41 case DMA_FROM_DEVICE:
42 return prot | IOMMU_WRITE;
43 default:
44 return 0;
45 }
46 }
47
/**
 * rga_buf_size_cal - calculate the page span covered by an image's planes.
 * @yrgb_addr:	virtual address of the Y/RGB plane
 * @uv_addr:	virtual address of the UV plane (semi-planar/planar formats)
 * @v_addr:	virtual address of the V plane (3-plane formats only)
 * @format:	RGA2_FORMAT_* pixel format
 * @w:		virtual width in pixels
 * @h:		virtual height in pixels
 * @StartAddr:	out: page index of the lowest plane address (addr >> PAGE_SHIFT)
 * @size:	out (optional, may be NULL): total byte size of all planes
 *
 * The per-format cases only differ in their stride/plane-size computation;
 * the page-range arithmetic depends solely on how many planes are in use,
 * so it is factored out below instead of being repeated in every case.
 *
 * Return: number of pages spanned by all planes, or 0 for unknown formats.
 */
int rga_buf_size_cal(unsigned long yrgb_addr, unsigned long uv_addr,
		     unsigned long v_addr, int format, uint32_t w,
		     uint32_t h, unsigned long *StartAddr, unsigned long *size)
{
	uint32_t size_yrgb = 0;
	uint32_t size_uv = 0;
	uint32_t size_v = 0;
	uint32_t stride = 0;
	unsigned long start, end;
	uint32_t pageCount;
	int planes;

	switch (format) {
	/* 32bpp RGB: one plane, stride rounded up to a 4-byte boundary */
	case RGA2_FORMAT_RGBA_8888:
	case RGA2_FORMAT_RGBX_8888:
	case RGA2_FORMAT_BGRA_8888:
	case RGA2_FORMAT_BGRX_8888:
	case RGA2_FORMAT_ARGB_8888:
	case RGA2_FORMAT_XRGB_8888:
	case RGA2_FORMAT_ABGR_8888:
	case RGA2_FORMAT_XBGR_8888:
		stride = (w * 4 + 3) & (~3);
		size_yrgb = stride * h;
		planes = 1;
		break;
	/* 24bpp RGB */
	case RGA2_FORMAT_RGB_888:
	case RGA2_FORMAT_BGR_888:
		stride = (w * 3 + 3) & (~3);
		size_yrgb = stride * h;
		planes = 1;
		break;
	/* 16bpp RGB */
	case RGA2_FORMAT_RGB_565:
	case RGA2_FORMAT_RGBA_5551:
	case RGA2_FORMAT_RGBA_4444:
	case RGA2_FORMAT_BGR_565:
	case RGA2_FORMAT_BGRA_5551:
	case RGA2_FORMAT_BGRA_4444:
	case RGA2_FORMAT_ARGB_5551:
	case RGA2_FORMAT_ARGB_4444:
	case RGA2_FORMAT_ABGR_5551:
	case RGA2_FORMAT_ABGR_4444:
		stride = (w * 2 + 3) & (~3);
		size_yrgb = stride * h;
		planes = 1;
		break;

	/* YUV FORMAT */
	case RGA2_FORMAT_YCbCr_422_SP:
	case RGA2_FORMAT_YCrCb_422_SP:
		stride = (w + 3) & (~3);
		size_yrgb = stride * h;
		size_uv = stride * h;
		planes = 2;
		break;
	case RGA2_FORMAT_YCbCr_422_P:
	case RGA2_FORMAT_YCrCb_422_P:
		stride = (w + 3) & (~3);
		size_yrgb = stride * h;
		size_uv = ((stride >> 1) * h);
		size_v = ((stride >> 1) * h);
		planes = 3;
		break;
	case RGA2_FORMAT_YCbCr_420_SP:
	case RGA2_FORMAT_YCrCb_420_SP:
		stride = (w + 3) & (~3);
		size_yrgb = stride * h;
		size_uv = (stride * (h >> 1));
		planes = 2;
		break;
	case RGA2_FORMAT_YCbCr_420_P:
	case RGA2_FORMAT_YCrCb_420_P:
		stride = (w + 3) & (~3);
		size_yrgb = stride * h;
		size_uv = ((stride >> 1) * (h >> 1));
		size_v = ((stride >> 1) * (h >> 1));
		planes = 3;
		break;
	case RGA2_FORMAT_YCbCr_400:
		stride = (w + 3) & (~3);
		size_yrgb = stride * h;
		planes = 1;
		break;
	case RGA2_FORMAT_Y4:
		/* 4 bits per pixel: half the byte width of Y8 */
		stride = ((w + 3) & (~3)) >> 1;
		size_yrgb = stride * h;
		planes = 1;
		break;
	case RGA2_FORMAT_YVYU_422:
	case RGA2_FORMAT_VYUY_422:
	case RGA2_FORMAT_YUYV_422:
	case RGA2_FORMAT_UYVY_422:
		stride = (w + 3) & (~3);
		size_yrgb = stride * h;
		size_uv = stride * h;
		planes = 2;
		break;
	case RGA2_FORMAT_YVYU_420:
	case RGA2_FORMAT_VYUY_420:
	case RGA2_FORMAT_YUYV_420:
	case RGA2_FORMAT_UYVY_420:
		stride = (w + 3) & (~3);
		size_yrgb = stride * h;
		size_uv = (stride * (h >> 1));
		planes = 2;
		break;
	case RGA2_FORMAT_YCbCr_420_SP_10B:
	case RGA2_FORMAT_YCrCb_420_SP_10B:
		stride = (w + 3) & (~3);
		size_yrgb = stride * h;
		size_uv = (stride * (h >> 1));
		planes = 2;
		break;
	default:
		planes = 0;
		break;
	}

	/* Compute the [start, end) byte range over the planes in use. */
	switch (planes) {
	case 1:
		start = yrgb_addr;
		end = yrgb_addr + size_yrgb;
		break;
	case 2:
		start = min(yrgb_addr, uv_addr);
		end = max((yrgb_addr + size_yrgb), (uv_addr + size_uv));
		break;
	case 3:
		start = min3(yrgb_addr, uv_addr, v_addr);
		end = max3((yrgb_addr + size_yrgb), (uv_addr + size_uv),
			   (v_addr + size_v));
		break;
	default:
		start = 0;
		end = 0;
		break;
	}

	if (planes) {
		/* Round down the start and up the end to page boundaries. */
		start >>= PAGE_SHIFT;
		end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		pageCount = end - start;
	} else {
		pageCount = 0;
	}

	*StartAddr = start;

	if (size != NULL)
		*size = size_yrgb + size_uv + size_v;

	return pageCount;
}
219
/*
 * rga_MapUserMemory - translate a user virtual page range to physical addresses.
 * @pages:	caller-allocated array to receive struct page pointers
 * @pageTable:	caller-allocated array to receive 32-bit physical addresses
 * @Memory:	start of the range as a PAGE index (addr >> PAGE_SHIFT),
 *		as produced by rga_buf_size_cal()
 * @pageCount:	number of pages to resolve
 * @writeFlag:	non-zero if the hardware will write to the pages
 * @mm:		mm of the process that owns the mapping
 *
 * Fast path: pin all pages with get_user_pages*() and read their physical
 * addresses. Fallback: if fewer than pageCount pages could be pinned, walk
 * the page tables manually for each page.
 *
 * NOTE(review): on the fast path every pinned page is put_page()'d before
 * returning, so no reference is held while the hardware uses the physical
 * addresses; presumably the caller relies on the mapping staying populated
 * for the duration of the job — confirm.
 *
 * Return: 0 on success, RGA_OUT_OF_RESOURCES if any page cannot be resolved.
 */
static int rga_MapUserMemory(struct page **pages, uint32_t *pageTable,
		unsigned long Memory, uint32_t pageCount, int writeFlag,
		struct mm_struct *mm)
{
	uint32_t i, status;
	int32_t result;
	unsigned long Address;
	unsigned long pfn;
	struct page __maybe_unused *page;
	struct vm_area_struct *vma;
	spinlock_t *ptl;
	pte_t *pte;
	pgd_t *pgd;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
	/* 5-level page tables: p4d sits between pgd and pud. */
	p4d_t *p4d;
#endif
	pud_t *pud;
	pmd_t *pmd;

	status = 0;

	/* The whole lookup runs under the mmap read lock. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
	mmap_read_lock(mm);
#else
	down_read(&mm->mmap_sem);
#endif

	/* get_user_pages*() signature changed several times across kernels. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 168) && \
	LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)
	result = get_user_pages(current, mm, Memory << PAGE_SHIFT,
		pageCount, writeFlag ? FOLL_WRITE : 0,
		pages, NULL);
#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
	result = get_user_pages(current, mm, Memory << PAGE_SHIFT,
		pageCount, writeFlag, 0, pages, NULL);
#elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
	result = get_user_pages_remote(current, mm,
		Memory << PAGE_SHIFT,
		pageCount, writeFlag, pages, NULL, NULL);
#else
	result = get_user_pages_remote(mm, Memory << PAGE_SHIFT,
		pageCount, writeFlag, pages, NULL, NULL);
#endif

	if (result > 0 && result >= pageCount) {
		/* Fill the page table. */
		for (i = 0; i < pageCount; i++) {
			/* Get the physical address from page struct. */
			pageTable[i] = page_to_phys(pages[i]);
		}

		/* Drop the GUP references taken above (see NOTE in header). */
		for (i = 0; i < result; i++)
			put_page(pages[i]);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
		mmap_read_unlock(mm);
#else
		up_read(&mm->mmap_sem);
#endif
		return 0;
	}

	/* Partial pin: release whatever was pinned before falling back. */
	if (result > 0) {
		for (i = 0; i < result; i++)
			put_page(pages[i]);
	}

	/* Fallback: walk pgd -> (p4d) -> pud -> pmd -> pte for each page. */
	for (i = 0; i < pageCount; i++) {
		vma = find_vma(mm, (Memory + i) << PAGE_SHIFT);
		if (!vma) {
			pr_err("failed to get vma, result = %d, pageCount = %d\n",
				result, pageCount);
			status = RGA_OUT_OF_RESOURCES;
			break;
		}

		pgd = pgd_offset(mm, (Memory + i) << PAGE_SHIFT);
		if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) {
			pr_err("failed to get pgd, result = %d, pageCount = %d\n",
				result, pageCount);
			status = RGA_OUT_OF_RESOURCES;
			break;
		}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
		/*
		 * In the four-level page table,
		 * it will do nothing and return pgd.
		 */
		p4d = p4d_offset(pgd, (Memory + i) << PAGE_SHIFT);
		if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d))) {
			pr_err("failed to get p4d, result = %d, pageCount = %d\n",
				result, pageCount);
			status = RGA_OUT_OF_RESOURCES;
			break;
		}

		pud = pud_offset(p4d, (Memory + i) << PAGE_SHIFT);
#else
		pud = pud_offset(pgd, (Memory + i) << PAGE_SHIFT);
#endif

		if (pud_none(*pud) || unlikely(pud_bad(*pud))) {
			pr_err("failed to get pud, result = %d, pageCount = %d\n",
				result, pageCount);
			status = RGA_OUT_OF_RESOURCES;
			break;
		}
		pmd = pmd_offset(pud, (Memory + i) << PAGE_SHIFT);
		if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) {
			pr_err("failed to get pmd, result = %d, pageCount = %d\n",
				result, pageCount);
			status = RGA_OUT_OF_RESOURCES;
			break;
		}
		/* Map and lock the PTE while we read the pfn from it. */
		pte = pte_offset_map_lock(mm, pmd,
					 (Memory + i) << PAGE_SHIFT, &ptl);
		if (pte_none(*pte)) {
			pr_err("failed to get pte, result = %d, pageCount = %d\n",
				result, pageCount);
			pte_unmap_unlock(pte, ptl);
			status = RGA_OUT_OF_RESOURCES;
			break;
		}

		pfn = pte_pfn(*pte);
		/* Physical page base plus the in-page offset of the VA. */
		Address = ((pfn << PAGE_SHIFT) |
			  (((unsigned long)((Memory + i) << PAGE_SHIFT)) &
			  ~PAGE_MASK));

		pages[i] = pfn_to_page(pfn);
		pageTable[i] = (uint32_t)Address;
		pte_unmap_unlock(pte, ptl);
	}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
	mmap_read_unlock(mm);
#else
	up_read(&mm->mmap_sem);
#endif

	return status;
}
362
rga_iommu_dma_alloc_iova(struct iommu_domain * domain,size_t size,u64 dma_limit,struct device * dev)363 static dma_addr_t rga_iommu_dma_alloc_iova(struct iommu_domain *domain,
364 size_t size, u64 dma_limit,
365 struct device *dev)
366 {
367 struct rga_iommu_dma_cookie *cookie = domain->iova_cookie;
368 struct iova_domain *iovad = &cookie->iovad;
369 unsigned long shift, iova_len, iova = 0;
370
371 shift = iova_shift(iovad);
372 iova_len = size >> shift;
373 /*
374 * Freeing non-power-of-two-sized allocations back into the IOVA caches
375 * will come back to bite us badly, so we have to waste a bit of space
376 * rounding up anything cacheable to make sure that can't happen. The
377 * order of the unadjusted size will still match upon freeing.
378 */
379 if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
380 iova_len = roundup_pow_of_two(iova_len);
381
382 dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);
383
384 if (domain->geometry.force_aperture)
385 dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);
386
387 iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift, true);
388
389 return (dma_addr_t)iova << shift;
390 }
391
/* Release an IOVA range previously obtained from rga_iommu_dma_alloc_iova(). */
static void rga_iommu_dma_free_iova(struct rga_iommu_dma_cookie *cookie,
				    dma_addr_t iova, size_t size)
{
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift = iova_shift(iovad);

	free_iova_fast(iovad, iova_pfn(iovad, iova), size >> shift);
}
400
rga_viraddr_put_channel_info(struct rga_dma_buffer_t ** rga_dma_buffer)401 static void rga_viraddr_put_channel_info(struct rga_dma_buffer_t **rga_dma_buffer)
402 {
403 struct rga_dma_buffer_t *buffer;
404
405 buffer = *rga_dma_buffer;
406 if (buffer == NULL)
407 return;
408
409 if (!buffer->use_viraddr)
410 return;
411
412 iommu_unmap(buffer->domain, buffer->iova, buffer->size);
413 rga_iommu_dma_free_iova(buffer->cookie, buffer->iova, buffer->size);
414
415 kfree(buffer);
416
417 *rga_dma_buffer = NULL;
418 }
419
rga_iommu_unmap_virt_addr(struct rga_dma_buffer * virt_dma_buf)420 void rga_iommu_unmap_virt_addr(struct rga_dma_buffer *virt_dma_buf)
421 {
422 if (virt_dma_buf == NULL)
423 return;
424 if (virt_dma_buf->iova == 0)
425 return;
426
427 iommu_unmap(virt_dma_buf->domain, virt_dma_buf->iova, virt_dma_buf->size);
428 rga_iommu_dma_free_iova(virt_dma_buf->cookie, virt_dma_buf->iova, virt_dma_buf->size);
429 }
430
rga_iommu_map_virt_addr(struct rga_memory_parm * memory_parm,struct rga_dma_buffer * virt_dma_buf,struct device * rga_dev,struct mm_struct * mm)431 int rga_iommu_map_virt_addr(struct rga_memory_parm *memory_parm,
432 struct rga_dma_buffer *virt_dma_buf,
433 struct device *rga_dev,
434 struct mm_struct *mm)
435 {
436 unsigned long size;
437 size_t map_size;
438 bool coherent;
439 int ioprot;
440 struct iommu_domain *domain = NULL;
441 struct rga_iommu_dma_cookie *cookie;
442 struct iova_domain *iovad;
443 dma_addr_t iova;
444 struct sg_table *sgt = NULL;
445
446
447 coherent = dev_is_dma_coherent(rga_dev);
448 ioprot = rga_dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, 0);
449 domain = iommu_get_dma_domain(rga_dev);
450 cookie = domain->iova_cookie;
451 iovad = &cookie->iovad;
452 size = iova_align(iovad, virt_dma_buf->size);
453 sgt = virt_dma_buf->sgt;
454 if (sgt == NULL) {
455 pr_err("can not map iommu, because sgt is null!\n");
456 return -EFAULT;
457 }
458
459 if (DEBUGGER_EN(MSG))
460 pr_debug("iova_align size = %ld", size);
461
462 iova = rga_iommu_dma_alloc_iova(domain, size, rga_dev->coherent_dma_mask, rga_dev);
463 if (!iova) {
464 pr_err("rga_iommu_dma_alloc_iova failed");
465 return -ENOMEM;
466 }
467
468 if (!(ioprot & IOMMU_CACHE)) {
469 struct scatterlist *sg;
470 int i;
471
472 for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
473 arch_dma_prep_coherent(sg_page(sg), sg->length);
474 }
475
476 map_size = iommu_map_sg_atomic(domain, iova, sgt->sgl, sgt->orig_nents, ioprot);
477 if (map_size < size) {
478 pr_err("iommu can not map sgt to iova");
479 return -EINVAL;
480 }
481
482 virt_dma_buf->cookie = cookie;
483 virt_dma_buf->domain = domain;
484 virt_dma_buf->iova = iova;
485 virt_dma_buf->size = size;
486
487 return 0;
488 }
489
/*
 * rga_viraddr_get_channel_info - build an IOMMU mapping for a channel that
 * was given as a user virtual address.
 * @channel_info:	channel descriptor; on success its yrgb_addr is
 *			replaced by the allocated IOVA
 * @rga_dma_buffer:	out: newly allocated buffer descriptor that records
 *			iova/size/cookie/domain for later teardown via
 *			rga_viraddr_put_channel_info()
 * @writeFlag:		non-zero if the hardware will write to the pages
 * @core:		scheduler core used to pick the device/IOMMU domain
 * @mm:			mm of the process owning the virtual address
 *
 * Steps: resolve the user pages (rga_MapUserMemory), allocate an IOVA range,
 * build a temporary sg_table from the pages and map it into the domain.
 * The pages/page_table scratch arrays and the sg_table are freed again in
 * all paths; only the IOVA mapping and the descriptor survive on success.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int rga_viraddr_get_channel_info(struct rga_img_info_t *channel_info,
		struct rga_dma_buffer_t **rga_dma_buffer,
		int writeFlag, int core, struct mm_struct *mm)
{
	struct rga_scheduler_t *scheduler = NULL;
	struct rga_dma_buffer_t *alloc_buffer;

	unsigned long size;
	unsigned long start_addr;
	unsigned int count;
	int order = 0;

	uint32_t *page_table = NULL;
	struct page **pages = NULL;
	struct sg_table sgt;

	int ret = 0;
	size_t map_size = 0;

	struct iommu_domain *domain = NULL;
	struct rga_iommu_dma_cookie *cookie;
	struct iova_domain *iovad;
	bool coherent;
	int ioprot;
	dma_addr_t iova;

	int format;

	alloc_buffer =
		kmalloc(sizeof(struct rga_dma_buffer_t),
			GFP_KERNEL);
	if (alloc_buffer == NULL) {
		pr_err("rga_dma_buffer alloc error!\n");
		return -ENOMEM;
	}

	/* Translate the userspace format code into the driver's enum. */
	user_format_convert(&format, channel_info->format);

	scheduler = rga_job_get_scheduler(core);
	if (scheduler == NULL) {
		pr_err("failed to get scheduler, %s(%d)\n", __func__,
			__LINE__);
		ret = -EINVAL;
		goto out_free_buffer;
	}

	coherent = dev_is_dma_coherent(scheduler->dev);
	ioprot = rga_dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, 0);
	domain = iommu_get_dma_domain(scheduler->dev);
	cookie = domain->iova_cookie;
	iovad = &cookie->iovad;

	/* Calculate page size. */
	count = rga_buf_size_cal(channel_info->yrgb_addr, channel_info->uv_addr,
				 channel_info->v_addr, format,
				 channel_info->vir_w, channel_info->vir_h,
				 &start_addr, &size);

	/* alloc pages and page_table */
	/*
	 * NOTE(review): 4096 is used instead of PAGE_SIZE here, and the same
	 * order is reused for the (smaller) uint32_t page_table — works but
	 * over-allocates; confirm this is intentional.
	 */
	order = get_order(size / 4096 * sizeof(struct page *));
	pages = (struct page **)__get_free_pages(GFP_KERNEL, order);
	if (pages == NULL) {
		pr_err("Can not alloc pages for pages\n");
		ret = -ENOMEM;
		goto out_free_buffer;
	}

	page_table = (uint32_t *)__get_free_pages(GFP_KERNEL, order);
	if (page_table == NULL) {
		pr_err("Can not alloc pages for page_table\n");
		ret = -ENOMEM;
		goto out_free_pages;
	}

	/* get pages from virtual address. */
	ret = rga_MapUserMemory(pages, page_table, start_addr, count, writeFlag, mm);
	if (ret) {
		pr_err("failed to get pages");
		ret = -EINVAL;
		goto out_free_pages_table;
	}

	/* The IOVA allocator works in iovad granules; round the size up. */
	size = iova_align(iovad, size);

	if (DEBUGGER_EN(MSG))
		pr_err("iova_align size = %ld", size);

	iova = rga_iommu_dma_alloc_iova(domain, size, scheduler->dev->coherent_dma_mask,
					scheduler->dev);
	if (!iova) {
		pr_err("rga_iommu_dma_alloc_iova failed");
		ret = -ENOMEM;
		goto out_free_pages_table;
	}

	/* get sg form pages. */
	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL)) {
		pr_err("sg_alloc_table_from_pages failed");
		ret = -ENOMEM;
		/* NOTE(review): out_free_sg calls sg_free_table() on the
		 * table that just failed to allocate — confirm this is safe
		 * for the kernel versions supported here. */
		goto out_free_sg;
	}

	/* Non-coherent master: flush CPU caches for every segment first. */
	if (!(ioprot & IOMMU_CACHE)) {
		struct scatterlist *sg;
		int i;

		for_each_sg(sgt.sgl, sg, sgt.orig_nents, i)
			arch_dma_prep_coherent(sg_page(sg), sg->length);
	}

	map_size = iommu_map_sg_atomic(domain, iova, sgt.sgl, sgt.orig_nents, ioprot);
	if (map_size < size) {
		pr_err("iommu can not map sgt to iova");
		ret = -EINVAL;
		goto out_free_sg;
	}

	/* Hand the IOVA to the hardware and record teardown information. */
	channel_info->yrgb_addr = iova;
	alloc_buffer->iova = iova;
	alloc_buffer->size = size;
	alloc_buffer->cookie = cookie;
	alloc_buffer->use_viraddr = true;
	alloc_buffer->domain = domain;

	/* The sg_table and scratch arrays are no longer needed once mapped. */
	sg_free_table(&sgt);

	free_pages((unsigned long)pages, order);
	free_pages((unsigned long)page_table, order);

	*rga_dma_buffer = alloc_buffer;

	return ret;

out_free_sg:
	sg_free_table(&sgt);
	rga_iommu_dma_free_iova(cookie, iova, size);

out_free_pages_table:
	free_pages((unsigned long)page_table, order);

out_free_pages:
	free_pages((unsigned long)pages, order);

out_free_buffer:
	kfree(alloc_buffer);

	return ret;
}
638
is_yuv422p_format(u32 format)639 static bool is_yuv422p_format(u32 format)
640 {
641 bool ret = false;
642
643 switch (format) {
644 case RGA2_FORMAT_YCbCr_422_P:
645 case RGA2_FORMAT_YCrCb_422_P:
646 ret = true;
647 break;
648 }
649 return ret;
650 }
651
/*
 * Derive the uv/v plane addresses of an image from its yrgb address.
 *
 * If it is not using dma fd, the virtual/physical address is assigned
 * to the address of the corresponding channel.
 *
 * if before_vir_get_channel is true, then convert addr by default;
 * when has iova (before_vir_get_channel is false),
 * need to consider whether fbc case.
 */
static void rga_convert_addr(struct rga_img_info_t *img, bool before_vir_get_channel)
{
	unsigned long luma_size = img->vir_w * img->vir_h;

	if (img->rd_mode == RGA_FBC_MODE && !before_vir_get_channel) {
		/* FBC layouts keep everything behind a single base address. */
		img->uv_addr = img->yrgb_addr;
		img->v_addr = 0;
		return;
	}

	img->uv_addr = img->yrgb_addr + luma_size;

	/* warning: rga3 may need /2 for all */
	if (is_yuv422p_format(img->format))
		img->v_addr = img->uv_addr + luma_size / 2;
	else
		img->v_addr = img->uv_addr + luma_size / 4;
}
681
/*
 * rga_get_format_bits - bits per pixel for an RGA2_FORMAT_* code.
 *
 * Return: bits per pixel, or -1 for an unrecognised format.
 */
int rga_get_format_bits(u32 format)
{
	switch (format) {
	case RGA2_FORMAT_RGBA_8888:
	case RGA2_FORMAT_RGBX_8888:
	case RGA2_FORMAT_BGRA_8888:
	case RGA2_FORMAT_BGRX_8888:
	case RGA2_FORMAT_ARGB_8888:
	case RGA2_FORMAT_XRGB_8888:
	case RGA2_FORMAT_ABGR_8888:
	case RGA2_FORMAT_XBGR_8888:
		return 32;
	case RGA2_FORMAT_RGB_888:
	case RGA2_FORMAT_BGR_888:
		return 24;
	case RGA2_FORMAT_RGB_565:
	case RGA2_FORMAT_RGBA_5551:
	case RGA2_FORMAT_RGBA_4444:
	case RGA2_FORMAT_BGR_565:
	case RGA2_FORMAT_YCbCr_422_SP:
	case RGA2_FORMAT_YCbCr_422_P:
	case RGA2_FORMAT_YCrCb_422_SP:
	case RGA2_FORMAT_YCrCb_422_P:
	case RGA2_FORMAT_BGRA_5551:
	case RGA2_FORMAT_BGRA_4444:
	case RGA2_FORMAT_ARGB_5551:
	case RGA2_FORMAT_ARGB_4444:
	case RGA2_FORMAT_ABGR_5551:
	case RGA2_FORMAT_ABGR_4444:
		return 16;
	case RGA2_FORMAT_YCbCr_420_SP:
	case RGA2_FORMAT_YCbCr_420_P:
	case RGA2_FORMAT_YCrCb_420_SP:
	case RGA2_FORMAT_YCrCb_420_P:
		return 12;
	case RGA2_FORMAT_YCbCr_420_SP_10B:
	case RGA2_FORMAT_YCrCb_420_SP_10B:
	case RGA2_FORMAT_YCbCr_422_SP_10B:
	case RGA2_FORMAT_YCrCb_422_SP_10B:
		return 15;
	default:
		pr_err("unknown format [%d]\n", format);
		return -1;
	}
}
736
/*
 * rga_virtual_memory_check - probe that the last image line is accessible.
 * @vaddr:	CPU-visible base address of the buffer
 * @w:		width in pixels
 * @h:		height in pixels
 * @format:	RGA2_FORMAT_* code (used only to get bits per pixel)
 * @fd:		> 0 selects the read probe (dma-buf case); otherwise a
 *		write probe is performed on the user virtual address
 *
 * Touches one line's worth of bytes at the offset of the last row: reads
 * it into a scratch buffer for the fd case, or writes zeros over it for
 * the virtual-address case (NOTE(review): the write probe clobbers the
 * last line of the image with zeros — presumably acceptable for a debug
 * check; confirm).
 *
 * Return: 0 if the access succeeded (or the scratch allocation failed —
 * NOTE(review): returning 0 on kzalloc failure looks like a deliberate
 * best-effort skip; confirm), -1 for an unknown format.
 */
static int rga_virtual_memory_check(void *vaddr, u32 w, u32 h, u32 format,
		int fd)
{
	int bits = 32;
	int temp_data = 0;
	void *one_line = NULL;

	bits = rga_get_format_bits(format);
	if (bits < 0)
		return -1;

	/* Scratch buffer sized for the widest supported format (32bpp). */
	one_line = kzalloc(w * 4, GFP_KERNEL);
	if (!one_line) {
		pr_err("kzalloc fail %s[%d]\n", __func__, __LINE__);
		return 0;
	}

	/* Byte offset of the first pixel of the last row. */
	temp_data = w * (h - 1) * bits >> 3;
	if (fd > 0) {
		pr_info("vaddr is%p, bits is %d, fd check\n", vaddr, bits);
		memcpy(one_line, (char *)vaddr + temp_data, w * bits >> 3);
		pr_info("fd check ok\n");
	} else {
		pr_info("vir addr memory check.\n");
		memcpy((void *)((char *)vaddr + temp_data), one_line,
			w * bits >> 3);
		pr_info("vir addr check ok.\n");
	}

	kfree(one_line);
	return 0;
}
769
rga_dma_memory_check(struct rga_dma_buffer_t * rga_dma_buffer,struct rga_img_info_t * img)770 static int rga_dma_memory_check(struct rga_dma_buffer_t *rga_dma_buffer,
771 struct rga_img_info_t *img)
772 {
773 int ret = 0;
774 void *vaddr;
775 struct dma_buf *dma_buf;
776
777 dma_buf = rga_dma_buffer->dma_buf;
778
779 if (!IS_ERR_OR_NULL(dma_buf)) {
780 vaddr = dma_buf_vmap(dma_buf);
781 if (vaddr) {
782 ret = rga_virtual_memory_check(vaddr, img->vir_w,
783 img->vir_h, img->format, img->yrgb_addr);
784 } else {
785 pr_err("can't vmap the dma buffer!\n");
786 return -EINVAL;
787 }
788
789 dma_buf_vunmap(dma_buf, vaddr);
790 }
791
792 return ret;
793 }
794
rga_dma_map_fd(int fd,struct rga_dma_buffer * rga_dma_buffer,enum dma_data_direction dir,struct device * rga_dev)795 int rga_dma_map_fd(int fd, struct rga_dma_buffer *rga_dma_buffer,
796 enum dma_data_direction dir, struct device *rga_dev)
797 {
798 struct dma_buf *dma_buf = NULL;
799 struct dma_buf_attachment *attach = NULL;
800 struct sg_table *sgt = NULL;
801 int ret = 0;
802
803 dma_buf = dma_buf_get(fd);
804 if (IS_ERR(dma_buf)) {
805 pr_err("dma_buf_get fail fd[%d]\n", fd);
806 ret = -EINVAL;
807 return ret;
808 }
809
810 attach = dma_buf_attach(dma_buf, rga_dev);
811 if (IS_ERR(attach)) {
812 pr_err("Failed to attach dma_buf\n");
813 ret = -EINVAL;
814 goto err_get_attach;
815 }
816
817 sgt = dma_buf_map_attachment(attach, dir);
818 if (IS_ERR(sgt)) {
819 pr_err("Failed to map src attachment\n");
820 ret = -EINVAL;
821 goto err_get_sgt;
822 }
823
824 rga_dma_buffer->dma_buf = dma_buf;
825 rga_dma_buffer->attach = attach;
826 rga_dma_buffer->sgt = sgt;
827 rga_dma_buffer->iova = sg_dma_address(sgt->sgl);
828 rga_dma_buffer->size = sg_dma_len(sgt->sgl);
829 rga_dma_buffer->dir = dir;
830
831 return ret;
832
833 err_get_sgt:
834 if (attach)
835 dma_buf_detach(dma_buf, attach);
836 err_get_attach:
837 if (dma_buf)
838 dma_buf_put(dma_buf);
839
840 return ret;
841 }
842
rga_dma_unmap_fd(struct rga_dma_buffer * rga_dma_buffer)843 void rga_dma_unmap_fd(struct rga_dma_buffer *rga_dma_buffer)
844 {
845 if (rga_dma_buffer->attach && rga_dma_buffer->sgt)
846 dma_buf_unmap_attachment(rga_dma_buffer->attach,
847 rga_dma_buffer->sgt,
848 rga_dma_buffer->dir);
849
850 if (rga_dma_buffer->attach) {
851 dma_buf_detach(rga_dma_buffer->dma_buf, rga_dma_buffer->attach);
852 dma_buf_put(rga_dma_buffer->dma_buf);
853 }
854 }
855
rga_dma_map_buffer(struct dma_buf * dma_buf,struct rga_dma_buffer_t * rga_dma_buffer,enum dma_data_direction dir,struct device * rga_dev)856 static int rga_dma_map_buffer(struct dma_buf *dma_buf,
857 struct rga_dma_buffer_t *rga_dma_buffer,
858 enum dma_data_direction dir, struct device *rga_dev)
859 {
860 struct dma_buf_attachment *attach = NULL;
861 struct sg_table *sgt = NULL;
862
863 int ret = 0;
864
865 attach = dma_buf_attach(dma_buf, rga_dev);
866 if (IS_ERR(attach)) {
867 ret = -EINVAL;
868 pr_err("Failed to attach dma_buf\n");
869 goto err_get_attach;
870 }
871
872 sgt = dma_buf_map_attachment(attach, dir);
873 if (IS_ERR(sgt)) {
874 ret = -EINVAL;
875 pr_err("Failed to map src attachment\n");
876 goto err_get_sg;
877 }
878
879 rga_dma_buffer->dma_buf = dma_buf;
880 rga_dma_buffer->attach = attach;
881 rga_dma_buffer->sgt = sgt;
882 rga_dma_buffer->iova = sg_dma_address(sgt->sgl);
883
884 /* TODO: size for check */
885 rga_dma_buffer->size = sg_dma_len(sgt->sgl);
886 rga_dma_buffer->dir = dir;
887
888 return ret;
889
890 err_get_sg:
891 if (sgt)
892 dma_buf_unmap_attachment(attach, sgt, dir);
893 if (attach)
894 dma_buf_detach(dma_buf, attach);
895 err_get_attach:
896 if (dma_buf && (rga_dma_buffer->use_dma_buf == false))
897 dma_buf_put(dma_buf);
898
899 return ret;
900 }
901
rga_dma_unmap_buffer(struct rga_dma_buffer_t * rga_dma_buffer)902 static void rga_dma_unmap_buffer(struct rga_dma_buffer_t *rga_dma_buffer)
903 {
904 if (rga_dma_buffer->attach && rga_dma_buffer->sgt) {
905 dma_buf_unmap_attachment(rga_dma_buffer->attach,
906 rga_dma_buffer->sgt, rga_dma_buffer->dir);
907 }
908
909 if (rga_dma_buffer->attach)
910 dma_buf_detach(rga_dma_buffer->dma_buf, rga_dma_buffer->attach);
911 }
912
/*
 * rga_dma_buf_get_channel_info - map a channel's dma-buf (if any) and record
 * the resulting IOVA.
 * @channel_info:	channel descriptor; on success yrgb_addr is replaced
 *			by the mapped IOVA (RGA3 cores) or cleared
 * @rga_dma_buffer:	out: buffer descriptor allocated and filled here;
 *			released later via rga_dma_put_channel_info()
 * @mmu_flag:		must be set when *dma_buf is present
 * @dma_buf:		the channel's dma-buf reference (owned by caller)
 * @core:		scheduler core whose device performs the mapping
 *
 * Return: 0 on success, negative errno on failure. On failure after the
 * descriptor was stored in *rga_dma_buffer, cleanup is the caller's
 * responsibility (see the note at the memory-check below).
 */
static int rga_dma_buf_get_channel_info(struct rga_img_info_t *channel_info,
		struct rga_dma_buffer_t **rga_dma_buffer, int mmu_flag,
		struct dma_buf **dma_buf, int core)
{
	int ret;
	struct rga_dma_buffer_t *alloc_buffer;
	struct rga_scheduler_t *scheduler = NULL;

	/* A dma-buf channel without the MMU enabled cannot be mapped. */
	if (unlikely(!mmu_flag && *dma_buf)) {
		pr_err("Fix it please enable mmu on dma buf channel\n");
		return -EINVAL;
	} else if (mmu_flag && *dma_buf) {
		/* perform a single mapping to dma buffer */
		alloc_buffer =
			kmalloc(sizeof(struct rga_dma_buffer_t),
				GFP_KERNEL);
		if (alloc_buffer == NULL) {
			pr_err("rga_dma_buffer alloc error!\n");
			return -ENOMEM;
		}

		alloc_buffer->use_dma_buf = false;
		alloc_buffer->use_viraddr = false;

		scheduler = rga_job_get_scheduler(core);
		if (scheduler == NULL) {
			pr_err("failed to get scheduler, %s(%d)\n", __func__,
				 __LINE__);
			kfree(alloc_buffer);
			ret = -EINVAL;
			return ret;
		}

		ret =
			rga_dma_map_buffer(*dma_buf, alloc_buffer,
					DMA_BIDIRECTIONAL, scheduler->dev);
		if (ret < 0) {
			pr_err("Can't map dma-buf\n");
			kfree(alloc_buffer);
			return ret;
		}

		/* Ownership of the descriptor passes to the caller here. */
		*rga_dma_buffer = alloc_buffer;
	}

	/* Optional debug probe of the mapped memory. */
	if (DEBUGGER_EN(CHECK_MODE)) {
		ret = rga_dma_memory_check(*rga_dma_buffer,
			channel_info);
		if (ret < 0) {
			pr_err("Channel check memory error!\n");
			/*
			 * Note: This error is released by external
			 * rga_dma_put_channel_info().
			 */
			return ret;
		}
	}

	/* The value of dma_fd is no longer needed. */
	channel_info->yrgb_addr = 0;

	/* RGA3 cores address the buffer through the IOMMU-provided IOVA. */
	if (core == RGA3_SCHEDULER_CORE0 || core == RGA3_SCHEDULER_CORE1)
		if (*rga_dma_buffer)
			channel_info->yrgb_addr = (*rga_dma_buffer)->iova;

	return 0;
}
980
rga_dma_put_channel_info(struct rga_dma_buffer_t ** rga_dma_buffer,struct dma_buf ** dma_buf)981 static void rga_dma_put_channel_info(struct rga_dma_buffer_t **rga_dma_buffer, struct dma_buf **dma_buf)
982 {
983 struct rga_dma_buffer_t *buffer;
984
985 buffer = *rga_dma_buffer;
986 if (buffer == NULL)
987 return;
988
989 if (buffer->use_viraddr)
990 return;
991
992 rga_dma_unmap_buffer(buffer);
993 if (*dma_buf)
994 dma_buf_put(*dma_buf);
995
996 kfree(buffer);
997
998 *rga_dma_buffer = NULL;
999 *dma_buf = NULL;
1000 }
1001
rga_dma_buf_get(struct rga_job * job)1002 int rga_dma_buf_get(struct rga_job *job)
1003 {
1004 int ret = -EINVAL;
1005 int mmu_flag;
1006
1007 struct rga_img_info_t *src0 = NULL;
1008 struct rga_img_info_t *src1 = NULL;
1009 struct rga_img_info_t *dst = NULL;
1010 struct rga_img_info_t *els = NULL;
1011
1012 src0 = &job->rga_command_base.src;
1013 dst = &job->rga_command_base.dst;
1014 if (job->rga_command_base.render_mode != UPDATE_PALETTE_TABLE_MODE)
1015 src1 = &job->rga_command_base.pat;
1016 else
1017 els = &job->rga_command_base.pat;
1018
1019 if (likely(src0 != NULL)) {
1020 mmu_flag = ((job->rga_command_base.mmu_info.mmu_flag >> 8) & 1);
1021 if (mmu_flag && src0->yrgb_addr) {
1022 job->dma_buf_src0 = dma_buf_get(src0->yrgb_addr);
1023 if (IS_ERR(job->dma_buf_src0)) {
1024 ret = -EINVAL;
1025 pr_err("%s src0 dma_buf_get fail fd[%lu]\n",
1026 __func__, (unsigned long)src0->yrgb_addr);
1027 return ret;
1028 }
1029 }
1030 }
1031
1032 if (likely(dst != NULL)) {
1033 mmu_flag = ((job->rga_command_base.mmu_info.mmu_flag >> 10) & 1);
1034 if (mmu_flag && dst->yrgb_addr) {
1035 job->dma_buf_dst = dma_buf_get(dst->yrgb_addr);
1036 if (IS_ERR(job->dma_buf_dst)) {
1037 ret = -EINVAL;
1038 pr_err("%s dst dma_buf_get fail fd[%lu]\n",
1039 __func__, (unsigned long)dst->yrgb_addr);
1040 return ret;
1041 }
1042 }
1043 }
1044
1045 if (src1 != NULL) {
1046 mmu_flag = ((job->rga_command_base.mmu_info.mmu_flag >> 9) & 1);
1047 if (mmu_flag && src1->yrgb_addr) {
1048 job->dma_buf_src1 = dma_buf_get(src1->yrgb_addr);
1049 if (IS_ERR(job->dma_buf_src0)) {
1050 ret = -EINVAL;
1051 pr_err("%s src1 dma_buf_get fail fd[%lu]\n",
1052 __func__, (unsigned long)src1->yrgb_addr);
1053 return ret;
1054 }
1055 }
1056 }
1057
1058 if (els != NULL) {
1059 mmu_flag = ((job->rga_command_base.mmu_info.mmu_flag >> 11) & 1);
1060 if (mmu_flag && els->yrgb_addr) {
1061 job->dma_buf_els = dma_buf_get(els->yrgb_addr);
1062 if (IS_ERR(job->dma_buf_els)) {
1063 ret = -EINVAL;
1064 pr_err("%s els dma_buf_get fail fd[%lu]\n",
1065 __func__, (unsigned long)els->yrgb_addr);
1066 return ret;
1067 }
1068 }
1069 }
1070
1071 return 0;
1072 }
1073
rga_dma_get_info(struct rga_job * job)1074 int rga_dma_get_info(struct rga_job *job)
1075 {
1076 int ret = 0;
1077 uint32_t mmu_flag;
1078 struct rga_img_info_t *src0 = NULL;
1079 struct rga_img_info_t *src1 = NULL;
1080 struct rga_img_info_t *dst = NULL;
1081 struct rga_img_info_t *els = NULL;
1082
1083 src0 = &job->rga_command_base.src;
1084 dst = &job->rga_command_base.dst;
1085 if (job->rga_command_base.render_mode != UPDATE_PALETTE_TABLE_MODE)
1086 src1 = &job->rga_command_base.pat;
1087 else
1088 els = &job->rga_command_base.pat;
1089
1090 /* src0 channel */
1091 if (likely(src0 != NULL)) {
1092 mmu_flag = ((job->rga_command_base.mmu_info.mmu_flag >> 8) & 1);
1093 if (job->dma_buf_src0 != NULL) {
1094 ret = rga_dma_buf_get_channel_info(src0,
1095 &job->rga_dma_buffer_src0, mmu_flag,
1096 &job->dma_buf_src0, job->core);
1097
1098 if (unlikely(ret < 0)) {
1099 pr_err("src0 channel get info error!\n");
1100 goto src0_channel_err;
1101 }
1102
1103 if (src0->yrgb_addr <= 0)
1104 job->rga_dma_buffer_src0->use_dma_buf = true;
1105 } else {
1106 src0->yrgb_addr = src0->uv_addr;
1107 rga_convert_addr(src0, true);
1108 if (job->core == RGA3_SCHEDULER_CORE0 || job->core == RGA3_SCHEDULER_CORE1) {
1109 if (src0->yrgb_addr > 0 && mmu_flag) {
1110 ret = rga_viraddr_get_channel_info(src0, &job->rga_dma_buffer_src0,
1111 0, job->core, job->mm);
1112
1113 if (unlikely(ret < 0)) {
1114 pr_err("src0 channel viraddr get info error!\n");
1115 return ret;
1116 }
1117 }
1118 }
1119 }
1120
1121 rga_convert_addr(src0, false);
1122 }
1123
1124 /* dst channel */
1125 if (likely(dst != NULL)) {
1126 mmu_flag = ((job->rga_command_base.mmu_info.mmu_flag >> 10) & 1);
1127 if (job->dma_buf_dst != NULL) {
1128 ret = rga_dma_buf_get_channel_info(dst,
1129 &job->rga_dma_buffer_dst, mmu_flag,
1130 &job->dma_buf_dst, job->core);
1131
1132 if (unlikely(ret < 0)) {
1133 pr_err("dst channel get info error!\n");
1134 goto dst_channel_err;
1135 }
1136
1137 if (dst->yrgb_addr <= 0)
1138 job->rga_dma_buffer_dst->use_dma_buf = true;
1139 } else {
1140 dst->yrgb_addr = dst->uv_addr;
1141 rga_convert_addr(dst, true);
1142 if (job->core == RGA3_SCHEDULER_CORE0 || job->core == RGA3_SCHEDULER_CORE1) {
1143 if (dst->yrgb_addr > 0 && mmu_flag) {
1144 ret = rga_viraddr_get_channel_info(dst, &job->rga_dma_buffer_dst,
1145 1, job->core, job->mm);
1146
1147 if (unlikely(ret < 0)) {
1148 pr_err("dst channel viraddr get info error!\n");
1149 return ret;
1150 }
1151 }
1152 }
1153 }
1154
1155 rga_convert_addr(dst, false);
1156 }
1157
1158 /* src1 channel */
1159 if (src1 != NULL) {
1160 mmu_flag = ((job->rga_command_base.mmu_info.mmu_flag >> 9) & 1);
1161 if (job->dma_buf_src1 != NULL) {
1162 ret = rga_dma_buf_get_channel_info(src1,
1163 &job->rga_dma_buffer_src1, mmu_flag,
1164 &job->dma_buf_src1, job->core);
1165
1166 if (unlikely(ret < 0)) {
1167 pr_err("src1 channel get info error!\n");
1168 goto src1_channel_err;
1169 }
1170
1171 if (src1->yrgb_addr <= 0)
1172 job->rga_dma_buffer_src1->use_dma_buf = true;
1173 } else {
1174 src1->yrgb_addr = src1->uv_addr;
1175 rga_convert_addr(src1, true);
1176 if (job->core == RGA3_SCHEDULER_CORE0 || job->core == RGA3_SCHEDULER_CORE1) {
1177 if (src1->yrgb_addr > 0 && mmu_flag) {
1178 ret = rga_viraddr_get_channel_info(src1, &job->rga_dma_buffer_src1,
1179 0, job->core, job->mm);
1180
1181 if (unlikely(ret < 0)) {
1182 pr_err("src1 channel viraddr get info error!\n");
1183 return ret;
1184 }
1185 }
1186 }
1187 }
1188
1189 rga_convert_addr(src1, false);
1190 }
1191
1192 /* els channel */
1193 if (els != NULL) {
1194 mmu_flag = ((job->rga_command_base.mmu_info.mmu_flag >> 11) & 1);
1195 if (job->dma_buf_els != NULL) {
1196 ret = rga_dma_buf_get_channel_info(els,
1197 &job->rga_dma_buffer_els, mmu_flag,
1198 &job->dma_buf_els, job->core);
1199
1200 if (unlikely(ret < 0)) {
1201 pr_err("els channel get info error!\n");
1202 goto els_channel_err;
1203 }
1204
1205 if (els->yrgb_addr <= 0)
1206 job->rga_dma_buffer_els->use_dma_buf = true;
1207 } else {
1208 els->yrgb_addr = els->uv_addr;
1209 rga_convert_addr(els, true);
1210 if (job->core == RGA3_SCHEDULER_CORE0 || job->core == RGA3_SCHEDULER_CORE1) {
1211 if (els->yrgb_addr > 0 && mmu_flag) {
1212 ret = rga_viraddr_get_channel_info(els, &job->rga_dma_buffer_els,
1213 0, job->core, job->mm);
1214
1215 if (unlikely(ret < 0)) {
1216 pr_err("els channel viraddr get info error!\n");
1217 return ret;
1218 }
1219 }
1220 }
1221 }
1222
1223 rga_convert_addr(els, false);
1224 }
1225
1226 return 0;
1227
1228 els_channel_err:
1229 rga_dma_put_channel_info(&job->rga_dma_buffer_els, &job->dma_buf_els);
1230 dst_channel_err:
1231 rga_dma_put_channel_info(&job->rga_dma_buffer_dst, &job->dma_buf_dst);
1232 src1_channel_err:
1233 rga_dma_put_channel_info(&job->rga_dma_buffer_src1, &job->dma_buf_src1);
1234 src0_channel_err:
1235 rga_dma_put_channel_info(&job->rga_dma_buffer_src0, &job->dma_buf_src0);
1236
1237 return ret;
1238 }
1239
/**
 * rga_dma_put_info - release every per-channel buffer reference held by @job.
 * @job: job whose src0/src1/dst/els channel mappings are to be dropped.
 *
 * For each of the four channels this drops both kinds of mapping that
 * rga_dma_get_info() may have created: the dma-buf based mapping
 * (rga_dma_put_channel_info, which also releases job->dma_buf_*) and the
 * user-virtual-address based mapping (rga_viraddr_put_channel_info).
 *
 * NOTE(review): only one of the two mappings can have been created per
 * channel, so presumably each put is a no-op for a channel that was never
 * mapped that way (or not mapped at all) — confirm in the helpers.
 */
void rga_dma_put_info(struct rga_job *job)
{
	rga_dma_put_channel_info(&job->rga_dma_buffer_src0, &job->dma_buf_src0);
	rga_viraddr_put_channel_info(&job->rga_dma_buffer_src0);
	rga_dma_put_channel_info(&job->rga_dma_buffer_src1, &job->dma_buf_src1);
	rga_viraddr_put_channel_info(&job->rga_dma_buffer_src1);
	rga_dma_put_channel_info(&job->rga_dma_buffer_dst, &job->dma_buf_dst);
	rga_viraddr_put_channel_info(&job->rga_dma_buffer_dst);
	rga_dma_put_channel_info(&job->rga_dma_buffer_els, &job->dma_buf_els);
	rga_viraddr_put_channel_info(&job->rga_dma_buffer_els);
}
1251