// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) Rockchip Electronics Co., Ltd.
 *
 * Author: Cerf Yu <cerf.yu@rock-chips.com>
 */

#define pr_fmt(fmt) "rga_mm: " fmt

#include "rga.h"
#include "rga_job.h"
#include "rga_mm.h"
#include "rga_dma_buf.h"
#include "rga_hw_config.h"

static bool is_yuv422p_format(u32 format)
{
	bool ret = false;

	switch (format) {
	case RGA2_FORMAT_YCbCr_422_P:
	case RGA2_FORMAT_YCrCb_422_P:
		ret = true;
		break;
	}
	return ret;
}

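/*
 * Derive the UV/V plane addresses from the base (Y/RGB) address,
 * assuming a contiguous planar layout of vir_w x vir_h. In FBC mode
 * the UV address aliases the base address and no V plane is used.
 */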
static void rga_convert_addr(struct rga_img_info_t *img)
{
	if (img->rd_mode != RGA_FBC_MODE) {
		img->uv_addr = img->yrgb_addr + (img->vir_w * img->vir_h);

		//warning: rga3 may need /2 for all
		if (is_yuv422p_format(img->format))
			img->v_addr =
				img->uv_addr + (img->vir_w * img->vir_h) / 2;
		else
			img->v_addr =
				img->uv_addr + (img->vir_w * img->vir_h) / 4;
	} else {
		img->uv_addr = img->yrgb_addr;
		img->v_addr = 0;
	}
}

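/*
 * Compatibility wrappers for taking the mm's mmap read lock: newer
 * kernels use the mmap_read_lock()/mmap_read_unlock() helpers, while
 * older ones take mmap_sem directly.
 */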
static void rga_current_mm_read_lock(struct mm_struct *mm)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
	mmap_read_lock(mm);
#else
	down_read(&mm->mmap_sem);
#endif
}

static void rga_current_mm_read_unlock(struct mm_struct *mm)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
	mmap_read_unlock(mm);
#else
	up_read(&mm->mmap_sem);
#endif
}

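/*
 * Fallback path for resolving user pages: walk the page tables of
 * current_mm by hand (pgd -> p4d -> pud -> pmd -> pte) and translate
 * each PFN to a struct page. Unlike get_user_pages(), this path does
 * not take an extra reference on the pages.
 */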
static int rga_get_user_pages_from_vma(struct page **pages, unsigned long Memory,
				       uint32_t pageCount, struct mm_struct *current_mm)
{
	int ret = 0;
	int i;
	struct vm_area_struct *vma;
	spinlock_t *ptl;
	pte_t *pte;
	pgd_t *pgd;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
	p4d_t *p4d;
#endif
	pud_t *pud;
	pmd_t *pmd;
	unsigned long pfn;

	for (i = 0; i < pageCount; i++) {
		vma = find_vma(current_mm, (Memory + i) << PAGE_SHIFT);
		if (!vma) {
			pr_err("failed to get vma\n");
			ret = RGA_OUT_OF_RESOURCES;
			break;
		}

		pgd = pgd_offset(current_mm, (Memory + i) << PAGE_SHIFT);
		if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) {
			pr_err("failed to get pgd\n");
			ret = RGA_OUT_OF_RESOURCES;
			break;
		}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
		/*
		 * On four-level page tables, p4d_offset() is a no-op
		 * that simply returns the pgd entry.
		 */
		p4d = p4d_offset(pgd, (Memory + i) << PAGE_SHIFT);
		if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d))) {
			pr_err("failed to get p4d\n");
			ret = RGA_OUT_OF_RESOURCES;
			break;
		}

		pud = pud_offset(p4d, (Memory + i) << PAGE_SHIFT);
#else
		pud = pud_offset(pgd, (Memory + i) << PAGE_SHIFT);
#endif

		if (pud_none(*pud) || unlikely(pud_bad(*pud))) {
			pr_err("failed to get pud\n");
			ret = RGA_OUT_OF_RESOURCES;
			break;
		}
		pmd = pmd_offset(pud, (Memory + i) << PAGE_SHIFT);
		if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) {
			pr_err("failed to get pmd\n");
			ret = RGA_OUT_OF_RESOURCES;
			break;
		}
		pte = pte_offset_map_lock(current_mm, pmd,
					  (Memory + i) << PAGE_SHIFT, &ptl);
		if (pte_none(*pte)) {
			pr_err("failed to get pte\n");
			pte_unmap_unlock(pte, ptl);
			ret = RGA_OUT_OF_RESOURCES;
			break;
		}

		pfn = pte_pfn(*pte);
		pages[i] = pfn_to_page(pfn);
		pte_unmap_unlock(pte, ptl);
	}

	return ret;
}

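/*
 * Pin the pageCount user pages starting at page frame Memory with
 * get_user_pages(); if the whole range cannot be pinned, drop any
 * partially pinned pages and fall back to walking the VMA page tables.
 */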
static int rga_get_user_pages(struct page **pages, unsigned long Memory,
			      uint32_t pageCount, int writeFlag,
			      struct mm_struct *current_mm)
{
	uint32_t i;
	int32_t ret = 0;
	int32_t result;

	rga_current_mm_read_lock(current_mm);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 168) && \
    LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)
	result = get_user_pages(current, current_mm, Memory << PAGE_SHIFT,
				pageCount, writeFlag ? FOLL_WRITE : 0,
				pages, NULL);
#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
	result = get_user_pages(current, current_mm, Memory << PAGE_SHIFT,
				pageCount, writeFlag, 0, pages, NULL);
#elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
	result = get_user_pages_remote(current, current_mm,
				       Memory << PAGE_SHIFT,
				       pageCount, writeFlag, pages, NULL, NULL);
#else
	result = get_user_pages_remote(current_mm, Memory << PAGE_SHIFT,
				       pageCount, writeFlag, pages, NULL, NULL);
#endif

	if (result > 0 && result >= pageCount) {
		ret = result;
	} else {
		if (result > 0)
			for (i = 0; i < result; i++)
				put_page(pages[i]);

		ret = rga_get_user_pages_from_vma(pages, Memory, pageCount, current_mm);
		if (ret < 0) {
			pr_err("Can not get user pages from vma, result = %d, pagecount = %d\n",
			       result, pageCount);
		}
	}

	rga_current_mm_read_unlock(current_mm);

	return ret;
}

static void rga_free_sgt(struct rga_dma_buffer *virt_dma_buf)
{
	if (virt_dma_buf->sgt == NULL)
		return;

	sg_free_table(virt_dma_buf->sgt);
	kfree(virt_dma_buf->sgt);
	virt_dma_buf->sgt = NULL;
}

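/*
 * Build a scatter-gather table covering the pinned user pages so the
 * buffer can later be mapped for DMA or through the IOMMU.
 */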
static int rga_alloc_sgt(struct rga_virt_addr *virt_addr,
			 struct rga_dma_buffer *virt_dma_buf)
{
	int ret;
	struct sg_table *sgt = NULL;

	sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (sgt == NULL) {
		pr_err("%s alloc sgt error!\n", __func__);
		return -ENOMEM;
	}

	/* Get the sg table from the pinned pages. */
	if (sg_alloc_table_from_pages(sgt, virt_addr->pages,
				      virt_addr->page_count, 0,
				      virt_addr->size, GFP_KERNEL)) {
		pr_err("sg_alloc_table_from_pages failed\n");
		ret = -ENOMEM;
		goto out_free_sgt;
	}

	virt_dma_buf->sgt = sgt;
	virt_dma_buf->size = virt_addr->size;

	return 0;

out_free_sgt:
	kfree(sgt);

	return ret;
}

static void rga_free_virt_addr(struct rga_virt_addr **virt_addr_p)
{
	int i;
	struct rga_virt_addr *virt_addr = NULL;

	if (virt_addr_p == NULL)
		return;

	virt_addr = *virt_addr_p;
	if (virt_addr == NULL)
		return;

	for (i = 0; i < virt_addr->result; i++)
		put_page(virt_addr->pages[i]);

	free_pages((unsigned long)virt_addr->pages, virt_addr->pages_order);
	kfree(virt_addr);
	*virt_addr_p = NULL;
}

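/*
 * Describe a userspace virtual address range: work out its size from
 * the image parameters, allocate a pages array, pin the backing pages
 * and record the result in a freshly allocated struct rga_virt_addr.
 */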
static int rga_alloc_virt_addr(struct rga_virt_addr **virt_addr_p,
			       uint64_t viraddr,
			       struct rga_memory_parm *memory_parm,
			       int writeFlag,
			       struct mm_struct *mm)
{
	int i;
	int ret;
	int result = 0;
	int order;
	int format;
	unsigned int count;
	unsigned long start_addr;
	unsigned long size;
	struct page **pages = NULL;
	struct rga_virt_addr *virt_addr = NULL;

	user_format_convert(&format, memory_parm->format);

	/* Calculate the buffer size and page count. */
	count = rga_buf_size_cal(viraddr, viraddr, viraddr, format,
				 memory_parm->width, memory_parm->height,
				 &start_addr, &size);

	/* Allocate the pages array. */
	order = get_order(size / 4096 * sizeof(struct page *));
	pages = (struct page **)__get_free_pages(GFP_KERNEL, order);
	if (pages == NULL) {
		pr_err("%s can not alloc pages for pages\n", __func__);
		return -ENOMEM;
	}

	/* Get the pages backing the virtual address. */
	ret = rga_get_user_pages(pages, start_addr, count, writeFlag, mm);
	if (ret < 0) {
		pr_err("failed to get pages\n");
		ret = -EINVAL;
		goto out_free_pages;
	} else if (ret > 0) {
		/* Remember how many pages were pinned, for put_page() later. */
		result = ret;
	}

	*virt_addr_p = kzalloc(sizeof(struct rga_virt_addr), GFP_KERNEL);
	if (*virt_addr_p == NULL) {
		pr_err("%s alloc virt_addr error!\n", __func__);
		ret = -ENOMEM;
		goto out_put_and_free_pages;
	}
	virt_addr = *virt_addr_p;

	virt_addr->addr = viraddr;
	virt_addr->pages = pages;
	virt_addr->pages_order = order;
	virt_addr->page_count = count;
	virt_addr->size = size;
	virt_addr->result = result;

	return 0;

out_put_and_free_pages:
	for (i = 0; i < result; i++)
		put_page(pages[i]);
out_free_pages:
	free_pages((unsigned long)pages, order);

	return ret;
}

/* Return 1 (true) if the whole buffer lies within the 0~4G physical range. */
static int rga_mm_check_range_sgt(struct sg_table *sgt)
{
	int i;
	struct scatterlist *sg;
	phys_addr_t s_phys = 0;

	for_each_sgtable_sg(sgt, sg, i) {
		s_phys = sg_phys(sg);
		if ((s_phys > 0xffffffff) || (s_phys + sg->length > 0xffffffff))
			return 0;
	}

	return 1;
}

static void rga_mm_unmap_dma_buffer(struct rga_internal_buffer *internal_buffer)
{
	int i;

	for (i = 0; i < internal_buffer->dma_buffer_size; i++)
		rga_dma_unmap_fd(&internal_buffer->dma_buffer[i]);

	kfree(internal_buffer->dma_buffer);
	internal_buffer->dma_buffer = NULL;
}

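/*
 * Attach and map the external dma-buf fd once per scheduler core. The
 * first mapping is used to check whether the buffer sits entirely
 * below 4G; if it does not, the RGA2 core (which appears limited to
 * 32-bit physical addresses) is skipped.
 */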
static int rga_mm_map_dma_buffer(struct rga_external_buffer *external_buffer,
				 struct rga_internal_buffer *internal_buffer)
{
	int ret, i;

	internal_buffer->dma_buffer_size = rga_drvdata->num_of_scheduler;
	internal_buffer->dma_buffer = kcalloc(internal_buffer->dma_buffer_size,
					      sizeof(struct rga_dma_buffer), GFP_KERNEL);
	if (internal_buffer->dma_buffer == NULL) {
		pr_err("%s alloc internal_buffer error!\n", __func__);
		return -ENOMEM;
	}

	for (i = 0; i < internal_buffer->dma_buffer_size; i++) {
		/* If the physical address is above 4G, there is no need to map it for RGA2. */
		if ((rga_drvdata->rga_scheduler[i]->core == RGA2_SCHEDULER_CORE0) &&
		    (~internal_buffer->mm_flag & RGA_MM_UNDER_4G))
			continue;

		ret = rga_dma_map_fd((int)external_buffer->memory,
				     &internal_buffer->dma_buffer[i],
				     DMA_BIDIRECTIONAL,
				     rga_drvdata->rga_scheduler[i]->dev);
		if (ret < 0) {
			pr_err("%s core[%d] map dma buffer error!\n",
				__func__, rga_drvdata->rga_scheduler[i]->core);
			goto FREE_RGA_DMA_BUF;
		}

		internal_buffer->dma_buffer[i].core = rga_drvdata->rga_scheduler[i]->core;

		/* On the first mapping, check whether the buffer lies entirely below 4G. */
		if (i == 0)
			if (rga_mm_check_range_sgt(internal_buffer->dma_buffer[0].sgt))
				internal_buffer->mm_flag |= RGA_MM_UNDER_4G;
	}

	return 0;

FREE_RGA_DMA_BUF:
	rga_mm_unmap_dma_buffer(internal_buffer);

	return ret;
}

static void rga_mm_unmap_virt_addr(struct rga_internal_buffer *internal_buffer)
{
	int i;

	WARN_ON(internal_buffer->dma_buffer == NULL || internal_buffer->virt_addr == NULL);

	for (i = 0; i < internal_buffer->dma_buffer_size; i++)
		if (rga_drvdata->rga_scheduler[i]->core == RGA3_SCHEDULER_CORE0 ||
		    rga_drvdata->rga_scheduler[i]->core == RGA3_SCHEDULER_CORE1)
			rga_iommu_unmap_virt_addr(&internal_buffer->dma_buffer[i]);
		else if (internal_buffer->dma_buffer[i].core != 0)
			dma_unmap_sg(rga_drvdata->rga_scheduler[i]->dev,
				     internal_buffer->dma_buffer[i].sgt->sgl,
				     internal_buffer->dma_buffer[i].sgt->orig_nents,
				     DMA_BIDIRECTIONAL);

	for (i = 0; i < internal_buffer->dma_buffer_size; i++)
		rga_free_sgt(&internal_buffer->dma_buffer[i]);
	kfree(internal_buffer->dma_buffer);
	internal_buffer->dma_buffer_size = 0;

	rga_free_virt_addr(&internal_buffer->virt_addr);

	mmput(internal_buffer->current_mm);
	mmdrop(internal_buffer->current_mm);
	internal_buffer->current_mm = NULL;
}

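/*
 * Map a userspace virtual address for every scheduler core: pin the
 * pages, build one sg table per core, then map it either through the
 * RGA3 IOMMU or with dma_map_sg() for cores without an IOMMU path.
 * A reference on the caller's mm is held for the lifetime of the
 * mapping.
 */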
static int rga_mm_map_virt_addr(struct rga_external_buffer *external_buffer,
				struct rga_internal_buffer *internal_buffer)
{
	int i;
	int ret;

	internal_buffer->current_mm = current->mm;
	if (internal_buffer->current_mm == NULL) {
		pr_err("%s, cannot get current mm!\n", __func__);
		return -EFAULT;
	}
	mmgrab(internal_buffer->current_mm);
	mmget(internal_buffer->current_mm);

	ret = rga_alloc_virt_addr(&internal_buffer->virt_addr,
				  external_buffer->memory,
				  &internal_buffer->memory_parm,
				  0, internal_buffer->current_mm);
	if (ret < 0) {
		pr_err("Can not alloc rga_virt_addr from 0x%lx\n",
		       (unsigned long)external_buffer->memory);
		goto put_current_mm;
	}

	internal_buffer->dma_buffer = kcalloc(rga_drvdata->num_of_scheduler,
					      sizeof(struct rga_dma_buffer), GFP_KERNEL);
	if (internal_buffer->dma_buffer == NULL) {
		pr_err("%s alloc internal_buffer->dma_buffer error!\n", __func__);
		ret = -ENOMEM;
		goto free_virt_addr;
	}
	internal_buffer->dma_buffer_size = rga_drvdata->num_of_scheduler;

	for (i = 0; i < internal_buffer->dma_buffer_size; i++) {
		/* If the physical address is above 4G, there is no need to map it for RGA2. */
		if ((rga_drvdata->rga_scheduler[i]->core == RGA2_SCHEDULER_CORE0) &&
		    (~internal_buffer->mm_flag & RGA_MM_UNDER_4G) &&
		    i != 0)
			continue;

		ret = rga_alloc_sgt(internal_buffer->virt_addr,
				    &internal_buffer->dma_buffer[i]);
		if (ret < 0) {
			pr_err("%s core[%d] alloc sgt error!\n", __func__,
			       rga_drvdata->rga_scheduler[i]->core);
			goto free_sgt_and_dma_buffer;
		}

		if (i == 0)
			if (rga_mm_check_range_sgt(internal_buffer->dma_buffer[0].sgt))
				internal_buffer->mm_flag |= RGA_MM_UNDER_4G;
	}

	for (i = 0; i < internal_buffer->dma_buffer_size; i++) {
		if ((rga_drvdata->rga_scheduler[i]->core == RGA2_SCHEDULER_CORE0) &&
		    (~internal_buffer->mm_flag & RGA_MM_UNDER_4G))
			continue;

		if (rga_drvdata->rga_scheduler[i]->core == RGA3_SCHEDULER_CORE0 ||
		    rga_drvdata->rga_scheduler[i]->core == RGA3_SCHEDULER_CORE1) {
			ret = rga_iommu_map_virt_addr(&internal_buffer->memory_parm,
						      &internal_buffer->dma_buffer[i],
						      rga_drvdata->rga_scheduler[i]->dev,
						      internal_buffer->current_mm);
			if (ret < 0) {
				pr_err("%s core[%d] iommu_map virtual address error!\n",
				       __func__, rga_drvdata->rga_scheduler[i]->core);
				goto unmap_virt_addr;
			}
		} else {
			ret = dma_map_sg(rga_drvdata->rga_scheduler[i]->dev,
					 internal_buffer->dma_buffer[i].sgt->sgl,
					 internal_buffer->dma_buffer[i].sgt->orig_nents,
					 DMA_BIDIRECTIONAL);
			if (ret == 0) {
				pr_err("%s core[%d] dma_map_sgt error! va = 0x%lx, nents = %d\n",
				       __func__, rga_drvdata->rga_scheduler[i]->core,
				       (unsigned long)internal_buffer->virt_addr->addr,
				       internal_buffer->dma_buffer[i].sgt->orig_nents);
				goto unmap_virt_addr;
			}
		}

		internal_buffer->dma_buffer[i].core = rga_drvdata->rga_scheduler[i]->core;
	}

	return 0;

unmap_virt_addr:
	for (i = 0; i < internal_buffer->dma_buffer_size; i++)
		if (rga_drvdata->rga_scheduler[i]->core == RGA3_SCHEDULER_CORE0 ||
		    rga_drvdata->rga_scheduler[i]->core == RGA3_SCHEDULER_CORE1)
			rga_iommu_unmap_virt_addr(&internal_buffer->dma_buffer[i]);
		else if (internal_buffer->dma_buffer[i].core != 0)
			dma_unmap_sg(rga_drvdata->rga_scheduler[i]->dev,
				     internal_buffer->dma_buffer[i].sgt->sgl,
				     internal_buffer->dma_buffer[i].sgt->orig_nents,
				     DMA_BIDIRECTIONAL);
free_sgt_and_dma_buffer:
	for (i = 0; i < internal_buffer->dma_buffer_size; i++)
		rga_free_sgt(&internal_buffer->dma_buffer[i]);
	kfree(internal_buffer->dma_buffer);
free_virt_addr:
	rga_free_virt_addr(&internal_buffer->virt_addr);
put_current_mm:
	mmput(internal_buffer->current_mm);
	mmdrop(internal_buffer->current_mm);
	internal_buffer->current_mm = NULL;

	return ret;
}

static int rga_mm_unmap_buffer(struct rga_internal_buffer *internal_buffer)
{
	switch (internal_buffer->type) {
	case RGA_DMA_BUFFER:
		rga_mm_unmap_dma_buffer(internal_buffer);
		break;
	case RGA_VIRTUAL_ADDRESS:
		rga_mm_unmap_virt_addr(internal_buffer);
		break;
	case RGA_PHYSICAL_ADDRESS:
		internal_buffer->phys_addr = 0;
		break;
	default:
		pr_err("Illegal external buffer!\n");
		return -EFAULT;
	}

	return 0;
}

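/*
 * Map an external buffer into the driver according to its type:
 * dma-buf fd, userspace virtual address, or physical address.
 */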
static int rga_mm_map_buffer(struct rga_external_buffer *external_buffer,
	struct rga_internal_buffer *internal_buffer)
{
	int ret;

	memcpy(&internal_buffer->memory_parm, &external_buffer->memory_parm,
	       sizeof(internal_buffer->memory_parm));

	switch (external_buffer->type) {
	case RGA_DMA_BUFFER:
		internal_buffer->type = RGA_DMA_BUFFER;

		ret = rga_mm_map_dma_buffer(external_buffer, internal_buffer);
		if (ret < 0) {
			pr_err("%s map dma_buf error!\n", __func__);
			return ret;
		}

		internal_buffer->mm_flag |= RGA_MM_NEED_USE_IOMMU;
		break;
	case RGA_VIRTUAL_ADDRESS:
		internal_buffer->type = RGA_VIRTUAL_ADDRESS;

		ret = rga_mm_map_virt_addr(external_buffer, internal_buffer);
		if (ret < 0) {
			pr_err("%s iommu_map virtual address error!\n", __func__);
			return ret;
		}

		internal_buffer->mm_flag |= RGA_MM_NEED_USE_IOMMU;
		break;
	case RGA_PHYSICAL_ADDRESS:
		internal_buffer->type = RGA_PHYSICAL_ADDRESS;

		internal_buffer->phys_addr = external_buffer->memory;
		break;
	default:
		pr_err("Illegal external buffer!\n");
		return -EFAULT;
	}

	return 0;
}

static void rga_mm_kref_release_buffer(struct kref *ref)
{
	struct rga_internal_buffer *internal_buffer;

	internal_buffer = container_of(ref, struct rga_internal_buffer, refcount);
	rga_mm_unmap_buffer(internal_buffer);

	idr_remove(&rga_drvdata->mm->memory_idr, internal_buffer->handle);
	kfree(internal_buffer);
	rga_drvdata->mm->buffer_count--;
}

/*
 * Called at driver close to release the memory's handle references.
 */
static int rga_mm_handle_remove(int id, void *ptr, void *data)
{
	struct rga_internal_buffer *internal_buffer = ptr;

	rga_mm_kref_release_buffer(&internal_buffer->refcount);

	return 0;
}

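/*
 * Look up an already-imported buffer that matches the external buffer
 * descriptor (same dma-buf, virtual address or physical address), so
 * that repeated imports can share one mapping. Must be called with
 * mm_session->lock held.
 */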
static struct rga_internal_buffer *
rga_mm_lookup_external(struct rga_mm *mm_session,
		       struct rga_external_buffer *external_buffer)
{
	int id;
	struct dma_buf *dma_buf = NULL;
	struct rga_internal_buffer *temp_buffer = NULL;
	struct rga_internal_buffer *output_buffer = NULL;

	WARN_ON(!mutex_is_locked(&mm_session->lock));

	switch (external_buffer->type) {
	case RGA_DMA_BUFFER:
		dma_buf = dma_buf_get((int)external_buffer->memory);
		if (IS_ERR(dma_buf))
			return (struct rga_internal_buffer *)dma_buf;

		idr_for_each_entry(&mm_session->memory_idr, temp_buffer, id) {
			if (temp_buffer->dma_buffer == NULL)
				continue;

			if (temp_buffer->dma_buffer[0].dma_buf == dma_buf) {
				output_buffer = temp_buffer;
				break;
			}
		}

		dma_buf_put(dma_buf);
		break;
	case RGA_VIRTUAL_ADDRESS:
		idr_for_each_entry(&mm_session->memory_idr, temp_buffer, id) {
			if (temp_buffer->virt_addr == NULL)
				continue;

			if (temp_buffer->virt_addr->addr == external_buffer->memory) {
				output_buffer = temp_buffer;
				break;
			}
		}

		break;
	case RGA_PHYSICAL_ADDRESS:
		idr_for_each_entry(&mm_session->memory_idr, temp_buffer, id) {
			if (temp_buffer->phys_addr == external_buffer->memory) {
				output_buffer = temp_buffer;
				break;
			}
		}

		break;
	default:
		pr_err("Illegal external buffer!\n");
		return NULL;
	}

	return output_buffer;
}

struct rga_internal_buffer *rga_mm_lookup_handle(struct rga_mm *mm_session, uint32_t handle)
{
	struct rga_internal_buffer *output_buffer;

	WARN_ON(!mutex_is_locked(&mm_session->lock));

	output_buffer = idr_find(&mm_session->memory_idr, handle);

	return output_buffer;
}

int rga_mm_lookup_flag(struct rga_mm *mm_session, uint64_t handle)
{
	struct rga_internal_buffer *output_buffer;

	output_buffer = rga_mm_lookup_handle(mm_session, handle);
	if (output_buffer == NULL) {
		pr_err("This handle[%ld] is illegal.\n", (unsigned long)handle);
		return -EINVAL;
	}

	return output_buffer->mm_flag;
}

dma_addr_t rga_mm_lookup_iova(struct rga_internal_buffer *buffer, int core)
{
	int i;

	for (i = 0; i < buffer->dma_buffer_size; i++)
		if (buffer->dma_buffer[i].core == core)
			return buffer->dma_buffer[i].iova;

	return 0;
}

struct sg_table *rga_mm_lookup_sgt(struct rga_internal_buffer *buffer, int core)
{
	int i;

	for (i = 0; i < buffer->dma_buffer_size; i++)
		if (buffer->dma_buffer[i].core == core)
			return buffer->dma_buffer[i].sgt;

	return NULL;
}

void rga_mm_dump_info(struct rga_mm *mm_session)
{
	int id, i;
	struct rga_internal_buffer *dump_buffer;

	WARN_ON(!mutex_is_locked(&mm_session->lock));

	pr_info("rga mm info:\n");

	pr_info("buffer count = %d\n", mm_session->buffer_count);
	pr_info("===============================================================\n");

	idr_for_each_entry(&mm_session->memory_idr, dump_buffer, id) {
		pr_info("handle = %d	refcount = %d	mm_flag = 0x%x\n",
			dump_buffer->handle, kref_read(&dump_buffer->refcount),
			dump_buffer->mm_flag);

		switch (dump_buffer->type) {
		case RGA_DMA_BUFFER:
			pr_info("dma_buffer:\n");
			for (i = 0; i < dump_buffer->dma_buffer_size; i++) {
				pr_info("\t core %d:\n", dump_buffer->dma_buffer[i].core);
				pr_info("\t\t dma_buf = %p, iova = 0x%lx\n",
					dump_buffer->dma_buffer[i].dma_buf,
					(unsigned long)dump_buffer->dma_buffer[i].iova);
			}
			break;
		case RGA_VIRTUAL_ADDRESS:
			pr_info("virtual address:\n");
			pr_info("\t va = 0x%lx, pages = %p, size = %ld\n",
				(unsigned long)dump_buffer->virt_addr->addr,
				dump_buffer->virt_addr->pages,
				dump_buffer->virt_addr->size);

			for (i = 0; i < dump_buffer->dma_buffer_size; i++) {
				pr_info("\t core %d:\n", dump_buffer->dma_buffer[i].core);
				pr_info("\t\t iova = 0x%lx, sgt = %p, size = %ld\n",
					(unsigned long)dump_buffer->dma_buffer[i].iova,
					dump_buffer->dma_buffer[i].sgt,
					dump_buffer->dma_buffer[i].size);
			}
			break;
		case RGA_PHYSICAL_ADDRESS:
			pr_info("physical address:\n");
			pr_info("\t pa = 0x%lx\n", (unsigned long)dump_buffer->phys_addr);
			break;
		default:
			pr_err("Illegal external buffer!\n");
			break;
		}

		pr_info("---------------------------------------------------------------\n");
	}
}

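/*
 * Set the per-channel MMU enable bits (src/src1/dst/els, bits 8..11)
 * in the job's command, based on which of the attached buffers need
 * the IOMMU.
 */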
static int rga_mm_set_mmu_flag(struct rga_job *job)
{
	struct rga_mmu_t *mmu_info;
	int src_mmu_en;
	int src1_mmu_en;
	int dst_mmu_en;
	int els_mmu_en;

	src_mmu_en = job->src_buffer ? job->src_buffer->mm_flag & RGA_MM_NEED_USE_IOMMU : 0;
	src1_mmu_en = job->src1_buffer ? job->src1_buffer->mm_flag & RGA_MM_NEED_USE_IOMMU : 0;
	dst_mmu_en = job->dst_buffer ? job->dst_buffer->mm_flag & RGA_MM_NEED_USE_IOMMU : 0;
	els_mmu_en = job->els_buffer ? job->els_buffer->mm_flag & RGA_MM_NEED_USE_IOMMU : 0;

	mmu_info = &job->rga_command_base.mmu_info;
	if (src_mmu_en)
		mmu_info->mmu_flag |= (0x1 << 8);
	if (src1_mmu_en)
		mmu_info->mmu_flag |= (0x1 << 9);
	if (dst_mmu_en)
		mmu_info->mmu_flag |= (0x1 << 10);
	if (els_mmu_en)
		mmu_info->mmu_flag |= (0x1 << 11);

	if (mmu_info->mmu_flag & (0xf << 8)) {
		mmu_info->mmu_flag |= 1;
		mmu_info->mmu_flag |= 1 << 31;
		mmu_info->mmu_en  = 1;
	}

	return 0;
}

static int rga_mm_sync_dma_sg_for_device(struct rga_internal_buffer *buffer,
					 int core,
					 enum dma_data_direction dir)
{
	struct sg_table *sgt;
	struct rga_scheduler_t *scheduler;

	scheduler = rga_job_get_scheduler(core);
	if (scheduler == NULL) {
		pr_err("%s(%d), failed to get scheduler, core = 0x%x\n",
		       __func__, __LINE__, core);
		return -EFAULT;
	}

	sgt = rga_mm_lookup_sgt(buffer, core);
	if (sgt == NULL) {
		pr_err("%s(%d), failed to get sgt, core = 0x%x\n",
		       __func__, __LINE__, core);
		return -EINVAL;
	}

	dma_sync_sg_for_device(scheduler->dev, sgt->sgl, sgt->orig_nents, dir);

	return 0;
}

static int rga_mm_sync_dma_sg_for_cpu(struct rga_internal_buffer *buffer,
				      int core,
				      enum dma_data_direction dir)
{
	struct sg_table *sgt;
	struct rga_scheduler_t *scheduler;

	scheduler = rga_job_get_scheduler(core);
	if (scheduler == NULL) {
		pr_err("%s(%d), failed to get scheduler, core = 0x%x\n",
		       __func__, __LINE__, core);
		return -EFAULT;
	}

	sgt = rga_mm_lookup_sgt(buffer, core);
	if (sgt == NULL) {
		pr_err("%s(%d), failed to get sgt, core = 0x%x\n",
		       __func__, __LINE__, core);
		return -EINVAL;
	}

	dma_sync_sg_for_cpu(scheduler->dev, sgt->sgl, sgt->orig_nents, dir);

	return 0;
}

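/*
 * Resolve the buffer handle stored in img->yrgb_addr into an address
 * the selected core can use (IOVA, user virtual address or physical
 * address), take a reference on the buffer, and sync the CPU cache for
 * virtual-address buffers before the hardware touches them.
 */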
static int rga_mm_get_channel_handle_info(struct rga_mm *mm,
					  struct rga_job *job,
					  struct rga_img_info_t *img,
					  struct rga_internal_buffer **buf,
					  enum dma_data_direction dir)
{
	int ret = 0;
	struct rga_internal_buffer *internal_buffer = NULL;

	if (!(img->yrgb_addr > 0)) {
		pr_err("No buffer handle can be used!\n");
		return -EFAULT;
	}

	mutex_lock(&mm->lock);
	*buf = rga_mm_lookup_handle(mm, img->yrgb_addr);
	if (*buf == NULL) {
		pr_err("This handle[%ld] is illegal.\n", (unsigned long)img->yrgb_addr);

		ret = -EFAULT;
		goto unlock_mm_and_return;
	}

	internal_buffer = *buf;
	kref_get(&internal_buffer->refcount);

	switch (internal_buffer->type) {
	case RGA_DMA_BUFFER:
		if (job->core == RGA3_SCHEDULER_CORE0 ||
		    job->core == RGA3_SCHEDULER_CORE1) {
			img->yrgb_addr = rga_mm_lookup_iova(internal_buffer, job->core);
			if (img->yrgb_addr == 0) {
				pr_err("lookup dma_buf iova error!\n");

				ret = -EINVAL;
				goto unlock_mm_and_return;
			}
		} else {
			img->yrgb_addr = 0;
		}

		break;
	case RGA_VIRTUAL_ADDRESS:
		if (job->core == RGA3_SCHEDULER_CORE0 ||
		    job->core == RGA3_SCHEDULER_CORE1) {
			img->yrgb_addr = rga_mm_lookup_iova(internal_buffer, job->core);
			if (img->yrgb_addr == 0) {
				pr_err("lookup virt_addr iova error!\n");

				ret = -EINVAL;
				goto unlock_mm_and_return;
			}
		} else {
			img->yrgb_addr = internal_buffer->virt_addr->addr;
		}

		/*
		 * Some userspace virtual addresses do not have an
		 * interface for flushing the cache, so it is mandatory
		 * to flush the cache when the virtual address is used.
		 */
		ret = rga_mm_sync_dma_sg_for_device(internal_buffer, job->core, dir);
		if (ret < 0) {
			pr_err("sync sgt for device error!\n");
			goto unlock_mm_and_return;
		}

		break;
	case RGA_PHYSICAL_ADDRESS:
		img->yrgb_addr = internal_buffer->phys_addr;
		break;
	default:
		pr_err("Illegal external buffer!\n");

		ret = -EFAULT;
		goto unlock_mm_and_return;
	}
	mutex_unlock(&mm->lock);

	rga_convert_addr(img);

	return 0;
unlock_mm_and_return:
	mutex_unlock(&mm->lock);
	return ret;
}

static void rga_mm_put_channel_handle_info(struct rga_mm *mm,
					   struct rga_internal_buffer *internal_buffer,
					   int core,
					   enum dma_data_direction dir)
{
	int ret;

	if (internal_buffer->type == RGA_VIRTUAL_ADDRESS && dir != DMA_NONE) {
		ret = rga_mm_sync_dma_sg_for_cpu(internal_buffer, core, dir);
		if (ret < 0) {
			pr_err("sync sgt for cpu error!\n");
			goto put_internal_buffer;
		}
	}

put_internal_buffer:
	mutex_lock(&mm->lock);

	kref_put(&internal_buffer->refcount, rga_mm_kref_release_buffer);

	mutex_unlock(&mm->lock);
}

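/*
 * Resolve the src/dst/pat buffer handles of a job into usable
 * addresses and set the job's MMU flags accordingly.
 */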
int rga_mm_get_handle_info(struct rga_job *job)
{
	int ret = 0;
	struct rga_req *req = NULL;
	struct rga_mm *mm = NULL;
	enum dma_data_direction dir;

	req = &job->rga_command_base;
	mm = rga_drvdata->mm;

	if (likely(req->src.yrgb_addr > 0)) {
		ret = rga_mm_get_channel_handle_info(mm, job, &req->src,
						     &job->src_buffer,
						     DMA_TO_DEVICE);
		if (ret < 0) {
			pr_err("Can't get src buffer info!\n");
			return ret;
		}
	}

	if (likely(req->dst.yrgb_addr > 0)) {
		ret = rga_mm_get_channel_handle_info(mm, job, &req->dst,
						     &job->dst_buffer,
						     DMA_TO_DEVICE);
		if (ret < 0) {
			pr_err("Can't get dst buffer info!\n");
			return ret;
		}
	}

	if (likely(req->pat.yrgb_addr > 0)) {
		if (job->rga_command_base.render_mode != UPDATE_PALETTE_TABLE_MODE) {
			if (job->rga_command_base.bsfilter_flag)
				dir = DMA_BIDIRECTIONAL;
			else
				dir = DMA_TO_DEVICE;

			ret = rga_mm_get_channel_handle_info(mm, job, &req->pat,
							     &job->src1_buffer,
							     dir);
		} else {
			ret = rga_mm_get_channel_handle_info(mm, job, &req->pat,
							     &job->els_buffer,
							     DMA_BIDIRECTIONAL);
		}
		if (ret < 0) {
			pr_err("Can't get pat buffer info!\n");
			return ret;
		}
	}

	rga_mm_set_mmu_flag(job);

	return 0;
}

void rga_mm_put_handle_info(struct rga_job *job)
{
	struct rga_mm *mm = NULL;

	mm = rga_drvdata->mm;

	if (job->src_buffer)
		rga_mm_put_channel_handle_info(mm, job->src_buffer, job->core, DMA_NONE);
	if (job->dst_buffer)
		rga_mm_put_channel_handle_info(mm, job->dst_buffer, job->core, DMA_FROM_DEVICE);
	if (job->src1_buffer)
		rga_mm_put_channel_handle_info(mm, job->src1_buffer, job->core, DMA_NONE);
	if (job->els_buffer)
		rga_mm_put_channel_handle_info(mm, job->els_buffer, job->core, DMA_NONE);
}

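/*
 * Import an external buffer into rga_mm and return its handle. If the
 * same buffer has already been imported, only its reference count is
 * raised and the existing handle is returned.
 */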
uint32_t rga_mm_import_buffer(struct rga_external_buffer *external_buffer)
{
	int ret = 0;
	struct rga_mm *mm;
	struct rga_internal_buffer *internal_buffer;

	mm = rga_drvdata->mm;
	if (mm == NULL) {
		pr_err("rga mm is null!\n");
		return -EFAULT;
	}

	mutex_lock(&mm->lock);

	/* First, check whether this buffer has already been imported into rga_mm. */
	internal_buffer = rga_mm_lookup_external(mm, external_buffer);
	if (!IS_ERR_OR_NULL(internal_buffer)) {
		kref_get(&internal_buffer->refcount);

		mutex_unlock(&mm->lock);
		return internal_buffer->handle;
	}

	/* Otherwise, map and cache the external_buffer in rga_mm. */
	internal_buffer = kzalloc(sizeof(struct rga_internal_buffer), GFP_KERNEL);
	if (internal_buffer == NULL) {
		pr_err("%s alloc internal_buffer error!\n", __func__);

		mutex_unlock(&mm->lock);
		return -ENOMEM;
	}

	ret = rga_mm_map_buffer(external_buffer, internal_buffer);
	if (ret < 0)
		goto FREE_INTERNAL_BUFFER;

	kref_init(&internal_buffer->refcount);

	/*
	 * Get the user-visible handle using idr. Preload and perform
	 * the allocation under our lock.
	 */
	idr_preload(GFP_KERNEL);
	internal_buffer->handle = idr_alloc(&mm->memory_idr, internal_buffer, 1, 0, GFP_KERNEL);
	idr_preload_end();

	mm->buffer_count++;

	mutex_unlock(&mm->lock);
	return internal_buffer->handle;

FREE_INTERNAL_BUFFER:
	mutex_unlock(&mm->lock);
	kfree(internal_buffer);

	return ret;
}

int rga_mm_release_buffer(uint32_t handle)
{
	struct rga_mm *mm;
	struct rga_internal_buffer *internal_buffer;

	mm = rga_drvdata->mm;
	if (mm == NULL) {
		pr_err("rga mm is null!\n");
		return -EFAULT;
	}

	mutex_lock(&mm->lock);

	/* Find the buffer that has been imported */
	internal_buffer = rga_mm_lookup_handle(mm, handle);
	if (IS_ERR_OR_NULL(internal_buffer)) {
		pr_err("This is not a buffer that has been imported, handle = %d\n", (int)handle);

		mutex_unlock(&mm->lock);
		return -ENOENT;
	}

	kref_put(&internal_buffer->refcount, rga_mm_kref_release_buffer);

	mutex_unlock(&mm->lock);
	return 0;
}

int rga_mm_init(struct rga_mm **mm_session)
{
	struct rga_mm *mm = NULL;

	*mm_session = kzalloc(sizeof(struct rga_mm), GFP_KERNEL);
	if (*mm_session == NULL) {
		pr_err("can not kzalloc for rga buffer mm_session\n");
		return -ENOMEM;
	}

	mm = *mm_session;

	mutex_init(&mm->lock);
	idr_init_base(&mm->memory_idr, 1);

	return 0;
}

int rga_mm_remove(struct rga_mm **mm_session)
{
	struct rga_mm *mm = *mm_session;

	mutex_lock(&mm->lock);

	idr_for_each(&mm->memory_idr, &rga_mm_handle_remove, mm);
	idr_destroy(&mm->memory_idr);

	mutex_unlock(&mm->lock);

	kfree(*mm_session);
	*mm_session = NULL;

	return 0;
}
1163