// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) Rockchip Electronics Co., Ltd.
 *
 * Author: Huang Lee <Putin.li@rock-chips.com>
 */

#define pr_fmt(fmt) "rga: " fmt

#include "rga2_reg_info.h"
#include "rga3_reg_info.h"
#include "rga_dma_buf.h"
#include "rga_mm.h"

#include "rga_job.h"
#include "rga_fence.h"
#include "rga_hw_config.h"

#include "rga2_mmu_info.h"
#include "rga_debugger.h"

struct rga2_mmu_info_t rga2_mmu_info;

struct rga_drvdata_t *rga_drvdata;

/* hrtimer used to periodically sample per-scheduler load */
static struct hrtimer timer;
static ktime_t kt;

static const struct rga_backend_ops rga3_ops = {
	.get_version = rga3_get_version,
	.set_reg = rga3_set_reg,
	.init_reg = rga3_init_reg,
	.soft_reset = rga3_soft_reset
};

static const struct rga_backend_ops rga2_ops = {
	.get_version = rga2_get_version,
	.set_reg = rga2_set_reg,
	.init_reg = rga2_init_reg,
	.soft_reset = rga2_soft_reset
};

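/*
 * rga_mpi_commit() - commit a job from the MPI path and wait for it;
 * jobs on this path are always submitted as RGA_BLIT_SYNC.
 */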
int rga_mpi_commit(struct rga_req *cmd, struct rga_mpi_job_t *mpi_job)
{
	int ret;

	if (DEBUGGER_EN(MSG))
		rga_cmd_print_debug_info(cmd);

	ret = rga_job_mpi_commit(cmd, mpi_job, RGA_BLIT_SYNC);
	if (ret < 0) {
		if (ret == -ERESTARTSYS) {
			if (DEBUGGER_EN(MSG))
				pr_err("%s, commit mpi job failed: interrupted by a signal.\n",
				       __func__);
		} else {
			pr_err("%s, commit mpi job failed\n", __func__);
		}

		return ret;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(rga_mpi_commit);

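/*
 * rga_kernel_commit() - synchronous job submission for other in-kernel
 * users of the RGA.
 */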
int rga_kernel_commit(struct rga_req *cmd)
{
	int ret;

	if (DEBUGGER_EN(MSG))
		rga_cmd_print_debug_info(cmd);

	ret = rga_job_commit(cmd, RGA_BLIT_SYNC);
	if (ret < 0) {
		if (ret == -ERESTARTSYS) {
			if (DEBUGGER_EN(MSG))
				pr_err("%s, commit kernel job failed: interrupted by a signal.\n",
				       __func__);
		} else {
			pr_err("%s, commit kernel job failed\n", __func__);
		}

		return ret;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(rga_kernel_commit);

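/*
 * A minimal in-kernel usage sketch (hypothetical; the real layout of
 * struct rga_req is defined in the driver/uapi headers):
 *
 *	struct rga_req req = { 0 };
 *	// fill in the src/dst image descriptors here ...
 *	if (rga_kernel_commit(&req) < 0)
 *		pr_err("rga blit failed\n");
 */

/*
 * Load sampling: every RGA_LOAD_INTERVAL the hrtimer folds the running
 * job's elapsed time into busy_time, publishes the window total in
 * busy_time_record and opens a new accounting window.
 */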
static enum hrtimer_restart hrtimer_handler(struct hrtimer *timer)
{
	struct rga_drvdata_t *rga = rga_drvdata;
	struct rga_scheduler_t *scheduler = NULL;
	struct rga_job *job = NULL;
	unsigned long flags;
	int i;

	ktime_t now = ktime_get();

	for (i = 0; i < rga->num_of_scheduler; i++) {
		scheduler = rga->rga_scheduler[i];

		spin_lock_irqsave(&scheduler->irq_lock, flags);

		/* account the busy time of a job that is still running */
		job = scheduler->running_job;
		if (job) {
			scheduler->timer.busy_time += ktime_us_delta(now, job->timestamp);
			job->timestamp = now;
		}

		scheduler->timer.busy_time_record = scheduler->timer.busy_time;
		scheduler->timer.busy_time = 0;

		spin_unlock_irqrestore(&scheduler->irq_lock, flags);
	}

	hrtimer_forward_now(timer, kt);
	return HRTIMER_RESTART;
}

static void rga_init_timer(void)
{
	kt = ktime_set(0, RGA_LOAD_INTERVAL);
	hrtimer_init(&timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	/* assign the callback before arming the timer */
	timer.function = hrtimer_handler;
	hrtimer_start(&timer, kt, HRTIMER_MODE_REL);
}

static void rga_cancel_timer(void)
{
	hrtimer_cancel(&timer);
}

#ifndef CONFIG_ROCKCHIP_FPGA
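/*
 * Take a PM-runtime and wakeup reference, then enable every clock that
 * probe managed to acquire; on failure, already-enabled clocks are
 * rolled back in reverse order and the PM references are dropped again.
 */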
int rga_power_enable(struct rga_scheduler_t *rga_scheduler)
{
	int ret = -EINVAL;
	int i;

	pm_runtime_get_sync(rga_scheduler->dev);
	pm_stay_awake(rga_scheduler->dev);

	for (i = 0; i < rga_scheduler->num_clks; i++) {
		if (!IS_ERR(rga_scheduler->clks[i])) {
			ret = clk_prepare_enable(rga_scheduler->clks[i]);
			if (ret < 0)
				goto err_enable_clk;
		}
	}

	/* power-on succeeded; account the reference */
	rga_scheduler->pd_refcount++;

	return 0;

err_enable_clk:
	for (--i; i >= 0; --i)
		if (!IS_ERR(rga_scheduler->clks[i]))
			clk_disable_unprepare(rga_scheduler->clks[i]);

	pm_relax(rga_scheduler->dev);
	pm_runtime_put_sync_suspend(rga_scheduler->dev);

	return ret;
}

int rga_power_disable(struct rga_scheduler_t *rga_scheduler)
{
	int i;

	for (i = rga_scheduler->num_clks - 1; i >= 0; i--)
		if (!IS_ERR(rga_scheduler->clks[i]))
			clk_disable_unprepare(rga_scheduler->clks[i]);

	pm_relax(rga_scheduler->dev);
	pm_runtime_put_sync_suspend(rga_scheduler->dev);

	rga_scheduler->pd_refcount--;

	return 0;
}

#endif /* CONFIG_ROCKCHIP_FPGA */

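/*
 * RGA_IOC_IMPORT_BUFFER: copy a struct rga_buffer_pool in from userspace,
 * import each described buffer into the RGA memory manager and copy the
 * resulting handles back out.
 *
 * Userspace sketch (hypothetical field usage; see the uapi header for
 * the exact types):
 *
 *	struct rga_external_buffer buf = { 0 };  // describes a dma-buf fd, etc.
 *	struct rga_buffer_pool pool = { 0 };
 *	pool.buffers = &buf;
 *	pool.size = 1;
 *	ioctl(rga_fd, RGA_IOC_IMPORT_BUFFER, &pool);
 *	// on success, buf.handle identifies the imported buffer
 */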
static long rga_ioctl_import_buffer(unsigned long arg)
{
	int i;
	int ret = 0;
	struct rga_buffer_pool buffer_pool;
	struct rga_external_buffer *external_buffer = NULL;

	if (unlikely(copy_from_user(&buffer_pool,
				    (struct rga_buffer_pool *)arg,
				    sizeof(buffer_pool)))) {
		pr_err("rga_buffer_pool copy_from_user failed!\n");
		return -EFAULT;
	}

	if (buffer_pool.size > RGA_BUFFER_POOL_SIZE_MAX) {
		pr_err("Cannot import more than %d buffers at a time!\n",
		       RGA_BUFFER_POOL_SIZE_MAX);
		return -EFBIG;
	}

	if (buffer_pool.buffers == NULL) {
		pr_err("Import buffer list is NULL!\n");
		return -EFAULT;
	}

	external_buffer = kmalloc(sizeof(struct rga_external_buffer) * buffer_pool.size,
				  GFP_KERNEL);
	if (external_buffer == NULL) {
		pr_err("external buffer list alloc error!\n");
		return -ENOMEM;
	}

	if (unlikely(copy_from_user(external_buffer, buffer_pool.buffers,
				    sizeof(struct rga_external_buffer) * buffer_pool.size))) {
		pr_err("rga_buffer_pool external_buffer list copy_from_user failed\n");
		ret = -EFAULT;

		goto err_free_external_buffer;
	}

	for (i = 0; i < buffer_pool.size; i++) {
		ret = rga_mm_import_buffer(&external_buffer[i]);
		if (ret < 0) {
			pr_err("buffer[%d] mm import buffer failed!\n", i);

			goto err_free_external_buffer;
		}

		external_buffer[i].handle = ret;
	}

	if (unlikely(copy_to_user(buffer_pool.buffers, external_buffer,
				  sizeof(struct rga_external_buffer) * buffer_pool.size))) {
		pr_err("rga_buffer_pool external_buffer list copy_to_user failed\n");
		ret = -EFAULT;

		goto err_free_external_buffer;
	}

err_free_external_buffer:
	kfree(external_buffer);
	return ret;
}

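/*
 * RGA_IOC_RELEASE_BUFFER: the inverse of the import path; every handle
 * in the pool is dropped from the RGA memory manager.
 */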
static long rga_ioctl_release_buffer(unsigned long arg)
{
	int i;
	int ret = 0;
	struct rga_buffer_pool buffer_pool;
	struct rga_external_buffer *external_buffer = NULL;

	if (unlikely(copy_from_user(&buffer_pool,
				    (struct rga_buffer_pool *)arg,
				    sizeof(buffer_pool)))) {
		pr_err("rga_buffer_pool copy_from_user failed!\n");
		return -EFAULT;
	}

	if (buffer_pool.size > RGA_BUFFER_POOL_SIZE_MAX) {
		pr_err("Cannot release more than %d buffers at a time!\n",
		       RGA_BUFFER_POOL_SIZE_MAX);
		return -EFBIG;
	}

	if (buffer_pool.buffers == NULL) {
		pr_err("Release buffer list is NULL!\n");
		return -EFAULT;
	}

	external_buffer = kmalloc(sizeof(struct rga_external_buffer) * buffer_pool.size,
				  GFP_KERNEL);
	if (external_buffer == NULL) {
		pr_err("external buffer list alloc error!\n");
		return -ENOMEM;
	}

	if (unlikely(copy_from_user(external_buffer, buffer_pool.buffers,
				    sizeof(struct rga_external_buffer) * buffer_pool.size))) {
		pr_err("rga_buffer_pool external_buffer list copy_from_user failed\n");
		ret = -EFAULT;

		goto err_free_external_buffer;
	}

	for (i = 0; i < buffer_pool.size; i++) {
		ret = rga_mm_release_buffer(external_buffer[i].handle);
		if (ret < 0) {
			pr_err("buffer[%d] mm release buffer failed!\n", i);

			goto err_free_external_buffer;
		}
	}

err_free_external_buffer:
	kfree(external_buffer);
	return ret;
}

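/*
 * Main ioctl entry point. The same handler is also wired up as
 * compat_ioctl, which assumes the shared uapi structures have the same
 * layout for 32-bit and 64-bit userspace.
 */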
static long rga_ioctl(struct file *file, uint32_t cmd, unsigned long arg)
{
	struct rga_drvdata_t *rga = rga_drvdata;
	struct rga_req req_rga;
	int ret = 0;
	int i = 0;
	int major_version = 0, minor_version = 0;
	char version[16] = { 0 };
	struct rga_version_t driver_version;
	struct rga_hw_versions_t hw_versions;

	if (!rga) {
		pr_err("rga_drvdata is null, rga is not initialized\n");
		return -ENODEV;
	}

	if (DEBUGGER_EN(NONUSE))
		return 0;

	switch (cmd) {
	case RGA_BLIT_SYNC:
	case RGA_BLIT_ASYNC:
		if (unlikely(copy_from_user(&req_rga,
			(struct rga_req *)arg, sizeof(struct rga_req)))) {
			pr_err("copy_from_user failed\n");
			ret = -EFAULT;
			break;
		}

		if (DEBUGGER_EN(MSG))
			rga_cmd_print_debug_info(&req_rga);

		ret = rga_job_commit(&req_rga, cmd);
		if (ret < 0) {
			if (ret == -ERESTARTSYS) {
				if (DEBUGGER_EN(MSG))
					pr_err("rga_job_commit failed: interrupted by a signal.\n");
			} else {
				pr_err("rga_job_commit failed\n");
			}

			break;
		}

		if (copy_to_user((struct rga_req *)arg,
				&req_rga, sizeof(struct rga_req))) {
			pr_err("copy_to_user failed\n");
			ret = -EFAULT;
			break;
		}

		break;
	case RGA_CACHE_FLUSH:
	case RGA_FLUSH:
	case RGA_GET_RESULT:
		break;
	case RGA_GET_VERSION:
		/* i == 0 here, so the first scheduler's version is reported */
		sscanf(rga->rga_scheduler[i]->version.str, "%x.%x.%*x",
			 &major_version, &minor_version);
		snprintf(version, 5, "%x.%02x", major_version, minor_version);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
		/* TODO: let userspace get the version string */
		if (copy_to_user((void *)arg, version, sizeof(version)))
			ret = -EFAULT;
#else
		if (copy_to_user((void *)arg, RGA3_VERSION,
				 sizeof(RGA3_VERSION)))
			ret = -EFAULT;
#endif
		break;
	case RGA2_GET_VERSION:
		for (i = 0; i < rga->num_of_scheduler; i++) {
			if (rga->rga_scheduler[i]->ops == &rga2_ops) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
				if (copy_to_user((void *)arg, rga->rga_scheduler[i]->version.str,
					sizeof(rga->rga_scheduler[i]->version.str)))
					ret = -EFAULT;
#else
				if (copy_to_user((void *)arg, RGA3_VERSION,
						sizeof(RGA3_VERSION)))
					ret = -EFAULT;
#endif
				else
					ret = true;

				break;
			}
		}

		/* ret != true indicates that the RGA2 version number cannot be obtained */
		if (ret != true)
			ret = -EFAULT;

		break;

	case RGA_IOC_GET_HW_VERSION:
		/* RGA hardware version */
		hw_versions.size = rga->num_of_scheduler > RGA_HW_SIZE ?
			RGA_HW_SIZE : rga->num_of_scheduler;

		for (i = 0; i < hw_versions.size; i++) {
			memcpy(&hw_versions.version[i], &rga->rga_scheduler[i]->version,
				sizeof(rga->rga_scheduler[i]->version));
		}

		if (copy_to_user((void *)arg, &hw_versions, sizeof(hw_versions)))
			ret = -EFAULT;
		else
			ret = true;

		break;

	case RGA_IOC_GET_DRVIER_VERSION:
		/* Driver version (the "DRVIER"/"VERISON" spellings match the uapi headers) */
		driver_version.major = DRIVER_MAJOR_VERISON;
		driver_version.minor = DRIVER_MINOR_VERSION;
		driver_version.revision = DRIVER_REVISION_VERSION;
		strncpy((char *)driver_version.str, DRIVER_VERSION, sizeof(driver_version.str));

		if (copy_to_user((void *)arg, &driver_version, sizeof(driver_version)))
			ret = -EFAULT;
		else
			ret = true;

		break;

	case RGA_IOC_IMPORT_BUFFER:
		ret = rga_ioctl_import_buffer(arg);

		break;

	case RGA_IOC_RELEASE_BUFFER:
		ret = rga_ioctl_release_buffer(arg);

		break;

	case RGA_IMPORT_DMA:
	case RGA_RELEASE_DMA:
	default:
		pr_err("unknown ioctl cmd!\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

#ifdef CONFIG_ROCKCHIP_RGA_DEBUGGER
static int rga_debugger_init(struct rga_debugger **debugger_p)
{
	struct rga_debugger *debugger;

	*debugger_p = kzalloc(sizeof(struct rga_debugger), GFP_KERNEL);
	if (*debugger_p == NULL) {
		pr_err("cannot alloc for rga debugger\n");
		return -ENOMEM;
	}

	debugger = *debugger_p;

#ifdef CONFIG_ROCKCHIP_RGA_DEBUG_FS
	mutex_init(&debugger->debugfs_lock);
	INIT_LIST_HEAD(&debugger->debugfs_entry_list);
#endif

#ifdef CONFIG_ROCKCHIP_RGA_PROC_FS
	mutex_init(&debugger->procfs_lock);
	INIT_LIST_HEAD(&debugger->procfs_entry_list);
#endif

	rga_debugfs_init();
	rga_procfs_init();

	return 0;
}

static int rga_debugger_remove(struct rga_debugger **debugger_p)
{
	rga_debugfs_remove();
	rga_procfs_remove();

	kfree(*debugger_p);
	*debugger_p = NULL;

	return 0;
}
#endif

static int rga_open(struct inode *inode, struct file *file)
{
	return nonseekable_open(inode, file);
}

static int rga_release(struct inode *inode, struct file *file)
{
	return 0;
}

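/*
 * IRQ handling is split: the hard-IRQ half acknowledges the interrupt
 * (and, for RGA2, soft-resets on an error flag), then IRQ_WAKE_THREAD
 * hands off to the threaded half, which completes the running job.
 */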
static irqreturn_t rga3_irq_handler(int irq, void *data)
{
	struct rga_scheduler_t *rga_scheduler = data;

	if (DEBUGGER_EN(INT_FLAG))
		pr_info("irq INT[%x], STATS0[%x], STATS1[%x]\n",
			rga_read(RGA3_INT_RAW, rga_scheduler),
			rga_read(RGA3_STATUS0, rga_scheduler),
			rga_read(RGA3_STATUS1, rga_scheduler));

	/* TODO: if error interrupt then soft reset hardware */
	//rga_scheduler->ops->soft_reset(job->core);

	/* clear INT */
	rga_write(1, RGA3_INT_CLR, rga_scheduler);

	return IRQ_WAKE_THREAD;
}

static irqreturn_t rga3_irq_thread(int irq, void *data)
{
	struct rga_scheduler_t *rga_scheduler = data;
	struct rga_job *job;

	job = rga_scheduler->running_job;

	if (!job) {
		pr_err("running job is invalid on irq thread\n");
		return IRQ_HANDLED;
	}

	if (DEBUGGER_EN(INT_FLAG))
		pr_info("irqthread INT[%x], STATS0[%x], STATS1[%x]\n",
			rga_read(RGA3_INT_RAW, rga_scheduler),
			rga_read(RGA3_STATUS0, rga_scheduler),
			rga_read(RGA3_STATUS1, rga_scheduler));

	rga_job_done(rga_scheduler, 0);

	return IRQ_HANDLED;
}

static irqreturn_t rga2_irq_handler(int irq, void *data)
{
	struct rga_scheduler_t *rga_scheduler = data;

	if (DEBUGGER_EN(INT_FLAG))
		pr_info("irq INT[%x], STATS0[%x]\n",
			rga_read(RGA2_INT, rga_scheduler),
			rga_read(RGA2_STATUS, rga_scheduler));

	/* if an error interrupt is raised, soft reset the hardware */
	if (rga_read(RGA2_INT, rga_scheduler) & 0x01) {
		pr_err("err irq! INT[%x], STATS0[%x]\n",
			 rga_read(RGA2_INT, rga_scheduler),
			 rga_read(RGA2_STATUS, rga_scheduler));
		rga_scheduler->ops->soft_reset(rga_scheduler);
	}

	/* clear INT */
	rga_write(rga_read(RGA2_INT, rga_scheduler) | (0x1 << 4) | (0x1 << 5) |
		 (0x1 << 6) | (0x1 << 7), RGA2_INT, rga_scheduler);

	return IRQ_WAKE_THREAD;
}

static irqreturn_t rga2_irq_thread(int irq, void *data)
{
	struct rga_scheduler_t *rga_scheduler = data;
	struct rga_job *job;

	job = rga_scheduler->running_job;

	if (!job)
		return IRQ_HANDLED;

	if (DEBUGGER_EN(INT_FLAG))
		pr_info("irqthread INT[%x], STATS0[%x]\n",
			rga_read(RGA2_INT, rga_scheduler),
			rga_read(RGA2_STATUS, rga_scheduler));

	rga_job_done(rga_scheduler, 0);

	return IRQ_HANDLED;
}

const struct file_operations rga_fops = {
	.owner = THIS_MODULE,
	.open = rga_open,
	.release = rga_release,
	.unlocked_ioctl = rga_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = rga_ioctl,
#endif
};

static struct miscdevice rga_dev = {
	/* no fixed minor was given; let the misc core assign one */
	.minor = MISC_DYNAMIC_MINOR,
	.name = "rga",
	.fops = &rga_fops,
};

static const char *const old_rga2_clks[] = {
	"aclk_rga",
	"hclk_rga",
	"clk_rga",
};

static const char *const rk3588_rga2_clks[] = {
	"aclk_rga2",
	"hclk_rga2",
	"clk_rga2",
};

static const char *const rga3_core_0_clks[] = {
	"aclk_rga3_0",
	"hclk_rga3_0",
	"clk_rga3_0",
};

static const char *const rga3_core_1_clks[] = {
	"aclk_rga3_1",
	"hclk_rga3_1",
	"clk_rga3_1",
};

static const struct rga_irqs_data_t single_rga2_irqs[] = {
	{"rga2_irq", rga2_irq_handler, rga2_irq_thread}
};

static const struct rga_irqs_data_t rga3_core0_irqs[] = {
	{"rga3_core0_irq", rga3_irq_handler, rga3_irq_thread}
};

static const struct rga_irqs_data_t rga3_core1_irqs[] = {
	{"rga3_core1_irq", rga3_irq_handler, rga3_irq_thread}
};

static const struct rga_match_data_t old_rga2_match_data = {
	.clks = old_rga2_clks,
	.num_clks = ARRAY_SIZE(old_rga2_clks),
	.irqs = single_rga2_irqs,
	.num_irqs = ARRAY_SIZE(single_rga2_irqs)
};

static const struct rga_match_data_t rk3588_rga2_match_data = {
	.clks = rk3588_rga2_clks,
	.num_clks = ARRAY_SIZE(rk3588_rga2_clks),
	.irqs = single_rga2_irqs,
	.num_irqs = ARRAY_SIZE(single_rga2_irqs)
};

static const struct rga_match_data_t rga3_core0_match_data = {
	.clks = rga3_core_0_clks,
	.num_clks = ARRAY_SIZE(rga3_core_0_clks),
	.irqs = rga3_core0_irqs,
	.num_irqs = ARRAY_SIZE(rga3_core0_irqs)
};

static const struct rga_match_data_t rga3_core1_match_data = {
	.clks = rga3_core_1_clks,
	.num_clks = ARRAY_SIZE(rga3_core_1_clks),
	.irqs = rga3_core1_irqs,
	.num_irqs = ARRAY_SIZE(rga3_core1_irqs)
};

static const struct of_device_id rga3_core0_dt_ids[] = {
	{
		.compatible = "rockchip,rga3_core0",
		.data = &rga3_core0_match_data,
	},
	{},
};

static const struct of_device_id rga3_core1_dt_ids[] = {
	{
		.compatible = "rockchip,rga3_core1",
		.data = &rga3_core1_match_data,
	},
	{},
};

static const struct of_device_id rga2_dt_ids[] = {
	{
		.compatible = "rockchip,rga2_core0",
		.data = &rk3588_rga2_match_data,
	},
	{
		.compatible = "rockchip,rga2",
		.data = &old_rga2_match_data,
	},
	{},
};

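/*
 * Pick the backend callbacks, hardware description and core id for a
 * scheduler based on the device name from the device tree match.
 */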
static void init_scheduler(struct rga_scheduler_t *rga_scheduler,
			 const char *name)
{
	spin_lock_init(&rga_scheduler->irq_lock);
	INIT_LIST_HEAD(&rga_scheduler->todo_list);
	init_waitqueue_head(&rga_scheduler->job_done_wq);

	if (!strcmp(name, "rga3_core0")) {
		rga_scheduler->ops = &rga3_ops;
		/* TODO: get by hw version */
		rga_scheduler->data = &rga3_data;
		rga_scheduler->core = RGA3_SCHEDULER_CORE0;
	} else if (!strcmp(name, "rga3_core1")) {
		rga_scheduler->ops = &rga3_ops;
		rga_scheduler->data = &rga3_data;
		rga_scheduler->core = RGA3_SCHEDULER_CORE1;
	} else if (!strcmp(name, "rga2")) {
		rga_scheduler->ops = &rga2_ops;
		rga_scheduler->data = &rga2e_data;
		rga_scheduler->core = RGA2_SCHEDULER_CORE0;
	}
}

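/*
 * Per-core probe: allocate a scheduler, map the registers, request the
 * threaded IRQ and the clocks, then power the core once to read out its
 * hardware version before adding it to the global scheduler list.
 */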
static int rga_drv_probe(struct platform_device *pdev)
{
	struct rga_drvdata_t *data = rga_drvdata;
	struct resource *res;
	int ret = 0;
	const struct of_device_id *match = NULL;
	struct device *dev = &pdev->dev;
	const struct rga_match_data_t *match_data;
	int i, irq;
	struct rga_scheduler_t *rga_scheduler = NULL;

	if (!pdev->dev.of_node)
		return -EINVAL;

	if (!strcmp(dev_driver_string(dev), "rga3_core0"))
		match = of_match_device(rga3_core0_dt_ids, dev);
	else if (!strcmp(dev_driver_string(dev), "rga3_core1"))
		match = of_match_device(rga3_core1_dt_ids, dev);
	else if (!strcmp(dev_driver_string(dev), "rga2"))
		match = of_match_device(rga2_dt_ids, dev);

	if (!match) {
		dev_err(dev, "%s missing DT entry!\n", dev_driver_string(dev));
		return -EINVAL;
	}

	rga_scheduler =
		devm_kzalloc(&pdev->dev, sizeof(struct rga_scheduler_t),
			GFP_KERNEL);
	if (rga_scheduler == NULL) {
		pr_err("failed to allocate scheduler. dev name = %s\n",
			dev_driver_string(dev));
		return -ENOMEM;
	}

	init_scheduler(rga_scheduler,
		dev_driver_string(dev));

	rga_scheduler->dev = &pdev->dev;

	/* map the registers */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		pr_err("get memory resource failed.\n");
		return -ENXIO;
	}

	rga_scheduler->rga_base =
		devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!rga_scheduler->rga_base) {
		pr_err("ioremap failed\n");
		ret = -ENOENT;
		return ret;
	}

	/* get the IRQ */
	match_data = match->data;

	/* the irq names are listed in the dts */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "no irq %s in dts\n",
			match_data->irqs[0].name);
		return irq;
	}

	rga_scheduler->irq = irq;

	pr_info("%s, irq = %d, match scheduler\n",
			match_data->irqs[0].name, irq);

	ret = devm_request_threaded_irq(dev, irq,
			match_data->irqs[0].irq_hdl,
			match_data->irqs[0].irq_thread, IRQF_SHARED,
			dev_driver_string(dev),
			rga_scheduler);
	if (ret < 0) {
		pr_err("request irq name: %s failed: %d\n",
				match_data->irqs[0].name, ret);
		return ret;
	}

#ifndef CONFIG_ROCKCHIP_FPGA
	for (i = 0; i < match_data->num_clks; i++) {
		struct clk *clk = devm_clk_get(dev, match_data->clks[i]);

		if (IS_ERR(clk))
			pr_err("failed to get %s\n", match_data->clks[i]);

		rga_scheduler->clks[i] = clk;
	}
	rga_scheduler->num_clks = match_data->num_clks;
#endif

	platform_set_drvdata(pdev, rga_scheduler);

	device_init_wakeup(dev, true);

	/* PM init */
#ifndef CONFIG_ROCKCHIP_FPGA
	pm_runtime_enable(&pdev->dev);

	ret = pm_runtime_get_sync(rga_scheduler->dev);
	if (ret < 0) {
		pr_err("failed to get pm runtime, ret = %d\n",
			 ret);
		goto failed;
	}

	for (i = 0; i < rga_scheduler->num_clks; i++) {
		if (!IS_ERR(rga_scheduler->clks[i])) {
			ret = clk_prepare_enable(rga_scheduler->clks[i]);
			if (ret < 0) {
				pr_err("failed to enable clk\n");
				goto failed;
			}
		}
	}
#endif /* CONFIG_ROCKCHIP_FPGA */

	rga_scheduler->ops->get_version(rga_scheduler);
	pr_info("Driver loaded successfully rga[%d] ver:%s\n",
		data->num_of_scheduler, rga_scheduler->version.str);

	data->rga_scheduler[data->num_of_scheduler] = rga_scheduler;

	data->num_of_scheduler++;

	for (i = rga_scheduler->num_clks - 1; i >= 0; i--)
		if (!IS_ERR(rga_scheduler->clks[i]))
			clk_disable_unprepare(rga_scheduler->clks[i]);

	pm_runtime_put_sync(&pdev->dev);

	pr_info("probed successfully\n");

	return 0;

failed:
	device_init_wakeup(dev, false);
	pm_runtime_disable(dev);

	return ret;
}

static int rga_drv_remove(struct platform_device *pdev)
{
	device_init_wakeup(&pdev->dev, false);
#ifndef CONFIG_ROCKCHIP_FPGA
	pm_runtime_disable(&pdev->dev);
#endif /* CONFIG_ROCKCHIP_FPGA */

	return 0;
}

static struct platform_driver rga3_core0_driver = {
	.probe = rga_drv_probe,
	.remove = rga_drv_remove,
	.driver = {
		.name = "rga3_core0",
		.of_match_table = of_match_ptr(rga3_core0_dt_ids),
	},
};

static struct platform_driver rga3_core1_driver = {
	.probe = rga_drv_probe,
	.remove = rga_drv_remove,
	.driver = {
		.name = "rga3_core1",
		.of_match_table = of_match_ptr(rga3_core1_dt_ids),
	},
};

static struct platform_driver rga2_driver = {
	.probe = rga_drv_probe,
	.remove = rga_drv_remove,
	.driver = {
		.name = "rga2",
		.of_match_table = of_match_ptr(rga2_dt_ids),
	},
};

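/*
 * Module init: allocate the RGA2 pre-scale MMU tables, set up the global
 * driver data, register the per-core platform drivers, start the load
 * sampling timer and expose the device node through the misc framework.
 */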
static int __init rga_init(void)
{
	int ret;
	int order = 0;

	uint32_t *buf_p;
	uint32_t *buf;

	/*
	 * Allocate the MMU table for the pre-scale mid buffer:
	 * RGA2_PHY_PAGE_SIZE * channel_num * address_size
	 */
	order = get_order(RGA2_PHY_PAGE_SIZE * 3 * sizeof(buf_p));
	buf_p = (uint32_t *) __get_free_pages(GFP_KERNEL | GFP_DMA32, order);
	if (buf_p == NULL) {
		pr_err("Cannot alloc pages for mmu_page_table\n");
		return -ENOMEM;
	}

	rga2_mmu_info.buf_virtual = buf_p;
	rga2_mmu_info.buf_order = order;

#if (defined(CONFIG_ARM) && defined(CONFIG_ARM_LPAE))
	/* truncate to a 32-bit address on LPAE */
	buf =
		(uint32_t *) (uint32_t)
		virt_to_phys((void *)((unsigned long)buf_p));
#else
	buf = (uint32_t *) virt_to_phys((void *)((unsigned long)buf_p));
#endif
	rga2_mmu_info.buf = buf;
	rga2_mmu_info.front = 0;
	rga2_mmu_info.back = RGA2_PHY_PAGE_SIZE * 3;
	rga2_mmu_info.size = RGA2_PHY_PAGE_SIZE * 3;

	order = get_order(RGA2_PHY_PAGE_SIZE * sizeof(struct page *));
	rga2_mmu_info.pages =
		(struct page **)__get_free_pages(GFP_KERNEL | GFP_DMA32, order);
	if (rga2_mmu_info.pages == NULL) {
		pr_err("Cannot alloc pages for rga2_mmu_info.pages\n");
		free_pages((unsigned long)rga2_mmu_info.buf_virtual,
			   rga2_mmu_info.buf_order);
		return -ENOMEM;
	}

	rga2_mmu_info.pages_order = order;

	rga_drvdata = kzalloc(sizeof(struct rga_drvdata_t), GFP_KERNEL);
	if (rga_drvdata == NULL) {
		pr_err("failed to allocate driver data.\n");
		return -ENOMEM;
	}

	mutex_init(&rga_drvdata->lock);

	wake_lock_init(&rga_drvdata->wake_lock, WAKE_LOCK_SUSPEND, "rga");

	ret = platform_driver_register(&rga3_core0_driver);
	if (ret != 0) {
		pr_err("Platform device rga3_core0_driver register failed (%d).\n", ret);
		return ret;
	}

	ret = platform_driver_register(&rga3_core1_driver);
	if (ret != 0) {
		pr_err("Platform device rga3_core1_driver register failed (%d).\n", ret);
		return ret;
	}

	ret = platform_driver_register(&rga2_driver);
	if (ret != 0) {
		pr_err("Platform device rga2_driver register failed (%d).\n", ret);
		return ret;
	}

	rga_init_timer();

	rga_drvdata->fence_ctx = rga_fence_context_alloc();
	if (IS_ERR(rga_drvdata->fence_ctx)) {
		pr_err("failed to allocate fence context for RGA\n");
		ret = PTR_ERR(rga_drvdata->fence_ctx);
		return ret;
	}

	ret = misc_register(&rga_dev);
	if (ret) {
		pr_err("cannot register miscdev (%d)\n", ret);
		return ret;
	}

	rga_mm_init(&rga_drvdata->mm);

#ifdef CONFIG_ROCKCHIP_RGA_DEBUGGER
	rga_debugger_init(&rga_drvdata->debugger);
#endif

	pr_info("Module initialized. v%s\n", DRIVER_VERSION);

	return 0;
}

static void __exit rga_exit(void)
{
	free_pages((unsigned long)rga2_mmu_info.buf_virtual,
		 rga2_mmu_info.buf_order);
	free_pages((unsigned long)rga2_mmu_info.pages, rga2_mmu_info.pages_order);

#ifdef CONFIG_ROCKCHIP_RGA_DEBUGGER
	rga_debugger_remove(&rga_drvdata->debugger);
#endif

	rga_mm_remove(&rga_drvdata->mm);

	wake_lock_destroy(&rga_drvdata->wake_lock);

	rga_fence_context_free(rga_drvdata->fence_ctx);

	rga_cancel_timer();

	platform_driver_unregister(&rga3_core0_driver);
	platform_driver_unregister(&rga3_core1_driver);
	platform_driver_unregister(&rga2_driver);

	/* unregister the same miscdevice that rga_init() registered */
	misc_deregister(&rga_dev);

	kfree(rga_drvdata);
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
#ifdef CONFIG_ROCKCHIP_THUNDER_BOOT
module_init(rga_init);
#else
late_initcall(rga_init);
#endif
#else
fs_initcall(rga_init);
#endif
module_exit(rga_exit);

/* Module information */
MODULE_AUTHOR("putin.li@rock-chips.com");
MODULE_DESCRIPTION("Driver for rga device");
MODULE_LICENSE("GPL");