1 /*
2 * Copyright (C) 2012 ROCKCHIP, Inc.
3 *
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14
15 #define pr_fmt(fmt) "rga2: " fmt
16 #include <linux/kernel.h>
17 #include <linux/init.h>
18 #include <linux/module.h>
19 #include <linux/platform_device.h>
20 #include <linux/sched.h>
21 #include <linux/mutex.h>
22 #include <linux/err.h>
23 #include <linux/clk.h>
24 #include <asm/delay.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/delay.h>
27 #include <asm/io.h>
28 #include <linux/irq.h>
29 #include <linux/interrupt.h>
30 #include <linux/fs.h>
31 #include <linux/uaccess.h>
32 #include <linux/miscdevice.h>
33 #include <linux/poll.h>
34 #include <linux/delay.h>
35 #include <linux/wait.h>
36 #include <linux/syscalls.h>
37 #include <linux/timer.h>
38 #include <linux/time.h>
39 #include <asm/cacheflush.h>
40 #include <linux/slab.h>
41 #include <linux/fb.h>
42 #include <linux/wakelock.h>
43 #include <linux/scatterlist.h>
44 #include <linux/version.h>
45
46 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
47 #include <linux/pm_runtime.h>
48 #include <linux/dma-buf-cache.h>
49 #endif
50
51 #include "rga2.h"
52 #include "rga2_reg_info.h"
53 #include "rga2_mmu_info.h"
54 #include "RGA2_API.h"
55 #include "rga2_debugger.h"
56
57 #if IS_ENABLED(CONFIG_ION_ROCKCHIP) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
58 #include <linux/rockchip_ion.h>
59 #endif
60
61 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
62 #include "rga2_drv.h"
63 #endif
64
65 #if ((defined(CONFIG_RK_IOMMU) || defined(CONFIG_ROCKCHIP_IOMMU)) && defined(CONFIG_ION_ROCKCHIP))
66 #define CONFIG_RGA_IOMMU
67 #endif
68
69 #define RGA2_TEST_FLUSH_TIME 0
70 #define RGA2_INFO_BUS_ERROR 1
71 #define RGA2_POWER_OFF_DELAY (4 * HZ) /* 4s */
72 #define RGA2_TIMEOUT_DELAY (HZ / 2) /* 500ms */
73 #define RGA2_MAJOR 255
74 #define RGA2_RESET_TIMEOUT 1000
75 /*
76 * The maximum input is 8192*8192, the maximum output is 4096*4096
77 * The size of physical pages requested is:
78 * ( ( maximum_input_value * maximum_input_value * format_bpp ) / 4K_page_size ) + 1
79 */
80 #define RGA2_PHY_PAGE_SIZE (((8192 * 8192 * 4) / 4096) + 1)
81
82 ktime_t rga2_start;
83 ktime_t rga2_end;
84 int rga2_flag;
85 int first_RGA2_proc;
86 static int rk3368;
87
88 rga2_session rga2_session_global;
89 long (*rga2_ioctl_kernel_p)(struct rga_req *);
90
91 struct rga2_drvdata_t *rga2_drvdata;
92 struct rga2_service_info rga2_service;
93 struct rga2_mmu_buf_t rga2_mmu_buf;
94
95 static int rga2_blit_async(rga2_session *session, struct rga2_req *req);
96 static void rga2_del_running_list(void);
97 static void rga2_del_running_list_timeout(void);
98 static void rga2_try_set_reg(void);
99
100 #ifdef CONFIG_ROCKCHIP_RGA2_DEBUGGER
/*
 * Map an RGA/RGA2 ioctl command code to a printable name for debug
 * logging; unknown codes map to "UNF".
 */
static const char *rga2_get_cmd_mode_str(u32 cmd)
{
    switch (cmd) {
    /* RGA1 command set */
    case RGA_BLIT_SYNC:
        return "RGA_BLIT_SYNC";
    case RGA_BLIT_ASYNC:
        return "RGA_BLIT_ASYNC";
    case RGA_FLUSH:
        return "RGA_FLUSH";
    case RGA_GET_RESULT:
        return "RGA_GET_RESULT";
    case RGAGET_VERSION:
        return "RGAGET_VERSION";
    /* RGA2 command set */
    case RGA2_BLIT_SYNC:
        return "RGA2_BLIT_SYNC";
    case RGA2_BLIT_ASYNC:
        return "RGA2_BLIT_ASYNC";
    case RGA2_FLUSH:
        return "RGA2_FLUSH";
    case RGA2_GET_RESULT:
        return "RGA2_GET_RESULT";
    case RGA2GET_VERSION:
        return "RGA2GET_VERSION";
    default:
        return "UNF";
    }
}
130
/*
 * Describe the alpha-blend configuration for debug logging.
 *
 * Only the two well-known porter-duff configurations used by userspace
 * (mode registers 0x381A / 0x483A with rop flag 0x9) get a readable
 * description; anything else directs the reader to the raw registers.
 *
 * Fix: "imformation" typo corrected to "information" in the fallback
 * message.
 */
static const char *rga2_get_blend_mode_str(u16 alpha_rop_flag, u16 alpha_mode_0, u16 alpha_mode_1)
{
    if (alpha_rop_flag == 0)
        return "no blend";

    if (alpha_rop_flag == 0x9) {
        /* src over dst, with src either pre-multiplied (105) or not (405) */
        if (alpha_mode_0 == 0x381A && alpha_mode_1 == 0x381A)
            return "105 src + (1-src.a)*dst";
        if (alpha_mode_0 == 0x483A && alpha_mode_1 == 0x483A)
            return "405 src.a * src + (1-src.a) * dst";
    }

    return "check reg for more information";
}
147
/* Translate a render-mode register value into a printable name. */
static const char *rga2_get_render_mode_str(u8 mode)
{
    switch (mode) {
    case 0x0:
        return "bitblt";
    case 0x1:
        return "color_palette";
    case 0x2:
        return "color_fill";
    case 0x3:
        return "update_palette_table";
    case 0x4:
        return "update_patten_buff";
    }

    return "UNF";
}
165
/*
 * Translate a rotate-mode value into a printable name: low nibble
 * selects the rotation angle, high nibble the mirror axis.
 */
static const char *rga2_get_rotate_mode_str(u8 mode)
{
    switch (mode) {
    case 0x0:
        return "0";
    case 0x1:
        return "90 degree";
    case 0x2:
        return "180 degree";
    case 0x3:
        return "270 degree";
    case 0x10:
        return "xmirror";
    case 0x20:
        return "ymirror";
    case 0x30:
        return "xymirror";
    }

    return "UNF";
}
187
rga2_is_yuv10bit_format(uint32_t format)188 static bool rga2_is_yuv10bit_format(uint32_t format)
189 {
190 bool ret = false;
191
192 switch (format) {
193 case RGA2_FORMAT_YCbCr_420_SP_10B:
194 case RGA2_FORMAT_YCrCb_420_SP_10B:
195 case RGA2_FORMAT_YCbCr_422_SP_10B:
196 case RGA2_FORMAT_YCrCb_422_SP_10B:
197 ret = true;
198 break;
199 default:
200 break;
201 }
202 return ret;
203 }
204
rga2_is_yuv8bit_format(uint32_t format)205 static bool rga2_is_yuv8bit_format(uint32_t format)
206 {
207 bool ret = false;
208
209 switch (format) {
210 case RGA2_FORMAT_YCbCr_422_SP:
211 case RGA2_FORMAT_YCbCr_422_P:
212 case RGA2_FORMAT_YCbCr_420_SP:
213 case RGA2_FORMAT_YCbCr_420_P:
214 case RGA2_FORMAT_YCrCb_422_SP:
215 case RGA2_FORMAT_YCrCb_422_P:
216 case RGA2_FORMAT_YCrCb_420_SP:
217 case RGA2_FORMAT_YCrCb_420_P:
218 ret = true;
219 break;
220 default:
221 break;
222 }
223 return ret;
224 }
225
/*
 * Map a pixel-format enum to its printable name for debug logging.
 *
 * Fixes:
 *  - RGA2_FORMAT_XRGB_8888 previously returned "XBGR8888".
 *  - The names for the two 420SP 10-bit variants were swapped
 *    (YCbCr reported as YCrCb and vice versa).
 *  - RGA2_FORMAT_Y4 returned lowercase "y4"; normalized to "Y4" to
 *    match every other name.
 */
static const char *rga2_get_format_name(uint32_t format)
{
    switch (format) {
    case RGA2_FORMAT_RGBA_8888:
        return "RGBA8888";
    case RGA2_FORMAT_RGBX_8888:
        return "RGBX8888";
    case RGA2_FORMAT_RGB_888:
        return "RGB888";
    case RGA2_FORMAT_BGRA_8888:
        return "BGRA8888";
    case RGA2_FORMAT_BGRX_8888:
        return "BGRX8888";
    case RGA2_FORMAT_BGR_888:
        return "BGR888";
    case RGA2_FORMAT_RGB_565:
        return "RGB565";
    case RGA2_FORMAT_RGBA_5551:
        return "RGBA5551";
    case RGA2_FORMAT_RGBA_4444:
        return "RGBA4444";
    case RGA2_FORMAT_BGR_565:
        return "BGR565";
    case RGA2_FORMAT_BGRA_5551:
        return "BGRA5551";
    case RGA2_FORMAT_BGRA_4444:
        return "BGRA4444";

    case RGA2_FORMAT_ARGB_8888:
        return "ARGB8888";
    case RGA2_FORMAT_XRGB_8888:
        return "XRGB8888";
    case RGA2_FORMAT_ARGB_5551:
        return "ARGB5551";
    case RGA2_FORMAT_ARGB_4444:
        return "ARGB4444";
    case RGA2_FORMAT_ABGR_8888:
        return "ABGR8888";
    case RGA2_FORMAT_XBGR_8888:
        return "XBGR8888";
    case RGA2_FORMAT_ABGR_5551:
        return "ABGR5551";
    case RGA2_FORMAT_ABGR_4444:
        return "ABGR4444";

    case RGA2_FORMAT_YCbCr_422_SP:
        return "YCbCr422SP";
    case RGA2_FORMAT_YCbCr_422_P:
        return "YCbCr422P";
    case RGA2_FORMAT_YCbCr_420_SP:
        return "YCbCr420SP";
    case RGA2_FORMAT_YCbCr_420_P:
        return "YCbCr420P";
    case RGA2_FORMAT_YCrCb_422_SP:
        return "YCrCb422SP";
    case RGA2_FORMAT_YCrCb_422_P:
        return "YCrCb422P";
    case RGA2_FORMAT_YCrCb_420_SP:
        return "YCrCb420SP";
    case RGA2_FORMAT_YCrCb_420_P:
        return "YCrCb420P";

    case RGA2_FORMAT_YVYU_422:
        return "YVYU422";
    case RGA2_FORMAT_YVYU_420:
        return "YVYU420";
    case RGA2_FORMAT_VYUY_422:
        return "VYUY422";
    case RGA2_FORMAT_VYUY_420:
        return "VYUY420";
    case RGA2_FORMAT_YUYV_422:
        return "YUYV422";
    case RGA2_FORMAT_YUYV_420:
        return "YUYV420";
    case RGA2_FORMAT_UYVY_422:
        return "UYVY422";
    case RGA2_FORMAT_UYVY_420:
        return "UYVY420";

    case RGA2_FORMAT_YCbCr_420_SP_10B:
        return "YCbCr420SP10B";
    case RGA2_FORMAT_YCrCb_420_SP_10B:
        return "YCrCb420SP10B";
    case RGA2_FORMAT_YCbCr_422_SP_10B:
        return "YCbCr422SP10B";
    case RGA2_FORMAT_YCrCb_422_SP_10B:
        return "YCrCb422SP10B";
    case RGA2_FORMAT_BPP_1:
        return "BPP1";
    case RGA2_FORMAT_BPP_2:
        return "BPP2";
    case RGA2_FORMAT_BPP_4:
        return "BPP4";
    case RGA2_FORMAT_BPP_8:
        return "BPP8";
    case RGA2_FORMAT_YCbCr_400:
        return "YCbCr400";
    case RGA2_FORMAT_Y4:
        return "Y4";
    default:
        return "UNF";
    }
}
329
/*
 * Dump one RGA2 request to the kernel log: render/rotate mode, the
 * geometry and format of src, src1 (only if any of its addresses is
 * non-zero) and dst, plus the MMU flags and alpha-blend settings.
 * Pure logging; the request is not modified.
 */
static void print_debug_info(struct rga2_req *req)
{
    INFO("render_mode:%s,bitblit_mode=%d,rotate_mode:%s\n", rga2_get_render_mode_str(req->render_mode),
         req->bitblt_mode, rga2_get_rotate_mode_str(req->rotate_mode));
    INFO("src : y=%lx uv=%lx v=%lx aw=%d ah=%d vw=%d vh=%d xoff=%d yoff=%d format=%s\n", req->src.yrgb_addr,
         req->src.uv_addr, req->src.v_addr, req->src.act_w, req->src.act_h, req->src.vir_w, req->src.vir_h,
         req->src.x_offset, req->src.y_offset, rga2_get_format_name(req->src.format));
    /* src1 is optional (second blend source); skip it when unset. */
    if (req->src1.yrgb_addr != 0 || req->src1.uv_addr != 0 || req->src1.v_addr != 0) {
        INFO("src1 : y=%lx uv=%lx v=%lx aw=%d ah=%d vw=%d vh=%d xoff=%d yoff=%d format=%s\n", req->src1.yrgb_addr,
             req->src1.uv_addr, req->src1.v_addr, req->src1.act_w, req->src1.act_h, req->src1.vir_w, req->src1.vir_h,
             req->src1.x_offset, req->src1.y_offset, rga2_get_format_name(req->src1.format));
    }
    INFO("dst : y=%lx uv=%lx v=%lx aw=%d ah=%d vw=%d vh=%d xoff=%d yoff=%d format=%s\n", req->dst.yrgb_addr,
         req->dst.uv_addr, req->dst.v_addr, req->dst.act_w, req->dst.act_h, req->dst.vir_w, req->dst.vir_h,
         req->dst.x_offset, req->dst.y_offset, rga2_get_format_name(req->dst.format));
    INFO("mmu : src=%.2x src1=%.2x dst=%.2x els=%.2x\n", req->mmu_info.src0_mmu_flag, req->mmu_info.src1_mmu_flag,
         req->mmu_info.dst_mmu_flag, req->mmu_info.els_mmu_flag);
    INFO("alpha : flag %x mode0=%x mode1=%x\n", req->alpha_rop_flag, req->alpha_mode_0, req->alpha_mode_1);
    INFO("blend mode is %s\n", rga2_get_blend_mode_str(req->alpha_rop_flag, req->alpha_mode_0, req->alpha_mode_1));
    INFO("yuv2rgb mode is %x\n", req->yuv2rgb_mode);
}
351
rga2_align_check(struct rga2_req * req)352 static int rga2_align_check(struct rga2_req *req)
353 {
354 if (rga2_is_yuv10bit_format(req->src.format)) {
355 if ((req->src.vir_w % 0x10) || (req->src.x_offset % 0x2) || (req->src.act_w % 0x2) ||
356 (req->src.y_offset % 0x2) || (req->src.act_h % 0x2) || (req->src.vir_h % 0x2)) {
357 INFO("err src wstride is not align to 16 or yuv not align to 2");
358 }
359 }
360 if (rga2_is_yuv10bit_format(req->dst.format)) {
361 if ((req->dst.vir_w % 0x10) || (req->dst.x_offset % 0x2) || (req->dst.act_w % 0x2) ||
362 (req->dst.y_offset % 0x2) || (req->dst.act_h % 0x2) || (req->dst.vir_h % 0x2)) {
363 INFO("err dst wstride is not align to 16 or yuv not align to 2");
364 }
365 }
366 if (rga2_is_yuv8bit_format(req->src.format)) {
367 if ((req->src.vir_w % 0x8) || (req->src.x_offset % 0x2) || (req->src.act_w % 0x2) ||
368 (req->src.y_offset % 0x2) || (req->src.act_h % 0x2) || (req->src.vir_h % 0x2)) {
369 INFO("err src wstride is not align to 8 or yuv not align to 2");
370 }
371 }
372 if (rga2_is_yuv8bit_format(req->dst.format)) {
373 if ((req->dst.vir_w % 0x8) || (req->dst.x_offset % 0x2) || (req->dst.act_w % 0x2) ||
374 (req->dst.y_offset % 0x2) || (req->dst.act_h % 0x2) || (req->dst.vir_h % 0x2)) {
375 INFO("err dst wstride is not align to 8 or yuv not align to 2");
376 }
377 }
378 INFO("rga align check over!\n");
379 return 0;
380 }
381
rga2_scale_check(struct rga2_req * req)382 int rga2_scale_check(struct rga2_req *req)
383 {
384 u32 saw, sah, daw, dah;
385 struct rga2_drvdata_t *data = rga2_drvdata;
386
387 saw = req->src.act_w;
388 sah = req->src.act_h;
389 daw = req->dst.act_w;
390 dah = req->dst.act_h;
391
392 if (strncmp(data->version, "2.20", 0x4) == 0) {
393 if (((saw >> 0x4) >= daw) || ((sah >> 0x4) >= dah)) {
394 INFO("unsupported to scaling less than 1/16 times.\n");
395 }
396 if (((daw >> 0x4) >= saw) || ((dah >> 0x4) >= sah)) {
397 INFO("unsupported to scaling more than 16 times.\n");
398 }
399 } else {
400 if (((saw >> 0x3) >= daw) || ((sah >> 0x3) >= dah)) {
401 INFO("unsupported to scaling less than 1/8 tiems.\n");
402 }
403 if (((daw >> 0x3) >= saw) || ((dah >> 0x3) >= sah)) {
404 INFO("unsupported to scaling more than 8 times.\n");
405 }
406 }
407 INFO("rga2 scale check over.\n");
408 return 0;
409 }
410 #endif
411
/*
 * Decode and log the geometry fields of a 32-word RGA2 command buffer:
 * src/dst stride, format (low nibble of words 1 / 0xE) and actual
 * width/height (word 0x7 / 0x13, packed as height:16 | width:16, both
 * stored minus one).
 */
static void rga2_printf_cmd_buf(u32 *cmd_buf)
{
    u32 words[0x20];
    u32 n;
    u32 src_stride, dst_stride, src_format, dst_format;
    u32 src_aw, src_ah, dst_aw, dst_ah;

    for (n = 0; n < 0x20; n++)
        words[n] = cmd_buf[n];

    src_stride = words[0x6];
    dst_stride = words[0x12];

    /* format lives in the low 4 bits of the mode words */
    src_format = words[1] & 0xf;
    dst_format = words[0xE] & 0xf;

    /* sizes are stored as (value - 1), width in the low half-word */
    src_aw = (words[0x7] & 0xffff) + 1;
    src_ah = ((words[0x7] & 0xffff0000) >> 0x10) + 1;

    dst_aw = (words[0x13] & 0xffff) + 1;
    dst_ah = ((words[0x13] & 0xffff0000) >> 0x10) + 1;

    DBG("src : aw = %d ah = %d stride = %d format is %x\n", src_aw, src_ah, src_stride, src_format);
    DBG("dst : aw = %d ah = %d stride = %d format is %x\n", dst_aw, dst_ah, dst_stride, dst_format);
}
438
/* MMIO write: store value b to the RGA2 register at byte offset r. */
static inline void rga2_write(u32 b, u32 r)
{
    *((volatile unsigned int *)(rga2_drvdata->rga_base + r)) = b;
}
443
/* MMIO read: load the RGA2 register at byte offset r. */
static inline u32 rga2_read(u32 r)
{
    return *((volatile unsigned int *)(rga2_drvdata->rga_base + r));
}
448
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0))
/*
 * Read the hardware version register and cache it as a string in
 * rga2_drvdata->version (format "major.minor.svn").
 *
 * The device must be powered just for the single register read, so the
 * function brackets the access with pm_runtime (>= 4.4) and the AXI/AHB
 * clocks. Returns 0 on success, -EINVAL if the driver data is not yet
 * initialized.
 */
static inline int rga2_init_version(void)
{
    struct rga2_drvdata_t *rga = rga2_drvdata;
    u32 major_version, minor_version, svn_version;
    u32 reg_version;

    if (!rga) {
        pr_err("rga2_drvdata is null\n");
        return -EINVAL;
    }
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
    pm_runtime_get_sync(rga2_drvdata->dev);
#endif

    clk_prepare_enable(rga2_drvdata->aclk_rga2);
    clk_prepare_enable(rga2_drvdata->hclk_rga2);

    /* 0x028: version register (presumably — matches the masks below) */
    reg_version = rga2_read(0x028);

    clk_disable_unprepare(rga2_drvdata->aclk_rga2);
    clk_disable_unprepare(rga2_drvdata->hclk_rga2);

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
    pm_runtime_put(rga2_drvdata->dev);
#endif

    major_version = (reg_version & RGA2_MAJOR_VERSION_MASK) >> 0x18;
    minor_version = (reg_version & RGA2_MINOR_VERSION_MASK) >> 0x14;
    svn_version = (reg_version & RGA2_SVN_VERSION_MASK);

    /*
     * some old rga ip has no rga version register, so force set to 2.00
     */
    if (!major_version && !minor_version) {
        major_version = 0x2;
    }
    snprintf(rga->version, 0xA, "%x.%01x.%05x", major_version, minor_version, svn_version);

    return 0;
}
#endif
/*
 * Soft-reset the RGA2 engine: kick the reset bits in RGA2_SYS_CTRL,
 * then poll bit 0 of the same register (busy/reset-in-progress flag)
 * until it clears, up to RGA2_RESET_TIMEOUT microseconds.
 */
static void rga2_soft_reset(void)
{
    u32 i;
    u32 reg;

    /* bits 3/4/6: reset request — NOTE(review): per TRM, confirm exact meaning */
    rga2_write((1 << 0x3) | (1 << 0x4) | (1 << 0x6), RGA2_SYS_CTRL);
    for (i = 0; i < RGA2_RESET_TIMEOUT; i++) {
        reg = rga2_read(RGA2_SYS_CTRL) & 1; // RGA_SYS_CTRL
        if (reg == 0) {
            break;
        }
        udelay(1);
    }
    if (i == RGA2_RESET_TIMEOUT) {
        ERR("soft reset timeout.\n");
    }
}
507
rga2_dump(void)508 static void rga2_dump(void)
509 {
510 int running;
511 struct rga2_reg *reg, *reg_tmp;
512 rga2_session *session, *session_tmp;
513
514 running = atomic_read(&rga2_service.total_running);
515 printk("rga total_running %d\n", running);
516 list_for_each_entry_safe(session, session_tmp, &rga2_service.session, list_session)
517 {
518 printk("session pid %d:\n", session->pid);
519 running = atomic_read(&session->task_running);
520 printk("task_running %d\n", running);
521 list_for_each_entry_safe(reg, reg_tmp, &session->waiting, session_link)
522 {
523 printk("waiting register set 0x %.lu\n", (unsigned long)reg);
524 }
525 list_for_each_entry_safe(reg, reg_tmp, &session->running, session_link)
526 {
527 printk("running register set 0x %.lu\n", (unsigned long)reg);
528 }
529 }
530 }
531
/*
 * Schedule the delayed power-off worker RGA2_POWER_OFF_DELAY (4 s) from
 * now. Kernels >= 4.4 use the ordinary system workqueue; older kernels
 * use the non-reentrant one.
 */
static inline void rga2_queue_power_off_work(void)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
    queue_delayed_work(system_wq, &rga2_drvdata->power_off_work, RGA2_POWER_OFF_DELAY);
#else
    queue_delayed_work(system_nrt_wq, &rga2_drvdata->power_off_work, RGA2_POWER_OFF_DELAY);
#endif
}
540
/* Caller must hold rga_service.lock */
/*
 * Power the engine up (runtime PM / power domain, clocks, wake lock)
 * and mark the service enabled. At most once per second the pending
 * power-off work is pushed back so an active engine is not shut down.
 * Idempotent: returns early if already enabled.
 */
static void rga2_power_on(void)
{
    static ktime_t last;    /* time of the last power-off-work re-arm */
    ktime_t now = ktime_get();

    /* re-arm the delayed power-off at most once per second */
    if (ktime_to_ns(ktime_sub(now, last)) > NSEC_PER_SEC) {
        cancel_delayed_work_sync(&rga2_drvdata->power_off_work);
        rga2_queue_power_off_work();
        last = now;
    }
    if (rga2_service.enable) {
        return;
    }

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
    pm_runtime_get_sync(rga2_drvdata->dev);
#else
    clk_prepare_enable(rga2_drvdata->pd_rga2);
#endif
    clk_prepare_enable(rga2_drvdata->clk_rga2);
    clk_prepare_enable(rga2_drvdata->aclk_rga2);
    clk_prepare_enable(rga2_drvdata->hclk_rga2);
    wake_lock(&rga2_drvdata->wake_lock);
    rga2_service.enable = true;
}
566
/* Caller must hold rga_service.lock */
/*
 * Counterpart of rga2_power_on(): release clocks, runtime PM / power
 * domain and the wake lock, and mark the service disabled. If tasks are
 * still running it logs, waits 50 ms as a best-effort grace period and
 * dumps the service state before powering off anyway.
 */
static void rga2_power_off(void)
{
    int total_running;

    if (!rga2_service.enable) {
        return;
    }

    total_running = atomic_read(&rga2_service.total_running);
    if (total_running) {
        pr_err("power off when %d task running!!\n", total_running);
        mdelay(0x32);   /* 50 ms grace period for in-flight work */
        pr_err("delay 50 ms for running task\n");
        rga2_dump();
    }

    clk_disable_unprepare(rga2_drvdata->clk_rga2);
    clk_disable_unprepare(rga2_drvdata->aclk_rga2);
    clk_disable_unprepare(rga2_drvdata->hclk_rga2);

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
    pm_runtime_put(rga2_drvdata->dev);
#else
    clk_disable_unprepare(rga2_drvdata->pd_rga2);
#endif

    wake_unlock(&rga2_drvdata->wake_lock);
    first_RGA2_proc = 0;
    rga2_service.enable = false;
}
598
rga2_power_off_work(struct work_struct * work)599 static void rga2_power_off_work(struct work_struct *work)
600 {
601 if (mutex_trylock(&rga2_service.lock)) {
602 rga2_power_off();
603 mutex_unlock(&rga2_service.lock);
604 } else {
605 /* Come back later if the device is busy... */
606 rga2_queue_power_off_work();
607 }
608 }
609
/*
 * RGA_FLUSH ioctl backend: wait (up to RGA2_TIMEOUT_DELAY) for all of
 * the session's submitted work to complete.
 *
 * On a wait error the running list is dropped; on timeout it is dropped
 * via the timeout path (which soft-resets the engine) and the scheduler
 * is kicked again. Returns 0 on success, the wait error, or -ETIMEDOUT.
 *
 * Fix: the error branch logged `ret` (still 0 at that point) instead of
 * the actual wait result `ret_timeout`.
 */
static int rga2_flush(rga2_session *session, unsigned long arg)
{
    int ret = 0;
    int ret_timeout;
#ifdef CONFIG_ROCKCHIP_RGA2_DEBUGGER
    ktime_t start = ktime_set(0, 0);
    ktime_t end = ktime_set(0, 0);

    if (RGA2_TEST_TIME) {
        start = ktime_get();
    }
#endif
    ret_timeout = wait_event_timeout(session->wait, atomic_read(&session->done), RGA2_TIMEOUT_DELAY);
    if (unlikely(ret_timeout < 0)) {
        u32 i;
        u32 *p;
        p = rga2_service.cmd_buff;
        pr_err("flush pid %d wait task ret %d\n", session->pid, ret_timeout);
        pr_err("interrupt = %x status = %x\n", rga2_read(RGA2_INT), rga2_read(RGA2_STATUS));
        rga2_printf_cmd_buf(p);
        DBG("rga2 CMD\n");
        for (i = 0; i < 0x7; i++) {
            DBG("%.8x %.8x %.8x %.8x\n", p[0 + i * 0x4], p[1 + i * 0x4], p[0x2 + i * 0x4], p[0x3 + i * 0x4]);
        }
        mutex_lock(&rga2_service.lock);
        rga2_del_running_list();
        mutex_unlock(&rga2_service.lock);
        ret = ret_timeout;
    } else if (0 == ret_timeout) {
        u32 i;
        u32 *p;
        p = rga2_service.cmd_buff;
        pr_err("flush pid %d wait %d task done timeout\n", session->pid, atomic_read(&session->task_running));
        pr_err("interrupt = %x status = %x\n", rga2_read(RGA2_INT), rga2_read(RGA2_STATUS));
        rga2_printf_cmd_buf(p);
        DBG("rga2 CMD\n");
        for (i = 0; i < 0x7; i++) {
            DBG("%.8x %.8x %.8x %.8x\n", p[0 + i * 0x4], p[1 + i * 0x4], p[0x2 + i * 0x4], p[0x3 + i * 0x4]);
        }
        /* timeout: reclaim registers (with engine reset) and restart queue */
        mutex_lock(&rga2_service.lock);
        rga2_del_running_list_timeout();
        rga2_try_set_reg();
        mutex_unlock(&rga2_service.lock);
        ret = -ETIMEDOUT;
    }

#ifdef CONFIG_ROCKCHIP_RGA2_DEBUGGER
    if (RGA2_TEST_TIME) {
        end = ktime_get();
        end = ktime_sub(end, start);
        DBG("one flush wait time %d\n", (int)ktime_to_us(end));
    }
#endif
    return ret;
}
665
/*
 * RGA_GET_RESULT ioctl backend: copy the session's completed-task count
 * to the userspace int pointed to by @arg. Returns 0 on success,
 * -EFAULT if the copy-out fails.
 */
static int rga2_get_result(rga2_session *session, unsigned long arg)
{
    int num_done = atomic_read(&session->num_done);

    if (unlikely(copy_to_user((void __user *)arg, &num_done, sizeof(int)))) {
        printk("copy_to_user failed\n");
        return -EFAULT;
    }

    return 0;
}
678
rga2_check_param(const struct rga2_req * req)679 static int rga2_check_param(const struct rga2_req *req)
680 {
681 if (!((req->render_mode == color_fill_mode))) {
682 if (unlikely((req->src.act_w <= 0) || (req->src.act_w > 0x2000) || (req->src.act_h <= 0) ||
683 (req->src.act_h > 0x2000))) {
684 printk("invalid source resolution act_w = %d, act_h = %d\n", req->src.act_w, req->src.act_h);
685 return -EINVAL;
686 }
687 }
688
689 if (!((req->render_mode == color_fill_mode))) {
690 if (unlikely((req->src.vir_w <= 0) || (req->src.vir_w > 0x2000) || (req->src.vir_h <= 0) ||
691 (req->src.vir_h > 0x2000))) {
692 printk("invalid source resolution vir_w = %d, vir_h = %d\n", req->src.vir_w, req->src.vir_h);
693 return -EINVAL;
694 }
695 }
696
697 // check dst width and height
698 if (unlikely((req->dst.act_w <= 0) || (req->dst.act_w > 0x1000) || (req->dst.act_h <= 0) ||
699 (req->dst.act_h > 0x1000))) {
700 printk("invalid destination resolution act_w = %d, act_h = %d\n", req->dst.act_w, req->dst.act_h);
701 return -EINVAL;
702 }
703
704 if (unlikely((req->dst.vir_w <= 0) || (req->dst.vir_w > 0x1000) || (req->dst.vir_h <= 0) ||
705 (req->dst.vir_h > 0x1000))) {
706 printk("invalid destination resolution vir_w = %d, vir_h = %d\n", req->dst.vir_w, req->dst.vir_h);
707 return -EINVAL;
708 }
709
710 // check src_vir_w
711 if (unlikely(req->src.vir_w < req->src.act_w)) {
712 printk("invalid src_vir_w act_w = %d, vir_w = %d\n", req->src.act_w, req->src.vir_w);
713 return -EINVAL;
714 }
715
716 // check dst_vir_w
717 if (unlikely(req->dst.vir_w < req->dst.act_w)) {
718 if (req->rotate_mode != 1) {
719 printk("invalid dst_vir_w act_h = %d, vir_h = %d\n", req->dst.act_w, req->dst.vir_w);
720 return -EINVAL;
721 }
722 }
723
724 return 0;
725 }
726
/*
 * Copy a prepared 32-word command set into slot @offset of the global
 * service command buffer and bump the per-session / global counters.
 * Logs (but does not fail) when the session unexpectedly already has a
 * task marked running.
 */
static void rga2_copy_reg(struct rga2_reg *reg, uint32_t offset)
{
    uint32_t i;
    uint32_t *cmd_buf;
    uint32_t *reg_p;

    if (atomic_read(&reg->session->task_running) != 0) {
        printk(KERN_ERR "task_running is no zero\n");
    }

    atomic_add(1, &rga2_service.cmd_num);
    atomic_add(1, &reg->session->task_running);

    /* each slot is 0x20 (32) 32-bit words wide */
    cmd_buf = (uint32_t *)rga2_service.cmd_buff + offset * 0x20;
    reg_p = (uint32_t *)reg->cmd_reg;

    for (i = 0; i < 0x20; i++) {
        cmd_buf[i] = reg_p[i];
    }
}
747
/*
 * Allocate and fill a register set for one request: resolve the DMA
 * buffers, build MMU page tables when any surface needs them, generate
 * the hardware command words, and enqueue the set on both the service
 * and session waiting lists.
 *
 * Returns the new set, or NULL on any failure (the page is freed on
 * every error path). The caller owns the returned set via the lists.
 */
static struct rga2_reg *rga2_reg_init(rga2_session *session, struct rga2_req *req)
{
    int32_t ret;

    /* Alloc 4k size for rga2_reg use. */
    struct rga2_reg *reg = (struct rga2_reg *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);

    if (reg == NULL) {
        pr_err("get_zeroed_page fail in rga_reg_init\n");
        return NULL;
    }

    reg->session = session;
    INIT_LIST_HEAD(&reg->session_link);
    INIT_LIST_HEAD(&reg->status_link);

    ret = rga2_get_dma_info(reg, req);
    if (ret < 0) {
        pr_err("fail to get dma buffer info!\n");
        free_page((unsigned long)reg);

        return NULL;
    }

    /* any surface with its mmu flag set needs a translation table */
    if ((req->mmu_info.src0_mmu_flag & 1) || (req->mmu_info.src1_mmu_flag & 1) || (req->mmu_info.dst_mmu_flag & 1) ||
        (req->mmu_info.els_mmu_flag & 1)) {
        ret = rga2_set_mmu_info(reg, req);
        if (ret < 0) {
            printk("%s, [%d] set mmu info error \n", __FUNCTION__, __LINE__);
            free_page((unsigned long)reg);

            return NULL;
        }
    }

    if (RGA2_gen_reg_info((uint8_t *)reg->cmd_reg, (uint8_t *)reg->csc_reg, req) == -1) {
        printk("gen reg info error\n");
        free_page((unsigned long)reg);

        return NULL;
    }

    mutex_lock(&rga2_service.lock);
    list_add_tail(&reg->status_link, &rga2_service.waiting);
    list_add_tail(&reg->session_link, &session->waiting);
    mutex_unlock(&rga2_service.lock);

    return reg;
}
797
/* Caller must hold rga_service.lock */
/* Unlink a register set from both lists and free its backing page. */
static void rga2_reg_deinit(struct rga2_reg *reg)
{
    list_del_init(&reg->session_link);
    list_del_init(&reg->status_link);
    free_page((unsigned long)reg);
}
805
806 /* Caller must hold rga_service.lock */
rga2_reg_from_wait_to_run(struct rga2_reg * reg)807 static void rga2_reg_from_wait_to_run(struct rga2_reg *reg)
808 {
809 list_del_init(®->status_link);
810 list_add_tail(®->status_link, &rga2_service.running);
811
812 list_del_init(®->session_link);
813 list_add_tail(®->session_link, ®->session->running);
814 }
815
/* Caller must hold rga_service.lock */
/*
 * Tear down every register set still owned by a session (both the
 * waiting and running lists), e.g. when the session is closed.
 */
static void rga2_service_session_clear(rga2_session *session)
{
    struct rga2_reg *reg, *n;

    list_for_each_entry_safe(reg, n, &session->waiting, session_link)
    {
        rga2_reg_deinit(reg);
    }

    list_for_each_entry_safe(reg, n, &session->running, session_link)
    {
        rga2_reg_deinit(reg);
    }
}
831
/* Caller must hold rga_service.lock */
/*
 * Kick the hardware if it is idle: take the first waiting register set,
 * power the engine on, program the command/CSC registers and start
 * processing. No-op when something is already running or nothing waits.
 *
 * Programming sequence: copy commands to slot 0 of the service buffer,
 * move the set to the running lists, flush the CPU cache over the
 * command words, clear SYS_CTRL, point CMD_BASE at the (physically
 * contiguous) command page, load the 12 CSC coefficient words, select
 * master mode, enable the completion interrupts and finally write
 * CMD_CTRL to start. Debug builds optionally dump the written and
 * read-back register contents.
 */
static void rga2_try_set_reg(void)
{
    int i;
    struct rga2_reg *reg;

    if (list_empty(&rga2_service.running)) {
        if (!list_empty(&rga2_service.waiting)) {
            /* RGA is idle */
            reg = list_entry(rga2_service.waiting.next, struct rga2_reg, status_link);

            rga2_power_on();
            udelay(1);

            rga2_copy_reg(reg, 0);
            rga2_reg_from_wait_to_run(reg);

            /* make the 32 command words visible to the device */
            rga2_dma_flush_range(&reg->cmd_reg[0], &reg->cmd_reg[0x20]);

            rga2_write(0x0, RGA2_SYS_CTRL);

            /* CMD buff */
            rga2_write(virt_to_phys(reg->cmd_reg), RGA2_CMD_BASE);

            /* full csc reg */
            for (i = 0; i < 0xC; i++) {
                rga2_write(reg->csc_reg[i], RGA2_CSC_COE_BASE + i * 0x4);
            }

#ifdef CONFIG_ROCKCHIP_RGA2_DEBUGGER
            if (RGA2_TEST_REG) {
                if (rga2_flag) {
                    int32_t *p;

                    p = rga2_service.cmd_buff;
                    INFO("CMD_REG\n");
                    for (i = 0; i < 0x8; i++) {
                        INFO("%.8x %.8x %.8x %.8x\n", p[0 + i * 0x4],
                             p[1 + i * 0x4], p[0x2 + i * 0x4], p[0x3 + i * 0x4]);
                    }

                    p = reg->csc_reg;
                    INFO("CSC_REG\n");
                    for (i = 0; i < 0x3; i++) {
                        INFO("%.8x %.8x %.8x %.8x\n", p[0 + i * 0x4],
                             p[1 + i * 0x4], p[0x2 + i * 0x4], p[0x3 + i * 0x4]);
                    }
                }
            }
#endif

            /* master mode */
            rga2_write((0x1 << 1) | (0x1 << 0x2) | (0x1 << 0x5) | (0x1 << 0x6), RGA2_SYS_CTRL);

            /* All CMD finish int */
            rga2_write(rga2_read(RGA2_INT) | (0x1 << 0xA) | (0x1 << 0x9) | (0x1 << 0x8), RGA2_INT);

#ifdef CONFIG_ROCKCHIP_RGA2_DEBUGGER
            if (RGA2_TEST_TIME) {
                rga2_start = ktime_get();
            }
#endif

            /* Start proc */
            atomic_set(&reg->session->done, 0);
            rga2_write(0x1, RGA2_CMD_CTRL);
#ifdef CONFIG_ROCKCHIP_RGA2_DEBUGGER
            if (RGA2_TEST_REG) {
                if (rga2_flag) {
                    INFO("CMD_READ_BACK_REG\n");
                    for (i = 0; i < 0x8; i++) {
                        INFO("%.8x %.8x %.8x %.8x\n", rga2_read(0x100 + i * 0x10 + 0),
                             rga2_read(0x100 + i * 0x10 + 0x4), rga2_read(0x100 + i * 0x10 + 0x8),
                             rga2_read(0x100 + i * 0x10 + 0xc));
                    }

                    INFO("CSC_READ_BACK_REG\n");
                    for (i = 0; i < 0x3; i++) {
                        INFO("%.8x %.8x %.8x %.8x\n", rga2_read(RGA2_CSC_COE_BASE + i * 0x10 + 0),
                             rga2_read(RGA2_CSC_COE_BASE + i * 0x10 + 0x4),
                             rga2_read(RGA2_CSC_COE_BASE + i * 0x10 + 0x8),
                             rga2_read(RGA2_CSC_COE_BASE + i * 0x10 + 0xc));
                    }
                }
            }
#endif
        }
    }
}
921
/*
 * Retire every register set on the service running list: return its MMU
 * table space to the ring buffer, release DMA buffers, decrement the
 * task counters, wake the owning session when it has nothing left
 * waiting, and free the set. Caller must hold rga_service.lock.
 */
static void rga2_del_running_list(void)
{
    struct rga2_mmu_buf_t *tbuf = &rga2_mmu_buf;
    struct rga2_reg *reg;

    while (!list_empty(&rga2_service.running)) {
        reg = list_entry(rga2_service.running.next, struct rga2_reg, status_link);
        /* advance the MMU ring-buffer consumer index, wrapping past 2*size */
        if (reg->MMU_len && tbuf) {
            if (tbuf->back + reg->MMU_len > 0x2 * tbuf->size) {
                tbuf->back = reg->MMU_len + tbuf->size;
            } else {
                tbuf->back += reg->MMU_len;
            }
        }
        rga2_put_dma_info(reg);
        atomic_sub(1, &reg->session->task_running);
        atomic_sub(1, &rga2_service.total_running);

        if (list_empty(&reg->session->waiting)) {
            atomic_set(&reg->session->done, 1);
            wake_up(&reg->session->wait);
        }

        rga2_reg_deinit(reg);
    }
}
948
/*
 * Timeout variant of rga2_del_running_list(): identical cleanup of the
 * running list, but additionally soft-resets the engine for each
 * retired set since the hardware is assumed stuck.
 * Caller must hold rga_service.lock.
 */
static void rga2_del_running_list_timeout(void)
{
    struct rga2_mmu_buf_t *tbuf = &rga2_mmu_buf;
    struct rga2_reg *reg;

    while (!list_empty(&rga2_service.running)) {
        reg = list_entry(rga2_service.running.next, struct rga2_reg, status_link);
        /* advance the MMU ring-buffer consumer index, wrapping past 2*size */
        if (reg->MMU_len && tbuf) {
            if (tbuf->back + reg->MMU_len > 0x2 * tbuf->size) {
                tbuf->back = reg->MMU_len + tbuf->size;
            } else {
                tbuf->back += reg->MMU_len;
            }
        }
        rga2_put_dma_info(reg);
        atomic_sub(1, &reg->session->task_running);
        atomic_sub(1, &rga2_service.total_running);
        rga2_soft_reset();
        if (list_empty(&reg->session->waiting)) {
            atomic_set(&reg->session->done, 1);
            wake_up(&reg->session->wait);
        }
        rga2_reg_deinit(reg);
    }
    return;
}
975
/*
 * Resolve a request's DMA buffers and build/flush its MMU tables
 * without submitting any work — used to flush caches for the mapped
 * surfaces. The scratch register set lives only for this call.
 *
 * Fixes: the allocation-failure path previously jumped to a label that
 * called free_page() on the NULL pointer; it now returns directly. The
 * error message also claimed "kzalloc" although the allocator is
 * get_zeroed_page().
 *
 * Returns 0 on success or a negative errno.
 */
static int rga2_blit_flush_cache(rga2_session *session, struct rga2_req *req)
{
    int ret = 0;
    /* Alloc 4k size for rga2_reg use. */
    struct rga2_reg *reg = (struct rga2_reg *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
    struct rga2_mmu_buf_t *tbuf = &rga2_mmu_buf;

    if (!reg) {
        pr_err("%s, [%d] get_zeroed_page error\n", __func__, __LINE__);
        return -ENOMEM;
    }

    ret = rga2_get_dma_info(reg, req);
    if (ret < 0) {
        pr_err("fail to get dma buffer info!\n");
        goto err_free_reg;
    }

    if ((req->mmu_info.src0_mmu_flag & 1) || (req->mmu_info.src1_mmu_flag & 1) || (req->mmu_info.dst_mmu_flag & 1) ||
        (req->mmu_info.els_mmu_flag & 1)) {
        reg->MMU_map = true;
        ret = rga2_set_mmu_info(reg, req);
        if (ret < 0) {
            pr_err("%s, [%d] set mmu info error\n", __func__, __LINE__);
            ret = -EFAULT;
            goto err_free_reg;
        }
    }

    /* advance the MMU ring-buffer consumer index, wrapping past 2*size */
    if (reg->MMU_len && tbuf) {
        if (tbuf->back + reg->MMU_len > 0x2 * tbuf->size) {
            tbuf->back = reg->MMU_len + tbuf->size;
        } else {
            tbuf->back += reg->MMU_len;
        }
    }

err_free_reg:
    free_page((unsigned long)reg);

    return ret;
}
1017
/*
 * Validate a request, build its register set, enqueue it and kick the
 * scheduler. Returns 0 on success, -EINVAL for bad parameters, -EFAULT
 * when the register set cannot be built.
 */
static int rga2_blit(rga2_session *session, struct rga2_req *req)
{
    struct rga2_reg *reg;
    int ret;

    /* check value if legal */
    ret = rga2_check_param(req);
    if (ret == -EINVAL) {
        pr_err("req argument is inval\n");
        return ret;
    }

    reg = rga2_reg_init(session, req);
    if (!reg) {
        pr_err("init reg fail\n");
        return -EFAULT;
    }

    mutex_lock(&rga2_service.lock);
    atomic_add(1, &rga2_service.total_running);
    rga2_try_set_reg();
    mutex_unlock(&rga2_service.lock);

    return 0;
}
1045
/*
 * rga2_blit_async - queue a blit without waiting for completion; the
 * caller collects the result later via RGA_GET_RESULT.
 *
 * Returns the result of rga2_blit() (0 on successful queueing).
 */
static int rga2_blit_async(rga2_session *session, struct rga2_req *req)
{
	int ret = -1;

#ifdef CONFIG_ROCKCHIP_RGA2_DEBUGGER
	/*
	 * Fix: this used to read "if (1) { ... } else { rga2_flag = 0; }",
	 * leaving the else branch unreachable; the dead branch is removed.
	 */
	if (RGA2_TEST_MSG) {
		print_debug_info(req);
		rga2_flag = 1;
		INFO("*** rga_blit_async proc ***\n");
	}
#endif
	/* Reset the completion flag so GET_RESULT sees only this job's done. */
	atomic_set(&session->done, 0);
	ret = rga2_blit(session, req);

	return ret;
}
1065
/*
 * rga2_blit_sync - submit a blit and wait for hardware completion.
 *
 * On timeout the job is dropped and retried up to 10 times. When the
 * timed-out request was a scaling blit, a small non-scaling "restore"
 * pass is run first to bring the hardware back to a sane state, then
 * the caller's original request is replayed.
 *
 * Returns 0 on success, -ETIMEDOUT after exhausting retries, or the
 * error from rga2_blit().
 */
static int rga2_blit_sync(rga2_session *session, struct rga2_req *req)
{
	struct rga2_req req_bak;
	int restore = 0;
	int try = 10;
	int ret = -1;
	int ret_timeout = 0;

	/* Keep a pristine copy so a retry can restore the caller's request. */
	memcpy(&req_bak, req, sizeof(req_bak));

	while (1) {
#ifdef CONFIG_ROCKCHIP_RGA2_DEBUGGER
		if (RGA2_TEST_MSG) {
			if (1) {	/* NOTE(review): the else branch is unreachable */
				print_debug_info(req);
				rga2_flag = 1;
				INFO("*** rga2_blit_sync proc ***\n");
			} else {
				rga2_flag = 0;
			}
		}
		if (RGA2_CHECK_MODE) {
			rga2_align_check(req);
		}
#endif

		/* Arm the completion flag before queueing. */
		atomic_set(&session->done, 0);

		ret = rga2_blit(session, req);
		if (ret < 0) {
			return ret;
		}

		/* rk3368 waits with a quarter of the default timeout. */
		if (rk3368) {
			ret_timeout = wait_event_timeout(session->wait, atomic_read(&session->done), RGA2_TIMEOUT_DELAY / 0x4);
		} else {
			ret_timeout = wait_event_timeout(session->wait, atomic_read(&session->done), RGA2_TIMEOUT_DELAY);
		}

		/*
		 * NOTE(review): wait_event_timeout() never returns a negative
		 * value, so this branch appears unreachable — confirm whether
		 * an interruptible wait was originally intended here.
		 */
		if (unlikely(ret_timeout < 0)) {
			u32 i;
			u32 *p;

			/* Dump the command buffer and retire the running job. */
			p = rga2_service.cmd_buff;
			pr_err("Rga sync pid %d wait task ret %d\n", session->pid, ret_timeout);
			pr_err("interrupt = %x status = %x\n", rga2_read(RGA2_INT), rga2_read(RGA2_STATUS));
			rga2_printf_cmd_buf(p);
			DBG("rga2 CMD\n");
			for (i = 0; i < 0x7; i++) {
				DBG("%.8x %.8x %.8x %.8x\n", p[0 + i * 0x4], p[1 + i * 0x4], p[0x2 + i * 0x4], p[0x3 + i * 0x4]);
			}
			mutex_lock(&rga2_service.lock);
			rga2_del_running_list();
			mutex_unlock(&rga2_service.lock);
			ret = ret_timeout;
		} else if (ret_timeout == 0) {
			u32 i;
			u32 *p;

			/* Hardware never signalled completion: dump state, drop the
			 * job via the timeout path and reschedule the queue. */
			p = rga2_service.cmd_buff;
			pr_err("Rga sync pid %d wait %d task done timeout\n", session->pid, atomic_read(&session->task_running));
			pr_err("interrupt = %x status = %x\n", rga2_read(RGA2_INT), rga2_read(RGA2_STATUS));
			rga2_printf_cmd_buf(p);
			DBG("rga2 CMD\n");
			for (i = 0; i < 0x7; i++) {
				DBG("%.8x %.8x %.8x %.8x\n", p[0 + i * 0x4], p[1 + i * 0x4], p[0x2 + i * 0x4], p[0x3 + i * 0x4]);
			}
			mutex_lock(&rga2_service.lock);
			rga2_del_running_list_timeout();
			rga2_try_set_reg();
			mutex_unlock(&rga2_service.lock);
			ret = -ETIMEDOUT;
		}

#ifdef CONFIG_ROCKCHIP_RGA2_DEBUGGER
		if (RGA2_TEST_TIME) {
			rga2_end = ktime_get();
			rga2_end = ktime_sub(rga2_end, rga2_start);
			DBG("sync one cmd end time %d\n", (int)ktime_to_us(rga2_end));
		}
#endif
		if (ret == -ETIMEDOUT && try--) {
			memcpy(req, &req_bak, sizeof(req_bak));
			/*
			 * if rga work timeout with scaling, need do a non-scale work
			 * first, restore hardware status, then do actually work.
			 */
			if (req->src.act_w != req->dst.act_w || req->src.act_h != req->dst.act_h) {
				/* Shrink to at most 320x240 with no scaling for the restore pass. */
				req->src.act_w = MIN(0x140, MIN(req->src.act_w, req->dst.act_w));
				req->src.act_h = MIN(0xf0, MIN(req->src.act_h, req->dst.act_h));
				req->dst.act_w = req->src.act_w;
				req->dst.act_h = req->src.act_h;
				restore = 1;
			}
			continue;
		}
		/* Restore pass succeeded: replay the caller's original request once. */
		if (!ret && restore) {
			memcpy(req, &req_bak, sizeof(req_bak));
			restore = 0;
			continue;
		}
		break;
	}

	return ret;
}
1172
/*
 * rga_ioctl - main ioctl dispatcher for the /dev/rga character device.
 *
 * Handles both the legacy RGA command set (struct rga_req, translated
 * via RGA_MSG_2_RGA2_MSG) and the native RGA2 command set. The whole
 * dispatcher runs under rga2_service.mutex.
 */
static long rga_ioctl(struct file *file, uint32_t cmd, unsigned long arg)
{
	struct rga2_drvdata_t *rga = rga2_drvdata;
	struct rga2_req req, req_first;
	struct rga_req req_rga;
	int ret = 0;
	int major_version = 0, minor_version = 0;
	char version[16] = {0};
	rga2_session *session;

	if (!rga) {
		pr_err("rga2_drvdata is null, rga2 is not init\n");
		return -ENODEV;
	}
	memset(&req, 0x0, sizeof(req));

	mutex_lock(&rga2_service.mutex);

	session = (rga2_session *)file->private_data;

	if (session == NULL) {
		printk("%s [%d] rga thread session is null\n", __FUNCTION__, __LINE__);
		mutex_unlock(&rga2_service.mutex);
		return -EINVAL;
	}

	/* NOTE(review): duplicate — req was already zeroed above. */
	memset(&req, 0x0, sizeof(req));
#ifdef CONFIG_ROCKCHIP_RGA2_DEBUGGER
	if (RGA2_TEST_MSG) {
		INFO("cmd is %s\n", rga2_get_cmd_mode_str(cmd));
	}
	if (RGA2_NONUSE) {
		/* Debug switch: silently accept and drop all work. */
		mutex_unlock(&rga2_service.mutex);
		return 0;
	}
#endif
	switch (cmd) {
	case RGA_BLIT_SYNC:
		if (unlikely(copy_from_user(&req_rga, (struct rga_req *)arg, sizeof(struct rga_req)))) {
			ERR("copy_from_user failed\n");
			ret = -EFAULT;
			break;
		}
		/* Translate the legacy request into the RGA2 layout. */
		RGA_MSG_2_RGA2_MSG(&req_rga, &req);

		/*
		 * First bitblt after probe: if it scales, run a small
		 * non-scaling warm-up blit first (same restore trick as the
		 * timeout path in rga2_blit_sync).
		 */
		if (first_RGA2_proc == 0 && req.render_mode == bitblt_mode && rga2_service.dev_mode == 1) {
			memcpy(&req_first, &req, sizeof(struct rga2_req));
			if ((req_first.src.act_w != req_first.dst.act_w) || (req_first.src.act_h != req_first.dst.act_h)) {
				req_first.src.act_w = MIN(0x140, MIN(req_first.src.act_w, req_first.dst.act_w));
				req_first.src.act_h = MIN(0xf0, MIN(req_first.src.act_h, req_first.dst.act_h));
				req_first.dst.act_w = req_first.src.act_w;
				req_first.dst.act_h = req_first.src.act_h;
				/* NOTE(review): this ret is overwritten just below. */
				ret = rga2_blit_async(session, &req_first);
			}
			ret = rga2_blit_sync(session, &req);
			first_RGA2_proc = 1;
		} else {
			ret = rga2_blit_sync(session, &req);
		}
		break;
	case RGA_BLIT_ASYNC:
		if (unlikely(copy_from_user(&req_rga, (struct rga_req *)arg, sizeof(struct rga_req)))) {
			ERR("copy_from_user failed\n");
			ret = -EFAULT;
			break;
		}

		RGA_MSG_2_RGA2_MSG(&req_rga, &req);
		/* Same first-blit warm-up as RGA_BLIT_SYNC; rk3368 always warms up. */
		if (first_RGA2_proc == 0 && req.render_mode == bitblt_mode && rga2_service.dev_mode == 1) {
			memcpy(&req_first, &req, sizeof(struct rga2_req));
			if ((req_first.src.act_w != req_first.dst.act_w) || (req_first.src.act_h != req_first.dst.act_h) ||
			    rk3368) {
				req_first.src.act_w = MIN(0x140, MIN(req_first.src.act_w, req_first.dst.act_w));
				req_first.src.act_h = MIN(0xf0, MIN(req_first.src.act_h, req_first.dst.act_h));
				req_first.dst.act_w = req_first.src.act_w;
				req_first.dst.act_h = req_first.src.act_h;
				if (rk3368) {
					ret = rga2_blit_sync(session, &req_first);
				} else {
					ret = rga2_blit_async(session, &req_first);
				}
			}
			ret = rga2_blit_async(session, &req);
			first_RGA2_proc = 1;
		} else {
			if (rk3368) {
				memcpy(&req_first, &req, sizeof(struct rga2_req));

				/*
				 * workround for gts
				 * run gts --skip-all-system-status-check --ignore-business-logic-failure -m GtsMediaTestCases -t
				 * com.google.android.media.gts.WidevineYouTubePerformanceTests#testClear1080P30
				 */
				if ((req_first.src.act_w == 0x780) && (req_first.src.act_h == 0x3f0) &&
				    (req_first.src.act_h == req_first.dst.act_w)) {
					/* The 1920x1008 rotated case is only logged, not warmed up. */
					printk("src : aw=%d ah=%d vw=%d vh=%d \n", req_first.src.act_w, req_first.src.act_h,
					       req_first.src.vir_w, req_first.src.vir_h);
					printk("dst : aw=%d ah=%d vw=%d vh=%d \n", req_first.dst.act_w, req_first.dst.act_h,
					       req_first.dst.vir_w, req_first.dst.vir_h);
				} else {
					req_first.src.act_w = MIN(0x140, MIN(req_first.src.act_w, req_first.dst.act_w));
					req_first.src.act_h = MIN(0xf0, MIN(req_first.src.act_h, req_first.dst.act_h));
					req_first.dst.act_w = req_first.src.act_w;
					req_first.dst.act_h = req_first.src.act_h;
					ret = rga2_blit_sync(session, &req_first);
				}
			}
			ret = rga2_blit_async(session, &req);
		}
		break;
	case RGA_CACHE_FLUSH:
		if (unlikely(copy_from_user(&req_rga, (struct rga_req *)arg, sizeof(struct rga_req)))) {
			ERR("copy_from_user failed\n");
			ret = -EFAULT;
			break;
		}
		RGA_MSG_2_RGA2_MSG(&req_rga, &req);
		ret = rga2_blit_flush_cache(session, &req);
		break;
	case RGA2_BLIT_SYNC:
		/* Native RGA2 request: no translation needed. */
		if (unlikely(copy_from_user(&req, (struct rga2_req *)arg, sizeof(struct rga2_req)))) {
			ERR("copy_from_user failed\n");
			ret = -EFAULT;
			break;
		}
		ret = rga2_blit_sync(session, &req);
		break;
	case RGA2_BLIT_ASYNC:
		if (unlikely(copy_from_user(&req, (struct rga2_req *)arg, sizeof(struct rga2_req)))) {
			ERR("copy_from_user failed\n");
			ret = -EFAULT;
			break;
		}

		/* Back-pressure: fall back to synchronous mode past 16 queued jobs. */
		if ((atomic_read(&rga2_service.total_running) > 0x10)) {
			ret = rga2_blit_sync(session, &req);
		} else {
			ret = rga2_blit_async(session, &req);
		}
		break;
	case RGA_FLUSH:
	case RGA2_FLUSH:
		ret = rga2_flush(session, arg);
		break;
	case RGA_GET_RESULT:
	case RGA2_GET_RESULT:
		ret = rga2_get_result(session, arg);
		break;
	case RGAGET_VERSION:
		/* Legacy version query: "major.minor" truncated to 4 chars + NUL. */
		sscanf(rga->version, "%x.%x.%*x", &major_version, &minor_version);
		snprintf(version, 0x5, "%x.%02x", major_version, minor_version);

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
		/*
		 * NOTE(review): copies sizeof(rga->version) bytes out of the
		 * 16-byte local 'version' buffer — over-reads the stack if
		 * rga->version is larger than 16 bytes; confirm the sizes.
		 */
		ret = copy_to_user((void *)arg, version, sizeof(rga->version));
#else
		ret = copy_to_user((void *)arg, RGA2_VERSION, sizeof(RGA2_VERSION));
#endif
		if (ret != 0) {
			ret = -EFAULT;
		}
		break;
	case RGA2GET_VERSION:
		/* Full version string as probed from the hardware. */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
		ret = copy_to_user((void *)arg, rga->version, sizeof(rga->version));
#else
		ret = copy_to_user((void *)arg, RGA2_VERSION, sizeof(RGA2_VERSION));
#endif
		if (ret != 0) {
			ret = -EFAULT;
		}
		break;
	default:
		ERR("unknown ioctl cmd!\n");
		ret = -EINVAL;
		break;
	}

	mutex_unlock(&rga2_service.mutex);

	return ret;
}
1354
1355 #ifdef CONFIG_COMPAT
/*
 * compat_rga_ioctl - 32-bit compat ioctl entry for 64-bit kernels.
 *
 * Mirrors rga_ioctl() but reads the 32-bit struct rga_req_32 layout and
 * translates it with RGA_MSG_2_RGA2_MSG_32. Runs under rga2_service.mutex.
 */
static long compat_rga_ioctl(struct file *file, uint32_t cmd, unsigned long arg)
{
	struct rga2_drvdata_t *rga = rga2_drvdata;
	struct rga2_req req, req_first;
	struct rga_req_32 req_rga;
	int ret = 0;
	rga2_session *session;

	if (!rga) {
		pr_err("rga2_drvdata is null, rga2 is not init\n");
		return -ENODEV;
	}
	memset(&req, 0x0, sizeof(req));

	mutex_lock(&rga2_service.mutex);

	session = (rga2_session *)file->private_data;

#ifdef CONFIG_ROCKCHIP_RGA2_DEBUGGER
	if (RGA2_TEST_MSG) {
		INFO("using %s\n", __func__);
	}
#endif

	if (session == NULL) {
		ERR("%s [%d] rga thread session is null\n", __func__, __LINE__);
		mutex_unlock(&rga2_service.mutex);
		return -EINVAL;
	}

	/* NOTE(review): duplicate — req was already zeroed above. */
	memset(&req, 0x0, sizeof(req));

	switch (cmd) {
	case RGA_BLIT_SYNC:
		if (unlikely(copy_from_user(&req_rga, compat_ptr((compat_uptr_t)arg), sizeof(struct rga_req_32)))) {
			ERR("copy_from_user failed\n");
			ret = -EFAULT;
			break;
		}

		/* Translate the 32-bit legacy request into the RGA2 layout. */
		RGA_MSG_2_RGA2_MSG_32(&req_rga, &req);

		/* First bitblt after probe: non-scaling warm-up pass first. */
		if (first_RGA2_proc == 0 && req.render_mode == bitblt_mode && rga2_service.dev_mode == 1) {
			memcpy(&req_first, &req, sizeof(struct rga2_req));
			if ((req_first.src.act_w != req_first.dst.act_w) || (req_first.src.act_h != req_first.dst.act_h)) {
				req_first.src.act_w = MIN(0x140, MIN(req_first.src.act_w, req_first.dst.act_w));
				req_first.src.act_h = MIN(0xf0, MIN(req_first.src.act_h, req_first.dst.act_h));
				req_first.dst.act_w = req_first.src.act_w;
				req_first.dst.act_h = req_first.src.act_h;
				ret = rga2_blit_async(session, &req_first);
			}
			ret = rga2_blit_sync(session, &req);
			first_RGA2_proc = 1;
		} else {
			ret = rga2_blit_sync(session, &req);
		}
		break;
	case RGA_BLIT_ASYNC:
		if (unlikely(copy_from_user(&req_rga, compat_ptr((compat_uptr_t)arg), sizeof(struct rga_req_32)))) {
			ERR("copy_from_user failed\n");
			ret = -EFAULT;
			break;
		}
		RGA_MSG_2_RGA2_MSG_32(&req_rga, &req);

		if (first_RGA2_proc == 0 && req.render_mode == bitblt_mode && rga2_service.dev_mode == 1) {
			memcpy(&req_first, &req, sizeof(struct rga2_req));
			if ((req_first.src.act_w != req_first.dst.act_w) || (req_first.src.act_h != req_first.dst.act_h)) {
				req_first.src.act_w = MIN(0x140, MIN(req_first.src.act_w, req_first.dst.act_w));
				req_first.src.act_h = MIN(0xf0, MIN(req_first.src.act_h, req_first.dst.act_h));
				req_first.dst.act_w = req_first.src.act_w;
				req_first.dst.act_h = req_first.src.act_h;
				ret = rga2_blit_async(session, &req_first);
			}
			/*
			 * NOTE(review): the ASYNC command is serviced with
			 * rga2_blit_sync() here (and below), unlike the native
			 * rga_ioctl() path which uses rga2_blit_async() —
			 * confirm whether this is intentional for compat.
			 */
			ret = rga2_blit_sync(session, &req);
			first_RGA2_proc = 1;
		} else {
			ret = rga2_blit_sync(session, &req);
		}
		break;
	case RGA2_BLIT_SYNC:
		/* Native RGA2 request: no translation needed. */
		if (unlikely(copy_from_user(&req, compat_ptr((compat_uptr_t)arg), sizeof(struct rga2_req)))) {
			ERR("copy_from_user failed\n");
			ret = -EFAULT;
			break;
		}
		ret = rga2_blit_sync(session, &req);
		break;
	case RGA2_BLIT_ASYNC:
		if (unlikely(copy_from_user(&req, compat_ptr((compat_uptr_t)arg), sizeof(struct rga2_req)))) {
			ERR("copy_from_user failed\n");
			ret = -EFAULT;
			break;
		}

		/* Back-pressure: fall back to synchronous mode past 16 queued jobs. */
		if ((atomic_read(&rga2_service.total_running) > 0x10)) {
			ret = rga2_blit_sync(session, &req);
		} else {
			ret = rga2_blit_async(session, &req);
		}

		break;
	case RGA_FLUSH:
	case RGA2_FLUSH:
		ret = rga2_flush(session, arg);
		break;
	case RGA_GET_RESULT:
	case RGA2_GET_RESULT:
		ret = rga2_get_result(session, arg);
		break;
	case RGAGET_VERSION:
	case RGA2GET_VERSION:
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
		/* Copies 16 bytes of the version string; assumes rga->version
		 * holds at least 0x10 bytes — TODO confirm the field size. */
		ret = copy_to_user((void *)arg, rga->version, 0x10);
#else
		ret = copy_to_user((void *)arg, RGA2_VERSION, sizeof(RGA2_VERSION));
#endif
		if (ret != 0) {
			ret = -EFAULT;
		}
		break;
	default:
		ERR("unknown ioctl cmd!\n");
		ret = -EINVAL;
		break;
	}

	mutex_unlock(&rga2_service.mutex);

	return ret;
}
1487 #endif
1488
rga2_ioctl_kernel(struct rga_req * req_rga)1489 static long rga2_ioctl_kernel(struct rga_req *req_rga)
1490 {
1491 int ret = 0;
1492 rga2_session *session;
1493 struct rga2_req req;
1494
1495 memset(&req, 0x0, sizeof(req));
1496 mutex_lock(&rga2_service.mutex);
1497 session = &rga2_session_global;
1498 if (session == NULL) {
1499 ERR("%s [%d] rga thread session is null\n", __func__, __LINE__);
1500 mutex_unlock(&rga2_service.mutex);
1501 return -EINVAL;
1502 }
1503
1504 RGA_MSG_2_RGA2_MSG(req_rga, &req);
1505 ret = rga2_blit_sync(session, &req);
1506 mutex_unlock(&rga2_service.mutex);
1507
1508 return ret;
1509 }
1510
rga2_open(struct inode * inode,struct file * file)1511 static int rga2_open(struct inode *inode, struct file *file)
1512 {
1513 rga2_session *session = kzalloc(sizeof(rga2_session), GFP_KERNEL);
1514
1515 if (session == NULL) {
1516 pr_err("unable to allocate memory for rga_session.");
1517 return -ENOMEM;
1518 }
1519
1520 session->pid = current->pid;
1521 INIT_LIST_HEAD(&session->waiting);
1522 INIT_LIST_HEAD(&session->running);
1523 INIT_LIST_HEAD(&session->list_session);
1524 init_waitqueue_head(&session->wait);
1525 mutex_lock(&rga2_service.lock);
1526 list_add_tail(&session->list_session, &rga2_service.session);
1527 mutex_unlock(&rga2_service.lock);
1528 atomic_set(&session->task_running, 0);
1529 atomic_set(&session->num_done, 0);
1530 file->private_data = (void *)session;
1531
1532 return nonseekable_open(inode, file);
1533 }
1534
rga2_release(struct inode * inode,struct file * file)1535 static int rga2_release(struct inode *inode, struct file *file)
1536 {
1537 int task_running;
1538 rga2_session *session = (rga2_session *)file->private_data;
1539
1540 if (session == NULL) {
1541 return -EINVAL;
1542 }
1543
1544 task_running = atomic_read(&session->task_running);
1545 if (task_running) {
1546 pr_err("rga2_service session %d still has %d task running when closing\n", session->pid, task_running);
1547 msleep(0x64);
1548 }
1549
1550 wake_up(&session->wait);
1551 mutex_lock(&rga2_service.lock);
1552 list_del(&session->list_session);
1553 rga2_service_session_clear(session);
1554 kfree(session);
1555 mutex_unlock(&rga2_service.lock);
1556
1557 return 0;
1558 }
1559
RGA2_flush_page(void)1560 static void RGA2_flush_page(void)
1561 {
1562 struct rga2_reg *reg;
1563 int i;
1564
1565 reg = list_entry(rga2_service.running.prev, struct rga2_reg, status_link);
1566
1567 if (reg == NULL) {
1568 return;
1569 }
1570
1571 if (reg->MMU_src0_base != NULL) {
1572 for (i = 0; i < reg->MMU_src0_count; i++) {
1573 rga2_dma_flush_page(phys_to_page(reg->MMU_src0_base[i]), MMU_UNMAP_CLEAN);
1574 }
1575 }
1576
1577 if (reg->MMU_src1_base != NULL) {
1578 for (i = 0; i < reg->MMU_src1_count; i++) {
1579 rga2_dma_flush_page(phys_to_page(reg->MMU_src1_base[i]), MMU_UNMAP_CLEAN);
1580 }
1581 }
1582
1583 if (reg->MMU_dst_base != NULL) {
1584 for (i = 0; i < reg->MMU_dst_count; i++) {
1585 rga2_dma_flush_page(phys_to_page(reg->MMU_dst_base[i]), MMU_UNMAP_INVALID);
1586 }
1587 }
1588 }
1589
rga2_irq_thread(int irq,void * dev_id)1590 static irqreturn_t rga2_irq_thread(int irq, void *dev_id)
1591 {
1592 #ifdef CONFIG_ROCKCHIP_RGA2_DEBUGGER
1593 if (RGA2_INT_FLAG) {
1594 INFO("irqthread INT[%x],STATS[%x]\n", rga2_read(RGA2_INT), rga2_read(RGA2_STATUS));
1595 }
1596 #endif
1597 RGA2_flush_page();
1598 mutex_lock(&rga2_service.lock);
1599 if (rga2_service.enable) {
1600 rga2_del_running_list();
1601 rga2_try_set_reg();
1602 }
1603 mutex_unlock(&rga2_service.lock);
1604
1605 return IRQ_HANDLED;
1606 }
1607
/*
 * rga2_irq - hard IRQ half: recover from error interrupts, acknowledge
 * the interrupt sources, then defer list maintenance to the thread.
 */
static irqreturn_t rga2_irq(int irq, void *dev_id)
{
#ifdef CONFIG_ROCKCHIP_RGA2_DEBUGGER
	if (RGA2_INT_FLAG) {
		INFO("irq INT[%x], STATS[%x]\n", rga2_read(RGA2_INT), rga2_read(RGA2_STATUS));
	}
#endif
	/* if error interrupt then soft reset hardware */
	if (rga2_read(RGA2_INT) & 0x01) {
		pr_err("Rga err irq! INT[%x],STATS[%x]\n", rga2_read(RGA2_INT), rga2_read(RGA2_STATUS));
		rga2_soft_reset();
	}
	/* clear INT: write back with bits 4-7 set — presumably the per-source
	 * interrupt-clear bits of RGA2_INT; confirm against the RGA2 TRM. */
	rga2_write(rga2_read(RGA2_INT) | (0x1 << 0x4) | (0x1 << 0x5) | (0x1 << 0x6) | (0x1 << 0x7), RGA2_INT);

	/* Wake the threaded handler for the heavyweight completion work. */
	return IRQ_WAKE_THREAD;
}
1625
/* File operations for the /dev/rga misc device. */
struct file_operations rga2_fops = {
	.owner = THIS_MODULE,
	.open = rga2_open,
	.release = rga2_release,
	.unlocked_ioctl = rga_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = compat_rga_ioctl,
#endif
};
1635
/* Misc character device exposed to userspace as /dev/rga. */
static struct miscdevice rga2_dev = {
	.minor = RGA2_MAJOR,
	.name = "rga",
	.fops = &rga2_fops,
};
1641
/*
 * Device-tree match table.
 * NOTE(review): no MODULE_DEVICE_TABLE(of, ...) is visible here, so
 * module autoloading from DT may not work when built as a module —
 * confirm whether that is intended.
 */
static const struct of_device_id rockchip_rga_dt_ids[] = {
	{
		.compatible = "rockchip,rga2",
	},
	{},
};
1648
1649 #ifdef CONFIG_ROCKCHIP_RGA2_DEBUGGER
/*
 * rga2_debugger_init - allocate the debugger state and bring up the
 * debugfs/procfs frontends (each behind its own config option).
 *
 * Returns 0 on success or -ENOMEM if the state cannot be allocated.
 */
static int rga2_debugger_init(struct rga_debugger **debugger_p)
{
	struct rga_debugger *debugger;

	*debugger_p = kzalloc(sizeof(struct rga_debugger), GFP_KERNEL);
	if (*debugger_p == NULL) {
		ERR("can not alloc for rga2 debugger\n");
		return -ENOMEM;
	}

	debugger = *debugger_p;

#ifdef CONFIG_ROCKCHIP_RGA2_DEBUG_FS
	mutex_init(&debugger->debugfs_lock);
	INIT_LIST_HEAD(&debugger->debugfs_entry_list);
#endif

#ifdef CONFIG_ROCKCHIP_RGA2_PROC_FS
	mutex_init(&debugger->procfs_lock);
	INIT_LIST_HEAD(&debugger->procfs_entry_list);
#endif

	/* Register the debug file trees once the state is ready. */
	rga2_debugfs_init();
	rga2_procfs_init();

	return 0;
}
1677
rga2_debugger_remove(struct rga_debugger ** debugger_p)1678 static int rga2_debugger_remove(struct rga_debugger **debugger_p)
1679 {
1680 rga2_debugfs_remove();
1681 rga2_procfs_remove();
1682
1683 kfree(*debugger_p);
1684 *debugger_p = NULL;
1685
1686 return 0;
1687 }
1688 #endif
1689
rga2_drv_probe(struct platform_device * pdev)1690 static int rga2_drv_probe(struct platform_device *pdev)
1691 {
1692 struct rga2_drvdata_t *data;
1693 struct resource *res;
1694 int ret = 0;
1695 struct device_node *np = pdev->dev.of_node;
1696
1697 mutex_init(&rga2_service.lock);
1698 mutex_init(&rga2_service.mutex);
1699 atomic_set(&rga2_service.total_running, 0);
1700 atomic_set(&rga2_service.src_format_swt, 0);
1701 rga2_service.last_prc_src_format = 1; /* default is yuv first */
1702 rga2_service.enable = false;
1703
1704 rga2_ioctl_kernel_p = rga2_ioctl_kernel;
1705
1706 data = devm_kzalloc(&pdev->dev, sizeof(struct rga2_drvdata_t), GFP_KERNEL);
1707 if (data == NULL) {
1708 ERR("failed to allocate driver data.\n");
1709 return -ENOMEM;
1710 }
1711
1712 INIT_DELAYED_WORK(&data->power_off_work, rga2_power_off_work);
1713 wake_lock_init(&data->wake_lock, WAKE_LOCK_SUSPEND, "rga");
1714
1715 data->clk_rga2 = devm_clk_get(&pdev->dev, "clk_rga");
1716 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
1717 pm_runtime_enable(&pdev->dev);
1718 #else
1719 data->pd_rga2 = devm_clk_get(&pdev->dev, "pd_rga");
1720 #endif
1721 data->aclk_rga2 = devm_clk_get(&pdev->dev, "aclk_rga");
1722 data->hclk_rga2 = devm_clk_get(&pdev->dev, "hclk_rga");
1723
1724 /* map the registers */
1725 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1726 data->rga_base = devm_ioremap_resource(&pdev->dev, res);
1727 if (!data->rga_base) {
1728 ERR("rga ioremap failed\n");
1729 ret = -ENOENT;
1730 goto err_ioremap;
1731 }
1732
1733 /* get the IRQ */
1734 data->irq = platform_get_irq(pdev, 0);
1735 if (data->irq <= 0) {
1736 ERR("failed to get rga irq resource (%d).\n", data->irq);
1737 ret = data->irq;
1738 goto err_irq;
1739 }
1740
1741 /* request the IRQ */
1742 ret = devm_request_threaded_irq(&pdev->dev, data->irq, rga2_irq, rga2_irq_thread, 0, "rga", pdev);
1743 if (ret) {
1744 ERR("rga request_irq failed (%d).\n", ret);
1745 goto err_irq;
1746 }
1747
1748 platform_set_drvdata(pdev, data);
1749 data->dev = &pdev->dev;
1750 rga2_drvdata = data;
1751 of_property_read_u32(np, "dev_mode", &rga2_service.dev_mode);
1752 if (of_machine_is_compatible("rockchip,rk3368")) {
1753 rk3368 = 1;
1754 }
1755
1756 #if defined(CONFIG_ION_ROCKCHIP) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
1757 data->ion_client = rockchip_ion_client_create("rga");
1758 if (IS_ERR(data->ion_client)) {
1759 dev_err(&pdev->dev, "failed to create ion client for rga");
1760 return PTR_ERR(data->ion_client);
1761 } else {
1762 dev_info(&pdev->dev, "rga ion client create success!\n");
1763 }
1764 #endif
1765
1766 ret = misc_register(&rga2_dev);
1767 if (ret) {
1768 ERR("cannot register miscdev (%d)\n", ret);
1769 goto err_misc_register;
1770 }
1771
1772 #ifdef CONFIG_ROCKCHIP_RGA2_DEBUGGER
1773 rga2_debugger_init(&rga2_drvdata->debugger);
1774 #endif
1775
1776 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0))
1777 rga2_init_version();
1778 INFO("Driver loaded successfully ver:%s\n", rga2_drvdata->version);
1779 #else
1780 INFO("Driver loaded successfully\n");
1781 #endif
1782 return 0;
1783
1784 err_misc_register:
1785 free_irq(data->irq, pdev);
1786 err_irq:
1787 iounmap(data->rga_base);
1788 err_ioremap:
1789 wake_lock_destroy(&data->wake_lock);
1790
1791 return ret;
1792 }
1793
/*
 * rga2_drv_remove - platform remove: unwind what probe set up.
 */
static int rga2_drv_remove(struct platform_device *pdev)
{
	struct rga2_drvdata_t *data = platform_get_drvdata(pdev);
	DBG("%s [%d]\n", __FUNCTION__, __LINE__);

#ifdef CONFIG_ROCKCHIP_RGA2_DEBUGGER
	rga2_debugger_remove(&data->debugger);
#endif

	wake_lock_destroy(&data->wake_lock);
	/*
	 * NOTE(review): probe registers the file-scope 'rga2_dev' miscdevice,
	 * but this deregisters data->miscdev — confirm these are the same
	 * object, otherwise the char device is never removed.
	 */
	misc_deregister(&(data->miscdev));
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
	/*
	 * NOTE(review): the IRQ was requested via devm_request_threaded_irq()
	 * with dev_id == pdev; free_irq() here passes &data->miscdev, which
	 * does not match, and devm would release the IRQ anyway — confirm.
	 * Likewise the mapping came from devm_ioremap_resource(), so this
	 * manual iounmap() looks redundant.
	 */
	free_irq(data->irq, &data->miscdev);
	iounmap((void __iomem *)(data->rga_base));

	devm_clk_put(&pdev->dev, data->clk_rga2);
	devm_clk_put(&pdev->dev, data->aclk_rga2);
	devm_clk_put(&pdev->dev, data->hclk_rga2);
	pm_runtime_disable(&pdev->dev);
#endif

	return 0;
}
1817
/* Platform driver glue; matched against DT "rockchip,rga2" nodes. */
static struct platform_driver rga2_driver = {
	.probe = rga2_drv_probe,
	.remove = rga2_drv_remove,
	.driver =
	{
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
		.owner = THIS_MODULE,
#endif
		.name = "rga2",
		.of_match_table = of_match_ptr(rockchip_rga_dt_ids),
	},
};
1830
1831 #ifdef CONFIG_ROCKCHIP_RGA2_DEBUGGER
rga2_slt(void)1832 void rga2_slt(void)
1833 {
1834 int i;
1835 int src_size, dst_size, src_order, dst_order;
1836 int err_count = 0, right_count = 0;
1837 int task_running;
1838 unsigned int srcW, srcH, dstW, dstH;
1839 unsigned int *pstd, *pnow;
1840 unsigned long *src_vir, *dst_vir;
1841 struct rga2_req req;
1842 rga2_session session;
1843
1844 srcW = 0x190;
1845 srcH = 0xc8;
1846 dstW = 0x190;
1847 dstH = 0xc8;
1848
1849 src_size = srcW * srcH * 0x4;
1850 dst_size = dstW * dstH * 0x4;
1851
1852 src_order = get_order(src_size);
1853 src_vir = (unsigned long *)__get_free_pages(GFP_KERNEL | GFP_DMA32, src_order);
1854 if (src_vir == NULL) {
1855 ERR("%s[%d], can not alloc pages for src, order = %d\n", __func__, __LINE__, src_order);
1856 return;
1857 }
1858
1859 dst_order = get_order(dst_size);
1860 dst_vir = (unsigned long *)__get_free_pages(GFP_KERNEL | GFP_DMA32, dst_order);
1861 if (dst_vir == NULL) {
1862 ERR("%s[%d], can not alloc pages for dst, order = %d\n", __func__, __LINE__, dst_order);
1863 return;
1864 }
1865
1866 /* Init session */
1867 session.pid = current->pid;
1868
1869 INIT_LIST_HEAD(&session.waiting);
1870 INIT_LIST_HEAD(&session.running);
1871 INIT_LIST_HEAD(&session.list_session);
1872 init_waitqueue_head(&session.wait);
1873 mutex_lock(&rga2_service.lock);
1874 list_add_tail(&session.list_session, &rga2_service.session);
1875 mutex_unlock(&rga2_service.lock);
1876 atomic_set(&session.task_running, 0);
1877 atomic_set(&session.num_done, 0);
1878
1879 INFO("**********************************\n");
1880 INFO("************ RGA_TEST ************\n");
1881 INFO("**********************************\n");
1882
1883 memset(src_vir, 0x50, src_size);
1884 memset(dst_vir, 0x50, dst_size);
1885
1886 rga2_dma_flush_range(src_vir, src_vir + src_size);
1887 rga2_dma_flush_range(dst_vir, dst_vir + dst_size);
1888
1889 memset(&req, 0, sizeof(struct rga2_req));
1890 req.src.x_offset = 0;
1891 req.src.y_offset = 0;
1892 req.src.act_w = srcW;
1893 req.src.act_h = srcH;
1894 req.src.vir_w = srcW;
1895 req.src.vir_h = srcW;
1896 req.src.format = RGA2_FORMAT_RGBA_8888;
1897
1898 req.src.yrgb_addr = 0;
1899 req.src.uv_addr = (unsigned long)virt_to_phys(src_vir);
1900 req.src.v_addr = req.src.uv_addr + srcH * srcW;
1901
1902 req.dst.x_offset = 0;
1903 req.dst.y_offset = 0;
1904 req.dst.act_w = dstW;
1905 req.dst.act_h = dstH;
1906 req.dst.vir_w = dstW;
1907 req.dst.vir_h = dstH;
1908 req.dst.format = RGA2_FORMAT_RGBA_8888;
1909
1910 req.dst.yrgb_addr = 0;
1911 req.dst.uv_addr = (unsigned long)virt_to_phys(dst_vir);
1912 req.dst.v_addr = req.dst.uv_addr + dstH * dstW;
1913
1914 rga2_blit_sync(&session, &req);
1915
1916 /* Check buffer */
1917 pstd = (unsigned int *)src_vir;
1918 pnow = (unsigned int *)dst_vir;
1919
1920 INFO("[ num : srcInfo dstInfo ]\n");
1921 for (i = 0; i < dst_size / 0x4; i++) {
1922 if (*pstd != *pnow) {
1923 INFO("[X%.8d : 0x%x 0x%x]", i, *pstd, *pnow);
1924 if (i % 0x4 == 0) {
1925 INFO("\n");
1926 }
1927 err_count++;
1928 } else {
1929 if (i % (0x280 * 0x400) == 0) {
1930 INFO("[Y%.8d : 0x%.8x 0x%.8x]\n", i, *pstd, *pnow);
1931 }
1932 right_count++;
1933 }
1934 pstd++;
1935 pnow++;
1936 if (err_count > 0x40) {
1937 break;
1938 }
1939 }
1940
1941 INFO("err_count=%d, right_count=%d\n", err_count, right_count);
1942 if (err_count != 0) {
1943 INFO("rga slt err !!\n");
1944 } else {
1945 INFO("rga slt success !!\n");
1946 }
1947
1948 /* Deinit session */
1949 task_running = atomic_read(&session.task_running);
1950 if (task_running) {
1951 pr_err("%s[%d], session %d still has %d task running when closing\n", __func__, __LINE__, session.pid,
1952 task_running);
1953 msleep(0x64);
1954 }
1955 wake_up(&session.wait);
1956 mutex_lock(&rga2_service.lock);
1957 list_del(&session.list_session);
1958 rga2_service_session_clear(&session);
1959 mutex_unlock(&rga2_service.lock);
1960
1961 free_pages((unsigned long)src_vir, src_order);
1962 free_pages((unsigned long)dst_vir, dst_order);
1963 }
1964 #endif
1965
1966 void rga2_test_0(void);
1967
rga2_init(void)1968 static int __init rga2_init(void)
1969 {
1970 int ret;
1971 int order = 0;
1972 uint32_t *buf_p;
1973 uint32_t *buf;
1974
1975 /*
1976 * malloc pre scale mid buf mmu table:
1977 * RGA2_PHY_PAGE_SIZE * channel_num * address_size
1978 */
1979 order = get_order(RGA2_PHY_PAGE_SIZE * 3 * sizeof(buf_p));
1980 buf_p = (uint32_t *)__get_free_pages(GFP_KERNEL | GFP_DMA32, order);
1981 if (buf_p == NULL) {
1982 ERR("Can not alloc pages for mmu_page_table\n");
1983 }
1984
1985 rga2_mmu_buf.buf_virtual = buf_p;
1986 rga2_mmu_buf.buf_order = order;
1987 #if (defined(CONFIG_ARM) && defined(CONFIG_ARM_LPAE))
1988 buf = (uint32_t *)(uint32_t)virt_to_phys((void *)((unsigned long)buf_p));
1989 #else
1990 buf = (uint32_t *)virt_to_phys((void *)((unsigned long)buf_p));
1991 #endif
1992 rga2_mmu_buf.buf = buf;
1993 rga2_mmu_buf.front = 0;
1994 rga2_mmu_buf.back = RGA2_PHY_PAGE_SIZE * 3;
1995 rga2_mmu_buf.size = RGA2_PHY_PAGE_SIZE * 3;
1996
1997 order = get_order(RGA2_PHY_PAGE_SIZE * sizeof(struct page *));
1998 rga2_mmu_buf.pages = (struct page **)__get_free_pages(GFP_KERNEL | GFP_DMA32, order);
1999 if (rga2_mmu_buf.pages == NULL) {
2000 ERR("Can not alloc pages for rga2_mmu_buf.pages\n");
2001 }
2002 rga2_mmu_buf.pages_order = order;
2003
2004 ret = platform_driver_register(&rga2_driver);
2005 if (ret != 0) {
2006 printk(KERN_ERR "Platform device register failed (%d).\n", ret);
2007 return ret;
2008 }
2009
2010 rga2_session_global.pid = 0x0000ffff;
2011 INIT_LIST_HEAD(&rga2_session_global.waiting);
2012 INIT_LIST_HEAD(&rga2_session_global.running);
2013 INIT_LIST_HEAD(&rga2_session_global.list_session);
2014
2015 INIT_LIST_HEAD(&rga2_service.waiting);
2016 INIT_LIST_HEAD(&rga2_service.running);
2017 INIT_LIST_HEAD(&rga2_service.done);
2018 INIT_LIST_HEAD(&rga2_service.session);
2019 init_waitqueue_head(&rga2_session_global.wait);
2020 list_add_tail(&rga2_session_global.list_session, &rga2_service.session);
2021 atomic_set(&rga2_session_global.task_running, 0);
2022 atomic_set(&rga2_session_global.num_done, 0);
2023
2024 #if RGA2_TEST_CASE
2025 rga2_test_0();
2026 #endif
2027 INFO("Module initialized.\n");
2028
2029 return 0;
2030 }
2031
/*
 * rga2_exit - module exit: power down the block, release the MMU table
 * and page-array allocations made in rga2_init(), then unregister.
 */
static void __exit rga2_exit(void)
{
	rga2_power_off();

	free_pages((unsigned long)rga2_mmu_buf.buf_virtual, rga2_mmu_buf.buf_order);
	free_pages((unsigned long)rga2_mmu_buf.pages, rga2_mmu_buf.pages_order);

	platform_driver_unregister(&rga2_driver);
}
2041
2042 #if RGA2_TEST_CASE
2043
rga2_test_0(void)2044 void rga2_test_0(void)
2045 {
2046 struct rga2_req req;
2047 rga2_session session;
2048 unsigned int *src, *dst;
2049
2050 session.pid = current->pid;
2051 INIT_LIST_HEAD(&session.waiting);
2052 INIT_LIST_HEAD(&session.running);
2053 INIT_LIST_HEAD(&session.list_session);
2054 init_waitqueue_head(&session.wait);
2055 /* no need to protect */
2056 list_add_tail(&session.list_session, &rga2_service.session);
2057 atomic_set(&session.task_running, 0);
2058 atomic_set(&session.num_done, 0);
2059
2060 memset(&req, 0, sizeof(struct rga2_req));
2061 src = kmalloc(0x320 * 0x1E0 * 0x4, GFP_KERNEL);
2062 dst = kmalloc(0x320 * 0x1E0 * 0x4, GFP_KERNEL);
2063
2064 printk("\n********************************\n");
2065 printk("************ RGA2_TEST ************\n");
2066 printk("********************************\n\n");
2067
2068 memset(src, 0x80, 0x320 * 0x1E0 * 0x4);
2069 memset(dst, 0xcc, 0x320 * 0x1E0 * 0x4);
2070
2071 {
2072 uint32_t i, j;
2073 uint8_t *sp;
2074
2075 sp = (uint8_t *)src;
2076 for (j = 0; j < 0xf0; j++) {
2077 sp = (uint8_t *)src + j * 0x140 * 0xA / 0x8;
2078 for (i = 0; i < 0x140; i++) {
2079 if ((i & 0x3) == 0) {
2080 sp[i * 0x5 / 0x4] = 0;
2081 sp[i * 0x5 / 0x4 + 1] = 0x1;
2082 } else if ((i & 0x3) == 1) {
2083 sp[i * 0x5 / 0x4 + 1] = 0x4;
2084 } else if ((i & 0x3) == 0x2) {
2085 sp[i * 0x5 / 0x4 + 1] = 0x10;
2086 } else if ((i & 0x3) == 0x3) {
2087 sp[i * 0x5 / 0x4 + 1] = 0x40;
2088 }
2089 }
2090 }
2091 sp = (uint8_t *)src;
2092 for (j = 0; j < 0x64; j++) {
2093 printk("src %.2x\n", sp[j]);
2094 }
2095 }
2096 req.src.act_w = 0x140;
2097 req.src.act_h = 0xf0;
2098
2099 req.src.vir_w = 0x140;
2100 req.src.vir_h = 0xf0;
2101 req.src.yrgb_addr = 0;
2102 req.src.uv_addr = (unsigned long)virt_to_phys(src);
2103 req.src.v_addr = 0;
2104 req.src.format = RGA2_FORMAT_YCbCr_420_SP_10B;
2105
2106 req.dst.act_w = 0x140;
2107 req.dst.act_h = 0xf0;
2108 req.dst.x_offset = 0;
2109 req.dst.y_offset = 0;
2110
2111 req.dst.vir_w = 0x140;
2112 req.dst.vir_h = 0xf0;
2113
2114 req.dst.yrgb_addr = 0;
2115 req.dst.uv_addr = (unsigned long)virt_to_phys(dst);
2116 req.dst.format = RGA2_FORMAT_YCbCr_420_SP;
2117
2118 req.rotate_mode = 0;
2119 req.scale_bicu_mode = 0x2;
2120
2121 rga2_blit_sync(&session, &req);
2122
2123 {
2124 uint32_t j;
2125 uint8_t *dp = (uint8_t *)dst;
2126
2127 for (j = 0; j < 0x64; j++) {
2128 printk("%d %.2x\n", j, dp[j]);
2129 }
2130 }
2131
2132 kfree(src);
2133 kfree(dst);
2134 }
2135 #endif
2136
/*
 * Initcall selection:
 *  - kernels >= 4.4 with Thunder Boot register as a regular module_init
 *    (so the driver comes up as early as possible in that boot flow);
 *  - other >= 4.4 kernels defer to late_initcall;
 *  - pre-4.4 kernels use fs_initcall.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
#ifdef CONFIG_ROCKCHIP_THUNDER_BOOT
module_init(rga2_init);
#else
late_initcall(rga2_init);
#endif
#else
fs_initcall(rga2_init);
#endif
module_exit(rga2_exit);

/* Module information */
MODULE_AUTHOR("zsq@rock-chips.com");
MODULE_DESCRIPTION("Driver for rga device");
MODULE_LICENSE("GPL");
2152