1 /*
2 * Allwinner SoCs display driver.
3 *
4 * Copyright (C) 2016 Allwinner.
5 *
6 * This file is licensed under the terms of the GNU General Public
7 * License version 2. This program is licensed "as is" without any
8 * warranty of any kind, whether express or implied.
9 */
10
11 #include "dev_disp.h"
12 #include "de/disp_lcd.h"
13 #include <linux/ion.h>
14 #include <uapi/linux/ion.h>
15 #include <linux/pm_runtime.h>
16 #if defined(CONFIG_DEVFREQ_DRAM_FREQ_WITH_SOFT_NOTIFY)
17 #include <linux/sunxi_dramfreq.h>
18 #endif
19 #include <linux/version.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/reset.h>
22 #ifndef dma_mmap_writecombine
23 #define dma_mmap_writecombine dma_mmap_wc
24 #endif
25
26 #ifdef CONFIG_PM
27 #define CONFIG_PM_RUNTIME
28 #endif
29
30 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)
31 struct dma_buf *ion_alloc(size_t len, unsigned int heap_id_mask, unsigned int flags);
32 int ion_free(struct ion_buffer *buffer);
33 void *ion_heap_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer);
34 void ion_heap_unmap_kernel(struct ion_heap *heap, struct ion_buffer *buffer);
35 #endif
36
37 #define DISP_MEM_NUM 10
38 struct disp_drv_info g_disp_drv;
/* allocations are rounded up to a 4 KB boundary */
40 #define MY_BYTE_ALIGN(x) (((x + (4*1024-1)) >> 12) << 12)
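/*
 * Worked examples of the rounding done by MY_BYTE_ALIGN() (added for
 * clarity, not part of the original driver):
 *   MY_BYTE_ALIGN(1)    -> 4096
 *   MY_BYTE_ALIGN(4096) -> 4096
 *   MY_BYTE_ALIGN(4097) -> 8192
 * (x + 4095) is truncated down to a multiple of 4096 by the >>12/<<12 pair.
 */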
41
42 static u32 suspend_output_type[4] = {0};
43 /*
44 * 0:normal;
45 * suspend_status&1 != 0:in early_suspend;
46 * suspend_status&2 != 0:in suspend;
47 */
48 static u32 suspend_status;
49 /* 0:after early suspend; 1:after suspend; 2:after resume;3:after late resume */
50 static u32 suspend_prestep = 3;
51 static u32 power_status_init;
52
53 /* static unsigned int gbuffer[4096]; */
54 static struct info_mm g_disp_mm[DISP_MEM_NUM];
55 static int g_disp_mem_id = -1;
56
57 static struct cdev *my_cdev;
58 static dev_t devid;
59 static struct class *disp_class;
60 static struct device *display_dev;
61
62 static unsigned int g_disp = 0, g_enhance_mode = 0, g_cvbs_enhance_mode;
static u32 DISP_print = 0xffff; /* only print the ioctl cmd that equals DISP_print */
64 static bool g_pm_runtime_enable;
65
66 #ifdef SUPPORT_EINK
67 struct disp_layer_config_inner eink_para[16];
68 #endif
69
70 struct disp_layer_config lyr_cfg[16];
71 struct disp_layer_config2 lyr_cfg2[16];
72 struct disp_layer_config2 lyr_cfg2_1[16];
73 static spinlock_t sync_finish_lock;
74 unsigned int bright_csc = 50, contrast_csc = 50, satuation_csc = 50;
75
76 static atomic_t g_driver_ref_count;
77 static u8 palette_data[256*4];
78 #ifndef CONFIG_OF
79 static struct sunxi_disp_mod disp_mod[] = {
80 {DISP_MOD_DE, "de"},
81 {DISP_MOD_LCD0, "lcd0"},
82 {DISP_MOD_DSI0, "dsi0"},
83 #ifdef DISP_SCREEN_NUM
84 #if DISP_SCREEN_NUM == 2
85 {DISP_MOD_LCD1, "lcd1"}
86 #endif
87 #else
88 # error "DISP_SCREEN_NUM undefined!"
89 #endif
90 };
91
92 static struct resource disp_resource[] = {
93 };
94 #endif
95
96 #if defined(CONFIG_DISP2_SUNXI_COMPOSER)
97 int composer_init(struct disp_drv_info *p_disp_drv);
98 int hwc_dump(char *buf);
99 #endif
100
void disp_set_suspend_output_type(u8 disp, u8 output_type)
102 {
103 suspend_output_type[disp] = output_type;
104 }
105
106 static void disp_shutdown(struct platform_device *pdev);
static ssize_t disp_sys_show(struct device *dev,
108 struct device_attribute *attr, char *buf)
109 {
110 struct disp_manager *mgr = NULL;
111 struct disp_device *dispdev = NULL;
112 ssize_t count = 0;
113 int num_screens, screen_id;
114 int num_layers, layer_id;
115 int num_chans, chan_id;
116 #if defined(CONFIG_DISP2_LCD_ESD_DETECT)
117 struct disp_lcd_esd_info esd_inf;
118
119 memset(&esd_inf, 0, sizeof(struct disp_lcd_esd_info));
120 #endif
121 /* int hpd; */
122
123 num_screens = bsp_disp_feat_get_num_screens();
124 for (screen_id = 0; screen_id < num_screens; screen_id++) {
125 u32 width = 0, height = 0;
126 int fps = 0;
127 struct disp_health_info info;
128
129 mgr = disp_get_layer_manager(screen_id);
130 if (mgr == NULL)
131 continue;
132 dispdev = mgr->device;
133 if (dispdev == NULL)
134 continue;
135 dispdev->get_resolution(dispdev, &width, &height);
136 fps = bsp_disp_get_fps(screen_id);
137 bsp_disp_get_health_info(screen_id, &info);
138
139 if (!dispdev->is_enabled(dispdev))
140 continue;
141 count += sprintf(buf + count, "screen %d:\n", screen_id);
142 count += sprintf(buf + count, "de_rate %d hz, ref_fps:%d\n",
143 mgr->get_clk_rate(mgr),
144 dispdev->get_fps(dispdev));
145 count += mgr->dump(mgr, buf + count);
146 /* output */
147 if (dispdev->type == DISP_OUTPUT_TYPE_LCD) {
148 count += sprintf(buf + count,
149 "\tlcd output\tbacklight(%3d)\tfps:%d.%d",
150 dispdev->get_bright(dispdev), fps / 10,
151 fps % 10);
152 #if defined(CONFIG_DISP2_LCD_ESD_DETECT)
153 if (dispdev->get_esd_info) {
154 dispdev->get_esd_info(dispdev, &esd_inf);
155 count += sprintf(buf + count,
156 "\tesd level(%u)\tfreq(%u)\tpos(%u)\treset(%u)",
157 esd_inf.level, esd_inf.freq,
158 esd_inf.esd_check_func_pos, esd_inf.rst_cnt);
159 }
160 #endif
161 } else if (dispdev->type == DISP_OUTPUT_TYPE_HDMI) {
162 int mode = dispdev->get_mode(dispdev);
163
164 count += sprintf(buf + count,
165 "\thdmi output mode(%d)\tfps:%d.%d",
166 mode, fps / 10, fps % 10);
167 } else if (dispdev->type == DISP_OUTPUT_TYPE_TV) {
168 int mode = dispdev->get_mode(dispdev);
169
170 count += sprintf(buf + count,
171 "\ttv output mode(%d)\tfps:%d.%d",
172 mode, fps / 10, fps % 10);
173 } else if (dispdev->type == DISP_OUTPUT_TYPE_VGA) {
174 int mode = dispdev->get_mode(dispdev);
175
176 count += sprintf(buf + count,
177 "\tvga output mode(%d)\tfps:%d.%d",
178 mode, fps / 10, fps % 10);
179 } else if (dispdev->type == DISP_OUTPUT_TYPE_VDPO) {
180 int mode = dispdev->get_mode(dispdev);
181
182 count += sprintf(buf + count,
183 "\tvdpo output mode(%d)\tfps:%d.%d",
184 mode, fps / 10, fps % 10);
185 } else if (dispdev->type == DISP_OUTPUT_TYPE_RTWB) {
186 int mode = dispdev->get_mode(dispdev);
187
188 count += sprintf(buf + count,
189 "\trtwb output mode(%d)\tfps:%d.%d",
190 mode, fps / 10, fps % 10);
191 } else if (dispdev->type == DISP_OUTPUT_TYPE_EDP) {
192 count += sprintf(
193 buf + count, "\tEDP output(%s) \tfps:%d.%d",
194 (dispdev->is_enabled(dispdev) == 1) ? "enable"
195 : "disable",
196 fps / 10, fps % 10);
197 }
198 if (dispdev->type != DISP_OUTPUT_TYPE_NONE) {
199 count += sprintf(buf + count, "\t%4ux%4u\n",
200 width, height);
201 count += sprintf(buf + count,
202 "\terr:%u\tskip:%u\tirq:%llu\tvsync:%u\tvsync_skip:%u\t\n",
203 info.error_cnt, info.skip_cnt,
204 info.irq_cnt, info.vsync_cnt,
205 info.vsync_skip_cnt);
206 }
207
208 num_chans = bsp_disp_feat_get_num_channels(screen_id);
209
210 /* layer info */
211 for (chan_id = 0; chan_id < num_chans; chan_id++) {
212 num_layers =
213 bsp_disp_feat_get_num_layers_by_chn(screen_id,
214 chan_id);
215 for (layer_id = 0; layer_id < num_layers; layer_id++) {
216 struct disp_layer *lyr = NULL;
217 struct disp_layer_config config;
218
219 lyr = disp_get_layer(screen_id, chan_id,
220 layer_id);
221 config.channel = chan_id;
222 config.layer_id = layer_id;
223 mgr->get_layer_config(mgr, &config, 1);
224 if (lyr && (true == config.enable) && lyr->dump)
225 count += lyr->dump(lyr, buf + count);
226 }
227 }
228 }
229 #if defined(CONFIG_DISP2_SUNXI_COMPOSER)
230 count += hwc_dump(buf + count);
231 #endif
232
233 return count;
234 }
235
static ssize_t disp_sys_store(struct device *dev,
237 struct device_attribute *attr,
238 const char *buf, size_t count)
239 {
240 return count;
241 }
242
243 static DEVICE_ATTR(sys, 0660,
244 disp_sys_show, disp_sys_store);
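/*
 * Illustrative use (the sysfs path is an assumption based on the "disp"
 * class device and the "attr" group registered later in this driver):
 * reading the attribute dumps the per-screen status assembled above.
 *   cat /sys/class/disp/disp/attr/sys
 */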
245
static ssize_t disp_disp_show(struct device *dev,
247 struct device_attribute *attr, char *buf)
248 {
249 return sprintf(buf, "%u\n", g_disp);
250 }
251
static ssize_t disp_disp_store(struct device *dev,
253 struct device_attribute *attr,
254 const char *buf, size_t count)
255 {
256 int err;
257 unsigned long val;
258 unsigned int num_screens;
259
260 err = kstrtoul(buf, 10, &val);
261 if (err) {
262 pr_warn("Invalid size\n");
263 return err;
264 }
265
266 num_screens = bsp_disp_feat_get_num_screens();
if (val >= num_screens)
pr_warn("Invalid value, < %u is expected!\n", num_screens);
269 else
270 g_disp = val;
271
272 return count;
273 }
274
275 static DEVICE_ATTR(disp, 0660,
276 disp_disp_show, disp_disp_store);
277
static ssize_t disp_enhance_mode_show(struct device *dev,
279 struct device_attribute *attr, char *buf)
280 {
281 return sprintf(buf, "%u\n", g_enhance_mode);
282 }
283
static ssize_t disp_enhance_mode_store(struct device *dev,
285 struct device_attribute *attr,
286 const char *buf, size_t count)
287 {
288 int err;
289 unsigned long val;
290
291 err = kstrtoul(buf, 10, &val);
292 if (err) {
293 pr_warn("Invalid size\n");
294 return err;
295 }
296
297 /*
298 * mode: 0: standard; 1: vivid; 2: soft; 3: demo vivid
299 */
300 if (val > 3)
301 pr_warn("Invalid value, 0~3 is expected!\n");
302 else {
303 int num_screens = 2;
304 struct disp_manager *mgr = NULL;
305 struct disp_enhance *enhance = NULL;
306
307 g_enhance_mode = val;
308
309 num_screens = bsp_disp_feat_get_num_screens();
310
311 if (g_disp < num_screens)
312 mgr = g_disp_drv.mgr[g_disp];
313
314 if (mgr) {
315 enhance = mgr->enhance;
316 if (enhance && enhance->set_mode)
317 #if defined(CONFIG_ARCH_SUN8IW15) || defined(CONFIG_ARCH_SUN50IW1)
318 enhance->set_mode(enhance,
319 (g_enhance_mode == 2) ?
320 1 : g_enhance_mode);
321 if (g_enhance_mode == 2)
322 g_enhance_mode = 3;
323 #else
324 enhance->set_mode(enhance,
325 (g_enhance_mode == 3) ?
326 1 : g_enhance_mode);
327 #endif
328
329 if (enhance && enhance->demo_enable
330 && enhance->demo_disable) {
331 if (g_enhance_mode == 3)
332 enhance->demo_enable(enhance);
333 else
334 enhance->demo_disable(enhance);
335 }
336 }
337 }
338
339 return count;
340 }
341
342 static DEVICE_ATTR(enhance_mode, 0660,
343 disp_enhance_mode_show, disp_enhance_mode_store);
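/*
 * Illustrative use of the enhance-mode attribute (path assumed as in the
 * note after the "sys" attribute): select a screen through "disp", then
 * write the mode index, 0:standard 1:vivid 2:soft 3:demo vivid.
 *   echo 0 > /sys/class/disp/disp/attr/disp
 *   echo 1 > /sys/class/disp/disp/attr/enhance_mode
 */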
344 int __attribute__ ((weak))
345 _csc_enhance_setting[3][4] = {
346 {50, 50, 50, 50},
347 {50, 50, 50, 50},
348 {50, 40, 50, 50},
349 };
350
static ssize_t disp_enhance_bright_show(struct device *dev,
352 struct device_attribute *attr, char *buf)
353 {
354 int num_screens = 2;
355 struct disp_manager *mgr = NULL;
356 struct disp_enhance *enhance = NULL;
357 int value = 0;
358 int real_mode = (g_enhance_mode == 3) ? 1 : g_enhance_mode;
359
360 num_screens = bsp_disp_feat_get_num_screens();
361 if (g_disp < num_screens)
362 mgr = g_disp_drv.mgr[g_disp];
363
364 if (mgr) {
365 enhance = mgr->enhance;
366 if (enhance && enhance->get_bright)
367 value = enhance->get_bright(enhance);
368 }
369
370 return sprintf(buf, "%d %d\n", _csc_enhance_setting[real_mode][0], value);
371 }
372
static ssize_t disp_enhance_bright_store(struct device *dev,
374 struct device_attribute *attr,
375 const char *buf, size_t count)
376 {
377 int err;
378 unsigned long value;
379 int num_screens = 2;
380 struct disp_manager *mgr = NULL;
381 struct disp_enhance *enhance = NULL;
382 int real_mode = (g_enhance_mode == 3) ? 1 : g_enhance_mode;
383
384 err = kstrtoul(buf, 10, &value);
385 if (err) {
386 pr_warn("Invalid size\n");
387 return err;
388 }
389
390 num_screens = bsp_disp_feat_get_num_screens();
391 if (g_disp < num_screens)
392 mgr = g_disp_drv.mgr[g_disp];
393
394 if (mgr) {
395 enhance = mgr->enhance;
396 if (enhance && enhance->set_bright) {
397 _csc_enhance_setting[real_mode][0] = value;
398 enhance->set_bright(enhance, value);
399 }
400 if (enhance && enhance->set_mode) {
401 enhance->set_mode(enhance, real_mode ? 0 : 1);
402 enhance->set_mode(enhance, real_mode);
403 }
404 }
405
406 return count;
407 }
408 static DEVICE_ATTR(enhance_bright, 0660,
409 disp_enhance_bright_show, disp_enhance_bright_store);
410
static ssize_t disp_enhance_saturation_show(struct device *dev,
412 struct device_attribute *attr, char *buf)
413 {
414 int num_screens = 2;
415 struct disp_manager *mgr = NULL;
416 struct disp_enhance *enhance = NULL;
417 int value = 0;
418 int real_mode = (g_enhance_mode == 3) ? 1 : g_enhance_mode;
419
420 num_screens = bsp_disp_feat_get_num_screens();
421 if (g_disp < num_screens)
422 mgr = g_disp_drv.mgr[g_disp];
423
424 if (mgr) {
425 enhance = mgr->enhance;
426 if (enhance && enhance->get_saturation)
427 value = enhance->get_saturation(enhance);
428 }
429
430 return sprintf(buf, "%d %d\n", value, _csc_enhance_setting[real_mode][2]);
431 }
432
static ssize_t disp_enhance_saturation_store(struct device *dev,
434 struct device_attribute *attr,
435 const char *buf, size_t count)
436 {
437 int err;
438 unsigned long value;
439 int num_screens = 2;
440 struct disp_manager *mgr = NULL;
441 struct disp_enhance *enhance = NULL;
442 int real_mode = (g_enhance_mode == 3) ? 1 : g_enhance_mode;
443
444 err = kstrtoul(buf, 10, &value);
445 if (err) {
446 pr_warn("Invalid size\n");
447 return err;
448 }
449
450 num_screens = bsp_disp_feat_get_num_screens();
451 if (g_disp < num_screens)
452 mgr = g_disp_drv.mgr[g_disp];
453
454 if (mgr) {
455 enhance = mgr->enhance;
456 if (enhance && enhance->set_saturation) {
457 _csc_enhance_setting[real_mode][2] = value;
458 enhance->set_saturation(enhance, value);
459 }
460 if (enhance && enhance->set_mode) {
461 enhance->set_mode(enhance, real_mode ? 0 : 1);
462 enhance->set_mode(enhance, real_mode);
463 }
464 }
465
466 return count;
467 }
468 static DEVICE_ATTR(enhance_saturation, 0660,
469 disp_enhance_saturation_show, disp_enhance_saturation_store);
470
static ssize_t disp_enhance_contrast_show(struct device *dev,
472 struct device_attribute *attr, char *buf)
473 {
474 int num_screens = 2;
475 struct disp_manager *mgr = NULL;
476 struct disp_enhance *enhance = NULL;
477 int value = 0;
478 int real_mode = (g_enhance_mode == 3) ? 1 : g_enhance_mode;
479
480 num_screens = bsp_disp_feat_get_num_screens();
481 if (g_disp < num_screens)
482 mgr = g_disp_drv.mgr[g_disp];
483
484 if (mgr) {
485 enhance = mgr->enhance;
486 if (enhance && enhance->get_contrast)
487 value = enhance->get_contrast(enhance);
488 }
489
490 return sprintf(buf, "%d %d\n", value, _csc_enhance_setting[real_mode][1]);
491 }
492
static ssize_t disp_enhance_contrast_store(struct device *dev,
494 struct device_attribute *attr,
495 const char *buf, size_t count)
496 {
497 int err;
498 unsigned long value;
499 int num_screens = 2;
500 struct disp_manager *mgr = NULL;
501 struct disp_enhance *enhance = NULL;
502 int real_mode = (g_enhance_mode == 3) ? 1 : g_enhance_mode;
503
504 err = kstrtoul(buf, 10, &value);
505 if (err) {
506 pr_warn("Invalid size\n");
507 return err;
508 }
509
510 num_screens = bsp_disp_feat_get_num_screens();
511 if (g_disp < num_screens)
512 mgr = g_disp_drv.mgr[g_disp];
513
514 if (mgr) {
515 enhance = mgr->enhance;
516 if (enhance && enhance->set_contrast) {
517 _csc_enhance_setting[real_mode][1] = value;
518 enhance->set_contrast(enhance, value);
519 }
520 if (enhance && enhance->set_mode) {
521 enhance->set_mode(enhance, real_mode ? 0 : 1);
522 enhance->set_mode(enhance, real_mode);
523 }
524 }
525
526 return count;
527 }
528 static DEVICE_ATTR(enhance_contrast, 0660,
529 disp_enhance_contrast_show, disp_enhance_contrast_store);
530
static ssize_t disp_enhance_edge_show(struct device *dev,
532 struct device_attribute *attr, char *buf)
533 {
534 int num_screens = 2;
535 struct disp_manager *mgr = NULL;
536 struct disp_enhance *enhance = NULL;
537 int value = 0;
538
539 num_screens = bsp_disp_feat_get_num_screens();
540 if (g_disp < num_screens)
541 mgr = g_disp_drv.mgr[g_disp];
542
543 if (mgr) {
544 enhance = mgr->enhance;
545 if (enhance && enhance->get_edge)
546 value = enhance->get_edge(enhance);
547 }
548
549 return sprintf(buf, "%d\n", value);
550 }
551
static ssize_t disp_enhance_edge_store(struct device *dev,
553 struct device_attribute *attr,
554 const char *buf, size_t count)
555 {
556 int err;
557 unsigned long value;
558 int num_screens = 2;
559 struct disp_manager *mgr = NULL;
560 struct disp_enhance *enhance = NULL;
561
562 err = kstrtoul(buf, 10, &value);
563 if (err) {
564 pr_warn("Invalid size\n");
565 return err;
566 }
567
568 num_screens = bsp_disp_feat_get_num_screens();
569 if (g_disp < num_screens)
570 mgr = g_disp_drv.mgr[g_disp];
571
572 if (mgr) {
573 enhance = mgr->enhance;
574 if (enhance && enhance->set_edge)
575 enhance->set_edge(enhance, value);
576 }
577
578 return count;
579 }
580 static DEVICE_ATTR(enhance_edge, 0660,
581 disp_enhance_edge_show, disp_enhance_edge_store);
582
static ssize_t disp_enhance_detail_show(struct device *dev,
584 struct device_attribute *attr, char *buf)
585 {
586 int num_screens = 2;
587 struct disp_manager *mgr = NULL;
588 struct disp_enhance *enhance = NULL;
589 int value = 0;
590
591 num_screens = bsp_disp_feat_get_num_screens();
592 if (g_disp < num_screens)
593 mgr = g_disp_drv.mgr[g_disp];
594
595 if (mgr) {
596 enhance = mgr->enhance;
597 if (enhance && enhance->get_detail)
598 value = enhance->get_detail(enhance);
599 }
600
601 return sprintf(buf, "%d\n", value);
602 }
603
static ssize_t disp_enhance_detail_store(struct device *dev,
605 struct device_attribute *attr,
606 const char *buf, size_t count)
607 {
608 int err;
609 unsigned long value;
610 int num_screens = 2;
611 struct disp_manager *mgr = NULL;
612 struct disp_enhance *enhance = NULL;
613
614 err = kstrtoul(buf, 10, &value);
615 if (err) {
616 pr_warn("Invalid size\n");
617 return err;
618 }
619
620 num_screens = bsp_disp_feat_get_num_screens();
621 if (g_disp < num_screens)
622 mgr = g_disp_drv.mgr[g_disp];
623
624 if (mgr) {
625 enhance = mgr->enhance;
626 if (enhance && enhance->set_detail)
627 enhance->set_detail(enhance, value);
628 }
629
630 return count;
631 }
632 static DEVICE_ATTR(enhance_detail, 0660,
633 disp_enhance_detail_show, disp_enhance_detail_store);
634
static ssize_t disp_enhance_denoise_show(struct device *dev,
636 struct device_attribute *attr, char *buf)
637 {
638 int num_screens = 2;
639 struct disp_manager *mgr = NULL;
640 struct disp_enhance *enhance = NULL;
641 int value = 0;
642
643 num_screens = bsp_disp_feat_get_num_screens();
644 if (g_disp < num_screens)
645 mgr = g_disp_drv.mgr[g_disp];
646
647 if (mgr) {
648 enhance = mgr->enhance;
649 if (enhance && enhance->get_denoise)
650 value = enhance->get_denoise(enhance);
651 }
652
653 return sprintf(buf, "%d\n", value);
654 }
655
static ssize_t disp_enhance_denoise_store(struct device *dev,
657 struct device_attribute *attr,
658 const char *buf, size_t count)
659 {
660 int err;
661 unsigned long value;
662 int num_screens = 2;
663 struct disp_manager *mgr = NULL;
664 struct disp_enhance *enhance = NULL;
665
666 err = kstrtoul(buf, 10, &value);
667 if (err) {
668 pr_warn("Invalid size\n");
669 return err;
670 }
671
672 num_screens = bsp_disp_feat_get_num_screens();
673 if (g_disp < num_screens)
674 mgr = g_disp_drv.mgr[g_disp];
675
676 if (mgr) {
677 enhance = mgr->enhance;
678 if (enhance && enhance->set_denoise)
679 enhance->set_denoise(enhance, value);
680 }
681
682 return count;
683 }
684 static DEVICE_ATTR(enhance_denoise, 0660,
685 disp_enhance_denoise_show, disp_enhance_denoise_store);
686
static ssize_t disp_cvbs_enhance_show(struct device *dev,
688 struct device_attribute *attr, char *buf)
689 {
690 return sprintf(buf, "%u\n", g_cvbs_enhance_mode);
691 }
692
static ssize_t disp_cvbs_enhance_store(struct device *dev,
694 struct device_attribute *attr,
695 const char *buf, size_t count)
696 {
697 int err;
698 unsigned long val;
699 int num_screens = 0;
700 unsigned int disp;
701 struct disp_device *ptv = NULL;
702
err = kstrtoul(buf, 10, &val);
if (err) {
pr_warn("Invalid size\n");
return err;
}

705 g_cvbs_enhance_mode = val;
706 num_screens = bsp_disp_feat_get_num_screens();
707
708 for (disp = 0; disp < num_screens; disp++) {
709 ptv = disp_device_find(disp, DISP_OUTPUT_TYPE_TV);
710 if (ptv && ptv->set_enhance_mode)
711 ptv->set_enhance_mode(ptv, g_cvbs_enhance_mode);
712 }
713
714 return count;
715 }
716
717 static DEVICE_ATTR(cvbs_enhacne_mode, 0660,
718 disp_cvbs_enhance_show, disp_cvbs_enhance_store);
719
static ssize_t disp_runtime_enable_show(struct device *dev,
721 struct device_attribute *attr,
722 char *buf)
723 {
724 return sprintf(buf, "%d\n", g_pm_runtime_enable);
725 }
726
static ssize_t disp_runtime_enable_store(struct device *dev,
728 struct device_attribute *attr,
729 const char *buf, size_t count)
730 {
731 int err;
732 unsigned long val;
733
err = kstrtoul(buf, 10, &val);
if (err) {
pr_warn("Invalid size\n");
return err;
}

if (val > 1)
736 pr_warn("Invalid value, 0/1 is expected!\n");
737 else
738 g_pm_runtime_enable = val;
739
740 return count;
741 }
742
743 static DEVICE_ATTR(runtime_enable, 0660,
744 disp_runtime_enable_show, disp_runtime_enable_store);
static ssize_t disp_color_temperature_show(struct device *dev,
746 struct device_attribute *attr, char *buf)
747 {
748 int num_screens = 2;
749 struct disp_manager *mgr = NULL;
750 struct disp_device *dispdev = NULL;
751 int value = 0;
752
753 num_screens = bsp_disp_feat_get_num_screens();
754 if (g_disp < num_screens)
755 mgr = g_disp_drv.mgr[g_disp];
756
757 if (mgr && mgr->device) {
758 dispdev = mgr->device;
759 if (dispdev->get_color_temperature)
760 value = dispdev->get_color_temperature(dispdev);
761 }
762
763 return sprintf(buf, "%d\n", value);
764 }
765
static ssize_t disp_color_temperature_store(struct device *dev,
767 struct device_attribute *attr,
768 const char *buf, size_t count)
769 {
770 int err;
771 long value;
772 int num_screens = 2;
773 struct disp_manager *mgr = NULL;
774 struct disp_device *dispdev = NULL;
775
776 err = kstrtol(buf, 10, &value);
777 if (err) {
778 pr_warn("Invalid size\n");
779 return err;
780 }
781
782 if ((value > 256) || (value < -256)) {
783 pr_warn("value shoud in range [-256,256]\n");
784 value = (value > 256) ? 256 : value;
785 value = (value < -256) ? -256 : value;
786 }
787
788 num_screens = bsp_disp_feat_get_num_screens();
789 if (g_disp < num_screens)
790 mgr = g_disp_drv.mgr[g_disp];
791
792 if (mgr && mgr->device) {
793 dispdev = mgr->device;
794 if (dispdev->set_color_temperature)
795 value = dispdev->set_color_temperature(dispdev, value);
796 }
797
798 return count;
799 }
800
801 static DEVICE_ATTR(color_temperature, 0660,
802 disp_color_temperature_show, disp_color_temperature_store);
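/*
 * Illustrative use of the color_temperature attribute (path assumed as
 * above); the store handler clamps the value to [-256, 256]:
 *   echo -128 > /sys/class/disp/disp/attr/color_temperature
 *   cat /sys/class/disp/disp/attr/color_temperature
 */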
803
804
static ssize_t disp_boot_para_show(struct device *dev,
806 struct device_attribute *attr, char *buf)
807 {
808 #if DISP_SCREEN_NUM > 1
809 return sprintf(buf, "disp_para=%x init_disp=%x tv_vdid=%x fb_base=0x%x disp_config0=%d,%u - %d,%d,%d,%d disp_config1=%d,%u - %d,%d,%d,%d\n",
810 disp_boot_para_parse("boot_disp"),
811 disp_boot_para_parse("init_disp"),
812 disp_boot_para_parse("tv_vdid"),
813 disp_boot_para_parse("fb_base"),
814 g_disp_drv.disp_init.output_type[0], g_disp_drv.disp_init.output_mode[0],
815 g_disp_drv.disp_init.output_format[0], g_disp_drv.disp_init.output_bits[0],
816 g_disp_drv.disp_init.output_cs[0], g_disp_drv.disp_init.output_eotf[0],
817 g_disp_drv.disp_init.output_type[1], g_disp_drv.disp_init.output_mode[1],
818 g_disp_drv.disp_init.output_format[1], g_disp_drv.disp_init.output_bits[1],
819 g_disp_drv.disp_init.output_cs[1], g_disp_drv.disp_init.output_eotf[1]);
820 #else
821 return sprintf(buf, "disp_para=%x init_disp=%x tv_vdid=%x fb_base=0x%x disp_config0=%d,%u - %d,%d,%d,%d\n",
822 disp_boot_para_parse("boot_disp"),
823 disp_boot_para_parse("init_disp"),
824 disp_boot_para_parse("tv_vdid"),
825 disp_boot_para_parse("fb_base"),
826 g_disp_drv.disp_init.output_type[0], g_disp_drv.disp_init.output_mode[0],
827 g_disp_drv.disp_init.output_format[0], g_disp_drv.disp_init.output_bits[0],
828 g_disp_drv.disp_init.output_cs[0], g_disp_drv.disp_init.output_eotf[0]);
829 #endif
830 }
831
static ssize_t disp_xres_show(struct device *dev,
833 struct device_attribute *attr, char *buf)
834 {
835 int num_screens = 2;
836 struct disp_manager *mgr = NULL;
837 struct disp_device *dispdev = NULL;
u32 width = 0, height = 0;
839
840 num_screens = bsp_disp_feat_get_num_screens();
841 if (g_disp < num_screens)
842 mgr = g_disp_drv.mgr[g_disp];
843
844 if (mgr && mgr->device) {
845 dispdev = mgr->device;
846 if (dispdev->get_resolution)
847 dispdev->get_resolution(dispdev, &width, &height);
848 }
849
850 return sprintf(buf, "%d\n", width);
851 }
852
static ssize_t disp_yres_show(struct device *dev,
854 struct device_attribute *attr, char *buf)
855 {
856 int num_screens = 2;
857 struct disp_manager *mgr = NULL;
858 struct disp_device *dispdev = NULL;
859 u32 width = 0, height = 0;
860
861 num_screens = bsp_disp_feat_get_num_screens();
862 if (g_disp < num_screens)
863 mgr = g_disp_drv.mgr[g_disp];
864
865 if (mgr && mgr->device) {
866 dispdev = mgr->device;
867 if (dispdev->get_resolution)
868 dispdev->get_resolution(dispdev, &width, &height);
869 }
870
871 return sprintf(buf, "%d\n", height);
872 }
873
/**
 * @name :disp_draw_colorbar
 * @brief :draw a four-band colorbar using DE's LAYER_MODE_COLOR layers
 * @param[IN] :disp:screen index
 * @param[IN] :zorder:z-order assigned to the colorbar layers
 * @return :0 if success
 */
int disp_draw_colorbar(u32 disp, u8 zorder)
881 {
882 struct disp_manager *mgr = NULL;
883 struct disp_layer_config config[4];
884 unsigned int i = 0;
885 unsigned int width = 0, height = 0, num_screens;
886 int ret = -1;
887
888 num_screens = bsp_disp_feat_get_num_screens();
889 if (disp < num_screens)
890 mgr = g_disp_drv.mgr[disp];
891 else
892 return ret;
893
894 if (mgr && mgr->device && mgr->device->get_resolution)
895 mgr->device->get_resolution(mgr->device, &width, &height);
896 else
897 return ret;
898
899 memset(config, 0, 4 * sizeof(struct disp_layer_config));
900 for (i = 0; i < 4; ++i) {
901 config[i].channel = 0;
902 config[i].layer_id = i;
903 config[i].enable = 1;
904 config[i].info.zorder = zorder;
905 config[i].info.mode = LAYER_MODE_COLOR;
906 config[i].info.fb.format = DISP_FORMAT_ARGB_8888;
907 config[i].info.screen_win.width = width / 4;
908 config[i].info.screen_win.height = height;
909 config[i].info.screen_win.x = (width / 4) * i;
910 config[i].info.screen_win.y = 0;
911 config[i].info.fb.crop.x =
912 ((long long)(config[i].info.screen_win.x) << 32);
913 config[i].info.fb.crop.y =
914 ((long long)(config[i].info.screen_win.y) << 32);
915 config[i].info.fb.crop.width =
916 ((long long)(config[i].info.screen_win.width) << 32);
917 config[i].info.fb.crop.height =
918 ((long long)(config[i].info.screen_win.height) << 32);
919 }
920 config[0].info.color = 0xffff0000; /*red*/
921 config[1].info.color = 0xff00ff00; /*green*/
922 config[2].info.color = 0xff0000ff; /*blue*/
923 config[3].info.color = 0xffffff00; /*yellow*/
924
925 if (mgr->set_layer_config)
926 ret = mgr->set_layer_config(mgr, config, 4);
927
928 return ret;
929 }
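/*
 * Minimal call sketch (illustrative): paint the colorbar on screen 0,
 * using a high z-order so it sits above typical UI layers.
 *
 *   if (disp_draw_colorbar(0, 16) != 0)
 *       pr_warn("colorbar setup failed\n");
 *
 * The four LAYER_MODE_COLOR layers on channel 0 split the screen into
 * equal red/green/blue/yellow bands.
 */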
930
static ssize_t disp_colorbar_store(struct device *dev,
932 struct device_attribute *attr,
933 const char *buf, size_t count)
934 {
935 int err;
936 unsigned int val;
937 unsigned int num_screens;
938 struct disp_manager *mgr = NULL;
939
940 err = kstrtou32(buf, 10, &val);
941 if (err) {
942 pr_warn("Invalid size\n");
943 return err;
944 }
945
946 num_screens = bsp_disp_feat_get_num_screens();
947
948 if (g_disp < num_screens)
949 mgr = g_disp_drv.mgr[g_disp];
950
/* val: */
/* 0: DE --> tcon --> other interface */
/* 8: draw the DE colorbar (see disp_draw_colorbar) and device pattern 0 */
/* 1-7: tcon/edp or other device's built-in pattern */
/* for tcon: */
/* 1: color bar */
/* 2: grayscale check */
/* 3: black and white check */
/* 4: all 0 */
/* 5: all 1 */
/* 6: reserved */
/* 7: gridding */
/* for edp: */
/* 1: color bar */
/* 2: mosaic */
965 if (val == 8) {
966 disp_draw_colorbar(g_disp, 16);
967 if (mgr && mgr->device && mgr->device->show_builtin_patten)
968 mgr->device->show_builtin_patten(mgr->device, 0);
969 } else {
970 if (mgr && mgr->device && mgr->device->show_builtin_patten)
971 mgr->device->show_builtin_patten(mgr->device, val);
972 }
973
974 return count;
975 }
976
static ssize_t disp_capture_dump_store(struct device *dev,
978 struct device_attribute *attr,
979 const char *buf, size_t count)
980 {
981 #ifndef MODULE
982 struct file *pfile;
983 mm_segment_t old_fs;
984 ssize_t bw;
985 loff_t pos = 0;
986 dma_addr_t phy_addr = 0;
987 void *buf_addr_vir = NULL;
988 struct disp_capture_info cptr_info;
989 unsigned int size = 0, width = 0, height = 0, num_screens = 0;
990 struct disp_manager *mgr = NULL;
991 char *image_name = NULL;
992 int ret = -1, cs = DISP_CSC_TYPE_RGB;
993 struct bmp_header bmp_header;
994
995 num_screens = bsp_disp_feat_get_num_screens();
996
997 if (g_disp < num_screens)
998 mgr = g_disp_drv.mgr[g_disp];
999
1000 if (!mgr || !mgr->device || !mgr->cptr)
1001 goto OUT;
1002
1003 memset(&cptr_info, 0, sizeof(struct disp_capture_info));
1004
1005 image_name = kmalloc(count, GFP_KERNEL | __GFP_ZERO);
1006 if (!image_name) {
1007 __wrn("kmalloc image name fail!\n");
1008 goto OUT;
1009 }
1010 strncpy(image_name, buf, count);
1011 image_name[count - 1] = '\0';
1012
1013 old_fs = get_fs();
1014 set_fs(KERNEL_DS);
1015 pfile = filp_open(image_name, O_RDWR | O_CREAT | O_EXCL, 0755);
1016 set_fs(old_fs);
1017 if (IS_ERR(pfile)) {
1018 __wrn("%s, open %s err\n", __func__, image_name);
1019 goto FREE;
1020 }
1021
1022 if (mgr->device->get_resolution)
1023 ret = mgr->device->get_resolution(mgr->device, &width, &height);
1024 if (ret) {
1025 __wrn("Get resolution fail!\n");
1026 goto FILE_CLOSE;
1027 }
1028
1029 cptr_info.out_frame.size[0].width = width;
1030 cptr_info.out_frame.size[0].height = height;
1031 cptr_info.window.width = width;
1032 cptr_info.window.height = height;
1033 cptr_info.out_frame.crop.width = width;
1034 cptr_info.out_frame.crop.height = height;
1035 if (strstr(image_name, ".bmp"))
1036 cptr_info.out_frame.format = DISP_FORMAT_ARGB_8888;
1037 else if (strstr(image_name, ".yuv420_p"))
1038 cptr_info.out_frame.format = DISP_FORMAT_YUV420_P;
1039 else if (strstr(image_name, ".yuv420_sp_uvuv"))
1040 cptr_info.out_frame.format = DISP_FORMAT_YUV420_SP_UVUV;
1041 else if (strstr(image_name, ".yuv420_sp_vuvu"))
1042 cptr_info.out_frame.format = DISP_FORMAT_YUV420_SP_VUVU;
1043 else if (strstr(image_name, ".argb8888"))
1044 cptr_info.out_frame.format = DISP_FORMAT_ARGB_8888;
1045 else if (strstr(image_name, ".abgr8888"))
1046 cptr_info.out_frame.format = DISP_FORMAT_ABGR_8888;
1047 else if (strstr(image_name, ".rgb888"))
1048 cptr_info.out_frame.format = DISP_FORMAT_RGB_888;
1049 else if (strstr(image_name, ".bgr888"))
1050 cptr_info.out_frame.format = DISP_FORMAT_BGR_888;
1051 else if (strstr(image_name, ".rgba8888"))
1052 cptr_info.out_frame.format = DISP_FORMAT_RGBA_8888;
1053 else if (strstr(image_name, ".bgra8888"))
1054 cptr_info.out_frame.format = DISP_FORMAT_BGRA_8888;
1055 else {
1056 if (mgr->device->get_input_csc)
1057 cs = mgr->device->get_input_csc(mgr->device);
1058 if (cs == DISP_CSC_TYPE_RGB)
1059 cptr_info.out_frame.format = DISP_FORMAT_ARGB_8888;
1060 else
1061 cptr_info.out_frame.format = DISP_FORMAT_YUV420_P;
1062 }
1063
1064 size = width * height * 4;
1065
1066 buf_addr_vir = disp_malloc(size, (void *)&phy_addr);
1067 if (!phy_addr || !buf_addr_vir) {
1068 __wrn("%s, disp_malloc phy_addr err\n", __func__);
1069 goto FILE_CLOSE;
1070 }
1071
1072 cptr_info.out_frame.addr[0] = (unsigned long)phy_addr;
1073 cptr_info.out_frame.addr[1] =
1074 cptr_info.out_frame.addr[0] + width * height;
1075 cptr_info.out_frame.addr[2] =
1076 cptr_info.out_frame.addr[1] + width * height / 4;
1077
1078 ret = mgr->cptr->start(mgr->cptr);
1079 if (ret) {
1080 mgr->cptr->stop(mgr->cptr);
1081 goto FREE_DMA;
1082 }
1083
1084 ret = mgr->cptr->commmit(mgr->cptr, &cptr_info);
1085 if (ret) {
1086 mgr->cptr->stop(mgr->cptr);
1087 goto FREE_DMA;
1088 }
1089 disp_delay_ms(1000);
1090 ret = mgr->cptr->stop(mgr->cptr);
1091 if (ret)
1092 goto FREE_DMA;
1093
1094 if (strstr(image_name, ".bmp")) {
1095 memset(&bmp_header, 0, sizeof(struct bmp_header));
1096 bmp_header.signature[0] = 'B';
1097 bmp_header.signature[1] = 'M';
1098 bmp_header.data_offset = sizeof(struct bmp_header);
1099 bmp_header.file_size = bmp_header.data_offset + size;
1100 bmp_header.size = sizeof(struct bmp_header) - 14;
1101 bmp_header.width = width;
1102 bmp_header.height = -height;
1103 bmp_header.planes = 1;
1104 bmp_header.bit_count = 32;
1105 bmp_header.image_size = size;
1106 bw = kernel_write(pfile, (const char *)&bmp_header,
1107 sizeof(struct bmp_header), &pos);
1108 pos = sizeof(struct bmp_header);
1109 }
1110
1111 bw = kernel_write(pfile, (char *)buf_addr_vir, size, &pos);
1112 if (unlikely(bw != size))
1113 __wrn("%s, write %s err at byte offset %llu\n", __func__,
1114 image_name, pfile->f_pos);
1115
1116 FREE_DMA:
1117 disp_free((void *)buf_addr_vir, (void *)phy_addr, size);
1118 FILE_CLOSE:
1119 filp_close(pfile, NULL);
1120 FREE:
1121 kfree(image_name);
1122 image_name = NULL;
1123 OUT:
1124 #endif
1125 return count;
1126 }
1127
static DEVICE_ATTR(boot_para, 0440, disp_boot_para_show, NULL);
static DEVICE_ATTR(xres, 0440, disp_xres_show, NULL);
static DEVICE_ATTR(yres, 0440, disp_yres_show, NULL);
static DEVICE_ATTR(colorbar, 0220, NULL, disp_colorbar_store);
static DEVICE_ATTR(capture_dump, 0220, NULL, disp_capture_dump_store);
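/*
 * Illustrative sysfs use of the two write-only debug attributes above
 * (paths and file names assumed): writing 8 to "colorbar" draws the DE
 * colorbar, other values select a device built-in pattern; a write to
 * "capture_dump" saves one captured frame, the format being chosen from
 * the file extension.
 *   echo 8 > /sys/class/disp/disp/attr/colorbar
 *   echo /data/screen0.bmp > /sys/class/disp/disp/attr/capture_dump
 */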
1133
1134 static struct attribute *disp_attributes[] = {
1135 &dev_attr_sys.attr,
1136 &dev_attr_disp.attr,
1137 &dev_attr_enhance_mode.attr,
1138 &dev_attr_cvbs_enhacne_mode.attr,
1139 &dev_attr_runtime_enable.attr,
1140 &dev_attr_enhance_bright.attr,
1141 &dev_attr_enhance_saturation.attr,
1142 &dev_attr_enhance_contrast.attr,
1143 &dev_attr_enhance_edge.attr,
1144 &dev_attr_enhance_detail.attr,
1145 &dev_attr_enhance_denoise.attr,
1146 &dev_attr_color_temperature.attr,
1147 &dev_attr_boot_para.attr,
1148 &dev_attr_xres.attr,
1149 &dev_attr_yres.attr,
1150 &dev_attr_colorbar.attr,
1151 &dev_attr_capture_dump.attr,
1152 NULL
1153 };
1154
1155 static struct attribute_group disp_attribute_group = {
1156 .name = "attr",
1157 .attrs = disp_attributes
1158 };
1159
unsigned int disp_boot_para_parse(const char *name)
1161 {
1162 unsigned int value = 0;
1163
1164 if (!g_disp_drv.dev->of_node) {
1165 pr_err("disp_boot_para_parse failed, of node is NULL!\n");
1166 return 0;
1167 }
1168
1169 if (of_property_read_u32(g_disp_drv.dev->of_node, name, &value) < 0)
1170 __inf("of_property_read disp.%s fail\n", name);
1171
1172 __inf("[DISP] %s:0x%x\n", name, value);
1173 return value;
1174 }
1175 EXPORT_SYMBOL(disp_boot_para_parse);
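/*
 * Example (illustrative) device-tree fragment for the scalar properties
 * read by disp_boot_para_parse(); the node label and values are
 * placeholders, not taken from a real board dts:
 *
 *   &disp {
 *       boot_disp = <0x0>;
 *       fb_base   = <0x48000000>;
 *   };
 */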
1176
unsigned int disp_boot_para_parse_array(const char *name, unsigned int *value,
1178 unsigned int count)
1179 {
1180 unsigned int ret = 0;
1181
1182 ret = of_property_read_u32_array(g_disp_drv.dev->of_node, name,
1183 value, count);
1184 if (ret)
1185 __wrn("of_property_read_array disp.%s fail\n", name);
1186
1187 return ret;
1188 }
1189 EXPORT_SYMBOL(disp_boot_para_parse_array);
1190
1191
const char *disp_boot_para_parse_str(const char *name)
1193 {
1194 const char *str;
1195
1196 if (!of_property_read_string(g_disp_drv.dev->of_node, name, &str))
1197 return str;
1198
1199 __inf("of_property_read_string disp.%s fail\n", name);
1200
1201 return NULL;
1202 }
1203 EXPORT_SYMBOL(disp_boot_para_parse_str);
1204
static s32 parser_disp_init_para(const struct device_node *np,
1206 struct disp_init_para *init_para)
1207 {
1208 int value;
1209 int i;
1210
1211 memset(init_para, 0, sizeof(struct disp_init_para));
1212
1213 if (of_property_read_u32(np, "disp_init_enable", &value) < 0) {
1214 __wrn("of_property_read disp_init.disp_init_enable fail\n");
1215 return -1;
1216 }
1217 init_para->b_init = value;
1218
1219 if (of_property_read_u32(np, "chn_cfg_mode", &value) < 0)
1220 value = 0;
1221 init_para->chn_cfg_mode = value;
1222
1223 if (of_property_read_u32(np, "disp_mode", &value) < 0) {
1224 __wrn("of_property_read disp_init.disp_mode fail\n");
1225 return -1;
1226 }
1227 init_para->disp_mode = value;
1228
1229 /* screen0 */
1230 if (of_property_read_u32(np, "screen0_output_type", &value) < 0) {
1231 __wrn("of_property_read disp_init.screen0_output_type fail\n");
1232 return -1;
1233 }
1234 if (value == 0) {
1235 init_para->output_type[0] = DISP_OUTPUT_TYPE_NONE;
1236 } else if (value == 1) {
1237 init_para->output_type[0] = DISP_OUTPUT_TYPE_LCD;
1238 } else if (value == 2) {
1239 init_para->output_type[0] = DISP_OUTPUT_TYPE_TV;
1240 } else if (value == 3) {
1241 init_para->output_type[0] = DISP_OUTPUT_TYPE_HDMI;
1242 } else if (value == 4) {
1243 init_para->output_type[0] = DISP_OUTPUT_TYPE_VGA;
1244 } else if (value == 5) {
1245 init_para->output_type[0] = DISP_OUTPUT_TYPE_VDPO;
1246 } else if (value == 6) {
1247 init_para->output_type[0] = DISP_OUTPUT_TYPE_EDP;
1248 } else if (value == 7) {
1249 init_para->output_type[0] = DISP_OUTPUT_TYPE_RTWB;
1250 } else {
1251 __wrn("invalid screen0_output_type %d\n",
1252 init_para->output_type[0]);
1253 return -1;
1254 }
1255
1256 if (of_property_read_u32(np, "screen0_output_mode", &value) < 0) {
1257 __wrn("of_property_read disp_init.screen0_output_mode fail\n");
1258 return -1;
1259 }
1260
1261 if (init_para->output_type[0] != DISP_OUTPUT_TYPE_NONE &&
1262 init_para->output_type[0] != DISP_OUTPUT_TYPE_LCD)
1263 init_para->output_mode[0] = value;
1264
1265 if (of_property_read_u32(np, "screen0_output_format", &value) < 0) {
1266 __inf("of_property_read screen0_output_format fail\n");
1267 } else {
1268 init_para->output_format[0] = value;
1269 init_para->using_device_config[0] = true;
1270 }
1271
1272 if (of_property_read_u32(np, "screen0_output_bits", &value) < 0) {
1273 __inf("of_property_read screen0_output_bits fail\n");
1274 } else {
1275 init_para->output_bits[0] = value;
1276 init_para->using_device_config[0] = true;
1277 }
1278
1279 if (of_property_read_u32(np, "screen0_output_eotf", &value) < 0) {
1280 __inf("of_property_read screen0_output_eotf fail\n");
1281 } else {
1282 init_para->output_eotf[0] = value;
1283 init_para->using_device_config[0] = true;
1284 }
1285
1286 if (of_property_read_u32(np, "screen0_output_cs", &value) < 0) {
1287 __inf("of_property_read screen0_output_cs fail\n");
1288 } else {
1289 init_para->output_cs[0] = value;
1290 init_para->using_device_config[0] = true;
1291 }
1292
1293 if (of_property_read_u32(np, "screen0_output_dvi_hdmi", &value) < 0) {
1294 __inf("of_property_read screen0_output_dvi_hdmi fail\n");
1295 } else {
1296 init_para->output_dvi_hdmi[0] = value;
1297 init_para->using_device_config[0] = true;
1298 }
1299
1300 if (of_property_read_u32(np, "screen0_output_range", &value) < 0) {
1301 __inf("of_property_read screen0_output_range fail\n");
1302 } else {
1303 init_para->output_range[0] = value;
1304 init_para->using_device_config[0] = true;
1305 }
1306
1307 if (of_property_read_u32(np, "screen0_output_scan", &value) < 0) {
1308 __inf("of_property_read screen0_output_scan fail\n");
1309 } else {
1310 init_para->output_scan[0] = value;
1311 init_para->using_device_config[0] = true;
1312 }
1313
1314 if (of_property_read_u32(np, "screen0_output_aspect_ratio", &value) < 0) {
1315 __inf("of_property_read screen0_output_aspect_ratio fail\n");
1316 } else {
1317 init_para->output_aspect_ratio[0] = value;
1318 init_para->using_device_config[0] = true;
1319 }
1320
1321 #if DISP_SCREEN_NUM > 1
1322 /* screen1 */
1323 if (of_property_read_u32(np,
1324 "screen1_output_type",
1325 &value) < 0) {
1326 __wrn("of_property_read screen1_output_type fail\n");
1327 return -1;
1328 }
1329 if (value == 0) {
1330 init_para->output_type[1] = DISP_OUTPUT_TYPE_NONE;
1331 } else if (value == 1) {
1332 init_para->output_type[1] = DISP_OUTPUT_TYPE_LCD;
1333 } else if (value == 2) {
1334 init_para->output_type[1] = DISP_OUTPUT_TYPE_TV;
1335 } else if (value == 3) {
1336 init_para->output_type[1] = DISP_OUTPUT_TYPE_HDMI;
1337 } else if (value == 4) {
1338 init_para->output_type[1] = DISP_OUTPUT_TYPE_VGA;
1339 } else if (value == 5) {
1340 init_para->output_type[1] = DISP_OUTPUT_TYPE_VDPO;
1341 } else if (value == 6) {
1342 init_para->output_type[1] = DISP_OUTPUT_TYPE_EDP;
1343 } else if (value == 7) {
1344 init_para->output_type[1] = DISP_OUTPUT_TYPE_RTWB;
1345 } else {
1346 __wrn("invalid screen1_output_type %d\n",
1347 init_para->output_type[1]);
1348 return -1;
1349 }
1350
1351 if (of_property_read_u32(np, "screen1_output_mode", &value) < 0)
1352 __inf
1353 ("of_property_read screen1_output_mode fail\n");
1354 if (init_para->output_type[1] != DISP_OUTPUT_TYPE_NONE &&
1355 init_para->output_type[1] != DISP_OUTPUT_TYPE_LCD)
1356 init_para->output_mode[1] = value;
1357
1358 if (of_property_read_u32(np,
1359 "screen1_output_format", &value) < 0) {
1360 __inf("of_property_read screen1_output_format fail\n");
1361 } else {
1362 init_para->output_format[1] = value;
1363 init_para->using_device_config[1] = true;
1364 }
1365
1366 if (of_property_read_u32(np,
1367 "screen1_output_bits", &value) < 0) {
1368 __inf("of_property_read screen1_output_bits fail\n");
1369 } else {
1370 init_para->output_bits[1] = value;
1371 init_para->using_device_config[1] = true;
1372 }
1373
1374 if (of_property_read_u32(np,
1375 "screen1_output_eotf", &value) < 0) {
1376 __inf("of_property_read screen1_output_eotf fail\n");
1377 } else {
1378 init_para->output_eotf[1] = value;
1379 init_para->using_device_config[1] = true;
1380 }
1381
1382 if (of_property_read_u32(np, "screen1_output_cs", &value) < 0) {
1383 __inf("of_property_read screen1_output_cs fail\n");
1384 } else {
1385 init_para->output_cs[1] = value;
1386 init_para->using_device_config[1] = true;
1387 }
1388
1389 if (of_property_read_u32(np, "screen1_output_dvi_hdmi", &value) < 0) {
1390 __inf(
1391 "of_property_read screen1_output_dvi_hdmi fail\n");
1392 } else {
1393 init_para->output_dvi_hdmi[1] = value;
1394 init_para->using_device_config[1] = true;
1395 }
1396
1397 if (of_property_read_u32(np, "screen1_output_range", &value) < 0) {
1398 __inf("of_property_read screen1_output_range fail\n");
1399 } else {
1400 init_para->output_range[1] = value;
1401 init_para->using_device_config[1] = true;
1402 }
1403
1404 if (of_property_read_u32(np, "screen1_output_scan", &value) < 0) {
1405 __inf("of_property_read screen1_output_scan fail\n");
1406 } else {
1407 init_para->output_scan[1] = value;
1408 init_para->using_device_config[1] = true;
1409 }
1410
1411 if (of_property_read_u32(np, "screen1_output_aspect_ratio", &value) < 0) {
1412 __inf("read screen1_output_aspect_ratio fail\n");
1413 } else {
1414 init_para->output_aspect_ratio[1] = value;
1415 init_para->using_device_config[1] = true;
1416 }
1417 #endif
1418 /* fb0 */
1419 init_para->buffer_num[0] = 2;
1420 if (of_property_read_u32(np, "fb0_buffer_num", &value) == 0)
1421 init_para->buffer_num[0] = value;
1422
1423 if (of_property_read_u32(np, "fb0_format", &value) < 0) {
1424 __wrn("of_property_read disp_init.fb0_format fail\n");
1425 return -1;
1426 }
1427 init_para->format[0] = value;
1428
1429 if (of_property_read_u32(np, "fb0_width", &value) < 0) {
1430 __wrn("of_property_read fb0_width fail\n");
1431 return -1;
1432 }
1433 init_para->fb_width[0] = value;
1434
1435 if (of_property_read_u32(np, "fb0_height", &value) < 0) {
1436 __wrn("of_property_read fb0_height fail\n");
1437 return -1;
1438 }
1439 init_para->fb_height[0] = value;
1440
1441 /* fb1 */
1442 #if DISP_SCREEN_NUM > 1
1443 init_para->buffer_num[1] = 2;
1444
1445 if (of_property_read_u32(np, "fb1_buffer_num", &value) == 0)
1446 init_para->buffer_num[1] = value;
1447
1448 if (of_property_read_u32(np, "fb1_format", &value) == 0)
1449 init_para->format[1] = value;
1450
1451 if (of_property_read_u32(np, "fb1_width", &value) == 0)
1452 init_para->fb_width[1] = value;
1453
1454 if (of_property_read_u32(np, "fb1_height", &value) == 0)
1455 init_para->fb_height[1] = value;
1456 #endif
1457
1458 __inf("====display init para begin====\n");
1459 __inf("b_init:%d\n", init_para->b_init);
1460 __inf("disp_mode:%d\n\n", init_para->disp_mode);
1461 for (i = 0; i < DISP_SCREEN_NUM; i++) {
1462 __inf("output_type[%d]:%d\n", i, init_para->output_type[i]);
1463 __inf("output_mode[%d]:%d\n", i, init_para->output_mode[i]);
1464 }
1465 for (i = 0; i < DISP_SCREEN_NUM; i++) {
1466 __inf("buffer_num[%d]:%d\n", i, init_para->buffer_num[i]);
1467 __inf("format[%d]:%d\n", i, init_para->format[i]);
1468 __inf("fb_width[%d]:%d\n", i, init_para->fb_width[i]);
1469 __inf("fb_height[%d]:%d\n", i, init_para->fb_height[i]);
1470 }
1471 __inf("====display init para end====\n");
1472
1473 return 0;
1474 }
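/*
 * Illustrative disp_init configuration matching the parser above (values
 * are placeholders for a single-LCD board, not from a real dts):
 *
 *   disp_init_enable    = <1>;
 *   disp_mode           = <0>;   (screen0 only)
 *   screen0_output_type = <1>;   (LCD; output_mode is ignored for LCD)
 *   fb0_format = <0>; fb0_width = <800>; fb0_height = <1280>;
 */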
1475
void *disp_malloc(u32 num_bytes, void *phys_addr)
1477 {
1478 u32 actual_bytes;
1479 void *address = NULL;
1480
1481 if (num_bytes != 0) {
1482 actual_bytes = MY_BYTE_ALIGN(num_bytes);
1483
1484 address =
1485 dma_alloc_coherent(g_disp_drv.dev, actual_bytes,
1486 (dma_addr_t *) phys_addr, GFP_KERNEL);
1487 if (address) {
1488 __inf
1489 ("dma_alloc_coherent ok, address=0x%p, size=0x%x\n",
1490 (void *)(*(unsigned long *)phys_addr), num_bytes);
1491 return address;
1492 }
1493
1494 __wrn("dma_alloc_coherent fail, size=0x%x\n", num_bytes);
1495 return NULL;
1496 }
1497
1498 __wrn("%s size is zero\n", __func__);
1499
1500 return NULL;
1501 }
1502
void disp_free(void *virt_addr, void *phys_addr, u32 num_bytes)
1504 {
1505 u32 actual_bytes;
1506
1507 actual_bytes = MY_BYTE_ALIGN(num_bytes);
1508 if (phys_addr && virt_addr)
1509 dma_free_coherent(g_disp_drv.dev, actual_bytes, virt_addr,
1510 (dma_addr_t)phys_addr);
1511 }
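/*
 * Typical pairing of disp_malloc()/disp_free() (illustrative, the size is
 * an arbitrary example): the DMA address is returned through the second
 * argument and must be handed back together with the virtual address and
 * the original size, exactly as disp_capture_dump_store() does above.
 *
 *   dma_addr_t phy = 0;
 *   void *va = disp_malloc(64 * 1024, (void *)&phy);
 *   if (va)
 *       disp_free(va, (void *)phy, 64 * 1024);
 */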
1512
1513 #if defined(CONFIG_DMABUF_HEAPS)
static int init_disp_ion_mgr(struct disp_ion_mgr *ion_mgr)
1515 {
1516 if (ion_mgr == NULL) {
1517 __wrn("input param is null\n");
1518 return -EINVAL;
1519 }
1520
1521 mutex_init(&(ion_mgr->mlock));
1522
1523 mutex_lock(&(ion_mgr->mlock));
1524 INIT_LIST_HEAD(&(ion_mgr->ion_list));
1525 mutex_unlock(&(ion_mgr->mlock));
1526
1527 return 0;
1528 }
1529
static int __disp_dma_heap_alloc_coherent(struct disp_ion_mem *mem)
1531 {
1532 struct dma_buf *dmabuf;
1533 struct dma_heap *dmaheap;
1534 struct dma_buf_attachment *attachment;
1535 struct sg_table *sgt;
1536
1537 #if IS_ENABLED(CONFIG_AW_IOMMU)
1538 dmaheap = dma_heap_find("system");
1539 #else
1540 dmaheap = dma_heap_find("reserved");
1541 #endif
1542
1543 if (IS_ERR_OR_NULL(dmaheap)) {
1544 __wrn("%s failed, size=%u dmaheap=0x%p\n", __func__, (unsigned int)mem->size, dmaheap);
1545 return -2;
1546 }
1547
1548 dmabuf = dma_heap_buffer_alloc(dmaheap, mem->size, O_RDWR, 0);
1549
1550 if (IS_ERR_OR_NULL(dmabuf)) {
1551 __wrn("%s failed, size=%u dmabuf=0x%p\n", __func__, (unsigned int)mem->size, dmabuf);
1552 return -2;
1553 }
1554 mem->vaddr = dma_buf_vmap(dmabuf);
1555
1556 if (IS_ERR_OR_NULL(mem->vaddr)) {
1557 __wrn("ion_map_kernel failed!!\n");
1558 goto err_map_kernel;
1559 }
1560
1561 __debug("ion map kernel, vaddr=0x%p\n", mem->vaddr);
mem->p_item = kmalloc(sizeof(struct dmabuf_item), GFP_KERNEL);
if (!mem->p_item) {
__wrn("kmalloc dmabuf_item failed\n");
goto err_buf_put;
}
1563
1564 attachment = dma_buf_attach(dmabuf, g_disp_drv.dev);
1565 if (IS_ERR(attachment)) {
1566 DE_WRN("dma_buf_attach failed\n");
1567 goto err_buf_put;
1568 }
1569 sgt = dma_buf_map_attachment(attachment, DMA_FROM_DEVICE);
1570 if (IS_ERR_OR_NULL(sgt)) {
1571 DE_WRN("dma_buf_map_attachment failed\n");
1572 // FIXME, wait iommu ready
1573 return -1;
1574 // goto err_buf_detach;
1575 }
1576
1577 mem->p_item->dmabuf = dmabuf;
1578 mem->p_item->sgt = sgt;
1579 mem->p_item->attachment = attachment;
1580 mem->p_item->dma_addr = sg_dma_address(sgt->sgl);
1581
1582 return 0;
1583 /* unmap attachment sgt, not sgt_bak, cause it's not alloc yet! */
1584 dma_buf_unmap_attachment(attachment, sgt, DMA_FROM_DEVICE);
1585 // err_buf_detach:
1586 dma_buf_detach(dmabuf, attachment);
err_buf_put:
kfree(mem->p_item);
dma_buf_vunmap(dmabuf, mem->vaddr);
err_map_kernel:
dma_heap_buffer_free(dmabuf);
return -ENOMEM;
1593 }
1594
static void __disp_ion_free_coherent(struct disp_ion_mem *mem)
1596 {
1597 struct dmabuf_item item;
1598 memcpy(&item, mem->p_item, sizeof(struct dmabuf_item));
1599 disp_dma_unmap(mem->p_item);
1600 dma_buf_vunmap(item.dmabuf, mem->vaddr);
1601 dma_heap_buffer_free(item.dmabuf);
1602 return;
1603 }
1604
1605
1606
struct disp_ion_mem *disp_ion_malloc(u32 num_bytes, void *phys_addr)
1608 {
1609 struct disp_ion_mgr *ion_mgr = &(g_disp_drv.ion_mgr);
1610 struct disp_ion_list_node *ion_node = NULL;
1611 struct disp_ion_mem *mem = NULL;
1612 u32 *paddr = NULL;
1613 int ret = -1;
1614
1615 if (ion_mgr == NULL) {
1616 __wrn("disp ion manager has not initial yet\n");
1617 return NULL;
1618 }
1619
1620 ion_node = kmalloc(sizeof(struct disp_ion_list_node), GFP_KERNEL);
1621 if (ion_node == NULL) {
1622 __wrn("fail to alloc ion node, size=%u\n",
1623 (unsigned int)sizeof(struct disp_ion_list_node));
1624 return NULL;
1625 }
1626 mutex_lock(&(ion_mgr->mlock));
1627 mem = &ion_node->mem;
1628 mem->size = MY_BYTE_ALIGN(num_bytes);
1629
1630 ret = __disp_dma_heap_alloc_coherent(mem);
1631
1632 if (ret != 0) {
1633 __wrn("fail to alloc ion, ret=%d\n", ret);
1634 goto err_hdl;
1635 }
1636
1637 paddr = (u32 *)phys_addr;
1638 *paddr = (u32)mem->p_item->dma_addr;
1639 list_add_tail(&(ion_node->node), &(ion_mgr->ion_list));
1640
1641 mutex_unlock(&(ion_mgr->mlock));
1642 return mem;
1643
1644 err_hdl:
1645 kfree(ion_node);
1646 mutex_unlock(&(ion_mgr->mlock));
1647
1648 return NULL;
1649 }
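/*
 * Illustrative use of the dmabuf-heap allocator above (size and variable
 * names are examples only): the 32-bit DMA address is written through
 * phys_addr, and disp_ion_free() must later be called with the same
 * virtual/physical pair so the node can be found in ion_list.
 *
 *   u32 phy = 0;
 *   struct disp_ion_mem *mem = disp_ion_malloc(PAGE_SIZE, &phy);
 *   if (mem)
 *       disp_ion_free(mem->vaddr, (void *)(unsigned long)phy, PAGE_SIZE);
 */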
1650
int disp_get_ion_fd(struct disp_ion_mem *mem)
1652 {
1653 return dma_buf_fd(mem->p_item->dmabuf, O_CLOEXEC);
1654 }
1655
void *disp_get_phy_addr(struct disp_ion_mem *mem)
1657 {
1658 return (void *)mem->p_item->dma_addr;
1659 }
1660
void disp_ion_free(void *virt_addr, void *phys_addr, u32 num_bytes)
1662 {
1663 struct disp_ion_mgr *ion_mgr = &(g_disp_drv.ion_mgr);
1664 struct disp_ion_list_node *ion_node = NULL, *tmp_ion_node = NULL;
1665 struct disp_ion_mem *mem = NULL;
1666 bool found = false;
1667
1668 if (ion_mgr == NULL) {
1669 __wrn("disp ion manager has not initial yet\n");
1670 return;
1671 }
1672
1673
1674 mutex_lock(&(ion_mgr->mlock));
1675 list_for_each_entry_safe(ion_node, tmp_ion_node, &ion_mgr->ion_list,
1676 node) {
1677 if (ion_node != NULL) {
1678 mem = &ion_node->mem;
1679 if ((((unsigned long)mem->p_item->dma_addr) ==
1680 ((unsigned long)phys_addr)) &&
1681 (((unsigned long)mem->vaddr) ==
1682 ((unsigned long)virt_addr))) {
1683 __disp_ion_free_coherent(mem);
1684 __list_del_entry(&(ion_node->node));
1685 found = true;
1686 break;
1687 }
1688 }
1689 }
1690 mutex_unlock(&(ion_mgr->mlock));
1691
1692 if (false == found) {
1693 __wrn("vaddr=0x%p, paddr=0x%p is not found in ion\n", virt_addr,
1694 phys_addr);
1695 }
1696 }
1697
static void deinit_disp_ion_mgr(struct disp_ion_mgr *ion_mgr)
1699 {
1700 struct disp_ion_list_node *ion_node = NULL, *tmp_ion_node = NULL;
1701 struct disp_ion_mem *mem = NULL;
1702
1703 if (ion_mgr == NULL) {
1704 __wrn("input param is null\n");
1705 return;
1706 }
1707
1708 mutex_lock(&(ion_mgr->mlock));
1709 list_for_each_entry_safe(ion_node, tmp_ion_node, &ion_mgr->ion_list,
1710 node) {
1711 if (ion_node != NULL) {
1712 // free all ion node
1713 mem = &ion_node->mem;
1714 __disp_ion_free_coherent(mem);
1715 __list_del_entry(&(ion_node->node));
1716 kfree(ion_node);
1717 }
1718 }
1719 mutex_unlock(&(ion_mgr->mlock));
1720 }
1721 #endif
1722
s32 disp_set_hdmi_func(struct disp_device_func *func)
1724 {
1725 return bsp_disp_set_hdmi_func(func);
1726 }
1727 EXPORT_SYMBOL(disp_set_hdmi_func);
1728
s32 disp_set_edp_func(struct disp_tv_func *func)
1730 {
1731 return bsp_disp_set_edp_func(func);
1732 }
1733 EXPORT_SYMBOL(disp_set_edp_func);
1734
s32 disp_set_vdpo_func(struct disp_tv_func *func)
1736 {
1737 return bsp_disp_set_vdpo_func(func);
1738 }
1739 EXPORT_SYMBOL(disp_set_vdpo_func);
1740
s32 disp_set_hdmi_detect(bool hpd)
1742 {
1743 return bsp_disp_hdmi_set_detect(hpd);
1744 }
1745 EXPORT_SYMBOL(disp_set_hdmi_detect);
1746
s32 disp_tv_register(struct disp_tv_func *func)
1748 {
1749 return bsp_disp_tv_register(func);
1750 }
1751 EXPORT_SYMBOL(disp_tv_register);
1752
static void resume_proc(unsigned int disp, struct disp_manager *mgr)
1754 {
1755 if (!mgr || !mgr->device)
1756 return;
1757
1758 if (mgr->device->type == DISP_OUTPUT_TYPE_LCD)
1759 mgr->device->fake_enable(mgr->device);
1760 }
1761
static void resume_work_0(struct work_struct *work)
1763 {
1764 resume_proc(0, g_disp_drv.mgr[0]);
1765 }
1766
1767 #if DISP_SCREEN_NUM > 1
static void resume_work_1(struct work_struct *work)
1769 {
1770 resume_proc(1, g_disp_drv.mgr[1]);
1771 }
1772 #endif
1773
int disp_device_set_config(struct disp_init_para *init,
1775 unsigned int screen_id)
1776 {
1777 struct disp_device_config config;
1778
1779 if (screen_id >= DISP_SCREEN_NUM) {
1780 __wrn("Out of range of screen index\n");
1781 return -1;
1782 }
1783
1784 memset(&config, 0, sizeof(struct disp_device_config));
1785 config.type = init->output_type[screen_id];
1786 config.mode = init->output_mode[screen_id];
1787 config.format = init->output_format[screen_id];
1788 config.bits = init->output_bits[screen_id];
1789 config.eotf = init->output_eotf[screen_id];
1790 config.cs = init->output_cs[screen_id];
1791 config.dvi_hdmi = init->output_dvi_hdmi[screen_id];
1792 config.range = init->output_range[screen_id];
1793 config.scan = init->output_scan[screen_id];
1794 config.aspect_ratio = init->output_aspect_ratio[screen_id];
1795 if (!init->using_device_config[screen_id])
1796 return bsp_disp_device_switch(screen_id, config.type, (enum disp_output_type)config.mode);
1797 else
1798 return bsp_disp_device_set_config(screen_id, &config);
1799 }
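/*
 * Usage sketch (illustrative only): disp_device_set_config() is normally
 * driven from start_work() below once the matching output driver has
 * registered, e.g.
 *
 *	if (disp_device_set_config(&g_disp_drv.disp_init, 0))
 *		pr_warn("failed to switch screen 0\n");
 *
 * If using_device_config[] is not set for the screen, only type/mode are
 * applied through bsp_disp_device_switch(); otherwise the full
 * disp_device_config (format, bits, eotf, cs, ...) is handed to
 * bsp_disp_device_set_config().
 */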
1800
start_work(struct work_struct * work)1801 static void start_work(struct work_struct *work)
1802 {
1803 int num_screens;
1804 int screen_id;
1805 int count = 0;
1806
1807 num_screens = bsp_disp_feat_get_num_screens();
1808 while ((g_disp_drv.inited == 0) && (count < 5)) {
1809 count++;
1810 msleep(20);
1811 }
1812 	if (g_disp_drv.inited == 0)
1813 pr_warn("%s, timeout\n", __func__);
1814 if (g_disp_drv.para.boot_info.sync == 0) {
1815 for (screen_id = 0; screen_id < num_screens; screen_id++) {
1816 int disp_mode = g_disp_drv.disp_init.disp_mode;
1817 int output_type =
1818 g_disp_drv.disp_init.output_type[screen_id%DE_NUM];
1819 int lcd_registered =
1820 bsp_disp_get_lcd_registered(screen_id);
1821 int hdmi_registered = bsp_disp_get_hdmi_registered();
1822
1823 			__inf("sel=%d, output_type=%d, lcd_reg=%d, hdmi_reg=%d\n",
1824 			      screen_id, output_type, lcd_registered,
1825 			      hdmi_registered);
1827 if (((disp_mode == DISP_INIT_MODE_SCREEN0)
1828 && (screen_id == 0))
1829 || ((disp_mode == DISP_INIT_MODE_SCREEN1)
1830 && (screen_id == 1))) {
1831 if (output_type == DISP_OUTPUT_TYPE_LCD) {
1832 if (lcd_registered &&
1833 bsp_disp_get_output_type(screen_id)
1834 != DISP_OUTPUT_TYPE_LCD) {
1835 disp_device_set_config(
1836 &g_disp_drv.disp_init, screen_id);
1837 suspend_output_type[screen_id] =
1838 output_type;
1839 }
1840 } else if (output_type
1841 == DISP_OUTPUT_TYPE_HDMI) {
1842 if (hdmi_registered &&
1843 bsp_disp_get_output_type(screen_id)
1844 != DISP_OUTPUT_TYPE_HDMI) {
1845 msleep(600);
1846 disp_device_set_config(
1847 &g_disp_drv.disp_init, screen_id);
1848 suspend_output_type[screen_id] =
1849 output_type;
1850 }
1851 } else {
1852 disp_device_set_config(
1853 &g_disp_drv.disp_init, screen_id);
1854 suspend_output_type[screen_id] =
1855 output_type;
1856 }
1857 }
1858 }
1859 } else {
1860 if ((g_disp_drv.para.boot_info.type == DISP_OUTPUT_TYPE_HDMI)
1861 && !bsp_disp_get_hdmi_registered())
1862 return;
1863 if (bsp_disp_get_output_type(g_disp_drv.para.boot_info.disp) !=
1864 g_disp_drv.para.boot_info.type) {
1865 bsp_disp_sync_with_hw(&g_disp_drv.para);
1866 suspend_output_type[g_disp_drv.para.boot_info.disp] =
1867 g_disp_drv.para.boot_info.type;
1868 }
1869 }
1870 }
1871
start_process(void)1872 static s32 start_process(void)
1873 {
1874 flush_work(&g_disp_drv.start_work);
1875 #if !IS_ENABLED(CONFIG_EINK_PANEL_USED) && !IS_ENABLED(CONFIG_EINK200_SUNXI)
1876 schedule_work(&g_disp_drv.start_work);
1877 #endif
1878 return 0;
1879 }
1880
disp_register_sync_proc(void (* proc)(u32))1881 s32 disp_register_sync_proc(void (*proc) (u32))
1882 {
1883 struct proc_list *new_proc;
1884
1885 new_proc =
1886 (struct proc_list *)disp_sys_malloc(sizeof(struct proc_list));
1887 if (new_proc) {
1888 new_proc->proc = proc;
1889 list_add_tail(&(new_proc->list),
1890 &(g_disp_drv.sync_proc_list.list));
1891 } else {
1892 pr_warn("malloc fail in %s\n", __func__);
1893 }
1894
1895 return 0;
1896 }
1897
disp_unregister_sync_proc(void (* proc)(u32))1898 s32 disp_unregister_sync_proc(void (*proc) (u32))
1899 {
1900 struct proc_list *ptr, *ptrtmp;
1901
1902 if (proc == NULL) {
1903 pr_warn("hdl is NULL in %s\n", __func__);
1904 return -1;
1905 }
1906 list_for_each_entry_safe(ptr,
1907 ptrtmp,
1908 &g_disp_drv.sync_proc_list.list,
1909 list) {
1910 if (ptr->proc == proc) {
1911 list_del(&ptr->list);
1912 kfree((void *)ptr);
1913 return 0;
1914 }
1915 }
1916
1917 return -1;
1918 }
1919
disp_register_sync_finish_proc(void (* proc)(u32))1920 s32 disp_register_sync_finish_proc(void (*proc) (u32))
1921 {
1922 struct proc_list *new_proc;
1923
1924 new_proc =
1925 (struct proc_list *)disp_sys_malloc(sizeof(struct proc_list));
1926 if (new_proc) {
1927 new_proc->proc = proc;
1928 list_add_tail(&(new_proc->list),
1929 &(g_disp_drv.sync_finish_proc_list.list));
1930 } else {
1931 pr_warn("malloc fail in %s\n", __func__);
1932 }
1933
1934 return 0;
1935 }
1936
disp_unregister_sync_finish_proc(void (* proc)(u32))1937 s32 disp_unregister_sync_finish_proc(void (*proc) (u32))
1938 {
1939 struct proc_list *ptr, *ptrtmp;
1940 unsigned long flags;
1941
1942 	if (proc == NULL) {
1943 		pr_warn("hdl is NULL in %s\n", __func__);
1944 		return -1;
1945 	}
1946 	spin_lock_irqsave(&sync_finish_lock, flags);
1947 list_for_each_entry_safe(ptr,
1948 ptrtmp,
1949 &g_disp_drv.sync_finish_proc_list.list,
1950 list) {
1951 if (ptr->proc == proc) {
1952 list_del(&ptr->list);
1953 			kfree((void *)ptr);
1954 			spin_unlock_irqrestore(&sync_finish_lock, flags);
			return 0;
1955 }
1956 }
1957 spin_unlock_irqrestore(&sync_finish_lock, flags);
1958
1959 return -1;
1960 }
1961
disp_sync_finish_process(u32 screen_id)1962 static s32 disp_sync_finish_process(u32 screen_id)
1963 {
1964 struct proc_list *ptr;
1965 unsigned long flags;
1966
1967 spin_lock_irqsave(&sync_finish_lock, flags);
1968 list_for_each_entry(ptr, &g_disp_drv.sync_finish_proc_list.list, list) {
1969 if (ptr->proc)
1970 ptr->proc(screen_id);
1971 }
1972 spin_unlock_irqrestore(&sync_finish_lock, flags);
1973
1974 return 0;
1975 }
1976
disp_register_ioctl_func(unsigned int cmd,int (* proc)(unsigned int cmd,unsigned long arg))1977 s32 disp_register_ioctl_func(unsigned int cmd,
1978 int (*proc)(unsigned int cmd, unsigned long arg))
1979 {
1980 struct ioctl_list *new_proc;
1981
1982 new_proc =
1983 (struct ioctl_list *)disp_sys_malloc(sizeof(struct ioctl_list));
1984 if (new_proc) {
1985 new_proc->cmd = cmd;
1986 new_proc->func = proc;
1987 list_add_tail(&(new_proc->list),
1988 &(g_disp_drv.ioctl_extend_list.list));
1989 } else {
1990 pr_warn("malloc fail in %s\n", __func__);
1991 }
1992
1993 return 0;
1994 }
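/*
 * Usage sketch (the command value and handler below are hypothetical): an
 * extension module can hook a private command into the display ioctl path
 * with this pair of helpers:
 *
 *	static int my_ext_ioctl(unsigned int cmd, unsigned long arg)
 *	{
 *		return 0;
 *	}
 *
 *	disp_register_ioctl_func(MY_EXT_CMD, my_ext_ioctl);
 *	...
 *	disp_unregister_ioctl_func(MY_EXT_CMD);
 *
 * disp_ioctl_extend() below walks this list and dispatches to the handler
 * whose cmd matches.
 */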
1995
disp_unregister_ioctl_func(unsigned int cmd)1996 s32 disp_unregister_ioctl_func(unsigned int cmd)
1997 {
1998 struct ioctl_list *ptr;
1999
2000 list_for_each_entry(ptr, &g_disp_drv.ioctl_extend_list.list, list) {
2001 if (ptr->cmd == cmd) {
2002 list_del(&ptr->list);
2003 kfree((void *)ptr);
2004 return 0;
2005 }
2006 }
2007
2008 pr_warn("no ioctl found(cmd:0x%x) in %s\n", cmd, __func__);
2009 return -1;
2010 }
2011
disp_ioctl_extend(unsigned int cmd,unsigned long arg)2012 static s32 disp_ioctl_extend(unsigned int cmd, unsigned long arg)
2013 {
2014 struct ioctl_list *ptr;
2015
2016 list_for_each_entry(ptr, &g_disp_drv.ioctl_extend_list.list, list) {
2017 if (cmd == ptr->cmd)
2018 return ptr->func(cmd, arg);
2019 }
2020
2021 return -1;
2022 }
2023
disp_register_compat_ioctl_func(unsigned int cmd,int (* proc)(unsigned int cmd,unsigned long arg))2024 s32 disp_register_compat_ioctl_func(unsigned int cmd,
2025 int (*proc)(unsigned int cmd,
2026 unsigned long arg))
2027 {
2028 struct ioctl_list *new_proc;
2029
2030 new_proc =
2031 (struct ioctl_list *)disp_sys_malloc(sizeof(struct ioctl_list));
2032 if (new_proc) {
2033 new_proc->cmd = cmd;
2034 new_proc->func = proc;
2035 list_add_tail(&(new_proc->list),
2036 &(g_disp_drv.compat_ioctl_extend_list.list));
2037 } else {
2038 pr_warn("malloc fail in %s\n", __func__);
2039 }
2040
2041 return 0;
2042 }
2043
disp_unregister_compat_ioctl_func(unsigned int cmd)2044 s32 disp_unregister_compat_ioctl_func(unsigned int cmd)
2045 {
2046 struct ioctl_list *ptr;
2047
2048 list_for_each_entry(ptr, &g_disp_drv.compat_ioctl_extend_list.list,
2049 list) {
2050 if (ptr->cmd == cmd) {
2051 list_del(&ptr->list);
2052 kfree((void *)ptr);
2053 return 0;
2054 }
2055 }
2056
2057 pr_warn("no ioctl found(cmd:0x%x) in %s\n", cmd, __func__);
2058 return -1;
2059 }
2060
2061 #ifdef CONFIG_COMPAT
disp_compat_ioctl_extend(unsigned int cmd,unsigned long arg)2062 static __attribute__((unused)) s32 disp_compat_ioctl_extend(unsigned int cmd, unsigned long arg)
2063 {
2064 struct ioctl_list *ptr;
2065
2066 list_for_each_entry(ptr, &g_disp_drv.compat_ioctl_extend_list.list,
2067 list) {
2068 if (cmd == ptr->cmd)
2069 return ptr->func(cmd, arg);
2070 }
2071
2072 return -1;
2073 }
2074 #endif
2075
disp_register_standby_func(int (* suspend)(void),int (* resume)(void))2076 s32 disp_register_standby_func(int (*suspend) (void), int (*resume) (void))
2077 {
2078 struct standby_cb_list *new_proc;
2079
2080 new_proc = (struct standby_cb_list *)disp_sys_malloc(
2081 sizeof(struct standby_cb_list));
2082 if (new_proc) {
2083 new_proc->suspend = suspend;
2084 new_proc->resume = resume;
2085 list_add_tail(&(new_proc->list),
2086 &(g_disp_drv.stb_cb_list.list));
2087 } else {
2088 pr_warn("malloc fail in %s\n", __func__);
2089 }
2090
2091 return 0;
2092 }
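/*
 * Usage sketch (the callback names are hypothetical): sub-drivers that must
 * be quiesced around display suspend/resume can register a callback pair:
 *
 *	static int my_dev_suspend(void) { return 0; }
 *	static int my_dev_resume(void)  { return 0; }
 *
 *	disp_register_standby_func(my_dev_suspend, my_dev_resume);
 *
 * disp_suspend_cb()/disp_resume_cb() below walk the list and return the
 * result of the first non-NULL callback they find.
 */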
2093
disp_unregister_standby_func(int (* suspend)(void),int (* resume)(void))2094 s32 disp_unregister_standby_func(int (*suspend) (void), int (*resume) (void))
2095 {
2096 struct standby_cb_list *ptr;
2097
2098 list_for_each_entry(ptr, &g_disp_drv.stb_cb_list.list, list) {
2099 if ((ptr->suspend == suspend) && (ptr->resume == resume)) {
2100 list_del(&ptr->list);
2101 kfree((void *)ptr);
2102 return 0;
2103 }
2104 }
2105
2106 return -1;
2107 }
2108
disp_suspend_cb(void)2109 static s32 disp_suspend_cb(void)
2110 {
2111 struct standby_cb_list *ptr;
2112
2113 list_for_each_entry(ptr, &g_disp_drv.stb_cb_list.list, list) {
2114 if (ptr->suspend)
2115 return ptr->suspend();
2116 }
2117
2118 return -1;
2119 }
2120
disp_resume_cb(void)2121 static s32 disp_resume_cb(void)
2122 {
2123 struct standby_cb_list *ptr;
2124
2125 list_for_each_entry(ptr, &g_disp_drv.stb_cb_list.list, list) {
2126 if (ptr->resume)
2127 return ptr->resume();
2128 }
2129
2130 return -1;
2131 }
2132 /**
2133 * drv_disp_vsync_event - wakeup vsync thread
2134 * @sel: the index of display manager
2135 *
2136  * Get the current time, push it into the circular queue,
2137 * and then wakeup the vsync thread.
2138 */
drv_disp_vsync_event(u32 sel)2140 s32 drv_disp_vsync_event(u32 sel)
2141 {
2142 unsigned long flags;
2143 ktime_t now;
2144 unsigned int head, tail, next;
2145 bool full = false;
2146 int cur_line = -1;
2147 struct disp_device *dispdev = NULL;
2148 struct disp_manager *mgr = g_disp_drv.mgr[sel];
2149
2150 if (mgr)
2151 dispdev = mgr->device;
2152 if (dispdev) {
2153 if (dispdev->type == DISP_OUTPUT_TYPE_LCD) {
2154 struct disp_panel_para panel;
2155
2156 if (dispdev->get_panel_info) {
2157 dispdev->get_panel_info(dispdev, &panel);
2158 cur_line = disp_al_lcd_get_cur_line(
2159 dispdev->hwdev_index, &panel);
2160 }
2161 }
2162 #if defined(SUPPORT_EDP)
2163 else if (dispdev->type == DISP_OUTPUT_TYPE_EDP) {
2164 cur_line = -1;
2165 cur_line = disp_edp_get_cur_line(dispdev);
2166 }
2167 #endif
2168 else {
2169 cur_line =
2170 disp_al_device_get_cur_line(dispdev->hwdev_index);
2171 }
2172 }
2173
2174 now = ktime_get();
2175 spin_lock_irqsave(&g_disp_drv.disp_vsync.slock[sel], flags);
2176 head = g_disp_drv.disp_vsync.vsync_timestamp_head[sel];
2177 tail = g_disp_drv.disp_vsync.vsync_timestamp_tail[sel];
2178 next = tail + 1;
2179 next = (next >= VSYNC_NUM) ? 0 : next;
2180 if (next == head)
2181 full = true;
2182
2183 if (!full) {
2184 g_disp_drv.disp_vsync.vsync_timestamp[sel][tail] = now;
2185 g_disp_drv.disp_vsync.vsync_cur_line[sel][tail] = cur_line;
2186 g_disp_drv.disp_vsync.vsync_timestamp_tail[sel] = next;
2187 }
2188 g_disp_drv.disp_vsync.vsync_read[sel] = true;
2189 spin_unlock_irqrestore(&g_disp_drv.disp_vsync.slock[sel], flags);
2190
2191 if (g_disp_drv.disp_vsync.vsync_task[sel])
2192 wake_up_process(g_disp_drv.disp_vsync.vsync_task[sel]);
2193 else
2194 wake_up_interruptible(&g_disp_drv.disp_vsync.vsync_waitq);
2195
2196 if (full)
2197 return -1;
2198 return 0;
2199 }
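/*
 * Ring-buffer note: vsync timestamps are stored in a fixed array of
 * VSYNC_NUM slots per display with head/tail indices that wrap to 0. One
 * slot is left unused so that "next == head" means full while
 * "head == tail" means empty. With VSYNC_NUM == 4 (an illustrative value
 * only; the real one comes from the driver headers):
 *
 *	head = 0, tail = 3  ->  next wraps to 0 == head  ->  sample dropped
 *	head = 2, tail = 2  ->  empty, nothing for vsync_proc() to report
 */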
2200
2201 /**
2202 * vsync_proc - sends vsync message
2203 * @disp: the index of display manager
2204 *
2205  * Get the timestamps from the circular queue,
2206  * and send each one within a vsync uevent to userspace.
2207 */
vsync_proc(u32 disp)2209 static int vsync_proc(u32 disp)
2210 {
2211 char buf[64];
2212 char *envp[2];
2213 unsigned long flags;
2214 unsigned int head, tail, next;
2215 ktime_t time;
2216 s64 ts;
2217 int cur_line = -1, start_delay = -1;
2218 struct disp_device *dispdev = NULL;
2219 struct disp_manager *mgr = g_disp_drv.mgr[disp];
2220 u32 total_lines = 0;
2221 u64 period = 0;
2222
2223 if (mgr)
2224 dispdev = mgr->device;
2225 if (dispdev) {
2226 start_delay = dispdev->timings.start_delay;
2227 total_lines = dispdev->timings.ver_total_time;
2228 period = dispdev->timings.frame_period;
2229 }
2230
2231 spin_lock_irqsave(&g_disp_drv.disp_vsync.slock[disp], flags);
2232 head = g_disp_drv.disp_vsync.vsync_timestamp_head[disp];
2233 tail = g_disp_drv.disp_vsync.vsync_timestamp_tail[disp];
2234 while (head != tail) {
2235 time = g_disp_drv.disp_vsync.vsync_timestamp[disp][head];
2236 cur_line = g_disp_drv.disp_vsync.vsync_cur_line[disp][head];
2237 next = head + 1;
2238 next = (next >= VSYNC_NUM) ? 0 : next;
2239 g_disp_drv.disp_vsync.vsync_timestamp_head[disp] = next;
2240 spin_unlock_irqrestore(&g_disp_drv.disp_vsync.slock[disp], flags);
2241
2242 ts = ktime_to_ns(time);
2243 if ((cur_line >= 0)
2244 && (period > 0)
2245 && (start_delay >= 0)
2246 && (total_lines > 0)
2247 && (cur_line != start_delay)) {
2248 u64 tmp;
2249
2250 if (cur_line < start_delay) {
2251 tmp = (start_delay - cur_line) * period;
2252 do_div(tmp, total_lines);
2253 ts += tmp;
2254 } else {
2255 tmp = (cur_line - start_delay) * period;
2256 do_div(tmp, total_lines);
2257 ts -= tmp;
2258 }
2259 }
2260 snprintf(buf, sizeof(buf), "VSYNC%d=%llu", disp, ts);
2261 envp[0] = buf;
2262 envp[1] = NULL;
2263 kobject_uevent_env(&g_disp_drv.dev->kobj, KOBJ_CHANGE, envp);
2264
2265 spin_lock_irqsave(&g_disp_drv.disp_vsync.slock[disp], flags);
2266 head = g_disp_drv.disp_vsync.vsync_timestamp_head[disp];
2267 tail = g_disp_drv.disp_vsync.vsync_timestamp_tail[disp];
2268 }
2269
2270 spin_unlock_irqrestore(&g_disp_drv.disp_vsync.slock[disp], flags);
2271
2272 return 0;
2273 }
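/*
 * Timestamp correction, as implemented above and again in
 * bsp_disp_get_vsync_timestamp(): the interrupt may be handled while the
 * scanout sits at cur_line instead of exactly at start_delay, so the raw
 * ktime is shifted by the scanline offset:
 *
 *	cur_line < start_delay: ts += (start_delay - cur_line) * period / total_lines
 *	cur_line > start_delay: ts -= (cur_line - start_delay) * period / total_lines
 *
 * For example (illustrative numbers only), a 60 Hz timing with
 * period ~= 16666667 ns, total_lines = 1125 and a 10-line offset gives an
 * adjustment of roughly 148 us.
 */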
vsync_poll(struct file * file,poll_table * wait)2274 unsigned int vsync_poll(struct file *file, poll_table *wait)
2275 {
2276 unsigned long flags;
2277 int ret = 0;
2278 int disp;
2279 for (disp = 0; disp < DISP_SCREEN_NUM; disp++) {
2280 spin_lock_irqsave(&g_disp_drv.disp_vsync.slock[disp], flags);
2281 		ret |= g_disp_drv.disp_vsync.vsync_read[disp] ? POLLIN : 0;
2282 spin_unlock_irqrestore(&g_disp_drv.disp_vsync.slock[disp], flags);
2283 }
2284 if (ret == 0)
2285 poll_wait(file, &g_disp_drv.disp_vsync.vsync_waitq, wait);
2286 return ret;
2287 }
2288
bsp_disp_get_vsync_timestamp(int disp,int64_t * timestamp)2289 int bsp_disp_get_vsync_timestamp(int disp, int64_t *timestamp)
2290 {
2291 unsigned long flags;
2292 unsigned int head, tail, next;
2293 ktime_t time;
2294 s64 ts;
2295 int cur_line = -1, start_delay = -1;
2296 struct disp_device *dispdev = NULL;
2297 struct disp_manager *mgr = g_disp_drv.mgr[disp];
2298 u32 total_lines = 0;
2299 u64 period = 0;
2300 u64 tmp;
2301
2302 if (mgr)
2303 dispdev = mgr->device;
2304 if (dispdev) {
2305 start_delay = dispdev->timings.start_delay;
2306 total_lines = dispdev->timings.ver_total_time;
2307 period = dispdev->timings.frame_period;
2308 }
2309
2310 spin_lock_irqsave(&g_disp_drv.disp_vsync.slock[disp], flags);
2311 head = g_disp_drv.disp_vsync.vsync_timestamp_head[disp];
2312 tail = g_disp_drv.disp_vsync.vsync_timestamp_tail[disp];
2313
2314 time = g_disp_drv.disp_vsync.vsync_timestamp[disp][head];
2315 cur_line = g_disp_drv.disp_vsync.vsync_cur_line[disp][head];
2316 next = head + 1;
2317 next = (next >= VSYNC_NUM) ? 0 : next;
2318 g_disp_drv.disp_vsync.vsync_timestamp_head[disp] = next;
2319 head = g_disp_drv.disp_vsync.vsync_timestamp_head[disp];
2320 tail = g_disp_drv.disp_vsync.vsync_timestamp_tail[disp];
2321 if (head == tail)
2322 g_disp_drv.disp_vsync.vsync_read[disp] = false;
2323 spin_unlock_irqrestore(&g_disp_drv.disp_vsync.slock[disp], flags);
2324
2325 ts = ktime_to_ns(time);
2326 if ((cur_line >= 0)
2327 && (period > 0)
2328 && (start_delay >= 0)
2329 && (total_lines > 0)
2330 && (cur_line != start_delay)) {
2331
2332 if (cur_line < start_delay) {
2333 tmp = (start_delay - cur_line) * period;
2334 do_div(tmp, total_lines);
2335 ts += tmp;
2336 } else {
2337 tmp = (cur_line - start_delay) * period;
2338 do_div(tmp, total_lines);
2339 ts -= tmp;
2340 }
2341 }
2342 *timestamp = ts;
2343
2344 return 0;
2345 }
2346
vsync_thread(void * parg)2347 int vsync_thread(void *parg)
2348 {
2349 unsigned long disp = (unsigned long)parg;
2350
2351 while (1) {
2352
2353 vsync_proc(disp);
2354 set_current_state(TASK_INTERRUPTIBLE);
2355 schedule();
2356 if (kthread_should_stop())
2357 break;
2358 set_current_state(TASK_RUNNING);
2359 }
2360
2361 return 0;
2362 }
2363
disp_init(struct platform_device * pdev)2364 static s32 disp_init(struct platform_device *pdev)
2365 {
2366 struct disp_bsp_init_para *para;
2367 int i, disp, num_screens;
2368 unsigned int value, value1, value2, output_type, output_mode;
2369 unsigned int output_format, output_bits, output_eotf, output_cs;
2370
2371 __inf("%s !\n", __func__);
2372
2373 INIT_WORK(&g_disp_drv.resume_work[0], resume_work_0);
2374 #if DISP_SCREEN_NUM > 1
2375 INIT_WORK(&g_disp_drv.resume_work[1], resume_work_1);
2376 #endif
2377 /* INIT_WORK(&g_disp_drv.resume_work[2], resume_work_2); */
2378 INIT_WORK(&g_disp_drv.start_work, start_work);
2379 INIT_LIST_HEAD(&g_disp_drv.sync_proc_list.list);
2380 INIT_LIST_HEAD(&g_disp_drv.sync_finish_proc_list.list);
2381 INIT_LIST_HEAD(&g_disp_drv.ioctl_extend_list.list);
2382 INIT_LIST_HEAD(&g_disp_drv.compat_ioctl_extend_list.list);
2383 INIT_LIST_HEAD(&g_disp_drv.stb_cb_list.list);
2384 mutex_init(&g_disp_drv.mlock);
2385 spin_lock_init(&sync_finish_lock);
2386 parser_disp_init_para(pdev->dev.of_node, &g_disp_drv.disp_init);
2387 para = &g_disp_drv.para;
2388
2389 memset(para, 0, sizeof(struct disp_bsp_init_para));
2390 for (i = 0; i < DISP_MOD_NUM; i++) {
2391 para->reg_base[i] = g_disp_drv.reg_base[i];
2392 para->irq_no[i] = g_disp_drv.irq_no[i];
2393 __inf("mod %d, base=0x%lx, irq=%d\n", i, para->reg_base[i], para->irq_no[i]);
2394 }
2395
2396 for (i = 0; i < DE_NUM; i++) {
2397 para->clk_de[i] = g_disp_drv.clk_de[i];
2398 para->clk_bus_de[i] = g_disp_drv.clk_bus_de[i];
2399 para->rst_bus_de[i] = g_disp_drv.rst_bus_de[i];
2400 }
2401
2402 #if defined(HAVE_DEVICE_COMMON_MODULE)
2403 para->clk_bus_extra = g_disp_drv.clk_bus_extra;
2404 para->rst_bus_extra = g_disp_drv.rst_bus_extra;
2405 #endif
2406 for (i = 0; i < DISP_DEVICE_NUM; i++) {
2407 para->clk_bus_dpss_top[i] = g_disp_drv.clk_bus_dpss_top[i];
2408 para->clk_tcon[i] = g_disp_drv.clk_tcon[i];
2409 para->clk_bus_tcon[i] = g_disp_drv.clk_bus_tcon[i];
2410 para->rst_bus_dpss_top[i] = g_disp_drv.rst_bus_dpss_top[i];
2411 para->rst_bus_tcon[i] = g_disp_drv.rst_bus_tcon[i];
2412 }
2413
2414 #if defined(SUPPORT_DSI)
2415 for (i = 0; i < CLK_DSI_NUM; i++) {
2416 para->clk_mipi_dsi[i] = g_disp_drv.clk_mipi_dsi[i];
2417 para->clk_bus_mipi_dsi[i] = g_disp_drv.clk_bus_mipi_dsi[i];
2418 }
2419
2420 for (i = 0; i < DEVICE_DSI_NUM; i++)
2421 para->rst_bus_mipi_dsi[i] = g_disp_drv.rst_bus_mipi_dsi[i];
2422 #endif
2423
2424 #if defined(SUPPORT_LVDS)
2425 for (i = 0; i < DEVICE_LVDS_NUM; i++)
2426 para->rst_bus_lvds[i] = g_disp_drv.rst_bus_lvds[i];
2427 #endif
2428
2429 para->disp_int_process = disp_sync_finish_process;
2430 para->vsync_event = drv_disp_vsync_event;
2431 para->start_process = start_process;
2432
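	/*
	 * The boot_disp/boot_disp1/boot_disp2 values handed over by the
	 * bootloader are unpacked below as (derived from the shifts and
	 * masks that follow, not from a separate spec):
	 *   boot_disp : bits[15:8] type, bits[7:0] mode for disp0;
	 *               bits[31:24]/[23:16] the same pair for disp1.
	 *   boot_disp1: bits[7:0] format, bits[15:8] bits, bits[31:16] cs.
	 *   boot_disp2: bits[7:0] eotf.
	 */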
2433 value = disp_boot_para_parse("boot_disp");
2434 value1 = disp_boot_para_parse("boot_disp1");
2435 value2 = disp_boot_para_parse("boot_disp2");
2436 output_type = (value >> 8) & 0xff;
2437 output_mode = (value) & 0xff;
2438
2439 output_format = (value1 >> 0) & 0xff;
2440 output_bits = (value1 >> 8) & 0xff;
2441 output_cs = (value1 >> 16) & 0xffff;
2442 output_eotf = (value2 >> 0) & 0xff;
2443
2444 if (output_type != (int)DISP_OUTPUT_TYPE_NONE) {
2445 para->boot_info.sync = 1;
2446 para->boot_info.disp = 0; /* disp0 */
2447 para->boot_info.type = output_type;
2448 para->boot_info.mode = output_mode;
2449 para->boot_info.format = output_format;
2450 para->boot_info.bits = output_bits;
2451 para->boot_info.cs = output_cs;
2452 para->boot_info.eotf = output_eotf;
2453 } else {
2454 output_type = (value >> 24) & 0xff;
2455 output_mode = (value >> 16) & 0xff;
2456 if (output_type != (int)DISP_OUTPUT_TYPE_NONE) {
2457 para->boot_info.sync = 1;
2458 para->boot_info.disp = 1; /* disp1 */
2459 para->boot_info.type = output_type;
2460 para->boot_info.mode = output_mode;
2461 para->boot_info.format = output_format;
2462 para->boot_info.bits = output_bits;
2463 para->boot_info.cs = output_cs;
2464 para->boot_info.eotf = output_eotf;
2465 }
2466 }
2467
2468 para->boot_info.dvi_hdmi =
2469 g_disp_drv.disp_init.output_dvi_hdmi[para->boot_info.disp];
2470 para->boot_info.range =
2471 g_disp_drv.disp_init.output_range[para->boot_info.disp];
2472 para->boot_info.scan =
2473 g_disp_drv.disp_init.output_scan[para->boot_info.disp];
2474 para->boot_info.aspect_ratio =
2475 g_disp_drv.disp_init.output_aspect_ratio[para->boot_info.disp];
2476
2477 if (para->boot_info.sync == 1) {
2478 __wrn("smooth display screen:%d type:%d mode:%d\n", para->boot_info.disp,
2479 para->boot_info.type, para->boot_info.mode);
2480 g_disp_drv.disp_init.disp_mode = para->boot_info.disp;
2481 g_disp_drv.disp_init.output_type[para->boot_info.disp] =
2482 output_type;
2483 g_disp_drv.disp_init.output_mode[para->boot_info.disp] =
2484 output_mode;
2485 g_disp_drv.disp_init.output_format[para->boot_info.disp] =
2486 output_format;
2487 g_disp_drv.disp_init.output_bits[para->boot_info.disp] =
2488 output_bits;
2489 g_disp_drv.disp_init.output_cs[para->boot_info.disp] =
2490 output_cs;
2491 g_disp_drv.disp_init.output_eotf[para->boot_info.disp] =
2492 output_eotf;
2493 }
2494
2495 para->feat_init.chn_cfg_mode = g_disp_drv.disp_init.chn_cfg_mode;
2496
2497 bsp_disp_init(para);
2498
2499 /*if (bsp_disp_check_device_enabled(para) == 0)
2500 para->boot_info.sync = 0;
2501 */
2502 num_screens = bsp_disp_feat_get_num_screens();
2503 for (disp = 0; disp < num_screens; disp++) {
2504 g_disp_drv.mgr[disp] = disp_get_layer_manager(disp);
2505 spin_lock_init(&g_disp_drv.disp_vsync.slock[disp]);
2506 #ifdef VSYNC_USE_UEVENT
2507 char task_name[25];
2508 sprintf(task_name, "vsync proc %d", disp);
2509 g_disp_drv.disp_vsync.vsync_task[disp] =
2510 kthread_create(vsync_thread, (void *)(unsigned long)disp, task_name);
2511 if (IS_ERR(g_disp_drv.disp_vsync.vsync_task[disp])) {
2512 s32 err = 0;
2513
2514 			__wrn("Unable to start kernel thread %s.\n",
2515 			      task_name);
2516 err = PTR_ERR(g_disp_drv.disp_vsync.vsync_task[disp]);
2517 g_disp_drv.disp_vsync.vsync_task[disp] = NULL;
2518 } else {
2519 wake_up_process(g_disp_drv.disp_vsync.vsync_task[disp]);
2520 }
2521 #endif
2522 }
2523 init_waitqueue_head(&g_disp_drv.disp_vsync.vsync_waitq);
2524
2525 #if defined(SUPPORT_EINK)
2526 g_disp_drv.eink_manager[0] = disp_get_eink_manager(0);
2527 #endif
2528 lcd_init();
2529 bsp_disp_open();
2530 fb_init(pdev);
2531 #if defined(CONFIG_DISP2_SUNXI_COMPOSER)
2532 composer_init(&g_disp_drv);
2533 #endif
2534 g_disp_drv.inited = true;
2535 start_process();
2536
2537 __inf("%s finish\n", __func__);
2538 return 0;
2539 }
2540
disp_exit(void)2541 static s32 disp_exit(void)
2542 {
2543 unsigned int i;
2544 unsigned int num_screens;
2545
2546 num_screens = bsp_disp_feat_get_num_screens();
2547 for (i = 0; i < num_screens; i++) {
2548 if (g_disp_drv.disp_vsync.vsync_task[i] && !IS_ERR(g_disp_drv.disp_vsync.vsync_task[i])) {
2549 kthread_stop(g_disp_drv.disp_vsync.vsync_task[i]);
2550 g_disp_drv.disp_vsync.vsync_task[i] = NULL;
2551 }
2552 }
2553
2554 fb_exit();
2555 bsp_disp_close();
2556 bsp_disp_exit(g_disp_drv.exit_mode);
2557 return 0;
2558 }
2559
disp_mem_request(int sel,u32 size)2560 static int disp_mem_request(int sel, u32 size)
2561 {
2562
2563 #if IS_ENABLED(CONFIG_DMABUF_HEAPS)
2564 if (sel >= DISP_MEM_NUM || !size) {
2565 __wrn("invalid param\n");
2566 return -EINVAL;
2567 }
2568 g_disp_mm[sel].p_ion_mem = disp_ion_malloc(size, (u32 *)(&g_disp_mm[sel].mem_start));
2569 if (g_disp_mm[sel].p_ion_mem) {
2570 g_disp_mm[sel].info_base = (char __iomem *)g_disp_mm[sel].p_ion_mem->vaddr;
2571 g_disp_mm[sel].mem_len = size;
2572 g_disp_mem_id = sel;
2573 return 0;
2574 } else {
2575 return -ENOMEM;
2576 }
2577 #else
2578
2579 #ifndef FB_RESERVED_MEM
2580 unsigned int map_size = 0;
2581 struct page *page;
2582
2583 if ((sel >= DISP_MEM_NUM) ||
2584 (g_disp_mm[sel].info_base != NULL)) {
2585 __wrn("invalid param\n");
2586 return -EINVAL;
2587 }
2588
2589 g_disp_mm[sel].mem_len = size;
2590 map_size = PAGE_ALIGN(g_disp_mm[sel].mem_len);
2591
2592 page = alloc_pages(GFP_KERNEL, get_order(map_size));
2593 if (page != NULL) {
2594 g_disp_mm[sel].info_base = page_address(page);
2595 if (g_disp_mm[sel].info_base == NULL) {
2596 free_pages((unsigned long)(page), get_order(map_size));
2597 __wrn("page_address fail!\n");
2598 return -ENOMEM;
2599 }
2600 g_disp_mm[sel].mem_start =
2601 virt_to_phys(g_disp_mm[sel].info_base);
2602 memset(g_disp_mm[sel].info_base, 0, size);
2603
2604 __inf("pa=0x%p va=0x%p size:0x%x\n",
2605 (void *)g_disp_mm[sel].mem_start,
2606 g_disp_mm[sel].info_base, size);
2607 g_disp_mem_id = sel;
2608 return 0;
2609 }
2610
2611 __wrn("alloc_pages fail!\n");
2612 return -ENOMEM;
2613 #else
2614 uintptr_t phy_addr;
2615
2616 if ((sel >= DISP_MEM_NUM) ||
2617 (g_disp_mm[sel].info_base != NULL)) {
2618 __wrn("invalid param\n");
2619 return -EINVAL;
2620 }
2621
2622 g_disp_mm[sel].info_base = disp_malloc(size, (void *)&phy_addr);
2623 if (g_disp_mm[sel].info_base) {
2624 g_disp_mm[sel].mem_start = phy_addr;
2625 g_disp_mm[sel].mem_len = size;
2626 memset(g_disp_mm[sel].info_base, 0, size);
2627 __inf("pa=0x%p va=0x%p size:0x%x\n",
2628 (void *)g_disp_mm[sel].mem_start,
2629 g_disp_mm[sel].info_base, size);
2630 g_disp_mem_id = sel;
2631
2632 return 0;
2633 }
2634
2635 __wrn("disp_malloc fail!\n");
2636 return -ENOMEM;
2637 #endif
2638 #endif
2639 }
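/*
 * Summary of the allocation paths above (restating the code, not a separate
 * spec):
 *  - CONFIG_DMABUF_HEAPS: disp_ion_malloc() supplies the buffer, its
 *    device address lands in mem_start and the kernel mapping in
 *    p_ion_mem->vaddr;
 *  - !FB_RESERVED_MEM:    plain alloc_pages() + virt_to_phys();
 *  - FB_RESERVED_MEM:     disp_malloc() from the reserved framebuffer pool.
 * In every successful case g_disp_mem_id records the slot that disp_mmap()
 * exposes to user space.
 */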
2640
disp_mem_release(int sel)2641 static int disp_mem_release(int sel)
2642 {
2643 #if IS_ENABLED(CONFIG_DMABUF_HEAPS)
2644 if (!g_disp_mm[sel].info_base) {
2645 __wrn("invalid param\n");
2646 return -EINVAL;
2647 }
2648 disp_ion_free((void *__force)g_disp_mm[sel].info_base,
2649 (void *)g_disp_mm[sel].mem_start, g_disp_mm[sel].mem_len);
2650 #else
2651
2652 #ifndef FB_RESERVED_MEM
2653 unsigned int map_size;
2654 unsigned int page_size;
2655
2656 if (g_disp_mm[sel].info_base == NULL) {
2657 __wrn("invalid param\n");
2658 return -EINVAL;
2659 }
2660
2661 map_size = PAGE_ALIGN(g_disp_mm[sel].mem_len);
2662 page_size = map_size;
2663
2664 free_pages((unsigned long)(g_disp_mm[sel].info_base),
2665 get_order(page_size));
2666 memset(&g_disp_mm[sel], 0, sizeof(struct info_mm));
2667 #else
2668 if (g_disp_mm[sel].info_base == NULL)
2669 return -EINVAL;
2670
2671 __inf("disp_mem_release, mem_id=%d, phy_addr=0x%p\n", sel,
2672 (void *)g_disp_mm[sel].mem_start);
2673 disp_free((void *)g_disp_mm[sel].info_base,
2674 (void *)g_disp_mm[sel].mem_start, g_disp_mm[sel].mem_len);
2675 memset(&g_disp_mm[sel], 0, sizeof(struct info_mm));
2676 #endif
2677 #endif
2678 g_disp_mem_id = -1;
2679 return 0;
2680 }
2681
sunxi_disp_get_source_ops(struct sunxi_disp_source_ops * src_ops)2682 int sunxi_disp_get_source_ops(struct sunxi_disp_source_ops *src_ops)
2683 {
2684 memset((void *)src_ops, 0, sizeof(struct sunxi_disp_source_ops));
2685
2686 src_ops->sunxi_lcd_set_panel_funs = bsp_disp_lcd_set_panel_funs;
2687 src_ops->sunxi_lcd_delay_ms = disp_delay_ms;
2688 src_ops->sunxi_lcd_delay_us = disp_delay_us;
2689 src_ops->sunxi_lcd_backlight_enable = bsp_disp_lcd_backlight_enable;
2690 src_ops->sunxi_lcd_backlight_disable = bsp_disp_lcd_backlight_disable;
2691 src_ops->sunxi_lcd_pwm_enable = bsp_disp_lcd_pwm_enable;
2692 src_ops->sunxi_lcd_pwm_disable = bsp_disp_lcd_pwm_disable;
2693 src_ops->sunxi_lcd_power_enable = bsp_disp_lcd_power_enable;
2694 src_ops->sunxi_lcd_power_disable = bsp_disp_lcd_power_disable;
2695 src_ops->sunxi_lcd_tcon_enable = bsp_disp_lcd_tcon_enable;
2696 src_ops->sunxi_lcd_tcon_disable = bsp_disp_lcd_tcon_disable;
2697 src_ops->sunxi_lcd_pin_cfg = bsp_disp_lcd_pin_cfg;
2698 src_ops->sunxi_lcd_gpio_set_value = bsp_disp_lcd_gpio_set_value;
2699 src_ops->sunxi_lcd_gpio_set_direction = bsp_disp_lcd_gpio_set_direction;
2700 #ifdef SUPPORT_DSI
2701 src_ops->sunxi_lcd_dsi_dcs_write = bsp_disp_lcd_dsi_dcs_wr;
2702 src_ops->sunxi_lcd_dsi_gen_write = bsp_disp_lcd_dsi_gen_wr;
2703 src_ops->sunxi_lcd_dsi_clk_enable = bsp_disp_lcd_dsi_clk_enable;
2704 src_ops->sunxi_lcd_dsi_mode_switch = bsp_disp_lcd_dsi_mode_switch;
2705 src_ops->sunxi_lcd_dsi_gen_short_read = bsp_disp_lcd_dsi_gen_short_read;
2706 src_ops->sunxi_lcd_dsi_dcs_read = bsp_disp_lcd_dsi_dcs_read;
2707 src_ops->sunxi_lcd_dsi_set_max_ret_size = bsp_disp_lcd_set_max_ret_size;
2708 #endif
2709 src_ops->sunxi_lcd_cpu_write = tcon0_cpu_wr_16b;
2710 src_ops->sunxi_lcd_cpu_write_data = tcon0_cpu_wr_16b_data;
2711 src_ops->sunxi_lcd_cpu_write_index = tcon0_cpu_wr_16b_index;
2712 src_ops->sunxi_lcd_cpu_set_auto_mode = tcon0_cpu_set_auto_mode;
2713
2714 return 0;
2715 }
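/*
 * Usage sketch (hedged; the exact call order depends on the panel): LCD
 * panel drivers typically fetch this ops table once and go through it
 * instead of calling the bsp_disp_* helpers directly, e.g.
 *
 *	static struct sunxi_disp_source_ops src_ops;
 *
 *	sunxi_disp_get_source_ops(&src_ops);
 *	src_ops.sunxi_lcd_delay_ms(20);
 *
 * The lines above only illustrate the indirection, not a required sequence.
 */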
2716
disp_mmap(struct file * file,struct vm_area_struct * vma)2717 int disp_mmap(struct file *file, struct vm_area_struct *vma)
2718 {
2719
2720 unsigned int off = vma->vm_pgoff << PAGE_SHIFT;
2721
2722 int mem_id = g_disp_mem_id;
2723
2724 if (mem_id >= DISP_MEM_NUM || mem_id < 0 ||
2725 !g_disp_mm[mem_id].info_base) {
2726 __wrn("invalid param\n");
2727 return -EINVAL;
2728 }
2729
2730 if (off < g_disp_mm[mem_id].mem_len) {
2731 #if IS_ENABLED(CONFIG_AW_IOMMU)
2732 if (g_disp_mm[mem_id].p_ion_mem)
2733 return g_disp_mm[mem_id].p_ion_mem->p_item->dmabuf->ops->mmap(g_disp_mm[mem_id].p_ion_mem->p_item->dmabuf, vma);
2734 else
2735 return -EINVAL;
2736 #else
2737 return dma_mmap_writecombine(
2738 g_disp_drv.dev, vma, g_disp_mm[mem_id].info_base,
2739 g_disp_mm[mem_id].mem_start, g_disp_mm[mem_id].mem_len);
2740
2741 #endif
2742 }
2743
2744 return -EINVAL;
2745 }
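/*
 * Userspace sketch (assumes the memory block was requested beforehand so
 * that g_disp_mem_id is valid): mapping the currently selected block is then
 *
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       disp_fd, 0);
 *
 * where disp_fd is the opened display device node and the offset must stay
 * below g_disp_mm[id].mem_len; anything else returns -EINVAL as coded above.
 */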
2746
disp_open(struct inode * inode,struct file * file)2747 int disp_open(struct inode *inode, struct file *file)
2748 {
2749 atomic_inc(&g_driver_ref_count);
2750 return 0;
2751 }
2752
disp_device_off(void)2753 void disp_device_off(void)
2754 {
2755 int num_screens = 0, i = 0, j = 0;
2756 struct disp_manager *mgr = NULL;
2757
2758 memset(lyr_cfg, 0, 16*sizeof(struct disp_layer_config));
2759
2760 for (i = 0; i < 4; ++i) {
2761 for (j = 0; j < 4; ++j) {
2762 			lyr_cfg[i * 4 + j].enable = false;
2763 			lyr_cfg[i * 4 + j].channel = i;
2764 			lyr_cfg[i * 4 + j].layer_id = j;
2765 }
2766 }
2767 num_screens = bsp_disp_feat_get_num_screens();
2768 for (i = 0; i < num_screens; ++i) {
2769 mgr = g_disp_drv.mgr[i];
2770 if (mgr && mgr->device) {
2771 if (mgr->device->disable && mgr->device->is_enabled) {
2772 if (mgr->device->is_enabled(mgr->device)) {
2773 mgr->set_layer_config(mgr, lyr_cfg, 16);
2774 disp_delay_ms(20);
2775 mgr->device->disable(mgr->device);
2776 }
2777 }
2778 }
2779 }
2780 }
2781
disp_release(struct inode * inode,struct file * file)2782 int disp_release(struct inode *inode, struct file *file)
2783 {
2784 #if 0
2785 if (!atomic_dec_and_test(&g_driver_ref_count)) {
2786 /* There is any other user, just return. */
2787 return 0;
2788 }
2789
2790 #ifdef CONFIG_DISP2_SUNXI_DEVICE_OFF_ON_RELEASE
2791 disp_device_off();
2792 #endif
2793 #endif
2794 return 0;
2795 }
2796
disp_read(struct file * file,char __user * buf,size_t count,loff_t * ppos)2797 ssize_t disp_read(struct file *file, char __user *buf, size_t count,
2798 loff_t *ppos)
2799 {
2800 return 0;
2801 }
2802
disp_write(struct file * file,const char __user * buf,size_t count,loff_t * ppos)2803 ssize_t disp_write(struct file *file, const char __user *buf, size_t count,
2804 loff_t *ppos)
2805 {
2806 return 0;
2807 }
2808
disp_clk_get_wrap(struct disp_drv_info * disp_drv)2809 static int disp_clk_get_wrap(struct disp_drv_info *disp_drv)
2810 {
2811 int i;
2812 char id[32];
2813 struct device *dev = disp_drv->dev;
2814
2815 /* get clocks for de */
2816 for (i = 0; i < DE_NUM; i++) {
2817 sprintf(id, "clk_de%d", i);
2818 disp_drv->clk_de[i] = devm_clk_get(dev, id);
2819 if (IS_ERR(disp_drv->clk_de[i])) {
2820 disp_drv->clk_de[i] = NULL;
2821 dev_err(dev, "failed to get clk for %s\n", id);
2822 return -EINVAL;
2823 }
2824
2825 sprintf(id, "clk_bus_de%d", i);
2826 disp_drv->clk_bus_de[i] = devm_clk_get(dev, id);
2827 if (IS_ERR(disp_drv->clk_bus_de[i])) {
2828 disp_drv->clk_bus_de[i] = NULL;
2829 dev_err(dev, "failed to get clk for %s\n", id);
2830 return -EINVAL;
2831 }
2832 }
2833
2834 for (i = 0; i < DISP_DEVICE_NUM; i++) {
2835 #if defined(HAVE_DEVICE_COMMON_MODULE)
2836 /* get clocks for dpss */
2837 sprintf(id, "clk_bus_dpss_top%d", i);
2838 disp_drv->clk_bus_dpss_top[i] = devm_clk_get(dev, id);
2839 if (IS_ERR(disp_drv->clk_bus_dpss_top[i])) {
2840 disp_drv->clk_bus_dpss_top[i] = NULL;
2841 dev_err(dev, "failed to get clk for %s\n", id);
2842 }
2843
2844 #endif
2845 /* get clocks for tcon */
2846 sprintf(id, "clk_tcon%d", i);
2847 disp_drv->clk_tcon[i] = devm_clk_get(dev, id);
2848 if (IS_ERR(disp_drv->clk_tcon[i])) {
2849 disp_drv->clk_tcon[i] = NULL;
2850 dev_err(dev, "failed to get clk for %s\n", id);
2851 }
2852
2853 sprintf(id, "clk_bus_tcon%d", i);
2854 disp_drv->clk_bus_tcon[i] = devm_clk_get(dev, id);
2855 if (IS_ERR(disp_drv->clk_bus_tcon[i])) {
2856 disp_drv->clk_bus_tcon[i] = NULL;
2857 dev_err(dev, "failed to get clk for %s\n", id);
2858 }
2859 }
2860
2861 #if defined(SUPPORT_DSI)
2862 for (i = 0; i < CLK_DSI_NUM; i++) {
2863 sprintf(id, "clk_mipi_dsi%d", i);
2864 disp_drv->clk_mipi_dsi[i] = devm_clk_get(dev, id);
2865 if (IS_ERR(disp_drv->clk_mipi_dsi[i])) {
2866 disp_drv->clk_mipi_dsi[i] = NULL;
2867 dev_err(dev, "failed to get clk for %s\n", id);
2868 return -EINVAL;
2869 }
2870
2871 sprintf(id, "clk_bus_mipi_dsi%d", i);
2872 disp_drv->clk_bus_mipi_dsi[i] = devm_clk_get(dev, id);
2873 if (IS_ERR(disp_drv->clk_bus_mipi_dsi[i])) {
2874 disp_drv->clk_bus_mipi_dsi[i] = NULL;
2875 dev_err(dev, "failed to get clk for %s\n", id);
2876 return -EINVAL;
2877 }
2878 }
2879 #endif
2880
2881 #if defined(CONFIG_ARCH_SUN50IW10)
2882 disp_drv->clk_bus_extra = devm_clk_get(dev, "clk_pll_com");
2883 if (IS_ERR(disp_drv->clk_bus_extra)) {
2884 disp_drv->clk_bus_extra = NULL;
2885 dev_err(dev, "failed to get clk for display top!\n");
2886 return -EINVAL;
2887 }
2888 #endif
2889 return 0;
2890 }
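/*
 * The lookups above rely on the device tree clock-names matching the
 * generated ids ("clk_de0", "clk_bus_de0", "clk_tcon0", "clk_bus_tcon0",
 * "clk_mipi_dsi0", ...). A hypothetical node fragment:
 *
 *	clocks = <&ccu CLK_DE>, <&ccu CLK_BUS_DE>, ...;
 *	clock-names = "clk_de0", "clk_bus_de0", ...;
 *
 * Missing de/dsi clocks (and the sun50iw10 extra clock) are fatal and
 * return -EINVAL, while missing dpss/tcon clocks only log an error.
 */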
2891
disp_clk_put_wrap(struct disp_drv_info * disp_drv)2892 static void disp_clk_put_wrap(struct disp_drv_info *disp_drv)
2893 {
2894 int i;
2895 struct device *dev = disp_drv->dev;
2896
2897 /* put clocks for de */
2898 for (i = 0; i < DE_NUM; i++) {
2899 devm_clk_put(dev, disp_drv->clk_de[i]);
2900 devm_clk_put(dev, disp_drv->clk_bus_de[i]);
2901 }
2902
2903 #if defined(HAVE_DEVICE_COMMON_MODULE)
2904 devm_clk_put(dev, disp_drv->clk_bus_extra);
2905 #endif
2906
2907 for (i = 0; i < DISP_DEVICE_NUM; i++) {
2908 /* put clocks for dpss */
2909 devm_clk_put(dev, disp_drv->clk_bus_dpss_top[i]);
2910
2911 /* put clocks for tcon */
2912 devm_clk_put(dev, disp_drv->clk_tcon[i]);
2913 devm_clk_put(dev, disp_drv->clk_bus_tcon[i]);
2914 }
2915
2916 #if defined(SUPPORT_DSI)
2917 /* put clocks for dsi */
2918 for (i = 0; i < CLK_DSI_NUM; i++) {
2919 devm_clk_put(dev, disp_drv->clk_mipi_dsi[i]);
2920 devm_clk_put(dev, disp_drv->clk_bus_mipi_dsi[i]);
2921 }
2922 #endif
2923 }
2924
disp_reset_control_get_wrap(struct disp_drv_info * disp_drv)2925 static int disp_reset_control_get_wrap(struct disp_drv_info *disp_drv)
2926 {
2927 int i;
2928 char id[32];
2929 struct device *dev = disp_drv->dev;
2930 for (i = 0; i < DE_NUM; i++) {
2931 /* get resets for de */
2932 sprintf(id, "rst_bus_de%d", i);
2933 disp_drv->rst_bus_de[i] = devm_reset_control_get_shared(dev, id);
2934 if (IS_ERR(disp_drv->rst_bus_de[i])) {
2935 disp_drv->rst_bus_de[i] = NULL;
2936 dev_err(dev, "failed to get reset for %s\n", id);
2937 return -EINVAL;
2938 }
2939
2940 }
2941
2942 #if 0
2943 disp_drv->rst_bus_extra = devm_reset_control_get(dev, "rst_display_top");
2944 if (IS_ERR(disp_drv->rst_bus_extra)) {
2945 disp_drv->rst_bus_extra = NULL;
2946 __wrn("failed to get reset for display top\n");
2947 }
2948 #endif
2949 for (i = 0; i < DISP_DEVICE_NUM; i++) {
2950 /* get resets for dpss */
2951 #if defined(HAVE_DEVICE_COMMON_MODULE)
2952 sprintf(id, "rst_bus_dpss_top%d", i);
2953 disp_drv->rst_bus_dpss_top[i] = devm_reset_control_get_shared(dev, id);
2954 if (IS_ERR(disp_drv->rst_bus_dpss_top[i])) {
2955 disp_drv->rst_bus_dpss_top[i] = NULL;
2956 dev_err(dev, "failed to get reset for %s\n", id);
2957 return -EINVAL;
2958 }
2959 #endif
2960
2961 /* get resets for tcon */
2962 sprintf(id, "rst_bus_tcon%d", i);
2963 disp_drv->rst_bus_tcon[i] = devm_reset_control_get_shared(dev, id);
2964 if (IS_ERR(disp_drv->rst_bus_tcon[i])) {
2965 disp_drv->rst_bus_tcon[i] = NULL;
2966 dev_err(dev, "failed to get reset for %s\n", id);
2967 return -EINVAL;
2968 }
2969 }
2970 #if defined(SUPPORT_DSI)
2971 for (i = 0; i < DEVICE_DSI_NUM; i++) {
2972 sprintf(id, "rst_bus_mipi_dsi%d", i);
2973 disp_drv->rst_bus_mipi_dsi[i] = devm_reset_control_get(dev, id);
2974 if (IS_ERR(disp_drv->rst_bus_mipi_dsi[i])) {
2975 disp_drv->rst_bus_mipi_dsi[i] = NULL;
2976 dev_err(dev, "failed to get reset for %s\n", id);
2977 return -EINVAL;
2978 }
2979 }
2980 #endif
2981
2982 #if defined(SUPPORT_LVDS)
2983 /* get resets for lvds */
2984 for (i = 0; i < DEVICE_LVDS_NUM; i++) {
2985 sprintf(id, "rst_bus_lvds%d", i);
2986 disp_drv->rst_bus_lvds[i] =
2987 devm_reset_control_get_shared(dev, id);
2988 if (IS_ERR(disp_drv->rst_bus_lvds[i])) {
2989 disp_drv->rst_bus_lvds[i] = NULL;
2990 dev_err(dev, "failed to get reset for %s\n", id);
2991 return -EINVAL;
2992 }
2993 }
2994 #endif
2995 return 0;
2996 }
2997
disp_reset_control_put_wrap(struct disp_drv_info * disp_drv)2998 static void disp_reset_control_put_wrap(struct disp_drv_info *disp_drv)
2999 {
3000 int i;
3001
3002 /* put resets for de */
3003 for (i = 0; i < DE_NUM; i++)
3004 reset_control_put(disp_drv->rst_bus_de[i]);
3005
3006 for (i = 0; i < DISP_DEVICE_NUM; i++) {
3007 #if defined(HAVE_DEVICE_COMMON_MODULE)
3008 /* put resets for dpss */
3009 reset_control_put(disp_drv->rst_bus_dpss_top[i]);
3010 #endif
3011
3012 /* put resets for tcon */
3013 reset_control_put(disp_drv->rst_bus_tcon[i]);
3014 }
3015
3016 #if defined(SUPPORT_LVDS)
3017 for (i = 0; i < DEVICE_LVDS_NUM; i++) {
3018 /* put resets for lvds */
3019 reset_control_put(disp_drv->rst_bus_lvds[i]);
3020 }
3021 #endif
3022 }
3023
3024 static u64 disp_dmamask = DMA_BIT_MASK(32);
disp_probe(struct platform_device * pdev)3025 static int disp_probe(struct platform_device *pdev)
3026 {
3027 int i;
3028 int ret;
3029 int counter = 0;
3030
3031 if (g_disp_drv.inited) {
3032 pr_warn("disp has probed!\n");
3033 return 0;
3034 }
3035 __inf("[DISP]disp_probe\n");
3036 memset(&g_disp_drv, 0, sizeof(struct disp_drv_info));
3037
3038 #if defined(CONFIG_ARCH_SUN8IW12P1) || defined(CONFIG_ARCH_SUN8IW16P1)\
3039 || defined(CONFIG_ARCH_SUN8IW19P1)
3040 	/* set ve to normal mode */
3041 writel((readl(ioremap(0x03000004, 4)) & 0xfeffffff),
3042 ioremap(0x03000004, 4));
3043 #endif
3044
3045 g_disp_drv.dev = &pdev->dev;
3046 pdev->dev.dma_mask = &disp_dmamask;
3047
3048 /* iomap */
3049 /* de - [device(tcon-top)] - lcd0/1/2.. - dsi */
3050 counter = 0;
3051 g_disp_drv.reg_base[DISP_MOD_DE] =
3052 (uintptr_t __force) of_iomap(pdev->dev.of_node, counter);
3053 if (!g_disp_drv.reg_base[DISP_MOD_DE]) {
3054 dev_err(&pdev->dev, "unable to map de registers\n");
3055 ret = -EINVAL;
3056 goto err_iomap;
3057 }
3058 counter++;
3059
3060 #if defined(CONFIG_INDEPENDENT_DE)
3061 g_disp_drv.reg_base[DISP_MOD_DE1] =
3062 (uintptr_t __force) of_iomap(pdev->dev.of_node, counter);
3063 if (!g_disp_drv.reg_base[DISP_MOD_DE1]) {
3064 dev_err(&pdev->dev, "unable to map de registers\n");
3065 ret = -EINVAL;
3066 goto err_iomap;
3067 }
3068 counter++;
3069 #endif
3070
3071 #if defined(HAVE_DEVICE_COMMON_MODULE)
3072 g_disp_drv.reg_base[DISP_MOD_DEVICE] =
3073 (uintptr_t __force) of_iomap(pdev->dev.of_node, counter);
3074 if (!g_disp_drv.reg_base[DISP_MOD_DEVICE]) {
3075 dev_err(&pdev->dev,
3076 "unable to map device common module registers\n");
3077 ret = -EINVAL;
3078 goto err_iomap;
3079 }
3080 counter++;
3081 #if defined(CONFIG_INDEPENDENT_DE)
3082 g_disp_drv.reg_base[DISP_MOD_DEVICE1] =
3083 (uintptr_t __force) of_iomap(pdev->dev.of_node, counter);
3084 if (!g_disp_drv.reg_base[DISP_MOD_DEVICE1]) {
3085 dev_err(&pdev->dev,
3086 "unable to map device common module registers\n");
3087 ret = -EINVAL;
3088 goto err_iomap;
3089 }
3090 counter++;
3091 #endif
3092 #endif
3093
3094 for (i = 0; i < DISP_DEVICE_NUM; i++) {
3095 g_disp_drv.reg_base[DISP_MOD_LCD0 + i] =
3096 (uintptr_t __force) of_iomap(pdev->dev.of_node, counter);
3097 if (!g_disp_drv.reg_base[DISP_MOD_LCD0 + i]) {
3098 dev_err(&pdev->dev,
3099 "unable to map timing controller %d registers\n",
3100 i);
3101 ret = -EINVAL;
3102 goto err_iomap;
3103 }
3104 counter++;
3105 }
3106
3107 #if defined(SUPPORT_DSI)
3108 for (i = 0; i < DEVICE_DSI_NUM; ++i) {
3109 g_disp_drv.reg_base[DISP_MOD_DSI0 + i] = (uintptr_t __force)
3110 of_iomap(pdev->dev.of_node, counter);
3111 if (!g_disp_drv.reg_base[DISP_MOD_DSI0 + i]) {
3112 dev_err(&pdev->dev, "unable to map dsi registers\n");
3113 ret = -EINVAL;
3114 goto err_iomap;
3115 }
3116 counter++;
3117 }
3118 #endif
3119
3120 #if defined(SUPPORT_EINK)
3121 g_disp_drv.reg_base[DISP_MOD_EINK] =
3122 (uintptr_t __force)of_iomap(pdev->dev.of_node, counter);
3123 if (!g_disp_drv.reg_base[DISP_MOD_EINK]) {
3124 dev_err(&pdev->dev, "unable to map eink registers\n");
3125 ret = -EINVAL;
3126 goto err_iomap;
3127 }
3128 counter++;
3129 #endif
3130
3131 /* parse and map irq */
3132 /* lcd0/1/2.. - dsi */
3133 /* get de irq for rcq update and eink */
3134 counter = 0;
3135
3136 #ifdef DE_VERSION_V33X
3137 g_disp_drv.irq_no[DISP_MOD_DE] =
3138 irq_of_parse_and_map(pdev->dev.of_node, counter);
3139 if (!g_disp_drv.irq_no[DISP_MOD_DE]) {
3140 dev_err(&pdev->dev, "irq_of_parse_and_map de irq fail\n");
3141 }
3142 ++counter;
3143 #endif
3144
3145 for (i = 0; i < DISP_DEVICE_NUM; i++) {
3146 g_disp_drv.irq_no[DISP_MOD_LCD0 + i] =
3147 irq_of_parse_and_map(pdev->dev.of_node, counter);
3148 if (!g_disp_drv.irq_no[DISP_MOD_LCD0 + i])
3149 dev_err(&pdev->dev,
3150 "get irq %d fail for timing controller%d\n",
3151 counter, i);
3152
3153 counter++;
3154 }
3155 #if defined(SUPPORT_DSI)
3156 for (i = 0; i < DEVICE_DSI_NUM; ++i) {
3157 g_disp_drv.irq_no[DISP_MOD_DSI0 + i] = irq_of_parse_and_map(
3158 pdev->dev.of_node, counter);
3159 if (!g_disp_drv.irq_no[DISP_MOD_DSI0 + i])
3160 dev_err(&pdev->dev,
3161 "irq_of_parse_and_map irq %d fail for dsi\n",
3162 i);
3163 counter++;
3164 }
3165 #endif
3166
3167 #if defined(SUPPORT_VDPO)
3168 g_disp_drv.irq_no[DISP_MOD_VDPO] =
3169 irq_of_parse_and_map(pdev->dev.of_node, counter);
3170 	if (!g_disp_drv.irq_no[DISP_MOD_VDPO])
3171 dev_err(&pdev->dev,
3172 "irq_of_parse_and_map irq fail for vdpo\n");
3173 ++counter;
3174 #endif /*endif SUPPORT_VDPO */
3175
3176 #if defined(SUPPORT_EINK)
3177 g_disp_drv.irq_no[DISP_MOD_EINK] =
3178 irq_of_parse_and_map(pdev->dev.of_node, counter);
3179 if (!g_disp_drv.irq_no[DISP_MOD_EINK])
3180 dev_err(&pdev->dev,
3181 			"irq_of_parse_and_map eink irq %d fail for ee\n", counter);
3182 counter++;
3183 #endif
3184
3185 ret = disp_clk_get_wrap(&g_disp_drv);
3186 if (ret)
3187 goto out_dispose_mapping;
3188
3189 ret = disp_reset_control_get_wrap(&g_disp_drv);
3190 if (ret)
3191 goto out_dispose_mapping;
3192
3193 #if defined(CONFIG_DMABUF_HEAPS)
3194 init_disp_ion_mgr(&g_disp_drv.ion_mgr);
3195 #endif
3196
3197 disp_init(pdev);
3198 ret = sysfs_create_group(&display_dev->kobj, &disp_attribute_group);
3199 if (ret)
3200 __wrn("sysfs_create_group fail!\n");
3201
3202 power_status_init = 1;
3203 #if defined(CONFIG_PM_RUNTIME)
3204 pm_runtime_set_active(&pdev->dev);
3205 pm_runtime_get_noresume(&pdev->dev);
3206 /*pm_runtime_set_autosuspend_delay(&pdev->dev, 5000);*/
3207 pm_runtime_use_autosuspend(&pdev->dev);
3208 pm_runtime_enable(&pdev->dev);
3209 #endif
3210 device_enable_async_suspend(&pdev->dev);
3211
3212 atomic_set(&g_driver_ref_count, 0);
3213
3214 __inf("[DISP]disp_probe finish\n");
3215
3216 return ret;
3217
3218 out_dispose_mapping:
3219 for (i = 0; i < DISP_DEVICE_NUM; i++)
3220 irq_dispose_mapping(g_disp_drv.irq_no[i]);
3221 err_iomap:
3222 for (i = 0; i < DISP_DEVICE_NUM; i++) {
3223 if (g_disp_drv.reg_base[i])
3224 iounmap((char __iomem *)g_disp_drv.reg_base[i]);
3225 }
3226
3227 return ret;
3228 }
3229
disp_remove(struct platform_device * pdev)3230 static int disp_remove(struct platform_device *pdev)
3231 {
3232 int i;
3233
3234 pr_info("disp_remove call\n");
3235
3236 disp_shutdown(pdev);
3237 #if defined(CONFIG_PM_RUNTIME)
3238 pm_runtime_set_suspended(&pdev->dev);
3239 pm_runtime_dont_use_autosuspend(&pdev->dev);
3240 pm_runtime_disable(&pdev->dev);
3241 #endif
3242 disp_exit();
3243
3244 #if defined(CONFIG_DMABUF_HEAPS)
3245 deinit_disp_ion_mgr(&g_disp_drv.ion_mgr);
3246 #endif
3247
3248 sysfs_remove_group(&display_dev->kobj, &disp_attribute_group);
3249
3250 disp_clk_put_wrap(&g_disp_drv);
3251
3252 disp_reset_control_put_wrap(&g_disp_drv);
3253
3254 for (i = 0; i < DISP_MOD_NUM; i++) {
3255 irq_dispose_mapping(g_disp_drv.irq_no[i]);
3256 if (g_disp_drv.reg_base[i])
3257 iounmap((char __iomem *)g_disp_drv.reg_base[i]);
3258 }
3259
3260 platform_set_drvdata(pdev, NULL);
3261
3262 return 0;
3263 }
3264
disp_blank(bool blank)3265 static int disp_blank(bool blank)
3266 {
3267 u32 screen_id = 0;
3268 int num_screens;
3269 struct disp_manager *mgr = NULL;
3270
3271 #if defined(CONFIG_DEVFREQ_DRAM_FREQ_WITH_SOFT_NOTIFY)
3272 /* notify dramfreq module that DE will access DRAM in a short time */
3273 if (!blank)
3274 dramfreq_master_access(MASTER_DE, true);
3275 #endif
3276 num_screens = bsp_disp_feat_get_num_screens();
3277
3278 for (screen_id = 0; screen_id < num_screens; screen_id++) {
3279 mgr = g_disp_drv.mgr[screen_id];
3280 		/* The !mgr->device check was removed on purpose to avoid the
3281 		 * following problem:
3282 		 *
3283 		 * attach manager and device -> disp blank --> blank succeeds
3284 		 * detach manager and device -> disp unblank --> fails
3285 		 * (because the !mgr->device condition is not satisfied)
3286 		 * attach manager and device --> problem arises
3287 		 * (the manager stays in the unblank state forever)
3288 		 *
3289 		 * The scenario is: hdmi plug in -> enter standby
3290 		 * -> hdmi plug out -> exit standby -> hdmi plug in
3291 		 * -> the hdmi screen stays blank
3292 		 */
3293 if (!mgr)
3294 continue;
3295
3296 if (mgr->blank)
3297 mgr->blank(mgr, blank);
3298 }
3299
3300 #if defined(CONFIG_DEVFREQ_DRAM_FREQ_WITH_SOFT_NOTIFY)
3301 /* notify dramfreq module that DE will not access DRAM any more */
3302 if (blank)
3303 dramfreq_master_access(MASTER_DE, false);
3304 #endif
3305
3306 return 0;
3307 }
3308
3309 #if defined(CONFIG_PM_RUNTIME)
disp_runtime_suspend(struct device * dev)3310 static int disp_runtime_suspend(struct device *dev)
3311 {
3312 u32 screen_id = 0;
3313 int num_screens;
3314 struct disp_manager *mgr = NULL;
3315 struct disp_device *dispdev_suspend = NULL;
3316 struct list_head *disp_list = NULL;
3317
3318 pr_info("%s\n", __func__);
3319
3320 if (!g_pm_runtime_enable)
3321 return 0;
3322
3323 num_screens = bsp_disp_feat_get_num_screens();
3324
3325 disp_suspend_cb();
3326 for (screen_id = 0; screen_id < num_screens; screen_id++) {
3327 mgr = g_disp_drv.mgr[screen_id];
3328 if (mgr && mgr->device) {
3329 struct disp_device *dispdev = mgr->device;
3330
3331 if (suspend_output_type[screen_id] ==
3332 DISP_OUTPUT_TYPE_LCD)
3333 flush_work(&g_disp_drv.resume_work[screen_id]);
3334
3335 if (dispdev->is_enabled(dispdev))
3336 dispdev->disable(dispdev);
3337 }
3338 }
3339
3340 disp_list = disp_device_get_list_head();
3341 list_for_each_entry(dispdev_suspend, disp_list, list) {
3342 if (dispdev_suspend->suspend)
3343 dispdev_suspend->suspend(dispdev_suspend);
3344 }
3345
3346 suspend_status |= DISPLAY_LIGHT_SLEEP;
3347 suspend_prestep = 0;
3348
3349 pr_info("%s finish\n", __func__);
3350
3351 return 0;
3352 }
3353
disp_runtime_resume(struct device * dev)3354 static int disp_runtime_resume(struct device *dev)
3355 {
3356 u32 screen_id = 0;
3357 int num_screens;
3358 struct disp_manager *mgr = NULL;
3359 struct disp_device *dispdev = NULL;
3360 struct list_head *disp_list = NULL;
3361 struct disp_device_config config;
3362
3363 pr_info("%s\n", __func__);
3364
3365 if (!g_pm_runtime_enable)
3366 return 0;
3367
3368 memset(&config, 0, sizeof(struct disp_device_config));
3369 num_screens = bsp_disp_feat_get_num_screens();
3370
3371 disp_list = disp_device_get_list_head();
3372 list_for_each_entry(dispdev, disp_list, list) {
3373 if (dispdev->resume)
3374 dispdev->resume(dispdev);
3375 }
3376
3377 for (screen_id = 0; screen_id < num_screens; screen_id++) {
3378 mgr = g_disp_drv.mgr[screen_id];
3379 if (!mgr || !mgr->device)
3380 continue;
3381
3382 if (suspend_output_type[screen_id] == DISP_OUTPUT_TYPE_LCD) {
3383 flush_work(&g_disp_drv.resume_work[screen_id]);
3384 if (!mgr->device->is_enabled(mgr->device)) {
3385 mgr->device->enable(mgr->device);
3386 } else {
3387 mgr->device->pwm_enable(mgr->device);
3388 mgr->device->backlight_enable(mgr->device);
3389 }
3390 } else if (suspend_output_type[screen_id] !=
3391 DISP_OUTPUT_TYPE_NONE) {
3392 if (mgr->device->set_static_config &&
3393 mgr->device->get_static_config) {
3394 mgr->device->get_static_config(mgr->device,
3395 &config);
3396
3397 mgr->device->set_static_config(mgr->device,
3398 &config);
3399 }
3400 if (!mgr->device->is_enabled(mgr->device))
3401 mgr->device->enable(mgr->device);
3402 }
3403 }
3404
3405 suspend_status &= (~DISPLAY_LIGHT_SLEEP);
3406 suspend_prestep = 3;
3407
3408 disp_resume_cb();
3409
3410 pr_info("%s finish\n", __func__);
3411
3412 return 0;
3413 }
3414
disp_runtime_idle(struct device * dev)3415 static int disp_runtime_idle(struct device *dev)
3416 {
3417 u32 screen_id = 0;
3418 int num_screens;
3419
3420 pr_info("%s\n", __func__);
3421 num_screens = bsp_disp_feat_get_num_screens();
3422
3423 if (g_disp_drv.dev) {
3424 for (screen_id = 0; screen_id < num_screens; screen_id++) {
3425 if (suspend_output_type[screen_id] ==
3426 DISP_OUTPUT_TYPE_LCD)
3427 pm_runtime_set_autosuspend_delay(g_disp_drv.dev,
3428 5000);
3429 }
3430 pm_runtime_mark_last_busy(g_disp_drv.dev);
3431 pm_request_autosuspend(g_disp_drv.dev);
3432 } else {
3433 pr_warn("%s, display device is null\n", __func__);
3434 }
3435
3436 	/* return 0: let the framework request a runtime suspend;
3437 	 * return non-zero: the driver handles suspend itself.
3438 	 */
3439 return -1;
3440 }
3441 #endif
3442
disp_suspend(struct device * dev)3443 int disp_suspend(struct device *dev)
3444 {
3445 u32 screen_id = 0;
3446 int num_screens;
3447 struct disp_manager *mgr = NULL;
3448 struct disp_device *dispdev_suspend = NULL;
3449 struct list_head *disp_list = NULL;
3450 struct disp_device *dispdev = NULL;
3451
3452 #if defined(SUPPORT_EINK) && defined(CONFIG_EINK_PANEL_USED)
3453 struct disp_eink_manager *eink_manager = NULL;
3454
3455 eink_manager = g_disp_drv.eink_manager[0];
3456 if (!eink_manager)
3457 __wrn("eink_manager is NULL!\n");
3458 #endif
3459 pr_info("%s\n", __func__);
3460
3461 if (!g_disp_drv.dev) {
3462 pr_warn("display device is null!\n");
3463 return 0;
3464 }
3465 #if defined(CONFIG_PM_RUNTIME)
3466 if (!pm_runtime_status_suspended(g_disp_drv.dev))
3467 #endif
3468 {
3469 num_screens = bsp_disp_feat_get_num_screens();
3470 disp_suspend_cb();
3471 if (g_pm_runtime_enable) {
3472
3473 for (screen_id = 0; screen_id < num_screens;
3474 screen_id++) {
3475 mgr = g_disp_drv.mgr[screen_id];
3476 if (!mgr || !mgr->device)
3477 continue;
3478 dispdev = mgr->device;
3479 if (suspend_output_type[screen_id] ==
3480 DISP_OUTPUT_TYPE_LCD)
3481 					flush_work(&g_disp_drv.resume_work[screen_id]);
3483 if (suspend_output_type[screen_id] !=
3484 DISP_OUTPUT_TYPE_NONE) {
3485 if (dispdev->is_enabled(dispdev))
3486 dispdev->disable(dispdev);
3487 }
3488 }
3489 } else {
3490 for (screen_id = 0; screen_id < num_screens;
3491 screen_id++) {
3492 mgr = g_disp_drv.mgr[screen_id];
3493 if (!mgr || !mgr->device)
3494 continue;
3495 dispdev = mgr->device;
3496 if (suspend_output_type[screen_id] !=
3497 DISP_OUTPUT_TYPE_NONE) {
3498 if (dispdev->is_enabled(dispdev))
3499 dispdev->disable(dispdev);
3500 }
3501 }
3502 }
3503
3504 		/* suspend all display devices */
3505 disp_list = disp_device_get_list_head();
3506 list_for_each_entry(dispdev_suspend, disp_list, list) {
3507 if (dispdev_suspend->suspend)
3508 dispdev_suspend->suspend(dispdev_suspend);
3509 }
3510 }
3511 /* FIXME: hdmi suspend */
3512 suspend_status |= DISPLAY_DEEP_SLEEP;
3513 suspend_prestep = 1;
3514 #if defined(CONFIG_PM_RUNTIME)
3515 if (g_pm_runtime_enable) {
3516 pm_runtime_disable(g_disp_drv.dev);
3517 pm_runtime_set_suspended(g_disp_drv.dev);
3518 pm_runtime_enable(g_disp_drv.dev);
3519 }
3520 #endif
3521 pr_info("%s finish\n", __func__);
3522
3523 #if defined(SUPPORT_EINK) && defined(CONFIG_EINK_PANEL_USED)
3524 eink_manager->suspend(eink_manager);
3525 #endif
3526 return 0;
3527 }
3528
disp_resume(struct device * dev)3529 int disp_resume(struct device *dev)
3530 {
3531 u32 screen_id = 0;
3532 int num_screens = bsp_disp_feat_get_num_screens();
3533 struct disp_manager *mgr = NULL;
3534 struct disp_device_config config;
3535
3536 #if defined(SUPPORT_EINK) && defined(CONFIG_EINK_PANEL_USED)
3537 struct disp_eink_manager *eink_manager = NULL;
3538 #endif
3539 #if defined(CONFIG_PM_RUNTIME)
3540 memset(&config, 0, sizeof(struct disp_device_config));
3541 if (g_pm_runtime_enable) {
3542 for (screen_id = 0; screen_id < num_screens; screen_id++) {
3543 mgr = g_disp_drv.mgr[screen_id];
3544 if (!mgr || !mgr->device)
3545 continue;
3546
3547 if (suspend_output_type[screen_id] ==
3548 DISP_OUTPUT_TYPE_LCD) {
3549 				schedule_work(&g_disp_drv.resume_work[screen_id]);
3551 }
3552 }
3553 if (g_pm_runtime_enable) {
3554 if (g_disp_drv.dev) {
3555 pm_runtime_disable(g_disp_drv.dev);
3556 pm_runtime_set_active(g_disp_drv.dev);
3557 pm_runtime_enable(g_disp_drv.dev);
3558 } else {
3559 pr_warn("%s, display device is null\n",
3560 __func__);
3561 }
3562 }
3563 } else {
3564 struct disp_device *dispdev = NULL;
3565 struct list_head *disp_list = NULL;
3566
3567 disp_list = disp_device_get_list_head();
3568 list_for_each_entry(dispdev, disp_list, list) {
3569 if (dispdev->resume)
3570 dispdev->resume(dispdev);
3571 }
3572 for (screen_id = 0; screen_id < num_screens; screen_id++) {
3573 mgr = g_disp_drv.mgr[screen_id];
3574 if (!mgr || !mgr->device)
3575 continue;
3576
3577 if (suspend_output_type[screen_id] !=
3578 DISP_OUTPUT_TYPE_NONE) {
3579 if (mgr->device->set_static_config
3580 && mgr->device->get_static_config) {
3581 mgr->device->get_static_config(mgr->device, &config);
3582 mgr->device->set_static_config(mgr->device, &config);
3583 }
3584 if (!mgr->device->is_enabled(mgr->device))
3585 mgr->device->enable(mgr->device);
3586 }
3587 }
3588 disp_resume_cb();
3589 }
3590 #else
3591 struct disp_device *dispdev = NULL;
3592 struct list_head *disp_list = NULL;
3593
3594 memset(&config, 0, sizeof(struct disp_device_config));
3595 disp_list = disp_device_get_list_head();
3596 list_for_each_entry(dispdev, disp_list, list) {
3597 if (dispdev->resume)
3598 dispdev->resume(dispdev);
3599 }
3600
3601 for (screen_id = 0; screen_id < num_screens; screen_id++) {
3602 mgr = g_disp_drv.mgr[screen_id];
3603 if (!mgr || !mgr->device)
3604 continue;
3605
3606 if (suspend_output_type[screen_id] != DISP_OUTPUT_TYPE_NONE) {
3607 if (mgr->device->set_static_config &&
3608 mgr->device->get_static_config) {
3609 mgr->device->get_static_config(mgr->device,
3610 &config);
3611
3612 mgr->device->set_static_config(mgr->device,
3613 &config);
3614 }
3615 mgr->device->enable(mgr->device);
3616 }
3617 }
3618 disp_resume_cb();
3619 #endif
3620
3621 suspend_status &= (~DISPLAY_DEEP_SLEEP);
3622 suspend_prestep = 2;
3623
3624 #if defined(SUPPORT_EINK) && defined(CONFIG_EINK_PANEL_USED)
3625 eink_manager = g_disp_drv.eink_manager[0];
3626 if (!eink_manager)
3627 __wrn("eink_manager is NULL!\n");
3628 eink_manager->resume(eink_manager);
3629 #endif
3630 pr_info("%s finish\n", __func__);
3631
3632 return 0;
3633 }
3634
3635 static const struct dev_pm_ops disp_runtime_pm_ops = {
3636 #ifdef CONFIG_PM_RUNTIME
3637 .runtime_suspend = disp_runtime_suspend,
3638 .runtime_resume = disp_runtime_resume,
3639 .runtime_idle = disp_runtime_idle,
3640 #endif
3641 .suspend = disp_suspend,
3642 .resume = disp_resume,
3643 };
3644
3645 bool disp_is_enable(void)
3647 {
3648 bool ret = false;
3649 u32 screen_id = 0;
3650 int num_screens;
3651
3652 num_screens = bsp_disp_feat_get_num_screens();
3653
3654 for (screen_id = 0; screen_id < num_screens; screen_id++) {
3655 struct disp_manager *mgr = g_disp_drv.mgr[screen_id];
3656
3657 if (mgr && mgr->device && mgr->device->is_enabled &&
3658 mgr->device->disable)
3659 if (mgr->device->is_enabled(mgr->device))
3660 ret = true;
3661 }
3662 return ret;
3663 }
3664 EXPORT_SYMBOL(disp_is_enable);
3665
3666 static void disp_shutdown(struct platform_device *pdev)
3667 {
3668 u32 screen_id = 0;
3669 int num_screens;
3670
3671 num_screens = bsp_disp_feat_get_num_screens();
3672
3673 for (screen_id = 0; screen_id < num_screens; screen_id++) {
3674 struct disp_manager *mgr = g_disp_drv.mgr[screen_id];
3675
3676 if (mgr && mgr->device && mgr->device->is_enabled
3677 && mgr->device->disable) {
3678 if (mgr->device->is_enabled(mgr->device))
3679 mgr->device->disable(mgr->device);
3680 mgr->enable_iommu(mgr, false);
3681 }
3682 }
3683 }
3684
3685 #ifdef EINK_FLUSH_TIME_TEST
3686 struct timeval ioctrl_start_timer;
3687 #endif
3688 long disp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3689 {
3690 unsigned long karg[4];
3691 unsigned long ubuffer[4] = { 0 };
3692 s32 ret = 0;
3693 int num_screens = 2;
3694 struct disp_manager *mgr = NULL;
3695 struct disp_device *dispdev = NULL;
3696 struct disp_enhance *enhance = NULL;
3697 struct disp_smbl *smbl = NULL;
3698 struct disp_capture *cptr = NULL;
3699 #if defined(SUPPORT_EINK)
3700 struct disp_eink_manager *eink_manager = NULL;
3701 #endif
3702
3703 #ifdef EINK_FLUSH_TIME_TEST
3704 do_gettimeofday(&ioctrl_start_timer);
3705 #endif /*test eink time */
3706 num_screens = bsp_disp_feat_get_num_screens();
3707
3708 if (cmd == DISP_NODE_LCD_MESSAGE_REQUEST || cmd == DISP_RELOAD_LCD) {
3709 goto handle_cmd;
3710 }
3711
3712 if (copy_from_user
3713 ((void *)karg, (void __user *)arg, 4 * sizeof(unsigned long))) {
3714 __wrn("copy_from_user fail\n");
3715 return -EFAULT;
3716 }
3717
3718 ubuffer[0] = karg[0];
3719 ubuffer[1] = karg[1];
3720 ubuffer[2] = karg[2];
3721 ubuffer[3] = karg[3];
3722
3723 if (ubuffer[0] < num_screens && cmd != DISP_GET_VSYNC_TIMESTAMP)
3724 mgr = g_disp_drv.mgr[ubuffer[0]];
3725 if (mgr) {
3726 dispdev = mgr->device;
3727 enhance = mgr->enhance;
3728 smbl = mgr->smbl;
3729 cptr = mgr->cptr;
3730 }
3731 #if defined(SUPPORT_EINK)
3732 eink_manager = g_disp_drv.eink_manager[0];
3733
3734 if (!eink_manager)
3735 __wrn("eink_manager is NULL!\n");
3736
3737 #endif
3738
3739 if (cmd < DISP_FB_REQUEST && cmd != DISP_GET_VSYNC_TIMESTAMP) {
3740 if (ubuffer[0] >= num_screens) {
3741 __wrn
3742 ("para err, cmd = 0x%x,screen id = %d\n",
3743 cmd, (int)ubuffer[0]);
3744 return -1;
3745 }
3746 }
3747 if (DISPLAY_DEEP_SLEEP & suspend_status) {
3748 __wrn("ioctl:%x fail when in suspend!\n", cmd);
3749 return -1;
3750 }
3751
3752 if (cmd == DISP_print)
3753 __wrn("cmd:0x%x,%ld,%ld\n", cmd, ubuffer[0], ubuffer[1]);
3754
3755 handle_cmd:
3756 switch (cmd) {
3757 /* ----disp global---- */
3758 case DISP_SET_BKCOLOR:
3759 {
3760 struct disp_color para;
3761
3762 if (copy_from_user(&para, (void __user *)ubuffer[1],
3763 sizeof(struct disp_color))) {
3764 __wrn("copy_from_user fail\n");
3765 return -EFAULT;
3766 }
3767 if (mgr && (mgr->set_back_color != NULL))
3768 ret = mgr->set_back_color(mgr, &para);
3769 break;
3770 }
3771
3772 case DISP_GET_OUTPUT_TYPE:
3773 {
3774 if (suspend_status != DISPLAY_NORMAL)
3775 ret = suspend_output_type[ubuffer[0]];
3776 else
3777 ret = bsp_disp_get_output_type(ubuffer[0]);
3778
3779 break;
3780 }
3781
3782 case DISP_GET_SCN_WIDTH:
3783 {
3784 unsigned int width = 0, height = 0;
3785
3786 if (mgr && mgr->device && mgr->device->get_resolution)
3787 mgr->device->get_resolution(mgr->device, &width,
3788 &height);
3789 ret = width;
3790 break;
3791 }
3792
3793 case DISP_GET_SCN_HEIGHT:
3794 {
3795 unsigned int width = 0, height = 0;
3796
3797 if (mgr && mgr->device && mgr->device->get_resolution)
3798 mgr->device->get_resolution(mgr->device, &width,
3799 &height);
3800 ret = height;
3801 break;
3802 }
3803
3804 case DISP_VSYNC_EVENT_EN:
3805 {
3806 ret =
3807 bsp_disp_vsync_event_enable(ubuffer[0], ubuffer[1]);
3808 break;
3809 }
3810 case DISP_GET_VSYNC_TIMESTAMP:
3811 {
3812 struct disp_vsync_timestame ts;
3813 for (ts.disp = 0; ts.disp < DISP_SCREEN_NUM; ts.disp++) {
3814 ret = bsp_disp_get_vsync_timestamp(ts.disp, &ts.timestamp);
3815 if (ret == 0) {
3816 if (copy_to_user((void __user *)ubuffer[0], &ts,
3817 sizeof(struct disp_vsync_timestame))) {
3818 __wrn("copy_to_user fail\n");
3819 ret = -EFAULT;
3820 }
3821 break;
3822 }
3823 }
3824 break;
3825 }
3826
3827 case DISP_SHADOW_PROTECT:
3828 {
3829 ret = bsp_disp_shadow_protect(ubuffer[0], ubuffer[1]);
3830 break;
3831 }
3832
3833 case DISP_BLANK:
3834 {
3835 /* only respond to the main device's blank request */
3836
3837 if (!g_pm_runtime_enable)
3838 break;
3839
3840 if (ubuffer[0] != 0)
3841 break;
3842
3843 if (ubuffer[1]) {
3844 #ifdef CONFIG_ARCH_SUN50IW6
3845 bsp_disp_hdmi_cec_standby_request();
3846 #endif
3847 #if defined(CONFIG_PM_RUNTIME)
3848 if (g_disp_drv.dev)
3849 pm_runtime_put(g_disp_drv.dev);
3850 else
3851 pr_warn("%s, display device is null\n",
3852 __func__);
3853 #endif
3854 suspend_status |= DISPLAY_BLANK;
3855 disp_blank(true);
3856 } else {
3857 if (power_status_init) {
3858 /* avoid first unblank */
3859 power_status_init = 0;
3860 break;
3861 }
3862
3863 disp_blank(false);
3864 suspend_status &= ~DISPLAY_BLANK;
3865 #if defined(CONFIG_PM_RUNTIME)
3866 if (g_disp_drv.dev) {
3867 /* recover the pm_runtime status */
3868 pm_runtime_disable(g_disp_drv.dev);
3869 pm_runtime_set_suspended(g_disp_drv.dev);
3871 pm_runtime_enable(g_disp_drv.dev);
3872 pm_runtime_get_sync(g_disp_drv.dev);
3873 } else
3874 pr_warn("%s, display device is null\n",
3875 __func__);
3876 #endif
3877 }
3878 break;
3879 }
3880
3881 case DISP_DEVICE_SWITCH:
3882 {
3883 /* if the display device has already entered the blank state,
3884 * the DISP_DEVICE_SWITCH request is not handled.
3885 */
3886 if (!(suspend_status & DISPLAY_BLANK))
3887 ret =
3888 bsp_disp_device_switch(ubuffer[0],
3889 (enum disp_output_type)ubuffer[1],
3890 (enum disp_output_type)ubuffer[2]);
3891 suspend_output_type[ubuffer[0]] = ubuffer[1];
3892 #if defined(SUPPORT_TV) && defined(CONFIG_ARCH_SUN50IW2P1)
3893 bsp_disp_tv_set_hpd(1);
3894 #endif
3895 break;
3896 }
3897
3898 case DISP_DEVICE_SET_CONFIG:
3899 {
3900 struct disp_device_config config;
3901
3902 if (copy_from_user(&config, (void __user *)ubuffer[1],
3903 sizeof(struct disp_device_config))) {
3904 __wrn("copy_from_user fail\n");
3905 return -EFAULT;
3906 }
3907 suspend_output_type[ubuffer[0]] = config.type;
3908
3909 ret = bsp_disp_device_set_config(ubuffer[0], &config);
3910 break;
3911 }
3912
3913 case DISP_DEVICE_GET_CONFIG:
3914 {
3915 struct disp_device_config config;
3916
/* zero the struct so no uninitialised stack bytes reach user space */
memset(&config, 0, sizeof(config));
3917 if (mgr && dispdev && dispdev->get_static_config)
3918 dispdev->get_static_config(dispdev, &config);
3919 else
3920 ret = -EFAULT;
3921
3922 if (ret == 0) {
3923 if (copy_to_user((void __user *)ubuffer[1], &config,
3924 sizeof(struct disp_device_config))) {
3925 __wrn("copy_to_user fail\n");
3926 return -EFAULT;
3927 }
3928 }
3929 break;
3930 }
3931 #if defined(SUPPORT_EINK)
3932
3933 case DISP_EINK_UPDATE:
3934 {
3935 s32 i = 0;
3936 struct area_info area;
3937
3938 if (!eink_manager) {
3939 pr_err("there is no eink manager!\n");
3940 break;
3941 }
3942
if (ubuffer[1] > ARRAY_SIZE(lyr_cfg)) {
__wrn("layer number %u overflows lyr_cfg\n",
(unsigned int)ubuffer[1]);
return -EFAULT;
}
3943 memset(lyr_cfg, 0,
3944 16 * sizeof(struct disp_layer_config));
3945 if (copy_from_user(lyr_cfg, (void __user *)ubuffer[3],
3946 sizeof(struct disp_layer_config) * ubuffer[1])) {
3947 __wrn("copy_from_user fail\n");
3948 return -EFAULT;
3949 }
3950
3951 memset(&area, 0, sizeof(struct area_info));
3952 if (copy_from_user(&area, (void __user *)ubuffer[0],
3953 sizeof(struct area_info))) {
3954 __wrn("copy_from_user fail\n");
3955 return -EFAULT;
3956 }
3957
3958 for (i = 0; i < ubuffer[1]; i++)
3959 __disp_config_transfer2inner(&eink_para[i],
3960 &lyr_cfg[i]);
3961
3962 ret = bsp_disp_eink_update(eink_manager,
3963 (struct disp_layer_config_inner *)&eink_para[0],
3964 (unsigned int)ubuffer[1],
3965 (enum eink_update_mode)ubuffer[2], &area);
3966 break;
3967 }
3968
3969 case DISP_EINK_UPDATE2:
3970 {
3971 s32 i = 0;
3972 struct area_info area;
3973
3974 if (!eink_manager) {
3975 pr_err("there is no eink manager!\n");
3976 break;
3977 }
3978
if (ubuffer[1] > ARRAY_SIZE(lyr_cfg2)) {
__wrn("layer number %u overflows lyr_cfg2\n",
(unsigned int)ubuffer[1]);
return -EFAULT;
}
3979 memset(lyr_cfg2, 0,
3980 16 * sizeof(struct disp_layer_config2));
3981 if (copy_from_user(lyr_cfg2, (void __user *)ubuffer[3],
3982 sizeof(struct disp_layer_config2) * ubuffer[1])) {
3983 __wrn("copy_from_user fail\n");
3984 return -EFAULT;
3985 }
3986
3987 memset(&area, 0, sizeof(struct area_info));
3988 if (copy_from_user(&area, (void __user *)ubuffer[0],
3989 sizeof(struct area_info))) {
3990 __wrn("copy_from_user fail\n");
3991 return -EFAULT;
3992 }
3993
3994 for (i = 0; i < ubuffer[1]; i++)
3995 __disp_config2_transfer2inner(&eink_para[i],
3996 &lyr_cfg2[i]);
3997
3998 ret = bsp_disp_eink_update(eink_manager,
3999 (struct disp_layer_config_inner *)&eink_para[0],
4000 (unsigned int)ubuffer[1],
4001 (enum eink_update_mode)ubuffer[2], &area);
4002 break;
4003 }
4004
4005 case DISP_EINK_SET_TEMP:
4006 {
4007 ret =
4008 bsp_disp_eink_set_temperature(eink_manager,
4009 ubuffer[0]);
4010 break;
4011 }
4012 case DISP_EINK_GET_TEMP:
4013 {
4014 ret = bsp_disp_eink_get_temperature(eink_manager);
4015 break;
4016 }
4017 case DISP_EINK_OVERLAP_SKIP:
4018 {
4019 ret = bsp_disp_eink_op_skip(eink_manager, ubuffer[0]);
4020 break;
4021 }
4022 #endif
4023
4024 case DISP_GET_OUTPUT:
4025 {
4026 struct disp_output para;
4027
4028 memset(&para, 0, sizeof(struct disp_output));
4029
4030 if (mgr && mgr->device) {
4031 para.type =
4032 bsp_disp_get_output_type(ubuffer[0]);
4033 if (mgr->device->get_mode)
4034 para.mode =
4035 mgr->device->get_mode(mgr->device);
4036 }
4037
4038 if (copy_to_user((void __user *)ubuffer[1], &para,
4039 sizeof(struct disp_output))) {
4040 __wrn("copy_to_user fail\n");
4041 return -EFAULT;
4042 }
4043 break;
4044 }
4045
4046 case DISP_SET_COLOR_RANGE:
4047 {
4048 if (mgr && mgr->set_output_color_range)
4049 ret =
4050 mgr->set_output_color_range(mgr,
4051 ubuffer[1]);
4052
4053 break;
4054 }
4055
4056 case DISP_GET_COLOR_RANGE:
4057 {
4058 if (mgr && mgr->get_output_color_range)
4059 ret = mgr->get_output_color_range(mgr);
4060
4061 break;
4062 }
4063
4064 /* ----layer---- */
4065 case DISP_LAYER_SET_CONFIG:
4066 {
4067 unsigned int i = 0;
4068 const unsigned int lyr_cfg_size = ARRAY_SIZE(lyr_cfg);
4069
4070 mutex_lock(&g_disp_drv.mlock);
4071
4072 if (ubuffer[2] > lyr_cfg_size) {
4073 __wrn("Total layer number is %d\n", lyr_cfg_size);
4074 mutex_unlock(&g_disp_drv.mlock);
4075 return -EFAULT;
4076 }
4077
4078 if (copy_from_user(lyr_cfg,
4079 (void __user *)ubuffer[1],
4080 sizeof(struct disp_layer_config) * ubuffer[2])) {
4081 __wrn("copy_from_user fail\n");
4082 mutex_unlock(&g_disp_drv.mlock);
4083
4084 return -EFAULT;
4085 }
4086
4087 for (i = 0; (i < lyr_cfg_size) && (i < ubuffer[2]); ++i) {
4088 if (lyr_cfg[i].enable == 0) {
4089 memset(&(lyr_cfg[i].info), 0,
4090 sizeof(lyr_cfg[i].info));
4091 }
4092 }
4093
4094 #if !defined(CONFIG_EINK_PANEL_USED)
4095 if (mgr && mgr->set_layer_config)
4096 ret = mgr->set_layer_config(mgr, lyr_cfg, ubuffer[2]);
4097 #endif
4098 mutex_unlock(&g_disp_drv.mlock);
4099 break;
4100 }
4101
4102 case DISP_LAYER_GET_CONFIG:
4103 {
if (ubuffer[2] > ARRAY_SIZE(lyr_cfg)) {
__wrn("layer number %u overflows lyr_cfg\n",
(unsigned int)ubuffer[2]);
return -EFAULT;
}
4104 if (copy_from_user(lyr_cfg,
4105 (void __user *)ubuffer[1],
4106 sizeof(struct disp_layer_config) * ubuffer[2])) {
4107 __wrn("copy_from_user fail\n");
4108
4109 return -EFAULT;
4110 }
4111 if (mgr && mgr->get_layer_config)
4112 ret = mgr->get_layer_config(mgr, lyr_cfg, ubuffer[2]);
4113 if (copy_to_user((void __user *)ubuffer[1],
4114 lyr_cfg,
4115 sizeof(struct disp_layer_config) * ubuffer[2])) {
4116 __wrn("copy_to_user fail\n");
4117
4118 return -EFAULT;
4119 }
4120 break;
4121 }
4122
4123 case DISP_LAYER_SET_CONFIG2:
4124 {
4125 struct disp_layer_config2 *pLyr_cfg2;
4126 unsigned int i = 0;
4127 const unsigned int lyr_cfg_size =
4128 ARRAY_SIZE(lyr_cfg2);
4129
4130 /* adapt to multi thread call in case of disp 0 & 1 work together*/
4131 if (ubuffer[0] == 0)
4132 pLyr_cfg2 = lyr_cfg2;
4133 else
4134 pLyr_cfg2 = lyr_cfg2_1;
4135
if (ubuffer[2] > lyr_cfg_size) {
__wrn("layer number %u overflows layer config buffer\n",
(unsigned int)ubuffer[2]);
return -EFAULT;
}
4136 if (copy_from_user(pLyr_cfg2,
4137 (void __user *)ubuffer[1],
4138 sizeof(struct disp_layer_config2) * ubuffer[2])) {
4139 __wrn("copy_from_user fail\n");
4140
4141 return -EFAULT;
4142 }
4143
4144 for (i = 0; (i < lyr_cfg_size) && (i < ubuffer[2]); ++i) {
4145 if (pLyr_cfg2[i].enable == 0) {
4146 memset(&(pLyr_cfg2[i].info), 0,
4147 sizeof(pLyr_cfg2[i].info));
4148 }
4149 }
4150
4151 #if !defined(CONFIG_EINK_PANEL_USED)
4152 if (mgr && mgr->set_layer_config2)
4153 ret = mgr->set_layer_config2(mgr, pLyr_cfg2, ubuffer[2]);
4154 #endif
4155 break;
4156 }
4157
4158 case DISP_RTWB_COMMIT:
4159 {
4160 #if defined(SUPPORT_RTWB)
4161 struct disp_layer_config2 *pLyr_cfg2;
4162 struct disp_capture_info2 info2;
4163 unsigned int i = 0;
4164 const unsigned int lyr_cfg_size =
4165 ARRAY_SIZE(lyr_cfg2);
4166
4167 /* adapt to multi thread call in case of disp 0 & 1 work together*/
4168 if (ubuffer[0] == 0)
4169 pLyr_cfg2 = lyr_cfg2;
4170 else
4171 pLyr_cfg2 = lyr_cfg2_1;
4172
if (ubuffer[2] > lyr_cfg_size) {
__wrn("layer number %u overflows layer config buffer\n",
(unsigned int)ubuffer[2]);
return -EFAULT;
}
4173 if (copy_from_user(pLyr_cfg2,
4174 (void __user *)ubuffer[1],
4175 sizeof(struct disp_layer_config2) * ubuffer[2])) {
4176 __wrn("copy_from_user fail\n");
4177
4178 return -EFAULT;
4179 }
4180
4181 if (copy_from_user(&info2,
4182 (void __user *)ubuffer[3],
4183 sizeof(struct disp_capture_info2))) {
4184 __wrn("copy_from_user disp_capture_info2 fail\n");
4185
4186 return -EFAULT;
4187 }
4188
4189
4190 for (i = 0; (i < lyr_cfg_size) && (i < ubuffer[2]); ++i) {
4191 if (pLyr_cfg2[i].enable == 0) {
4192 memset(&(pLyr_cfg2[i].info), 0,
4193 sizeof(pLyr_cfg2[i].info));
4194 }
4195 }
4196
4197 if (mgr)
4198 ret = disp_mgr_set_rtwb_layer(mgr, pLyr_cfg2, &info2, ubuffer[2]);
4199 #endif
4200 break;
4201 }
4202
4203 case DISP_LAYER_GET_CONFIG2:
4204 {
if (ubuffer[2] > ARRAY_SIZE(lyr_cfg2)) {
__wrn("layer number %u overflows lyr_cfg2\n",
(unsigned int)ubuffer[2]);
return -EFAULT;
}
4205 if (copy_from_user(lyr_cfg2,
4206 (void __user *)ubuffer[1],
4207 sizeof(struct disp_layer_config2) * ubuffer[2])) {
4208 __wrn("copy_from_user fail\n");
4209
4210 return -EFAULT;
4211 }
4212 if (mgr && mgr->get_layer_config2)
4213 ret = mgr->get_layer_config2(mgr, lyr_cfg2, ubuffer[2]);
4214 if (copy_to_user((void __user *)ubuffer[1],
4215 lyr_cfg2,
4216 sizeof(struct disp_layer_config2) * ubuffer[2])) {
4217 __wrn("copy_to_user fail\n");
4218
4219 return -EFAULT;
4220 }
4221 break;
4222 }
4223
4224 /* ----channels---- */
4225 case DISP_CHN_SET_PALETTE:
4226 {
4227 struct disp_palette_config palette;
4228 if (copy_from_user(&palette,
4229 (void __user *)ubuffer[1],
4230 sizeof(struct disp_palette_config))) {
4231 __wrn("copy_from_user fail\n");
4232 return -EFAULT;
4233 }
4234 if (palette.num <= 0 || palette.num > 256) {
4235 __wrn("palette param err with num:%d\n", palette.num);
4236 return -EFAULT;
4237 }
4238 if (copy_from_user(palette_data, (void __user *)palette.data, palette.num * 4)) {
4239 __wrn("copy palette data from user fail\n");
4240 return -EFAULT;
4241 }
4242 palette.data = palette_data;
4243 if (mgr && mgr->set_palette)
4244 ret = mgr->set_palette(mgr, &palette);
4245
4246 break;
4247 }
4248
4249
4250 /* ---- lcd --- */
4251 case DISP_LCD_SET_BRIGHTNESS:
4252 {
4253 if (dispdev && dispdev->set_bright)
4254 ret = dispdev->set_bright(dispdev, ubuffer[1]);
4255 break;
4256 }
4257
4258 case DISP_LCD_GET_BRIGHTNESS:
4259 {
4260 if (dispdev && dispdev->get_bright)
4261 ret = dispdev->get_bright(dispdev);
4262 break;
4263 }
4264 case DISP_TV_SET_GAMMA_TABLE:
4265 {
4266 if (dispdev && (dispdev->type == DISP_OUTPUT_TYPE_TV)) {
4267 u32 *gamma_tbl = kmalloc(LCD_GAMMA_TABLE_SIZE,
4268 GFP_KERNEL | __GFP_ZERO);
4269 u32 size = ubuffer[2];
4270
4271 if (gamma_tbl == NULL) {
4272 __wrn("kmalloc fail\n");
4273 ret = -EFAULT;
4274 break;
4275 }
4276
4277 size = (size > LCD_GAMMA_TABLE_SIZE) ?
4278 LCD_GAMMA_TABLE_SIZE : size;
4279 if (copy_from_user(gamma_tbl, (void __user *)ubuffer[1],
4280 size)) {
4281 __wrn("copy_from_user fail\n");
4282 kfree(gamma_tbl);
4283 ret = -EFAULT;
4284
4285 break;
4286 }
4287 if (dispdev->set_gamma_tbl)
4288 ret = dispdev->set_gamma_tbl(dispdev, gamma_tbl,
4289 size);
4290 kfree(gamma_tbl);
4291 }
4292 break;
4293 }
4294
4295 case DISP_LCD_GAMMA_CORRECTION_ENABLE:
4296 {
4297 if (dispdev &&
4298 (dispdev->type == DISP_OUTPUT_TYPE_LCD)) {
4299 ret = dispdev->enable_gamma(dispdev);
4300 }
4301 break;
4302 }
4303
4304 case DISP_LCD_GAMMA_CORRECTION_DISABLE:
4305 {
4306 if (dispdev && (dispdev->type == DISP_OUTPUT_TYPE_LCD))
4307 ret = dispdev->disable_gamma(dispdev);
4308 break;
4309 }
4310
4311 case DISP_LCD_SET_GAMMA_TABLE:
4312 {
4313 if (dispdev && (dispdev->type == DISP_OUTPUT_TYPE_LCD)) {
4314 u32 *gamma_tbl = kmalloc(LCD_GAMMA_TABLE_SIZE,
4315 GFP_KERNEL | __GFP_ZERO);
4316 u32 size = ubuffer[2];
4317
4318 if (gamma_tbl == NULL) {
4319 __wrn("kmalloc fail\n");
4320 ret = -EFAULT;
4321 break;
4322 }
4323
4324 size = (size > LCD_GAMMA_TABLE_SIZE) ?
4325 LCD_GAMMA_TABLE_SIZE : size;
4326 if (copy_from_user(gamma_tbl, (void __user *)ubuffer[1],
4327 size)) {
4328 __wrn("copy_from_user fail\n");
4329 kfree(gamma_tbl);
4330 ret = -EFAULT;
4331
4332 break;
4333 }
4334 ret = dispdev->set_gamma_tbl(dispdev, gamma_tbl, size);
4335 kfree(gamma_tbl);
4336 }
4337 break;
4338 }
4339
4340
4341 /* ---- hdmi --- */
4342 case DISP_HDMI_SUPPORT_MODE:
4343 {
4344 ret =
4345 bsp_disp_hdmi_check_support_mode(ubuffer[0],
4346 ubuffer[1]);
4347 break;
4348 }
4349
4350 case DISP_SET_TV_HPD:
4351 {
4352 ret = bsp_disp_tv_set_hpd(ubuffer[0]);
4353 break;
4354 }
4355 #ifdef CONFIG_ARCH_SUN50IW6
4356 case DISP_CEC_ONE_TOUCH_PLAY:
4357 {
4358 ret = bsp_disp_hdmi_cec_send_one_touch_play();
4359 break;
4360 }
4361 #endif
4362 /* ----enhance---- */
4363 case DISP_ENHANCE_ENABLE:
4364 {
4365 if (enhance && enhance->enable)
4366 ret = enhance->enable(enhance);
4367 break;
4368 }
4369
4370 case DISP_ENHANCE_DISABLE:
4371 {
4372 if (enhance && enhance->disable)
4373 ret = enhance->disable(enhance);
4374 break;
4375 }
4376
4377 case DISP_ENHANCE_DEMO_ENABLE:
4378 {
4379 if (enhance && enhance->demo_enable)
4380 ret = enhance->demo_enable(enhance);
4381 break;
4382 }
4383
4384 case DISP_ENHANCE_DEMO_DISABLE:
4385 {
4386 if (enhance && enhance->demo_disable)
4387 ret = enhance->demo_disable(enhance);
4388 break;
4389 }
4390
4391 case DISP_ENHANCE_SET_MODE:
4392 {
4393 if (enhance && enhance->set_mode)
4394 ret = enhance->set_mode(enhance, ubuffer[1]);
4395 break;
4396 }
4397
4398 case DISP_ENHANCE_GET_MODE:
4399 {
4400 if (enhance && enhance->get_mode)
4401 ret = enhance->get_mode(enhance);
4402 break;
4403 }
4404
4405 /* ---smart backlight -- */
4406 case DISP_SMBL_ENABLE:
4407 {
4408 if (smbl && smbl->enable)
4409 ret = smbl->enable(smbl);
4410 break;
4411 }
4412
4413 case DISP_SMBL_DISABLE:
4414 {
4415 if (smbl && smbl->disable)
4416 ret = smbl->disable(smbl);
4417 break;
4418 }
4419
4420 case DISP_SMBL_SET_WINDOW:
4421 {
4422 struct disp_rect rect;
4423
4424 if (copy_from_user(&rect, (void __user *)ubuffer[1],
4425 sizeof(struct disp_rect))) {
4426 __wrn("copy_from_user fail\n");
4427 return -EFAULT;
4428 }
4429 if (smbl && smbl->set_window)
4430 ret = smbl->set_window(smbl, &rect);
4431 break;
4432 }
4433
4434 /* ---capture -- */
4435 case DISP_CAPTURE_START:
4436 {
4437 if (cptr && cptr->start)
4438 ret = cptr->start(cptr);
4439 break;
4440 }
4441
4442 case DISP_CAPTURE_STOP:
4443 {
4444 if (cptr && cptr->stop)
4445 ret = cptr->stop(cptr);
4446 break;
4447 }
4448
4449 case DISP_CAPTURE_COMMIT:
4450 {
4451 struct disp_capture_info info;
4452
4453 if (copy_from_user(&info, (void __user *)ubuffer[1],
4454 sizeof(struct disp_capture_info))) {
4455 __wrn("copy_from_user fail\n");
4456 return -EFAULT;
4457 }
4458 if (cptr && cptr->commmit)
4459 ret = cptr->commmit(cptr, &info);
4460 break;
4461 }
4462 case DISP_CAPTURE_COMMIT2:
4463 {
4464 struct disp_capture_info2 info;
4465
4466 if (copy_from_user(&info,
4467 (void __user *)ubuffer[1],
4468 sizeof(struct disp_capture_info2))) {
4469 __wrn("copy_from_user fail\n");
4470 return -EFAULT;
4471 }
4472 if (cptr && cptr->commmit2)
4473 ret = cptr->commmit2(cptr, &info);
4474 break;
4475 }
4476
4477 /* ----for test---- */
4478 case DISP_MEM_REQUEST:
4479 ret = disp_mem_request(ubuffer[0], ubuffer[1]);
4480 break;
4481
4482 case DISP_MEM_RELEASE:
4483 ret = disp_mem_release(ubuffer[0]);
4484 break;
4485
4486 case DISP_MEM_GETADR:
if (ubuffer[0] >= DISP_MEM_NUM)
return -EFAULT;
4487 return g_disp_mm[ubuffer[0]].mem_start;
4488 #if defined(SUPPORT_VDPO)
4489 case DISP_VDPO_SET_CONFIG:
4490 {
4491 struct disp_vdpo_config vdpo_para;
4492
4493 if (copy_from_user(
4494 &vdpo_para, (void __user *)ubuffer[1],
4495 sizeof(struct disp_vdpo_config))) {
4496 __wrn("copy_from_user fail\n");
4497 return -EFAULT;
4498 }
4499 if (mgr && mgr->device)
4500 disp_vdpo_set_config(mgr->device, &vdpo_para);
4501 break;
4502 }
4503 #endif /*endif SUPPORT_VDPO*/
4504
4505 #if defined(CONFIG_SUNXI_DISP2_FB_ROTATION_SUPPORT)
4506 case DISP_ROTATION_SW_SET_ROT:
4507 {
4508 int num_screens = bsp_disp_feat_get_num_screens();
4509 u32 degree, chn, lyr_id;
4510
4511 mutex_lock(&g_disp_drv.mlock);
4512 if (mgr == NULL)
4513 pr_warn("mgr is null\n");
4515 else if (mgr->rot_sw == NULL)
4516 pr_warn("mgr->rot_sw is null\n");
4518 if (!mgr || !mgr->rot_sw || num_screens <= ubuffer[0]) {
4519 ret = -1;
4520 mutex_unlock(&g_disp_drv.mlock);
4521 break;
4522 }
4523 degree = ubuffer[3];
4524 switch (degree) {
4525 case ROTATION_SW_0:
4526 case ROTATION_SW_90:
4527 case ROTATION_SW_180:
4528 case ROTATION_SW_270:
4529 chn = ubuffer[1];
4530 lyr_id = ubuffer[2];
4531 ret = mgr->rot_sw->set_layer_degree(mgr->rot_sw, chn, lyr_id, degree);
4532 break;
4533 default:
4534 ret = -1;
4535 }
4536 mutex_unlock(&g_disp_drv.mlock);
4537 break;
4538 }
4539
4540 case DISP_ROTATION_SW_GET_ROT:
4541 {
4542 int num_screens = bsp_disp_feat_get_num_screens();
4543 u32 chn, lyr_id;
4544
4545 mutex_lock(&g_disp_drv.mlock);
4546 if (mgr && mgr->rot_sw && num_screens > ubuffer[0]) {
4547 chn = ubuffer[1];
4548 lyr_id = ubuffer[2];
4549 ret = mgr->rot_sw->get_layer_degree(mgr->rot_sw, chn, lyr_id);
4550 } else {
4551 ret = -1;
4552 }
4553 mutex_unlock(&g_disp_drv.mlock);
4554 break;
4555 }
4556 #endif
4557
4558 case DISP_LCD_CHECK_OPEN_FINISH:
4559 {
4560 if (mgr && mgr->device) {
4561 if (mgr->device->is_enabled)
4562 return mgr->device->is_enabled(mgr->device);
4563 else
4564 return -1;
4565 } else
4566 return -1;
4567 }
4568
4569 case DISP_LCD_BACKLIGHT_ENABLE:
4570 {
4571 if (mgr && mgr->device) {
4572 if (mgr->device->pwm_enable)
4573 mgr->device->pwm_enable(mgr->device);
4574 if (mgr->device->backlight_enable)
4575 mgr->device->backlight_enable(mgr->device);
4576
4577 return 0;
4578 }
4579 return -1;
4581 }
4582 case DISP_LCD_BACKLIGHT_DISABLE:
4583 {
4584 if (mgr && mgr->device) {
4585 if (mgr->device->pwm_disable)
4586 mgr->device->pwm_disable(mgr->device);
4587 if (mgr->device->backlight_disable)
4588 mgr->device->backlight_disable(mgr->device);
4589 return 0;
4590 }
4591 return -1;
4593 }
4594 case DISP_SET_KSC_PARA:
4595 {
4597 struct disp_ksc_info ksc;
4598
4599 if (copy_from_user(&ksc, (void __user *)ubuffer[1],
4600 sizeof(struct disp_ksc_info))) {
4601 __wrn("copy_from_user fail\n");
4602 return -EFAULT;
4603 }
4604 if (mgr && mgr->set_ksc_para)
4605 ret = mgr->set_ksc_para(mgr, &ksc);
4606
4607 break;
4608 }
4609 case DISP_NODE_LCD_MESSAGE_REQUEST:
4610 {
4611 int ret;
4612 struct para lcd_debug_para;
4613 struct para lcd_debug_para_tmp;
4614 struct dt_property *dt_prop;
4615 char prop_name[32] = {0};
4616 struct dt_property *dt_prop_dts;
4617 char prop_dts_name[32] = {0};
4618 unsigned char value[100] = {0};
4619 unsigned char dts_value[100] = {0};
4620
4621 if (copy_from_user(&lcd_debug_para, (void __user *)arg, sizeof(struct para))) {
4622 return -2;
4623 }
4624 if (copy_from_user(&lcd_debug_para_tmp, (void __user *)arg, sizeof(struct para))) {
4625 return -2;
4626 }
4627 dt_prop = &lcd_debug_para.prop_src;
4628
4629 ret = copy_from_user(prop_name, dt_prop->name, 32);
4630
4631 if (ret)
4632 return -2;
4633
if (dt_prop->length > sizeof(value))
return -2;
4634 ret = copy_from_user(value, dt_prop->value, dt_prop->length);
4635
4636 if (ret)
4637 return -2;
4638
4639 dt_prop->name = prop_name;
4640 dt_prop->value = (void *)value;
4641
4642 dt_prop_dts = &lcd_debug_para.prop_dts;
4643
4644 ret = copy_from_user(prop_dts_name, dt_prop_dts->name, 32);
4645
4646 if (ret)
4647 return -2;
4648
if (dt_prop_dts->length > sizeof(dts_value))
return -2;
4649 ret = copy_from_user(dts_value, dt_prop_dts->value, dt_prop_dts->length);
4650
4651 if (ret)
4652 return -2;
4653
4654 dt_prop_dts->name = prop_dts_name;
4655 dt_prop_dts->value = (void *)dts_value;
4656
4657 ret = handle_request(&lcd_debug_para);
4658
4659 if (ret)
4660 return -1;
4661
4662 if (copy_to_user((void __user *)lcd_debug_para_tmp.prop_dts.name, lcd_debug_para.prop_dts.name, 32))
4663 return -3;
4664
4665 if (copy_to_user((void __user *)lcd_debug_para_tmp.prop_dts.value, lcd_debug_para.prop_dts.value, lcd_debug_para.prop_dts.length))
4666 return -3;
4667
4668 if (copy_to_user(&((struct para *)arg)->prop_dts.length, &lcd_debug_para.prop_dts.length, sizeof(lcd_debug_para.prop_dts.length)))
4669 return -3;
4670
4671 return ret;
4672 }
4673 case DISP_RELOAD_LCD:
4674 {
4675 reload_lcd();
4676 break;
4677 }
4678
4679
4680 default:
4681 ret = disp_ioctl_extend(cmd, (unsigned long)ubuffer);
4682 break;
4683 }
4684
4685 return ret;
4686 }
4687
4688 #ifdef CONFIG_COMPAT
4689 static long disp_compat_ioctl(struct file *file, unsigned int cmd,
4690 unsigned long arg)
4691 {
4692 compat_uptr_t karg[4];
4693 unsigned long __user *ubuffer;
4694
4695 if (copy_from_user
4696 ((void *)karg, (void __user *)arg, 4 * sizeof(compat_uptr_t))) {
4697 __wrn("copy_from_user fail\n");
4698 return -EFAULT;
4699 }
4700
4701 ubuffer = compat_alloc_user_space(4 * sizeof(unsigned long));
4702 #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0)
4703 if (!access_ok(VERIFY_WRITE, ubuffer, 4 * sizeof(unsigned long)))
4704 #else
4705 if (!access_ok(ubuffer, 4 * sizeof(unsigned long)))
4706 #endif
4707 return -EFAULT;
4708
4709 if (put_user(karg[0], &ubuffer[0]) ||
4710 put_user(karg[1], &ubuffer[1]) ||
4711 put_user(karg[2], &ubuffer[2]) || put_user(karg[3], &ubuffer[3])) {
4712 __wrn("put_user fail\n");
4713 return -EFAULT;
4714 }
4715
4716 /*
4717 if (cmd == DISP_HWC_COMMIT)
4718 return disp_compat_ioctl_extend(cmd, (unsigned long)ubuffer);
4719 */
4720
4721 return disp_ioctl(file, cmd, (unsigned long)ubuffer);
4722 }
4723 #endif
4724
4725 static unsigned int disp_vsync_poll(struct file *file, poll_table *wait)
4726 {
4727 return vsync_poll(file, wait);
4728 }
4729
4730 static const struct file_operations disp_fops = {
4731 .owner = THIS_MODULE,
4732 .open = disp_open,
4733 .release = disp_release,
4734 .write = disp_write,
4735 .read = disp_read,
4736 .unlocked_ioctl = disp_ioctl,
4737 #ifdef CONFIG_COMPAT
4738 .compat_ioctl = disp_compat_ioctl,
4739 #endif
4740 .mmap = disp_mmap,
4741 .poll = disp_vsync_poll,
4742 };
4743
4744 #ifndef CONFIG_OF
4745 static struct platform_device disp_device = {
4746 .name = "disp",
4747 .id = -1,
4748 .num_resources = ARRAY_SIZE(disp_resource),
4749 .resource = disp_resource,
4750 .dev = {
4751 .power = {
4752 .async_suspend = 1,
4753 }
4754 }
4755 };
4756 #else
4757 static const struct of_device_id sunxi_disp_match[] = {
4758 {.compatible = "allwinner,sun8iw10p1-disp",},
4759 {.compatible = "allwinner,sun50i-disp",},
4760 {.compatible = "allwinner,sunxi-disp",},
4761 {},
4762 };
4763 #endif
4764
4765 static struct platform_driver disp_driver = {
4766 .probe = disp_probe,
4767 .remove = disp_remove,
4768 .shutdown = disp_shutdown,
4769 .driver = {
4770 .name = "disp",
4771 .owner = THIS_MODULE,
4772 .pm = &disp_runtime_pm_ops,
4773 .of_match_table = of_match_ptr(sunxi_disp_match),
4774 },
4775 };
4776
4777 #ifdef CONFIG_DEVFREQ_DRAM_FREQ_IN_VSYNC
4778 struct dramfreq_vb_time_ops {
4779 int (*get_vb_time)(void);
4780 int (*get_next_vb_time)(void);
4781 int (*is_in_vb)(void);
4782 };
4783 static struct dramfreq_vb_time_ops dramfreq_ops = {
4784 .get_vb_time = bsp_disp_get_vb_time,
4785 .get_next_vb_time = bsp_disp_get_next_vb_time,
4786 .is_in_vb = bsp_disp_is_in_vb,
4787 };
4788 extern int dramfreq_set_vb_time_ops(struct dramfreq_vb_time_ops *ops);
4789 #endif
4790
4791 static int __init disp_module_init(void)
4792 {
4793 int ret = 0, err;
4794
4795 pr_info("[DISP]%s\n", __func__);
4796
4797 err = alloc_chrdev_region(&devid, 0, 1, "disp");
if (err) {
__wrn("alloc_chrdev_region fail\n");
return err;
}
4798 my_cdev = cdev_alloc();
if (!my_cdev) {
__wrn("cdev_alloc fail\n");
return -ENOMEM;
}
4799 cdev_init(my_cdev, &disp_fops);
4800 my_cdev->owner = THIS_MODULE;
4801 err = cdev_add(my_cdev, devid, 1);
4802 if (err) {
4803 __wrn("cdev_add fail\n");
4804 return -1;
4805 }
4806
4807 disp_class = class_create(THIS_MODULE, "disp");
4808 if (IS_ERR(disp_class)) {
4809 __wrn("class_create fail\n");
4810 return -1;
4811 }
4812
4813 display_dev = device_create(disp_class, NULL, devid, NULL, "disp");
4814
4815 #ifndef CONFIG_OF
4816 ret = platform_device_register(&disp_device);
4817 #endif
4818 if (ret == 0)
4819 ret = platform_driver_register(&disp_driver);
4820 #ifdef CONFIG_DISP2_SUNXI_DEBUG
4821 dispdbg_init();
4822 #endif
4823
4824 #ifdef CONFIG_DEVFREQ_DRAM_FREQ_IN_VSYNC
4825 dramfreq_set_vb_time_ops(&dramfreq_ops);
4826 #endif
4827
4828 pr_info("[DISP]%s finish, ret : %d\n", __func__, ret);
4829
4830 return ret;
4831 }
4832
4833 static void __exit disp_module_exit(void)
4834 {
4835 __inf("disp_module_exit\n");
4836
4837 #ifdef CONFIG_DISP2_SUNXI_DEBUG
4838 dispdbg_exit();
4839 #endif
4840
4841 disp_exit();
4842
4843 platform_driver_unregister(&disp_driver);
4844 #ifndef CONFIG_OF
4845 platform_device_unregister(&disp_device);
4846 #endif
4847
4848 device_destroy(disp_class, devid);
4849 class_destroy(disp_class);
4850
4851 cdev_del(my_cdev);
4852 }
4853
4854 module_init(disp_module_init);
4855 module_exit(disp_module_exit);
4856
4857 MODULE_AUTHOR("tan");
4858 MODULE_DESCRIPTION("display driver");
4859 MODULE_LICENSE("GPL");
4860 MODULE_ALIAS("platform:disp");
4861