/*
 * Allwinner SoCs display driver.
 *
 * Copyright (C) 2016 Allwinner.
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/reset.h>
#include "disp_manager.h"
#include "disp_display.h"
#include "../disp_trace.h"
#include "disp_rtwb.h"

#define MAX_LAYERS 16
#define FORCE_SYNC_THRESHOLD 4

struct disp_manager_private_data {
	bool applied;
	bool enabled;
	bool unmap_dmabuf;
	bool setting;
	bool color_range_modified;
	struct disp_manager_data *cfg;

	s32 (*shadow_protect)(u32 disp, bool protect);

	u32 reg_base;
	u32 irq_no;
	struct clk *clk;
	struct clk *clk_bus;
	struct clk *clk_parent;
	struct clk *clk_extra;
	struct clk *clk_dpss;

	struct reset_control *rst;
#if defined(HAVE_DEVICE_COMMON_MODULE)
	struct reset_control *rst_extra;
#endif
	struct reset_control *rst_dpss;

	unsigned int layers_using;
	bool sync;
	bool force_sync;
	unsigned int nosync_cnt;
	unsigned int force_sync_cnt;
	bool err;
	unsigned int err_cnt;
	unsigned int dmabuf_unmap_skip_cnt;
	unsigned int dmabuf_unmap_skip_cnt_max;
	struct list_head dmabuf_list;
	unsigned int dmabuf_cnt;
	unsigned int dmabuf_cnt_max;

	struct disp_irq_info irq_info;
	wait_queue_head_t wait_rcq_finish_queue;
	atomic_t wati_rcq_finish_flag;
	bool iommu_en_flag;
	unsigned int iommu_master_id;
};

static spinlock_t mgr_data_lock;
static struct mutex mgr_mlock;

static struct disp_manager *mgrs;
static struct disp_manager_private_data *mgr_private;
static struct disp_manager_data *mgr_cfgs;

static struct disp_layer_config_data *lyr_cfgs;

static int rcq_init_finished;

/*
 * layer unit
 */
struct disp_layer_private_data {
	struct disp_layer_config_data *cfg;
	s32 (*shadow_protect)(u32 sel, bool protect);
};

static struct disp_layer *lyrs;
static struct disp_layer_private_data *lyr_private;
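
/*
 * disp_get_layer - look up a layer handle by (disp, chn, layer_id).
 *
 * Walks the flat `lyrs` array, which stores the layers of every screen
 * back to back, and returns the first entry whose ids match.
 */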
struct disp_layer *disp_get_layer(u32 disp, u32 chn, u32 layer_id)
{
	u32 num_screens, max_num_layers = 0;
	struct disp_layer *lyr = lyrs;
	int i;

	num_screens = bsp_disp_feat_get_num_screens();
	if (disp >= num_screens) {
		DE_WRN("disp %d is out of range %d\n", disp, num_screens);
		return NULL;
	}

	for (i = 0; i < num_screens; i++)
		max_num_layers += bsp_disp_feat_get_num_layers(i);

	for (i = 0; i < max_num_layers; i++) {
		if ((lyr->disp == disp) && (lyr->chn == chn)
		    && (lyr->id == layer_id)) {
			DE_INF("%d,%d,%d, name=%s\n", disp, chn, layer_id,
			       lyr->name);
			return lyr;
		}
		lyr++;
	}

	DE_WRN("%s (%d,%d,%d) fail\n", __func__, disp, chn, layer_id);
	return NULL;
}

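/*
 * disp_get_layer_1 - look up a layer by its per-display linear index.
 *
 * layer_id is offset by the layer counts of all preceding displays so
 * that it indexes the global `lyrs` array directly.
 */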
struct disp_layer *disp_get_layer_1(u32 disp, u32 layer_id)
{
	u32 num_screens, num_layers;
	u32 i, k;
	u32 layer_index = 0, start_index = 0;

	num_screens = bsp_disp_feat_get_num_screens();
	if (disp >= num_screens) {
		DE_WRN("disp %d is out of range %d\n", disp, num_screens);
		return NULL;
	}

	for (i = 0; i < disp; i++)
		start_index += bsp_disp_feat_get_num_layers(i);

	layer_id += start_index;

	for (i = 0; i < num_screens; i++) {
		num_layers = bsp_disp_feat_get_num_layers(i);
		for (k = 0; k < num_layers; k++) {
			if (layer_index == layer_id) {
				DE_INF("disp%d layer%d: %d,%d,%d\n",
				       disp, layer_id,
				       lyrs[layer_index].disp,
				       lyrs[layer_index].chn,
				       lyrs[layer_index].id);
				return &lyrs[layer_index];
			}
			layer_index++;
		}
	}

	DE_WRN("%s fail\n", __func__);
	return NULL;
}

static struct disp_layer_private_data *disp_lyr_get_priv(struct disp_layer *lyr)
{
	if (lyr == NULL) {
		DE_WRN("NULL hdl!\n");
		return NULL;
	}

	return (struct disp_layer_private_data *)lyr->data;
}

/** __disp_config_transfer2inner - transfer disp_layer_config to inner one
 */
s32 __disp_config_transfer2inner(
	struct disp_layer_config_inner *config_inner,
	struct disp_layer_config *config)
{
	config_inner->enable = config->enable;
	config_inner->channel = config->channel;
	config_inner->layer_id = config->layer_id;

	if (config->enable == 0)
		memset(&(config->info), 0, sizeof(config->info));

	/* layer info */
	config_inner->info.mode = config->info.mode;
	config_inner->info.zorder = config->info.zorder;
	config_inner->info.alpha_mode = config->info.alpha_mode;
	config_inner->info.alpha_value = config->info.alpha_value;
	memcpy(&config_inner->info.screen_win,
	       &config->info.screen_win,
	       sizeof(struct disp_rect));
	config_inner->info.b_trd_out = config->info.b_trd_out;
	config_inner->info.out_trd_mode = config->info.out_trd_mode;
	config_inner->info.id = config->info.id;
	/* fb info */
	memcpy(config_inner->info.fb.addr,
	       config->info.fb.addr,
	       sizeof(long long) * 3);
	memcpy(config_inner->info.fb.size,
	       config->info.fb.size,
	       sizeof(struct disp_rectsz) * 3);
	memcpy(config_inner->info.fb.align,
	       config->info.fb.align, sizeof(int) * 3);
	config_inner->info.fb.format = config->info.fb.format;
	config_inner->info.fb.color_space = config->info.fb.color_space;
	memcpy(config_inner->info.fb.trd_right_addr,
	       config->info.fb.trd_right_addr,
	       sizeof(int) * 3);
	config_inner->info.fb.pre_multiply = config->info.fb.pre_multiply;
	memcpy(&config_inner->info.fb.crop,
	       &config->info.fb.crop,
	       sizeof(struct disp_rect64));
	config_inner->info.fb.flags = config->info.fb.flags;
	config_inner->info.fb.scan = config->info.fb.scan;
	config_inner->info.fb.eotf = DISP_EOTF_UNDEF;
	config_inner->info.fb.fbd_en = 0;
	config_inner->info.fb.lbc_en = config->info.fb.lbc_en;
	memcpy(&config_inner->info.fb.lbc_info,
	       &config->info.fb.lbc_info,
	       sizeof(struct disp_lbc_info));
	config_inner->info.fb.metadata_buf = 0;
	config_inner->info.fb.fd = -911;
	config_inner->info.fb.metadata_size = 0;
	config_inner->info.fb.metadata_flag = 0;
	config_inner->info.atw.used = 0;

	if (config_inner->info.mode == LAYER_MODE_COLOR)
		config_inner->info.color = config->info.color;

	return 0;
}

/** __disp_config2_transfer2inner - transfer disp_layer_config2 to inner one
 */
s32 __disp_config2_transfer2inner(struct disp_layer_config_inner *config_inner,
				  struct disp_layer_config2 *config2)
{
	config_inner->enable = config2->enable;
	config_inner->channel = config2->channel;
	config_inner->layer_id = config2->layer_id;

	if (config2->enable == 0)
		memset(&(config2->info), 0, sizeof(config2->info));

	/* layer info */
	config_inner->info.mode = config2->info.mode;
	config_inner->info.zorder = config2->info.zorder;
	config_inner->info.alpha_mode = config2->info.alpha_mode;
	config_inner->info.alpha_value = config2->info.alpha_value;
	memcpy(&config_inner->info.screen_win,
	       &config2->info.screen_win,
	       sizeof(struct disp_rect));
	config_inner->info.b_trd_out = config2->info.b_trd_out;
	config_inner->info.out_trd_mode = config2->info.out_trd_mode;
	/* fb info */
	config_inner->info.fb.fd = config2->info.fb.fd;
	memcpy(config_inner->info.fb.size,
	       config2->info.fb.size,
	       sizeof(struct disp_rectsz) * 3);
	memcpy(config_inner->info.fb.align,
	       config2->info.fb.align, sizeof(int) * 3);
	config_inner->info.fb.format = config2->info.fb.format;
	config_inner->info.fb.color_space = config2->info.fb.color_space;
	config_inner->info.fb.trd_right_fd = config2->info.fb.trd_right_fd;
	config_inner->info.fb.pre_multiply = config2->info.fb.pre_multiply;
	memcpy(&config_inner->info.fb.crop,
	       &config2->info.fb.crop,
	       sizeof(struct disp_rect64));
	config_inner->info.fb.flags = config2->info.fb.flags;
	config_inner->info.fb.scan = config2->info.fb.scan;
	config_inner->info.fb.depth = config2->info.fb.depth;
	/* hdr related */
	config_inner->info.fb.eotf = config2->info.fb.eotf;
	config_inner->info.fb.fbd_en = config2->info.fb.fbd_en;
	config_inner->info.fb.lbc_en = config2->info.fb.lbc_en;
	memcpy(&config_inner->info.fb.lbc_info,
	       &config2->info.fb.lbc_info,
	       sizeof(struct disp_lbc_info));
	config_inner->info.fb.metadata_fd = config2->info.fb.metadata_fd;
	config_inner->info.fb.metadata_size = config2->info.fb.metadata_size;
	config_inner->info.fb.metadata_flag =
	    config2->info.fb.metadata_flag;

	config_inner->info.id = config2->info.id;
	/* atw related */
	config_inner->info.atw.used = config2->info.atw.used;
	config_inner->info.atw.mode = config2->info.atw.mode;
	config_inner->info.atw.b_row = config2->info.atw.b_row;
	config_inner->info.atw.b_col = config2->info.atw.b_col;
	config_inner->info.atw.cof_fd = config2->info.atw.cof_fd;
	if (config_inner->info.mode == LAYER_MODE_COLOR)
		config_inner->info.color = config2->info.color;

#if defined(DE_VERSION_V33X)
	/* TODO: move transform memory to atw */
	config_inner->info.transform = config2->info.transform;
	memcpy(&config_inner->info.snr, &config2->info.snr,
	       sizeof(struct disp_snr_info));
#endif
	return 0;
}
EXPORT_SYMBOL(__disp_config2_transfer2inner);

/** __disp_inner_transfer2config - transfer inner to disp_layer_config
 */
s32 __disp_inner_transfer2config(struct disp_layer_config *config,
				 struct disp_layer_config_inner *config_inner)
{
	config->enable = config_inner->enable;
	config->channel = config_inner->channel;
	config->layer_id = config_inner->layer_id;
	/* layer info */
	config->info.mode = config_inner->info.mode;
	config->info.zorder = config_inner->info.zorder;
	config->info.alpha_mode = config_inner->info.alpha_mode;
	config->info.alpha_value = config_inner->info.alpha_value;
	memcpy(&config->info.screen_win,
	       &config_inner->info.screen_win,
	       sizeof(struct disp_rect));
	config->info.b_trd_out = config_inner->info.b_trd_out;
	config->info.out_trd_mode = config_inner->info.out_trd_mode;
	config->info.id = config_inner->info.id;
	/* fb info */
	memcpy(config->info.fb.addr,
	       config_inner->info.fb.addr,
	       sizeof(long long) * 3);
	memcpy(config->info.fb.size,
	       config_inner->info.fb.size,
	       sizeof(struct disp_rectsz) * 3);
	memcpy(config->info.fb.align,
	       config_inner->info.fb.align, sizeof(int) * 3);
	config->info.fb.format = config_inner->info.fb.format;
	config->info.fb.color_space = config_inner->info.fb.color_space;
	memcpy(config->info.fb.trd_right_addr,
	       config_inner->info.fb.trd_right_addr,
	       sizeof(int) * 3);
	config->info.fb.pre_multiply = config_inner->info.fb.pre_multiply;
	memcpy(&config->info.fb.crop,
	       &config_inner->info.fb.crop,
	       sizeof(struct disp_rect64));
	config->info.fb.flags = config_inner->info.fb.flags;
	config->info.fb.scan = config_inner->info.fb.scan;

	config->info.fb.lbc_en = config_inner->info.fb.lbc_en;
	memcpy(&config->info.fb.lbc_info,
	       &config_inner->info.fb.lbc_info,
	       sizeof(struct disp_lbc_info));

	if (config->info.mode == LAYER_MODE_COLOR)
		config->info.color = config_inner->info.color;

	return 0;
}

/** __disp_inner_transfer2config2 - transfer inner to disp_layer_config2
 */
s32 __disp_inner_transfer2config2(struct disp_layer_config2 *config2,
				  struct disp_layer_config_inner *config_inner)
{
	config2->enable = config_inner->enable;
	config2->channel = config_inner->channel;
	config2->layer_id = config_inner->layer_id;
	/* layer info */
	config2->info.mode = config_inner->info.mode;
	config2->info.zorder = config_inner->info.zorder;
	config2->info.alpha_mode = config_inner->info.alpha_mode;
	config2->info.alpha_value = config_inner->info.alpha_value;
	memcpy(&config2->info.screen_win,
	       &config_inner->info.screen_win,
	       sizeof(struct disp_rect));
	config2->info.b_trd_out = config_inner->info.b_trd_out;
	config2->info.out_trd_mode = config_inner->info.out_trd_mode;
	/* fb info */
	config2->info.fb.fd = config_inner->info.fb.fd;
	memcpy(config2->info.fb.size,
	       config_inner->info.fb.size,
	       sizeof(struct disp_rectsz) * 3);
	memcpy(config2->info.fb.align,
	       config_inner->info.fb.align, sizeof(int) * 3);
	config2->info.fb.format = config_inner->info.fb.format;
	config2->info.fb.color_space = config_inner->info.fb.color_space;
	config2->info.fb.trd_right_fd = config_inner->info.fb.trd_right_fd;
	config2->info.fb.pre_multiply = config_inner->info.fb.pre_multiply;
	memcpy(&config2->info.fb.crop,
	       &config_inner->info.fb.crop,
	       sizeof(struct disp_rect64));
	config2->info.fb.flags = config_inner->info.fb.flags;
	config2->info.fb.scan = config_inner->info.fb.scan;
	config2->info.fb.depth = config_inner->info.fb.depth;
	/* hdr related */
	config2->info.fb.eotf = config_inner->info.fb.eotf;
	config2->info.fb.fbd_en = config_inner->info.fb.fbd_en;

	config2->info.fb.lbc_en = config_inner->info.fb.lbc_en;
	memcpy(&config2->info.fb.lbc_info,
	       &config_inner->info.fb.lbc_info,
	       sizeof(struct disp_lbc_info));

	config2->info.fb.metadata_fd = config_inner->info.fb.metadata_fd;
	config2->info.fb.metadata_size = config_inner->info.fb.metadata_size;
	config2->info.fb.metadata_flag =
	    config_inner->info.fb.metadata_flag;

	config2->info.id = config_inner->info.id;
	/* atw related */
	config2->info.atw.used = config_inner->info.atw.used;
	config2->info.atw.mode = config_inner->info.atw.mode;
	config2->info.atw.b_row = config_inner->info.atw.b_row;
	config2->info.atw.b_col = config_inner->info.atw.b_col;
	config2->info.atw.cof_fd = config_inner->info.atw.cof_fd;
	if (config2->info.mode == LAYER_MODE_COLOR)
		config2->info.color = config_inner->info.color;

#if defined(DE_VERSION_V33X)
	config2->info.transform = config_inner->info.transform;
	memcpy(&config2->info.snr, &config_inner->info.snr,
	       sizeof(struct disp_snr_info));
#endif

	return 0;
}
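
/*
 * Usage sketch (hypothetical caller, not part of this file): the four
 * helpers above convert between the user-facing layer configs and the
 * inner representation consumed by the apply path, e.g.:
 *
 *	struct disp_layer_config_inner inner;
 *	struct disp_layer_config2 cfg2 = { .enable = 1 };
 *
 *	__disp_config2_transfer2inner(&inner, &cfg2);
 *	... program the hardware from `inner` ...
 *	__disp_inner_transfer2config2(&cfg2, &inner);
 */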

static s32
disp_lyr_set_manager(struct disp_layer *lyr, struct disp_manager *mgr)
{
	unsigned long flags;
	struct disp_layer_private_data *lyrp = disp_lyr_get_priv(lyr);

	if ((lyr == NULL) || (lyrp == NULL) || (mgr == NULL)) {
		DE_WRN("NULL hdl!\n");
		return -1;
	}

	spin_lock_irqsave(&mgr_data_lock, flags);
	lyr->manager = mgr;
	list_add_tail(&lyr->list, &mgr->lyr_list);
	spin_unlock_irqrestore(&mgr_data_lock, flags);

	return DIS_SUCCESS;
}

static s32 disp_lyr_unset_manager(struct disp_layer *lyr)
{
	unsigned long flags;
	struct disp_layer_private_data *lyrp = disp_lyr_get_priv(lyr);

	if ((lyr == NULL) || (lyrp == NULL)) {
		DE_WRN("NULL hdl!\n");
		return -1;
	}

	spin_lock_irqsave(&mgr_data_lock, flags);
	lyr->manager = NULL;
	list_del(&lyr->list);
	spin_unlock_irqrestore(&mgr_data_lock, flags);

	return DIS_SUCCESS;
}

static s32
disp_lyr_check(struct disp_layer *lyr, struct disp_layer_config *config)
{
	struct disp_layer_private_data *lyrp = disp_lyr_get_priv(lyr);

	if ((lyr == NULL) || (lyrp == NULL)) {
		DE_WRN("NULL hdl!\n");
		return -1;
	}

	return DIS_SUCCESS;
}

static s32 disp_lyr_check2(struct disp_layer *lyr,
			   struct disp_layer_config2 *config)
{
	struct disp_layer_private_data *lyrp = disp_lyr_get_priv(lyr);

	if ((lyr == NULL) || (lyrp == NULL)) {
		DE_WRN("NULL hdl!\n");
		return -1;
	}

	return DIS_SUCCESS;
}

static s32
disp_lyr_save_and_dirty_check(struct disp_layer *lyr,
			      struct disp_layer_config *config)
{
	unsigned long flags;
	struct disp_layer_private_data *lyrp = disp_lyr_get_priv(lyr);

	if ((lyr == NULL) || (lyrp == NULL)) {
		DE_WRN("NULL hdl!\n");
		return -1;
	}

	spin_lock_irqsave(&mgr_data_lock, flags);
	if (lyrp->cfg) {
		struct disp_layer_config_inner *pre_config = &lyrp->cfg->config;

		if ((pre_config->enable != config->enable) ||
		    (pre_config->info.fb.addr[0] != config->info.fb.addr[0])
		    || (pre_config->info.fb.format != config->info.fb.format)
		    || (pre_config->info.fb.flags != config->info.fb.flags))
			lyrp->cfg->flag |= LAYER_ATTR_DIRTY;
		if ((pre_config->info.fb.size[0].width !=
		     config->info.fb.size[0].width)
		    || (pre_config->info.fb.size[0].height !=
			config->info.fb.size[0].height)
		    || (pre_config->info.fb.crop.width !=
			config->info.fb.crop.width)
		    || (pre_config->info.fb.crop.height !=
			config->info.fb.crop.height)
		    || (pre_config->info.screen_win.width !=
			config->info.screen_win.width)
		    || (pre_config->info.screen_win.height !=
			config->info.screen_win.height))
			lyrp->cfg->flag |= LAYER_SIZE_DIRTY;
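		/*
		 * Note: the assignment below overrides the fine-grained
		 * checks above; every saved config currently forces a
		 * full update.
		 */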
		lyrp->cfg->flag = LAYER_ALL_DIRTY;

		if ((pre_config->enable == config->enable) &&
		    (config->enable == 0))
			lyrp->cfg->flag = 0;
		__disp_config_transfer2inner(&lyrp->cfg->config, config);
	} else {
		DE_INF("cfg is NULL\n");
	}
	spin_unlock_irqrestore(&mgr_data_lock, flags);
	DE_INF("layer:ch%d, layer%d, format=%d, size=<%d,%d>, crop=<%lld,%lld,%lld,%lld>, frame=<%d,%d>, en=%d addr=[0x%llx,0x%llx,0x%llx]\n",
	       config->channel, config->layer_id, config->info.fb.format,
	       config->info.fb.size[0].width, config->info.fb.size[0].height,
	       config->info.fb.crop.x >> 32, config->info.fb.crop.y >> 32,
	       config->info.fb.crop.width >> 32,
	       config->info.fb.crop.height >> 32, config->info.screen_win.width,
	       config->info.screen_win.height, config->enable,
	       config->info.fb.addr[0], config->info.fb.addr[1],
	       config->info.fb.addr[2]);

	return DIS_SUCCESS;
}

static s32 disp_lyr_save_and_dirty_check2(struct disp_layer *lyr,
					  struct disp_layer_config2 *config)
{
	unsigned long flags;
	struct disp_layer_private_data *lyrp = disp_lyr_get_priv(lyr);

	if ((lyr == NULL) || (lyrp == NULL)) {
		DE_WRN("NULL hdl!\n");
		return -1;
	}

	spin_lock_irqsave(&mgr_data_lock, flags);
	if (lyrp->cfg) {
		struct disp_layer_config_inner *pre_cfg = &lyrp->cfg->config;
		struct disp_layer_info_inner *pre_info = &pre_cfg->info;
		struct disp_layer_info2 *info = &config->info;
		struct disp_fb_info_inner *pre_fb = &pre_info->fb;
		struct disp_fb_info2 *fb = &info->fb;

		if ((pre_cfg->enable != config->enable) ||
		    (pre_fb->fd != fb->fd) ||
		    (pre_fb->format != fb->format) ||
		    (pre_fb->flags != fb->flags))
			lyrp->cfg->flag |= LAYER_ATTR_DIRTY;
		if ((pre_fb->size[0].width != fb->size[0].width) ||
		    (pre_fb->size[0].height != fb->size[0].height) ||
		    (pre_fb->crop.width != fb->crop.width) ||
		    (pre_fb->crop.height != fb->crop.height) ||
		    (pre_info->screen_win.width != info->screen_win.width) ||
		    (pre_info->screen_win.height != info->screen_win.height))
			lyrp->cfg->flag |= LAYER_SIZE_DIRTY;

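		/* as above, a full update is currently forced on every save */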
		lyrp->cfg->flag = LAYER_ALL_DIRTY;

		if ((pre_cfg->enable == config->enable) &&
		    (config->enable == 0))
			lyrp->cfg->flag = 0;
		__disp_config2_transfer2inner(&lyrp->cfg->config, config);
	} else {
		DE_INF("cfg is NULL\n");
	}
	spin_unlock_irqrestore(&mgr_data_lock, flags);

	return DIS_SUCCESS;
}

static s32 disp_lyr_is_dirty(struct disp_layer *lyr)
{
	struct disp_layer_private_data *lyrp = disp_lyr_get_priv(lyr);

	if ((lyr == NULL) || (lyrp == NULL)) {
		DE_WRN("NULL hdl!\n");
		return -1;
	}

	if (lyrp->cfg)
		return (lyrp->cfg->flag & LAYER_ALL_DIRTY);

	return 0;
}

static s32 disp_lyr_dirty_clear(struct disp_layer *lyr)
{
	struct disp_layer_private_data *lyrp = disp_lyr_get_priv(lyr);

	if ((lyr == NULL) || (lyrp == NULL)) {
		DE_WRN("NULL hdl!\n");
		return -1;
	}

	lyrp->cfg->flag = 0;

	return 0;
}

static s32
disp_lyr_get_config(struct disp_layer *lyr, struct disp_layer_config *config)
{
	unsigned long flags;
	struct disp_layer_private_data *lyrp = disp_lyr_get_priv(lyr);

	if ((lyr == NULL) || (lyrp == NULL)) {
		DE_WRN("NULL hdl!\n");
		return -1;
	}

	spin_lock_irqsave(&mgr_data_lock, flags);
	if (lyrp->cfg)
		__disp_inner_transfer2config(config, &lyrp->cfg->config);
	spin_unlock_irqrestore(&mgr_data_lock, flags);

	return DIS_SUCCESS;
}

static s32 disp_lyr_get_config2(struct disp_layer *lyr,
				struct disp_layer_config2 *config)
{
	unsigned long flags;
	struct disp_layer_private_data *lyrp = disp_lyr_get_priv(lyr);

	if ((lyr == NULL) || (lyrp == NULL)) {
		DE_WRN("NULL hdl!\n");
		return -1;
	}

	spin_lock_irqsave(&mgr_data_lock, flags);
	if (lyrp->cfg)
		__disp_inner_transfer2config2(config, &lyrp->cfg->config);
	spin_unlock_irqrestore(&mgr_data_lock, flags);

	return DIS_SUCCESS;
}

static s32 disp_lyr_apply(struct disp_layer *lyr)
{
	struct disp_layer_private_data *lyrp = disp_lyr_get_priv(lyr);

	if ((lyr == NULL) || (lyrp == NULL)) {
		DE_WRN("NULL hdl!\n");
		return -1;
	}

	return DIS_SUCCESS;
}

static s32 disp_lyr_force_apply(struct disp_layer *lyr)
{
	unsigned long flags;
	struct disp_layer_private_data *lyrp = disp_lyr_get_priv(lyr);

	if ((lyr == NULL) || (lyrp == NULL)) {
		DE_WRN("NULL hdl!\n");
		return -1;
	}

	spin_lock_irqsave(&mgr_data_lock, flags);
	lyrp->cfg->flag |= LAYER_ALL_DIRTY;
	spin_unlock_irqrestore(&mgr_data_lock, flags);
	disp_lyr_apply(lyr);

	return DIS_SUCCESS;
}

static s32 disp_lyr_dump(struct disp_layer *lyr, char *buf)
{
	unsigned long flags;
	struct disp_layer_config_data data;
	struct disp_layer_private_data *lyrp = disp_lyr_get_priv(lyr);
	u32 count = 0;

	if ((lyr == NULL) || (lyrp == NULL)) {
		DE_WRN("NULL hdl!\n");
		return -1;
	}

	spin_lock_irqsave(&mgr_data_lock, flags);
	memcpy(&data, lyrp->cfg, sizeof(struct disp_layer_config_data));
	spin_unlock_irqrestore(&mgr_data_lock, flags);

	count += sprintf(buf + count, " %5s ",
			 (data.config.info.mode == LAYER_MODE_BUFFER) ?
			 "BUF" : "COLOR");
	count += sprintf(buf + count, " %8s ",
			 (data.config.enable == 1) ? "enable" : "disable");
	count += sprintf(buf + count, "ch[%1u] ", data.config.channel);
	count += sprintf(buf + count, "lyr[%1u] ", data.config.layer_id);
	count += sprintf(buf + count, "z[%1u] ", data.config.info.zorder);
	count += sprintf(buf + count, "prem[%1s] ",
			 (data.config.info.fb.pre_multiply) ? "Y" : "N");
	count += sprintf(buf + count, "a[%5s %3u] ",
			 (data.config.info.alpha_mode) ? "globl" : "pixel",
			 data.config.info.alpha_value);
	count += sprintf(buf + count, "fmt[%3d] ", data.config.info.fb.format);
	count += sprintf(buf + count, "fb[%4u,%4u;%4u,%4u;%4u,%4u] ",
			 data.config.info.fb.size[0].width,
			 data.config.info.fb.size[0].height,
			 data.config.info.fb.size[1].width,
			 data.config.info.fb.size[1].height,
			 data.config.info.fb.size[2].width,
			 data.config.info.fb.size[2].height);
	count += sprintf(buf + count, "crop[%4u,%4u,%4u,%4u] ",
			 (unsigned int)(data.config.info.fb.crop.x >> 32),
			 (unsigned int)(data.config.info.fb.crop.y >> 32),
			 (unsigned int)(data.config.info.fb.crop.width >> 32),
			 (unsigned int)(data.config.info.fb.crop.height >> 32));
	count += sprintf(buf + count, "frame[%4d,%4d,%4u,%4u] ",
			 data.config.info.screen_win.x,
			 data.config.info.screen_win.y,
			 data.config.info.screen_win.width,
			 data.config.info.screen_win.height);
	count += sprintf(buf + count, "addr[%8llx,%8llx,%8llx] ",
			 data.config.info.fb.addr[0],
			 data.config.info.fb.addr[1],
			 data.config.info.fb.addr[2]);
	count += sprintf(buf + count, "flags[0x%8x] trd[%1d,%1d]\n",
			 data.config.info.fb.flags, data.config.info.b_trd_out,
			 data.config.info.out_trd_mode);
	count += sprintf(buf + count, "depth[%2d] ", data.config.info.fb.depth);
#if defined(DE_VERSION_V33X)
	count += sprintf(buf + count, "transf[%d]\n", data.config.info.transform);
#endif

	return count;
}

static s32 disp_init_lyr(struct disp_bsp_init_para *para)
{
	u32 num_screens = 0, num_channels = 0, num_layers = 0;
	u32 max_num_layers = 0;
	u32 disp, chn, layer_id, layer_index = 0;

	DE_INF("disp_init_lyr\n");

	num_screens = bsp_disp_feat_get_num_screens();
	for (disp = 0; disp < num_screens; disp++)
		max_num_layers += bsp_disp_feat_get_num_layers(disp);

	lyrs = kmalloc_array(max_num_layers, sizeof(struct disp_layer),
			     GFP_KERNEL | __GFP_ZERO);
	if (lyrs == NULL) {
		DE_WRN("malloc memory fail! size=0x%x\n",
		       (unsigned int)sizeof(struct disp_layer) *
		       max_num_layers);
		return DIS_FAIL;
	}

	lyr_private = (struct disp_layer_private_data *)
	    kmalloc(sizeof(struct disp_layer_private_data) *
		    max_num_layers, GFP_KERNEL | __GFP_ZERO);
	if (lyr_private == NULL) {
		DE_WRN("malloc memory fail! size=0x%x\n",
		       (unsigned int)sizeof(struct disp_layer_private_data)
		       * max_num_layers);
		return DIS_FAIL;
	}

	lyr_cfgs = (struct disp_layer_config_data *)
	    kmalloc(sizeof(struct disp_layer_config_data) *
		    max_num_layers, GFP_KERNEL | __GFP_ZERO);
	if (lyr_cfgs == NULL) {
		DE_WRN("malloc memory fail! size=0x%x\n",
		       (unsigned int)sizeof(struct disp_layer_config_data)
		       * max_num_layers);
		return DIS_FAIL;
	}

	for (disp = 0; disp < num_screens; disp++) {
		num_channels = bsp_disp_feat_get_num_channels(disp);
		for (chn = 0; chn < num_channels; chn++) {
			num_layers =
			    bsp_disp_feat_get_num_layers_by_chn(disp, chn);
			for (layer_id = 0; layer_id < num_layers;
			     layer_id++, layer_index++) {
				struct disp_layer *lyr = &lyrs[layer_index];
				struct disp_layer_config_data *lyr_cfg =
				    &lyr_cfgs[layer_index];
				struct disp_layer_private_data *lyrp =
				    &lyr_private[layer_index];

				lyrp->shadow_protect = para->shadow_protect;
				lyrp->cfg = lyr_cfg;

				lyr_cfg->ops.vmap = disp_vmap;
				lyr_cfg->ops.vunmap = disp_vunmap;
				sprintf(lyr->name, "mgr%d chn%d lyr%d",
					disp, chn, layer_id);
				lyr->disp = disp;
				lyr->chn = chn;
				lyr->id = layer_id;
				lyr->data = (void *)lyrp;

				lyr->set_manager = disp_lyr_set_manager;
				lyr->unset_manager = disp_lyr_unset_manager;
				lyr->apply = disp_lyr_apply;
				lyr->force_apply = disp_lyr_force_apply;
				lyr->check = disp_lyr_check;
				lyr->check2 = disp_lyr_check2;
				lyr->save_and_dirty_check =
				    disp_lyr_save_and_dirty_check;
				lyr->save_and_dirty_check2 =
				    disp_lyr_save_and_dirty_check2;
				lyr->get_config = disp_lyr_get_config;
				lyr->get_config2 = disp_lyr_get_config2;
				lyr->dump = disp_lyr_dump;
				lyr->is_dirty = disp_lyr_is_dirty;
				lyr->dirty_clear = disp_lyr_dirty_clear;
			}
		}
	}

	return 0;
}

static s32 disp_exit_lyr(void)
{
	kfree(lyr_cfgs);
	kfree(lyr_private);
	kfree(lyrs);

	return 0;
}

struct disp_manager *disp_get_layer_manager(u32 disp)
{
	u32 num_screens;

	num_screens = bsp_disp_feat_get_num_screens();
	if (disp >= num_screens) {
		DE_WRN("disp %d out of range\n", disp);
		return NULL;
	}

	return &mgrs[disp];
}
EXPORT_SYMBOL(disp_get_layer_manager);

int disp_get_num_screens(void)
{
	int num_screens;

	num_screens = bsp_disp_feat_get_num_screens();

	return num_screens;
}
EXPORT_SYMBOL(disp_get_num_screens);

static struct disp_manager_private_data *
disp_mgr_get_priv(struct disp_manager *mgr)
{
	if (mgr == NULL) {
		DE_WRN("NULL hdl!\n");
		return NULL;
	}

	return &mgr_private[mgr->disp];
}

static struct disp_layer_config_data *
disp_mgr_get_layer_cfg_head(struct disp_manager *mgr)
{
	int layer_index = 0, disp;
	int num_screens = bsp_disp_feat_get_num_screens();

	for (disp = 0; disp < num_screens && disp < mgr->disp; disp++)
		layer_index += bsp_disp_feat_get_num_layers(disp);

	return &lyr_cfgs[layer_index];
}

static struct disp_layer_config_data *
disp_mgr_get_layer_cfg(struct disp_manager *mgr,
		       struct disp_layer_config2 *config)
{
	int layer_index = 0, disp;
	int num_screens = bsp_disp_feat_get_num_screens();

	for (disp = 0; disp < num_screens && disp < mgr->disp; disp++)
		layer_index += bsp_disp_feat_get_num_layers(disp);

	layer_index += config->channel * bsp_disp_feat_get_num_layers_by_chn(
	    mgr->disp, config->channel) + config->layer_id;

	return &lyr_cfgs[layer_index];
}

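/*
 * RCQ (register command queue) shadow-register handshake: "protect"
 * waits, with roughly a one-frame timeout, for a previously queued RCQ
 * update to finish before the caller touches the shadow registers;
 * "unprotect" kicks off a new RCQ update and arms the wait flag (the
 * wait is skipped when the output device is RTWB).
 */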
static s32 disp_mgr_protect_reg_for_rcq(struct disp_manager *mgr, bool protect)
{
	struct disp_manager_private_data *mgrp = disp_mgr_get_priv(mgr);
	long ret = 0;

	if ((mgr == NULL) || (mgrp == NULL)) {
		__wrn("NULL hdl!\n");
		return -1;
	}

	if (!rcq_init_finished)
		return -1;

	if (protect) {
		if (atomic_read(&mgrp->wati_rcq_finish_flag) == 1) {
			ret = wait_event_timeout(
			    mgrp->wait_rcq_finish_queue,
			    atomic_read(&mgrp->wati_rcq_finish_flag) == 2,
			    msecs_to_jiffies(1000 / mgrp->cfg->config.device_fps + 1));
			if (ret <= 0) {
				atomic_set(&mgrp->wati_rcq_finish_flag, 2);
				disp_al_manager_set_rcq_update(mgr->disp, 0);
				wake_up(&mgrp->wait_rcq_finish_queue);
				gdisp.screen[mgr->disp].health_info.skip_cnt++;
			} else {
				disp_al_manager_set_all_rcq_head_dirty(mgr->disp, 0);
			}
		}
	} else {
		DISP_TRACE_BEGIN("set_rcq_update");
		disp_al_manager_set_rcq_update(mgr->disp, 1);
		/* don't wait for rcq finish if the output type is rtwb */
		if (mgr->device && mgr->device->type == DISP_OUTPUT_TYPE_RTWB)
			atomic_set(&mgrp->wati_rcq_finish_flag, 0);
		else
			atomic_set(&mgrp->wati_rcq_finish_flag, 1);
		DISP_TRACE_END("set_rcq_update");
	}

	return 0;
}

static s32 disp_mgr_shadow_protect(struct disp_manager *mgr, bool protect)
{
	struct disp_manager_private_data *mgrp = disp_mgr_get_priv(mgr);

	if ((mgr == NULL) || (mgrp == NULL)) {
		DE_WRN("NULL hdl!\n");
		return -1;
	}

	if (mgrp->shadow_protect)
		return mgrp->shadow_protect(mgr->disp, protect);

	return -1;
}

static s32 disp_mgr_rcq_finish_irq_handler(struct disp_manager *mgr)
{
	struct disp_manager_private_data *mgrp = disp_mgr_get_priv(mgr);
	u32 irq_state = 0;
	unsigned long flags;

	if ((mgr == NULL) || (mgrp == NULL)) {
		__wrn("NULL hdl!\n");
		return -1;
	}

	irq_state = disp_al_manager_query_irq_state(mgr->disp,
						    DISP_AL_IRQ_STATE_RCQ_ACCEPT
						    | DISP_AL_IRQ_STATE_RCQ_FINISH);

	if (irq_state & DISP_AL_IRQ_STATE_RCQ_FINISH) {
		if (mgr->device && mgr->device->get_status)
			if (mgr->device->get_status(mgr->device) != 0)
				gdisp.screen[mgr->disp].health_info.error_cnt++;
		disp_al_manager_set_all_rcq_head_dirty(mgr->disp, 0);
		spin_lock_irqsave(&mgr_data_lock, flags);
		/*
		 * rcq update finished, so we know exactly which addresses
		 * are in use and can safely unmap the dmabufs
		 */
		mgrp->unmap_dmabuf = true;
		spin_unlock_irqrestore(&mgr_data_lock, flags);
		atomic_set(&mgrp->wati_rcq_finish_flag, 2);
		wake_up(&mgrp->wait_rcq_finish_queue);
	}
	DISP_TRACE_INT_F("rcq_irq_state", irq_state);

	return 0;
}

s32 disp_mgr_irq_handler(u32 disp, u32 irq_flag, void *ptr)
{
	if (irq_flag & DISP_AL_IRQ_FLAG_RCQ_FINISH)
		disp_mgr_rcq_finish_irq_handler((struct disp_manager *)ptr);

	return 0;
}

static s32 disp_mgr_clk_init(struct disp_manager *mgr)
{
	struct disp_manager_private_data *mgrp = disp_mgr_get_priv(mgr);

	if ((mgr == NULL) || (mgrp == NULL)) {
		DE_WRN("NULL hdl!\n");
		return -1;
	}

	mgrp->clk_parent = clk_get_parent(mgrp->clk);
	mgrp->cfg->config.de_freq = clk_get_rate(mgrp->clk);

#if defined(DE_VERSION_V2X)
	disp_al_update_de_clk_rate(mgrp->cfg->config.de_freq);
#endif

	return 0;
}

static s32 disp_mgr_clk_exit(struct disp_manager *mgr)
{
	struct disp_manager_private_data *mgrp = disp_mgr_get_priv(mgr);

	if ((mgr == NULL) || (mgrp == NULL)) {
		DE_WRN("NULL hdl!\n");
		return -1;
	}

	return 0;
}

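/*
 * Clock enable path: the DE rate is first requested on the module clock
 * itself; if the achieved rate differs, the parent clock is retargeted
 * and the request retried before failing. Each reset line is deasserted
 * before its matching clock is enabled.
 */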
static s32 disp_mgr_clk_enable(struct disp_manager *mgr)
{
	struct disp_manager_private_data *mgrp = disp_mgr_get_priv(mgr);
	int ret = 0;
	unsigned long de_freq = 0;

	if ((mgr == NULL) || (mgrp == NULL)) {
		DE_WRN("NULL hdl!\n");
		return -1;
	}

	if (mgr->get_clk_rate && mgrp->clk) {
		de_freq = mgr->get_clk_rate(mgr);
		DE_INF("set DE rate to %lu\n", de_freq);
		clk_set_rate(mgrp->clk, de_freq);
		if (de_freq != clk_get_rate(mgrp->clk)) {
			if (mgrp->clk_parent)
				clk_set_rate(mgrp->clk_parent, de_freq);
			clk_set_rate(mgrp->clk, de_freq);
			if (de_freq != clk_get_rate(mgrp->clk)) {
				DE_WRN("Set DE clk fail\n");
				return -1;
			}
		}
	}

	clk_set_parent(mgrp->clk, mgrp->clk_parent);

	DE_INF("mgr %d clk enable\n", mgr->disp);

#if defined(HAVE_DEVICE_COMMON_MODULE)
	if (mgrp->rst_extra) {
		ret = reset_control_deassert(mgrp->rst_extra);
		if (ret) {
			DE_WRN("%s(%d): reset_control_deassert for rst_extra (display top) failed, ret=%d\n",
			       __func__, __LINE__, ret);
			return ret;
		}
	}
#endif

	if (mgrp->clk_extra) {
		ret = clk_prepare_enable(mgrp->clk_extra);
		if (ret != 0)
			DE_WRN("fail to enable mgr's clk_extra!\n");
	}

	ret = reset_control_deassert(mgrp->rst);
	if (ret) {
		DE_WRN("%s(%d): reset_control_deassert for rst failed\n",
		       __func__, __LINE__);
		return ret;
	}

	ret = clk_prepare_enable(mgrp->clk);
	if (ret) {
		DE_WRN("%s(%d): clk_prepare_enable for clk failed\n",
		       __func__, __LINE__);
		return ret;
	}

	ret = clk_prepare_enable(mgrp->clk_bus);
	if (ret) {
		DE_WRN("%s(%d): clk_prepare_enable for clk_bus failed\n",
		       __func__, __LINE__);
		return ret;
	}

	if (mgrp->rst_dpss) {
		ret = reset_control_deassert(mgrp->rst_dpss);
		if (ret) {
			DE_WRN("%s(%d): reset_control_deassert for rst_dpss failed, ret=%d\n",
			       __func__, __LINE__, ret);
			return ret;
		}
	}
	if (mgrp->clk_dpss) {
		ret = clk_prepare_enable(mgrp->clk_dpss);
		if (ret) {
			DE_WRN("%s(%d): clk_prepare_enable for clk_dpss failed, ret=%d\n",
			       __func__, __LINE__, ret);
			return ret;
		}
	}

	return ret;
}

static s32 disp_mgr_clk_disable(struct disp_manager *mgr)
{
	int ret;
	struct disp_manager_private_data *mgrp = disp_mgr_get_priv(mgr);

	if ((mgr == NULL) || (mgrp == NULL)) {
		DE_WRN("NULL hdl!\n");
		return -1;
	}

	if (mgrp->clk_extra)
		clk_disable_unprepare(mgrp->clk_extra);
#if defined(HAVE_DEVICE_COMMON_MODULE)
	if (mgrp->rst_extra)
		reset_control_assert(mgrp->rst_extra);
#endif

	clk_disable_unprepare(mgrp->clk);
	clk_disable_unprepare(mgrp->clk_bus);

	ret = reset_control_assert(mgrp->rst);
	if (ret) {
		DE_WRN("%s(%d): reset_control_assert for rst failed\n",
		       __func__, __LINE__);
		return ret;
	}

	if (mgrp->clk_dpss)
		clk_disable_unprepare(mgrp->clk_dpss);
	if (mgrp->rst_dpss)
		reset_control_assert(mgrp->rst_dpss);

	return 0;
}

/* Return: rate in Hz */
static s32 disp_mgr_get_clk_rate(struct disp_manager *mgr)
{
	struct disp_manager_private_data *mgrp = disp_mgr_get_priv(mgr);

	if ((mgr == NULL) || (mgrp == NULL)) {
		DE_WRN("NULL hdl!\n");
		return 0;
	}

#if defined(CONFIG_ARCH_SUN8IW16)
	if (mgr->device && mgr->device->type != DISP_OUTPUT_TYPE_HDMI)
		mgrp->cfg->config.de_freq = 216000000;
	else
		mgrp->cfg->config.de_freq = 432000000;
#endif

	return mgrp->cfg->config.de_freq;
}

static s32 disp_mgr_init(struct disp_manager *mgr)
{
	struct disp_manager_private_data *mgrp = disp_mgr_get_priv(mgr);

	if ((mgr == NULL) || (mgrp == NULL)) {
		DE_WRN("NULL hdl!\n");
		return -1;
	}

	disp_mgr_clk_init(mgr);
	return 0;
}

static s32 disp_mgr_exit(struct disp_manager *mgr)
{
	struct disp_manager_private_data *mgrp = disp_mgr_get_priv(mgr);

	if ((mgr == NULL) || (mgrp == NULL)) {
		DE_WRN("NULL hdl!\n");
		return -1;
	}

	/* FIXME, disable manager */
	disp_mgr_clk_exit(mgr);

	return 0;
}

static s32 disp_mgr_set_palette(struct disp_manager *mgr,
				struct disp_palette_config *config)
{
	unsigned long flags;
	unsigned int num_chns = 0;
	struct disp_manager_private_data *mgrp = disp_mgr_get_priv(mgr);

	if ((mgr == NULL) || (mgrp == NULL)) {
		DE_WRN("NULL hdl!\n");
		return -1;
	}
	num_chns = bsp_disp_feat_get_num_channels(mgr->disp);
	if ((config == NULL) || (num_chns == 0) ||
	    (config->channel >= num_chns)) {
		DE_WRN("invalid palette config!\n");
		return -1;
	}
	spin_lock_irqsave(&mgr_data_lock, flags);
	memcpy(&mgrp->cfg->config.palette, config,
	       sizeof(struct disp_palette_config));
	mgrp->cfg->flag |= MANAGER_PALETTE_DIRTY;
	spin_unlock_irqrestore(&mgr_data_lock, flags);

	mgr->apply(mgr);

	return DIS_SUCCESS;
}

static s32
disp_mgr_set_back_color(struct disp_manager *mgr,
			struct disp_color *back_color)
{
	unsigned long flags;
	struct disp_manager_private_data *mgrp = disp_mgr_get_priv(mgr);

	if ((mgr == NULL) || (mgrp == NULL)) {
		DE_WRN("NULL hdl!\n");
		return -1;
	}

	spin_lock_irqsave(&mgr_data_lock, flags);
	memcpy(&mgrp->cfg->config.back_color, back_color,
	       sizeof(struct disp_color));
	mgrp->cfg->flag |= MANAGER_BACK_COLOR_DIRTY;
	spin_unlock_irqrestore(&mgr_data_lock, flags);

	mgr->apply(mgr);

	return DIS_SUCCESS;
}

static s32
disp_mgr_get_back_color(struct disp_manager *mgr,
			struct disp_color *back_color)
{
	unsigned long flags;
	struct disp_manager_private_data *mgrp = disp_mgr_get_priv(mgr);

	if ((mgr == NULL) || (mgrp == NULL)) {
		DE_WRN("NULL hdl!\n");
		return -1;
	}

	spin_lock_irqsave(&mgr_data_lock, flags);
	memcpy(back_color, &mgrp->cfg->config.back_color,
	       sizeof(struct disp_color));
	spin_unlock_irqrestore(&mgr_data_lock, flags);

	return DIS_SUCCESS;
}

static s32
disp_mgr_set_color_key(struct disp_manager *mgr, struct disp_colorkey *ck)
{
	unsigned long flags;
	struct disp_manager_private_data *mgrp = disp_mgr_get_priv(mgr);

	if ((mgr == NULL) || (mgrp == NULL)) {
		DE_WRN("NULL hdl!\n");
		return -1;
	}

	spin_lock_irqsave(&mgr_data_lock, flags);
	memcpy(&mgrp->cfg->config.ck, ck, sizeof(struct disp_colorkey));
	mgrp->cfg->flag |= MANAGER_CK_DIRTY;
	spin_unlock_irqrestore(&mgr_data_lock, flags);

	mgr->apply(mgr);

	return DIS_SUCCESS;
}

static s32
disp_mgr_get_color_key(struct disp_manager *mgr, struct disp_colorkey *ck)
{
	unsigned long flags;
	struct disp_manager_private_data *mgrp = disp_mgr_get_priv(mgr);

	if ((mgr == NULL) || (mgrp == NULL)) {
		DE_WRN("NULL hdl!\n");
		return -1;
	}

	spin_lock_irqsave(&mgr_data_lock, flags);
	memcpy(ck, &mgrp->cfg->config.ck, sizeof(struct disp_colorkey));
	spin_unlock_irqrestore(&mgr_data_lock, flags);

	return DIS_SUCCESS;
}

static s32
disp_mgr_set_output_color_range(struct disp_manager *mgr, u32 color_range)
{
	unsigned long flags;
	struct disp_manager_private_data *mgrp = disp_mgr_get_priv(mgr);

	if ((mgr == NULL) || (mgrp == NULL)) {
		DE_WRN("NULL hdl!\n");
		return -1;
	}

	spin_lock_irqsave(&mgr_data_lock, flags);
	mgrp->cfg->config.color_range = color_range;
	mgrp->cfg->flag |= MANAGER_COLOR_RANGE_DIRTY;
	mgrp->color_range_modified = true;
	spin_unlock_irqrestore(&mgr_data_lock, flags);

	mgr->apply(mgr);

	return DIS_SUCCESS;
}

static s32 disp_mgr_get_output_color_range(struct disp_manager *mgr)
{
	struct disp_manager_private_data *mgrp = disp_mgr_get_priv(mgr);

	if ((mgr == NULL) || (mgrp == NULL)) {
		DE_WRN("NULL hdl!\n");
		return 0;
	}

	return mgrp->cfg->config.color_range;
}

static s32 disp_mgr_update_color_space(struct disp_manager *mgr)
{
	unsigned long flags;
	struct disp_manager_private_data *mgrp = disp_mgr_get_priv(mgr);
	unsigned int cs = 0, color_range = 0;

	if ((mgr == NULL) || (mgrp == NULL)) {
		DE_WRN("NULL hdl!\n");
		return -1;
	}

	if (mgr->device) {
		if (mgr->device->get_input_csc)
			cs = mgr->device->get_input_csc(mgr->device);
		if (mgr->device->get_input_color_range)
			color_range =
			    mgr->device->get_input_color_range(mgr->device);
	}

	spin_lock_irqsave(&mgr_data_lock, flags);
	mgrp->cfg->config.cs = cs;
	if (!mgrp->color_range_modified)
		mgrp->cfg->config.color_range = color_range;
	mgrp->cfg->flag |= MANAGER_COLOR_SPACE_DIRTY;
	spin_unlock_irqrestore(&mgr_data_lock, flags);

	mgr->apply(mgr);

	return DIS_SUCCESS;
}

static s32 disp_mgr_smooth_switch(struct disp_manager *mgr)
{
	unsigned long flags;
	struct disp_manager_private_data *mgrp = disp_mgr_get_priv(mgr);
	struct disp_device_config dev_config;

	if ((mgr == NULL) || (mgrp == NULL)) {
		DE_WRN("NULL hdl!\n");
		return -1;
	}

	memset(&dev_config, 0, sizeof(dev_config));
	if (mgr->device && mgr->device->get_static_config)
		mgr->device->get_static_config(mgr->device, &dev_config);

	spin_lock_irqsave(&mgr_data_lock, flags);
	mgrp->cfg->config.color_space = dev_config.cs;
	mgrp->cfg->config.eotf = dev_config.eotf;
	mgrp->cfg->flag |= MANAGER_COLOR_SPACE_DIRTY;
	spin_unlock_irqrestore(&mgr_data_lock, flags);

	mgr->apply(mgr);

	return DIS_SUCCESS;
}

static int _force_layer_en;
static int _force_layer2;
static struct disp_layer_config backup_layer[2][MAX_LAYERS];
static struct disp_layer_config2 backup_layer2[2][MAX_LAYERS];
struct dmabuf_item back_dma_item[2][4];

static int backup_layer_num;
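
/*
 * "Force layer" support: while _force_layer_en is set, regular
 * set_layer_config calls only refresh the backup arrays above and
 * return, so the forced configuration stays on screen until
 * disp_mgr_force_set_layer_config_exit() replays the backup.
 */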
static void layer_mask_init(unsigned int *mask, unsigned int total)
{
	*mask = (0x00000001 << total) - 1;
}

static void layer_mask_clear(unsigned int *mask, unsigned int channel,
			     unsigned int id)
{
	unsigned int bit = (0x00000001 << id) << (channel * 4);

	(*mask) = (*mask) & (~bit);
}

static int layer_mask_test(unsigned int *mask, unsigned int channel,
			   unsigned int id)
{
	unsigned int bit = (0x00000001 << id) << (channel * 4);

	return (*mask) & bit;
}
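
/*
 * Worked example: the mask packs one bit per layer with a stride of
 * four layers per channel (bit = channel * 4 + id), so these helpers
 * assume at most four layers per channel. With total = 8, init yields
 * 0xff; clearing channel 1, layer 2 drops bit 6, leaving 0xbf.
 */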

static s32 disp_mgr_force_set_layer_config(struct disp_manager *mgr,
					   struct disp_layer_config *config,
					   unsigned int layer_num)
{
	struct disp_manager_private_data *mgrp = disp_mgr_get_priv(mgr);
	unsigned int num_layers = 0, layer_index = 0;
	struct disp_layer *lyr = NULL;
	unsigned int mask = 0;
	struct disp_layer_config dummy;
	struct disp_layer *pre_lyr = NULL;
	int channel, id;
	int layers_cnt = 0;
	int channels_cnt = 0;

	if ((mgr == NULL) || (mgrp == NULL)) {
		DE_WRN("NULL hdl!\n");
		return -1;
	}

	mutex_lock(&mgr_mlock);
	_force_layer_en = 1;

	num_layers = bsp_disp_feat_get_num_layers(mgr->disp);
	if ((config == NULL) || (layer_num == 0) || (layer_num > num_layers)) {
		DE_WRN("invalid input params!\n");
		mutex_unlock(&mgr_mlock);
		return -1;
	}

	layer_mask_init(&mask, num_layers);
	for (layer_index = 0; layer_index < layer_num; layer_index++) {
		lyr = disp_get_layer(mgr->disp, config->channel,
				     config->layer_id);
		if (lyr == NULL)
			continue;
		if (!lyr->check(lyr, config))
			lyr->save_and_dirty_check(lyr, config);
		layer_mask_clear(&mask, config->channel, config->layer_id);
		config++;
	}

	channels_cnt = bsp_disp_feat_get_num_channels(mgr->disp);
	memset(&dummy, 0, sizeof(dummy));
	for (channel = 0; channel < channels_cnt; channel++) {
		layers_cnt = bsp_disp_feat_get_num_layers_by_chn(mgr->disp,
								 channel);
		for (id = 0; id < layers_cnt; id++) {
			if (layer_mask_test(&mask, channel, id) == 0)
				continue;

			layer_mask_clear(&mask, channel, id);
			pre_lyr = disp_get_layer(mgr->disp, channel, id);
			if (pre_lyr == NULL)
				continue;

			dummy.channel = channel;
			dummy.layer_id = id;
			if (!pre_lyr->check(pre_lyr, &dummy))
				pre_lyr->save_and_dirty_check(pre_lyr, &dummy);
		}
	}

	if (mgr->apply)
		mgr->apply(mgr);

	list_for_each_entry(lyr, &mgr->lyr_list, list) {
		lyr->dirty_clear(lyr);
	}

	mutex_unlock(&mgr_mlock);
	return DIS_SUCCESS;
}

s32 disp_mgr_set_layer_config2_restore(struct disp_manager *mgr,
				       struct disp_layer_config2 *config,
				       unsigned int layer_num);

static s32 disp_mgr_force_set_layer_config_exit(struct disp_manager *mgr)
{
	struct disp_manager_private_data *mgrp = disp_mgr_get_priv(mgr);
	unsigned int num_layers = 0, layer_index = 0;
	unsigned int layer_num;
	struct disp_layer *lyr = NULL;
	struct disp_layer_config *backup;
	struct disp_layer_config2 *backup2;
	struct disp_layer_config dummy;
	int channel = 0;
	int id = 0;
	int layers_cnt = 0;
	int channels_cnt;

	if ((mgr == NULL) || (mgrp == NULL)) {
		DE_WRN("NULL hdl!\n");
		return -1;
	}

	num_layers = bsp_disp_feat_get_num_layers(mgr->disp);

	mutex_lock(&mgr_mlock);
	layer_num = backup_layer_num;
	backup = &(backup_layer[mgr->disp][0]);
	backup2 = &(backup_layer2[mgr->disp][0]);

	if (layer_num == 0 || layer_num > num_layers) {
		DE_WRN("invalid input params!\n");
		mutex_unlock(&mgr_mlock);
		return -1;
	}
	if (_force_layer2) {
		disp_mgr_set_layer_config2_restore(mgr, backup2, layer_num);
		_force_layer_en = 0;
		mutex_unlock(&mgr_mlock);
		return DIS_SUCCESS;
	}

	/* disable all layers first */
	channels_cnt = bsp_disp_feat_get_num_channels(mgr->disp);

	memset(&dummy, 0, sizeof(dummy));
	for (channel = 0; channel < channels_cnt; channel++) {
		layers_cnt = bsp_disp_feat_get_num_layers_by_chn(mgr->disp,
								 channel);
		for (id = 0; id < layers_cnt; id++) {
			lyr = disp_get_layer(mgr->disp, channel, id);
			if (lyr == NULL)
				continue;

			dummy.channel = channel;
			dummy.layer_id = id;
			if (!lyr->check(lyr, &dummy))
				lyr->save_and_dirty_check(lyr, &dummy);
		}
	}

	for (layer_index = 0; layer_index < layer_num; layer_index++) {
		lyr = disp_get_layer(mgr->disp, backup->channel,
				     backup->layer_id);
		if (lyr == NULL)
			continue;
		if (!lyr->check(lyr, backup))
			lyr->save_and_dirty_check(lyr, backup);

		backup++;
	}

	if (mgr->apply)
		mgr->apply(mgr);

	list_for_each_entry(lyr, &mgr->lyr_list, list) {
		lyr->dirty_clear(lyr);
	}
	_force_layer_en = 0;
	mutex_unlock(&mgr_mlock);

	return DIS_SUCCESS;
}
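
/*
 * Note the ordering above: every layer is first overwritten with a
 * disabled dummy config, so layers that existed only in the forced set
 * are switched off before the backed-up configuration is replayed.
 */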

static s32
disp_mgr_set_layer_config(struct disp_manager *mgr,
			  struct disp_layer_config *config,
			  unsigned int layer_num)
{
	struct disp_manager_private_data *mgrp = disp_mgr_get_priv(mgr);
	unsigned int num_layers = 0, layer_index = 0;
	struct disp_layer *lyr = NULL;

	if ((mgr == NULL) || (mgrp == NULL)) {
		DE_WRN("NULL hdl!\n");
		return -1;
	}
	DE_INF("mgr%d, config %d layers\n", mgr->disp, layer_num);

	num_layers = bsp_disp_feat_get_num_layers(mgr->disp);
	if ((config == NULL) || (layer_num == 0) || (layer_num > num_layers)) {
		DE_WRN("invalid input params!\n");
		return -1;
	}
	DE_INF("layer:ch%d, layer%d, format=%d, size=<%d,%d>, crop=<%lld,%lld,%lld,%lld>, frame=<%d,%d>, en=%d addr=[0x%llx,0x%llx,0x%llx] alpha=<%d,%d>\n",
	       config->channel, config->layer_id, config->info.fb.format,
	       config->info.fb.size[0].width, config->info.fb.size[0].height,
	       config->info.fb.crop.x >> 32, config->info.fb.crop.y >> 32,
	       config->info.fb.crop.width >> 32,
	       config->info.fb.crop.height >> 32, config->info.screen_win.width,
	       config->info.screen_win.height, config->enable,
	       config->info.fb.addr[0], config->info.fb.addr[1],
	       config->info.fb.addr[2], config->info.alpha_mode,
	       config->info.alpha_value);

	mutex_lock(&mgr_mlock);
	{
		struct disp_layer_config *src = config;
		struct disp_layer_config *backup =
		    &(backup_layer[mgr->disp][0]);

		backup_layer_num = layer_num;
		memset(backup, 0,
		       sizeof(struct disp_layer_config) * MAX_LAYERS);
		for (layer_index = 0; layer_index < layer_num; layer_index++) {
			memcpy(backup, src, sizeof(struct disp_layer_config));
			backup++;
			src++;
		}
		if (_force_layer_en) {
			_force_layer2 = 0;
			mutex_unlock(&mgr_mlock);
			return DIS_SUCCESS;
		}
	}

	for (layer_index = 0; layer_index < layer_num; layer_index++) {
		struct disp_layer *lyr = NULL;

		lyr = disp_get_layer(mgr->disp, config->channel,
				     config->layer_id);
		if (lyr == NULL)
			continue;
		if (!lyr->check(lyr, config))
			lyr->save_and_dirty_check(lyr, config);
		config++;
	}

	if (mgr->apply)
		mgr->apply(mgr);

	list_for_each_entry(lyr, &mgr->lyr_list, list) {
		lyr->dirty_clear(lyr);
	}
	mutex_unlock(&mgr_mlock);

	return DIS_SUCCESS;
}

static void disp_mgr_dmabuf_list_add(struct dmabuf_item *item,
				     struct disp_manager_private_data *mgrp,
				     unsigned long long ref)
{
	item->id = ref;
	list_add_tail(&item->list, &mgrp->dmabuf_list);
	mgrp->dmabuf_cnt++;
}

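/*
 * Restore path used when leaving the "force layer" state: replays the
 * backed-up config2 set and re-derives the per-plane framebuffer
 * addresses from the dma_addr values saved in back_dma_item[].
 */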
disp_mgr_set_layer_config2_restore(struct disp_manager * mgr,struct disp_layer_config2 * config,unsigned int layer_num)1666 s32 disp_mgr_set_layer_config2_restore(struct disp_manager *mgr,
1667 struct disp_layer_config2 *config,
1668 unsigned int layer_num)
1669 {
1670 unsigned int num_layers = 0, i = 0;
1671 struct disp_layer *lyr = NULL;
1672 struct disp_layer_config_data *lyr_cfg;
1673 struct dmabuf_item *item;
1674 struct disp_layer_config2 *config1 = config;
1675 struct fb_address_transfer fb;
1676 struct disp_device_dynamic_config dconf;
1677
1678
1679 num_layers = bsp_disp_feat_get_num_layers(mgr->disp);
1680
1681 memset(&dconf, 0, sizeof(struct disp_device_dynamic_config));
1682
1683 for (i = 0; i < layer_num; i++) {
1684 struct disp_layer *lyr = NULL;
1685
1686 lyr = disp_get_layer(mgr->disp, config1->channel,
1687 config1->layer_id);
1688
1689 if (lyr) {
1690 lyr->save_and_dirty_check2(lyr, config1);
1691 if (lyr->is_dirty(lyr) &&
1692 (config1->info.fb.metadata_flag & 0x3)) {
1693 dconf.metadata_fd =
1694 config1->info.fb.metadata_fd;
1695 dconf.metadata_size =
1696 config1->info.fb.metadata_size;
1697 dconf.metadata_flag =
1698 config1->info.fb.metadata_flag;
1699 }
1700 }
1701
1702 config1++;
1703 }
1704
1705 lyr_cfg = disp_mgr_get_layer_cfg_head(mgr);
1706 for (i = 0; i < layer_num; i++, lyr_cfg++) {
1707
1708 if (lyr_cfg->config.enable == 0)
1709 continue;
1710
1711 if (lyr_cfg->config.info.mode == LAYER_MODE_COLOR)
1712 continue;
1713
1714 item = &back_dma_item[mgr->disp][0];
1715
1716 fb.format = lyr_cfg->config.info.fb.format;
1717 memcpy(fb.size, lyr_cfg->config.info.fb.size,
1718 sizeof(struct disp_rectsz) * 3);
1719 memcpy(fb.align, lyr_cfg->config.info.fb.align,
1720 sizeof(int) * 3);
1721 fb.depth = lyr_cfg->config.info.fb.depth;
1722 fb.dma_addr = item->dma_addr;
1723 disp_set_fb_info(&fb, true);
1724 memcpy(lyr_cfg->config.info.fb.addr,
1725 fb.addr,
1726 sizeof(long long) * 3);
1727
1728 lyr_cfg->config.info.fb.trd_right_addr[0] =
1729 (unsigned int)fb.trd_right_addr[0];
1730 lyr_cfg->config.info.fb.trd_right_addr[1] =
1731 (unsigned int)fb.trd_right_addr[1];
1732 lyr_cfg->config.info.fb.trd_right_addr[2] =
1733 (unsigned int)fb.trd_right_addr[2];
1734
1735 /* get dma_buf for right image buffer */
1736 if (lyr_cfg->config.info.fb.flags == DISP_BF_STEREO_FP) {
1737 item = &back_dma_item[mgr->disp][1];
1738 fb.dma_addr = item->dma_addr;
1739 disp_set_fb_info(&fb, false);
1740 lyr_cfg->config.info.fb.trd_right_addr[0] =
1741 (unsigned int)fb.trd_right_addr[0];
1742 lyr_cfg->config.info.fb.trd_right_addr[1] =
1743 (unsigned int)fb.trd_right_addr[1];
1744 lyr_cfg->config.info.fb.trd_right_addr[2] =
1745 (unsigned int)fb.trd_right_addr[2];
1746 }
1747
1748 /* process 2d plus depth stereo mode */
1749 if (lyr_cfg->config.info.fb.flags == DISP_BF_STEREO_2D_DEPTH) {
1750 lyr_cfg->config.info.fb.flags = DISP_BF_STEREO_FP;
1751 /* process depth, only support rgb format */
1752 if ((lyr_cfg->config.info.fb.depth != 0) &&
1753 (lyr_cfg->config.info.fb.format
1754 < DISP_FORMAT_YUV444_I_AYUV)) {
1755 int depth = lyr_cfg->config.info.fb.depth;
1756 unsigned long long abs_depth =
1757 (depth > 0) ? depth : (-depth);
1758
1759 memcpy(fb.addr,
1760 lyr_cfg->config.info.fb.addr,
1761 sizeof(long long) * 3);
1762 fb.trd_right_addr[0] =
1763 lyr_cfg->config.info.fb.trd_right_addr[0];
1764 fb.trd_right_addr[1] =
1765 lyr_cfg->config.info.fb.trd_right_addr[1];
1766 fb.trd_right_addr[2] =
1767 lyr_cfg->config.info.fb.trd_right_addr[2];
1768 if (disp_set_fb_base_on_depth(&fb) == 0) {
1769 memcpy(lyr_cfg->config.info.fb.addr,
1770 fb.addr,
1771 sizeof(long long) * 3);
1772 lyr_cfg->config.info.fb.trd_right_addr[0] =
1773 (unsigned int)fb.trd_right_addr[0];
1774 lyr_cfg->config.info.fb.trd_right_addr[1] =
1775 (unsigned int)fb.trd_right_addr[1];
1776 lyr_cfg->config.info.fb.trd_right_addr[2] =
1777 (unsigned int)fb.trd_right_addr[2];
1778
1779 lyr_cfg->config.info.fb.crop.width -=
1780 (abs_depth << 32);
1781 }
1782 }
1783
1784 }
1785
1786 /* get dma_buf for atw coef buffer */
1787 if (!lyr_cfg->config.info.atw.used)
1788 continue;
1789
1790 item = &back_dma_item[mgr->disp][2];
1791 lyr_cfg->config.info.atw.cof_addr = item->dma_addr;
1792
1793
1794 }
1795
1796 if (mgr->apply)
1797 mgr->apply(mgr);
1798
1799 list_for_each_entry(lyr, &mgr->lyr_list, list) {
1800 lyr->dirty_clear(lyr);
1801 }
1802 return 0;
1803 }
1804
1805
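/*
 * Return 1 if @item's dma address is still referenced by one of the
 * @layer_num entries in @lyr_addr. The lyr_id.type bitmask mirrors the
 * mapping paths in this file: 0x1 = frame buffer, 0x2 = right-eye (3D)
 * buffer, 0x4 = ATW coefficient buffer.
 */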
1806 static s32 disp_mgr_is_address_using(u32 layer_num, struct dmabuf_item *item,
1807 struct disp_layer_address *lyr_addr)
1808 {
1809 u32 i = 0;
1810
1811 if (!item || !lyr_addr || layer_num == 0)
1812 return 0;
1813
1814 for (i = 0; i < layer_num; ++i) {
1815 if (lyr_addr[i].lyr_id.disp == item->lyr_id.disp &&
1816 lyr_addr[i].lyr_id.channel == item->lyr_id.channel &&
1817 lyr_addr[i].lyr_id.layer_id == item->lyr_id.layer_id) {
1818 if (lyr_addr[i].lyr_id.type == item->lyr_id.type) {
1819 if (item->lyr_id.type & 0x1)
1820 if (lyr_addr[i].dma_addr ==
1821 item->dma_addr)
1822 return 1;
1823 if (item->lyr_id.type & 0x2)
1824 if (lyr_addr[i].trd_addr ==
1825 item->dma_addr)
1826 return 1;
1827 if (item->lyr_id.type & 0x4)
1828 if (lyr_addr[i].atw_addr ==
1829 item->dma_addr)
1830 return 1;
1831 }
1832 }
1833 }
1834
1835 return 0;
1836 }
1837
1838 static s32 disp_unmap_afbc_header(struct disp_fb_info_inner *fb)
1839 {
1840 if ((fb->p_afbc_header == NULL)
1841 || (fb->p_metadata == NULL)) {
1842 DE_WRN("null buf: %p, %p\n",
1843 fb->p_afbc_header, fb->p_metadata);
1844 return -1;
1845 }
1846 if (IS_ERR(fb->metadata_dmabuf)) {
1847 DE_WRN("bad metadata_dmabuf(%p)\n", fb->metadata_dmabuf);
1848 return -1;
1849 }
1850
1851 DE_INF("\n");
1852 dma_buf_vunmap(fb->metadata_dmabuf, fb->p_metadata);
1853 dma_buf_end_cpu_access(fb->metadata_dmabuf, DMA_FROM_DEVICE);
1854 dma_buf_put(fb->metadata_dmabuf);
1855
1856 fb->p_afbc_header = NULL;
1857 fb->p_metadata = NULL;
1858 fb->metadata_dmabuf = NULL;
1859
1860 return 0;
1861 }
1862
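/*
 * Garbage-collect mapped dma-bufs: build a snapshot of the addresses
 * referenced by the currently enabled layer configs, then unmap every
 * dmabuf_list item whose address is absent from that snapshot.
 */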
1863 static void disp_unmap_dmabuf(struct disp_manager *mgr, struct disp_manager_private_data *mgrp, unsigned int num_layers)
1864 {
1865 struct disp_layer_config_data *lyr_cfg;
1866 struct disp_layer_address *lyr_addr = NULL;
1867 struct dmabuf_item *item, *tmp;
1868 int i;
1869 lyr_addr = kcalloc(num_layers, sizeof(struct disp_layer_address),
1870 GFP_KERNEL);
if (lyr_addr == NULL)
return;
1871
1872 lyr_cfg = disp_mgr_get_layer_cfg_head(mgr);
1873 for (i = 0; i < num_layers; ++i, ++lyr_cfg) {
1874
1875 if (lyr_cfg->config.enable == 0)
1876 continue;
1877 /* color-mode layers and the set_layer_config path (fd == -911) need no dma mapping */
1878 if (lyr_cfg->config.info.mode == LAYER_MODE_COLOR ||
1879 lyr_cfg->config.info.fb.fd == -911)
1880 continue;
1881 lyr_addr[i].lyr_id.disp = mgr->disp;
1882 lyr_addr[i].lyr_id.channel = lyr_cfg->config.channel;
1883 lyr_addr[i].lyr_id.layer_id = lyr_cfg->config.layer_id;
1884 lyr_addr[i].dma_addr = lyr_cfg->config.info.fb.addr[0];
1885 lyr_addr[i].lyr_id.type |= 0x1;
1886 if (lyr_cfg->config.info.fb.flags == DISP_BF_STEREO_FP) {
1887 lyr_addr[i].trd_addr =
1888 lyr_cfg->config.info.fb.trd_right_addr[0];
1889 lyr_addr[i].lyr_id.type |= 0x2;
1890 }
1891 /* get dma_buf for atw coef buffer */
1892 if (!lyr_cfg->config.info.atw.used)
1893 continue;
1894 lyr_addr[i].atw_addr = lyr_cfg->config.info.atw.cof_addr;
1895 lyr_addr[i].lyr_id.type |= 0x4;
1896 }
1897
1898 list_for_each_entry_safe(item, tmp, &mgrp->dmabuf_list, list) {
1899 if (!disp_mgr_is_address_using(num_layers, item, lyr_addr)) {
1900 list_del(&item->list);
1901 disp_dma_unmap(item);
1902 mgrp->dmabuf_cnt--;
1903 }
1904 }
1905 kfree(lyr_addr);
1906 }
1907
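/*
 * Map the AFBC metadata dma-buf for CPU access. The sequence is
 * dma_buf_get() -> dma_buf_begin_cpu_access() -> dma_buf_vmap(); it is
 * undone in reverse order by disp_unmap_afbc_header().
 */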
1908 static s32 disp_map_afbc_header(struct disp_fb_info_inner *fb)
1909 {
1910 int ret;
1911 if ((fb->metadata_fd < 0)
1912 || !(fb->metadata_flag & SUNXI_METADATA_FLAG_AFBC_HEADER)
1913 || (fb->metadata_size == 0)) {
1914 DE_WRN("invalid value\n");
1915 return -1;
1916 }
1917
1918 fb->metadata_dmabuf = dma_buf_get(fb->metadata_fd);
1919
1920 if (IS_ERR(fb->metadata_dmabuf)) {
1921 DE_WRN("dma_buf_get, fd(%d)\n", fb->metadata_fd);
1922 return -1;
1923 }
1924
1925 ret = dma_buf_begin_cpu_access(fb->metadata_dmabuf, DMA_FROM_DEVICE);
1926 if (ret) {
1927 dma_buf_put(fb->metadata_dmabuf);
1928 fb->metadata_dmabuf = NULL;
1929 DE_WRN("dma_buf_begin_cpu_access failed\n");
1930 return -1;
1931 }
1932 fb->p_metadata = dma_buf_vmap(fb->metadata_dmabuf);
1933 if (!fb->p_metadata) {
1934 dma_buf_end_cpu_access(fb->metadata_dmabuf, DMA_FROM_DEVICE);
1935 dma_buf_put(fb->metadata_dmabuf);
1936 fb->metadata_dmabuf = NULL;
1937 DE_WRN("dma_buf_kmap failed\n");
1938 return -1;
1939 }
1940
1941 fb->p_afbc_header = &(fb->p_metadata->afbc_head);
1942
1943 return 0;
1944 }
1945
1946 s32 disp_mgr_set_layer_config2(struct disp_manager *mgr,
1947 struct disp_layer_config2 *config,
1948 unsigned int layer_num)
1949 {
1950 struct disp_manager_private_data *mgrp;
1951 unsigned int num_layers = 0, i = 0;
1952 struct disp_layer *lyr = NULL;
1953 struct disp_layer_config_data *lyr_cfg_using[MAX_LAYERS];
1954 struct disp_layer_config_data *lyr_cfg;
1955 struct dmabuf_item *item;
1956 struct disp_layer_config2 *config1 = config;
1957 unsigned long long ref = 0;
1958 struct fb_address_transfer fb;
1959 bool pre_force_sync;
1960 struct disp_device_dynamic_config dconf;
1961 struct disp_device *dispdev = NULL;
1962 unsigned int map_err_cnt = 0;
1963 unsigned long flags;
1964 bool unmap = false;
1965
1966 if (mgr == NULL) {
1967 DE_WRN("NULL hdl!\n");
1968 goto err;
1969 }
1970 mgrp = disp_mgr_get_priv(mgr);
1971 if (mgrp == NULL) {
1972 DE_WRN("NULL hdl!\n");
1973 goto err;
1974 }
1975 pre_force_sync = mgrp->force_sync;
1976
1977 DE_INF("mgr%d, config %d layers\n", mgr->disp, layer_num);
1978
1979 num_layers = bsp_disp_feat_get_num_layers(mgr->disp);
1980 if ((config == NULL) || (layer_num == 0) || (layer_num > num_layers)) {
1981 DE_WRN("NULL hdl!\n");
1982 goto err;
1983 }
1984
1985 memset(&dconf, 0, sizeof(struct disp_device_dynamic_config));
1986 mutex_lock(&mgr_mlock);
1987
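/* Snapshot the caller's configs into backup_layer2 so the force-layer
 * path can replay them later; _force_layer2 marks the backup as valid. */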
1988 {
1989 unsigned int layer_index = 0;
1990 struct disp_layer_config2 *src = config;
1991 struct disp_layer_config2 *backup = &(backup_layer2[mgr->disp][0]);
1992 backup_layer_num = layer_num;
1993 memset(backup, 0, sizeof(struct disp_layer_config2) * MAX_LAYERS);
1994 for (layer_index = 0; layer_index < layer_num; layer_index++) {
1995 memcpy(backup, src, sizeof(struct disp_layer_config2));
1996 backup++;
1997 src++;
1998 }
1999 _force_layer2 = 1;
2000 }
2001
2002 spin_lock_irqsave(&mgr_data_lock, flags);
2003 if (mgrp->unmap_dmabuf) {
2004 mgrp->unmap_dmabuf = false;
2005 unmap = true;
2006 }
2007 /* If vsync arrives before lyr_cfgs is fully updated, we must not unmap after that vsync, or we would release addresses still in use. */
2008 mgrp->setting = true;
2009 spin_unlock_irqrestore(&mgr_data_lock, flags);
2010
2011 if (unmap)
2012 disp_unmap_dmabuf(mgr, mgrp, num_layers);
2013
2014
2015 for (i = 0; i < layer_num; i++) {
2016 struct disp_layer *lyr = NULL;
2017
2018 lyr = disp_get_layer(mgr->disp, config1->channel,
2019 config1->layer_id);
2020
2021 if (lyr) {
2022 lyr->save_and_dirty_check2(lyr, config1);
2023 if (lyr->is_dirty(lyr) &&
2024 (config1->info.fb.metadata_flag & 0x3)) {
2025 dconf.metadata_fd =
2026 config1->info.fb.metadata_fd;
2027 dconf.metadata_size =
2028 config1->info.fb.metadata_size;
2029 dconf.metadata_flag =
2030 config1->info.fb.metadata_flag;
2031 }
2032 }
2033 lyr_cfg_using[i] = disp_mgr_get_layer_cfg(mgr, config1);
2034
2035 config1++;
2036 }
2037
2038 ref = gdisp.screen[mgr->disp].health_info.irq_cnt;
2039
2040 for (i = 0, lyr_cfg = lyr_cfg_using[0]; i < layer_num; i++, lyr_cfg++) {
2041
2042 if (lyr_cfg->config.enable == 0)
2043 continue;
2044 /* color-mode layers and the set_layer_config path need no dma mapping */
2045 if (lyr_cfg->config.info.mode == LAYER_MODE_COLOR)
2046 continue;
2047
2048 item = disp_dma_map(lyr_cfg->config.info.fb.fd);
2049 memcpy(&back_dma_item[mgr->disp][0], item, sizeof(struct dmabuf_item));
2050 if (item == NULL) {
2051 pr_info("disp dma map fail!\n");
2052 lyr_cfg->config.enable = 0;
2053 map_err_cnt++;
2054 continue;
2055 }
2056 memcpy(&back_dma_item[mgr->disp][0], item, sizeof(struct dmabuf_item));
2057 item->lyr_id.disp = mgr->disp;
2058 item->lyr_id.channel = lyr_cfg->config.channel;
2059 item->lyr_id.layer_id = lyr_cfg->config.layer_id;
2060 item->lyr_id.type |= 0x1;
2061 fb.format = lyr_cfg->config.info.fb.format;
2062 memcpy(fb.size, lyr_cfg->config.info.fb.size,
2063 sizeof(struct disp_rectsz) * 3);
2064 memcpy(fb.align, lyr_cfg->config.info.fb.align,
2065 sizeof(int) * 3);
2066 fb.depth = lyr_cfg->config.info.fb.depth;
2067 fb.dma_addr = item->dma_addr;
2068 disp_set_fb_info(&fb, true);
2069 memcpy(lyr_cfg->config.info.fb.addr,
2070 fb.addr,
2071 sizeof(long long) * 3);
2072
2073 lyr_cfg->config.info.fb.trd_right_addr[0] =
2074 (unsigned int)fb.trd_right_addr[0];
2075 lyr_cfg->config.info.fb.trd_right_addr[1] =
2076 (unsigned int)fb.trd_right_addr[1];
2077 lyr_cfg->config.info.fb.trd_right_addr[2] =
2078 (unsigned int)fb.trd_right_addr[2];
2079 disp_mgr_dmabuf_list_add(item, mgrp, ref);
2080
2081 if (lyr_cfg->config.info.fb.fbd_en)
2082 disp_map_afbc_header(&(lyr_cfg->config.info.fb));
2083
2084 /* get dma_buf for right image buffer */
2085 if (lyr_cfg->config.info.fb.flags == DISP_BF_STEREO_FP) {
2086 item = disp_dma_map(lyr_cfg->config.info.atw.cof_fd);
2087 memcpy(&back_dma_item[mgr->disp][1], item, sizeof(struct dmabuf_item));
2088 if (item == NULL) {
2089 DE_WRN("disp dma map for right buffer fail!\n");
2090 lyr_cfg->config.info.fb.flags = DISP_BF_NORMAL;
2091 continue;
2092 }
2093 item->lyr_id.disp = mgr->disp;
2094 item->lyr_id.channel = lyr_cfg->config.channel;
2095 item->lyr_id.layer_id = lyr_cfg->config.layer_id;
2096 item->lyr_id.type |= 0x2;
2097 fb.dma_addr = item->dma_addr;
2098 disp_set_fb_info(&fb, false);
2099 lyr_cfg->config.info.fb.trd_right_addr[0] =
2100 (unsigned int)fb.trd_right_addr[0];
2101 lyr_cfg->config.info.fb.trd_right_addr[1] =
2102 (unsigned int)fb.trd_right_addr[1];
2103 lyr_cfg->config.info.fb.trd_right_addr[2] =
2104 (unsigned int)fb.trd_right_addr[2];
2105 disp_mgr_dmabuf_list_add(item, mgrp, ref);
2106 }
2107
2108 /* process 2d plus depth stereo mode */
2109 if (lyr_cfg->config.info.fb.flags == DISP_BF_STEREO_2D_DEPTH) {
2110 lyr_cfg->config.info.fb.flags = DISP_BF_STEREO_FP;
2111 /* process depth, only support rgb format */
2112 if ((lyr_cfg->config.info.fb.depth != 0) &&
2113 (lyr_cfg->config.info.fb.format
2114 < DISP_FORMAT_YUV444_I_AYUV)) {
2115 int depth = lyr_cfg->config.info.fb.depth;
2116 unsigned long long abs_depth =
2117 (depth > 0) ? depth : (-depth);
2118
2119 memcpy(fb.addr,
2120 lyr_cfg->config.info.fb.addr,
2121 sizeof(long long) * 3);
2122 fb.trd_right_addr[0] =
2123 lyr_cfg->config.info.fb.trd_right_addr[0];
2124 fb.trd_right_addr[1] =
2125 lyr_cfg->config.info.fb.trd_right_addr[1];
2126 fb.trd_right_addr[2] =
2127 lyr_cfg->config.info.fb.trd_right_addr[2];
2128 if (disp_set_fb_base_on_depth(&fb) == 0) {
2129 memcpy(lyr_cfg->config.info.fb.addr,
2130 fb.addr,
2131 sizeof(long long) * 3);
2132 lyr_cfg->config.info.fb.trd_right_addr[0] =
2133 (unsigned int)fb.trd_right_addr[0];
2134 lyr_cfg->config.info.fb.trd_right_addr[1] =
2135 (unsigned int)fb.trd_right_addr[1];
2136 lyr_cfg->config.info.fb.trd_right_addr[2] =
2137 (unsigned int)fb.trd_right_addr[2];
2138
2139 lyr_cfg->config.info.fb.crop.width -=
2140 (abs_depth << 32);
2141 }
2142 }
2143
2144 }
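/*
 * Note: the "<< 32" above implies crop.width is held as 32.32 fixed point,
 * so a depth of e.g. 2 shrinks the crop by exactly two whole pixels
 * (2ULL << 32). The restore and rtwb paths apply the same adjustment.
 */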
2145
2146 /* get dma_buf for atw coef buffer */
2147 if (!lyr_cfg->config.info.atw.used)
2148 continue;
2149
2150 item = disp_dma_map(lyr_cfg->config.info.atw.cof_fd);
2151 memcpy(&back_dma_item[mgr->disp][2], item, sizeof(struct dmabuf_item));
2152 if (item == NULL) {
2153 DE_WRN("disp dma map for atw coef fail!\n");
2154 lyr_cfg->config.info.atw.used = 0;
2155 continue;
2156 }
2157
2158 item->lyr_id.disp = mgr->disp;
2159 item->lyr_id.channel = lyr_cfg->config.channel;
2160 item->lyr_id.layer_id = lyr_cfg->config.layer_id;
2161 item->lyr_id.type |= 0x4;
2162
2163 lyr_cfg->config.info.atw.cof_addr = item->dma_addr;
2164 disp_mgr_dmabuf_list_add(item, mgrp, ref);
2165
2166
2167 }
2168
2169 mgrp->dmabuf_cnt_max = (mgrp->dmabuf_cnt > mgrp->dmabuf_cnt_max) ?
2170 mgrp->dmabuf_cnt : mgrp->dmabuf_cnt_max;
2171 if (!_force_layer_en) {
2172 if (mgr->apply)
2173 mgr->apply(mgr);
2174
2175 lyr_cfg = disp_mgr_get_layer_cfg_head(mgr);
2176 for (i = 0; i < num_layers; i++, lyr_cfg++)
2177 if (lyr_cfg->config.info.fb.fbd_en)
2178 disp_unmap_afbc_header(
2179 &(lyr_cfg->config.info.fb));
2180
2181 list_for_each_entry(lyr, &mgr->lyr_list, list) {
2182 lyr->dirty_clear(lyr);
2183 }
2184 }
2185
2186 /* Finished modifying lyr_cfgs; the vsync register update will make sw_reg == hw_reg == lyr_cfgs. */
2187 spin_lock_irqsave(&mgr_data_lock, flags);
2188 mgrp->setting = false;
2189 spin_unlock_irqrestore(&mgr_data_lock, flags);
2190
2191 /* force-sync the manager when consecutive nosync frames occur */
2192 mgrp->force_sync = false;
2193 if (!mgrp->sync) {
2194 mgrp->nosync_cnt++;
2195 if (mgrp->nosync_cnt >= FORCE_SYNC_THRESHOLD) {
2196 mgrp->nosync_cnt = 0;
2197 mgrp->force_sync_cnt++;
2198 mgrp->force_sync = true;
2199 mgr->sync(mgr, true);
2200 }
2201 } else {
2202 mgrp->nosync_cnt = 0;
2203 }
2204
2205 mutex_unlock(&mgr_mlock);
2206 dispdev = mgr->device;
2207 if ((dconf.metadata_flag & 0x3) &&
2208 dispdev && dispdev->set_dynamic_config)
2209 dispdev->set_dynamic_config(dispdev, &dconf);
2210
2211 return DIS_SUCCESS;
2212 err:
2213 return -1;
2214 }
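/*
 * Minimal usage sketch for the function above (illustrative only: buf_fd
 * and the surrounding setup are assumptions, error handling is omitted):
 *
 *	struct disp_layer_config2 cfg2;
 *
 *	memset(&cfg2, 0, sizeof(cfg2));
 *	cfg2.channel = 0;
 *	cfg2.layer_id = 0;
 *	cfg2.enable = 1;
 *	cfg2.info.fb.fd = buf_fd;	(dma-buf fd provided by the client)
 *	disp_mgr_set_layer_config2(mgr, &cfg2, 1);
 */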
2215
2216 static s32
2217 disp_mgr_get_layer_config(struct disp_manager *mgr,
2218 struct disp_layer_config *config,
2219 unsigned int layer_num)
2220 {
2221 struct disp_manager_private_data *mgrp = disp_mgr_get_priv(mgr);
2222 struct disp_layer *lyr;
2223 unsigned int num_layers = 0, layer_index = 0;
2224
2225 if ((mgr == NULL) || (mgrp == NULL)) {
2226 DE_WRN("NULL hdl!\n");
2227 return -1;
2228 }
2229
2230 num_layers = bsp_disp_feat_get_num_layers(mgr->disp);
2231 if ((config == NULL) || (layer_num == 0) || (layer_num > num_layers)) {
2232 DE_WRN("NULL hdl!\n");
2233 return -1;
2234 }
2235 for (layer_index = 0; layer_index < layer_num; layer_index++) {
2236 lyr = disp_get_layer(mgr->disp, config->channel,
2237 config->layer_id);
2238 if (lyr == NULL) {
2239 DE_WRN("get layer(%d,%d,%d) fail\n", mgr->disp,
2240 config->channel, config->layer_id);
2241 continue;
2242 }
2243 if (lyr->get_config)
2244 lyr->get_config(lyr, config);
2245 config++;
2246 }
2247
2248 return DIS_SUCCESS;
2249 }
2250
2251 static s32
2252 disp_mgr_get_layer_config2(struct disp_manager *mgr,
2253 struct disp_layer_config2 *config,
2254 unsigned int layer_num)
2255 {
2256 struct disp_manager_private_data *mgrp = disp_mgr_get_priv(mgr);
2257 struct disp_layer *lyr;
2258 unsigned int num_layers = 0, layer_index = 0;
2259
2260 if ((mgr == NULL) || (mgrp == NULL)) {
2261 DE_WRN("NULL hdl!\n");
2262 goto err;
2263 }
2264
2265 num_layers = bsp_disp_feat_get_num_layers(mgr->disp);
2266 if ((config == NULL) || (layer_num == 0) || (layer_num > num_layers)) {
2267 DE_WRN("NULL hdl!\n");
2268 goto err;
2269 }
2270 for (layer_index = 0; layer_index < layer_num; layer_index++) {
2271 lyr = disp_get_layer(mgr->disp, config->channel,
2272 config->layer_id);
2273 if (lyr != NULL)
2274 lyr->get_config2(lyr, config);
2275 else
2276 DE_WRN("get layer(%d,%d,%d) fail\n", mgr->disp,
2277 config->channel, config->layer_id);
2278 config++;
2279 }
2280
2281 return DIS_SUCCESS;
2282
2283 err:
2284 return -1;
2285 }
2286
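/*
 * Vsync-side half of the deferred dma-buf unmap protocol: once the register
 * update has succeeded and no set_layer_config2() call is mid-flight
 * (mgrp->setting), unmap_dmabuf is set so that the next set_layer_config2()
 * can safely release buffers that are no longer scanned out.
 */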
2287 static s32 disp_mgr_sync(struct disp_manager *mgr, bool sync)
2288 {
2289 unsigned long flags;
2290 struct disp_manager_private_data *mgrp = NULL;
2291 struct disp_enhance *enhance = NULL;
2292 struct disp_smbl *smbl = NULL;
2293 struct disp_capture *cptr = NULL;
2294
2295 if (mgr == NULL) {
2296 DE_WRN("NULL hdl!\n");
2297 return -1;
2298 }
2299
2300 if (disp_feat_is_using_rcq(mgr->disp))
2301 return 0;
2302
2303 mgrp = disp_mgr_get_priv(mgr);
2304 if (mgrp == NULL) {
2305 DE_WRN("get mgr %d's priv fail!!\n", mgr->disp);
2306 return -1;
2307 }
2308
2309 mgrp->sync = sync;
2310 if (!mgrp->sync) {
2311 mgrp->dmabuf_unmap_skip_cnt_max = mgrp->dmabuf_unmap_skip_cnt++ >
2312 mgrp->dmabuf_unmap_skip_cnt_max ?
2313 mgrp->dmabuf_unmap_skip_cnt :
2314 mgrp->dmabuf_unmap_skip_cnt_max;
2315 return 0;
2316 }
2317
2318 mgrp->nosync_cnt = 0;
2319
2320 enhance = mgr->enhance;
2321 smbl = mgr->smbl;
2322 cptr = mgr->cptr;
2323
2324 spin_lock_irqsave(&mgr_data_lock, flags);
2325 if (!mgrp->enabled) {
2326 spin_unlock_irqrestore(&mgr_data_lock, flags);
2327 return -1;
2328 }
2329 spin_unlock_irqrestore(&mgr_data_lock, flags);
2330
2331 /* mgr->update_regs(mgr); */
2332 disp_al_manager_sync(mgr->disp);
2333 mgr->update_regs(mgr);
2334 if (mgr->device && mgr->device->is_in_safe_period) {
2335 if (!mgr->device->is_in_safe_period(mgr->device)) {
2336 mgrp->err = true;
2337 mgrp->err_cnt++;
2338 } else {
2339 mgrp->err = false;
2340 }
2341 }
2342 spin_lock_irqsave(&mgr_data_lock, flags);
2343 mgrp->applied = false;
2344 if (!mgrp->err && !mgrp->setting) {
2345 mgrp->dmabuf_unmap_skip_cnt = 0;
2346 /* update_regs succeeded, so the in-use addresses are known exactly; it is safe to unmap dmabufs */
2347 mgrp->unmap_dmabuf = true;
2348 } else {
2349 mgrp->dmabuf_unmap_skip_cnt_max = mgrp->dmabuf_unmap_skip_cnt++ >
2350 mgrp->dmabuf_unmap_skip_cnt_max ?
2351 mgrp->dmabuf_unmap_skip_cnt :
2352 mgrp->dmabuf_unmap_skip_cnt_max;
2353 }
2354 spin_unlock_irqrestore(&mgr_data_lock, flags);
2355
2356 /* enhance */
2357 if (enhance && enhance->sync)
2358 enhance->sync(enhance);
2359
2360 /* smbl */
2361 if (smbl && smbl->sync)
2362 smbl->sync(smbl);
2363
2364 /* capture */
2365 if (cptr && cptr->sync)
2366 cptr->sync(cptr);
2367
2368 return DIS_SUCCESS;
2369 }
2370
2371 static s32 disp_mgr_tasklet(struct disp_manager *mgr)
2372 {
2373 struct disp_manager_private_data *mgrp = NULL;
2374 struct disp_enhance *enhance = NULL;
2375 struct disp_smbl *smbl = NULL;
2376 struct disp_capture *cptr = NULL;
2377
2378 if (mgr == NULL) {
2379 DE_WRN("NULL hdl!\n");
2380 return -1;
2381 }
2382 mgrp = disp_mgr_get_priv(mgr);
2383 if (mgrp == NULL) {
2384 DE_WRN("get mgr %d's priv fail!!\n", mgr->disp);
2385 return -1;
2386 }
2387 enhance = mgr->enhance;
2388 smbl = mgr->smbl;
2389 cptr = mgr->cptr;
2390
2391 if (!mgrp->enabled)
2392 return -1;
2393
2394 /* enhance */
2395 if (enhance && enhance->tasklet)
2396 enhance->tasklet(enhance);
2397
2398 /* smbl */
2399 if (smbl && smbl->tasklet)
2400 smbl->tasklet(smbl);
2401
2402 /* capture */
2403 if (cptr && cptr->tasklet)
2404 cptr->tasklet(cptr);
2405
2406 return DIS_SUCCESS;
2407 }
2408
2409 static s32 disp_mgr_update_regs(struct disp_manager *mgr)
2410 {
2411 unsigned long flags;
2412 struct disp_manager_private_data *mgrp = disp_mgr_get_priv(mgr);
2413
2414 if ((mgr == NULL) || (mgrp == NULL)) {
2415 DE_WRN("NULL hdl!\n");
2416 return -1;
2417 }
2418
2419 /* FIXME: sometimes another module may need to sync while the mgr does not */
2420 /* if (true == mgrp->applied) */
2421 disp_al_manager_update_regs(mgr->disp);
2422
2423 spin_lock_irqsave(&mgr_data_lock, flags);
2424 mgrp->applied = false;
2425 spin_unlock_irqrestore(&mgr_data_lock, flags);
2426
2427 return DIS_SUCCESS;
2428 }
2429
2430 static s32 disp_mgr_apply(struct disp_manager *mgr)
2431 {
2432 unsigned long flags;
2433 struct disp_manager_private_data *mgrp = disp_mgr_get_priv(mgr);
2434 struct disp_manager_data data;
2435 bool mgr_dirty = false;
2436 bool lyr_dirty = false;
2437 struct disp_layer *lyr = NULL;
2438
2439 if ((mgr == NULL) || (mgrp == NULL)) {
2440 DE_WRN("NULL hdl!\n");
2441 return -1;
2442 }
2443 DE_INF("mgr %d apply\n", mgr->disp);
2444
2445 DISP_TRACE_BEGIN(__func__);
2446 spin_lock_irqsave(&mgr_data_lock, flags);
2447 if ((mgrp->enabled) && (mgrp->cfg->flag & MANAGER_ALL_DIRTY)) {
2448 mgr_dirty = true;
2449 memcpy(&data, mgrp->cfg, sizeof(struct disp_manager_data));
2450 mgrp->cfg->flag &= ~MANAGER_ALL_DIRTY;
2451 }
2452
2453 list_for_each_entry(lyr, &mgr->lyr_list, list) {
2454 if (lyr->is_dirty && lyr->is_dirty(lyr)) {
2455 lyr_dirty = true;
2456 break;
2457 }
2458 }
2459
2460 mgrp->applied = true;
2461 spin_unlock_irqrestore(&mgr_data_lock, flags);
2462
2463 if (mgr->reg_protect)
2464 mgr->reg_protect(mgr, true);
2465 if (mgr_dirty)
2466 disp_al_manager_apply(mgr->disp, &data);
2467
2468 if (lyr_dirty) {
2469 u32 num_layers = bsp_disp_feat_get_num_layers(mgr->disp);
2470 struct disp_layer_config_data *lyr_cfg =
2471 disp_mgr_get_layer_cfg_head(mgr);
2472
2473 disp_al_layer_apply(mgr->disp, lyr_cfg, num_layers);
2474 }
2475 if (mgr->reg_protect)
2476 mgr->reg_protect(mgr, false);
2477
2478 spin_lock_irqsave(&mgr_data_lock, flags);
2479 mgrp->applied = true;
2480 spin_unlock_irqrestore(&mgr_data_lock, flags);
2481
2482 DISP_TRACE_END(__func__);
2483 return DIS_SUCCESS;
2484 }
2485
2486 static s32 disp_mgr_force_apply(struct disp_manager *mgr)
2487 {
2488 unsigned long flags;
2489 struct disp_manager_private_data *mgrp = disp_mgr_get_priv(mgr);
2490 struct disp_layer *lyr = NULL;
2491 struct disp_enhance *enhance = NULL;
2492 struct disp_smbl *smbl = NULL;
2493
2494 if ((mgr == NULL) || (mgrp == NULL)) {
2495 DE_WRN("NULL hdl!\n");
2496 return -1;
2497 }
2498
2499 enhance = mgr->enhance;
2500 smbl = mgr->smbl;
2501
2502 spin_lock_irqsave(&mgr_data_lock, flags);
2503 mgrp->cfg->flag |= MANAGER_ALL_DIRTY;
2504 spin_unlock_irqrestore(&mgr_data_lock, flags);
2505 list_for_each_entry(lyr, &mgr->lyr_list, list) {
2506 lyr->force_apply(lyr);
2507 }
2508
2509 disp_mgr_apply(mgr);
2510 disp_mgr_update_regs(mgr);
2511
2512 /* enhance */
2513 if (enhance && enhance->force_apply)
2514 enhance->force_apply(enhance);
2515
2516 /* smbl */
2517 if (smbl && smbl->force_apply)
2518 smbl->force_apply(smbl);
2519
2520 return 0;
2521 }
2522
2523 static s32 disp_mgr_enable(struct disp_manager *mgr)
2524 {
2525 unsigned long flags;
2526 struct disp_manager_private_data *mgrp = disp_mgr_get_priv(mgr);
2527 unsigned int width = 0, height = 0;
2528 unsigned int color_range = DISP_COLOR_RANGE_0_255;
2529 int ret;
2530 struct disp_device_config dev_config;
2531
2532 if ((mgr == NULL) || (mgrp == NULL)) {
2533 DE_WRN("NULL hdl!\n");
2534 return -1;
2535 }
2536 DE_INF("mgr %d enable\n", mgr->disp);
2537
memset(&dev_config, 0, sizeof(struct disp_device_config));
2538 dev_config.bits = DISP_DATA_8BITS;
2539 dev_config.eotf = DISP_EOTF_GAMMA22;
2540 dev_config.cs = DISP_BT601_F;
2541 dev_config.dvi_hdmi = DISP_HDMI;
2542 dev_config.range = DISP_COLOR_RANGE_16_235;
2543 dev_config.scan = DISP_SCANINFO_NO_DATA;
2544 dev_config.aspect_ratio = 8;
2545 ret = disp_mgr_clk_enable(mgr);
2546 if (ret != 0)
2547 return ret;
2548
2549 if (mgrp->irq_info.irq_flag)
2550 disp_register_irq(mgr->disp, &mgrp->irq_info);
2551
2552 disp_al_manager_init(mgr->disp);
2553
2554 if (mgr->device && mgr->device->type == DISP_OUTPUT_TYPE_RTWB)
2555 disp_al_manager_set_irq_enable(mgr->disp,
2556 mgrp->irq_info.irq_flag, 0);
2557 else
2558 disp_al_manager_set_irq_enable(mgr->disp,
2559 mgrp->irq_info.irq_flag, 1);
2560 rcq_init_finished = 1;
2561
2562 if (mgr->device) {
2563 disp_al_device_set_de_id(mgr->device->hwdev_index, mgr->disp);
2564 disp_al_device_set_de_use_rcq(mgr->device->hwdev_index,
2565 disp_feat_is_using_rcq(mgr->disp));
2566 disp_al_device_set_output_type(mgr->device->hwdev_index,
2567 mgr->device->type);
2568 if (mgr->device->get_resolution)
2569 mgr->device->get_resolution(mgr->device, &width,
2570 &height);
2571 if (mgr->device->get_static_config)
2572 mgr->device->get_static_config(mgr->device,
2573 &dev_config);
2574 if (mgr->device && mgr->device->get_input_color_range)
2575 color_range =
2576 mgr->device->get_input_color_range(mgr->device);
2577 mgrp->cfg->config.disp_device = mgr->device->disp;
2578 mgrp->cfg->config.hwdev_index = mgr->device->hwdev_index;
2579 if (mgr->device && mgr->device->is_interlace)
2580 mgrp->cfg->config.interlace =
2581 mgr->device->is_interlace(mgr->device);
2582 else
2583 mgrp->cfg->config.interlace = 0;
2584 if (mgr->device && mgr->device->get_fps)
2585 mgrp->cfg->config.device_fps =
2586 mgr->device->get_fps(mgr->device);
2587 else
2588 mgrp->cfg->config.device_fps = 60;
2589 }
2590
2591 DE_INF("output res: %d x %d, cs=%d, range=%d, interlace=%d\n",
2592 width, height, dev_config.cs, color_range, mgrp->cfg->config.interlace);
2593
2594 mutex_lock(&mgr_mlock);
2595 spin_lock_irqsave(&mgr_data_lock, flags);
2596 mgrp->enabled = 1;
2597 mgrp->cfg->config.enable = 1;
2598 mgrp->cfg->flag |= MANAGER_ENABLE_DIRTY;
2599
2600 mgrp->cfg->config.size.width = width;
2601 mgrp->cfg->config.size.height = height;
2602 mgrp->cfg->config.cs = dev_config.format;
2603 mgrp->cfg->config.color_space = dev_config.cs;
2604 mgrp->cfg->config.eotf = dev_config.eotf;
2605 mgrp->cfg->config.data_bits = dev_config.bits;
2606 if (!mgrp->color_range_modified)
2607 mgrp->cfg->config.color_range = color_range;
2608 mgrp->cfg->flag |= MANAGER_SIZE_DIRTY;
2609 mgrp->cfg->flag |= MANAGER_COLOR_SPACE_DIRTY;
2610 spin_unlock_irqrestore(&mgr_data_lock, flags);
2611
2612 disp_mgr_force_apply(mgr);
2613
2614 if (mgr->enhance && mgr->enhance->enable)
2615 mgr->enhance->enable(mgr->enhance);
2616 mutex_unlock(&mgr_mlock);
2617
2618 return 0;
2619 }
2620
2621 #ifndef RTMX_USE_RCQ
2622 #define RTMX_USE_RCQ (0)
2623 #endif
2624
2625 static s32 disp_mgr_sw_enable(struct disp_manager *mgr)
2626 {
2627 unsigned long flags;
2628 struct disp_manager_private_data *mgrp = disp_mgr_get_priv(mgr);
2629 unsigned int width = 0, height = 0;
2630 unsigned int cs = 0, color_range = DISP_COLOR_RANGE_0_255;
2631 struct disp_device_config dev_config;
2632 struct disp_enhance *enhance = NULL;
2633 struct disp_smbl *smbl = NULL;
2634 struct disp_layer *lyr = NULL;
2635 #if RTMX_USE_RCQ
2636 struct disp_device *dispdev;
2637 unsigned long curtime;
2638 unsigned int curline0, curline1;
2639 unsigned long cnt = 0;
2640 #endif
2641 memset(&dev_config, 0, sizeof(struct disp_device_config));
2642 if ((mgr == NULL) || (mgrp == NULL)) {
2643 DE_WRN("NULL hdl!\n");
2644 return -1;
2645 }
2646 DE_INF("mgr %d enable\n", mgr->disp);
2647
2648 dev_config.bits = DISP_DATA_8BITS;
2649 dev_config.eotf = DISP_EOTF_GAMMA22;
2650 dev_config.cs = DISP_BT601_F;
2651 #if !defined(CONFIG_COMMON_CLK_ENABLE_SYNCBOOT)
2652 if (disp_mgr_clk_enable(mgr) != 0)
2653 return -1;
2654 #endif
2655
2656 if (mgrp->irq_info.irq_flag)
2657 disp_register_irq(mgr->disp, &mgrp->irq_info);
2658 #if defined(DE_VERSION_V33X)
2659 disp_al_manager_init(mgr->disp);
2660 #endif
2661 disp_al_manager_set_irq_enable(mgr->disp, mgrp->irq_info.irq_flag, 1);
2662 rcq_init_finished = 1;
2663
2664 if (mgr->device) {
2665 disp_al_device_set_de_id(mgr->device->hwdev_index, mgr->disp);
2666 disp_al_device_set_de_use_rcq(mgr->device->hwdev_index,
2667 disp_feat_is_using_rcq(mgr->disp));
2668 disp_al_device_set_output_type(mgr->device->hwdev_index,
2669 mgr->device->type);
2670 if (mgr->device->get_resolution)
2671 mgr->device->get_resolution(mgr->device, &width,
2672 &height);
2673 if (mgr->device->get_static_config)
2674 mgr->device->get_static_config(mgr->device,
2675 &dev_config);
2676 if (mgr->device->get_input_csc)
2677 cs = mgr->device->get_input_csc(mgr->device);
2678 if (mgr->device && mgr->device->get_input_color_range)
2679 color_range =
2680 mgr->device->get_input_color_range(mgr->device);
2681 mgrp->cfg->config.disp_device = mgr->device->disp;
2682 mgrp->cfg->config.hwdev_index = mgr->device->hwdev_index;
2683 if (mgr->device && mgr->device->is_interlace)
2684 mgrp->cfg->config.interlace =
2685 mgr->device->is_interlace(mgr->device);
2686 else
2687 mgrp->cfg->config.interlace = 0;
2688 if (mgr->device && mgr->device->get_fps)
2689 mgrp->cfg->config.device_fps =
2690 mgr->device->get_fps(mgr->device);
2691 else
2692 mgrp->cfg->config.device_fps = 60;
2693 }
2694 DE_INF("output res: %d x %d, cs=%d, range=%d, interlace=%d\n",
2695 width, height, cs, color_range, mgrp->cfg->config.interlace);
2696
2697 spin_lock_irqsave(&mgr_data_lock, flags);
2698 mgrp->enabled = 1;
2699 mgrp->cfg->config.enable = 1;
2700
2701 mgrp->cfg->config.size.width = width;
2702 mgrp->cfg->config.size.height = height;
2703 mgrp->cfg->config.cs = dev_config.format;
2704 mgrp->cfg->config.color_space = dev_config.cs;
2705 mgrp->cfg->config.eotf = dev_config.eotf;
2706 mgrp->cfg->config.data_bits = dev_config.bits;
2707 mgrp->cfg->config.color_range = color_range;
2708 mgrp->cfg->flag |= MANAGER_ALL_DIRTY;
2709 spin_unlock_irqrestore(&mgr_data_lock, flags);
2710 list_for_each_entry(lyr, &mgr->lyr_list, list) {
2711 lyr->force_apply(lyr);
2712 }
2713
2714 #if RTMX_USE_RCQ
2715 /* wait for vertical blank period */
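/* (bounded by both a 50 ms jiffies timeout and a 1M iteration cap) */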
2716 dispdev = mgr->device;
2717 if (dispdev && dispdev->usec_before_vblank) {
2718 curtime = jiffies;
2719 while (dispdev->usec_before_vblank(dispdev) != 0) {
2720 if (time_after(jiffies, curtime + msecs_to_jiffies(50)))
2721 break;
2722 cnt++;
2723 if (cnt >= 1000 * 1000)
2724 break;
2725 }
2726 }
2727
2728 if (dispdev) {
2729 curline0 = disp_al_device_get_cur_line(dispdev->hwdev_index);
2730 disp_mgr_sync(mgr, true);
2731 #if IS_ENABLED(CONFIG_AW_IOMMU)
2732 DISP_TRACE_BEGIN("enable_iommu");
2733 mgr->enable_iommu(mgr, true);
2734 DISP_TRACE_END("enable_iommu");
2735 #endif
2736 curline1 = disp_al_device_get_cur_line(dispdev->hwdev_index);
2737 if (dispdev->is_in_safe_period) {
2738 if (!dispdev->is_in_safe_period(dispdev)) {
2739 DE_WRN("sync at non-safe "
2740 "period,start=%d,end=%d line\n",
2741 curline0, curline1);
2742 }
2743 }
2744 }
2745
2746 DISP_TRACE_BEGIN("flush_layer_address");
2747 disp_al_flush_layer_address(mgr->disp, 1, 0);
2748 DISP_TRACE_END("flush_layer_address");
2749
2750 /* Wait long enough to ensure the layer address has been
2751 * replaced with the new framebuffer address. */
2752 msleep(50);
2753
2754 DISP_TRACE_BEGIN("sw_apply");
2755 disp_mgr_apply(mgr);
2756 disp_mgr_update_regs(mgr);
2757 DISP_TRACE_END("sw_apply");
2758 #else
2759 disp_mgr_apply(mgr);
2760 #endif
2761
2762 enhance = mgr->enhance;
2763 smbl = mgr->smbl;
2764
2765 /* enhance */
2766 if (enhance && enhance->force_apply)
2767 enhance->force_apply(enhance);
2768
2769 /* smbl */
2770 if (smbl && smbl->force_apply)
2771 smbl->force_apply(smbl);
2772
2773 if (mgr->enhance && mgr->enhance->enable)
2774 mgr->enhance->enable(mgr->enhance);
2775
2776 return 0;
2777 }
2778
2779 static s32 disp_mgr_disable(struct disp_manager *mgr)
2780 {
2781 unsigned long flags;
2782 struct disp_manager_private_data *mgrp = disp_mgr_get_priv(mgr);
2783
2784 if ((mgr == NULL) || (mgrp == NULL)) {
2785 DE_WRN("NULL hdl!\n");
2786 return -1;
2787 }
2788
2789 DE_INF("mgr %d disable\n", mgr->disp);
2790
2791 mutex_lock(&mgr_mlock);
2792 if (mgr->enhance && mgr->enhance->disable)
2793 mgr->enhance->disable(mgr->enhance);
2794
2795 spin_lock_irqsave(&mgr_data_lock, flags);
2796 mgrp->enabled = 0;
2797 mgrp->cfg->flag |= MANAGER_ENABLE_DIRTY;
2798 spin_unlock_irqrestore(&mgr_data_lock, flags);
2799 disp_mgr_force_apply(mgr);
2800 disp_delay_ms(5);
2801
2802 disp_al_manager_exit(mgr->disp);
2803 disp_mgr_clk_disable(mgr);
2804 atomic_set(&mgrp->wati_rcq_finish_flag, 0);
2805 mutex_unlock(&mgr_mlock);
2806
2807 return 0;
2808 }
2809
2810 static s32 disp_mgr_is_enabled(struct disp_manager *mgr)
2811 {
2812 struct disp_manager_private_data *mgrp = disp_mgr_get_priv(mgr);
2813
2814 if ((mgr == NULL) || (mgrp == NULL)) {
2815 DE_WRN("NULL hdl!\n");
2816 return -1;
2817 }
2818
2819 return mgrp->enabled;
2820
2821 }
2822
2823 static s32 disp_mgr_dump(struct disp_manager *mgr, char *buf)
2824 {
2825 struct disp_manager_private_data *mgrp = disp_mgr_get_priv(mgr);
2826 unsigned int count = 0;
2827 char const *fmt_name[] = {"rgb", "yuv444", "yuv422", "yuv420"};
2828 char const *bits_name[] = {
2829 "8bits",
2830 "10bits",
2831 "12bits",
2832 "16bits"
2833 };
2834 bool direct_show = false;
2835
2836 if ((mgr == NULL) || (mgrp == NULL)) {
2837 DE_WRN("NULL hdl!\n");
2838 return -1;
2839 }
2840
2846 direct_show = disp_al_get_direct_show_state(mgr->disp);
2847
2848 count += sprintf(
2849 buf + count,
2850 "mgr%d: %dx%d fmt[%s] cs[0x%x] range[%s] eotf[0x%x] bits[%s] err[%u] force_sync[%u] %s direct_show[%s] iommu[%d]\n",
2851 mgr->disp, mgrp->cfg->config.size.width, mgrp->cfg->config.size.height,
2852 (mgrp->cfg->config.cs < 4) ? fmt_name[mgrp->cfg->config.cs]
2853 : "undef",
2854 mgrp->cfg->config.color_space,
2855 (mgrp->cfg->config.color_range == DISP_COLOR_RANGE_0_255) ? "full"
2856 : "limit",
2857 mgrp->cfg->config.eotf,
2858 (mgrp->cfg->config.data_bits < 4) ?
2859 bits_name[mgrp->cfg->config.data_bits] : "undef",
2860 mgrp->err_cnt,
2861 mgrp->force_sync_cnt,
2862 (mgrp->cfg->config.blank) ? "blank" : "unblank",
2863 (direct_show) ? "true" : "false", mgrp->iommu_en_flag);
2864
2865 count += sprintf(
2866 buf + count,
2867 "dmabuf: cache[%d] cache max[%d] umap skip[%d] umap skip max[%d]\n",
2868 mgrp->dmabuf_cnt, mgrp->dmabuf_cnt_max, mgrp->dmabuf_unmap_skip_cnt,
2869 mgrp->dmabuf_unmap_skip_cnt_max);
2870
2871 return count;
2874 }
2875
2876 static s32 disp_mgr_blank(struct disp_manager *mgr, bool blank)
2877 {
2878 struct disp_manager_private_data *mgrp = disp_mgr_get_priv(mgr);
2879 struct disp_layer *lyr = NULL;
2880
2881 if ((mgr == NULL) || (mgrp == NULL)) {
2882 DE_WRN("NULL hdl!\n");
2883 return -1;
2884 }
2885
2886 mutex_lock(&mgr_mlock);
2887 list_for_each_entry(lyr, &mgr->lyr_list, list) {
2888 lyr->force_apply(lyr);
2889 }
2890 mgrp->cfg->config.blank = blank;
2891 mgrp->cfg->flag |= MANAGER_BLANK_DIRTY;
2892 mgr->apply(mgr);
2893 mutex_unlock(&mgr_mlock);
2894
2895
2896 return 0;
2897 }
2898
2899 s32 disp_mgr_set_ksc_para(struct disp_manager *mgr,
2900 struct disp_ksc_info *pinfo)
2901 {
2902 unsigned long flags;
2903 struct disp_manager_private_data *mgrp = disp_mgr_get_priv(mgr);
2904
2905 spin_lock_irqsave(&mgr_data_lock, flags);
2906 memcpy(&mgrp->cfg->config.ksc, pinfo, sizeof(struct disp_colorkey));
2907 mgrp->cfg->flag |= MANAGER_KSC_DIRTY;
2908 spin_unlock_irqrestore(&mgr_data_lock, flags);
2909
2910 return mgr->apply(mgr);
2911 }
2912
2913 extern void sunxi_enable_device_iommu(unsigned int mastor_id, bool flag);
2914
2915 static s32 disp_mgr_enable_iommu(struct disp_manager *mgr, bool en)
2916 {
2917 #if IS_ENABLED(CONFIG_AW_IOMMU)
2918 struct disp_manager_private_data *mgrp = disp_mgr_get_priv(mgr);
2919
2920 if (mgrp->iommu_en_flag != en)
2921 sunxi_enable_device_iommu(mgrp->iommu_master_id, en);
2922 mgrp->iommu_en_flag = en;
2923 #endif
2924 return 0;
2925 }
2926
2927 s32 disp_init_mgr(struct disp_bsp_init_para *para)
2928 {
2929 u32 num_screens;
2930 u32 disp;
2931 struct disp_manager *mgr;
2932 struct disp_manager_private_data *mgrp;
2933
2934 DE_INF("%s\n", __func__);
2935
2936 spin_lock_init(&mgr_data_lock);
2937 mutex_init(&mgr_mlock);
2938
2939 num_screens = bsp_disp_feat_get_num_screens();
2940 mgrs = kcalloc(num_screens, sizeof(struct disp_manager),
2941 GFP_KERNEL);
2942 if (mgrs == NULL) {
2943 DE_WRN("malloc memory fail!\n");
2944 return DIS_FAIL;
2945 }
2946 mgr_private = kcalloc(num_screens,
2947 sizeof(struct disp_manager_private_data), GFP_KERNEL);
2949 if (mgr_private == NULL) {
2950 DE_WRN("malloc memory fail! size=0x%x x %d\n",
2951 (unsigned int)sizeof(struct disp_manager_private_data),
2952 num_screens);
2953 goto malloc_err;
2954 }
2955 mgr_cfgs = kcalloc(num_screens, sizeof(struct disp_manager_data),
2956 GFP_KERNEL);
2957 if (mgr_cfgs == NULL) {
2958 DE_WRN("malloc memory fail! size=0x%x x %d\n",
2959 (unsigned int)sizeof(struct disp_manager_data),
2960 num_screens);
2962 goto malloc_err;
2963 }
2964
2965 for (disp = 0; disp < num_screens; disp++) {
2966 mgr = &mgrs[disp];
2967 mgrp = &mgr_private[disp];
2968
2969 DE_INF("mgr %d, 0x%p\n", disp, mgr);
2970
2971 sprintf(mgr->name, "mgr%d", disp);
2972 mgr->disp = disp;
2973 mgrp->cfg = &mgr_cfgs[disp];
2974 mgrp->irq_no = para->irq_no[DISP_MOD_DE];
2975 mgrp->shadow_protect = para->shadow_protect;
2976 mgrp->clk = para->clk_de[disp];
2977 mgrp->clk_bus = para->clk_bus_de[disp];
2978 if (para->rst_bus_de[disp])
2979 mgrp->rst = para->rst_bus_de[disp];
2980 mgrp->clk_dpss = para->clk_bus_dpss_top[disp];
2981 if (para->rst_bus_dpss_top[disp])
2982 mgrp->rst_dpss = para->rst_bus_dpss_top[disp];
2983 mgrp->clk_extra = para->clk_bus_extra;
2984 mgrp->rst_extra = para->rst_bus_extra;
2985 mgrp->irq_info.sel = disp;
2986 mgrp->irq_info.irq_flag = disp_feat_is_using_rcq(disp) ?
2987 DISP_AL_IRQ_FLAG_RCQ_FINISH : 0;
2988 mgrp->irq_info.ptr = (void *)mgr;
2989 mgrp->irq_info.irq_handler = disp_mgr_irq_handler;
2990 if (disp_feat_is_using_rcq(disp)) {
2991 init_waitqueue_head(&mgrp->wait_rcq_finish_queue);
2992 atomic_set(&mgrp->wati_rcq_finish_flag, 0);
2993 }
2994
2995 mgrp->iommu_master_id =
2996 (disp == 0) ? IOMMU_DE0_MASTOR_ID : IOMMU_DE1_MASTOR_ID;
2997
2998
2999 mgr->enable = disp_mgr_enable;
3000 mgr->sw_enable = disp_mgr_sw_enable;
3001 mgr->disable = disp_mgr_disable;
3002 mgr->is_enabled = disp_mgr_is_enabled;
3003 mgr->set_color_key = disp_mgr_set_color_key;
3004 mgr->get_color_key = disp_mgr_get_color_key;
3005 mgr->set_back_color = disp_mgr_set_back_color;
3006 mgr->get_back_color = disp_mgr_get_back_color;
3007 mgr->set_layer_config = disp_mgr_set_layer_config;
3008 mgr->force_set_layer_config = disp_mgr_force_set_layer_config;
3009 mgr->force_set_layer_config_exit = disp_mgr_force_set_layer_config_exit;
3010 mgr->get_layer_config = disp_mgr_get_layer_config;
3011 mgr->set_layer_config2 = disp_mgr_set_layer_config2;
3012 mgr->get_layer_config2 = disp_mgr_get_layer_config2;
3013 mgr->set_output_color_range = disp_mgr_set_output_color_range;
3014 mgr->get_output_color_range = disp_mgr_get_output_color_range;
3015 mgr->update_color_space = disp_mgr_update_color_space;
3016 mgr->smooth_switch = disp_mgr_smooth_switch;
3017 mgr->set_ksc_para = disp_mgr_set_ksc_para;
3018 mgr->dump = disp_mgr_dump;
3019 mgr->blank = disp_mgr_blank;
3020 mgr->get_clk_rate = disp_mgr_get_clk_rate;
3021 mgr->set_palette = disp_mgr_set_palette;
3022
3023 mgr->init = disp_mgr_init;
3024 mgr->exit = disp_mgr_exit;
3025
3026 mgr->apply = disp_mgr_apply;
3027 mgr->update_regs = disp_mgr_update_regs;
3028 mgr->force_apply = disp_mgr_force_apply;
3029 mgr->sync = disp_mgr_sync;
3030 mgr->tasklet = disp_mgr_tasklet;
3031 mgr->enable_iommu = disp_mgr_enable_iommu;
3032
3033 if (disp_feat_is_using_rcq(disp))
3034 mgr->reg_protect = disp_mgr_protect_reg_for_rcq;
3035 else
3036 mgr->reg_protect = disp_mgr_shadow_protect;
3037
3038 INIT_LIST_HEAD(&mgr->lyr_list);
3039 INIT_LIST_HEAD(&mgrp->dmabuf_list);
3040
3041 mgr->init(mgr);
3042 }
3043
3044 disp_init_lyr(para);
3045
3046 return 0;
3047
3048 malloc_err:
3049 kfree(mgr_private);
3050 kfree(mgrs);
3051
3052 return -1;
3053 }
3054
3055 s32 disp_exit_mgr(void)
3056 {
3057 u32 num_screens;
3058 u32 disp;
3059 struct disp_manager *mgr;
3060
3061 if (!mgrs)
3062 return 0;
3063
3064 disp_exit_lyr();
3065 num_screens = bsp_disp_feat_get_num_screens();
3066 for (disp = 0; disp < num_screens; disp++) {
3067 mgr = &mgrs[disp];
3068 mgr->exit(mgr);
3069 }
3070
3071 kfree(mgr_private);
3072 kfree(mgrs);
3073
3074 return 0;
3075 }
3076
3077 #if defined(SUPPORT_RTWB)
3078 s32
3079 disp_mgr_set_rtwb_layer(struct disp_manager *mgr,
3080 struct disp_layer_config2 *config,
3081 struct disp_capture_info2 *p_cptr_info,
3082 unsigned int layer_num)
3083 {
3084 #define LAYER_NUM_MAX 16
3085 struct disp_manager_private_data *mgrp;
3086 unsigned int num_layers = 0, i = 0;
3087 struct disp_layer *lyr = NULL;
3088 struct disp_layer_config_data *lyr_cfg_using[LAYER_NUM_MAX];
3089 struct disp_layer_config_data *lyr_cfg;
3090 struct dmabuf_item *item, *wb_item;
3091 struct disp_layer_config2 *config1 = config;
3092 unsigned long long ref = 0;
3093 struct fb_address_transfer fb;
3094 bool pre_force_sync;
3095 struct disp_device_dynamic_config dconf;
3096 unsigned int map_err_cnt = 0;
3097 int ret = -1;
3098
3099 if (!mgr || !p_cptr_info) {
3100 DE_WRN("NULL hdl!\n");
3101 goto err;
3102 }
3103 mgrp = disp_mgr_get_priv(mgr);
3104 if (mgrp == NULL) {
3105 DE_WRN("NULL hdl!\n");
3106 goto err;
3107 }
3108 pre_force_sync = mgrp->force_sync;
3109 wb_item = disp_rtwb_config(mgr, p_cptr_info);
3110 if (!wb_item) {
3111 DE_WRN("rtwb config failed!\n");
3112 goto err;
3113 }
3114
3115 DE_INF("mgr%d, config %d layers\n", mgr->disp, layer_num);
3116
3117 num_layers = bsp_disp_feat_get_num_layers(mgr->disp);
3118 if ((config == NULL) || (layer_num == 0) || (layer_num > num_layers) || (layer_num > LAYER_NUM_MAX)) {
3119 DE_WRN("NULL hdl!\n");
3120 goto err;
3121 }
3122
3123 memset(&dconf, 0, sizeof(struct disp_device_dynamic_config));
3124 mutex_lock(&mgr_mlock);
3125
3126 /* Every call of set_rtwb_layer must update hw_reg/sw_reg/lyr_cfgs successfully, so it is safe to unmap dmabufs here. */
3127 disp_unmap_dmabuf(mgr, mgrp, num_layers);
3128 for (i = 0; i < layer_num; i++) {
3129 struct disp_layer *lyr = NULL;
3130
3131 lyr = disp_get_layer(mgr->disp, config1->channel,
3132 config1->layer_id);
3133
3134 if (lyr) {
3135 lyr->save_and_dirty_check2(lyr, config1);
3136 if (lyr->is_dirty(lyr) &&
3137 (config1->info.fb.metadata_flag & 0x3)) {
3138 dconf.metadata_fd =
3139 config1->info.fb.metadata_fd;
3140 dconf.metadata_size =
3141 config1->info.fb.metadata_size;
3142 dconf.metadata_flag =
3143 config1->info.fb.metadata_flag;
3144 }
3145 }
3146 lyr_cfg_using[i] = disp_mgr_get_layer_cfg(mgr, config1);
3147
3148 config1++;
3149 }
3150
3151
3153 ref = gdisp.screen[mgr->disp].health_info.irq_cnt;
3154
3155 for (i = 0, lyr_cfg = lyr_cfg_using[0]; i < layer_num; i++, lyr_cfg++) {
3156
3157 if (lyr_cfg->config.enable == 0)
3158 continue;
3159 /* color-mode layers and the set_layer_config path need no dma mapping */
3160 if (lyr_cfg->config.info.mode == LAYER_MODE_COLOR)
3161 continue;
3162
3163 item = disp_dma_map(lyr_cfg->config.info.fb.fd);
3164 if (item == NULL) {
3165 pr_info("disp dma map fail!\n");
3166 lyr_cfg->config.enable = 0;
3167 map_err_cnt++;
3168 continue;
3169 }
3170 memcpy(&back_dma_item[mgr->disp][0], item, sizeof(struct dmabuf_item));
3171 item->lyr_id.disp = mgr->disp;
3172 item->lyr_id.channel = lyr_cfg->config.channel;
3173 item->lyr_id.layer_id = lyr_cfg->config.layer_id;
3174 item->lyr_id.type |= 0x1;
3175 fb.format = lyr_cfg->config.info.fb.format;
3176 memcpy(fb.size, lyr_cfg->config.info.fb.size,
3177 sizeof(struct disp_rectsz) * 3);
3178 memcpy(fb.align, lyr_cfg->config.info.fb.align,
3179 sizeof(int) * 3);
3180 fb.depth = lyr_cfg->config.info.fb.depth;
3181 fb.dma_addr = item->dma_addr;
3182 disp_set_fb_info(&fb, true);
3183 memcpy(lyr_cfg->config.info.fb.addr,
3184 fb.addr,
3185 sizeof(long long) * 3);
3186
3187 lyr_cfg->config.info.fb.trd_right_addr[0] =
3188 (unsigned int)fb.trd_right_addr[0];
3189 lyr_cfg->config.info.fb.trd_right_addr[1] =
3190 (unsigned int)fb.trd_right_addr[1];
3191 lyr_cfg->config.info.fb.trd_right_addr[2] =
3192 (unsigned int)fb.trd_right_addr[2];
3193 disp_mgr_dmabuf_list_add(item, mgrp, ref);
3194
3195 if (lyr_cfg->config.info.fb.fbd_en)
3196 disp_map_afbc_header(&(lyr_cfg->config.info.fb));
3197
3198 /* get dma_buf for right image buffer */
3199 if (lyr_cfg->config.info.fb.flags == DISP_BF_STEREO_FP) {
3200 item = disp_dma_map(lyr_cfg->config.info.atw.cof_fd);
3201 memcpy(&back_dma_item[mgr->disp][1], item, sizeof(struct dmabuf_item));
3202 if (item == NULL) {
3203 DE_WRN("disp dma map for right buffer fail!\n");
3204 lyr_cfg->config.info.fb.flags = DISP_BF_NORMAL;
3205 continue;
3206 }
3207 item->lyr_id.disp = mgr->disp;
3208 item->lyr_id.channel = lyr_cfg->config.channel;
3209 item->lyr_id.layer_id = lyr_cfg->config.layer_id;
3210 item->lyr_id.type |= 0x2;
3211 fb.dma_addr = item->dma_addr;
3212 disp_set_fb_info(&fb, false);
3213 lyr_cfg->config.info.fb.trd_right_addr[0] =
3214 (unsigned int)fb.trd_right_addr[0];
3215 lyr_cfg->config.info.fb.trd_right_addr[1] =
3216 (unsigned int)fb.trd_right_addr[1];
3217 lyr_cfg->config.info.fb.trd_right_addr[2] =
3218 (unsigned int)fb.trd_right_addr[2];
3219 disp_mgr_dmabuf_list_add(item, mgrp, ref);
3220 }
3221
3222 /* process 2d plus depth stereo mode */
3223 if (lyr_cfg->config.info.fb.flags == DISP_BF_STEREO_2D_DEPTH) {
3224 lyr_cfg->config.info.fb.flags = DISP_BF_STEREO_FP;
3225 /* process depth, only support rgb format */
3226 if ((lyr_cfg->config.info.fb.depth != 0) &&
3227 (lyr_cfg->config.info.fb.format
3228 < DISP_FORMAT_YUV444_I_AYUV)) {
3229 int depth = lyr_cfg->config.info.fb.depth;
3230 unsigned long long abs_depth =
3231 (depth > 0) ? depth : (-depth);
3232
3233 memcpy(fb.addr,
3234 lyr_cfg->config.info.fb.addr,
3235 sizeof(long long) * 3);
3236 fb.trd_right_addr[0] =
3237 lyr_cfg->config.info.fb.trd_right_addr[0];
3238 fb.trd_right_addr[1] =
3239 lyr_cfg->config.info.fb.trd_right_addr[1];
3240 fb.trd_right_addr[2] =
3241 lyr_cfg->config.info.fb.trd_right_addr[2];
3242 if (disp_set_fb_base_on_depth(&fb) == 0) {
3243 memcpy(lyr_cfg->config.info.fb.addr,
3244 fb.addr,
3245 sizeof(long long) * 3);
3246 lyr_cfg->config.info.fb.trd_right_addr[0] =
3247 (unsigned int)fb.trd_right_addr[0];
3248 lyr_cfg->config.info.fb.trd_right_addr[1] =
3249 (unsigned int)fb.trd_right_addr[1];
3250 lyr_cfg->config.info.fb.trd_right_addr[2] =
3251 (unsigned int)fb.trd_right_addr[2];
3252
3253 lyr_cfg->config.info.fb.crop.width -=
3254 (abs_depth << 32);
3255 }
3256 }
3257
3258 }
3259
3260 /* get dma_buf for atw coef buffer */
3261 if (!lyr_cfg->config.info.atw.used)
3262 continue;
3263
3264 item = disp_dma_map(lyr_cfg->config.info.atw.cof_fd);
3265 memcpy(&back_dma_item[mgr->disp][2], item, sizeof(struct dmabuf_item));
3266 if (item == NULL) {
3267 DE_WRN("disp dma map for atw coef fail!\n");
3268 lyr_cfg->config.info.atw.used = 0;
3269 continue;
3270 }
3271
3272 item->lyr_id.disp = mgr->disp;
3273 item->lyr_id.channel = lyr_cfg->config.channel;
3274 item->lyr_id.layer_id = lyr_cfg->config.layer_id;
3275 item->lyr_id.type |= 0x4;
3276
3277 lyr_cfg->config.info.atw.cof_addr = item->dma_addr;
3278 disp_mgr_dmabuf_list_add(item, mgrp, ref);
3279
3280
3281 }
3282
3283 mgrp->dmabuf_cnt_max = (mgrp->dmabuf_cnt > mgrp->dmabuf_cnt_max) ?
3284 mgrp->dmabuf_cnt : mgrp->dmabuf_cnt_max;
3285 if (mgr->apply)
3286 mgr->apply(mgr);
3287
3288 lyr_cfg = disp_mgr_get_layer_cfg_head(mgr);
3289 for (i = 0; i < num_layers; i++, lyr_cfg++)
3290 if (lyr_cfg->config.info.fb.fbd_en)
3291 disp_unmap_afbc_header(
3292 &(lyr_cfg->config.info.fb));
3293
3294 list_for_each_entry(lyr, &mgr->lyr_list, list) {
3295 lyr->dirty_clear(lyr);
3296 }
3297
3298 if (!disp_feat_is_using_rcq(mgr->disp))
3299 mgr->sync(mgr, true);
3300
3301 ret = disp_rtwb_wait_finish(mgr);
3302
3303 disp_dma_unmap(wb_item);
3304
3305 mutex_unlock(&mgr_mlock);
3306
3307 err:
3308 return ret;
3309 }
3310 #endif
3311