1 /*
2 * Allwinner SoCs display driver.
3 *
4 * Copyright (C) 2017 Allwinner.
5 *
6 * This file is licensed under the terms of the GNU General Public
7 * License version 2. This program is licensed "as is" without any
8 * warranty of any kind, whether express or implied.
9 */
10
11 #include "de_vsu_type.h"
12 #include "de_scaler_table.h"
13
14 #include "../../include.h"
15 #include "de_vsu.h"
16 #include "de_feat.h"
17 #include "de_top.h"
18
/* Fixed-point layout of the scaler STEP values: 19 fractional bits plus
 * 4 integer bits; bit 0 of the hardware register is skipped
 * (see the VSU*_STEP_VALID_START_BIT shifts in the set_para functions). */
#define VSU_STEP_VALID_START_BIT 1
#define VSU_STEP_FRAC_BITWIDTH 19
#define VSU_STEP_FIXED_BITWIDTH 4
/* Initial PHASE values use the same fixed-point layout as the steps. */
#define VSU_PHASE_VALID_START_BIT 1
#define VSU_PHASE_FRAC_BITWIDTH 19
#define VSU_PHASE_FIXED_BITWIDTH 4

/* Smallest input/output window the scaler accepts, in pixels
 * (enforced by de_vsu_fix_tiny_size). */
#define VSU_MIN_INPUT_WIDTH 8
#define VSU_MIN_INPUT_HEIGHT 8

/* FIR tap counts per scaler variant; used to bound coarse-scale input
 * size in de_vsu_fix_big_size (input <= taps * output). */
#define VSU8_TAP_NUM_HORI 4
#define VSU8_TAP_NUM_VERT 2
#define VSU10_TAP_NUM_HORI 8
#define VSU10_TAP_NUM_VERT 4
#define VSU_ED_TAP_NUM_HORI 8
#define VSU_ED_TAP_NUM_VERT 4
35
/* Register block indices for the VSU8 scaler variant. */
enum {
	VSU8_REG_BLK_CTL = 0,	/* enable / scale mode */
	VSU8_REG_BLK_ATTR,	/* output size, global alpha */
	VSU8_REG_BLK_YPARA,	/* luma in-size, step, phase */
	VSU8_REG_BLK_CPARA,	/* chroma in-size, step, phase */
	VSU8_REG_BLK_COEFF0,	/* FIR coefficient tables */
	VSU8_REG_BLK_COEFF1,
	VSU8_REG_BLK_COEFF2,
	VSU8_REG_BLK_NUM,
};
46
/* Register block indices for the VSU10 scaler variant. */
enum {
	VSU10_REG_BLK_CTL = 0,	/* enable / scale mode */
	VSU10_REG_BLK_ATTR,	/* output size, global alpha */
	VSU10_REG_BLK_YPARA,	/* luma in-size, step, phase */
	VSU10_REG_BLK_CPARA,	/* chroma in-size, step, phase */
	VSU10_REG_BLK_COEFF0,	/* FIR coefficient tables */
	VSU10_REG_BLK_COEFF1,
	VSU10_REG_BLK_COEFF2,
	VSU10_REG_BLK_COEFF3,
	VSU10_REG_BLK_COEFF4,
	VSU10_REG_BLK_COEFF5,
	VSU10_REG_BLK_NUM,
};
60
/* Register block indices for the edge-directed (VSU_ED) scaler variant. */
enum {
	VSU_ED_REG_BLK_CTL = 0,	/* enable / scale mode */
	VSU_ED_REG_BLK_DIR_SHP,	/* direction / sharpness thresholds */
	VSU_ED_REG_BLK_ATTR,	/* output size, global alpha */
	VSU_ED_REG_BLK_YPARA,	/* luma in-size, step, phase */
	VSU_ED_REG_BLK_CPARA,	/* chroma in-size, step, phase */
	VSU_ED_REG_BLK_COEFF0,	/* FIR coefficient tables */
	VSU_ED_REG_BLK_COEFF1,
	VSU_ED_REG_BLK_COEFF2,
	VSU_ED_REG_BLK_COEFF3,
	VSU_ED_REG_BLK_COEFF4,
	VSU_ED_REG_BLK_COEFF5,
	VSU_ED_REG_BLK_NUM,

	/* VSU_ED has the most blocks; sizes the generic reg_blks[] view. */
	VSU_MAX_REG_BLK_NUM = VSU_ED_REG_BLK_NUM,
};
77
/*
 * Per-channel VSU state: register shadow memory, the per-variant register
 * block table, and the scaler-variant description.
 */
struct de_vsu_private {
	struct de_reg_mem_info reg_mem_info;	/* backing memory for blocks */
	u32 reg_blk_num;	/* valid entries in reg_blks[] */
	union {
		/* All three views alias the same storage; which layout is
		 * meaningful depends on vsu_type. reg_blks[] is the
		 * variant-agnostic view used for dirty marking. */
		struct de_reg_block vsu8_reg_blks[VSU8_REG_BLK_NUM];
		struct de_reg_block vsu10_reg_blks[VSU10_REG_BLK_NUM];
		struct de_reg_block vsu_ed_reg_blks[VSU_ED_REG_BLK_NUM];
		struct de_reg_block reg_blks[VSU_MAX_REG_BLK_NUM];
	};
	u32 vsu_type;		/* DE_SCALER_TYPE_VSU8 / VSU10 / VSU_ED */
	u32 tap_num_hori;	/* FIR taps of this variant, horizontal */
	u32 tap_num_vert;	/* FIR taps of this variant, vertical */

	/* Marks a register block dirty, either directly or via RCQ head. */
	void (*set_blk_dirty)(struct de_vsu_private *priv,
		u32 blk_id, u32 dirty);
};
94
95 static struct de_vsu_private vsu_priv[DE_NUM][MAX_CHN_NUM];
96
get_vsu8_reg(struct de_vsu_private * priv)97 static inline struct vsu8_reg *get_vsu8_reg(
98 struct de_vsu_private *priv)
99 {
100 return (struct vsu8_reg *)(priv->vsu8_reg_blks[0].vir_addr);
101 }
102
get_vsu10_reg(struct de_vsu_private * priv)103 static inline struct vsu10_reg *get_vsu10_reg(
104 struct de_vsu_private *priv)
105 {
106 return (struct vsu10_reg *)(priv->vsu10_reg_blks[0].vir_addr);
107 }
108
get_vsu_ed_reg(struct de_vsu_private * priv)109 static inline struct vsu_ed_reg *get_vsu_ed_reg(
110 struct de_vsu_private *priv)
111 {
112 return (struct vsu_ed_reg *)(priv->vsu_ed_reg_blks[0].vir_addr);
113 }
114
/* Directly flag the given register block dirty (non-RCQ update path). */
static void vsu_set_block_dirty(
	struct de_vsu_private *priv, u32 blk_id, u32 dirty)
{
	struct de_reg_block *blk = &priv->reg_blks[blk_id];

	blk->dirty = dirty;
}
120
/* Flag the given register block dirty through its RCQ head (RCQ
 * update path); warns if the head was never attached. */
static void vsu_set_rcq_head_dirty(
	struct de_vsu_private *priv, u32 blk_id, u32 dirty)
{
	if (priv->reg_blks[blk_id].rcq_hd == NULL) {
		DE_WRN("rcq_head is null ! blk_id=%d\n", blk_id);
		return;
	}

	priv->reg_blks[blk_id].rcq_hd->dirty.dwval = dirty;
}
130
131 /*
132 * function : de_vsu_calc_fir_coef(u32 step)
133 * description :
134 * parameters :
135 * step <horizontal scale ratio of vsu>
136 * return :
137 * offset (in word) of coefficient table
138 */
de_vsu_calc_fir_coef(u32 step)139 static u32 de_vsu_calc_fir_coef(u32 step)
140 {
141 u32 pt_coef;
142 u32 scale_ratio, int_part, float_part, fir_coef_ofst;
143
144 scale_ratio = step >> (VSU_PHASE_FRAC_BITWIDTH - 3);
145 int_part = scale_ratio >> 3;
146 float_part = scale_ratio & 0x7;
147 if (int_part == 0)
148 fir_coef_ofst = VSU_ZOOM0_SIZE;
149 else if (int_part == 1)
150 fir_coef_ofst = VSU_ZOOM0_SIZE + float_part;
151 else if (int_part == 2)
152 fir_coef_ofst = VSU_ZOOM0_SIZE + VSU_ZOOM1_SIZE
153 + (float_part >> 1);
154 else if (int_part == 3)
155 fir_coef_ofst = VSU_ZOOM0_SIZE + VSU_ZOOM1_SIZE
156 + VSU_ZOOM2_SIZE;
157 else if (int_part == 4)
158 fir_coef_ofst = VSU_ZOOM0_SIZE + VSU_ZOOM1_SIZE
159 + VSU_ZOOM2_SIZE + VSU_ZOOM3_SIZE;
160 else
161 fir_coef_ofst = VSU_ZOOM0_SIZE + VSU_ZOOM1_SIZE
162 + VSU_ZOOM2_SIZE + VSU_ZOOM3_SIZE + VSU_ZOOM4_SIZE;
163
164 pt_coef = fir_coef_ofst * VSU_PHASE_NUM;
165
166 return pt_coef;
167 }
168
de_vsu_calc_lay_scale_para(u32 disp,u32 chn,enum de_format_space fm_space,struct de_chn_info * chn_info,struct de_rect64_s * crop64,struct de_rect_s * scn_win,struct de_rect_s * crop32,struct de_scale_para * ypara,struct de_scale_para * cpara)169 s32 de_vsu_calc_lay_scale_para(u32 disp, u32 chn,
170 enum de_format_space fm_space, struct de_chn_info *chn_info,
171 struct de_rect64_s *crop64, struct de_rect_s *scn_win,
172 struct de_rect_s *crop32, struct de_scale_para *ypara,
173 struct de_scale_para *cpara)
174 {
175 u64 val;
176 struct de_vsu_private *priv = &(vsu_priv[disp][chn]);
177 u32 scale_mode = 0, linebuf = 0;
178
179 if (scn_win->width) {
180 val = crop64->width;
181 do_div(val, scn_win->width);
182 } else {
183 val = 0;
184 }
185 ypara->hstep = (u32)(val >> (32 - VSU_STEP_FRAC_BITWIDTH));
186
187 if (scn_win->height) {
188 val = crop64->height;
189 do_div(val, scn_win->height);
190 } else {
191 val = 0;
192 }
193 ypara->vstep = (u32)(val >> (32 - VSU_STEP_FRAC_BITWIDTH));
194
195 ypara->hphase =
196 (crop64->left & 0xffffffff) >> (32 - VSU_PHASE_FRAC_BITWIDTH);
197 ypara->vphase =
198 (crop64->top & 0xffffffff) >> (32 - VSU_PHASE_FRAC_BITWIDTH);
199
200 crop32->left = (s32)(crop64->left >> 32);
201 crop32->top = (s32)(crop64->top >> 32);
202
203 val = (crop64->width & 0xffffffff)
204 + ((u64)(crop64->left) & 0xffffffff);
205 crop32->width = (val >> 32) ?
206 ((crop64->width >> 32) + 1) : (crop64->width >> 32);
207
208 val = (crop64->height & 0xffffffff)
209 + ((u64)(crop64->top) & 0xffffffff);
210 crop32->height = (val >> 32) ?
211 ((crop64->height >> 32) + 1) : (crop64->height >> 32);
212
213 if (fm_space == DE_FORMAT_SPACE_RGB) {
214 cpara->hstep = ypara->hstep;
215 cpara->vstep = ypara->vstep;
216 cpara->hphase = ypara->hphase;
217 cpara->vphase = ypara->vphase;
218 return 0;
219 } else if (fm_space != DE_FORMAT_SPACE_YUV) {
220 DE_WRN("calc cpara for fm_space(%d)!\n", fm_space);
221 return -1;
222 }
223
224 if (chn_info->yuv_sampling == DE_YUV422) {
225 /* horizon crop info fix */
226 if (((crop32->left & 0x1) == 0x0)
227 && ((crop32->width & 0x1) == 0x1)) {
228 /* odd crop_w, crop down width, */
229 /* last line may disappear */
230 crop32->width--;
231 } else if (((crop32->left & 0x1) == 0x1)
232 && ((crop32->width & 0x1) == 0x0)) {
233 /* odd crop_x, crop down x, and phase + 1 */
234 ypara->hphase += (1U << VSU_PHASE_FRAC_BITWIDTH);
235 crop32->left--;
236 } else if (((crop32->left & 0x1) == 0x1)
237 && ((crop32->width & 0x1) == 0x1)) {
238 /* odd crop_x and crop_w, */
239 /* crop_x - 1, and phase + 1, crop_w + 1 */
240 ypara->hphase += (1U << VSU_PHASE_FRAC_BITWIDTH);
241 crop32->left--;
242 crop32->width++;
243 }
244
245 cpara->hstep = ypara->hstep >> 1;
246 cpara->vstep = ypara->vstep;
247 cpara->hphase = ypara->hphase;
248 cpara->vphase = ypara->vphase;
249 } else if (chn_info->yuv_sampling == DE_YUV420) {
250 /* horizon crop info fix */
251 if (((crop32->left & 0x1) == 0x0)
252 && ((crop32->width & 0x1) == 0x1)) {
253 /* odd crop_w, crop down width, */
254 /* last line may disappear */
255 crop32->width--;
256 } else if (((crop32->left & 0x1) == 0x1)
257 && ((crop32->width & 0x1) == 0x0)) {
258 /* odd crop_x, crop down x, and phase + 1 */
259 ypara->hphase += (1 << VSU_PHASE_FRAC_BITWIDTH);
260 crop32->left--;
261 } else if (((crop32->left & 0x1) == 0x1)
262 && ((crop32->width & 0x1) == 0x1)) {
263 /* odd crop_x and crop_w, crop_x - 1, */
264 /* and phase + 1, crop_w + 1 */
265 ypara->hphase += (1 << VSU_PHASE_FRAC_BITWIDTH);
266 crop32->left--;
267 crop32->width++;
268 }
269 /* vertical crop info fix */
270 if (((crop32->top & 0x1) == 0x0)
271 && ((crop32->height & 0x1) == 0x1)) {
272 /* odd crop_h, crop down height, */
273 /* last line may disappear */
274 crop32->height--;
275 } else if (((crop32->height & 0x1) == 0x1)
276 && ((crop32->height & 0x1) == 0x0)) {
277 /* odd crop_y, crop down y, and phase + 1 */
278 ypara->vphase += (1 << VSU_PHASE_FRAC_BITWIDTH);
279 crop32->top--;
280 } else if (((crop32->top & 0x1) == 0x1)
281 && ((crop32->height & 0x1) == 0x1)) {
282 /* odd crop_y and crop_h, crop_y - 1, */
283 /* and phase + 1, crop_h + 1 */
284 ypara->vphase += (1 << VSU_PHASE_FRAC_BITWIDTH);
285 crop32->top--;
286 crop32->height++;
287 }
288
289 cpara->hstep = ypara->hstep >> 1;
290 cpara->vstep = ypara->vstep >> 1;
291 /* H.261, H.263, MPEG-1 sample method */
292 /* cpara->hphase = (ypara->hphase>>1) */
293 /* - ((N2_POWER(1,VSU_PHASE_FRAC_BITWIDTH))>>2); */
294 /* MPEG-2, MPEG-4.2, H264, VC-1 sample method (default choise)*/
295 cpara->hphase = ypara->hphase >> 1;
296 if (chn_info->snr_en) {
297 scale_mode = 1;
298 if (chn_info->px_fmt == DE_FORMAT_YUV420_P ||
299 chn_info->px_fmt == DE_FORMAT_YUV420_SP_UVUV ||
300 chn_info->px_fmt == DE_FORMAT_YUV420_SP_VUVU ||
301 chn_info->px_fmt == DE_FORMAT_YUV420_SP_UVUV_10BIT ||
302 chn_info->px_fmt == DE_FORMAT_YUV420_SP_VUVU_10BIT ||
303 chn_info->px_fmt == DE_FORMAT_YUV420_SP_VUVU) {
304 /*cpara->vphase = 0x7b3333;*/
305 /* chorma vertical phase should -0.6 when input
306 * format
307 * is */
308 /* yuv420 and snr on */
309 if (priv->vsu_type == DE_SCALER_TYPE_VSU_ED) {
310 linebuf = de_feat_get_scale_linebuf_for_ed(disp, chn);
311 if (((chn_info->ovl_out_win.width <= linebuf)
312 && (chn_info->ovl_ypara.hstep < (1 << VSU_STEP_FRAC_BITWIDTH))
313 && (chn_info->ovl_ypara.vstep < (1 << VSU_STEP_FRAC_BITWIDTH)))) {
314 scale_mode = 2;
315 }
316 }
317
318 if (scale_mode == 2) {
319 /* chorma vertical phase should
320 * -0.25
321 * when input format is */
322 /* yuv420 */
323 cpara->vphase =
324 (ypara->vphase >> 1) -
325 (1
326 << (VSU_PHASE_FRAC_BITWIDTH - 2));
327 } else { /*scale_mode == 1*/
328 cpara->vphase =
329 (ypara->vphase >> 1) -
330 (((1 << VSU_PHASE_FRAC_BITWIDTH) *
331 153) >>
332 8);
333 }
334 } else {
335 cpara->vphase = ypara->vphase;
336 }
337 } else {
338 /* chorma vertical phase should -0.25 when input format is */
339 /* yuv420 */
340 cpara->vphase = (ypara->vphase >> 1) -
341 (1 << (VSU_PHASE_FRAC_BITWIDTH - 2));
342 }
343 } else if (chn_info->yuv_sampling == DE_YUV411) {
344 /* horizon crop info */
345 if (((crop32->left & 0x3) == 0x0)
346 && ((crop32->width & 0x3) != 0x0)) {
347 /* odd crop_w, crop down width, */
348 /* last 1-3 lines may disappear */
349 crop32->width = (crop32->width >> 2) << 2;
350 } else if (((crop32->left & 0x3) != 0x0)
351 && ((crop32->width & 0x3) == 0x0)) {
352 /* odd crop_x, crop down x, and phase + 1 */
353 ypara->hphase += ((crop32->left & 0x3) << VSU_PHASE_FRAC_BITWIDTH);
354 crop32->left = (crop32->left >> 2) << 2;
355 } else if (((crop32->left & 0x3) != 0x0)
356 && ((crop32->width & 0x3) != 0x0)) {
357 /* odd crop_x and crop_w, crop_x aligned to 4 pixel */
358 ypara->hphase += ((crop32->left & 0x3) << VSU_PHASE_FRAC_BITWIDTH);
359 crop32->width = ((crop32->width + (crop32->left & 0x3)) >> 2) << 2;
360 crop32->left = (crop32->left >> 2) << 2;
361 }
362
363 cpara->hstep = ypara->hstep >> 2;
364 cpara->vstep = ypara->vstep;
365 cpara->hphase = ypara->hphase;
366 cpara->vphase = ypara->vphase;
367 } else {
368 DE_WRN("not support yuv_sampling(%d)!\n", chn_info->yuv_sampling);
369 return -1;
370 }
371
372 return 0;
373 }
374
/*
 * de_vsu_calc_ovl_coord - map an overlay output coordinate back to the
 * source, rounding to the nearest pixel.
 * @disp: display index (unused, kept for interface symmetry)
 * @chn: channel index (unused, kept for interface symmetry)
 * @dst_coord: coordinate in output space
 * @scale_step: scale step in 4.19 fixed point
 *
 * Returns the corresponding source coordinate.
 */
u32 de_vsu_calc_ovl_coord(u32 disp, u32 chn,
	u32 dst_coord, u32 scale_step)
{
	u32 rounding = 1U << (VSU_STEP_FRAC_BITWIDTH - 1);
	u32 src_coord = (dst_coord * scale_step + rounding)
		>> VSU_STEP_FRAC_BITWIDTH;

	DE_INF("half_shift_coord=<%x,%d,%d,%d>\n", rounding,
		VSU_STEP_FRAC_BITWIDTH, dst_coord, src_coord);

	return src_coord;
}
388
de_vsu_calc_ovl_scale_para(u32 layer_num,struct de_scale_para * ypara,struct de_scale_para * cpara,struct de_scale_para * ovl_ypara,struct de_scale_para * ovl_cpara)389 s32 de_vsu_calc_ovl_scale_para(u32 layer_num,
390 struct de_scale_para *ypara,
391 struct de_scale_para *cpara,
392 struct de_scale_para *ovl_ypara,
393 struct de_scale_para *ovl_cpara)
394 {
395 u32 i;
396
397 if (layer_num == 1) {
398 /*only one layer enabled in one overlay */
399 /* set overlay scale para through this layer */
400 ovl_ypara->hphase = ypara[0].hphase;
401 ovl_ypara->vphase = ypara[0].vphase;
402 ovl_ypara->hstep = ypara[0].hstep;
403 ovl_ypara->vstep = ypara[0].vstep;
404
405 ovl_cpara->hphase = cpara[0].hphase;
406 ovl_cpara->vphase = cpara[0].vphase;
407 ovl_cpara->hstep = cpara[0].hstep;
408 ovl_cpara->vstep = cpara[0].vstep;
409 } else if (layer_num > 1) {
410 /* two or more layers enabled in one overlay */
411 /* set overlay scale step through first enabled layer */
412 ovl_ypara->hstep = ypara[0].hstep;
413 ovl_ypara->vstep = ypara[0].vstep;
414 ovl_cpara->hstep = cpara[0].hstep;
415 ovl_cpara->vstep = cpara[0].vstep;
416
417 /* set overlay phase through 1st enabled non-zero-phase layer */
418 for (i = 0; i < layer_num; i++) {
419 if (ypara[i].hphase != 0) {
420 ovl_ypara->hphase = ypara[i].hphase;
421 ovl_cpara->hphase = cpara[i].hphase;
422 break;
423 }
424 }
425 /* all layer phase equal to zero */
426 if (i == layer_num) {
427 ovl_ypara->hphase = ypara[0].hphase;
428 ovl_cpara->hphase = cpara[0].hphase;
429 }
430
431 /* set overlay phase through first non-zero layer */
432 for (i = 0; i < layer_num; i++) {
433 if (ypara[i].vphase != 0) {
434 ovl_ypara->vphase = ypara[i].vphase;
435 ovl_cpara->vphase = cpara[i].vphase;
436 break;
437 }
438 }
439 /* all layer phase equal to zero */
440 if (i == layer_num) {
441 ovl_ypara->vphase = ypara[0].vphase;
442 ovl_cpara->vphase = cpara[0].vphase;
443 }
444
445 }
446
447 return 0;
448 }
449
de_vsu_fix_tiny_size(u32 disp,u32 chn,struct de_rect_s * in_win,struct de_rect_s * out_win,struct de_scale_para * ypara,enum de_format_space fmt_space,struct de_rect_s * lay_win,u32 lay_num,u32 scn_width,u32 scn_height)450 u32 de_vsu_fix_tiny_size(u32 disp, u32 chn,
451 struct de_rect_s *in_win, struct de_rect_s *out_win,
452 struct de_scale_para *ypara, enum de_format_space fmt_space,
453 struct de_rect_s *lay_win, u32 lay_num,
454 u32 scn_width, u32 scn_height)
455 {
456 u32 result = 0x0;
457
458 if (!in_win->width || !in_win->height
459 || !out_win->width || !out_win->height
460 || !ypara->hstep || !ypara->vstep)
461 return result;
462
463 /* horizon */
464 if (in_win->width < VSU_MIN_INPUT_WIDTH
465 || out_win->width < VSU_MIN_INPUT_WIDTH) {
466 u32 org_out_win_width = out_win->width;
467 u32 shift = VSU_PHASE_FRAC_BITWIDTH;
468
469 if (ypara->hstep > (1 << shift)) {
470 /* scale down */
471 u64 val;
472 out_win->width = VSU_MIN_INPUT_WIDTH;
473 val = (u64)(in_win->width) << shift;
474 do_div(val, VSU_MIN_INPUT_WIDTH);
475 ypara->hstep = (u32)val;
476 result |= VSU_EXPAND_OUTWIDTH;
477 } else {
478 /* scale up */
479 in_win->width = VSU_MIN_INPUT_WIDTH;
480 out_win->width = VSU_MIN_INPUT_WIDTH * (1 << shift)
481 / ypara->hstep;
482 result |= (VSU_EXPAND_OUTWIDTH | VSU_EXPAND_OUTWIDTH);
483 }
484
485 if (out_win->width + out_win->left > scn_width) {
486 u32 i;
487
488 out_win->left -= (out_win->width - org_out_win_width);
489 for (i = 0; i < lay_num; i++) {
490 lay_win[i].left += ((ypara->hstep
491 * (out_win->width - org_out_win_width)) >> shift);
492 }
493 result |= VSU_LSHIFT_OUTWINDOW;
494 }
495 }
496
497 /* vertical */
498 if (in_win->height < VSU_MIN_INPUT_HEIGHT
499 || out_win->height < VSU_MIN_INPUT_HEIGHT) {
500 u32 org_out_win_height = out_win->height;
501 u32 shift = VSU_PHASE_FRAC_BITWIDTH;
502
503 if (ypara->vstep > (1 << shift)) {
504 /* scale down */
505 u64 val;
506 out_win->height = VSU_MIN_INPUT_HEIGHT;
507 val = (u64)(in_win->height) << shift;
508 do_div(val, VSU_MIN_INPUT_HEIGHT);
509 ypara->vstep = (u32)val;
510 result |= VSU_EXPAND_OUTHEIGHT;
511 } else {
512 /* scale up */
513 in_win->height = VSU_MIN_INPUT_HEIGHT;
514 out_win->height = VSU_MIN_INPUT_HEIGHT * (1 << shift)
515 / ypara->vstep;
516 result |= (VSU_EXPAND_OUTHEIGHT | VSU_EXPAND_INHEIGHT);
517 }
518
519 if (out_win->height + out_win->top > scn_height) {
520 u32 i;
521
522 out_win->top -= (out_win->height - org_out_win_height);
523 for (i = 0; i < lay_num; i++) {
524 lay_win[i].top += ((ypara->vstep
525 * (out_win->height - org_out_win_height)) >> shift);
526 }
527 result |= VSU_USHIFT_OUTWINDOW;
528 }
529 }
530
531 return result;
532 }
533
/*
 * de_vsu_fix_big_size - clamp the scaler input window to hardware limits
 * @disp: display index
 * @chn: channel index
 * @in_win: scaler input window, shrunk in place if needed
 * @out_win: scaler output window (read only)
 * @fmt_space: pixel format space, selects the line buffer length
 * @yuv_sampling: yuv subsampling, determines chroma alignment
 *
 * The input width is bounded by the line buffer and by
 * tap_num * output size in each direction, then aligned down to the
 * chroma sampling grid. Returns VSU_CUT_INWIDTH/VSU_CUT_INHEIGHT flags
 * for any dimension that was reduced (0 if untouched).
 */
u32 de_vsu_fix_big_size(u32 disp, u32 chn,
	struct de_rect_s *in_win, struct de_rect_s *out_win,
	enum de_format_space fmt_space, enum de_yuv_sampling yuv_sampling)
{
	struct de_vsu_private *priv = &(vsu_priv[disp][chn]);
	u32 result = 0;
	u32 wshift = 0;
	u32 hshift = 0;
	u32 linebuf;
	u32 width, height, limit;

	if (fmt_space == DE_FORMAT_SPACE_YUV) {
		switch (yuv_sampling) {
		case DE_YUV422:
			wshift = 1;
			break;
		case DE_YUV420:
			wshift = 1;
			hshift = 1;
			break;
		case DE_YUV411:
			wshift = 2;
			break;
		default:
			break;
		}
		linebuf = de_feat_get_scale_linebuf_for_yuv(disp, chn);
	} else if (fmt_space == DE_FORMAT_SPACE_RGB) {
		linebuf = de_feat_get_scale_linebuf_for_rgb(disp, chn);
	} else {
		linebuf = 2048;
	}

	/* width: bounded by line buffer and taps * out width, then
	 * aligned down to the chroma grid */
	width = in_win->width;
	if (width > linebuf)
		width = linebuf;
	limit = priv->tap_num_hori * out_win->width;
	if (width > limit)
		width = limit;
	width &= (~((1U << wshift) - 1));
	if (width < in_win->width) {
		in_win->width = width;
		result |= VSU_CUT_INWIDTH;
	}

	/* height: bounded by taps * out height, aligned likewise */
	height = in_win->height;
	limit = priv->tap_num_vert * out_win->height;
	if (height > limit)
		height = limit;
	height &= (~((1U << hshift) - 1));
	if (height < in_win->height) {
		in_win->height = height;
		result |= VSU_CUT_INHEIGHT;
	}

	return result;
}
589
de_vsu_calc_scale_para(u32 fix_size_result,enum de_format_space fmt_space,enum de_yuv_sampling yuv_sampling,struct de_rect_s * out_win,struct de_rect_s * ywin,struct de_rect_s * cwin,struct de_scale_para * ypara,struct de_scale_para * cpara)590 s32 de_vsu_calc_scale_para(u32 fix_size_result,
591 enum de_format_space fmt_space, enum de_yuv_sampling yuv_sampling,
592 struct de_rect_s *out_win,
593 struct de_rect_s *ywin, struct de_rect_s *cwin,
594 struct de_scale_para *ypara, struct de_scale_para *cpara)
595 {
596 u32 wshift = 0;
597 u32 hshift = 0;
598
599 if (fmt_space == DE_FORMAT_SPACE_YUV) {
600 if (yuv_sampling == DE_YUV422) {
601 wshift = 1;
602 } else if (yuv_sampling == DE_YUV420) {
603 wshift = 1;
604 hshift = 1;
605 } else if (yuv_sampling == DE_YUV411) {
606 wshift = 2;
607 }
608 }
609 cwin->width = ywin->width >> wshift;
610 ywin->width = cwin->width << wshift;
611 cwin->height = ywin->height >> hshift;
612 ywin->height = cwin->height << hshift;
613
614 if (fix_size_result & VSU_CUT_INWIDTH) {
615 u64 val;
616
617 val = (u64)ywin->width << VSU_STEP_FRAC_BITWIDTH;
618 do_div(val, out_win->width);
619 ypara->hstep = (u32)val;
620 ypara->hphase = 0; /* no meaning when coarse scale using */
621
622 cwin->width = ywin->width >> wshift;
623 val = (u64)cwin->width << VSU_STEP_FRAC_BITWIDTH;
624 do_div(val, out_win->width);
625 cpara->hstep = (u32)val;
626 cpara->hphase = 0; /* no meaning when coarse scale using */
627 }
628 if (fix_size_result
629 & (VSU_CUT_INHEIGHT | RTMX_CUT_INHEIGHT)) {
630 u64 val;
631
632 val = (u64)ywin->height << VSU_STEP_FRAC_BITWIDTH;
633 do_div(val, out_win->height);
634 ypara->vstep = (u32)val;
635 ypara->vphase = 0; /* no meaning when coarse scale using */
636
637 cwin->height = ywin->height >> hshift;
638 val = (u64)cwin->height << VSU_STEP_FRAC_BITWIDTH;
639 do_div(val, out_win->height);
640 cpara->vstep = (u32)val;
641 cpara->vphase = 0; /* no meaning when coarse scale using */
642 }
643
644 return 0;
645 }
646
/*
 * de_vsu8_set_para - program the VSU8 scaler registers for one channel
 * @disp: display index
 * @chn: channel index
 * @chn_info: resolved channel state (format, windows, overlay scale para)
 *
 * Returns 0 on success, -1 for an unsupported pixel format space.
 */
static s32 de_vsu8_set_para(u32 disp, u32 chn,
	struct de_chn_info *chn_info)
{
	struct de_vsu_private *priv = &(vsu_priv[disp][chn]);
	struct vsu8_reg *reg = get_vsu8_reg(priv);
	u32 scale_mode = 0;
	u32 pt_coef;

	/* scale_mode: 0 for interleaved yuv422 and rgb, 1 for the
	 * subsampled planar/semi-planar yuv layouts */
	if (chn_info->px_fmt_space == DE_FORMAT_SPACE_YUV) {
		if (chn_info->yuv_sampling == DE_YUV422) {
			switch (chn_info->px_fmt) {
			case DE_FORMAT_YUV422_I_YVYU:
			case DE_FORMAT_YUV422_I_YUYV:
			case DE_FORMAT_YUV422_I_UYVY:
			case DE_FORMAT_YUV422_I_VYUY:
			case DE_FORMAT_YUV422_I_YVYU_10BIT:
			case DE_FORMAT_YUV422_I_YUYV_10BIT:
			case DE_FORMAT_YUV422_I_UYVY_10BIT:
			case DE_FORMAT_YUV422_I_VYUY_10BIT:
				scale_mode = 0;
				break;
			default:
				scale_mode = 1;
				break;
			}
		} else if (chn_info->yuv_sampling == DE_YUV420) {
			scale_mode = 1;
		} else if (chn_info->yuv_sampling == DE_YUV411) {
			scale_mode = 1;
		} else {
			DE_WRN("yuv_sampling=%d\n", chn_info->yuv_sampling);
		}
	} else if (chn_info->px_fmt_space != DE_FORMAT_SPACE_RGB) {
		DE_WRN("px_fmt_space=%d\n", chn_info->px_fmt_space);
		return -1;
	}

	reg->ctl.bits.en = 1;
	reg->scale_mode.dwval = scale_mode;
	priv->set_blk_dirty(priv, VSU8_REG_BLK_CTL, 1);

	/* hardware size registers hold (size - 1) */
	reg->out_size.bits.width = chn_info->scn_win.width ?
		(chn_info->scn_win.width - 1) : 0;
	reg->out_size.bits.height = chn_info->scn_win.height ?
		(chn_info->scn_win.height - 1) : 0;

	reg->glb_alpha.dwval = chn_info->glb_alpha;
	priv->set_blk_dirty(priv, VSU8_REG_BLK_ATTR, 1);

	/* luma input size, step and phase (steps/phases are shifted into
	 * the register's valid bit range) */
	reg->y_in_size.bits.width = chn_info->ovl_out_win.width ?
		(chn_info->ovl_out_win.width - 1) : 0;
	reg->y_in_size.bits.height = chn_info->ovl_out_win.height ?
		(chn_info->ovl_out_win.height - 1) : 0;
	reg->y_hstep.dwval = chn_info->ovl_ypara.hstep
		<< VSU8_STEP_VALID_START_BIT;
	reg->y_vstep.dwval = chn_info->ovl_ypara.vstep
		<< VSU8_STEP_VALID_START_BIT;
	reg->y_hphase.dwval = chn_info->ovl_ypara.hphase
		<< VSU8_PHASE_VALID_START_BIT;
	reg->y_vphase.dwval = chn_info->ovl_ypara.vphase
		<< VSU8_PHASE_VALID_START_BIT;
	priv->set_blk_dirty(priv, VSU8_REG_BLK_YPARA, 1);

	/* chroma input size, step and phase */
	reg->c_in_size.bits.width = chn_info->c_win.width ?
		(chn_info->c_win.width - 1) : 0;
	reg->c_in_size.bits.height = chn_info->c_win.height ?
		(chn_info->c_win.height - 1) : 0;
	reg->c_hstep.dwval = chn_info->ovl_cpara.hstep
		<< VSU8_STEP_VALID_START_BIT;
	reg->c_vstep.dwval = chn_info->ovl_cpara.vstep
		<< VSU8_STEP_VALID_START_BIT;
	reg->c_hphase.dwval = chn_info->ovl_cpara.hphase
		<< VSU8_PHASE_VALID_START_BIT;
	reg->c_vphase.dwval = chn_info->ovl_cpara.vphase
		<< VSU8_PHASE_VALID_START_BIT;
	priv->set_blk_dirty(priv, VSU8_REG_BLK_CPARA, 1);

	/* fir coefficient */
	/* ch0 */
	pt_coef = de_vsu_calc_fir_coef(chn_info->ovl_ypara.hstep);
	memcpy(reg->y_hori_coeff, lan2coefftab32 + pt_coef,
		sizeof(u32) * VSU_PHASE_NUM);
	pt_coef = de_vsu_calc_fir_coef(chn_info->ovl_ypara.vstep);
	memcpy(reg->y_vert_coeff, lan2coefftab32 + pt_coef,
		sizeof(u32) * VSU_PHASE_NUM);

	/* ch1/2: lanczos table for rgb, bicubic for yuv chroma */
	if (chn_info->px_fmt_space == DE_FORMAT_SPACE_RGB) {
		pt_coef = de_vsu_calc_fir_coef(chn_info->ovl_cpara.hstep);
		memcpy(reg->c_hori_coeff, lan2coefftab32 + pt_coef,
			sizeof(u32) * VSU_PHASE_NUM);
	} else {
		pt_coef = de_vsu_calc_fir_coef(chn_info->ovl_cpara.hstep);
		memcpy(reg->c_hori_coeff, bicubic4coefftab32 + pt_coef,
			sizeof(u32) * VSU_PHASE_NUM);
	}
	priv->set_blk_dirty(priv, VSU8_REG_BLK_COEFF0, 1);
	priv->set_blk_dirty(priv, VSU8_REG_BLK_COEFF1, 1);
	priv->set_blk_dirty(priv, VSU8_REG_BLK_COEFF2, 1);

	return 0;
}
749
/*
 * de_vsu10_set_para - program the VSU10 scaler registers for one channel
 * @disp: display index
 * @chn: channel index
 * @chn_info: resolved channel state (format, windows, overlay scale para)
 *
 * Returns 0 on success, -1 for an unsupported pixel format space or
 * yuv sampling mode.
 */
static s32 de_vsu10_set_para(u32 disp, u32 chn,
	struct de_chn_info *chn_info)
{
	struct de_vsu_private *priv = &(vsu_priv[disp][chn]);
	struct vsu10_reg *reg = get_vsu10_reg(priv);
	u32 scale_mode = 0;
	u32 pt_coef;

	/* scale_mode: 0 for yuv444/rgb, 1 for subsampled yuv */
	if (chn_info->px_fmt_space == DE_FORMAT_SPACE_YUV) {
		switch (chn_info->yuv_sampling) {
		case DE_YUV444:
			scale_mode = 0;
			break;
		case DE_YUV422:
		case DE_YUV420:
		case DE_YUV411:
			scale_mode = 1;
			break;
		default:
			DE_WRN("yuv_sampling=%d\n", chn_info->yuv_sampling);
			return -1;
		}
	} else if (chn_info->px_fmt_space != DE_FORMAT_SPACE_RGB) {
		DE_WRN("px_fmt_space=%d\n", chn_info->px_fmt_space);
		return -1;
	}

	reg->ctl.bits.en = 1;
	reg->scale_mode.dwval = scale_mode;
	priv->set_blk_dirty(priv, VSU10_REG_BLK_CTL, 1);

	/* hardware size registers hold (size - 1) */
	reg->out_size.bits.width = chn_info->scn_win.width ?
		(chn_info->scn_win.width - 1) : 0;
	reg->out_size.bits.height = chn_info->scn_win.height ?
		(chn_info->scn_win.height - 1) : 0;

	reg->glb_alpha.dwval = chn_info->glb_alpha;
	priv->set_blk_dirty(priv, VSU10_REG_BLK_ATTR, 1);

	/* luma input size, step and phase (shifted to the register's
	 * valid bit range) */
	reg->y_in_size.bits.width = chn_info->ovl_out_win.width ?
		(chn_info->ovl_out_win.width - 1) : 0;
	reg->y_in_size.bits.height = chn_info->ovl_out_win.height ?
		(chn_info->ovl_out_win.height - 1) : 0;
	reg->y_hstep.dwval = chn_info->ovl_ypara.hstep
		<< VSU10_STEP_VALID_START_BIT;
	reg->y_vstep.dwval = chn_info->ovl_ypara.vstep
		<< VSU10_STEP_VALID_START_BIT;
	reg->y_hphase.dwval = chn_info->ovl_ypara.hphase
		<< VSU10_PHASE_VALID_START_BIT;
	reg->y_vphase0.dwval = chn_info->ovl_ypara.vphase
		<< VSU10_PHASE_VALID_START_BIT;
	priv->set_blk_dirty(priv, VSU10_REG_BLK_YPARA, 1);

	/* chroma input size, step and phase */
	reg->c_in_size.bits.width = chn_info->c_win.width ?
		(chn_info->c_win.width - 1) : 0;
	reg->c_in_size.bits.height = chn_info->c_win.height ?
		(chn_info->c_win.height - 1) : 0;
	reg->c_hstep.dwval = chn_info->ovl_cpara.hstep
		<< VSU10_STEP_VALID_START_BIT;
	reg->c_vstep.dwval = chn_info->ovl_cpara.vstep
		<< VSU10_STEP_VALID_START_BIT;
	reg->c_hphase.dwval = chn_info->ovl_cpara.hphase
		<< VSU10_PHASE_VALID_START_BIT;
	reg->c_vphase0.dwval = chn_info->ovl_cpara.vphase
		<< VSU10_PHASE_VALID_START_BIT;
	priv->set_blk_dirty(priv, VSU10_REG_BLK_CPARA, 1);

	/* fir coefficient */
	/* ch0: 8-tap horizontal (split left/right tables), 4-tap vertical */
	pt_coef = de_vsu_calc_fir_coef(chn_info->ovl_ypara.hstep);
	memcpy(reg->y_hori_coeff0, lan3coefftab32_left + pt_coef,
		sizeof(u32) * VSU_PHASE_NUM);
	memcpy(reg->y_hori_coeff1, lan3coefftab32_right + pt_coef,
		sizeof(u32) * VSU_PHASE_NUM);
	pt_coef = de_vsu_calc_fir_coef(chn_info->ovl_ypara.vstep);
	memcpy(reg->y_vert_coeff, lan2coefftab32 + pt_coef,
		sizeof(u32) * VSU_PHASE_NUM);

	/* ch1/2: lanczos tables for rgb, bicubic for yuv chroma */
	if (chn_info->px_fmt_space == DE_FORMAT_SPACE_RGB) {
		pt_coef = de_vsu_calc_fir_coef(chn_info->ovl_cpara.hstep);
		memcpy(reg->c_hori_coeff0, lan3coefftab32_left + pt_coef,
			sizeof(u32) * VSU_PHASE_NUM);
		memcpy(reg->c_hori_coeff1, lan3coefftab32_right + pt_coef,
			sizeof(u32) * VSU_PHASE_NUM);
		pt_coef = de_vsu_calc_fir_coef(chn_info->ovl_cpara.vstep);
		memcpy(reg->c_vert_coeff, lan2coefftab32 + pt_coef,
			sizeof(u32) * VSU_PHASE_NUM);
	} else {
		pt_coef = de_vsu_calc_fir_coef(chn_info->ovl_cpara.hstep);
		memcpy(reg->c_hori_coeff0,
			bicubic8coefftab32_left + pt_coef,
			sizeof(u32) * VSU_PHASE_NUM);
		memcpy(reg->c_hori_coeff1,
			bicubic8coefftab32_right + pt_coef,
			sizeof(u32) * VSU_PHASE_NUM);
		pt_coef = de_vsu_calc_fir_coef(chn_info->ovl_cpara.vstep);
		memcpy(reg->c_vert_coeff,
			bicubic4coefftab32 + pt_coef,
			sizeof(u32) * VSU_PHASE_NUM);
	}
	priv->set_blk_dirty(priv, VSU10_REG_BLK_COEFF0, 1);
	priv->set_blk_dirty(priv, VSU10_REG_BLK_COEFF1, 1);
	priv->set_blk_dirty(priv, VSU10_REG_BLK_COEFF2, 1);
	priv->set_blk_dirty(priv, VSU10_REG_BLK_COEFF3, 1);
	priv->set_blk_dirty(priv, VSU10_REG_BLK_COEFF4, 1);
	priv->set_blk_dirty(priv, VSU10_REG_BLK_COEFF5, 1);

	return 0;
}
860
/*
 * de_vsu_ed_set_para - program the edge-directed (VSU_ED) scaler
 * registers for one channel.
 * @disp: display index
 * @chn: channel index
 * @chn_info: resolved channel state (format, windows, overlay scale para)
 *
 * Returns 0 on success, -1 for an unsupported pixel format space or
 * yuv sampling mode.
 */
static s32 de_vsu_ed_set_para(u32 disp, u32 chn,
	struct de_chn_info *chn_info)
{
	struct de_vsu_private *priv = &(vsu_priv[disp][chn]);
	struct vsu_ed_reg *reg = get_vsu_ed_reg(priv);
	u32 scale_mode = 0;
	u32 pt_coef;


	/* scale_mode: 0 for yuv444/rgb; for subsampled yuv, 2 (edge-
	 * directed) when the input fits the line buffer and both
	 * directions scale up, otherwise 1 */
	if (chn_info->px_fmt_space == DE_FORMAT_SPACE_YUV) {
		u32 linebuf = de_feat_get_scale_linebuf_for_ed(disp, chn);

		switch (chn_info->yuv_sampling) {
		case DE_YUV444:
			scale_mode = 0;
			break;
		case DE_YUV422:
		case DE_YUV420:
		case DE_YUV411:
			if ((chn_info->ovl_out_win.width <= linebuf)
				&& (chn_info->ovl_ypara.hstep < (1 << VSU_STEP_FRAC_BITWIDTH))
				&& (chn_info->ovl_ypara.vstep < (1 << VSU_STEP_FRAC_BITWIDTH)))
				scale_mode = 2;
			else
				scale_mode = 1;
			break;
		default:
			DE_WRN("yuv_sampling=%d\n", chn_info->yuv_sampling);
			return -1;
		}
	} else if (chn_info->px_fmt_space != DE_FORMAT_SPACE_RGB) {
		DE_WRN("px_fmt_space=%d\n", chn_info->px_fmt_space);
		return -1;
	}

	reg->ctl.bits.en = 1;
	reg->scale_mode.dwval = scale_mode;
	priv->set_blk_dirty(priv, VSU_ED_REG_BLK_CTL, 1);

	/* fixed direction/edge detection thresholds
	 * (magic values - presumably vendor tuning; not documented here) */
	reg->dir_thr.dwval = 0x0000FF01;
	reg->edge_thr.dwval = 0x00080000;
	reg->dir_ctl.dwval = 0x00000000;
	reg->angle_thr.dwval = 0x00020000;
	priv->set_blk_dirty(priv, VSU_ED_REG_BLK_DIR_SHP, 1);

	/* hardware size registers hold (size - 1) */
	reg->out_size.bits.width = chn_info->scn_win.width ?
		(chn_info->scn_win.width - 1) : 0;
	reg->out_size.bits.height = chn_info->scn_win.height ?
		(chn_info->scn_win.height - 1) : 0;

	reg->glb_alpha.dwval = chn_info->glb_alpha;
	priv->set_blk_dirty(priv, VSU_ED_REG_BLK_ATTR, 1);

	/* luma input size, step and phase (shifted to the register's
	 * valid bit range) */
	reg->y_in_size.bits.width = chn_info->ovl_out_win.width ?
		(chn_info->ovl_out_win.width - 1) : 0;
	reg->y_in_size.bits.height = chn_info->ovl_out_win.height ?
		(chn_info->ovl_out_win.height - 1) : 0;
	reg->y_hstep.dwval = chn_info->ovl_ypara.hstep
		<< VSU_ED_STEP_VALID_START_BIT;
	reg->y_vstep.dwval = chn_info->ovl_ypara.vstep
		<< VSU_ED_STEP_VALID_START_BIT;
	reg->y_hphase.dwval = chn_info->ovl_ypara.hphase
		<< VSU_ED_PHASE_VALID_START_BIT;
	reg->y_vphase0.dwval = chn_info->ovl_ypara.vphase
		<< VSU_ED_PHASE_VALID_START_BIT;
	priv->set_blk_dirty(priv, VSU_ED_REG_BLK_YPARA, 1);

	/* chroma input size, step and phase */
	reg->c_in_size.bits.width = chn_info->c_win.width ?
		(chn_info->c_win.width - 1) : 0;
	reg->c_in_size.bits.height = chn_info->c_win.height ?
		(chn_info->c_win.height - 1) : 0;
	reg->c_hstep.dwval = chn_info->ovl_cpara.hstep
		<< VSU_ED_STEP_VALID_START_BIT;
	reg->c_vstep.dwval = chn_info->ovl_cpara.vstep
		<< VSU_ED_STEP_VALID_START_BIT;
	reg->c_hphase.dwval = chn_info->ovl_cpara.hphase
		<< VSU_ED_PHASE_VALID_START_BIT;
	reg->c_vphase0.dwval = chn_info->ovl_cpara.vphase
		<< VSU_ED_PHASE_VALID_START_BIT;
	priv->set_blk_dirty(priv, VSU_ED_REG_BLK_CPARA, 1);

	/* fir coefficient */
	/* ch0: 8-tap horizontal (split left/right tables), 4-tap vertical */
	pt_coef = de_vsu_calc_fir_coef(chn_info->ovl_ypara.hstep);
	memcpy(reg->y_hori_coeff0, lan3coefftab32_left + pt_coef,
		sizeof(u32) * VSU_PHASE_NUM);
	memcpy(reg->y_hori_coeff1, lan3coefftab32_right + pt_coef,
		sizeof(u32) * VSU_PHASE_NUM);
	pt_coef = de_vsu_calc_fir_coef(chn_info->ovl_ypara.vstep);
	memcpy(reg->y_vert_coeff, lan2coefftab32 + pt_coef,
		sizeof(u32) * VSU_PHASE_NUM);

	/* ch1/2: lanczos tables for rgb, bicubic for yuv chroma */
	if (chn_info->px_fmt_space == DE_FORMAT_SPACE_RGB) {
		pt_coef = de_vsu_calc_fir_coef(chn_info->ovl_cpara.hstep);
		memcpy(reg->c_hori_coeff0, lan3coefftab32_left + pt_coef,
			sizeof(u32) * VSU_PHASE_NUM);
		memcpy(reg->c_hori_coeff1, lan3coefftab32_right + pt_coef,
			sizeof(u32) * VSU_PHASE_NUM);
		pt_coef = de_vsu_calc_fir_coef(chn_info->ovl_cpara.vstep);
		memcpy(reg->c_vert_coeff, lan2coefftab32 + pt_coef,
			sizeof(u32) * VSU_PHASE_NUM);
	} else {
		pt_coef = de_vsu_calc_fir_coef(chn_info->ovl_cpara.hstep);
		memcpy(reg->c_hori_coeff0,
			bicubic8coefftab32_left + pt_coef,
			sizeof(u32) * VSU_PHASE_NUM);
		memcpy(reg->c_hori_coeff1,
			bicubic8coefftab32_right + pt_coef,
			sizeof(u32) * VSU_PHASE_NUM);
		pt_coef = de_vsu_calc_fir_coef(chn_info->ovl_cpara.vstep);
		memcpy(reg->c_vert_coeff,
			bicubic4coefftab32 + pt_coef,
			sizeof(u32) * VSU_PHASE_NUM);
	}
	priv->set_blk_dirty(priv, VSU_ED_REG_BLK_COEFF0, 1);
	priv->set_blk_dirty(priv, VSU_ED_REG_BLK_COEFF1, 1);
	priv->set_blk_dirty(priv, VSU_ED_REG_BLK_COEFF2, 1);
	priv->set_blk_dirty(priv, VSU_ED_REG_BLK_COEFF3, 1);
	priv->set_blk_dirty(priv, VSU_ED_REG_BLK_COEFF4, 1);
	priv->set_blk_dirty(priv, VSU_ED_REG_BLK_COEFF5, 1);

	return 0;
}
985
986
de_vsu_set_para(u32 disp,u32 chn,struct de_chn_info * chn_info)987 s32 de_vsu_set_para(u32 disp, u32 chn,
988 struct de_chn_info *chn_info)
989 {
990 struct de_vsu_private *priv = &(vsu_priv[disp][chn]);
991
992 if (priv->vsu_type == DE_SCALER_TYPE_VSU8) {
993 return de_vsu8_set_para(disp, chn, chn_info);
994 } else if (priv->vsu_type == DE_SCALER_TYPE_VSU10) {
995 return de_vsu10_set_para(disp, chn, chn_info);
996 } else if (priv->vsu_type == DE_SCALER_TYPE_VSU_ED) {
997 return de_vsu_ed_set_para(disp, chn, chn_info);
998 }
999
1000 return -1;
1001 }
1002
de_vsu_disable(u32 disp,u32 chn)1003 s32 de_vsu_disable(u32 disp, u32 chn)
1004 {
1005 struct de_vsu_private *priv = &(vsu_priv[disp][chn]);
1006
1007 if (priv->vsu_type == DE_SCALER_TYPE_VSU8) {
1008 struct vsu8_reg *reg = get_vsu8_reg(priv);
1009
1010 reg->ctl.bits.en = 0;
1011 } else if (priv->vsu_type == DE_SCALER_TYPE_VSU10) {
1012 struct vsu10_reg *reg = get_vsu10_reg(priv);
1013
1014 reg->ctl.bits.en = 0;
1015 } else if (priv->vsu_type == DE_SCALER_TYPE_VSU_ED) {
1016 struct vsu_ed_reg *reg = get_vsu_ed_reg(priv);
1017
1018 reg->ctl.bits.en = 0;
1019 }
1020 priv->set_blk_dirty(priv, VSU8_REG_BLK_CTL, 1);
1021 return 0;
1022 }
1023
de_vsu_init_sharp_para(u32 disp,u32 chn)1024 s32 de_vsu_init_sharp_para(u32 disp, u32 chn)
1025 {
1026 struct de_vsu_private *priv = &(vsu_priv[disp][chn]);
1027
1028 if (priv->vsu_type == DE_SCALER_TYPE_VSU_ED) {
1029 struct vsu_ed_reg *reg = get_vsu_ed_reg(priv);
1030
1031 reg->sharp_en.dwval = 0;
1032 reg->sharp_coring.dwval = 0x20;
1033 reg->sharp_gain0.dwval = 0x03200078;
1034 reg->sharp_gain1.dwval = 0x01080000;
1035 priv->set_blk_dirty(priv, VSU_ED_REG_BLK_DIR_SHP, 1);
1036 }
1037 return 0;
1038 }
1039
de_vsu_set_sharp_para(u32 disp,u32 chn,u32 fmt,u32 dev_type,struct de_vsu_sharp_config * para,u32 bypass)1040 s32 de_vsu_set_sharp_para(u32 disp, u32 chn,
1041 u32 fmt, u32 dev_type,
1042 struct de_vsu_sharp_config *para, u32 bypass)
1043 {
1044 struct de_vsu_private *priv = &(vsu_priv[disp][chn]);
1045
1046 if (priv->vsu_type == DE_SCALER_TYPE_VSU_ED) {
1047 struct vsu_ed_reg *reg = get_vsu_ed_reg(priv);
1048 u32 level;
1049 u32 en;
1050 u32 gain;
1051 u32 linebuf;
1052 const u32 PEAK2D_PARA_NUM = 11;
1053 const s32 peak2d_para[][2] = {
1054 /* lcd, tv */
1055 {0, 0}, /* gain for yuv */
1056 {5, 5}, /* */
1057 {10, 10}, /* */
1058 {15, 15}, /* */
1059 {20, 20}, /* */
1060 {25, 25}, /* */
1061 {30, 30}, /* */
1062 {35, 35}, /* */
1063 {40, 40}, /* */
1064 {45, 45}, /* */
1065 {50, 50}, /* gain for yuv */
1066 };
1067
1068 linebuf = de_feat_get_scale_linebuf_for_ed(disp, chn);
1069
1070 /*
1071 * enable condition:
1072 * 1. scale up;
1073 * 2. user level > 0
1074 * 3. yuv format
1075 * 4. inw <- lb
1076 */
1077 if ((para->inw < para->outw) && (para->inw <= linebuf)
1078 && (para->inh < para->outh) && (para->level > 0)
1079 && (fmt == 0) && (bypass == 0)) {
1080 en = 1;
1081 } else {
1082 en = 0;
1083 }
1084 reg->sharp_en.dwval = en;
1085 if (en) {
1086 level = para->level > (PEAK2D_PARA_NUM - 1) ?
1087 (PEAK2D_PARA_NUM - 1) : para->level;
1088 gain = peak2d_para[level][dev_type];
1089 reg->sharp_gain1.dwval = gain;
1090 }
1091 priv->set_blk_dirty(priv, VSU_ED_REG_BLK_DIR_SHP, 1);
1092 }
1093 return 0;
1094 }
1095
de_vsu_init(u32 disp,u8 __iomem * de_reg_base)1096 s32 de_vsu_init(u32 disp, u8 __iomem *de_reg_base)
1097 {
1098 u32 chn_num, chn;
1099
1100 chn_num = de_feat_get_num_chns(disp);
1101 for (chn = 0; chn < chn_num; ++chn) {
1102 u32 phy_chn = de_feat_get_phy_chn_id(disp, chn);
1103 u8 __iomem *reg_base = (u8 __iomem *)(de_reg_base
1104 + DE_CHN_OFFSET(phy_chn) + CHN_SCALER_OFFSET);
1105 u32 rcq_used = de_feat_is_using_rcq(disp);
1106
1107 struct de_vsu_private *priv = &vsu_priv[disp][chn];
1108 struct de_reg_mem_info *reg_mem_info = &(priv->reg_mem_info);
1109 struct de_reg_block *block;
1110
1111 priv->vsu_type = de_feat_get_scaler_type(disp, chn);
1112 switch (priv->vsu_type) {
1113 case DE_SCALER_TYPE_VSU8:
1114 reg_mem_info->size = sizeof(struct vsu8_reg);
1115 reg_mem_info->vir_addr = (u8 *)de_top_reg_memory_alloc(
1116 reg_mem_info->size, (void *)&(reg_mem_info->phy_addr),
1117 rcq_used);
1118 if (NULL == reg_mem_info->vir_addr) {
1119 DE_WRN("alloc vsu[%d][%d] mm fail!size=0x%x\n",
1120 disp, chn, reg_mem_info->size);
1121 return -1;
1122 }
1123
1124 block = &(priv->vsu8_reg_blks[VSU8_REG_BLK_CTL]);
1125 block->phy_addr = reg_mem_info->phy_addr;
1126 block->vir_addr = reg_mem_info->vir_addr;
1127 block->size = 0x14;
1128 block->reg_addr = reg_base;
1129
1130 block = &(priv->vsu8_reg_blks[VSU8_REG_BLK_ATTR]);
1131 block->phy_addr = reg_mem_info->phy_addr + 0x40;
1132 block->vir_addr = reg_mem_info->vir_addr + 0x40;
1133 block->size = 0x8;
1134 block->reg_addr = reg_base + 0x40;
1135
1136 block = &(priv->vsu8_reg_blks[VSU8_REG_BLK_YPARA]);
1137 block->phy_addr = reg_mem_info->phy_addr + 0x80;
1138 block->vir_addr = reg_mem_info->vir_addr + 0x80;
1139 block->size = 0x1C;
1140 block->reg_addr = reg_base + 0x80;
1141
1142 block = &(priv->vsu8_reg_blks[VSU8_REG_BLK_CPARA]);
1143 block->phy_addr = reg_mem_info->phy_addr + 0xC0;
1144 block->vir_addr = reg_mem_info->vir_addr + 0xC0;
1145 block->size = 0x1C;
1146 block->reg_addr = reg_base + 0xC0;
1147
1148 block = &(priv->vsu8_reg_blks[VSU8_REG_BLK_COEFF0]);
1149 block->phy_addr = reg_mem_info->phy_addr + 0x200;
1150 block->vir_addr = reg_mem_info->vir_addr + 0x200;
1151 block->size = 32 * 4;
1152 block->reg_addr = reg_base + 0x200;
1153
1154 block = &(priv->vsu8_reg_blks[VSU8_REG_BLK_COEFF1]);
1155 block->phy_addr = reg_mem_info->phy_addr + 0x400;
1156 block->vir_addr = reg_mem_info->vir_addr + 0x400;
1157 block->size = 32 * 4;
1158 block->reg_addr = reg_base + 0x400;
1159
1160 block = &(priv->vsu8_reg_blks[VSU8_REG_BLK_COEFF2]);
1161 block->phy_addr = reg_mem_info->phy_addr + 0x600;
1162 block->vir_addr = reg_mem_info->vir_addr + 0x600;
1163 block->size = 32 * 4;
1164 block->reg_addr = reg_base + 0x600;
1165
1166 priv->reg_blk_num = VSU8_REG_BLK_NUM;
1167
1168 priv->tap_num_hori = VSU8_TAP_NUM_HORI;
1169 priv->tap_num_vert = VSU8_TAP_NUM_VERT;
1170 break;
1171 case DE_SCALER_TYPE_VSU10:
1172 reg_mem_info->size = sizeof(struct vsu10_reg);
1173 reg_mem_info->vir_addr = (u8 *)de_top_reg_memory_alloc(
1174 reg_mem_info->size, (void *)&(reg_mem_info->phy_addr),
1175 rcq_used);
1176 if (NULL == reg_mem_info->vir_addr) {
1177 DE_WRN("alloc vsu[%d][%d] mm fail!size=0x%x\n",
1178 disp, chn, reg_mem_info->size);
1179 return -1;
1180 }
1181
1182 block = &(priv->vsu10_reg_blks[VSU10_REG_BLK_CTL]);
1183 block->phy_addr = reg_mem_info->phy_addr;
1184 block->vir_addr = reg_mem_info->vir_addr;
1185 block->size = 0x14;
1186 block->reg_addr = reg_base;
1187
1188 block = &(priv->vsu10_reg_blks[VSU10_REG_BLK_ATTR]);
1189 block->phy_addr = reg_mem_info->phy_addr + 0x40;
1190 block->vir_addr = reg_mem_info->vir_addr + 0x40;
1191 block->size = 0x8;
1192 block->reg_addr = reg_base + 0x40;
1193
1194 block = &(priv->vsu10_reg_blks[VSU10_REG_BLK_YPARA]);
1195 block->phy_addr = reg_mem_info->phy_addr + 0x80;
1196 block->vir_addr = reg_mem_info->vir_addr + 0x80;
1197 block->size = 0x20;
1198 block->reg_addr = reg_base + 0x80;
1199
1200 block = &(priv->vsu10_reg_blks[VSU10_REG_BLK_CPARA]);
1201 block->phy_addr = reg_mem_info->phy_addr + 0xC0;
1202 block->vir_addr = reg_mem_info->vir_addr + 0xC0;
1203 block->size = 0x20;
1204 block->reg_addr = reg_base + 0xC0;
1205
1206 block = &(priv->vsu10_reg_blks[VSU10_REG_BLK_COEFF0]);
1207 block->phy_addr = reg_mem_info->phy_addr + 0x200;
1208 block->vir_addr = reg_mem_info->vir_addr + 0x200;
1209 block->size = 32 * 4;
1210 block->reg_addr = reg_base + 0x200;
1211
1212 block = &(priv->vsu10_reg_blks[VSU10_REG_BLK_COEFF1]);
1213 block->phy_addr = reg_mem_info->phy_addr + 0x300;
1214 block->vir_addr = reg_mem_info->vir_addr + 0x300;
1215 block->size = 32 * 4;
1216 block->reg_addr = reg_base + 0x300;
1217
1218 block = &(priv->vsu10_reg_blks[VSU10_REG_BLK_COEFF2]);
1219 block->phy_addr = reg_mem_info->phy_addr + 0x400;
1220 block->vir_addr = reg_mem_info->vir_addr + 0x400;
1221 block->size = 32 * 4;
1222 block->reg_addr = reg_base + 0x400;
1223
1224 block = &(priv->vsu10_reg_blks[VSU10_REG_BLK_COEFF3]);
1225 block->phy_addr = reg_mem_info->phy_addr + 0x600;
1226 block->vir_addr = reg_mem_info->vir_addr + 0x600;
1227 block->size = 32 * 4;
1228 block->reg_addr = reg_base + 0x600;
1229
1230 block = &(priv->vsu10_reg_blks[VSU10_REG_BLK_COEFF4]);
1231 block->phy_addr = reg_mem_info->phy_addr + 0x700;
1232 block->vir_addr = reg_mem_info->vir_addr + 0x700;
1233 block->size = 32 * 4;
1234 block->reg_addr = reg_base + 0x700;
1235
1236 block = &(priv->vsu10_reg_blks[VSU10_REG_BLK_COEFF5]);
1237 block->phy_addr = reg_mem_info->phy_addr + 0x800;
1238 block->vir_addr = reg_mem_info->vir_addr + 0x800;
1239 block->size = 32 * 4;
1240 block->reg_addr = reg_base + 0x800;
1241
1242 priv->reg_blk_num = VSU10_REG_BLK_NUM;
1243
1244 priv->tap_num_hori = VSU10_TAP_NUM_HORI;
1245 priv->tap_num_vert = VSU10_TAP_NUM_VERT;
1246 break;
1247 case DE_SCALER_TYPE_VSU_ED:
1248 reg_mem_info->size = sizeof(struct vsu_ed_reg);
1249 reg_mem_info->vir_addr = (u8 *)de_top_reg_memory_alloc(
1250 reg_mem_info->size, (void *)&(reg_mem_info->phy_addr),
1251 rcq_used);
1252 if (NULL == reg_mem_info->vir_addr) {
1253 DE_WRN("alloc vsu[%d][%d] mm fail!size=0x%x\n",
1254 disp, chn, reg_mem_info->size);
1255 return -1;
1256 }
1257
1258 block = &(priv->vsu_ed_reg_blks[VSU_ED_REG_BLK_CTL]);
1259 block->phy_addr = reg_mem_info->phy_addr;
1260 block->vir_addr = reg_mem_info->vir_addr;
1261 block->size = 0x14;
1262 block->reg_addr = reg_base;
1263
1264 block = &(priv->vsu_ed_reg_blks[VSU_ED_REG_BLK_DIR_SHP]);
1265 block->phy_addr = reg_mem_info->phy_addr + 0x20;
1266 block->vir_addr = reg_mem_info->vir_addr + 0x20;
1267 block->size = 0x20;
1268 block->reg_addr = reg_base + 0x20;
1269
1270 block = &(priv->vsu_ed_reg_blks[VSU_ED_REG_BLK_ATTR]);
1271 block->phy_addr = reg_mem_info->phy_addr + 0x40;
1272 block->vir_addr = reg_mem_info->vir_addr + 0x40;
1273 block->size = 0x8;
1274 block->reg_addr = reg_base + 0x40;
1275
1276 block = &(priv->vsu_ed_reg_blks[VSU_ED_REG_BLK_YPARA]);
1277 block->phy_addr = reg_mem_info->phy_addr + 0x80;
1278 block->vir_addr = reg_mem_info->vir_addr + 0x80;
1279 block->size = 0x20;
1280 block->reg_addr = reg_base + 0x80;
1281
1282 block = &(priv->vsu_ed_reg_blks[VSU_ED_REG_BLK_CPARA]);
1283 block->phy_addr = reg_mem_info->phy_addr + 0xC0;
1284 block->vir_addr = reg_mem_info->vir_addr + 0xC0;
1285 block->size = 0x20;
1286 block->reg_addr = reg_base + 0xC0;
1287
1288 block = &(priv->vsu_ed_reg_blks[VSU_ED_REG_BLK_COEFF0]);
1289 block->phy_addr = reg_mem_info->phy_addr + 0x200;
1290 block->vir_addr = reg_mem_info->vir_addr + 0x200;
1291 block->size = 32 * 4;
1292 block->reg_addr = reg_base + 0x200;
1293
1294 block = &(priv->vsu_ed_reg_blks[VSU_ED_REG_BLK_COEFF1]);
1295 block->phy_addr = reg_mem_info->phy_addr + 0x300;
1296 block->vir_addr = reg_mem_info->vir_addr + 0x300;
1297 block->size = 32 * 4;
1298 block->reg_addr = reg_base + 0x300;
1299
1300 block = &(priv->vsu_ed_reg_blks[VSU_ED_REG_BLK_COEFF2]);
1301 block->phy_addr = reg_mem_info->phy_addr + 0x400;
1302 block->vir_addr = reg_mem_info->vir_addr + 0x400;
1303 block->size = 32 * 4;
1304 block->reg_addr = reg_base + 0x400;
1305
1306 block = &(priv->vsu_ed_reg_blks[VSU_ED_REG_BLK_COEFF3]);
1307 block->phy_addr = reg_mem_info->phy_addr + 0x600;
1308 block->vir_addr = reg_mem_info->vir_addr + 0x600;
1309 block->size = 32 * 4;
1310 block->reg_addr = reg_base + 0x600;
1311
1312 block = &(priv->vsu_ed_reg_blks[VSU_ED_REG_BLK_COEFF4]);
1313 block->phy_addr = reg_mem_info->phy_addr + 0x700;
1314 block->vir_addr = reg_mem_info->vir_addr + 0x700;
1315 block->size = 32 * 4;
1316 block->reg_addr = reg_base + 0x700;
1317
1318 block = &(priv->vsu_ed_reg_blks[VSU_ED_REG_BLK_COEFF5]);
1319 block->phy_addr = reg_mem_info->phy_addr + 0x800;
1320 block->vir_addr = reg_mem_info->vir_addr + 0x800;
1321 block->size = 32 * 4;
1322 block->reg_addr = reg_base + 0x800;
1323
1324 priv->reg_blk_num = VSU_ED_REG_BLK_NUM;
1325
1326 priv->tap_num_hori = VSU_ED_TAP_NUM_HORI;
1327 priv->tap_num_vert = VSU_ED_TAP_NUM_VERT;
1328 break;
1329 default:
1330 DE_WRN("not support this vsu_type=%d\n",
1331 priv->vsu_type);
1332 break;
1333 }
1334
1335 if (rcq_used)
1336 priv->set_blk_dirty = vsu_set_rcq_head_dirty;
1337 else
1338 priv->set_blk_dirty = vsu_set_block_dirty;
1339
1340 }
1341
1342 return 0;
1343 }
1344
de_vsu_exit(u32 disp)1345 s32 de_vsu_exit(u32 disp)
1346 {
1347 u32 chn_num, chn;
1348
1349 chn_num = de_feat_get_num_chns(disp);
1350 for (chn = 0; chn < chn_num; ++chn) {
1351 struct de_vsu_private *priv = &vsu_priv[disp][chn];
1352 struct de_reg_mem_info *reg_mem_info = &(priv->reg_mem_info);
1353
1354 if (reg_mem_info->vir_addr != NULL)
1355 de_top_reg_memory_free(reg_mem_info->vir_addr,
1356 reg_mem_info->phy_addr, reg_mem_info->size);
1357 }
1358
1359 return 0;
1360 }
1361
de_vsu_get_reg_blocks(u32 disp,struct de_reg_block ** blks,u32 * blk_num)1362 s32 de_vsu_get_reg_blocks(u32 disp,
1363 struct de_reg_block **blks, u32 *blk_num)
1364 {
1365 u32 chn_num, chn;
1366 u32 total = 0;
1367
1368 chn_num = de_feat_get_num_chns(disp);
1369
1370 if (blks == NULL) {
1371 for (chn = 0; chn < chn_num; ++chn) {
1372 total += vsu_priv[disp][chn].reg_blk_num;
1373 }
1374 *blk_num = total;
1375 return 0;
1376 }
1377
1378 for (chn = 0; chn < chn_num; ++chn) {
1379 struct de_vsu_private *priv = &(vsu_priv[disp][chn]);
1380 struct de_reg_block *blk_begin, *blk_end;
1381 u32 num;
1382
1383 if (*blk_num >= priv->reg_blk_num) {
1384 num = priv->reg_blk_num;
1385 } else {
1386 DE_WRN("should not happen\n");
1387 num = *blk_num;
1388 }
1389 blk_begin = priv->vsu8_reg_blks;
1390 blk_end = blk_begin + num;
1391 for (; blk_begin != blk_end; ++blk_begin) {
1392 *blks++ = blk_begin;
1393 }
1394 total += num;
1395 *blk_num -= num;
1396 }
1397 *blk_num = total;
1398 return 0;
1399 }
1400