/*
 * g2d_ovl_v.c
 *
 * Copyright (c) 2007-2019 Allwinnertech Co., Ltd.
 * Author: zhengxiaobin <zhengxiaobin@allwinnertech.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include "g2d_ovl_v.h"

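/**
 * g2d_ovl_v_calc_coarse - program the coarse down-sampler of the video overlay
 * @p_ovl_v: pointer to the overlay video submodule
 * @format: pixel format of the input layer
 * @inw: input width
 * @inh: input height
 * @outw: requested output width
 * @outh: requested output height
 * @midw: returned intermediate width after coarse scaling
 * @midh: returned intermediate height after coarse scaling
 *
 * When the input is at least 8 times wider (or 4 times taller) than the
 * requested output, the coarse down-sampler is programmed and the
 * intermediate size is clamped to outw << 3 (outh << 2); otherwise the
 * input size is passed through unchanged.
 */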
__s32 g2d_ovl_v_calc_coarse(struct ovl_v_submodule *p_ovl_v, __u32 format,
                            __u32 inw, __u32 inh, __u32 outw, __u32 outh,
                            __u32 *midw, __u32 *midh)
{
        __u32 tmp;
        __s32 ret = -1;
        struct g2d_mixer_ovl_v_reg *p_reg = p_ovl_v->get_reg(p_ovl_v);

        if (!p_reg)
                goto OUT;

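        /*
         * Each *_down_sample0 register packs (mid_size << 16) | in_size for
         * the luma plane; *_down_sample1 carries the corresponding pair for
         * the chroma plane, scaled by the format's subsampling.
         */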
        switch (format) {
        case G2D_FORMAT_IYUV422_V0Y1U0Y0:
        case G2D_FORMAT_IYUV422_Y1V0Y0U0:
        case G2D_FORMAT_IYUV422_U0Y1V0Y0:
        case G2D_FORMAT_IYUV422_Y1U0Y0V0:
                /* interleaved YUV422 format: no coarse down-sampling */
                *midw = inw;
                *midh = inh;
                break;
        case G2D_FORMAT_YUV422UVC_V1U1V0U0:
        case G2D_FORMAT_YUV422UVC_U1V1U0V0:
        case G2D_FORMAT_YUV422_PLANAR:
                /* 4:2:2: chroma is half width, full height */
                if (inw >= (outw << 3)) {
                        *midw = outw << 3;
                        tmp = (*midw << 16) | inw;
                        p_reg->hor_down_sample0.dwval = tmp;
                        tmp = (*midw << 15) | ((inw + 1) >> 1);
                        p_reg->hor_down_sample1.dwval = tmp;
                } else {
                        *midw = inw;
                }
                if (inh >= (outh << 2)) {
                        *midh = outh << 2;
                        tmp = (*midh << 16) | inh;
                        p_reg->ver_down_sample0.dwval = tmp;
                        p_reg->ver_down_sample1.dwval = tmp;
                } else {
                        *midh = inh;
                }
                break;
        case G2D_FORMAT_Y8:
        case G2D_FORMAT_YUV420_PLANAR:
        case G2D_FORMAT_YUV420UVC_V1U1V0U0:
        case G2D_FORMAT_YUV420UVC_U1V1U0V0:
                /* 4:2:0 (and Y8): chroma is half width, half height */
                if (inw >= (outw << 3)) {
                        *midw = outw << 3;
                        tmp = (*midw << 16) | inw;
                        p_reg->hor_down_sample0.dwval = tmp;
                        tmp = (*midw << 15) | ((inw + 1) >> 1);
                        p_reg->hor_down_sample1.dwval = tmp;
                } else {
                        *midw = inw;
                }
                if (inh >= (outh << 2)) {
                        *midh = outh << 2;
                        tmp = (*midh << 16) | inh;
                        p_reg->ver_down_sample0.dwval = tmp;
                        tmp = (*midh << 15) | ((inh + 1) >> 1);
                        p_reg->ver_down_sample1.dwval = tmp;
                } else {
                        *midh = inh;
                }
                break;
        case G2D_FORMAT_YUV411_PLANAR:
        case G2D_FORMAT_YUV411UVC_V1U1V0U0:
        case G2D_FORMAT_YUV411UVC_U1V1U0V0:
                /* 4:1:1: chroma is quarter width, full height */
                if (inw >= (outw << 3)) {
                        *midw = outw << 3;
                        tmp = (*midw << 16) | inw;
                        p_reg->hor_down_sample0.dwval = tmp;
                        tmp = (*midw << 14) | ((inw + 3) >> 2);
                        p_reg->hor_down_sample1.dwval = tmp;
                } else {
                        *midw = inw;
                }
                if (inh >= (outh << 2)) {
                        *midh = outh << 2;
                        tmp = (*midh << 16) | inh;
                        p_reg->ver_down_sample0.dwval = tmp;
                        p_reg->ver_down_sample1.dwval = tmp;
                } else {
                        *midh = inh;
                }
                break;
        default:
                /* no chroma subsampling: both planes share the same pair */
                if (inw >= (outw << 3)) {
                        *midw = outw << 3;
                        tmp = (*midw << 16) | inw;
                        p_reg->hor_down_sample0.dwval = tmp;
                        p_reg->hor_down_sample1.dwval = tmp;
                } else {
                        *midw = inw;
                }
                if (inh >= (outh << 2)) {
                        *midh = outh << 2;
                        tmp = (*midh << 16) | inh;
                        p_reg->ver_down_sample0.dwval = tmp;
                        p_reg->ver_down_sample1.dwval = tmp;
                } else {
                        *midh = inh;
                }
                break;
        }
        p_ovl_v->set_block_dirty(p_ovl_v, 0, 1);
        ret = 0;
OUT:
        return ret;
}

/**
 * g2d_ovl_v_fc_set - enable fill color for the video overlay layer
 * @p_ovl_v: pointer to the overlay video submodule
 * @color_value: fill color value
 */
__s32 g2d_ovl_v_fc_set(struct ovl_v_submodule *p_ovl_v, __u32 color_value)
{
        __s32 ret = -1;
        struct g2d_mixer_ovl_v_reg *p_reg = p_ovl_v->get_reg(p_ovl_v);

        if (!p_reg)
                goto OUT;

        p_reg->ovl_attr.bits.lay_fillcolor_en = 1;
        p_reg->ovl_fill_color = color_value;
        p_ovl_v->set_block_dirty(p_ovl_v, 0, 1);
        ret = 0;

OUT:
        return ret;
}

/**
 * g2d_vlayer_set - configure the video overlay layer from an image descriptor
 * @p_ovl_v: pointer to the overlay video submodule
 * @sel: layer number
 * @p_image: source image (format, geometry, buffer addresses)
 */
__s32 g2d_vlayer_set(struct ovl_v_submodule *p_ovl_v, __u32 sel,
                     g2d_image_enh *p_image)
{
        unsigned long long addr0, addr1, addr2;
        __u32 tmp;
        __u32 ycnt, ucnt, vcnt;
        __u32 pitch0, pitch1, pitch2;
        __u32 ch, cw, cy, cx;
        __s32 ret = -1;
        struct g2d_mixer_ovl_v_reg *p_reg = p_ovl_v->get_reg(p_ovl_v);

        if (!p_reg)
                goto OUT;

        p_reg->ovl_attr.bits.lay_fbfmt = p_image->format;
        p_reg->ovl_attr.bits.alpha_mode = p_image->mode;
        if (p_image->bpremul)
                p_reg->ovl_attr.bits.lay_premul_ctl = 1;
        p_reg->ovl_attr.bits.lay_glbalpha = p_image->alpha & 0xff;
        p_reg->ovl_attr.bits.lay_en = 1;

        p_reg->ovl_mem.bits.lay_width =
            (p_image->clip_rect.w == 0 ? 0 : p_image->clip_rect.w - 1) & 0x1fff;
        p_reg->ovl_mem.bits.lay_height =
            (p_image->clip_rect.h == 0 ? 0 : p_image->clip_rect.h - 1) & 0x1fff;

        p_reg->ovl_winsize.bits.width =
            (p_image->clip_rect.w == 0 ? 0 : p_image->clip_rect.w - 1) & 0x1fff;
        p_reg->ovl_winsize.bits.height =
            (p_image->clip_rect.h == 0 ? 0 : p_image->clip_rect.h - 1) & 0x1fff;

        /* offset is set to 0, ovl size is set to layer size */
        p_reg->ovl_mem_coor.dwval = 0;
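
        /*
         * Derive the chroma plane size and clip offsets from the luma
         * geometry according to the format's subsampling (4:2:2, 4:2:0
         * or 4:1:1); other formats carry no separate chroma planes, so
         * the values stay zero.
         */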
        if ((p_image->format >= G2D_FORMAT_YUV422UVC_V1U1V0U0) &&
            (p_image->format <= G2D_FORMAT_YUV422_PLANAR)) {
                cw = p_image->width >> 1;
                ch = p_image->height;
                cx = p_image->clip_rect.x >> 1;
                cy = p_image->clip_rect.y;
        } else if ((p_image->format >= G2D_FORMAT_YUV420UVC_V1U1V0U0) &&
                   (p_image->format <= G2D_FORMAT_YUV420_PLANAR)) {
                cw = p_image->width >> 1;
                ch = p_image->height >> 1;
                cx = p_image->clip_rect.x >> 1;
                cy = p_image->clip_rect.y >> 1;
        } else if ((p_image->format >= G2D_FORMAT_YUV411UVC_V1U1V0U0) &&
                   (p_image->format <= G2D_FORMAT_YUV411_PLANAR)) {
                cw = p_image->width >> 2;
                ch = p_image->height;
                cx = p_image->clip_rect.x >> 2;
                cy = p_image->clip_rect.y;
        } else {
                cw = 0;
                ch = 0;
                cx = 0;
                cy = 0;
        }
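        /*
         * Per-plane pitch (aligned as requested via p_image->align[]) and
         * start address of each plane, offset into the buffer by the clip
         * rectangle origin.
         */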
        g2d_byte_cal(p_image->format, &ycnt, &ucnt, &vcnt);
        pitch0 = cal_align(ycnt * p_image->width, p_image->align[0]);
        p_reg->ovl_mem_pitch0 = pitch0;
        pitch1 = cal_align(ucnt * cw, p_image->align[1]);
        p_reg->ovl_mem_pitch1 = pitch1;
        pitch2 = cal_align(vcnt * cw, p_image->align[2]);
        p_reg->ovl_mem_pitch2 = pitch2;
        addr0 = p_image->laddr[0] + ((__u64)p_image->haddr[0] << 32) +
                pitch0 * p_image->clip_rect.y + ycnt * p_image->clip_rect.x;
        p_reg->ovl_mem_low_addr0 = addr0 & 0xffffffff;
        addr1 = p_image->laddr[1] + ((__u64)p_image->haddr[1] << 32) +
                pitch1 * cy + ucnt * cx;
        p_reg->ovl_mem_low_addr1 = addr1 & 0xffffffff;
        addr2 = p_image->laddr[2] + ((__u64)p_image->haddr[2] << 32) +
                pitch2 * cy + vcnt * cx;
        p_reg->ovl_mem_low_addr2 = addr2 & 0xffffffff;
        tmp = ((addr0 >> 32) & 0xff) | ((addr1 >> 32) & 0xff) << 8 |
              ((addr2 >> 32) & 0xff) << 16;
        p_reg->ovl_mem_high_addr.dwval = tmp;
        if (p_image->bbuff == 0)
                g2d_ovl_v_fc_set(p_ovl_v, p_image->color);
        p_ovl_v->set_block_dirty(p_ovl_v, 0, 1);
        ret = 0;
OUT:
        return ret;
}

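/**
 * g2d_vlayer_overlay_set - set the overlay window size and the layer position
 * @p_ovl_v: pointer to the overlay video submodule
 * @sel: layer number
 * @coor: top-left coordinate of the layer inside the overlay window
 * @w: overlay window width
 * @h: overlay window height
 */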
__s32 g2d_vlayer_overlay_set(struct ovl_v_submodule *p_ovl_v, __u32 sel,
                             g2d_coor *coor, __u32 w, __u32 h)
{
        __s32 ret = -1;
        struct g2d_mixer_ovl_v_reg *p_reg = p_ovl_v->get_reg(p_ovl_v);

        if (!p_reg)
                goto OUT;

        p_reg->ovl_winsize.bits.width = (w - 1) & 0x1fff;
        p_reg->ovl_winsize.bits.height = (h - 1) & 0x1fff;

        p_reg->ovl_mem_coor.bits.lay_xcoor = coor->x;
        p_reg->ovl_mem_coor.bits.lay_ycoor = coor->y;
        ret = 0;
OUT:
        return ret;
}
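
/*
 * Allocate the shadow register memory for this submodule from the RCQ
 * pool and record both the shadow buffer and the hardware register base
 * (base + G2D_V0) in the register block descriptor.
 */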
static int ovl_v_rcq_setup(struct ovl_v_submodule *p_ovl_v, u8 __iomem *base,
                           struct g2d_rcq_mem_info *p_rcq_info)
{
        u8 __iomem *reg_base = base + G2D_V0;
        int ret = -1;

        if (!p_ovl_v) {
                G2D_ERR_MSG("Null pointer!\n");
                goto OUT;
        }

        p_ovl_v->reg_info->size = sizeof(struct g2d_mixer_ovl_v_reg);
        p_ovl_v->reg_info->vir_addr = (u8 *)g2d_top_reg_memory_alloc(
            p_ovl_v->reg_info->size, (void *)&(p_ovl_v->reg_info->phy_addr),
            p_rcq_info);

        if (!p_ovl_v->reg_info->vir_addr) {
                G2D_ERR_MSG("Malloc ovl_v reg rcq memory fail!\n");
                goto OUT;
        }

        p_ovl_v->reg_blks->vir_addr = p_ovl_v->reg_info->vir_addr;
        p_ovl_v->reg_blks->phy_addr = p_ovl_v->reg_info->phy_addr;
        p_ovl_v->reg_blks->size = p_ovl_v->reg_info->size;
        p_ovl_v->reg_blks->reg_addr = reg_base;
        ret = 0;

OUT:
        return ret;
}

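/*
 * Accessors wired into the submodule's function pointers in
 * g2d_ovl_v_submodule_setup().
 */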
static __u32 ovl_v_get_reg_block_num(struct ovl_v_submodule *p_ovl_v)
{
        if (p_ovl_v)
                return p_ovl_v->reg_blk_num;

        return 0;
}

static __s32 ovl_v_get_reg_block(struct ovl_v_submodule *p_ovl_v,
                                 struct g2d_reg_block **blks)
{
        int i = 0;

        if (p_ovl_v) {
                for (i = 0; i < p_ovl_v->reg_blk_num; ++i)
                        blks[i] = p_ovl_v->reg_blks + i;
        }

        return 0;
}

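/*
 * Return the register block to program: the RCQ shadow copy when RCQ is
 * enabled, otherwise the memory-mapped hardware registers themselves.
 */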
static struct g2d_mixer_ovl_v_reg *
ovl_v_get_reg(struct ovl_v_submodule *p_ovl_v)
{
#if G2D_MIXER_RCQ_USED == 1
        return (struct g2d_mixer_ovl_v_reg *)(p_ovl_v->reg_blks->vir_addr);
#else
        return (struct g2d_mixer_ovl_v_reg *)(p_ovl_v->reg_blks->reg_addr);
#endif
}

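/*
 * Mark the register block dirty so its contents are flushed to hardware,
 * either through the RCQ header (RCQ mode) or the software dirty flag.
 */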
static void ovl_v_set_block_dirty(struct ovl_v_submodule *p_ovl_v,
                                  __u32 blk_id, __u32 dirty)
{
#if G2D_MIXER_RCQ_USED == 1
        if (p_ovl_v && p_ovl_v->reg_blks->rcq_hd)
                p_ovl_v->reg_blks->rcq_hd->dirty.bits.dirty = dirty;
        else
                G2D_ERR_MSG("Null pointer!\n");
#else
        if (p_ovl_v)
                p_ovl_v->reg_blks->dirty = dirty;
        else
                G2D_ERR_MSG("Null pointer!\n");
#endif
}

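/* RCQ memory required by this submodule, rounded up to the RCQ alignment. */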
static __u32 ovl_v_get_rcq_mem_size(struct ovl_v_submodule *p_ovl_v)
{
        return G2D_RCQ_BYTE_ALIGN(sizeof(struct g2d_mixer_ovl_v_reg));
}

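/*
 * Free the register block descriptors, the register info and the
 * submodule itself.
 */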
static __s32 ovl_v_destory(struct ovl_v_submodule *p_ovl_v)
{
        if (p_ovl_v) {
                kfree(p_ovl_v->reg_blks);
                p_ovl_v->reg_blks = NULL;

                kfree(p_ovl_v->reg_info);
                p_ovl_v->reg_info = NULL;
                kfree(p_ovl_v);
        }

        return 0;
}

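/**
 * g2d_ovl_v_submodule_setup - allocate and initialise the video overlay submodule
 * @p_frame: mixer frame this submodule belongs to
 *
 * Returns the submodule with its function pointers populated, or NULL on
 * allocation failure.  The register memory itself is attached later through
 * the rcq_setup() callback.
 */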
struct ovl_v_submodule *
g2d_ovl_v_submodule_setup(struct g2d_mixer_frame *p_frame)
{
        struct ovl_v_submodule *p_ovl_v = NULL;

        p_ovl_v = kmalloc(sizeof(struct ovl_v_submodule),
                          GFP_KERNEL | __GFP_ZERO);

        if (!p_ovl_v) {
                G2D_ERR_MSG("Kmalloc ovl_v submodule fail!\n");
                return NULL;
        }

        p_ovl_v->rcq_setup = ovl_v_rcq_setup;
        p_ovl_v->reg_blk_num = VI_LAYER_NUMBER;
        p_ovl_v->get_reg_block_num = ovl_v_get_reg_block_num;
        p_ovl_v->get_reg_block = ovl_v_get_reg_block;
        p_ovl_v->get_reg = ovl_v_get_reg;
        p_ovl_v->set_block_dirty = ovl_v_set_block_dirty;
        p_ovl_v->get_rcq_mem_size = ovl_v_get_rcq_mem_size;
        p_ovl_v->destory = ovl_v_destory;

        p_ovl_v->reg_blks =
            kmalloc(sizeof(struct g2d_reg_block) * p_ovl_v->reg_blk_num,
                    GFP_KERNEL | __GFP_ZERO);
        p_ovl_v->reg_info =
            kmalloc(sizeof(struct g2d_reg_mem_info), GFP_KERNEL | __GFP_ZERO);

        if (!p_ovl_v->reg_blks || !p_ovl_v->reg_info) {
                G2D_ERR_MSG("Kmalloc ovl_v reg info fail!\n");
                goto FREE_WB;
        }

        return p_ovl_v;

FREE_WB:
        kfree(p_ovl_v->reg_blks);
        kfree(p_ovl_v->reg_info);
        kfree(p_ovl_v);

        return NULL;
}