/* Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */
#include <string.h>
#include "vpe_priv.h"
#include "common.h"
#include "vpe10_resource.h"
#include "vpe10_cmd_builder.h"
#include "vpe10_vpec.h"
#include "vpe10_cdc.h"
#include "vpe10_dpp.h"
#include "vpe10_mpc.h"
#include "vpe10_opp.h"
#include "vpe_command.h"
#include "vpe10_cm_common.h"
#include "vpe10_background.h"
#include "vpe10/inc/asic/bringup_vpe_6_1_0_offset.h"
#include "vpe10/inc/asic/bringup_vpe_6_1_0_sh_mask.h"
#include "vpe10/inc/asic/bringup_vpe_6_1_0_default.h"
#include "vpe10/inc/asic/vpe_1_0_offset.h"
#include "custom_fp16.h"
#include "custom_float.h"
#include "background.h"
#include "vpe_visual_confirm.h"

#define LUT_NUM_ENTRIES   (17 * 17 * 17)
#define LUT_ENTRY_SIZE    (2)
#define LUT_NUM_COMPONENT (3)
#define LUT_BUFFER_SIZE   (LUT_NUM_ENTRIES * LUT_ENTRY_SIZE * LUT_NUM_COMPONENT)
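// total: 17x17x17 = 4913 entries x 2 bytes x 3 components = 29478 bytes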
// set field/register/bitfield name
#define SFRB(field_name, reg_name, post_fix) .field_name = reg_name##__##field_name##post_fix
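// e.g., SFRB(MY_FIELD, MY_REG, __SHIFT) expands to .MY_FIELD = MY_REG__MY_FIELD__SHIFT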

#define BASE_INNER(seg_id) VPE_BASE__INST0_SEG##seg_id

#define BASE(seg_id) BASE_INNER(seg_id)

// set register with block id and default value; lastWrittenVal is initialized to the
// default while isWritten is set to false
#define SRIDFVL(reg_name, block, id)                                                               \
    .reg_name = {BASE(reg##reg_name##_BASE_IDX) + reg##reg_name, reg##reg_name##_##DEFAULT,        \
        reg##reg_name##_##DEFAULT, false}
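// e.g., SRIDFVL(MY_REG, ...) expands to
//   .MY_REG = {BASE(regMY_REG_BASE_IDX) + regMY_REG, regMY_REG_DEFAULT, regMY_REG_DEFAULT, false}
// (the block and id arguments are not used by the macro body)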

/***************** CDC registers ****************/
#define cdc_regs(id) [id] = {CDC_REG_LIST_VPE10(id)}

static struct vpe10_cdc_registers cdc_regs[] = {cdc_regs(0)};

static const struct vpe10_cdc_shift cdc_shift = {CDC_FLIED_LIST_VPE10(__SHIFT)};

static const struct vpe10_cdc_mask cdc_mask = {CDC_FLIED_LIST_VPE10(_MASK)};

/***************** DPP registers ****************/
#define dpp_regs(id) [id] = {DPP_REG_LIST_VPE10(id)}

static struct vpe10_dpp_registers dpp_regs[] = {dpp_regs(0)};

static const struct vpe10_dpp_shift dpp_shift = {DPP_FIELD_LIST_VPE10(__SHIFT)};

static const struct vpe10_dpp_mask dpp_mask = {DPP_FIELD_LIST_VPE10(_MASK)};

/***************** MPC registers ****************/
#define mpc_regs(id) [id] = {MPC_REG_LIST_VPE10(id)}

static struct vpe10_mpc_registers mpc_regs[] = {mpc_regs(0)};

static const struct vpe10_mpc_shift mpc_shift = {MPC_FIELD_LIST_VPE10(__SHIFT)};

static const struct vpe10_mpc_mask mpc_mask = {MPC_FIELD_LIST_VPE10(_MASK)};

/***************** OPP registers ****************/
#define opp_regs(id) [id] = {OPP_REG_LIST_VPE10(id)}

static struct vpe10_opp_registers opp_regs[] = {opp_regs(0)};

static const struct vpe10_opp_shift opp_shift = {OPP_FIELD_LIST_VPE10(__SHIFT)};

static const struct vpe10_opp_mask opp_mask = {OPP_FIELD_LIST_VPE10(_MASK)};
static struct vpe_caps caps = {
    .lut_size = LUT_BUFFER_SIZE,
    .rotation_support = 0,
    .h_mirror_support = 1,
    .v_mirror_support = 0,
    .is_apu = 1,
    .bg_color_check_support = 0,
    .resource_caps =
        {
            .num_dpp = 1,
            .num_opp = 1,
            .num_mpc_3dlut = 1,
            .num_queue = 8,
        },
    .color_caps = {.dpp =
                       {
                           .pre_csc = 1,
                           .luma_key = 0,
                           .dgam_ram = 0,
                           .post_csc = 1,
                           .gamma_corr = 1,
                           .hw_3dlut = 1,
                           .ogam_ram = 1, /**< programmable gamma in output -> gamma_corr */
                           .ocsc = 0,
                           .dgam_rom_caps =
                               {
                                   .srgb = 1,
                                   .bt2020 = 1,
                                   .gamma2_2 = 1,
                                   .pq = 1,
                                   .hlg = 1,
                               },
                       },
        .mpc =
            {
                .gamut_remap = 1,
                .ogam_ram = 1,
                .ocsc = 1,
                .shared_3d_lut = 1,
                .global_alpha = 1,
                .top_bottom_blending = 0,
            }},
    .plane_caps =
        {
            .per_pixel_alpha = 1,
            .input_pixel_format_support =
                {
                    .argb_packed_32b = 1,
                    .nv12 = 1,
                    .fp16 = 0,
                    .p010 = 1, /**< planar 4:2:0 10-bit */
                    .p016 = 0, /**< planar 4:2:0 16-bit */
                    .ayuv = 0, /**< packed 4:4:4 */
                    .yuy2 = 0  /**< packed 4:2:2 */
                },
            .output_pixel_format_support = {.argb_packed_32b = 1,
                .nv12 = 0,
                .fp16 = 1,
                .p010 = 0,
                .p016 = 0,
                .ayuv = 0,
                .yuy2 = 0},
            .max_upscale_factor = 64000,

            // 6:1 downscaling ratio: 1000/6 = 166.666
            .max_downscale_factor = 167,

            .pitch_alignment = 256,
            .addr_alignment = 256,
            .max_viewport_width = 1024,
        },
};

static bool vpe10_init_scaler_data(struct vpe_priv *vpe_priv, struct stream_ctx *stream_ctx,
    struct scaler_data *scl_data, struct vpe_rect *src_rect, struct vpe_rect *dst_rect)
{
    struct dpp *dpp = vpe_priv->resource.dpp[0];

    calculate_scaling_ratios(scl_data, src_rect, dst_rect, stream_ctx->stream.surface_info.format);

    if (vpe_priv->init.debug.skip_optimal_tap_check) {
        scl_data->taps.v_taps = stream_ctx->stream.scaling_info.taps.v_taps;
        scl_data->taps.h_taps = stream_ctx->stream.scaling_info.taps.h_taps;
        scl_data->taps.v_taps_c = stream_ctx->stream.scaling_info.taps.v_taps_c;
        scl_data->taps.h_taps_c = stream_ctx->stream.scaling_info.taps.h_taps_c;
    } else {
        if (!dpp->funcs->get_optimal_number_of_taps(
                dpp, scl_data, &stream_ctx->stream.scaling_info.taps))
            return false;
    }

    if ((stream_ctx->stream.use_external_scaling_coeffs ==
            false) || /* don't try to optimize if the scaler is configured externally */
        (stream_ctx->stream.polyphase_scaling_coeffs.taps.h_taps == 0) ||
        (stream_ctx->stream.polyphase_scaling_coeffs.taps.v_taps == 0)) {
        scl_data->polyphase_filter_coeffs = 0;
    } else {
        if ((stream_ctx->stream.polyphase_scaling_coeffs.taps.h_taps !=
                stream_ctx->stream.scaling_info.taps.h_taps) ||
            (stream_ctx->stream.polyphase_scaling_coeffs.taps.v_taps !=
                stream_ctx->stream.scaling_info.taps.v_taps)) {
            return false; // sanity check to make sure the taps structures are the same
        }
        scl_data->taps = stream_ctx->stream.polyphase_scaling_coeffs
                             .taps; /* use the externally provided tap configuration */
        scl_data->polyphase_filter_coeffs = &stream_ctx->stream.polyphase_scaling_coeffs;
    }

    // bypass scaler if all ratios are 1
    if (IDENTITY_RATIO(scl_data->ratios.horz))
        scl_data->taps.h_taps = 1;
    if (IDENTITY_RATIO(scl_data->ratios.vert))
        scl_data->taps.v_taps = 1;

    return true;
}

enum vpe_status vpe10_set_num_segments(struct vpe_priv *vpe_priv, struct stream_ctx *stream_ctx,
    struct scaler_data *scl_data, struct vpe_rect *src_rect, struct vpe_rect *dst_rect,
    uint32_t *max_seg_width)
{
    uint16_t num_segs;
    struct dpp *dpp = vpe_priv->resource.dpp[0];
    const uint32_t max_lb_size = dpp->funcs->get_line_buffer_size();

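    // cap the segment width so that v_taps lines of the segment fit in the DPP line buffer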
    *max_seg_width = min(*max_seg_width, max_lb_size / scl_data->taps.v_taps);

    num_segs = vpe_get_num_segments(vpe_priv, src_rect, dst_rect, *max_seg_width);

    stream_ctx->segment_ctx = vpe_alloc_segment_ctx(vpe_priv, num_segs);
    if (!stream_ctx->segment_ctx)
        return VPE_STATUS_NO_MEMORY;

    stream_ctx->num_segments = num_segs;

    return VPE_STATUS_OK;
}

bool vpe10_get_dcc_compression_cap(const struct vpe *vpe, const struct vpe_dcc_surface_param *input,
    struct vpe_surface_dcc_cap *output)
{
    struct vpe_priv *vpe_priv = container_of(vpe, struct vpe_priv, pub);
    struct vpec *vpec = &vpe_priv->resource.vpec;

    return vpec->funcs->get_dcc_compression_cap(vpec, input, output);
}

static struct vpe_cap_funcs cap_funcs = {.get_dcc_compression_cap = vpe10_get_dcc_compression_cap};

struct cdc *vpe10_cdc_create(struct vpe_priv *vpe_priv, int inst)
{
    struct vpe10_cdc *vpe10_cdc = vpe_zalloc(sizeof(struct vpe10_cdc));

    if (!vpe10_cdc)
        return NULL;

    vpe10_construct_cdc(vpe_priv, &vpe10_cdc->base);

    vpe10_cdc->regs = &cdc_regs[inst];
    vpe10_cdc->mask = &cdc_mask;
    vpe10_cdc->shift = &cdc_shift;

    return &vpe10_cdc->base;
}

struct dpp *vpe10_dpp_create(struct vpe_priv *vpe_priv, int inst)
{
    struct vpe10_dpp *vpe10_dpp = vpe_zalloc(sizeof(struct vpe10_dpp));

    if (!vpe10_dpp)
        return NULL;

    vpe10_construct_dpp(vpe_priv, &vpe10_dpp->base);

    vpe10_dpp->regs = &dpp_regs[inst];
    vpe10_dpp->mask = &dpp_mask;
    vpe10_dpp->shift = &dpp_shift;

    return &vpe10_dpp->base;
}

struct mpc *vpe10_mpc_create(struct vpe_priv *vpe_priv, int inst)
{
    struct vpe10_mpc *vpe10_mpc = vpe_zalloc(sizeof(struct vpe10_mpc));

    if (!vpe10_mpc)
        return NULL;

    vpe10_construct_mpc(vpe_priv, &vpe10_mpc->base);

    vpe10_mpc->regs = &mpc_regs[inst];
    vpe10_mpc->mask = &mpc_mask;
    vpe10_mpc->shift = &mpc_shift;

    return &vpe10_mpc->base;
}

struct opp *vpe10_opp_create(struct vpe_priv *vpe_priv, int inst)
{
    struct vpe10_opp *vpe10_opp = vpe_zalloc(sizeof(struct vpe10_opp));

    if (!vpe10_opp)
        return NULL;

    vpe10_construct_opp(vpe_priv, &vpe10_opp->base);

    vpe10_opp->regs = &opp_regs[inst];
    vpe10_opp->mask = &opp_mask;
    vpe10_opp->shift = &opp_shift;

    return &vpe10_opp->base;
}

enum vpe_status vpe10_construct_resource(struct vpe_priv *vpe_priv, struct resource *res)
{
    struct vpe *vpe = &vpe_priv->pub;

    vpe->caps = &caps;
    vpe->cap_funcs = &cap_funcs;

    vpe10_construct_vpec(vpe_priv, &res->vpec);

    res->cdc[0] = vpe10_cdc_create(vpe_priv, 0);
    if (!res->cdc[0])
        goto err;

    res->dpp[0] = vpe10_dpp_create(vpe_priv, 0);
    if (!res->dpp[0])
        goto err;

    res->mpc[0] = vpe10_mpc_create(vpe_priv, 0);
    if (!res->mpc[0])
        goto err;

    res->opp[0] = vpe10_opp_create(vpe_priv, 0);
    if (!res->opp[0])
        goto err;

    vpe10_construct_cmd_builder(vpe_priv, &res->cmd_builder);
    vpe_priv->num_pipe = 1;

    res->internal_hdr_normalization = 1;

    res->check_input_color_space = vpe10_check_input_color_space;
    res->check_output_color_space = vpe10_check_output_color_space;
    res->check_h_mirror_support = vpe10_check_h_mirror_support;
    res->calculate_segments = vpe10_calculate_segments;
    res->set_num_segments = vpe10_set_num_segments;
    res->split_bg_gap = vpe10_split_bg_gap;
    res->calculate_dst_viewport_and_active = vpe10_calculate_dst_viewport_and_active;
    res->find_bg_gaps = vpe_find_bg_gaps;
    res->create_bg_segments = vpe_create_bg_segments;
    res->populate_cmd_info = vpe10_populate_cmd_info;
    res->program_frontend = vpe10_program_frontend;
    res->program_backend = vpe10_program_backend;
    res->get_bufs_req = vpe10_get_bufs_req;
    res->get_tf_pwl_params = vpe10_cm_get_tf_pwl_params;

    return VPE_STATUS_OK;
err:
    vpe10_destroy_resource(vpe_priv, res);
    return VPE_STATUS_ERROR;
}

void vpe10_destroy_resource(struct vpe_priv *vpe_priv, struct resource *res)
{
    if (res->cdc[0] != NULL) {
        vpe_free(container_of(res->cdc[0], struct vpe10_cdc, base));
        res->cdc[0] = NULL;
    }

    if (res->dpp[0] != NULL) {
        vpe_free(container_of(res->dpp[0], struct vpe10_dpp, base));
        res->dpp[0] = NULL;
    }

    if (res->mpc[0] != NULL) {
        vpe_free(container_of(res->mpc[0], struct vpe10_mpc, base));
        res->mpc[0] = NULL;
    }

    if (res->opp[0] != NULL) {
        vpe_free(container_of(res->opp[0], struct vpe10_opp, base));
        res->opp[0] = NULL;
    }
}

bool vpe10_check_input_color_space(struct vpe_priv *vpe_priv, enum vpe_surface_pixel_format format,
    const struct vpe_color_space *vcs)
{
    enum color_space cs;
    enum color_transfer_func tf;

    vpe_color_get_color_space_and_tf(vcs, &cs, &tf);
    if (cs == COLOR_SPACE_UNKNOWN || tf == TRANSFER_FUNC_UNKNOWN)
        return false;

    return true;
}

bool vpe10_check_output_color_space(struct vpe_priv *vpe_priv, enum vpe_surface_pixel_format format,
    const struct vpe_color_space *vcs)
{
    enum color_space cs;
    enum color_transfer_func tf;

    // output only supports RGB encodings (e.g. packed 32-bit RGB)
    if (vcs->encoding != VPE_PIXEL_ENCODING_RGB)
        return false;

    vpe_color_get_color_space_and_tf(vcs, &cs, &tf);
    if (cs == COLOR_SPACE_UNKNOWN || tf == TRANSFER_FUNC_UNKNOWN)
        return false;

    return true;
}

bool vpe10_check_h_mirror_support(bool *input_mirror, bool *output_mirror)
{
    *input_mirror = false;
    *output_mirror = true;
    return true;
}

void vpe10_calculate_dst_viewport_and_active(
    struct segment_ctx *segment_ctx, uint32_t max_seg_width)
{
    struct scaler_data *data = &segment_ctx->scaler_data;
    struct stream_ctx *stream_ctx = segment_ctx->stream_ctx;
    struct vpe_priv *vpe_priv = stream_ctx->vpe_priv;
    struct vpe_rect *dst_rect = &stream_ctx->stream.scaling_info.dst_rect;
    struct vpe_rect *target_rect = &vpe_priv->output_ctx.target_rect;

    uint32_t vpc_div = vpe_is_yuv420(vpe_priv->output_ctx.surface.format) ? 2 : 1;

    data->dst_viewport.x = data->recout.x + dst_rect->x;
    data->dst_viewport.width = data->recout.width;

    // the 1st stream covers the background:
    // extend the v_active to cover the full target_rect height
    if (stream_ctx->stream_idx == 0) {
        data->recout.x = 0;
        data->recout.y = dst_rect->y - target_rect->y;
        data->dst_viewport.y = target_rect->y;
        data->dst_viewport.height = target_rect->height;

        if (!stream_ctx->flip_horizonal_output) {
            /* first segment:
             * if dst_viewport.width is less than max_seg_width and background is
             * needed on the left, extend the active region to cover as much as it can
             */
            if (segment_ctx->segment_idx == 0) {
                uint32_t remain_gap = min(max_seg_width - data->dst_viewport.width,
                    (uint32_t)(data->dst_viewport.x - target_rect->x));
                data->recout.x = (int32_t)remain_gap;

                data->dst_viewport.x -= (int32_t)remain_gap;
                data->dst_viewport.width += remain_gap;
            }
            // last segment
            if (segment_ctx->segment_idx == stream_ctx->num_segments - 1) {
                uint32_t remain_gap = min(max_seg_width - data->dst_viewport.width,
                    (uint32_t)((target_rect->x + (int32_t)target_rect->width) -
                               (data->dst_viewport.x + (int32_t)data->dst_viewport.width)));

                data->dst_viewport.width += remain_gap;
            }
        }
    } else {
        data->dst_viewport.y = data->recout.y + dst_rect->y;
        data->dst_viewport.height = data->recout.height;
        data->recout.y = 0;
        data->recout.x = 0;
    }

    data->dst_viewport_c.x = data->dst_viewport.x / (int32_t)vpc_div;
    data->dst_viewport_c.y = data->dst_viewport.y / (int32_t)vpc_div;
    data->dst_viewport_c.width = data->dst_viewport.width / vpc_div;
    data->dst_viewport_c.height = data->dst_viewport.height / vpc_div;

    // [h/v]_active
    data->h_active = data->dst_viewport.width;
    data->v_active = data->dst_viewport.height;
}

enum vpe_status vpe10_calculate_segments(
    struct vpe_priv *vpe_priv, const struct vpe_build_param *params)
{
    enum vpe_status res;
    struct vpe_rect *gaps;
    uint16_t gaps_cnt, max_gaps;
    uint16_t stream_idx, seg_idx;
    struct stream_ctx *stream_ctx;
    struct segment_ctx *segment_ctx;
    uint32_t max_seg_width = vpe_priv->pub.caps->plane_caps.max_viewport_width;
    struct scaler_data scl_data;
    struct vpe_rect *src_rect;
    struct vpe_rect *dst_rect;
    uint32_t factor;
    const uint32_t max_upscale_factor = vpe_priv->pub.caps->plane_caps.max_upscale_factor;
    const uint32_t max_downscale_factor = vpe_priv->pub.caps->plane_caps.max_downscale_factor;
    struct dpp *dpp = vpe_priv->resource.dpp[0];
    const uint32_t max_lb_size = dpp->funcs->get_line_buffer_size();

    for (stream_idx = 0; stream_idx < params->num_streams; stream_idx++) {
        stream_ctx = &vpe_priv->stream_ctx[stream_idx];
        src_rect = &stream_ctx->stream.scaling_info.src_rect;
        dst_rect = &stream_ctx->stream.scaling_info.dst_rect;

        if (src_rect->width < VPE_MIN_VIEWPORT_SIZE || src_rect->height < VPE_MIN_VIEWPORT_SIZE ||
            dst_rect->width < VPE_MIN_VIEWPORT_SIZE || dst_rect->height < VPE_MIN_VIEWPORT_SIZE) {
            return VPE_STATUS_VIEWPORT_SIZE_NOT_SUPPORTED;
        }

        vpe_clip_stream(src_rect, dst_rect, &params->target_rect);

        if (src_rect->width <= 0 || src_rect->height <= 0 || dst_rect->width <= 0 ||
            dst_rect->height <= 0) {
            vpe_log("calculate_segments: after clipping, src or dst rect contains no area. Skip "
                    "this stream.\n");
            stream_ctx->num_segments = 0;
            continue;
        }

        /* If the source frame size is 1 in either dimension, the scaling ratio becomes 0
         * in that dimension. If the destination frame size is 1 in any dimension, the
         * scaling ratio is NaN.
         */
        if (src_rect->width < VPE_MIN_VIEWPORT_SIZE || src_rect->height < VPE_MIN_VIEWPORT_SIZE ||
            dst_rect->width < VPE_MIN_VIEWPORT_SIZE || dst_rect->height < VPE_MIN_VIEWPORT_SIZE) {
            return VPE_STATUS_VIEWPORT_SIZE_NOT_SUPPORTED;
        }
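        // factor is the width scaling ratio in units of 1/1000 (e.g., 2000 = 2x upscale);
        // note that only the horizontal ratio is validated against the caps here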
        factor = (uint32_t)vpe_fixpt_ceil(
            vpe_fixpt_from_fraction((1000 * dst_rect->width), src_rect->width));
        if (factor > max_upscale_factor || factor < max_downscale_factor)
            return VPE_STATUS_SCALING_RATIO_NOT_SUPPORTED;

        // initialize scaling data
        if (!vpe10_init_scaler_data(vpe_priv, stream_ctx, &scl_data, src_rect, dst_rect))
            return VPE_STATUS_SCALING_RATIO_NOT_SUPPORTED;

        res = vpe_priv->resource.set_num_segments(
            vpe_priv, stream_ctx, &scl_data, src_rect, dst_rect, &max_seg_width);
        if (res != VPE_STATUS_OK)
            return res;

        for (seg_idx = 0; seg_idx < stream_ctx->num_segments; seg_idx++) {
            segment_ctx = &stream_ctx->segment_ctx[seg_idx];
            segment_ctx->segment_idx = seg_idx;
            segment_ctx->stream_ctx = stream_ctx;

            segment_ctx->scaler_data.ratios = scl_data.ratios;
            segment_ctx->scaler_data.taps = scl_data.taps;
            if (stream_ctx->stream.use_external_scaling_coeffs) {
                segment_ctx->scaler_data.polyphase_filter_coeffs =
                    &stream_ctx->stream.polyphase_scaling_coeffs;
            } else {
                segment_ctx->scaler_data.polyphase_filter_coeffs = 0;
            }

            res = vpe_resource_build_scaling_params(segment_ctx);
            if (res != VPE_STATUS_OK)
                return res;

            vpe_priv->resource.calculate_dst_viewport_and_active(segment_ctx, max_seg_width);
        }
    }

    /* If the stream width is less than max_seg_width (1024) and it
     * lies inside a max_seg_width window of the background, vpe needs
     * an extra bg segment to store that.
     *
     *  1    2    3    4    5
     * |....|....|.**.|....|
     * |....|....|.**.|....|
     * |....|....|.**.|....|
     *
     * (*: stream
     *  .: background
     *  |: 1k separator)
     */
    max_seg_width = vpe_priv->pub.caps->plane_caps.max_viewport_width;
    max_gaps =
        (uint16_t)(max((params->target_rect.width + max_seg_width - 1) / max_seg_width, 1) + 1);
    gaps = vpe_zalloc(sizeof(struct vpe_rect) * max_gaps);
    if (!gaps)
        return VPE_STATUS_NO_MEMORY;

    gaps_cnt = vpe_priv->resource.find_bg_gaps(vpe_priv, &(params->target_rect), gaps, max_gaps);
    if (gaps_cnt > 0)
        vpe_priv->resource.create_bg_segments(vpe_priv, gaps, gaps_cnt, VPE_CMD_OPS_BG);

    if (gaps != NULL) {
        vpe_free(gaps);
        gaps = NULL;
    }

    vpe_handle_output_h_mirror(vpe_priv);

    res = vpe_priv->resource.populate_cmd_info(vpe_priv);

    if (res == VPE_STATUS_OK)
        res = vpe_create_visual_confirm_segs(vpe_priv, params, max_seg_width);

    return res;
}

static void build_clamping_params(
    struct opp *opp, struct clamping_and_pixel_encoding_params *clamping)
{
    struct vpe_priv *vpe_priv = opp->vpe_priv;
    struct vpe_surface_info *dst_surface = &vpe_priv->output_ctx.surface;
    enum vpe_color_range output_range = dst_surface->cs.range;

    memset(clamping, 0, sizeof(*clamping));
    clamping->clamping_level = CLAMPING_FULL_RANGE;
    clamping->c_depth = vpe_get_color_depth(dst_surface->format);

    if (output_range == VPE_COLOR_RANGE_STUDIO) {
        if (!vpe_priv->init.debug.clamping_setting) {
            switch (clamping->c_depth) {
            case COLOR_DEPTH_888:
                clamping->clamping_level = CLAMPING_LIMITED_RANGE_8BPC;
                break;
            case COLOR_DEPTH_101010:
                clamping->clamping_level = CLAMPING_LIMITED_RANGE_10BPC;
                break;
            case COLOR_DEPTH_121212:
                clamping->clamping_level = CLAMPING_LIMITED_RANGE_12BPC;
                break;
            default:
                clamping->clamping_level =
                    CLAMPING_FULL_RANGE; // for all other bit depths, use full range
                break;
            }
        } else {
            switch (vpe_priv->init.debug.clamping_params.clamping_range) {
            case VPE_CLAMPING_LIMITED_RANGE_8BPC:
                clamping->clamping_level = CLAMPING_LIMITED_RANGE_8BPC;
                break;
            case VPE_CLAMPING_LIMITED_RANGE_10BPC:
                clamping->clamping_level = CLAMPING_LIMITED_RANGE_10BPC;
                break;
            case VPE_CLAMPING_LIMITED_RANGE_12BPC:
                clamping->clamping_level = CLAMPING_LIMITED_RANGE_12BPC;
                break;
            default:
                clamping->clamping_level =
                    CLAMPING_LIMITED_RANGE_PROGRAMMABLE; // for all others, use the
                                                         // programmable range
                clamping->r_clamp_component_lower =
                    vpe_priv->output_ctx.clamping_params.r_clamp_component_lower;
                clamping->g_clamp_component_lower =
                    vpe_priv->output_ctx.clamping_params.g_clamp_component_lower;
                clamping->b_clamp_component_lower =
                    vpe_priv->output_ctx.clamping_params.b_clamp_component_lower;
                clamping->r_clamp_component_upper =
                    vpe_priv->output_ctx.clamping_params.r_clamp_component_upper;
                clamping->g_clamp_component_upper =
                    vpe_priv->output_ctx.clamping_params.g_clamp_component_upper;
                clamping->b_clamp_component_upper =
                    vpe_priv->output_ctx.clamping_params.b_clamp_component_upper;
                break;
            }
        }
    }
}

static void frontend_config_callback(
    void *ctx, uint64_t cfg_base_gpu, uint64_t cfg_base_cpu, uint64_t size)
{
    struct config_frontend_cb_ctx *cb_ctx = (struct config_frontend_cb_ctx *)ctx;
    struct vpe_priv *vpe_priv = cb_ctx->vpe_priv;
    struct stream_ctx *stream_ctx = &vpe_priv->stream_ctx[cb_ctx->stream_idx];
    enum vpe_cmd_type cmd_type;

    if (cb_ctx->stream_sharing) {
        VPE_ASSERT(stream_ctx->num_configs <
                   (int)(sizeof(stream_ctx->configs) / sizeof(struct config_record)));

        stream_ctx->configs[stream_ctx->num_configs].config_base_addr = cfg_base_gpu;
        stream_ctx->configs[stream_ctx->num_configs].config_size = size;
        stream_ctx->num_configs++;
    } else if (cb_ctx->stream_op_sharing) {
        cmd_type = cb_ctx->cmd_type;

        VPE_ASSERT(
            stream_ctx->num_stream_op_configs[cmd_type] <
            (int)(sizeof(stream_ctx->stream_op_configs[cmd_type]) / sizeof(struct config_record)));

        stream_ctx->stream_op_configs[cmd_type][stream_ctx->num_stream_op_configs[cmd_type]]
            .config_base_addr = cfg_base_gpu;
        stream_ctx->stream_op_configs[cmd_type][stream_ctx->num_stream_op_configs[cmd_type]]
            .config_size = size;
        stream_ctx->num_stream_op_configs[cmd_type]++;
    }

    vpe_desc_writer_add_config_desc(
        &vpe_priv->vpe_desc_writer, cfg_base_gpu, false, vpe_priv->config_writer.buf->tmz);
}

int32_t vpe10_program_frontend(struct vpe_priv *vpe_priv, uint32_t pipe_idx, uint32_t cmd_idx,
    uint32_t cmd_input_idx, bool seg_only)
{
    struct vpe_cmd_info *cmd_info = &vpe_priv->vpe_cmd_info[cmd_idx];
    struct vpe_cmd_input *cmd_input = &cmd_info->inputs[cmd_input_idx];
    struct stream_ctx *stream_ctx = &vpe_priv->stream_ctx[cmd_input->stream_idx];
    struct vpe_surface_info *surface_info = &stream_ctx->stream.surface_info;
    struct cdc *cdc = vpe_priv->resource.cdc[pipe_idx];
    struct dpp *dpp = vpe_priv->resource.dpp[pipe_idx];
    struct mpc *mpc = vpe_priv->resource.mpc[pipe_idx];
    enum input_csc_select select = INPUT_CSC_SELECT_BYPASS;
    uint32_t hw_mult = 0;
    struct custom_float_format fmt;

    vpe_priv->fe_cb_ctx.stream_idx = cmd_input->stream_idx;
    vpe_priv->fe_cb_ctx.vpe_priv = vpe_priv;

    config_writer_set_callback(
        &vpe_priv->config_writer, &vpe_priv->fe_cb_ctx, frontend_config_callback);

    config_writer_set_type(&vpe_priv->config_writer, CONFIG_TYPE_DIRECT);

    if (!seg_only) {
        /* start front-end programming that can be shared among segments */
        vpe_priv->fe_cb_ctx.stream_sharing = true;

        cdc->funcs->program_surface_config(cdc, surface_info->format, stream_ctx->stream.rotation,
            // set to false as h_mirror is not supported on input, only on output
            false, surface_info->swizzle);
        cdc->funcs->program_crossbar_config(cdc, surface_info->format);

        dpp->funcs->program_cnv(dpp, surface_info->format, vpe_priv->expansion_mode);
        if (stream_ctx->bias_scale)
            dpp->funcs->program_cnv_bias_scale(dpp, stream_ctx->bias_scale);

        /* If an input adjustment exists, program the ICSC with those values. */
        if (stream_ctx->input_cs) {
            select = INPUT_CSC_SELECT_ICSC;
            dpp->funcs->program_post_csc(dpp, stream_ctx->cs, select, stream_ctx->input_cs);
        } else {
            dpp->funcs->program_post_csc(dpp, stream_ctx->cs, select, NULL);
        }

        dpp->funcs->program_input_transfer_func(dpp, stream_ctx->input_tf);
        dpp->funcs->program_gamut_remap(dpp, stream_ctx->gamut_remap);

        // in non-bypass mode we always have a single layer coming from DPP and output to OPP
        mpc->funcs->program_mpcc_mux(mpc, MPC_MPCCID_0, MPC_MUX_TOPSEL_DPP0, MPC_MUX_BOTSEL_DISABLE,
            MPC_MUX_OUTMUX_MPCC0, MPC_MUX_OPPID_OPP0);

        // program shaper, 3dlut and 1dlut in MPC for stream before blend
        mpc->funcs->program_movable_cm(
            mpc, stream_ctx->in_shaper_func, stream_ctx->lut3d_func, stream_ctx->blend_tf, false);

        // program hdr_mult
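        // hw_mult uses a custom float format: 1 sign bit + 6 exponent bits + 12 mantissa bits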
        fmt.exponenta_bits = 6;
        fmt.mantissa_bits = 12;
        fmt.sign = true;

        if (stream_ctx->stream.tm_params.UID || stream_ctx->stream.tm_params.enable_3dlut) {
            vpe_convert_to_custom_float_format(
                stream_ctx->lut3d_func->hdr_multiplier, &fmt, &hw_mult);
        } else {
            vpe_convert_to_custom_float_format(stream_ctx->white_point_gain, &fmt, &hw_mult);
        }
        dpp->funcs->set_hdr_multiplier(dpp, hw_mult);

        if (vpe_priv->init.debug.dpp_crc_ctrl)
            dpp->funcs->program_crc(dpp, true);

        if (vpe_priv->init.debug.mpc_crc_ctrl)
            mpc->funcs->program_crc(mpc, true);

        // put other stream-specific hw programming that can be shared here

        config_writer_complete(&vpe_priv->config_writer);
    }

    vpe10_create_stream_ops_config(vpe_priv, pipe_idx, stream_ctx, cmd_input, cmd_info->ops);

    /* start segment specific programming */
    vpe_priv->fe_cb_ctx.stream_sharing = false;
    vpe_priv->fe_cb_ctx.stream_op_sharing = false;
    vpe_priv->fe_cb_ctx.cmd_type = VPE_CMD_TYPE_COMPOSITING;

    cdc->funcs->program_viewport(
        cdc, &cmd_input->scaler_data.viewport, &cmd_input->scaler_data.viewport_c);

    dpp->funcs->set_segment_scaler(dpp, &cmd_input->scaler_data);

    config_writer_complete(&vpe_priv->config_writer);

    return 0;
}

static void backend_config_callback(
    void *ctx, uint64_t cfg_base_gpu, uint64_t cfg_base_cpu, uint64_t size)
{
    struct config_backend_cb_ctx *cb_ctx = (struct config_backend_cb_ctx *)ctx;
    struct vpe_priv *vpe_priv = cb_ctx->vpe_priv;
    struct output_ctx *output_ctx = &vpe_priv->output_ctx;

    if (cb_ctx->share) {
        VPE_ASSERT(
            output_ctx->num_configs < (sizeof(output_ctx->configs) / sizeof(struct config_record)));

        output_ctx->configs[output_ctx->num_configs].config_base_addr = cfg_base_gpu;
        output_ctx->configs[output_ctx->num_configs].config_size = size;
        output_ctx->num_configs++;
    }

    vpe_desc_writer_add_config_desc(
        &vpe_priv->vpe_desc_writer, cfg_base_gpu, false, vpe_priv->config_writer.buf->tmz);
}

int32_t vpe10_program_backend(
    struct vpe_priv *vpe_priv, uint32_t pipe_idx, uint32_t cmd_idx, bool seg_only)
{
    struct output_ctx *output_ctx = &vpe_priv->output_ctx;
    struct vpe_surface_info *surface_info = &vpe_priv->output_ctx.surface;

    struct cdc *cdc = vpe_priv->resource.cdc[pipe_idx];
    struct opp *opp = vpe_priv->resource.opp[pipe_idx];
    struct mpc *mpc = vpe_priv->resource.mpc[pipe_idx];

    struct bit_depth_reduction_params fmt_bit_depth;
    struct clamping_and_pixel_encoding_params clamp_param;
    enum color_depth display_color_depth;
    uint16_t alpha_16;
    bool opp_dig_bypass = false;

    vpe_priv->be_cb_ctx.vpe_priv = vpe_priv;
    config_writer_set_callback(
        &vpe_priv->config_writer, &vpe_priv->be_cb_ctx, backend_config_callback);

    config_writer_set_type(&vpe_priv->config_writer, CONFIG_TYPE_DIRECT);

    if (!seg_only) {
        /* start back-end programming that can be shared among segments */
        vpe_priv->be_cb_ctx.share = true;

        cdc->funcs->program_p2b_config(cdc, surface_info->format);
        cdc->funcs->program_global_sync(cdc, VPE10_CDC_VUPDATE_OFFSET_DEFAULT,
            VPE10_CDC_VUPDATE_WIDTH_DEFAULT, VPE10_CDC_VREADY_OFFSET_DEFAULT);

        mpc->funcs->set_output_transfer_func(mpc, output_ctx);

        // program shaper, 3dlut and 1dlut in MPC for after blend
        // Note: cannot program both before- and after-blend CM;
        // the caller should ensure only one is programmed
        // mpc->funcs->program_movable_cm(mpc, output_ctx->in_shaper_func,
        //     output_ctx->lut3d_func, output_ctx->blend_tf, true);
        mpc->funcs->program_mpc_out(mpc, surface_info->format);

        // post-blend gamut remap
        mpc->funcs->set_gamut_remap(mpc, output_ctx->gamut_remap);

        if (vpe_is_fp16(surface_info->format)) {
            if (vpe_priv->output_ctx.alpha_mode == VPE_ALPHA_BGCOLOR)
                vpe_convert_from_float_to_fp16(
                    (double)vpe_priv->output_ctx.bg_color.rgba.a, &alpha_16);
            else
                vpe_convert_from_float_to_fp16(1.0, &alpha_16);

            opp_dig_bypass = true;
        } else {
            if (vpe_priv->output_ctx.alpha_mode == VPE_ALPHA_BGCOLOR)
                alpha_16 = (uint16_t)(vpe_priv->output_ctx.bg_color.rgba.a * 0xffff);
            else
                alpha_16 = 0xffff;
        }

        opp->funcs->program_pipe_alpha(opp, alpha_16);
        opp->funcs->program_pipe_bypass(opp, opp_dig_bypass);

        display_color_depth = vpe_get_color_depth(surface_info->format);
        build_clamping_params(opp, &clamp_param);
        vpe_resource_build_bit_depth_reduction_params(opp, &fmt_bit_depth);

        // disable dynamic expansion for now as there is no use case
        opp->funcs->set_dyn_expansion(opp, false, display_color_depth);
        opp->funcs->program_fmt(opp, &fmt_bit_depth, &clamp_param);

        if (vpe_priv->init.debug.opp_pipe_crc_ctrl)
            opp->funcs->program_pipe_crc(opp, true);

        config_writer_complete(&vpe_priv->config_writer);
    }

    return 0;
}

enum vpe_status vpe10_populate_cmd_info(struct vpe_priv *vpe_priv)
{
    uint16_t stream_idx;
    uint16_t segment_idx;
    struct stream_ctx *stream_ctx;
    struct vpe_cmd_info *cmd_info;
    bool tm_enabled;

    for (stream_idx = 0; stream_idx < vpe_priv->num_streams; stream_idx++) {
        stream_ctx = &vpe_priv->stream_ctx[stream_idx];

        tm_enabled =
            stream_ctx->stream.tm_params.UID != 0 || stream_ctx->stream.tm_params.enable_3dlut;

        for (segment_idx = 0; segment_idx < stream_ctx->num_segments; segment_idx++) {
            if (vpe_priv->num_vpe_cmds >= MAX_VPE_CMD) {
                return VPE_STATUS_CMD_OVERFLOW_ERROR;
            }

            cmd_info = &vpe_priv->vpe_cmd_info[vpe_priv->num_vpe_cmds];
            cmd_info->inputs[0].stream_idx = stream_idx;
            cmd_info->cd = (uint8_t)(stream_ctx->num_segments - segment_idx - 1);
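            // cd counts down the remaining segments of the stream:
            // num_segments - 1 for the first segment, 0 for the last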
            memcpy(&(cmd_info->inputs[0].scaler_data),
                &(stream_ctx->segment_ctx[segment_idx].scaler_data), sizeof(struct scaler_data));
            cmd_info->dst_viewport = stream_ctx->segment_ctx[segment_idx].scaler_data.dst_viewport;
            cmd_info->dst_viewport_c =
                stream_ctx->segment_ctx[segment_idx].scaler_data.dst_viewport_c;
            cmd_info->num_inputs = 1;
            cmd_info->ops = VPE_CMD_OPS_COMPOSITING;
            cmd_info->tm_enabled = tm_enabled;
            vpe_priv->num_vpe_cmds++;

            if (cmd_info->cd == (stream_ctx->num_segments - 1)) {
                cmd_info->is_begin = true;
            }

            if (cmd_info->cd == 0) {
                cmd_info->is_end = true;
            }
        }
    }

    return VPE_STATUS_OK;
}

void vpe10_create_stream_ops_config(struct vpe_priv *vpe_priv, uint32_t pipe_idx,
    struct stream_ctx *stream_ctx, struct vpe_cmd_input *cmd_input, enum vpe_cmd_ops ops)
{
    /* put all hw programming that can be shared according to the cmd type within a stream here */
    struct mpcc_blnd_cfg blndcfg = {0};
    struct dpp *dpp = vpe_priv->resource.dpp[pipe_idx];
    struct mpc *mpc = vpe_priv->resource.mpc[pipe_idx];
    enum vpe_cmd_type cmd_type = VPE_CMD_TYPE_COUNT;

    vpe_priv->fe_cb_ctx.stream_op_sharing = true;
    vpe_priv->fe_cb_ctx.stream_sharing = false;

    if (ops == VPE_CMD_OPS_BG) {
        cmd_type = VPE_CMD_TYPE_BG;
    } else if (ops == VPE_CMD_OPS_COMPOSITING) {
        cmd_type = VPE_CMD_TYPE_COMPOSITING;
    } else if (ops == VPE_CMD_OPS_BG_VSCF_INPUT) {
        cmd_type = VPE_CMD_TYPE_BG_VSCF_INPUT;
    } else if (ops == VPE_CMD_OPS_BG_VSCF_OUTPUT) {
        cmd_type = VPE_CMD_TYPE_BG_VSCF_OUTPUT;
    } else {
        return;
    }

    // return if already generated
    if (stream_ctx->num_stream_op_configs[cmd_type])
        return;

    vpe_priv->fe_cb_ctx.cmd_type = cmd_type;

    dpp->funcs->set_frame_scaler(dpp, &cmd_input->scaler_data);

    if (ops == VPE_CMD_OPS_BG_VSCF_INPUT) {
        blndcfg.bg_color = vpe_get_visual_confirm_color(stream_ctx->stream.surface_info.format,
            stream_ctx->stream.surface_info.cs, vpe_priv->output_ctx.cs,
            vpe_priv->output_ctx.output_tf,
            (stream_ctx->stream.tm_params.UID != 0 || stream_ctx->stream.tm_params.enable_3dlut));
    } else if (ops == VPE_CMD_OPS_BG_VSCF_OUTPUT) {
        blndcfg.bg_color = vpe_get_visual_confirm_color(vpe_priv->output_ctx.surface.format,
            vpe_priv->output_ctx.surface.cs, vpe_priv->output_ctx.cs,
            vpe_priv->output_ctx.output_tf,
            false); // 3DLUT should only affect the input visual confirm
    } else {
        blndcfg.bg_color = vpe_priv->output_ctx.bg_color;
    }

    blndcfg.global_gain = 0xff;
    blndcfg.pre_multiplied_alpha = false;

    if (stream_ctx->stream.blend_info.blending) {
        if (stream_ctx->per_pixel_alpha) {
            blndcfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;

            blndcfg.pre_multiplied_alpha = stream_ctx->stream.blend_info.pre_multiplied_alpha;
            if (stream_ctx->stream.blend_info.global_alpha)
                blndcfg.global_gain =
                    (uint8_t)(stream_ctx->stream.blend_info.global_alpha_value * 0xff);
        } else {
            blndcfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
            if (stream_ctx->stream.blend_info.global_alpha == true) {
                VPE_ASSERT(stream_ctx->stream.blend_info.global_alpha_value <= 1.0f);
                blndcfg.global_alpha =
                    (uint8_t)(stream_ctx->stream.blend_info.global_alpha_value * 0xff);
            } else {
                // global alpha not enabled, make the top layer opaque
                blndcfg.global_alpha = 0xff;
            }
        }
    } else {
        blndcfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
        blndcfg.global_alpha = 0xff;
    }

    if (cmd_type == VPE_CMD_TYPE_BG || cmd_type == VPE_CMD_TYPE_BG_VSCF_INPUT ||
        cmd_type == VPE_CMD_TYPE_BG_VSCF_OUTPUT) {
        // for bg commands, make the top layer transparent;
        // since global alpha only takes effect in global-alpha mode, set that mode as well
        blndcfg.global_alpha = 0;
        blndcfg.global_gain = 0xff;
        blndcfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
    }

    blndcfg.overlap_only = false;
    blndcfg.bottom_gain_mode = 0;

    switch (vpe_priv->init.debug.bg_bit_depth) {
    case 8:
        blndcfg.background_color_bpc = 0;
        break;
    case 9:
        blndcfg.background_color_bpc = 1;
        break;
    case 10:
        blndcfg.background_color_bpc = 2;
        break;
    case 11:
        blndcfg.background_color_bpc = 3;
        break;
    case 12:
    default:
        blndcfg.background_color_bpc = 4; // 12 bit, DAL's choice
        break;
    }

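    // assumption: 0x1f000 encodes 1.0 in a 19-bit custom float (sign + 6-bit exponent +
    // 12-bit mantissa), matching the hdr_mult format programmed in the frontend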
    blndcfg.top_gain = 0x1f000;
    blndcfg.bottom_inside_gain = 0x1f000;
    blndcfg.bottom_outside_gain = 0x1f000;

    mpc->funcs->program_mpcc_blending(mpc, MPC_MPCCID_0, &blndcfg);

    config_writer_complete(&vpe_priv->config_writer);
}

#define VPE10_GENERAL_VPE_DESC_SIZE 64 // 4 * (4 + (2 * num_configs))
#define VPE10_GENERAL_EMB_USAGE_FRAME_SHARED 6000 // currently max 4804 is recorded
#define VPE10_GENERAL_EMB_USAGE_3DLUT_FRAME_SHARED 40960 // currently max 35192 is recorded
#define VPE10_GENERAL_EMB_USAGE_BG_SHARED 2400 // currently max 1772 + 92 + 72 = 1936 is recorded
#define VPE10_GENERAL_EMB_USAGE_SEG_NON_SHARED \
    240 // segment-specific config + plane descriptor size; currently max 92 + 72 = 164 is recorded
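// note: 64 bytes corresponds to num_configs = 6 in the formula above: 4 * (4 + 2 * 6) = 64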

void vpe10_get_bufs_req(struct vpe_priv *vpe_priv, struct vpe_bufs_req *req)
{
    uint32_t i;
    struct vpe_cmd_info *cmd_info;
    uint32_t stream_idx = 0xFFFFFFFF;
    uint64_t emb_req = 0;
    bool have_visual_confirm_input = false;
    bool have_visual_confirm_output = false;

    req->cmd_buf_size = 0;
    req->emb_buf_size = 0;

    for (i = 0; i < vpe_priv->num_vpe_cmds; i++) {
        cmd_info = &vpe_priv->vpe_cmd_info[i];

        // each cmd consumes one VPE descriptor
        req->cmd_buf_size += VPE10_GENERAL_VPE_DESC_SIZE;

        // if a command represents the first segment of a stream, the full shared
        // config size is added; for the other segments only the segment-specific
        // config size is added
        if (cmd_info->ops == VPE_CMD_OPS_COMPOSITING) {
            if (stream_idx != cmd_info->inputs[0].stream_idx) {
                emb_req = cmd_info->tm_enabled ? VPE10_GENERAL_EMB_USAGE_3DLUT_FRAME_SHARED
                                               : VPE10_GENERAL_EMB_USAGE_FRAME_SHARED;
                stream_idx = cmd_info->inputs[0].stream_idx;
            } else {
                emb_req = VPE10_GENERAL_EMB_USAGE_SEG_NON_SHARED;
            }
        } else if (cmd_info->ops == VPE_CMD_OPS_BG) {
            emb_req =
                i > 0 ? VPE10_GENERAL_EMB_USAGE_SEG_NON_SHARED : VPE10_GENERAL_EMB_USAGE_BG_SHARED;
        } else if (cmd_info->ops == VPE_CMD_OPS_BG_VSCF_INPUT) {
            emb_req = have_visual_confirm_input ? VPE10_GENERAL_EMB_USAGE_SEG_NON_SHARED
                                                : VPE10_GENERAL_EMB_USAGE_BG_SHARED;
            have_visual_confirm_input = true;
        } else if (cmd_info->ops == VPE_CMD_OPS_BG_VSCF_OUTPUT) {
            emb_req = have_visual_confirm_output ? VPE10_GENERAL_EMB_USAGE_SEG_NON_SHARED
                                                 : VPE10_GENERAL_EMB_USAGE_BG_SHARED;
            have_visual_confirm_output = true;
        } else {
            VPE_ASSERT(0);
        }

        req->emb_buf_size += emb_req;
    }
}