1 /* Copyright 2022 Advanced Micro Devices, Inc.
2 *
3 * Permission is hereby granted, free of charge, to any person obtaining a
4 * copy of this software and associated documentation files (the "Software"),
5 * to deal in the Software without restriction, including without limitation
6 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
7 * and/or sell copies of the Software, and to permit persons to whom the
8 * Software is furnished to do so, subject to the following conditions:
9 *
10 * The above copyright notice and this permission notice shall be included in
11 * all copies or substantial portions of the Software.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
17 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
18 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
19 * OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * Authors: AMD
22 *
23 */
24
25 #include <string.h>
26 #include "vpelib.h"
27 #include "vpe_priv.h"
28 #include "common.h"
29 #include "color_bg.h"
30 #include "color_gamma.h"
31 #include "cmd_builder.h"
32 #include "resource.h"
33 #include "color.h"
34 #include "vpec.h"
35 #include "vpe_desc_writer.h"
36 #include "dpp.h"
37 #include "mpc.h"
38 #include "opp.h"
39 #include "geometric_scaling.h"
40 #include <stdlib.h>
41 #include <time.h>
42
/* No-op system-event sink.
 * Installed as the default callback when the caller does not provide
 * init.funcs.sys_event, so vpe_event() can always be invoked unconditionally.
 */
static void dummy_sys_event(enum vpe_event_id eventId, ...)
{
    // Do nothing, if no callback is provided for sys event
}
47
/* Merge caller-supplied debug overrides into the library's active debug options.
 *
 * For each debug option, the matching bit in user_debug->flags (or the
 * per-field flags of enable_mem_low_power) indicates whether the caller wants
 * to override that option; only flagged options are copied, so unflagged
 * options keep the library defaults.
 *
 * debug:      [in/out] active debug options to update (vpe_priv->init.debug)
 * user_debug: [in]     caller-provided overrides with validity flags
 */
static void override_debug_option(
    struct vpe_debug_options *debug, const struct vpe_debug_options *user_debug)
{
    // Nothing to merge if either side is missing.
    if ((debug == NULL) || (user_debug == NULL)) {
        return;
    }

    if (user_debug->flags.bg_bit_depth)
        debug->bg_bit_depth = user_debug->bg_bit_depth;

    if (user_debug->flags.cm_in_bypass)
        debug->cm_in_bypass = user_debug->cm_in_bypass;

    if (user_debug->flags.vpcnvc_bypass)
        debug->vpcnvc_bypass = user_debug->vpcnvc_bypass;

    if (user_debug->flags.mpc_bypass)
        debug->mpc_bypass = user_debug->mpc_bypass;

    if (user_debug->flags.disable_reuse_bit)
        debug->disable_reuse_bit = user_debug->disable_reuse_bit;

    if (user_debug->flags.identity_3dlut)
        debug->identity_3dlut = user_debug->identity_3dlut;

    if (user_debug->flags.sce_3dlut)
        debug->sce_3dlut = user_debug->sce_3dlut;

    // Memory low-power controls carry their own per-unit validity flags.
    if (user_debug->enable_mem_low_power.flags.cm)
        debug->enable_mem_low_power.bits.cm = user_debug->enable_mem_low_power.bits.cm;

    if (user_debug->enable_mem_low_power.flags.dscl)
        debug->enable_mem_low_power.bits.dscl = user_debug->enable_mem_low_power.bits.dscl;

    if (user_debug->enable_mem_low_power.flags.mpc)
        debug->enable_mem_low_power.bits.mpc = user_debug->enable_mem_low_power.bits.mpc;

    if (user_debug->flags.bg_color_fill_only)
        debug->bg_color_fill_only = user_debug->bg_color_fill_only;

    if (user_debug->flags.assert_when_not_support)
        debug->assert_when_not_support = user_debug->assert_when_not_support;

    if (user_debug->flags.bypass_ogam)
        debug->bypass_ogam = user_debug->bypass_ogam;

    if (user_debug->flags.bypass_gamcor)
        debug->bypass_gamcor = user_debug->bypass_gamcor;

    if (user_debug->flags.bypass_dpp_gamut_remap)
        debug->bypass_dpp_gamut_remap = user_debug->bypass_dpp_gamut_remap;

    if (user_debug->flags.bypass_post_csc)
        debug->bypass_post_csc = user_debug->bypass_post_csc;

    // Clamping carries both an enable and a parameter struct; copy together.
    if (user_debug->flags.clamping_setting) {
        debug->clamping_setting = user_debug->clamping_setting;
        debug->clamping_params  = user_debug->clamping_params;
    }

    if (user_debug->flags.expansion_mode)
        debug->expansion_mode = user_debug->expansion_mode;

    if (user_debug->flags.bypass_per_pixel_alpha)
        debug->bypass_per_pixel_alpha = user_debug->bypass_per_pixel_alpha;

    if (user_debug->flags.opp_pipe_crc_ctrl)
        debug->opp_pipe_crc_ctrl = user_debug->opp_pipe_crc_ctrl;

    if (user_debug->flags.dpp_crc_ctrl)
        debug->dpp_crc_ctrl = user_debug->dpp_crc_ctrl;

    if (user_debug->flags.mpc_crc_ctrl)
        debug->mpc_crc_ctrl = user_debug->mpc_crc_ctrl;

    // Note: the visual_confirm flag gates the params struct, not a scalar.
    if (user_debug->flags.visual_confirm)
        debug->visual_confirm_params = user_debug->visual_confirm_params;

    if (user_debug->flags.skip_optimal_tap_check)
        debug->skip_optimal_tap_check = user_debug->skip_optimal_tap_check;

    if (user_debug->flags.bypass_blndgam)
        debug->bypass_blndgam = user_debug->bypass_blndgam;

    if (user_debug->flags.disable_lut_caching)
        debug->disable_lut_caching = user_debug->disable_lut_caching;
}
135
/* Validate/initialize collaboration mode for the detected IP level.
 *
 * VPE 1.1: when collaboration mode is requested and no sync index has been
 * chosen yet, seed a pseudo-random 4-bit value shifted into bits [15:12] as
 * the collaborate sync index (rand() quality is acceptable here; see the
 * coverity suppression below).
 * VPE 1.0: collaboration mode is not supported, so force it off.
 */
static void verify_collaboration_mode(struct vpe_priv *vpe_priv)
{
    if (vpe_priv->pub.level == VPE_IP_LEVEL_1_1) {
        if (vpe_priv->collaboration_mode == true && vpe_priv->collaborate_sync_index == 0) {
            srand((unsigned int)time(NULL)); // Initialization, should only be called once.
            // coverity[dont_call]
            uint32_t randnum = (uint32_t)rand() % 15;
            randnum          = randnum << 12;
            vpe_priv->collaborate_sync_index = (int32_t)randnum;
        }
    } else if (vpe_priv->pub.level == VPE_IP_LEVEL_1_0) {
        vpe_priv->collaboration_mode = false;
    }
}
150
create_output_config_vector(struct vpe_priv * vpe_priv)151 static enum vpe_status create_output_config_vector(struct vpe_priv *vpe_priv)
152 {
153 uint32_t i;
154
155 // output config vector stores all share-able configs that can be re-used later
156 for (i = 0; i < vpe_priv->pub.caps->resource_caps.num_cdc_be; i++) {
157 vpe_priv->output_ctx.configs[i] =
158 vpe_vector_create(vpe_priv, sizeof(struct config_record), MIN_NUM_CONFIG);
159 if (!vpe_priv->output_ctx.configs[i]) {
160 return VPE_STATUS_NO_MEMORY;
161 }
162 }
163 return VPE_STATUS_OK;
164 }
165
destroy_output_config_vector(struct vpe_priv * vpe_priv)166 static void destroy_output_config_vector(struct vpe_priv *vpe_priv)
167 {
168 uint32_t i;
169
170 for (i = 0; i < vpe_priv->pub.caps->resource_caps.num_cdc_be; i++) {
171 if (vpe_priv->output_ctx.configs[i]) {
172 vpe_vector_free(vpe_priv->output_ctx.configs[i]);
173 vpe_priv->output_ctx.configs[i] = NULL;
174 }
175 }
176 }
177
free_output_ctx(struct vpe_priv * vpe_priv)178 static void free_output_ctx(struct vpe_priv *vpe_priv)
179 {
180 if (vpe_priv->output_ctx.gamut_remap)
181 vpe_free(vpe_priv->output_ctx.gamut_remap);
182
183 if (vpe_priv->output_ctx.output_tf)
184 vpe_free(vpe_priv->output_ctx.output_tf);
185
186 destroy_output_config_vector(vpe_priv);
187 }
188
vpe_create(const struct vpe_init_data * params)189 struct vpe *vpe_create(const struct vpe_init_data *params)
190 {
191 struct vpe_priv *vpe_priv;
192 enum vpe_status status;
193
194 if (!params || (params->funcs.zalloc == NULL) || (params->funcs.free == NULL) ||
195 (params->funcs.log == NULL))
196 return NULL;
197
198 vpe_priv =
199 (struct vpe_priv *)params->funcs.zalloc(params->funcs.mem_ctx, sizeof(struct vpe_priv));
200 if (!vpe_priv)
201 return NULL;
202
203 vpe_priv->init = *params;
204
205 // Make sys event an optional feature but hooking up to dummy function if no callback is
206 // provided
207 if (vpe_priv->init.funcs.sys_event == NULL)
208 vpe_priv->init.funcs.sys_event = dummy_sys_event;
209
210 vpe_priv->pub.level =
211 vpe_resource_parse_ip_version(params->ver_major, params->ver_minor, params->ver_rev);
212
213 vpe_priv->pub.version = (VPELIB_API_VERSION_MAJOR << VPELIB_API_VERSION_MAJOR_SHIFT) |
214 (VPELIB_API_VERSION_MINOR << VPELIB_API_VERSION_MINOR_SHIFT);
215
216 status = vpe_construct_resource(vpe_priv, vpe_priv->pub.level, &vpe_priv->resource);
217 if (status != VPE_STATUS_OK) {
218 vpe_free(vpe_priv);
219 return NULL;
220 }
221
222 vpe_priv->vpe_cmd_vector =
223 vpe_vector_create(vpe_priv, sizeof(struct vpe_cmd_info), MIN_VPE_CMD);
224 if (!vpe_priv->vpe_cmd_vector) {
225 vpe_free(vpe_priv);
226 return NULL;
227 }
228
229 status = create_output_config_vector(vpe_priv);
230 if (status != VPE_STATUS_OK) {
231 destroy_output_config_vector(vpe_priv);
232 vpe_free(vpe_priv);
233 return NULL;
234 }
235
236 override_debug_option(&vpe_priv->init.debug, ¶ms->debug);
237
238 vpe_color_setup_x_points_distribution();
239 vpe_color_setup_x_points_distribution_degamma();
240
241 vpe_priv->ops_support = false;
242 vpe_priv->scale_yuv_matrix = true;
243
244 vpe_priv->collaborate_sync_index = 0;
245 return &vpe_priv->pub;
246 }
247
vpe_destroy(struct vpe ** vpe)248 void vpe_destroy(struct vpe **vpe)
249 {
250 struct vpe_priv *vpe_priv;
251
252 if (!vpe || ((*vpe) == NULL))
253 return;
254
255 vpe_priv = container_of(*vpe, struct vpe_priv, pub);
256
257 vpe_destroy_resource(vpe_priv, &vpe_priv->resource);
258
259 free_output_ctx(vpe_priv);
260
261 vpe_free_stream_ctx(vpe_priv);
262
263 if (vpe_priv->vpe_cmd_vector)
264 vpe_vector_free(vpe_priv->vpe_cmd_vector);
265
266 if (vpe_priv->dummy_input_param)
267 vpe_free(vpe_priv->dummy_input_param);
268
269 if (vpe_priv->dummy_stream)
270 vpe_free(vpe_priv->dummy_stream);
271
272 vpe_free(vpe_priv);
273
274 *vpe = NULL;
275 }
276
277 /*****************************************************************************************
278 * populate_bg_stream
279 * populate virtual stream for background output only
280 * struct vpe* vpe
281 * [input] vpe context
282 * const struct vpe_build_param* org_param
283 * [input] original parameter from caller
284 * struct struct vpe_stream_ctx* stream_ctx
285 * [input/output] caller provided vpe_stream_ctx struct to populate
286 *****************************************************************************************/
static enum vpe_status populate_bg_stream(struct vpe_priv *vpe_priv, const struct vpe_build_param *param, struct stream_ctx *stream_ctx)
{
    struct vpe_surface_info         *surface_info;
    struct vpe_scaling_info         *scaling_info;
    struct vpe_scaling_filter_coeffs *polyphaseCoeffs;
    struct vpe_stream               *stream;

    if (!param || !stream_ctx)
        return VPE_STATUS_ERROR;

    stream = &stream_ctx->stream;
    stream_ctx->stream_type = VPE_STREAM_TYPE_BG_GEN;

    // if output surface is too small, don't use it as dummy input
    // request 2x2 instead of 1x1 for bpc safety
    // as we are to treat output as input for RGB 1x1, need 4bytes at least
    // but if output is YUV, bpc will be smaller and need larger dimension

    if (param->dst_surface.plane_size.surface_size.width < VPE_MIN_VIEWPORT_SIZE ||
        param->dst_surface.plane_size.surface_size.height < VPE_MIN_VIEWPORT_SIZE ||
        param->dst_surface.plane_size.surface_pitch < 256 / 4 || // 256bytes, 4bpp
        param->target_rect.width < VPE_MIN_VIEWPORT_SIZE ||
        param->target_rect.height < VPE_MIN_VIEWPORT_SIZE) {
        return VPE_STATUS_ERROR;
    }

    // set output surface as our dummy input
    surface_info    = &stream->surface_info;
    scaling_info    = &stream->scaling_info;
    polyphaseCoeffs = &stream->polyphase_scaling_coeffs;
    // Reuse the destination surface address so no extra allocation is needed.
    surface_info->address.type        = param->dst_surface.address.type;
    surface_info->address.tmz_surface = param->dst_surface.address.tmz_surface;
    surface_info->address.grph.addr.quad_part =
        param->dst_surface.address.grph.addr.quad_part;

    surface_info->swizzle                  = param->dst_surface.swizzle; // treat it as linear for simple
    surface_info->plane_size.surface_size.x = 0;
    surface_info->plane_size.surface_size.y = 0;
    // min width & height in pixels
    surface_info->plane_size.surface_size.width  = VPE_MIN_VIEWPORT_SIZE;
    surface_info->plane_size.surface_size.height = VPE_MIN_VIEWPORT_SIZE;
    surface_info->plane_size.surface_pitch = param->dst_surface.plane_size.surface_pitch;
    surface_info->plane_size.surface_aligned_height = param->dst_surface.plane_size.surface_aligned_height;
    surface_info->dcc.enable = false;
    // Inherit the destination's format and full color-space description so the
    // dummy input needs no color conversion beyond the normal pipeline.
    surface_info->format       = param->dst_surface.format;
    surface_info->cs.encoding  = param->dst_surface.cs.encoding;
    surface_info->cs.range     = param->dst_surface.cs.range;
    surface_info->cs.tf        = param->dst_surface.cs.tf;
    surface_info->cs.cositing  = param->dst_surface.cs.cositing;
    surface_info->cs.primaries = param->dst_surface.cs.primaries;
    // Minimal source viewport scaled into a minimal destination rect at the
    // target origin; the visible background comes from the bg color fill.
    scaling_info->src_rect.x      = 0;
    scaling_info->src_rect.y      = 0;
    scaling_info->src_rect.width  = VPE_MIN_VIEWPORT_SIZE;
    scaling_info->src_rect.height = VPE_MIN_VIEWPORT_SIZE;
    scaling_info->dst_rect.x      = param->target_rect.x;
    scaling_info->dst_rect.y      = param->target_rect.y;
    scaling_info->dst_rect.width  = VPE_MIN_VIEWPORT_SIZE;
    scaling_info->dst_rect.height = VPE_MIN_VIEWPORT_SIZE;
    scaling_info->taps.v_taps   = 4;
    scaling_info->taps.h_taps   = 4;
    scaling_info->taps.v_taps_c = 2;
    scaling_info->taps.h_taps_c = 2;

    polyphaseCoeffs->taps      = scaling_info->taps;
    polyphaseCoeffs->nb_phases = 64;

    stream->blend_info.blending            = true;
    stream->blend_info.pre_multiplied_alpha = false;
    stream->blend_info.global_alpha        = true; // hardcoded upon DAL request
    stream->blend_info.global_alpha_value  = 0;    // transparent as we are dummy input

    // Neutral color adjustments and no geometric transforms for the bg stream.
    stream->color_adj.brightness = 0.0f;
    stream->color_adj.contrast   = 1.0f;
    stream->color_adj.hue        = 0.0f;
    stream->color_adj.saturation = 1.0f;
    stream->rotation             = VPE_ROTATION_ANGLE_0;
    stream->horizontal_mirror    = false;
    stream->vertical_mirror      = false;
    stream->enable_luma_key      = false;
    stream->lower_luma_bound     = 0;
    stream->upper_luma_bound     = 0;
    stream->flags.hdr_metadata      = 0;
    stream->flags.geometric_scaling = 0;
    stream->use_external_scaling_coeffs = false;

    return VPE_STATUS_OK;
}
374
get_required_virtual_stream_count(struct vpe_priv * vpe_priv,const struct vpe_build_param * param)375 static uint32_t get_required_virtual_stream_count(struct vpe_priv *vpe_priv, const struct vpe_build_param *param)
376 {
377 uint32_t result = 0;
378
379 // Check for zero-input background stream
380 // Normally we result++ instead of returning, but bg_color_fill_only removes other streams (and therefore other features)
381 if (param->num_streams == 0 || vpe_priv->init.debug.bg_color_fill_only)
382 return 1;
383
384 return result;
385 }
386
populate_input_streams(struct vpe_priv * vpe_priv,const struct vpe_build_param * param,struct stream_ctx * stream_ctx_base)387 static enum vpe_status populate_input_streams(struct vpe_priv *vpe_priv, const struct vpe_build_param *param, struct stream_ctx *stream_ctx_base)
388 {
389 enum vpe_status result = VPE_STATUS_OK;
390 uint32_t i;
391 struct stream_ctx* stream_ctx;
392 bool input_h_mirror, output_h_mirror;
393
394 vpe_priv->resource.check_h_mirror_support(&input_h_mirror, &output_h_mirror);
395
396 for (i = 0; i < vpe_priv->num_input_streams; i++) {
397 stream_ctx = &stream_ctx_base[i];
398 stream_ctx->stream_type = VPE_STREAM_TYPE_INPUT;
399 stream_ctx->stream_idx = (int32_t)i;
400
401 stream_ctx->per_pixel_alpha =
402 vpe_has_per_pixel_alpha(param->streams[i].surface_info.format);
403
404 if (vpe_priv->init.debug.bypass_per_pixel_alpha) {
405 stream_ctx->per_pixel_alpha = false;
406 } else if (param->streams[i].enable_luma_key) {
407 stream_ctx->per_pixel_alpha = true;
408 }
409 if (param->streams[i].horizontal_mirror && !input_h_mirror && output_h_mirror)
410 stream_ctx->flip_horizonal_output = true;
411 else
412 stream_ctx->flip_horizonal_output = false;
413
414 memcpy(&stream_ctx->stream, ¶m->streams[i], sizeof(struct vpe_stream));
415
416 /* if top-bottom blending is not supported,
417 * the 1st stream still can support blending with background,
418 * however, the 2nd stream and onward can't enable blending.
419 */
420 if (i && param->streams[i].blend_info.blending &&
421 !vpe_priv->pub.caps->color_caps.mpc.top_bottom_blending) {
422 result = VPE_STATUS_ALPHA_BLENDING_NOT_SUPPORTED;
423 break;
424 }
425 }
426
427 return result;
428 }
429
/* Populate the virtual (library-generated) stream contexts.
 *
 * Currently the only virtual stream is the background-generation stream,
 * created when there is no real input or when bg_color_fill_only is set.
 * After population, a second pass assigns global stream indices (offset by
 * the number of input streams) and resolves per-pixel alpha and output-side
 * horizontal flip from the already-populated stream data.
 *
 * stream_ctx_base points at the first virtual slot (after the input streams);
 * num_virtual_streams must match the count from
 * get_required_virtual_stream_count().
 */
static enum vpe_status populate_virtual_streams(struct vpe_priv* vpe_priv, const struct vpe_build_param* param, struct stream_ctx* stream_ctx_base, uint32_t num_virtual_streams)
{
    enum vpe_status    result = VPE_STATUS_OK;
    uint32_t           virtual_stream_idx = 0;
    struct stream_ctx *stream_ctx;
    bool               input_h_mirror, output_h_mirror;

    vpe_priv->resource.check_h_mirror_support(&input_h_mirror, &output_h_mirror);

    // Background generation stream
    if (param->num_streams == 0 || vpe_priv->init.debug.bg_color_fill_only) {
        if (num_virtual_streams != 1)
            result = VPE_STATUS_ERROR;
        else
            result = populate_bg_stream(vpe_priv, param, &stream_ctx_base[virtual_stream_idx++]);
    }

    if (result != VPE_STATUS_OK)
        return result;

    // Second pass over ALL virtual streams (index reset is intentional):
    // assign indices and derive flags from the populated stream data.
    for (virtual_stream_idx = 0; virtual_stream_idx < num_virtual_streams; virtual_stream_idx++) {
        stream_ctx = &stream_ctx_base[virtual_stream_idx];
        stream_ctx->stream_idx = virtual_stream_idx + vpe_priv->num_input_streams;
        stream_ctx->per_pixel_alpha =
            vpe_has_per_pixel_alpha(stream_ctx->stream.surface_info.format);
        if (vpe_priv->init.debug.bypass_per_pixel_alpha) {
            stream_ctx->per_pixel_alpha = false;
        }
        if (stream_ctx->stream.horizontal_mirror && !input_h_mirror && output_h_mirror)
            stream_ctx->flip_horizonal_output = true;
        else
            stream_ctx->flip_horizonal_output = false;
    }

    return result;
}
466
/* Check whether a build request is supported and size the required buffers.
 *
 * vpe:   [in]  public library handle
 * param: [in]  full build parameters (streams, destination, target rect, ...)
 * req:   [out] worst-case command/embedded buffer sizes on success
 *
 * Runs the staged support pipeline: stream-context (re)allocation, per-ASIC
 * output and input checks, tone-map checks, output context caching, stream
 * population, segment calculation, and background-color validation. On full
 * success caches the results, sets ops_support so vpe_build_commands() may
 * proceed, and reports the buffer requirement. Each stage runs only if all
 * prior stages returned VPE_STATUS_OK.
 */
enum vpe_status vpe_check_support(
    struct vpe *vpe, const struct vpe_build_param *param, struct vpe_bufs_req *req)
{
    struct vpe_priv   *vpe_priv;
    struct vpec       *vpec;
    struct dpp        *dpp;
    enum vpe_status    status;
    struct output_ctx *output_ctx = NULL;
    uint32_t           i, required_virtual_streams;

    vpe_priv = container_of(vpe, struct vpe_priv, pub);
    vpec     = &vpe_priv->resource.vpec;
    dpp      = vpe_priv->resource.dpp[0];
    status   = VPE_STATUS_OK;

    vpe_priv->collaboration_mode = param->collaboration_mode;
    vpe_priv->vpe_num_instance   = param->num_instances;
    verify_collaboration_mode(vpe_priv);

    required_virtual_streams = get_required_virtual_stream_count(vpe_priv, param);

    // Reallocate the stream-context array only when the stream counts changed
    // since the previous call (contexts are cached between calls).
    if (!vpe_priv->stream_ctx ||
        vpe_priv->num_streams != (param->num_streams + vpe_priv->num_virtual_streams) ||
        vpe_priv->num_virtual_streams != required_virtual_streams) {
        if (vpe_priv->stream_ctx)
            vpe_free_stream_ctx(vpe_priv);

        vpe_priv->stream_ctx = vpe_alloc_stream_ctx(vpe_priv, param->num_streams + required_virtual_streams);
    }

    if (!vpe_priv->stream_ctx)
        status = VPE_STATUS_NO_MEMORY;
    else {
        vpe_priv->num_streams         = param->num_streams + required_virtual_streams;
        vpe_priv->num_virtual_streams = required_virtual_streams;
        vpe_priv->num_input_streams   = param->num_streams;
    }

    // Background-only mode: force the context layout to exactly one virtual
    // stream and zero input streams, reallocating if the cache disagrees.
    if (param->num_streams == 0 || vpe_priv->init.debug.bg_color_fill_only) {
        if (!((vpe_priv->num_streams == 1) &&
              (vpe_priv->num_virtual_streams == 1) &&
              (vpe_priv->num_input_streams == 0))) {
            vpe_free_stream_ctx(vpe_priv);
            vpe_priv->stream_ctx = vpe_alloc_stream_ctx(vpe_priv, 1);
            vpe_priv->num_streams         = required_virtual_streams;
            vpe_priv->num_virtual_streams = required_virtual_streams;
            vpe_priv->num_input_streams   = 0;
        }

        if (!vpe_priv->stream_ctx)
            status = VPE_STATUS_NO_MEMORY;
    }


    if (status == VPE_STATUS_OK) {
        // output checking - check per asic support
        status = vpe_check_output_support(vpe, param);
        if (status != VPE_STATUS_OK) {
            vpe_log("fail output support check. status %d\n", (int)status);
        }
    }

    if (status == VPE_STATUS_OK) {
        // input checking - check per asic support
        for (i = 0; i < param->num_streams; i++) {
            status = vpe_check_input_support(vpe, &param->streams[i]);
            if (status != VPE_STATUS_OK) {
                vpe_log("fail input support check. status %d\n", (int)status);
                break;
            }
        }
    }

    if (status == VPE_STATUS_OK) {
        // input checking - check tone map support
        for (i = 0; i < param->num_streams; i++) {
            status = vpe_check_tone_map_support(vpe, &param->streams[i], param);
            if (status != VPE_STATUS_OK) {
                vpe_log("fail tone map support check. status %d\n", (int)status);
                break;
            }
        }
    }

    if (status == VPE_STATUS_OK) {
        // output resource preparation for further checking (cache the result)
        output_ctx                      = &vpe_priv->output_ctx;
        output_ctx->surface             = param->dst_surface;
        output_ctx->mpc_bg_color        = param->bg_color;
        output_ctx->opp_bg_color        = param->bg_color;
        output_ctx->target_rect         = param->target_rect;
        output_ctx->alpha_mode          = param->alpha_mode;
        output_ctx->flags.hdr_metadata  = param->flags.hdr_metadata;
        output_ctx->hdr_metadata        = param->hdr_metadata;

        vpe_vector_clear(vpe_priv->vpe_cmd_vector);
        output_ctx->clamping_params = vpe_priv->init.debug.clamping_params;
    }


    if (status == VPE_STATUS_OK) {
        // blending support check
        status = populate_input_streams(vpe_priv, param, vpe_priv->stream_ctx);
        if (status != VPE_STATUS_OK)
            vpe_log("fail input stream population. status %d\n", (int)status);
    }

    if (status == VPE_STATUS_OK) {
        status = populate_virtual_streams(vpe_priv, param, vpe_priv->stream_ctx + vpe_priv->num_input_streams, vpe_priv->num_virtual_streams);
        if (status != VPE_STATUS_OK)
            vpe_log("fail virtual stream population. status %d\n", (int)status);
    }

    if (status == VPE_STATUS_OK) {
        status = vpe_priv->resource.calculate_segments(vpe_priv, param);
        if (status != VPE_STATUS_OK)
            vpe_log("failed in calculate segments %d\n", (int)status);
    }

    if (status == VPE_STATUS_OK) {
        // if the bg_color support is false, there is a flag to verify if the bg_color falls in the
        // output gamut
        if (!vpe_priv->pub.caps->bg_color_check_support) {
            status = vpe_priv->resource.check_bg_color_support(vpe_priv, &output_ctx->mpc_bg_color);
            if (status != VPE_STATUS_OK) {
                vpe_log(
                    "failed in checking the background color versus the output color space %d\n",
                    (int)status);
            }
        }
    }

    if (status == VPE_STATUS_OK) {
        // Calculate the buffer needed (worst case)
        vpe_priv->resource.get_bufs_req(vpe_priv, &vpe_priv->bufs_required);
        *req                  = vpe_priv->bufs_required;
        vpe_priv->ops_support = true;
    }

    if (status == VPE_STATUS_OK) {
        status = vpe_validate_geometric_scaling_support(param);
    }

    if (vpe_priv->init.debug.assert_when_not_support)
        VPE_ASSERT(status == VPE_STATUS_OK);

    vpe_event(VPE_EVENT_CHECK_SUPPORT, vpe_priv->num_streams, param->target_rect.width,
        param->target_rect.height, status);

    return status;
}
618
vpe_build_noops(struct vpe * vpe,uint32_t num_dword,uint32_t ** ppcmd_space)619 enum vpe_status vpe_build_noops(struct vpe *vpe, uint32_t num_dword, uint32_t **ppcmd_space)
620 {
621 struct vpe_priv *vpe_priv;
622 struct cmd_builder *builder;
623 enum vpe_status status;
624
625 if (!vpe || !ppcmd_space || ((*ppcmd_space) == NULL))
626 return VPE_STATUS_ERROR;
627
628 vpe_priv = container_of(vpe, struct vpe_priv, pub);
629
630 builder = &vpe_priv->resource.cmd_builder;
631
632 status = builder->build_noops(vpe_priv, ppcmd_space, num_dword);
633
634 return status;
635 }
636
validate_cached_param(struct vpe_priv * vpe_priv,const struct vpe_build_param * param)637 static bool validate_cached_param(struct vpe_priv *vpe_priv, const struct vpe_build_param *param)
638 {
639 uint32_t i;
640 struct output_ctx *output_ctx;
641
642 if (vpe_priv->num_input_streams != param->num_streams &&
643 !(vpe_priv->init.debug.bg_color_fill_only == true && vpe_priv->num_streams == 1))
644 return false;
645
646 if (vpe_priv->collaboration_mode != param->collaboration_mode)
647 return false;
648
649 if (param->num_instances > 0 && vpe_priv->vpe_num_instance != param->num_instances)
650 return false;
651
652 for (i = 0; i < vpe_priv->num_input_streams; i++) {
653 struct vpe_stream stream = param->streams[i];
654
655 vpe_clip_stream(
656 &stream.scaling_info.src_rect, &stream.scaling_info.dst_rect, ¶m->target_rect);
657
658 if (memcmp(&vpe_priv->stream_ctx[i].stream, &stream, sizeof(struct vpe_stream)))
659 return false;
660 }
661
662 output_ctx = &vpe_priv->output_ctx;
663 if (output_ctx->alpha_mode != param->alpha_mode)
664 return false;
665
666 if (memcmp(&output_ctx->mpc_bg_color, ¶m->bg_color, sizeof(struct vpe_color)))
667 return false;
668
669 if (memcmp(&output_ctx->opp_bg_color, ¶m->bg_color, sizeof(struct vpe_color)))
670 return false;
671
672 if (memcmp(&output_ctx->target_rect, ¶m->target_rect, sizeof(struct vpe_rect)))
673 return false;
674
675 if (memcmp(&output_ctx->surface, ¶m->dst_surface, sizeof(struct vpe_surface_info)))
676 return false;
677
678 return true;
679 }
680
/* Build the command and embedded buffers for a previously-validated request.
 *
 * vpe:   [in]     public library handle
 * param: [in]     build parameters; must match what vpe_check_support() cached
 * bufs:  [in/out] caller buffers. If either size is 0, the required sizes are
 *                 reported back and VPE_STATUS_OK is returned without building
 *                 (a query call). Otherwise on success the sizes are rewritten
 *                 to the number of bytes actually used.
 *
 * Requires a successful vpe_check_support() immediately before; ops_support
 * is consumed (reset to false) whether or not the build succeeds.
 */
enum vpe_status vpe_build_commands(
    struct vpe *vpe, const struct vpe_build_param *param, struct vpe_build_bufs *bufs)
{
    struct vpe_priv      *vpe_priv;
    struct cmd_builder   *builder;
    enum vpe_status       status = VPE_STATUS_OK;
    uint32_t              cmd_idx, pipe_idx, stream_idx, cmd_type_idx;
    struct vpe_build_bufs curr_bufs;
    int64_t               cmd_buf_size;
    int64_t               emb_buf_size;
    uint64_t              cmd_buf_gpu_a, cmd_buf_cpu_a;
    uint64_t              emb_buf_gpu_a, emb_buf_cpu_a;
    struct vpe_vector    *config_vector;
    struct vpe_cmd_info  *cmd_info;

    if (!vpe || !param || !bufs)
        return VPE_STATUS_ERROR;

    vpe_priv = container_of(vpe, struct vpe_priv, pub);

    // A successful check_support must precede every build.
    if (!vpe_priv->ops_support) {
        VPE_ASSERT(vpe_priv->ops_support);
        status = VPE_STATUS_NOT_SUPPORTED;
    }

    if (status == VPE_STATUS_OK) {
        if (!validate_cached_param(vpe_priv, param)) {
            status = VPE_STATUS_PARAM_CHECK_ERROR;
        }
    }

    if (status == VPE_STATUS_OK) {
        vpe_geometric_scaling_feature_skip(vpe_priv, param);

        if (bufs->cmd_buf.size == 0 || bufs->emb_buf.size == 0) {
            /* Here we directly return without setting ops_support to false
             * because the supported check is already passed
             * and the caller can come again with correct buffer size.
             */
            bufs->cmd_buf.size = vpe_priv->bufs_required.cmd_buf_size;
            bufs->emb_buf.size = vpe_priv->bufs_required.emb_buf_size;

            return VPE_STATUS_OK;
        } else if ((bufs->cmd_buf.size < vpe_priv->bufs_required.cmd_buf_size) ||
                   (bufs->emb_buf.size < vpe_priv->bufs_required.emb_buf_size)) {
            status = VPE_STATUS_INVALID_BUFFER_SIZE;
        }
    }

    builder = &vpe_priv->resource.cmd_builder;

    // store buffers original values
    cmd_buf_cpu_a = bufs->cmd_buf.cpu_va;
    cmd_buf_gpu_a = bufs->cmd_buf.gpu_va;
    cmd_buf_size  = bufs->cmd_buf.size;

    emb_buf_cpu_a = bufs->emb_buf.cpu_va;
    emb_buf_gpu_a = bufs->emb_buf.gpu_va;
    emb_buf_size  = bufs->emb_buf.size;

    // curr_bufs is used for tracking the built size and next pointers
    curr_bufs = *bufs;

    // copy the param, reset saved configs
    for (stream_idx = 0; stream_idx < vpe_priv->num_streams; stream_idx++) {
        struct stream_ctx *stream_ctx = &vpe_priv->stream_ctx[stream_idx];

        for (pipe_idx = 0; pipe_idx < MAX_INPUT_PIPE; pipe_idx++) {
            config_vector = stream_ctx->configs[pipe_idx];
            if (config_vector)
                vpe_vector_clear(config_vector);

            for (cmd_type_idx = 0; cmd_type_idx < VPE_CMD_TYPE_COUNT; cmd_type_idx++) {
                config_vector = stream_ctx->stream_op_configs[pipe_idx][cmd_type_idx];
                if (config_vector)
                    vpe_vector_clear(config_vector);
            }
        }
    }

    for (pipe_idx = 0; pipe_idx < vpe_priv->pub.caps->resource_caps.num_cdc_be; pipe_idx++) {
        config_vector = vpe_priv->output_ctx.configs[pipe_idx];
        if (config_vector)
            vpe_vector_clear(config_vector);
    }

    // Reset pipes
    vpe_pipe_reset(vpe_priv);

    if (status == VPE_STATUS_OK) {
        status = vpe_color_update_color_space_and_tf(vpe_priv, param);
        if (status != VPE_STATUS_OK) {
            vpe_log("failed in updating color space and tf %d\n", (int)status);
        }
    }

    if (status == VPE_STATUS_OK) {
        status = vpe_color_update_movable_cm(vpe_priv, param);
        if (status != VPE_STATUS_OK) {
            vpe_log("failed in updating movable 3d lut unit %d\n", (int)status);
        }
    }

    if (status == VPE_STATUS_OK) {
        status = vpe_color_update_whitepoint(vpe_priv, param);
        if (status != VPE_STATUS_OK) {
            vpe_log("failed updating whitepoint gain %d\n", (int)status);
        }
    }

    if (status == VPE_STATUS_OK) {
        /* since the background is generated by the first stream,
         * the 3dlut enablement for the background color conversion
         * is used based on the information of the first stream.
         */
        vpe_bg_color_convert(vpe_priv->output_ctx.cs, vpe_priv->output_ctx.output_tf,
            vpe_priv->output_ctx.surface.format, &vpe_priv->output_ctx.mpc_bg_color,
            &vpe_priv->output_ctx.opp_bg_color, vpe_priv->stream_ctx[0].enable_3dlut);

        // Collaboration mode brackets the command stream with sync commands;
        // additional sync pairs are inserted where a cmd requests an end-sync.
        if (vpe_priv->collaboration_mode == true) {
            status = builder->build_collaborate_sync_cmd(vpe_priv, &curr_bufs);
            if (status != VPE_STATUS_OK) {
                vpe_log("failed in building collaborate sync cmd %d\n", (int)status);
            }
        }
        for (cmd_idx = 0; cmd_idx < vpe_priv->vpe_cmd_vector->num_elements; cmd_idx++) {
            status = builder->build_vpe_cmd(vpe_priv, &curr_bufs, cmd_idx);
            if (status != VPE_STATUS_OK) {
                vpe_log("failed in building vpe cmd %d\n", (int)status);
                break;
            }

            cmd_info = vpe_vector_get(vpe_priv->vpe_cmd_vector, cmd_idx);
            if (cmd_info == NULL) {
                status = VPE_STATUS_ERROR;
                break;
            }

            if ((vpe_priv->collaboration_mode == true) && (cmd_info->insert_end_csync == true)) {
                status = builder->build_collaborate_sync_cmd(vpe_priv, &curr_bufs);
                if (status != VPE_STATUS_OK) {
                    vpe_log("failed in building collaborate sync cmd %d\n", (int)status);
                    break;
                }

                // Add next collaborate sync start command when this vpe_cmd isn't the final one.
                if (cmd_idx < (uint32_t)(vpe_priv->vpe_cmd_vector->num_elements - 1)) {
                    status = builder->build_collaborate_sync_cmd(vpe_priv, &curr_bufs);
                    if (status != VPE_STATUS_OK) {
                        vpe_log("failed in building collaborate sync cmd %d\n", (int)status);
                        break;
                    }
                }
            }
        }
        if ((status == VPE_STATUS_OK) && (vpe_priv->collaboration_mode == true)) {
            status = builder->build_collaborate_sync_cmd(vpe_priv, &curr_bufs);
            if (status != VPE_STATUS_OK) {
                vpe_log("failed in building collaborate sync cmd %d\n", (int)status);
            }
        }
    }

    if (status == VPE_STATUS_OK) {
        // Report back the consumed portion of each buffer; addresses are
        // restored to the originals (curr_bufs advanced past the used space).
        bufs->cmd_buf.size   = cmd_buf_size - curr_bufs.cmd_buf.size; // used cmd buffer size
        bufs->cmd_buf.gpu_va = cmd_buf_gpu_a;
        bufs->cmd_buf.cpu_va = cmd_buf_cpu_a;

        bufs->emb_buf.size   = emb_buf_size - curr_bufs.emb_buf.size; // used emb buffer size
        bufs->emb_buf.gpu_va = emb_buf_gpu_a;
        bufs->emb_buf.cpu_va = emb_buf_cpu_a;
    }

    // A new check_support is required before the next build.
    vpe_priv->ops_support = false;

    if (vpe_priv->init.debug.assert_when_not_support)
        VPE_ASSERT(status == VPE_STATUS_OK);

    return status;
}
861
vpe_get_optimal_num_of_taps(struct vpe * vpe,struct vpe_scaling_info * scaling_info)862 void vpe_get_optimal_num_of_taps(struct vpe *vpe, struct vpe_scaling_info *scaling_info)
863 {
864 struct vpe_priv *vpe_priv;
865 struct dpp *dpp;
866
867 vpe_priv = container_of(vpe, struct vpe_priv, pub);
868 dpp = vpe_priv->resource.dpp[0];
869
870 dpp->funcs->get_optimal_number_of_taps(
871 &scaling_info->src_rect, &scaling_info->dst_rect, &scaling_info->taps);
872 }
873