• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* Copyright 2022 Advanced Micro Devices, Inc.
2  *
3  * Permission is hereby granted, free of charge, to any person obtaining a
4  * copy of this software and associated documentation files (the "Software"),
5  * to deal in the Software without restriction, including without limitation
6  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
7  * and/or sell copies of the Software, and to permit persons to whom the
8  * Software is furnished to do so, subject to the following conditions:
9  *
10  * The above copyright notice and this permission notice shall be included in
11  * all copies or substantial portions of the Software.
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
16  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
17  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
18  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
19  * OTHER DEALINGS IN THE SOFTWARE.
20  *
21  * Authors: AMD
22  *
23  */
24 #include <math.h>
25 #include "vpe_types.h"
26 #include "vpe_priv.h"
27 #include "vpe_version.h"
28 #include "common.h"
29 
30 #ifdef VPE_BUILD_1_0
31 #include "vpe10_resource.h"
32 #endif
33 
/* Default debug options copied into every vpe instance by
 * vpe_construct_resource().  All bypass/override knobs default to off (0);
 * the non-zero entries are:
 *   - force_tf_calculation, expansion_mode, clamping_setting: 1
 *   - clamping_params: per-component clamp window 0x1000..0xEB00,
 *     clamping_range = 4
 */
static const struct vpe_debug_options debug_defaults = {
    .flags                   = {0},
    .cm_in_bypass            = 0,
    .vpcnvc_bypass           = 0,
    .mpc_bypass              = 0,
    .identity_3dlut          = 0,
    .sce_3dlut               = 0,
    .disable_reuse_bit       = 0,
    .bg_bit_depth            = 0,
    .bypass_gamcor           = 0,
    .bypass_ogam             = 0,
    .bypass_dpp_gamut_remap  = 0,
    .bypass_post_csc         = 0,
    .bg_color_fill_only      = 0,
    .assert_when_not_support = 0,
    // memory low-power features all disabled by default
    .enable_mem_low_power =
        {
            .bits =
                {
                    .cm   = false,
                    .dscl = false,
                    .mpc  = false,
                },
        },
    .force_tf_calculation                    = 1,
    .expansion_mode                          = 1,
    .clamping_setting                        = 1,
    // clamp window per color component; range value 4 per hardware spec
    // (assumed — TODO confirm against register documentation)
    .clamping_params =
        {
            .r_clamp_component_lower         = 0x1000,
            .g_clamp_component_lower         = 0x1000,
            .b_clamp_component_lower         = 0x1000,
            .r_clamp_component_upper         = 0xEB00,
            .g_clamp_component_upper         = 0xEB00,
            .b_clamp_component_upper         = 0xEB00,
            .clamping_range                  = 4,
        },
    .bypass_per_pixel_alpha                  = 0,
    .opp_pipe_crc_ctrl                       = 0,
    .dpp_crc_ctrl                            = 0,
    .mpc_crc_ctrl                            = 0,
    .visual_confirm_params                   = {{{0}}},
    .skip_optimal_tap_check                  = 0,
};
78 
vpe_resource_parse_ip_version(uint8_t major,uint8_t minor,uint8_t rev_id)79 enum vpe_ip_level vpe_resource_parse_ip_version(
80     uint8_t major, uint8_t minor, uint8_t rev_id)
81 {
82     enum vpe_ip_level ip_level = VPE_IP_LEVEL_UNKNOWN;
83     switch (VPE_VERSION(major, minor, rev_id)) {
84 #if VPE_BUILD_1_X
85 #if VPE_BUILD_1_0
86     case VPE_VERSION(6, 1, 0):
87         ip_level = VPE_IP_LEVEL_1_0;
88         break;
89 #endif
90 #endif
91     default:
92         ip_level = VPE_IP_LEVEL_UNKNOWN;
93         break;
94     }
95     return ip_level;
96 }
97 
vpe_construct_resource(struct vpe_priv * vpe_priv,enum vpe_ip_level level,struct resource * res)98 enum vpe_status vpe_construct_resource(
99     struct vpe_priv *vpe_priv, enum vpe_ip_level level, struct resource *res)
100 {
101     enum vpe_status status = VPE_STATUS_OK;
102     switch (level) {
103 #ifdef VPE_BUILD_1_0
104     case VPE_IP_LEVEL_1_0:
105         status = vpe10_construct_resource(vpe_priv, res);
106         break;
107 #endif
108     default:
109         status = VPE_STATUS_NOT_SUPPORTED;
110         vpe_log("invalid ip level: %d", (int)level);
111         break;
112     }
113 
114     vpe_priv->init.debug     = debug_defaults;
115     vpe_priv->expansion_mode = vpe_priv->init.debug.expansion_mode;
116     if (res)
117         res->vpe_priv = vpe_priv;
118 
119     return status;
120 }
121 
/* Tear down the IP-level-specific resource created by
 * vpe_construct_resource(); dispatches on the instance's public IP level.
 * Unknown levels are silently ignored.
 */
void vpe_destroy_resource(struct vpe_priv *vpe_priv, struct resource *res)
{
#ifdef VPE_BUILD_1_0
    if (vpe_priv->pub.level == VPE_IP_LEVEL_1_0)
        vpe10_destroy_resource(vpe_priv, res);
#endif
}
134 
vpe_alloc_segment_ctx(struct vpe_priv * vpe_priv,uint16_t num_segments)135 struct segment_ctx *vpe_alloc_segment_ctx(struct vpe_priv *vpe_priv, uint16_t num_segments)
136 {
137     struct segment_ctx *segment_ctx_base;
138 
139     segment_ctx_base = (struct segment_ctx *)vpe_zalloc(sizeof(struct segment_ctx) * num_segments);
140 
141     if (!segment_ctx_base)
142         return NULL;
143 
144     return segment_ctx_base;
145 }
146 
vpe_alloc_stream_ctx(struct vpe_priv * vpe_priv,uint32_t num_streams)147 struct stream_ctx *vpe_alloc_stream_ctx(struct vpe_priv *vpe_priv, uint32_t num_streams)
148 {
149     struct stream_ctx *ctx_base, *ctx;
150     uint32_t           i;
151 
152     ctx_base = (struct stream_ctx *)vpe_zalloc(sizeof(struct stream_ctx) * num_streams);
153     if (!ctx_base)
154         return NULL;
155 
156     for (i = 0; i < num_streams; i++) {
157         ctx           = &ctx_base[i];
158         ctx->cs       = COLOR_SPACE_UNKNOWN;
159         ctx->tf       = TRANSFER_FUNC_UNKNOWN;
160         ctx->vpe_priv = vpe_priv;
161         vpe_color_set_adjustments_to_default(&ctx->color_adjustments);
162         ctx->tf_scaling_factor = vpe_fixpt_one;
163         ctx->stream.flags.geometric_scaling = 0;
164         ctx->stream.tm_params.UID = 0;
165         ctx->UID_3DLUT = 0;
166     }
167 
168     return ctx_base;
169 }
170 
vpe_free_stream_ctx(struct vpe_priv * vpe_priv)171 void vpe_free_stream_ctx(struct vpe_priv *vpe_priv)
172 {
173     uint16_t           i;
174     struct stream_ctx *ctx;
175 
176     if (!vpe_priv->stream_ctx || !vpe_priv->num_streams)
177         return;
178 
179     for (i = 0; i < vpe_priv->num_streams; i++) {
180         ctx = &vpe_priv->stream_ctx[i];
181         if (ctx->input_tf) {
182             vpe_free(ctx->input_tf);
183             ctx->input_tf = NULL;
184         }
185 
186         if (ctx->bias_scale) {
187             vpe_free(ctx->bias_scale);
188             ctx->bias_scale = NULL;
189         }
190 
191         if (ctx->input_cs) {
192             vpe_free(ctx->input_cs);
193             ctx->input_cs = NULL;
194         }
195 
196         if (ctx->gamut_remap) {
197             vpe_free(ctx->gamut_remap);
198             ctx->gamut_remap = NULL;
199         }
200 
201         if (ctx->in_shaper_func) {
202             vpe_free(ctx->in_shaper_func);
203             ctx->in_shaper_func = NULL;
204         }
205 
206         if (ctx->blend_tf) {
207             vpe_free(ctx->blend_tf);
208             ctx->blend_tf = NULL;
209         }
210 
211         if (ctx->lut3d_func) {
212             vpe_free(ctx->lut3d_func);
213             ctx->lut3d_func = NULL;
214         }
215 
216         if (ctx->segment_ctx) {
217             vpe_free(ctx->segment_ctx);
218             ctx->segment_ctx = NULL;
219         }
220     }
221     vpe_free(vpe_priv->stream_ctx);
222     vpe_priv->stream_ctx  = NULL;
223     vpe_priv->num_streams = 0;
224 }
225 
vpe_free_output_ctx(struct vpe_priv * vpe_priv)226 void vpe_free_output_ctx(struct vpe_priv *vpe_priv)
227 {
228     if (vpe_priv->output_ctx.gamut_remap)
229         vpe_free(vpe_priv->output_ctx.gamut_remap);
230 
231     if (vpe_priv->output_ctx.output_tf)
232         vpe_free(vpe_priv->output_ctx.output_tf);
233 }
234 
vpe_pipe_reset(struct vpe_priv * vpe_priv)235 void vpe_pipe_reset(struct vpe_priv *vpe_priv)
236 {
237     int              i;
238     struct pipe_ctx *pipe_ctx;
239 
240     for (i = 0; i < vpe_priv->num_pipe; i++) {
241         pipe_ctx               = &vpe_priv->pipe_ctx[i];
242         pipe_ctx->is_top_pipe  = true;
243         pipe_ctx->owner        = PIPE_CTX_NO_OWNER;
244         pipe_ctx->top_pipe_idx = 0xff;
245     }
246 }
247 
vpe_pipe_reclaim(struct vpe_priv * vpe_priv,struct vpe_cmd_info * cmd_info)248 void vpe_pipe_reclaim(struct vpe_priv *vpe_priv, struct vpe_cmd_info *cmd_info)
249 {
250     int              i, j;
251     struct pipe_ctx *pipe_ctx;
252 
253     for (i = 0; i < vpe_priv->num_pipe; i++) {
254         pipe_ctx = &vpe_priv->pipe_ctx[i];
255         if (pipe_ctx->owner != PIPE_CTX_NO_OWNER) {
256             for (j = 0; j < cmd_info->num_inputs; j++)
257                 if (pipe_ctx->owner == cmd_info->inputs[j].stream_idx)
258                     break;
259 
260             if (j == cmd_info->num_inputs) {
261                 // that stream no longer exists
262                 pipe_ctx->is_top_pipe  = true;
263                 pipe_ctx->owner        = PIPE_CTX_NO_OWNER;
264                 pipe_ctx->top_pipe_idx = 0xff;
265             }
266         }
267     }
268 }
269 
vpe_pipe_find_owner(struct vpe_priv * vpe_priv,uint32_t stream_idx,bool * reuse)270 struct pipe_ctx *vpe_pipe_find_owner(struct vpe_priv *vpe_priv, uint32_t stream_idx, bool *reuse)
271 {
272     int              i;
273     struct pipe_ctx *pipe_ctx;
274     struct pipe_ctx *free_pipe = NULL;
275 
276     for (i = 0; i < vpe_priv->num_pipe; i++) {
277         pipe_ctx = &vpe_priv->pipe_ctx[i];
278 
279         if (!free_pipe && (pipe_ctx->owner == PIPE_CTX_NO_OWNER))
280             free_pipe = pipe_ctx;
281         // re-use the same pipe
282         else if (pipe_ctx->owner == stream_idx) {
283             *reuse = true;
284             return pipe_ctx;
285         }
286     }
287 
288     if (free_pipe) {
289         free_pipe->owner = stream_idx;
290     }
291     *reuse = false;
292     return free_pipe;
293 }
294 
/* Compute this segment's recout (output rectangle relative to the
 * stream's dst rect).  The dst rect is split horizontally into
 * stream_ctx->num_segments slices of near-equal width; remainder pixels
 * from the integer division are distributed one-per-segment to the
 * trailing segments so the widths sum exactly to the dst width.
 */
static void calculate_recout(struct segment_ctx *segment)
{
    struct stream_ctx  *stream_ctx = segment->stream_ctx;
    struct scaler_data *data       = &segment->scaler_data;
    struct vpe_rect    *dst_rect;
    int32_t             split_count, split_idx;

    dst_rect = &stream_ctx->stream.scaling_info.dst_rect;

    split_count = stream_ctx->num_segments - 1;
    split_idx   = segment->segment_idx;

    // src & dst rect has been clipped earlier
    data->recout.x      = 0;
    data->recout.y      = 0;
    data->recout.width  = dst_rect->width;
    data->recout.height = dst_rect->height;

    if (split_count) {
        /* extra pixels in the division remainder need to go to pipes after
         * the extra pixel index minus one(epimo) defined here as:
         */
        int32_t epimo = split_count - (int32_t)data->recout.width % (split_count + 1);

        // base offset from equal-width slices, plus one pixel for every
        // earlier segment that absorbed a remainder pixel
        data->recout.x += ((int32_t)data->recout.width / (split_count + 1)) * split_idx;
        if (split_idx > epimo)
            data->recout.x += split_idx - epimo - 1;

        // segments with index > epimo get one extra remainder pixel
        data->recout.width =
            data->recout.width / (uint32_t)(split_count + 1) + (split_idx > epimo ? 1 : 0);
    }
}
327 
/* Derive the horizontal/vertical scaling ratios (src size / dst size,
 * fixed 31.32) for luma and chroma.  For YUV 4:2:0 formats the chroma
 * planes are half resolution in both axes, so the chroma ratios are
 * halved.  All four ratios are truncated to 19 fractional bits,
 * presumably to match the hardware scaler's phase precision — TODO
 * confirm against the register spec.
 */
void calculate_scaling_ratios(struct scaler_data *scl_data, struct vpe_rect *src_rect,
    struct vpe_rect *dst_rect, enum vpe_surface_pixel_format format)
{
    // no rotation support

    scl_data->ratios.horz   = vpe_fixpt_from_fraction(src_rect->width, dst_rect->width);
    scl_data->ratios.vert   = vpe_fixpt_from_fraction(src_rect->height, dst_rect->height);
    scl_data->ratios.horz_c = scl_data->ratios.horz;
    scl_data->ratios.vert_c = scl_data->ratios.vert;

    if (vpe_is_yuv420(format)) {
        // chroma is subsampled 2x in both directions
        scl_data->ratios.horz_c.value /= 2;
        scl_data->ratios.vert_c.value /= 2;
    }

    scl_data->ratios.horz   = vpe_fixpt_truncate(scl_data->ratios.horz, 19);
    scl_data->ratios.vert   = vpe_fixpt_truncate(scl_data->ratios.vert, 19);
    scl_data->ratios.horz_c = vpe_fixpt_truncate(scl_data->ratios.horz_c, 19);
    scl_data->ratios.vert_c = vpe_fixpt_truncate(scl_data->ratios.vert_c, 19);
}
348 
349 /*
350  * This is a preliminary vp size calculation to allow us to check taps support.
351  * The result is completely overridden afterwards.
352  */
calculate_viewport_size(struct segment_ctx * segment_ctx)353 static void calculate_viewport_size(struct segment_ctx *segment_ctx)
354 {
355     struct scaler_data *data = &segment_ctx->scaler_data;
356 
357     data->viewport.width =
358         (uint32_t)vpe_fixpt_ceil(vpe_fixpt_mul_int(data->ratios.horz, (int)data->recout.width));
359     data->viewport.height =
360         (uint32_t)vpe_fixpt_ceil(vpe_fixpt_mul_int(data->ratios.vert, (int)data->recout.height));
361     data->viewport_c.width =
362         (uint32_t)vpe_fixpt_ceil(vpe_fixpt_mul_int(data->ratios.horz_c, (int)data->recout.width));
363     data->viewport_c.height =
364         (uint32_t)vpe_fixpt_ceil(vpe_fixpt_mul_int(data->ratios.vert_c, (int)data->recout.height));
365 }
366 
/*
 * We completely calculate vp offset, size and inits here based entirely on scaling
 * ratios and recout for pixel perfect pipe combine.
 *
 * For one axis of one segment, computes:
 *   *init      - scaler initial phase (fixed 31.32, truncated to 19 bits)
 *   *vp_offset - viewport offset into the source, in source pixels
 *   *vp_size   - viewport size in source pixels
 * from the recout position/size, the scaling ratio, the tap count and a
 * chroma cositing adjustment (init_adj).
 */
static void calculate_init_and_vp(bool flip_scan_dir, int32_t recout_offset, uint32_t recout_size,
    uint32_t src_size, uint32_t taps, struct fixed31_32 ratio, struct fixed31_32 init_adj,
    struct fixed31_32 *init, int32_t *vp_offset, uint32_t *vp_size)
{

    struct fixed31_32 src_offset, temp;
    int32_t           int_part;

    /*
     * First of the taps starts sampling pixel number <init_int_part> corresponding to recout
     * pixel 1. Next recout pixel samples int part of <init + scaling ratio> and so on.
     * All following calculations are based on this logic.
     */
    src_offset = vpe_fixpt_mul_int(ratio, recout_offset);
    *vp_offset = vpe_fixpt_floor(src_offset);

    // calculate the phase
    init->value = src_offset.value & 0xffffffff; // for phase accumulation
    *init       = vpe_fixpt_add(*init, init_adj);
    int_part    = vpe_fixpt_floor(vpe_fixpt_from_fraction(taps, 2)) +
               1; // middle point of the sampling window
    *init = vpe_fixpt_add_int(*init, int_part);
    *init = vpe_fixpt_truncate(*init, 19);
    /*
     * If there are more pixels on the left hand side (top for vertical scaling) of the
     * sampling point which can be covered by the taps, init value needs go get increased
     * to be able to buffer the pixels as much as taps.
     */
    if (int_part < (int32_t)taps) {
        int32_t left = (int32_t)taps - int_part;
        if (left > *vp_offset)
            left = *vp_offset; // cannot buffer more than what exists before the offset
        *vp_offset -= left;
        *init = vpe_fixpt_add_int(*init, left);
    }
    /*
     * If taps are sampling outside of viewport at end of recout and there are more pixels
     * available in the surface we should increase the viewport size, regardless set vp to
     * only what is used.
     */
    temp     = vpe_fixpt_add(*init, vpe_fixpt_mul_int(ratio, (int)(recout_size - 1)));
    *vp_size = (uint32_t)vpe_fixpt_floor(temp);
    if ((uint32_t)((int32_t)*vp_size + *vp_offset) > src_size)
        *vp_size = (uint32_t)((int32_t)src_size - *vp_offset); // clamp to surface bounds
    /* We did all the math assuming we are scanning same direction as display does,
     * however mirror/rotation changes how vp scans vs how it is offset. If scan direction
     * is flipped we simply need to calculate offset from the other side of plane.
     * Note that outside of viewport all scaling hardware works in recout space.
     */
    if (flip_scan_dir)
        *vp_offset = (int32_t)src_size - *vp_offset - (int32_t)*vp_size;
}
423 
get_vp_scan_direction(enum vpe_rotation_angle rotation,bool horizontal_mirror,bool * orthogonal_rotation,bool * flip_vert_scan_dir,bool * flip_horz_scan_dir)424 static inline void get_vp_scan_direction(enum vpe_rotation_angle rotation, bool horizontal_mirror,
425     bool *orthogonal_rotation, bool *flip_vert_scan_dir, bool *flip_horz_scan_dir)
426 {
427     *orthogonal_rotation = false;
428     *flip_vert_scan_dir  = false;
429     *flip_horz_scan_dir  = false;
430     if (rotation == VPE_ROTATION_ANGLE_180) {
431         *flip_vert_scan_dir = true;
432         *flip_horz_scan_dir = true;
433     } else if (rotation == VPE_ROTATION_ANGLE_90) {
434         *orthogonal_rotation = true;
435         *flip_horz_scan_dir  = true;
436     } else if (rotation == VPE_ROTATION_ANGLE_270) {
437         *orthogonal_rotation = true;
438         *flip_vert_scan_dir  = true;
439     }
440 
441     if (horizontal_mirror)
442         *flip_horz_scan_dir = !*flip_horz_scan_dir;
443 }
444 
/* Final viewport/init computation for a segment: applies rotation and
 * mirror scan direction, chroma cositing phase adjustments for YUV420,
 * then derives init phase, viewport offset and size per axis for both
 * luma and chroma via calculate_init_and_vp(), and converts viewport
 * coordinates to absolute source addresses.  Always returns
 * VPE_STATUS_OK in the current implementation.
 */
static enum vpe_status calculate_inits_and_viewports(struct segment_ctx *segment_ctx)
{
    struct stream_ctx       *stream_ctx   = segment_ctx->stream_ctx;
    struct vpe_surface_info *surface_info = &stream_ctx->stream.surface_info;
    struct vpe_rect          src_rect     = stream_ctx->stream.scaling_info.src_rect;
    // NOTE(review): dst_rect is declared but not used below — confirm
    // whether it is leftover or needed by a build variant.
    struct vpe_rect         *dst_rect     = &stream_ctx->stream.scaling_info.dst_rect;
    struct scaler_data      *data         = &segment_ctx->scaler_data;
    uint32_t                 vpc_div      = vpe_is_yuv420(data->format) ? 2 : 1;
    bool                     orthogonal_rotation, flip_vert_scan_dir, flip_horz_scan_dir;
    struct fixed31_32        init_adj_h = vpe_fixpt_zero;
    struct fixed31_32        init_adj_v = vpe_fixpt_zero;

    get_vp_scan_direction(stream_ctx->stream.rotation, stream_ctx->stream.horizontal_mirror,
        &orthogonal_rotation, &flip_vert_scan_dir, &flip_horz_scan_dir);

    if (orthogonal_rotation) {
        // 90/270 rotation swaps the axes
        swap(src_rect.width, src_rect.height);
        swap(flip_vert_scan_dir, flip_horz_scan_dir);
    }

    if (flip_horz_scan_dir) {
        if (stream_ctx->flip_horizonal_output)
            // flip at the output instead
            flip_horz_scan_dir = false;
    }

    if (vpe_is_yuv420(data->format)) {
        int sign = -1; // this gives the direction of the cositing (negative will move left, right
                       // otherwise)
        switch (surface_info->cs.cositing) {

        case VPE_CHROMA_COSITING_LEFT:
            init_adj_h = vpe_fixpt_zero;
            init_adj_v = vpe_fixpt_from_fraction(sign, 4);
            break;
        case VPE_CHROMA_COSITING_NONE:
            init_adj_h = vpe_fixpt_from_fraction(sign, 4);
            init_adj_v = vpe_fixpt_from_fraction(sign, 4);
            break;
        case VPE_CHROMA_COSITING_TOPLEFT:
        default:
            init_adj_h = vpe_fixpt_zero;
            init_adj_v = vpe_fixpt_zero;
            break;
        }
    }

    // luma horizontal, chroma horizontal, luma vertical, chroma vertical
    calculate_init_and_vp(flip_horz_scan_dir, data->recout.x, data->recout.width, src_rect.width,
        data->taps.h_taps, data->ratios.horz, vpe_fixpt_zero, &data->inits.h, &data->viewport.x,
        &data->viewport.width);
    calculate_init_and_vp(flip_horz_scan_dir, data->recout.x, data->recout.width,
        src_rect.width / vpc_div, data->taps.h_taps_c, data->ratios.horz_c, init_adj_h,
        &data->inits.h_c, &data->viewport_c.x, &data->viewport_c.width);
    calculate_init_and_vp(flip_vert_scan_dir, data->recout.y, data->recout.height, src_rect.height,
        data->taps.v_taps, data->ratios.vert, vpe_fixpt_zero, &data->inits.v, &data->viewport.y,
        &data->viewport.height);
    calculate_init_and_vp(flip_vert_scan_dir, data->recout.y, data->recout.height,
        src_rect.height / vpc_div, data->taps.v_taps_c, data->ratios.vert_c, init_adj_v,
        &data->inits.v_c, &data->viewport_c.y, &data->viewport_c.height);

    // convert to absolute address
    data->viewport.x += src_rect.x;
    data->viewport.y += src_rect.y;
    data->viewport_c.x += src_rect.x / (int32_t)vpc_div;
    data->viewport_c.y += src_rect.y / (int32_t)vpc_div;

    return VPE_STATUS_OK;
}
513 
vpe_get_num_segments(struct vpe_priv * vpe_priv,const struct vpe_rect * src,const struct vpe_rect * dst,const uint32_t max_seg_width)514 uint16_t vpe_get_num_segments(struct vpe_priv *vpe_priv, const struct vpe_rect *src,
515     const struct vpe_rect *dst, const uint32_t max_seg_width)
516 {
517     int num_seg_src = (int)(ceil((double)src->width / max_seg_width));
518     int num_seg_dst = (int)(ceil((double)dst->width / max_seg_width));
519     return (uint16_t)(max(max(num_seg_src, num_seg_dst), 1));
520 }
521 
/* Clip dst_rect to target_rect and shrink src_rect proportionally (by
 * the src/dst scaling ratio on each axis) so the remaining source still
 * maps onto the clipped destination.  Both rects are updated in place.
 * The ratios are computed from the ORIGINAL (pre-clip) rect sizes.
 */
void vpe_clip_stream(
    struct vpe_rect *src_rect, struct vpe_rect *dst_rect, const struct vpe_rect *target_rect)
{
    struct fixed31_32 scaling_ratio_h;
    struct fixed31_32 scaling_ratio_v;

    struct vpe_rect clipped_dst_rect, clipped_src_rect;
    uint32_t        clipped_pixels;

    clipped_dst_rect = *dst_rect;
    clipped_src_rect = *src_rect;

    scaling_ratio_h = vpe_fixpt_from_fraction(src_rect->width, dst_rect->width);
    scaling_ratio_v = vpe_fixpt_from_fraction(src_rect->height, dst_rect->height);

    // dst extends past the target's left edge: clip dst, advance src
    if (dst_rect->x < target_rect->x) {
        clipped_pixels     = (uint32_t)(target_rect->x - dst_rect->x);
        clipped_dst_rect.x = target_rect->x;
        clipped_dst_rect.width -= clipped_pixels;
        clipped_pixels = (uint32_t)vpe_fixpt_round(
            vpe_fixpt_mul_int(scaling_ratio_h, (int)(target_rect->x - dst_rect->x)));
        clipped_src_rect.x += (int32_t)clipped_pixels;
        clipped_src_rect.width -= clipped_pixels;
    }
    // dst extends past the target's top edge: clip dst, advance src
    if (dst_rect->y < target_rect->y) {
        clipped_pixels     = (uint32_t)(target_rect->y - dst_rect->y);
        clipped_dst_rect.y = target_rect->y;
        clipped_dst_rect.height -= clipped_pixels;
        clipped_pixels = (uint32_t)vpe_fixpt_round(
            vpe_fixpt_mul_int(scaling_ratio_v, (int)(target_rect->y - dst_rect->y)));
        clipped_src_rect.y += (int32_t)clipped_pixels;
        clipped_src_rect.height -= clipped_pixels;
    }
    // dst extends past the target's right edge: shrink both widths
    if (dst_rect->x + (int32_t)dst_rect->width > target_rect->x + (int32_t)target_rect->width) {
        clipped_dst_rect.width =
            (uint32_t)(target_rect->x + (int32_t)target_rect->width - clipped_dst_rect.x);
        clipped_src_rect.width = (uint32_t)vpe_fixpt_round(
            vpe_fixpt_mul_int(scaling_ratio_h, (int)clipped_dst_rect.width));
    }
    // dst extends past the target's bottom edge: shrink both heights
    if (dst_rect->y + (int32_t)dst_rect->height > target_rect->y + (int32_t)target_rect->height) {
        clipped_dst_rect.height =
            (uint32_t)(target_rect->y + (int32_t)target_rect->height - clipped_dst_rect.y);
        clipped_src_rect.height = (uint32_t)vpe_fixpt_round(
            vpe_fixpt_mul_int(scaling_ratio_v, (int)clipped_dst_rect.height));
    }

    *src_rect = clipped_src_rect;
    *dst_rect = clipped_dst_rect;
}
571 
vpe_resource_build_scaling_params(struct segment_ctx * segment_ctx)572 enum vpe_status vpe_resource_build_scaling_params(struct segment_ctx *segment_ctx)
573 {
574     struct stream_ctx  *stream_ctx = segment_ctx->stream_ctx;
575     struct scaler_data *scl_data   = &segment_ctx->scaler_data;
576     struct dpp         *dpp        = stream_ctx->vpe_priv->resource.dpp[0];
577 
578     scl_data->format             = stream_ctx->stream.surface_info.format;
579     scl_data->lb_params.alpha_en = stream_ctx->per_pixel_alpha;
580 
581     // h/v active will be set later
582 
583     /* recout.x is temporary for viewport calculation,
584      * will be finalized in calculate_dst_viewport_and_active()
585      */
586 
587     calculate_recout(segment_ctx);
588     calculate_viewport_size(segment_ctx);
589 
590     if (scl_data->viewport.height < 1 || scl_data->viewport.width < 1)
591         return VPE_STATUS_VIEWPORT_SIZE_NOT_SUPPORTED;
592 
593     if (!dpp->funcs->validate_number_of_taps(dpp, scl_data)) {
594         return VPE_STATUS_SCALING_RATIO_NOT_SUPPORTED;
595     }
596 
597     calculate_inits_and_viewports(segment_ctx);
598 
599     if (scl_data->viewport.height < VPE_MIN_VIEWPORT_SIZE ||
600         scl_data->viewport.width < VPE_MIN_VIEWPORT_SIZE)
601         return VPE_STATUS_VIEWPORT_SIZE_NOT_SUPPORTED;
602 
603     return VPE_STATUS_OK;
604 }
605 
/* For each stream flagged flip_horizonal_output, reverse the horizontal
 * order of its segments' destination viewports so segments are emitted
 * right-to-left (the actual pixel mirroring is applied at the output —
 * see the "flip at the output instead" handling in
 * calculate_inits_and_viewports).
 */
void vpe_handle_output_h_mirror(struct vpe_priv *vpe_priv)
{
    uint16_t           stream_idx;
    int                seg_idx;
    struct stream_ctx *stream_ctx;

    // swap the stream output location
    for (stream_idx = 0; stream_idx < vpe_priv->num_streams; stream_idx++) {
        stream_ctx = &vpe_priv->stream_ctx[stream_idx];
        if (stream_ctx->flip_horizonal_output) {
            struct segment_ctx *first_seg, *last_seg;

            // swap the segment output order, init the last segment first
            first_seg = &stream_ctx->segment_ctx[0];
            last_seg  = &stream_ctx->segment_ctx[stream_ctx->num_segments - 1];

            // last segment becomes first
            // NOTE(review): only dst_viewport.x is taken from the first
            // segment here; dst_viewport_c.x of the former last segment is
            // not re-seeded — presumably set elsewhere, confirm.
            last_seg->scaler_data.dst_viewport.x = first_seg->scaler_data.dst_viewport.x;

            // walk the remaining segments backwards, placing each one
            // immediately after its (reversed-order) predecessor
            for (seg_idx = (int)(stream_ctx->num_segments - 2); seg_idx >= 0; seg_idx--) {
                struct segment_ctx *prev_seg, *curr_seg;

                // set the x in reverse order
                prev_seg = &stream_ctx->segment_ctx[seg_idx + 1];
                curr_seg = &stream_ctx->segment_ctx[seg_idx];

                curr_seg->scaler_data.dst_viewport.x =
                    prev_seg->scaler_data.dst_viewport.x +
                    (int32_t)prev_seg->scaler_data.dst_viewport.width;

                curr_seg->scaler_data.dst_viewport_c.x =
                    prev_seg->scaler_data.dst_viewport_c.x +
                    (int32_t)prev_seg->scaler_data.dst_viewport_c.width;
            }
        }
    }
}
643 
vpe_resource_build_bit_depth_reduction_params(struct opp * opp,struct bit_depth_reduction_params * fmt_bit_depth)644 void vpe_resource_build_bit_depth_reduction_params(
645     struct opp *opp, struct bit_depth_reduction_params *fmt_bit_depth)
646 {
647     struct vpe_priv         *vpe_priv    = opp->vpe_priv;
648     struct vpe_surface_info *dst_surface = &vpe_priv->output_ctx.surface;
649     enum color_depth         display_color_depth;
650     memset(fmt_bit_depth, 0, sizeof(*fmt_bit_depth));
651 
652     display_color_depth = vpe_get_color_depth(dst_surface->format);
653 
654     switch (display_color_depth) {
655     case COLOR_DEPTH_888:
656     case COLOR_DEPTH_101010:
657         fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
658         fmt_bit_depth->flags.TRUNCATE_DEPTH   = (display_color_depth == COLOR_DEPTH_888) ? 1 : 2;
659         fmt_bit_depth->flags.TRUNCATE_MODE    = 1;
660         break;
661     default:
662         break;
663     }
664 }
665