• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 
2 #include "vpe_priv.h"
3 #include "reg_helper.h"
4 #include "vpe10/inc/vpe10_cm_common.h"
5 #include "vpe10_dpp.h"
6 #include "conversion.h"
7 #include "color_pwl.h"
8 
9 #define CTX      vpe10_dpp
10 #define CTX_BASE dpp
11 
vpe10_enable_cm_block(struct dpp * dpp)12 static void vpe10_enable_cm_block(struct dpp *dpp)
13 {
14     unsigned int cm_bypass_mode = 0;
15 
16     PROGRAM_ENTRY();
17 
18     // debug option: put CM in bypass mode
19     if (vpe_priv->init.debug.cm_in_bypass)
20         cm_bypass_mode = 1;
21 
22     REG_SET(VPCM_CONTROL, 0, VPCM_BYPASS, cm_bypass_mode);
23 }
24 
/* Power the gamma-correction (GAMCOR) LUT memory up or down.
 *
 * dpp:      DPP instance to program.
 * power_on: true to power the LUT memory on, false to let it power down.
 *
 * When CM memory low-power is enabled, the power state is driven through
 * GAMCOR_MEM_PWR_FORCE (0 = run, 3 = force down); the repeated identical
 * writes on power-up are intentional dummy updates that provide the
 * memory wake-up delay.  When low-power is disabled, only
 * GAMCOR_MEM_PWR_DIS is toggled and the memory is never forced down.
 */
static void vpe10_power_on_gamcor_lut(struct dpp *dpp, bool power_on)
{
    PROGRAM_ENTRY();

    if (vpe_priv->init.debug.enable_mem_low_power.bits.cm) {
        if (power_on) {
            REG_SET_2(VPCM_MEM_PWR_CTRL, REG_DEFAULT(VPCM_MEM_PWR_CTRL), GAMCOR_MEM_PWR_DIS, 0,
                GAMCOR_MEM_PWR_FORCE, 0);

            // two dummy updates (10-15clks each) for wake up delay
            REG_SET_2(VPCM_MEM_PWR_CTRL, REG_DEFAULT(VPCM_MEM_PWR_CTRL), GAMCOR_MEM_PWR_DIS, 0,
                GAMCOR_MEM_PWR_FORCE, 0);
            REG_SET_2(VPCM_MEM_PWR_CTRL, REG_DEFAULT(VPCM_MEM_PWR_CTRL), GAMCOR_MEM_PWR_DIS, 0,
                GAMCOR_MEM_PWR_FORCE, 0);
        } else {
            // force the LUT memory into its power-down state
            REG_SET_2(VPCM_MEM_PWR_CTRL, REG_DEFAULT(VPCM_MEM_PWR_CTRL), GAMCOR_MEM_PWR_DIS, 0,
                GAMCOR_MEM_PWR_FORCE, 3);
        }
    } else {
        // low-power feature off: disable automatic power gating while in use
        REG_SET_2(VPCM_MEM_PWR_CTRL, REG_DEFAULT(VPCM_MEM_PWR_CTRL), GAMCOR_MEM_PWR_DIS,
            power_on == true ? 1 : 0, GAMCOR_MEM_PWR_FORCE, 0);
    }
}
48 
/* Prepare the GAMCOR LUT for programming: enable writes to all three
 * color channels (mask 7 = R|G|B) and reset the write index to 0.
 */
static void vpe10_configure_gamcor_lut(struct dpp *dpp)
{
    PROGRAM_ENTRY();

    REG_SET(VPCM_GAMCOR_LUT_CONTROL, 0, VPCM_GAMCOR_LUT_WRITE_COLOR_MASK, 7);
    REG_SET(VPCM_GAMCOR_LUT_INDEX, 0, VPCM_GAMCOR_LUT_INDEX, 0);
}
56 
/* Populate the generic transfer-function register descriptor with the
 * shift/mask values of the GAMCOR RAMA register fields, so the shared
 * CM helper can program the curve without knowing GAMCOR field layouts.
 * The *_B (blue) fields are used as the template; the same shifts/masks
 * apply to all channels.
 */
static void vpe10_dpp_gamcor_reg_field(struct dpp *dpp, struct vpe10_xfer_func_reg *reg)
{
    struct vpe10_dpp *vpe10_dpp = (struct vpe10_dpp *)dpp;

    // region start base / offset
    reg->shifts.field_region_start_base =
        vpe10_dpp->shift->VPCM_GAMCOR_RAMA_EXP_REGION_START_BASE_B;
    reg->masks.field_region_start_base = vpe10_dpp->mask->VPCM_GAMCOR_RAMA_EXP_REGION_START_BASE_B;
    reg->shifts.field_offset           = vpe10_dpp->shift->VPCM_GAMCOR_RAMA_OFFSET_B;
    reg->masks.field_offset            = vpe10_dpp->mask->VPCM_GAMCOR_RAMA_OFFSET_B;

    // per-region LUT offset / segment count (region pair 0/1 layout)
    reg->shifts.exp_region0_lut_offset = vpe10_dpp->shift->VPCM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET;
    reg->masks.exp_region0_lut_offset  = vpe10_dpp->mask->VPCM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET;
    reg->shifts.exp_region0_num_segments =
        vpe10_dpp->shift->VPCM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS;
    reg->masks.exp_region0_num_segments =
        vpe10_dpp->mask->VPCM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS;
    reg->shifts.exp_region1_lut_offset = vpe10_dpp->shift->VPCM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET;
    reg->masks.exp_region1_lut_offset  = vpe10_dpp->mask->VPCM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET;
    reg->shifts.exp_region1_num_segments =
        vpe10_dpp->shift->VPCM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS;
    reg->masks.exp_region1_num_segments =
        vpe10_dpp->mask->VPCM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS;

    // curve end point / slopes and start point
    reg->shifts.field_region_end       = vpe10_dpp->shift->VPCM_GAMCOR_RAMA_EXP_REGION_END_B;
    reg->masks.field_region_end        = vpe10_dpp->mask->VPCM_GAMCOR_RAMA_EXP_REGION_END_B;
    reg->shifts.field_region_end_slope = vpe10_dpp->shift->VPCM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B;
    reg->masks.field_region_end_slope  = vpe10_dpp->mask->VPCM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B;
    reg->shifts.field_region_end_base  = vpe10_dpp->shift->VPCM_GAMCOR_RAMA_EXP_REGION_END_BASE_B;
    reg->masks.field_region_end_base   = vpe10_dpp->mask->VPCM_GAMCOR_RAMA_EXP_REGION_END_BASE_B;
    reg->shifts.field_region_linear_slope =
        vpe10_dpp->shift->VPCM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B;
    reg->masks.field_region_linear_slope =
        vpe10_dpp->mask->VPCM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B;
    reg->shifts.exp_region_start = vpe10_dpp->shift->VPCM_GAMCOR_RAMA_EXP_REGION_START_B;
    reg->masks.exp_region_start  = vpe10_dpp->mask->VPCM_GAMCOR_RAMA_EXP_REGION_START_B;
    reg->shifts.exp_region_start_segment =
        vpe10_dpp->shift->VPCM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B;
    reg->masks.exp_region_start_segment =
        vpe10_dpp->mask->VPCM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B;
}
97 
/* Write the PWL base values into the GAMCOR LUT.
 *
 * dpp: DPP instance to program.
 * rgb: PWL result points; rgb[num] supplies the final base value per channel.
 * num: number of HW curve points.
 *
 * HW auto-increments the LUT index on back-to-back data writes.  When all
 * three channels carry the same curve, one pass with the full color mask
 * programs R/G/B at once; otherwise each channel is written in its own
 * pass through its write mask (4 = R, 2 = G, 1 = B), resetting the LUT
 * index between passes.
 */
static void vpe10_dpp_program_gammcor_lut(
    struct dpp *dpp, const struct pwl_result_data *rgb, uint32_t num)
{
    uint32_t last_base_value_red   = rgb[num].red_reg;
    // Fix: the green and blue last-base values were swapped (green read
    // blue_reg and blue read green_reg), corrupting the final LUT entry
    // of each channel whenever the per-channel curves differ.
    uint32_t last_base_value_green = rgb[num].green_reg;
    uint32_t last_base_value_blue  = rgb[num].blue_reg;

    PROGRAM_ENTRY();

    /*fill in the LUT with all base values to be used by pwl module
     * HW auto increments the LUT index: back-to-back write
     */
    if (vpe_is_rgb_equal(rgb, num)) {
        // identical curves: single pass covers all channels
        vpe10_cm_helper_program_pwl(config_writer, rgb, last_base_value_red, num,
            REG_OFFSET(VPCM_GAMCOR_LUT_DATA), REG_FIELD_SHIFT(VPCM_GAMCOR_LUT_DATA),
            REG_FIELD_MASK(VPCM_GAMCOR_LUT_DATA), CM_PWL_R);
    } else {
        // red pass
        REG_UPDATE(VPCM_GAMCOR_LUT_CONTROL, VPCM_GAMCOR_LUT_WRITE_COLOR_MASK, 4);

        vpe10_cm_helper_program_pwl(config_writer, rgb, last_base_value_red, num,
            REG_OFFSET(VPCM_GAMCOR_LUT_DATA), REG_FIELD_SHIFT(VPCM_GAMCOR_LUT_DATA),
            REG_FIELD_MASK(VPCM_GAMCOR_LUT_DATA), CM_PWL_R);

        // green pass
        REG_SET(VPCM_GAMCOR_LUT_INDEX, 0, VPCM_GAMCOR_LUT_INDEX, 0);
        REG_UPDATE(VPCM_GAMCOR_LUT_CONTROL, VPCM_GAMCOR_LUT_WRITE_COLOR_MASK, 2);

        vpe10_cm_helper_program_pwl(config_writer, rgb, last_base_value_green, num,
            REG_OFFSET(VPCM_GAMCOR_LUT_DATA), REG_FIELD_SHIFT(VPCM_GAMCOR_LUT_DATA),
            REG_FIELD_MASK(VPCM_GAMCOR_LUT_DATA), CM_PWL_G);

        // blue pass
        REG_SET(VPCM_GAMCOR_LUT_INDEX, 0, VPCM_GAMCOR_LUT_INDEX, 0);
        REG_UPDATE(VPCM_GAMCOR_LUT_CONTROL, VPCM_GAMCOR_LUT_WRITE_COLOR_MASK, 1);

        vpe10_cm_helper_program_pwl(config_writer, rgb, last_base_value_blue, num,
            REG_OFFSET(VPCM_GAMCOR_LUT_DATA), REG_FIELD_SHIFT(VPCM_GAMCOR_LUT_DATA),
            REG_FIELD_MASK(VPCM_GAMCOR_LUT_DATA), CM_PWL_B);
    }
}
136 
/* Program the GAMCOR (gamma-correction) block with a PWL curve.
 *
 * dpp:    DPP instance to program.
 * params: PWL curve parameters; NULL (or the bypass_gamcor debug flag)
 *         selects bypass and powers the LUT memory down.
 *
 * Sequence: enable the CM block, power on and prepare the LUT, select
 * programmable-RAM mode, fill in the RAMA register offsets for the
 * shared CM helper, program the curve descriptors, then write the LUT
 * base values.
 */
static void vpe10_dpp_program_gamcor_lut(struct dpp *dpp, const struct pwl_params *params)
{
    struct vpe10_xfer_func_reg gam_regs = {0};

    PROGRAM_ENTRY();

    vpe10_enable_cm_block(dpp);

    if (dpp->vpe_priv->init.debug.bypass_gamcor || params == NULL) {
        // bypass: mode 0 and power the LUT memory down
        REG_SET(VPCM_GAMCOR_CONTROL, 0, VPCM_GAMCOR_MODE, 0);
        vpe10_power_on_gamcor_lut(dpp, false);
        return;
    }

    vpe10_power_on_gamcor_lut(dpp, true);
    vpe10_configure_gamcor_lut(dpp);

    REG_SET(VPCM_GAMCOR_CONTROL, 0, VPCM_GAMCOR_MODE, 2); // programmable RAM

    // register offsets of the RAMA curve-descriptor registers, consumed by
    // the generic CM transfer-function helper
    gam_regs.start_cntl_b       = REG_OFFSET(VPCM_GAMCOR_RAMA_START_CNTL_B);
    gam_regs.start_cntl_g       = REG_OFFSET(VPCM_GAMCOR_RAMA_START_CNTL_G);
    gam_regs.start_cntl_r       = REG_OFFSET(VPCM_GAMCOR_RAMA_START_CNTL_R);
    gam_regs.start_slope_cntl_b = REG_OFFSET(VPCM_GAMCOR_RAMA_START_SLOPE_CNTL_B);
    gam_regs.start_slope_cntl_g = REG_OFFSET(VPCM_GAMCOR_RAMA_START_SLOPE_CNTL_G);
    gam_regs.start_slope_cntl_r = REG_OFFSET(VPCM_GAMCOR_RAMA_START_SLOPE_CNTL_R);
    gam_regs.start_end_cntl1_b  = REG_OFFSET(VPCM_GAMCOR_RAMA_END_CNTL1_B);
    gam_regs.start_end_cntl2_b  = REG_OFFSET(VPCM_GAMCOR_RAMA_END_CNTL2_B);
    gam_regs.start_end_cntl1_g  = REG_OFFSET(VPCM_GAMCOR_RAMA_END_CNTL1_G);
    gam_regs.start_end_cntl2_g  = REG_OFFSET(VPCM_GAMCOR_RAMA_END_CNTL2_G);
    gam_regs.start_end_cntl1_r  = REG_OFFSET(VPCM_GAMCOR_RAMA_END_CNTL1_R);
    gam_regs.start_end_cntl2_r  = REG_OFFSET(VPCM_GAMCOR_RAMA_END_CNTL2_R);
    gam_regs.region_start       = REG_OFFSET(VPCM_GAMCOR_RAMA_REGION_0_1);
    gam_regs.region_end         = REG_OFFSET(VPCM_GAMCOR_RAMA_REGION_32_33);
    gam_regs.offset_b           = REG_OFFSET(VPCM_GAMCOR_RAMA_OFFSET_B);
    gam_regs.offset_g           = REG_OFFSET(VPCM_GAMCOR_RAMA_OFFSET_G);
    gam_regs.offset_r           = REG_OFFSET(VPCM_GAMCOR_RAMA_OFFSET_R);
    gam_regs.start_base_cntl_b  = REG_OFFSET(VPCM_GAMCOR_RAMA_START_BASE_CNTL_B);
    gam_regs.start_base_cntl_g  = REG_OFFSET(VPCM_GAMCOR_RAMA_START_BASE_CNTL_G);
    gam_regs.start_base_cntl_r  = REG_OFFSET(VPCM_GAMCOR_RAMA_START_BASE_CNTL_R);

    vpe10_dpp_gamcor_reg_field(dpp, &gam_regs);

    vpe10_cm_helper_program_gamcor_xfer_func(config_writer, params, &gam_regs);
    vpe10_dpp_program_gammcor_lut(dpp, params->rgb_resulted, params->hw_points_num);
}
183 
vpe10_dpp_program_input_transfer_func(struct dpp * dpp,struct transfer_func * input_tf)184 void vpe10_dpp_program_input_transfer_func(struct dpp *dpp, struct transfer_func *input_tf)
185 {
186     struct pwl_params *params = NULL;
187 
188     PROGRAM_ENTRY();
189 
190     // There should always have input_tf
191     VPE_ASSERT(input_tf);
192     // Only accept either DISTRIBUTED_POINTS or BYPASS
193     // No support for PREDEFINED case
194     VPE_ASSERT(input_tf->type == TF_TYPE_DISTRIBUTED_POINTS || input_tf->type == TF_TYPE_BYPASS);
195 
196     // VPE always do NL scaling using gamcor, thus skipping dgam (default bypass)
197     // dpp->funcs->program_pre_dgam(dpp, tf);
198     if (input_tf->type == TF_TYPE_DISTRIBUTED_POINTS) {
199         if (!input_tf->use_pre_calculated_table || dpp->vpe_priv->init.debug.force_tf_calculation) {
200             vpe10_cm_helper_translate_curve_to_degamma_hw_format(input_tf, &dpp->degamma_params);
201             params = &dpp->degamma_params;
202         } else {
203             vpe10_cm_get_tf_pwl_params(input_tf, &params, CM_DEGAM);
204             VPE_ASSERT(params != NULL);
205             if (params == NULL)
206                 return;
207         }
208     }
209     vpe10_dpp_program_gamcor_lut(dpp, params);
210 }
211 
vpe10_dpp_program_gamut_remap(struct dpp * dpp,struct colorspace_transform * gamut_remap)212 void vpe10_dpp_program_gamut_remap(struct dpp *dpp, struct colorspace_transform *gamut_remap)
213 {
214     struct color_matrices_reg gam_regs;
215     uint16_t                  arr_reg_val[12];
216 
217     PROGRAM_ENTRY();
218 
219     if (!gamut_remap || !gamut_remap->enable_remap ||
220         dpp->vpe_priv->init.debug.bypass_dpp_gamut_remap) {
221         REG_SET(VPCM_GAMUT_REMAP_CONTROL, 0, VPCM_GAMUT_REMAP_MODE, 0);
222         return;
223     }
224 
225     gam_regs.shifts.csc_c11 = REG_FIELD_SHIFT(VPCM_GAMUT_REMAP_C11);
226     gam_regs.masks.csc_c11  = REG_FIELD_MASK(VPCM_GAMUT_REMAP_C11);
227     gam_regs.shifts.csc_c12 = REG_FIELD_SHIFT(VPCM_GAMUT_REMAP_C12);
228     gam_regs.masks.csc_c12  = REG_FIELD_MASK(VPCM_GAMUT_REMAP_C12);
229     gam_regs.csc_c11_c12    = REG_OFFSET(VPCM_GAMUT_REMAP_C11_C12);
230     gam_regs.csc_c33_c34    = REG_OFFSET(VPCM_GAMUT_REMAP_C33_C34);
231 
232     conv_convert_float_matrix(arr_reg_val, gamut_remap->matrix, 12);
233 
234     vpe10_cm_helper_program_color_matrices(config_writer, arr_reg_val, &gam_regs);
235 
236     REG_SET(VPCM_GAMUT_REMAP_CONTROL, 0, VPCM_GAMUT_REMAP_MODE, 1);
237 }
238 
239 /*program post scaler scs block in dpp CM*/
vpe10_dpp_program_post_csc(struct dpp * dpp,enum color_space color_space,enum input_csc_select input_select,struct vpe_csc_matrix * input_cs)240 void vpe10_dpp_program_post_csc(struct dpp *dpp, enum color_space color_space,
241     enum input_csc_select input_select, struct vpe_csc_matrix *input_cs)
242 {
243     PROGRAM_ENTRY();
244     int             i;
245     int             arr_size = sizeof(vpe_input_csc_matrix_fixed) / sizeof(struct vpe_csc_matrix);
246     const uint16_t *regval   = NULL;
247     struct color_matrices_reg gam_regs;
248 
249     if (input_select == INPUT_CSC_SELECT_BYPASS || dpp->vpe_priv->init.debug.bypass_post_csc) {
250         REG_SET(VPCM_POST_CSC_CONTROL, 0, VPCM_POST_CSC_MODE, 0);
251         return;
252     }
253 
254     if (input_cs == NULL) {
255         for (i = 0; i < arr_size; i++)
256             if (vpe_input_csc_matrix_fixed[i].cs == color_space) {
257                 regval = vpe_input_csc_matrix_fixed[i].regval;
258                 break;
259             }
260 
261         if (regval == NULL) {
262             VPE_ASSERT(0);
263             return;
264         }
265     } else {
266         regval = input_cs->regval;
267     }
268 
269     /* Always use the only one set of CSC matrix
270      */
271 
272     gam_regs.shifts.csc_c11 = REG_FIELD_SHIFT(VPCM_POST_CSC_C11);
273     gam_regs.masks.csc_c11  = REG_FIELD_MASK(VPCM_POST_CSC_C11);
274     gam_regs.shifts.csc_c12 = REG_FIELD_SHIFT(VPCM_POST_CSC_C12);
275     gam_regs.masks.csc_c12  = REG_FIELD_MASK(VPCM_POST_CSC_C12);
276     gam_regs.csc_c11_c12    = REG_OFFSET(VPCM_POST_CSC_C11_C12);
277     gam_regs.csc_c33_c34    = REG_OFFSET(VPCM_POST_CSC_C33_C34);
278 
279     vpe10_cm_helper_program_color_matrices(config_writer, regval, &gam_regs);
280 
281     REG_SET(VPCM_POST_CSC_CONTROL, 0, VPCM_POST_CSC_MODE, input_select);
282 }
283 
/* Set the HDR multiplier coefficient applied in the DPP CM pipeline.
 *
 * dpp:        DPP instance to program.
 * multiplier: raw HW coefficient value written to VPCM_HDR_MULT_COEF.
 */
void vpe10_dpp_set_hdr_multiplier(struct dpp *dpp, uint32_t multiplier)
{
    PROGRAM_ENTRY();

    REG_SET(VPCM_HDR_MULT_COEF, REG_DEFAULT(VPCM_HDR_MULT_COEF), VPCM_HDR_MULT_COEF, multiplier);
}
290