• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright 2020 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 #include "dm_services.h"
27 #include "core_types.h"
28 #include "reg_helper.h"
29 #include "dcn30_dpp.h"
30 #include "basics/conversion.h"
31 #include "dcn30_cm_common.h"
32 
33 #define REG(reg)\
34 	dpp->tf_regs->reg
35 
36 #define CTX \
37 	dpp->base.ctx
38 
39 #undef FN
40 #define FN(reg_name, field_name) \
41 	dpp->tf_shift->field_name, dpp->tf_mask->field_name
42 
dpp3_enable_cm_block(struct dpp * dpp_base)43 static void dpp3_enable_cm_block(
44 		struct dpp *dpp_base)
45 {
46 	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
47 
48 	unsigned int cm_bypass_mode = 0;
49 
50 	// debug option: put CM in bypass mode
51 	if (dpp_base->ctx->dc->debug.cm_in_bypass)
52 		cm_bypass_mode = 1;
53 
54 	REG_UPDATE(CM_CONTROL, CM_BYPASS, cm_bypass_mode);
55 }
56 
dpp30_get_gamcor_current(struct dpp * dpp_base)57 static enum dc_lut_mode dpp30_get_gamcor_current(struct dpp *dpp_base)
58 {
59 	enum dc_lut_mode mode;
60 	uint32_t state_mode;
61 	uint32_t lut_mode;
62 	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
63 
64 	REG_GET(CM_GAMCOR_CONTROL,
65 			CM_GAMCOR_MODE_CURRENT, &state_mode);
66 
67 		if (state_mode == 0)
68 			mode = LUT_BYPASS;
69 
70 		if (state_mode == 2) {//Programmable RAM LUT
71 			REG_GET(CM_GAMCOR_CONTROL,
72 					CM_GAMCOR_SELECT_CURRENT, &lut_mode);
73 
74 			if (lut_mode == 0)
75 				mode = LUT_RAM_A;
76 			else
77 				mode = LUT_RAM_B;
78 		}
79 
80 		return mode;
81 }
82 
/*
 * Stream the PWL base values into the gamma-correction LUT.
 *
 * The hardware auto-increments CM_GAMCOR_LUT_INDEX on each data write, so
 * each channel is written with back-to-back writes to CM_GAMCOR_LUT_DATA.
 * When all three channels are identical a single pass suffices (the write
 * color mask was set to 7 beforehand, so one stream lands in R, G and B);
 * otherwise each channel gets its own pass with the matching one-hot color
 * mask and the index rewound to 0 in between.
 *
 * NOTE(review): is_ram_a is unused here; bank selection appears to be
 * handled separately via CM_GAMCOR_LUT_HOST_SEL — confirm.
 */
static void dpp3_program_gammcor_lut(
		struct dpp *dpp_base,
		const struct pwl_result_data *rgb,
		uint32_t num,
		bool is_ram_a)
{
	uint32_t i;
	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
	/* Final LUT entry: last base value plus its delta (end of curve). */
	uint32_t last_base_value_red = rgb[num-1].red_reg + rgb[num-1].delta_red_reg;
	uint32_t last_base_value_green = rgb[num-1].green_reg + rgb[num-1].delta_green_reg;
	uint32_t last_base_value_blue = rgb[num-1].blue_reg + rgb[num-1].delta_blue_reg;

	/*fill in the LUT with all base values to be used by pwl module
	 * HW auto increments the LUT index: back-to-back write
	 */
	if (is_rgb_equal(rgb,  num)) {
		/* Identical channels: one pass writes R, G and B at once. */
		for (i = 0 ; i < num; i++)
			REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, rgb[i].red_reg);

		REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, last_base_value_red);

	} else {
		/* Red pass (mask bit 2). */
		REG_UPDATE(CM_GAMCOR_LUT_CONTROL,
				CM_GAMCOR_LUT_WRITE_COLOR_MASK, 4);
		for (i = 0 ; i < num; i++)
			REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, rgb[i].red_reg);

		REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, last_base_value_red);

		/* Rewind the auto-incremented index before the next pass. */
		REG_SET(CM_GAMCOR_LUT_INDEX, 0, CM_GAMCOR_LUT_INDEX, 0);

		/* Green pass (mask bit 1). */
		REG_UPDATE(CM_GAMCOR_LUT_CONTROL,
				CM_GAMCOR_LUT_WRITE_COLOR_MASK, 2);
		for (i = 0 ; i < num; i++)
			REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, rgb[i].green_reg);

		REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, last_base_value_green);

		REG_SET(CM_GAMCOR_LUT_INDEX, 0, CM_GAMCOR_LUT_INDEX, 0);

		/* Blue pass (mask bit 0). */
		REG_UPDATE(CM_GAMCOR_LUT_CONTROL,
				CM_GAMCOR_LUT_WRITE_COLOR_MASK, 1);
		for (i = 0 ; i < num; i++)
			REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, rgb[i].blue_reg);

		REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, last_base_value_blue);
	}
}
131 
/*
 * Power the GAMCOR LUT memory up or down.
 *
 * Writes GAMCOR_MEM_PWR_DIS (0 when powering on, 1 otherwise), then reads
 * GAMCOR_MEM_PWR_STATE and breaks to the debugger if it is nonzero.
 *
 * NOTE(review): the PWR_STATE check also runs in the power_on == false
 * case, where a powered-down (nonzero?) state would presumably be the
 * expected outcome — confirm intended semantics against the register spec.
 */
static void dpp3_power_on_gamcor_lut(
		struct dpp *dpp_base,
	bool power_on)
{
	uint32_t power_status;
	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);


	REG_SET(CM_MEM_PWR_CTRL, 0,
			GAMCOR_MEM_PWR_DIS, power_on == true ? 0:1);

	REG_GET(CM_MEM_PWR_STATUS, GAMCOR_MEM_PWR_STATE, &power_status);
	if (power_status != 0)
		BREAK_TO_DEBUGGER();


}
149 
/*
 * Program the CM de-alpha stage.
 *
 * @enable:            written to CM_DEALPHA_EN
 * @additive_blending: written to CM_DEALPHA_ABLND
 */
void dpp3_program_cm_dealpha(
		struct dpp *dpp_base,
	uint32_t enable, uint32_t additive_blending)
{
	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);

	/* Both fields live in CM_DEALPHA; set them in one shot. */
	REG_SET_2(CM_DEALPHA, 0,
		  CM_DEALPHA_EN, enable,
		  CM_DEALPHA_ABLND, additive_blending);
}
160 
dpp3_program_cm_bias(struct dpp * dpp_base,struct CM_bias_params * bias_params)161 void dpp3_program_cm_bias(
162 	struct dpp *dpp_base,
163 	struct CM_bias_params *bias_params)
164 {
165 	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
166 
167 	REG_SET(CM_BIAS_CR_R, 0, CM_BIAS_CR_R, bias_params->cm_bias_cr_r);
168 	REG_SET_2(CM_BIAS_Y_G_CB_B, 0,
169 			CM_BIAS_Y_G, bias_params->cm_bias_y_g,
170 			CM_BIAS_CB_B, bias_params->cm_bias_cb_b);
171 }
172 
dpp3_gamcor_reg_field(struct dcn3_dpp * dpp,struct dcn3_xfer_func_reg * reg)173 static void dpp3_gamcor_reg_field(
174 		struct dcn3_dpp *dpp,
175 		struct dcn3_xfer_func_reg *reg)
176 {
177 
178 	reg->shifts.field_region_start_base = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B;
179 	reg->masks.field_region_start_base = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B;
180 	reg->shifts.field_offset = dpp->tf_shift->CM_GAMCOR_RAMA_OFFSET_B;
181 	reg->masks.field_offset = dpp->tf_mask->CM_GAMCOR_RAMA_OFFSET_B;
182 
183 	reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET;
184 	reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET;
185 	reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS;
186 	reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS;
187 	reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET;
188 	reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET;
189 	reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS;
190 	reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS;
191 
192 	reg->shifts.field_region_end = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_END_B;
193 	reg->masks.field_region_end = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_END_B;
194 	reg->shifts.field_region_end_slope = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B;
195 	reg->masks.field_region_end_slope = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B;
196 	reg->shifts.field_region_end_base = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B;
197 	reg->masks.field_region_end_base = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B;
198 	reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B;
199 	reg->masks.field_region_linear_slope = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B;
200 	reg->shifts.exp_region_start = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_START_B;
201 	reg->masks.exp_region_start = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_START_B;
202 	reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B;
203 	reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B;
204 }
205 
/*
 * Prepare the GAMCOR LUT for host writes: enable all color channels,
 * point host access at the requested RAM bank, and rewind the index.
 */
static void dpp3_configure_gamcor_lut(
		struct dpp *dpp_base,
		bool is_ram_a)
{
	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);

	/* Write R, G and B on every data write (mask = 7). */
	REG_UPDATE(CM_GAMCOR_LUT_CONTROL,
			CM_GAMCOR_LUT_WRITE_COLOR_MASK, 7);
	/* Host writes target bank A (0) or bank B (1). */
	REG_UPDATE(CM_GAMCOR_LUT_CONTROL,
			CM_GAMCOR_LUT_HOST_SEL, is_ram_a ? 0 : 1);
	/* Start writing at entry 0. */
	REG_SET(CM_GAMCOR_LUT_INDEX, 0, CM_GAMCOR_LUT_INDEX, 0);
}
218 
219 
/*
 * Program the gamma-correction (GAMCOR) transfer function from PWL params.
 *
 * Double-buffered: the LUT bank (RAM A/B) not currently read by hardware
 * is programmed and then selected for the next frame, so the active frame
 * is never disturbed.  Passing NULL params puts GAMCOR into bypass.
 *
 * Returns true when a LUT was programmed, false when bypassed.
 */
bool dpp3_program_gamcor_lut(
	struct dpp *dpp_base, const struct pwl_params *params)
{
	enum dc_lut_mode current_mode;
	enum dc_lut_mode next_mode;
	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
	struct dcn3_xfer_func_reg gam_regs;

	dpp3_enable_cm_block(dpp_base);

	if (params == NULL) { //bypass if we have no pwl data
		REG_SET(CM_GAMCOR_CONTROL, 0, CM_GAMCOR_MODE, 0);
		return false;
	}
	dpp3_power_on_gamcor_lut(dpp_base, true);
	REG_SET(CM_GAMCOR_CONTROL, 0, CM_GAMCOR_MODE, 2);

	/* Ping-pong: write whichever bank the hardware is not reading. */
	current_mode = dpp30_get_gamcor_current(dpp_base);
	if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A)
		next_mode = LUT_RAM_B;
	else
		next_mode = LUT_RAM_A;

	/* NOTE(review): this second power-on call looks redundant (already
	 * done above) — confirm whether it can be dropped.
	 */
	dpp3_power_on_gamcor_lut(dpp_base, true);
	dpp3_configure_gamcor_lut(dpp_base, next_mode == LUT_RAM_A ? true:false);

	/* Select the per-bank register addresses; RAMA/RAMB share layout. */
	if (next_mode == LUT_RAM_B) {
		gam_regs.start_cntl_b = REG(CM_GAMCOR_RAMB_START_CNTL_B);
		gam_regs.start_cntl_g = REG(CM_GAMCOR_RAMB_START_CNTL_G);
		gam_regs.start_cntl_r = REG(CM_GAMCOR_RAMB_START_CNTL_R);
		gam_regs.start_slope_cntl_b = REG(CM_GAMCOR_RAMB_START_SLOPE_CNTL_B);
		gam_regs.start_slope_cntl_g = REG(CM_GAMCOR_RAMB_START_SLOPE_CNTL_G);
		gam_regs.start_slope_cntl_r = REG(CM_GAMCOR_RAMB_START_SLOPE_CNTL_R);
		gam_regs.start_end_cntl1_b = REG(CM_GAMCOR_RAMB_END_CNTL1_B);
		gam_regs.start_end_cntl2_b = REG(CM_GAMCOR_RAMB_END_CNTL2_B);
		gam_regs.start_end_cntl1_g = REG(CM_GAMCOR_RAMB_END_CNTL1_G);
		gam_regs.start_end_cntl2_g = REG(CM_GAMCOR_RAMB_END_CNTL2_G);
		gam_regs.start_end_cntl1_r = REG(CM_GAMCOR_RAMB_END_CNTL1_R);
		gam_regs.start_end_cntl2_r = REG(CM_GAMCOR_RAMB_END_CNTL2_R);
		gam_regs.region_start = REG(CM_GAMCOR_RAMB_REGION_0_1);
		gam_regs.region_end = REG(CM_GAMCOR_RAMB_REGION_32_33);
		//New registers in DCN3AG/DCN GAMCOR block
		gam_regs.offset_b =  REG(CM_GAMCOR_RAMB_OFFSET_B);
		gam_regs.offset_g =  REG(CM_GAMCOR_RAMB_OFFSET_G);
		gam_regs.offset_r =  REG(CM_GAMCOR_RAMB_OFFSET_R);
		gam_regs.start_base_cntl_b = REG(CM_GAMCOR_RAMB_START_BASE_CNTL_B);
		gam_regs.start_base_cntl_g = REG(CM_GAMCOR_RAMB_START_BASE_CNTL_G);
		gam_regs.start_base_cntl_r = REG(CM_GAMCOR_RAMB_START_BASE_CNTL_R);
	} else {
		gam_regs.start_cntl_b = REG(CM_GAMCOR_RAMA_START_CNTL_B);
		gam_regs.start_cntl_g = REG(CM_GAMCOR_RAMA_START_CNTL_G);
		gam_regs.start_cntl_r = REG(CM_GAMCOR_RAMA_START_CNTL_R);
		gam_regs.start_slope_cntl_b = REG(CM_GAMCOR_RAMA_START_SLOPE_CNTL_B);
		gam_regs.start_slope_cntl_g = REG(CM_GAMCOR_RAMA_START_SLOPE_CNTL_G);
		gam_regs.start_slope_cntl_r = REG(CM_GAMCOR_RAMA_START_SLOPE_CNTL_R);
		gam_regs.start_end_cntl1_b = REG(CM_GAMCOR_RAMA_END_CNTL1_B);
		gam_regs.start_end_cntl2_b = REG(CM_GAMCOR_RAMA_END_CNTL2_B);
		gam_regs.start_end_cntl1_g = REG(CM_GAMCOR_RAMA_END_CNTL1_G);
		gam_regs.start_end_cntl2_g = REG(CM_GAMCOR_RAMA_END_CNTL2_G);
		gam_regs.start_end_cntl1_r = REG(CM_GAMCOR_RAMA_END_CNTL1_R);
		gam_regs.start_end_cntl2_r = REG(CM_GAMCOR_RAMA_END_CNTL2_R);
		gam_regs.region_start = REG(CM_GAMCOR_RAMA_REGION_0_1);
		gam_regs.region_end = REG(CM_GAMCOR_RAMA_REGION_32_33);
		//New registers in DCN3AG/DCN GAMCOR block
		gam_regs.offset_b =  REG(CM_GAMCOR_RAMA_OFFSET_B);
		gam_regs.offset_g =  REG(CM_GAMCOR_RAMA_OFFSET_G);
		gam_regs.offset_r =  REG(CM_GAMCOR_RAMA_OFFSET_R);
		gam_regs.start_base_cntl_b = REG(CM_GAMCOR_RAMA_START_BASE_CNTL_B);
		gam_regs.start_base_cntl_g = REG(CM_GAMCOR_RAMA_START_BASE_CNTL_G);
		gam_regs.start_base_cntl_r = REG(CM_GAMCOR_RAMA_START_BASE_CNTL_R);
	}

	//get register fields
	dpp3_gamcor_reg_field(dpp, &gam_regs);

	//program register set for LUTA/LUTB
	cm_helper_program_gamcor_xfer_func(dpp_base->ctx, params, &gam_regs);

	dpp3_program_gammcor_lut(dpp_base, params->rgb_resulted, params->hw_points_num,
			next_mode == LUT_RAM_A ? true:false);

	//select Gamma LUT to use for next frame
	REG_UPDATE(CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT, next_mode == LUT_RAM_A ? 0:1);

	return true;
}
306 
/*
 * Program the HDR multiplier coefficient applied in the CM block.
 * The raw value is written to CM_HDR_MULT_COEF; the fixed-point format
 * is defined by the register, not visible here.
 */
void dpp3_set_hdr_multiplier(
		struct dpp *dpp_base,
		uint32_t multiplier)
{
	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);

	REG_UPDATE(CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, multiplier);
}
315 
316 
program_gamut_remap(struct dcn3_dpp * dpp,const uint16_t * regval,int select)317 static void program_gamut_remap(
318 		struct dcn3_dpp *dpp,
319 		const uint16_t *regval,
320 		int select)
321 {
322 	uint16_t selection = 0;
323 	struct color_matrices_reg gam_regs;
324 
325 	if (regval == NULL || select == GAMUT_REMAP_BYPASS) {
326 		REG_SET(CM_GAMUT_REMAP_CONTROL, 0,
327 				CM_GAMUT_REMAP_MODE, 0);
328 		return;
329 	}
330 	switch (select) {
331 	case GAMUT_REMAP_COEFF:
332 		selection = 1;
333 		break;
334 		/*this corresponds to GAMUT_REMAP coefficients set B
335 		 *we don't have common coefficient sets in dcn3ag/dcn3
336 		 */
337 	case GAMUT_REMAP_COMA_COEFF:
338 		selection = 2;
339 		break;
340 	default:
341 		break;
342 	}
343 
344 	gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11;
345 	gam_regs.masks.csc_c11  = dpp->tf_mask->CM_GAMUT_REMAP_C11;
346 	gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12;
347 	gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12;
348 
349 
350 	if (select == GAMUT_REMAP_COEFF) {
351 		gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12);
352 		gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34);
353 
354 		cm_helper_program_color_matrices(
355 				dpp->base.ctx,
356 				regval,
357 				&gam_regs);
358 
359 	} else  if (select == GAMUT_REMAP_COMA_COEFF) {
360 
361 		gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_B_C11_C12);
362 		gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_B_C33_C34);
363 
364 		cm_helper_program_color_matrices(
365 				dpp->base.ctx,
366 				regval,
367 				&gam_regs);
368 
369 	}
370 	//select coefficient set to use
371 	REG_SET(
372 			CM_GAMUT_REMAP_CONTROL, 0,
373 			CM_GAMUT_REMAP_MODE, selection);
374 }
375 
/*
 * Apply (or bypass) the software gamut-remap matrix.
 *
 * For GRAPHICS_GAMUT_ADJUST_TYPE_SW the 3x4 temperature matrix is
 * converted to the 16-bit fixed-point register format and programmed into
 * coefficient set A; any other adjust type (bypass/hw) disables the remap.
 */
void dpp3_cm_set_gamut_remap(
	struct dpp *dpp_base,
	const struct dpp_grph_csc_adjustment *adjust)
{
	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
	int i = 0;
	int gamut_mode;

	if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW)
		/* Bypass if type is bypass or hw */
		program_gamut_remap(dpp, NULL, GAMUT_REMAP_BYPASS);
	else {
		struct fixed31_32 arr_matrix[12];
		uint16_t arr_reg_val[12];

		for (i = 0; i < 12; i++)
			arr_matrix[i] = adjust->temperature_matrix[i];

		/* Fixed31_32 -> 16-bit register format. */
		convert_float_matrix(
			arr_reg_val, arr_matrix, 12);

		//current coefficient set in use
		REG_GET(CM_GAMUT_REMAP_CONTROL, CM_GAMUT_REMAP_MODE_CURRENT, &gamut_mode);

		/* NOTE(review): gamut_mode is computed (A/B ping-pong) but
		 * never used — the call below hard-codes coefficient set A,
		 * per the comment.  Dead code candidate; confirm before
		 * removing.
		 */
		if (gamut_mode == 0)
			gamut_mode = 1; //use coefficient set A
		else if (gamut_mode == 1)
			gamut_mode = 2;
		else
			gamut_mode = 1;

		//follow dcn2 approach for now - using only coefficient set A
		program_gamut_remap(dpp, arr_reg_val, GAMUT_REMAP_COEFF);
	}
}
411