• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 #include <linux/delay.h>
27 #include "dm_services.h"
28 #include "basics/dc_common.h"
29 #include "core_types.h"
30 #include "resource.h"
31 #include "custom_float.h"
32 #include "dcn10_hw_sequencer.h"
33 #include "dcn10_hw_sequencer_debug.h"
34 #include "dce/dce_hwseq.h"
35 #include "abm.h"
36 #include "dmcu.h"
37 #include "dcn10_optc.h"
38 #include "dcn10_dpp.h"
39 #include "dcn10_mpc.h"
40 #include "timing_generator.h"
41 #include "opp.h"
42 #include "ipp.h"
43 #include "mpc.h"
44 #include "reg_helper.h"
45 #include "dcn10_hubp.h"
46 #include "dcn10_hubbub.h"
47 #include "dcn10_cm_common.h"
48 #include "dc_link_dp.h"
49 #include "dccg.h"
50 #include "clk_mgr.h"
51 #include "link_hwss.h"
52 #include "dpcd_defs.h"
53 #include "dsc.h"
54 #include "dce/dmub_hw_lock_mgr.h"
55 
/* Logger init is a no-op in this file; DC_LOG_* macros resolve elsewhere. */
#define DC_LOGGER_INIT(logger)

/*
 * Register-access shorthands: every helper below is expected to have a
 * local "hws" (struct dce_hwseq *) in scope when using CTX/REG/FN.
 */
#define CTX \
	hws->ctx
#define REG(reg)\
	hws->regs->reg

#undef FN
#define FN(reg_name, field_name) \
	hws->shifts->field_name, hws->masks->field_name

/*print is 17 wide, first two characters are spaces*/
/* Expects locals "dc_ctx" and "log_ctx" in the calling scope. */
#define DTN_INFO_MICRO_SEC(ref_cycle) \
	print_microsec(dc_ctx, log_ctx, ref_cycle)

/* Number of hardware gamma LUT points programmed by this sequencer. */
#define GAMMA_HW_POINTS_NUM 256
72 
print_microsec(struct dc_context * dc_ctx,struct dc_log_buffer_ctx * log_ctx,uint32_t ref_cycle)73 void print_microsec(struct dc_context *dc_ctx,
74 	struct dc_log_buffer_ctx *log_ctx,
75 	uint32_t ref_cycle)
76 {
77 	const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
78 	static const unsigned int frac = 1000;
79 	uint32_t us_x10 = (ref_cycle * frac) / ref_clk_mhz;
80 
81 	DTN_INFO("  %11d.%03d",
82 			us_x10 / frac,
83 			us_x10 % frac);
84 }
85 
dcn10_lock_all_pipes(struct dc * dc,struct dc_state * context,bool lock)86 void dcn10_lock_all_pipes(struct dc *dc,
87 	struct dc_state *context,
88 	bool lock)
89 {
90 	struct pipe_ctx *pipe_ctx;
91 	struct timing_generator *tg;
92 	int i;
93 
94 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
95 		pipe_ctx = &context->res_ctx.pipe_ctx[i];
96 		tg = pipe_ctx->stream_res.tg;
97 
98 		/*
99 		 * Only lock the top pipe's tg to prevent redundant
100 		 * (un)locking. Also skip if pipe is disabled.
101 		 */
102 		if (pipe_ctx->top_pipe ||
103 		    !pipe_ctx->stream || !pipe_ctx->plane_state ||
104 		    !tg->funcs->is_tg_enabled(tg))
105 			continue;
106 
107 		if (lock)
108 			dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
109 		else
110 			dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
111 	}
112 }
113 
/*
 * Dump the MPC and DPP CRC result registers, when present on this ASIC
 * (REG(...) yields 0 for registers absent from the register map).
 */
static void log_mpc_crc(struct dc *dc,
	struct dc_log_buffer_ctx *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct dce_hwseq *hws = dc->hwseq;

	if (REG(MPC_CRC_RESULT_GB))
		DTN_INFO("MPC_CRC_RESULT_GB:%d MPC_CRC_RESULT_C:%d MPC_CRC_RESULT_AR:%d\n",
		REG_READ(MPC_CRC_RESULT_GB), REG_READ(MPC_CRC_RESULT_C), REG_READ(MPC_CRC_RESULT_AR));
	if (REG(DPP_TOP0_DPP_CRC_VAL_B_A))
		DTN_INFO("DPP_TOP0_DPP_CRC_VAL_B_A:%d DPP_TOP0_DPP_CRC_VAL_R_G:%d\n",
		REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
}
127 
/*
 * Dump the four HUBBUB watermark sets (urgent, self-refresh enter/exit,
 * DRAM clock change) as microsecond values via DTN_INFO_MICRO_SEC, which
 * uses the local "dc_ctx"/"log_ctx" names by macro convention.
 */
void dcn10_log_hubbub_state(struct dc *dc, struct dc_log_buffer_ctx *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct dcn_hubbub_wm wm;
	int i;

	memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
	dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm);

	DTN_INFO("HUBBUB WM:      data_urgent  pte_meta_urgent"
			"         sr_enter          sr_exit  dram_clk_change\n");

	/* NOTE(review): 4 watermark sets — presumably matches the size of
	 * wm.sets[] in struct dcn_hubbub_wm; confirm against the header.
	 */
	for (i = 0; i < 4; i++) {
		struct dcn_hubbub_wm_set *s;

		s = &wm.sets[i];
		DTN_INFO("WM_Set[%d]:", s->wm_set);
		DTN_INFO_MICRO_SEC(s->data_urgent);
		DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
		DTN_INFO_MICRO_SEC(s->sr_enter);
		DTN_INFO_MICRO_SEC(s->sr_exit);
		/* "chanage" is the actual (misspelled) field name in the
		 * struct definition elsewhere; not fixable here.
		 */
		DTN_INFO_MICRO_SEC(s->dram_clk_chanage);
		DTN_INFO("\n");
	}

	DTN_INFO("\n");
}
155 
dcn10_log_hubp_states(struct dc * dc,void * log_ctx)156 static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
157 {
158 	struct dc_context *dc_ctx = dc->ctx;
159 	struct resource_pool *pool = dc->res_pool;
160 	int i;
161 
162 	DTN_INFO(
163 		"HUBP:  format  addr_hi  width  height  rot  mir  sw_mode  dcc_en  blank_en  clock_en  ttu_dis  underflow   min_ttu_vblank       qos_low_wm      qos_high_wm\n");
164 	for (i = 0; i < pool->pipe_count; i++) {
165 		struct hubp *hubp = pool->hubps[i];
166 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
167 
168 		hubp->funcs->hubp_read_state(hubp);
169 
170 		if (!s->blank_en) {
171 			DTN_INFO("[%2d]:  %5xh  %6xh  %5d  %6d  %2xh  %2xh  %6xh  %6d  %8d  %8d  %7d  %8xh",
172 					hubp->inst,
173 					s->pixel_format,
174 					s->inuse_addr_hi,
175 					s->viewport_width,
176 					s->viewport_height,
177 					s->rotation_angle,
178 					s->h_mirror_en,
179 					s->sw_mode,
180 					s->dcc_en,
181 					s->blank_en,
182 					s->clock_en,
183 					s->ttu_disable,
184 					s->underflow_status);
185 			DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
186 			DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
187 			DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
188 			DTN_INFO("\n");
189 		}
190 	}
191 
192 	DTN_INFO("\n=========RQ========\n");
193 	DTN_INFO("HUBP:  drq_exp_m  prq_exp_m  mrq_exp_m  crq_exp_m  plane1_ba  L:chunk_s  min_chu_s  meta_ch_s"
194 		"  min_m_c_s  dpte_gr_s  mpte_gr_s  swath_hei  pte_row_h  C:chunk_s  min_chu_s  meta_ch_s"
195 		"  min_m_c_s  dpte_gr_s  mpte_gr_s  swath_hei  pte_row_h\n");
196 	for (i = 0; i < pool->pipe_count; i++) {
197 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
198 		struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
199 
200 		if (!s->blank_en)
201 			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
202 				pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
203 				rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
204 				rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
205 				rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
206 				rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
207 				rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
208 				rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
209 				rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
210 				rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
211 	}
212 
213 	DTN_INFO("========DLG========\n");
214 	DTN_INFO("HUBP:  rc_hbe     dlg_vbe    min_d_y_n  rc_per_ht  rc_x_a_s "
215 			"  dst_y_a_s  dst_y_pf   dst_y_vvb  dst_y_rvb  dst_y_vfl  dst_y_rfl  rf_pix_fq"
216 			"  vratio_pf  vrat_pf_c  rc_pg_vbl  rc_pg_vbc  rc_mc_vbl  rc_mc_vbc  rc_pg_fll"
217 			"  rc_pg_flc  rc_mc_fll  rc_mc_flc  pr_nom_l   pr_nom_c   rc_pg_nl   rc_pg_nc "
218 			"  mr_nom_l   mr_nom_c   rc_mc_nl   rc_mc_nc   rc_ld_pl   rc_ld_pc   rc_ld_l  "
219 			"  rc_ld_c    cha_cur0   ofst_cur1  cha_cur1   vr_af_vc0  ddrq_limt  x_rt_dlay"
220 			"  x_rp_dlay  x_rr_sfl\n");
221 	for (i = 0; i < pool->pipe_count; i++) {
222 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
223 		struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;
224 
225 		if (!s->blank_en)
226 			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh"
227 				"%  8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh"
228 				"  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
229 				pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
230 				dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
231 				dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
232 				dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
233 				dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
234 				dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
235 				dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
236 				dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
237 				dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
238 				dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
239 				dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
240 				dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
241 				dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
242 				dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
243 				dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
244 				dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
245 				dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
246 				dlg_regs->xfc_reg_remote_surface_flip_latency);
247 	}
248 
249 	DTN_INFO("========TTU========\n");
250 	DTN_INFO("HUBP:  qos_ll_wm  qos_lh_wm  mn_ttu_vb  qos_l_flp  rc_rd_p_l  rc_rd_l    rc_rd_p_c"
251 			"  rc_rd_c    rc_rd_c0   rc_rd_pc0  rc_rd_c1   rc_rd_pc1  qos_lf_l   qos_rds_l"
252 			"  qos_lf_c   qos_rds_c  qos_lf_c0  qos_rds_c0 qos_lf_c1  qos_rds_c1\n");
253 	for (i = 0; i < pool->pipe_count; i++) {
254 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
255 		struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;
256 
257 		if (!s->blank_en)
258 			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
259 				pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
260 				ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
261 				ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
262 				ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
263 				ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
264 				ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
265 				ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
266 	}
267 	DTN_INFO("\n");
268 }
269 
/*
 * Dump the full DCN hardware state to the DTN log: HUBBUB watermarks,
 * HUBP/RQ/DLG/TTU, DPP gamma and gamut state, MPCC tree, OTG timing,
 * DSC, stream/link encoders, calculated clocks and MPC CRCs.
 */
void dcn10_log_hw_state(struct dc *dc,
	struct dc_log_buffer_ctx *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct resource_pool *pool = dc->res_pool;
	int i;

	DTN_INFO_BEGIN();

	dcn10_log_hubbub_state(dc, log_ctx);

	dcn10_log_hubp_states(dc, log_ctx);

	DTN_INFO("DPP:    IGAM format  IGAM mode    DGAM mode    RGAM mode"
			"  GAMUT mode  C11 C12   C13 C14   C21 C22   C23 C24   "
			"C31 C32   C33 C34\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dpp *dpp = pool->dpps[i];
		struct dcn_dpp_state s = {0};

		dpp->funcs->dpp_read_state(dpp, &s);

		if (!s.is_enabled)
			continue;

		/* LUT mode fields are decoded to human-readable names;
		 * modes 2/3 (IGAM) and 3/4 (DGAM/RGAM) are both RAM-backed.
		 */
		DTN_INFO("[%2d]:  %11xh  %-11s  %-11s  %-11s"
				"%8x    %08xh %08xh %08xh %08xh %08xh %08xh",
				dpp->inst,
				s.igam_input_format,
				(s.igam_lut_mode == 0) ? "BypassFixed" :
					((s.igam_lut_mode == 1) ? "BypassFloat" :
					((s.igam_lut_mode == 2) ? "RAM" :
					((s.igam_lut_mode == 3) ? "RAM" :
								 "Unknown"))),
				(s.dgam_lut_mode == 0) ? "Bypass" :
					((s.dgam_lut_mode == 1) ? "sRGB" :
					((s.dgam_lut_mode == 2) ? "Ycc" :
					((s.dgam_lut_mode == 3) ? "RAM" :
					((s.dgam_lut_mode == 4) ? "RAM" :
								 "Unknown")))),
				(s.rgam_lut_mode == 0) ? "Bypass" :
					((s.rgam_lut_mode == 1) ? "sRGB" :
					((s.rgam_lut_mode == 2) ? "Ycc" :
					((s.rgam_lut_mode == 3) ? "RAM" :
					((s.rgam_lut_mode == 4) ? "RAM" :
								 "Unknown")))),
				s.gamut_remap_mode,
				s.gamut_remap_c11_c12,
				s.gamut_remap_c13_c14,
				s.gamut_remap_c21_c22,
				s.gamut_remap_c23_c24,
				s.gamut_remap_c31_c32,
				s.gamut_remap_c33_c34);
		DTN_INFO("\n");
	}
	DTN_INFO("\n");

	DTN_INFO("MPCC:  OPP  DPP  MPCCBOT  MODE  ALPHA_MODE  PREMULT  OVERLAP_ONLY  IDLE\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct mpcc_state s = {0};

		pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
		/* opp_id == 0xf means the MPCC is unassigned — skip it. */
		if (s.opp_id != 0xf)
			DTN_INFO("[%2d]:  %2xh  %2xh  %6xh  %4d  %10d  %7d  %12d  %4d\n",
				i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
				s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
				s.idle);
	}
	DTN_INFO("\n");

	DTN_INFO("OTG:  v_bs  v_be  v_ss  v_se  vpol  vmax  vmin  vmax_sel  vmin_sel  h_bs  h_be  h_ss  h_se  hpol  htot  vtot  underflow blank_en\n");

	for (i = 0; i < pool->timing_generator_count; i++) {
		struct timing_generator *tg = pool->timing_generators[i];
		struct dcn_otg_state s = {0};
		/* Read shared OTG state registers for all DCNx */
		optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);

		/*
		 * For DCN2 and greater, a register on the OPP is used to
		 * determine if the CRTC is blanked instead of the OTG. So use
		 * dpg_is_blanked() if exists, otherwise fallback on otg.
		 *
		 * TODO: Implement DCN-specific read_otg_state hooks.
		 */
		if (pool->opps[i]->funcs->dpg_is_blanked)
			s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i]);
		else
			s.blank_enabled = tg->funcs->is_blanked(tg);

		//only print if OTG master is enabled
		if ((s.otg_enabled & 1) == 0)
			continue;

		DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d %5d %5d %5d %5d  %9d %8d\n",
				tg->inst,
				s.v_blank_start,
				s.v_blank_end,
				s.v_sync_a_start,
				s.v_sync_a_end,
				s.v_sync_a_pol,
				s.v_total_max,
				s.v_total_min,
				s.v_total_max_sel,
				s.v_total_min_sel,
				s.h_blank_start,
				s.h_blank_end,
				s.h_sync_a_start,
				s.h_sync_a_end,
				s.h_sync_a_pol,
				s.h_total,
				s.v_total,
				s.underflow_occurred_status,
				s.blank_enabled);

		// Clear underflow for debug purposes
		// We want to keep underflow sticky bit on for the longevity tests outside of test environment.
		// This function is called only from Windows or Diags test environment, hence it's safe to clear
		// it from here without affecting the original intent.
		tg->funcs->clear_optc_underflow(tg);
	}
	DTN_INFO("\n");

	// dcn_dsc_state struct field bytes_per_pixel was renamed to bits_per_pixel
	// TODO: Update golden log header to reflect this name change
	DTN_INFO("DSC: CLOCK_EN  SLICE_WIDTH  Bytes_pp\n");
	for (i = 0; i < pool->res_cap->num_dsc; i++) {
		struct display_stream_compressor *dsc = pool->dscs[i];
		struct dcn_dsc_state s = {0};

		dsc->funcs->dsc_read_state(dsc, &s);
		DTN_INFO("[%d]: %-9d %-12d %-10d\n",
			dsc->inst,
			s.dsc_clock_en,
			s.dsc_slice_width,
			s.dsc_bits_per_pixel);
		DTN_INFO("\n");
	}
	DTN_INFO("\n");

	DTN_INFO("S_ENC: DSC_MODE  SEC_GSP7_LINE_NUM"
			"  VBID6_LINE_REFERENCE  VBID6_LINE_NUM  SEC_GSP7_ENABLE  SEC_STREAM_ENABLE\n");
	for (i = 0; i < pool->stream_enc_count; i++) {
		struct stream_encoder *enc = pool->stream_enc[i];
		struct enc_state s = {0};

		if (enc->funcs->enc_read_state) {
			enc->funcs->enc_read_state(enc, &s);
			DTN_INFO("[%-3d]: %-9d %-18d %-21d %-15d %-16d %-17d\n",
				enc->id,
				s.dsc_mode,
				s.sec_gsp_pps_line_num,
				s.vbid6_line_reference,
				s.vbid6_line_num,
				s.sec_gsp_pps_enable,
				s.sec_stream_enable);
			DTN_INFO("\n");
		}
	}
	DTN_INFO("\n");

	DTN_INFO("L_ENC: DPHY_FEC_EN  DPHY_FEC_READY_SHADOW  DPHY_FEC_ACTIVE_STATUS  DP_LINK_TRAINING_COMPLETE\n");
	for (i = 0; i < dc->link_count; i++) {
		struct link_encoder *lenc = dc->links[i]->link_enc;

		struct link_enc_state s = {0};

		if (lenc->funcs->read_state) {
			lenc->funcs->read_state(lenc, &s);
			DTN_INFO("[%-3d]: %-12d %-22d %-22d %-25d\n",
				i,
				s.dphy_fec_en,
				s.dphy_fec_ready_shadow,
				s.dphy_fec_active_status,
				s.dp_link_training_complete);
			DTN_INFO("\n");
		}
	}
	DTN_INFO("\n");

	DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d  dcfclk_deep_sleep_khz:%d  dispclk_khz:%d\n"
		"dppclk_khz:%d  max_supported_dppclk_khz:%d  fclk_khz:%d  socclk_khz:%d\n\n",
			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);

	log_mpc_crc(dc, log_ctx);

	DTN_INFO_END();
}
464 
dcn10_did_underflow_occur(struct dc * dc,struct pipe_ctx * pipe_ctx)465 bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx)
466 {
467 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
468 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
469 
470 	if (tg->funcs->is_optc_underflow_occurred(tg)) {
471 		tg->funcs->clear_optc_underflow(tg);
472 		return true;
473 	}
474 
475 	if (hubp->funcs->hubp_get_underflow_status(hubp)) {
476 		hubp->funcs->hubp_clear_underflow(hubp);
477 		return true;
478 	}
479 	return false;
480 }
481 
dcn10_enable_power_gating_plane(struct dce_hwseq * hws,bool enable)482 void dcn10_enable_power_gating_plane(
483 	struct dce_hwseq *hws,
484 	bool enable)
485 {
486 	bool force_on = true; /* disable power gating */
487 
488 	if (enable)
489 		force_on = false;
490 
491 	/* DCHUBP0/1/2/3 */
492 	REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
493 	REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
494 	REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
495 	REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);
496 
497 	/* DPP0/1/2/3 */
498 	REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
499 	REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
500 	REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
501 	REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
502 }
503 
dcn10_disable_vga(struct dce_hwseq * hws)504 void dcn10_disable_vga(
505 	struct dce_hwseq *hws)
506 {
507 	unsigned int in_vga1_mode = 0;
508 	unsigned int in_vga2_mode = 0;
509 	unsigned int in_vga3_mode = 0;
510 	unsigned int in_vga4_mode = 0;
511 
512 	REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
513 	REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
514 	REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
515 	REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);
516 
517 	if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
518 			in_vga3_mode == 0 && in_vga4_mode == 0)
519 		return;
520 
521 	REG_WRITE(D1VGA_CONTROL, 0);
522 	REG_WRITE(D2VGA_CONTROL, 0);
523 	REG_WRITE(D3VGA_CONTROL, 0);
524 	REG_WRITE(D4VGA_CONTROL, 0);
525 
526 	/* HW Engineer's Notes:
527 	 *  During switch from vga->extended, if we set the VGA_TEST_ENABLE and
528 	 *  then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
529 	 *
530 	 *  Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
531 	 *  VGA_TEST_ENABLE, to leave it in the same state as before.
532 	 */
533 	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
534 	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
535 }
536 
/*
 * Power-gate or un-gate one DPP domain and wait for the PGFSM to settle.
 * The register/field names are token-pasted by REG_UPDATE/REG_WAIT, so
 * each instance needs its own switch case.
 */
void dcn10_dpp_pg_control(
		struct dce_hwseq *hws,
		unsigned int dpp_inst,
		bool power_on)
{
	uint32_t power_gate = power_on ? 0 : 1;
	/* NOTE(review): PGFSM status 0 appears to mean powered-on and 2
	 * powered-down — confirm against the PGFSM register spec.
	 */
	uint32_t pwr_status = power_on ? 0 : 2;

	if (hws->ctx->dc->debug.disable_dpp_power_gate)
		return;
	/* Register absent from this ASIC's map — nothing to program. */
	if (REG(DOMAIN1_PG_CONFIG) == 0)
		return;

	switch (dpp_inst) {
	case 0: /* DPP0 */
		REG_UPDATE(DOMAIN1_PG_CONFIG,
				DOMAIN1_POWER_GATE, power_gate);

		/* Poll every 1 us, up to 1000 tries, for the target state. */
		REG_WAIT(DOMAIN1_PG_STATUS,
				DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 1: /* DPP1 */
		REG_UPDATE(DOMAIN3_PG_CONFIG,
				DOMAIN3_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN3_PG_STATUS,
				DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 2: /* DPP2 */
		REG_UPDATE(DOMAIN5_PG_CONFIG,
				DOMAIN5_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN5_PG_STATUS,
				DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 3: /* DPP3 */
		REG_UPDATE(DOMAIN7_PG_CONFIG,
				DOMAIN7_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN7_PG_STATUS,
				DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	default:
		BREAK_TO_DEBUGGER();
		break;
	}
}
588 
/*
 * Power-gate or un-gate one DCHUBP domain and wait for the PGFSM to
 * settle. Mirrors dcn10_dpp_pg_control but for the even-numbered
 * (HUBP) power domains.
 */
void dcn10_hubp_pg_control(
		struct dce_hwseq *hws,
		unsigned int hubp_inst,
		bool power_on)
{
	uint32_t power_gate = power_on ? 0 : 1;
	/* NOTE(review): PGFSM status 0 appears to mean powered-on and 2
	 * powered-down — confirm against the PGFSM register spec.
	 */
	uint32_t pwr_status = power_on ? 0 : 2;

	if (hws->ctx->dc->debug.disable_hubp_power_gate)
		return;
	/* Register absent from this ASIC's map — nothing to program. */
	if (REG(DOMAIN0_PG_CONFIG) == 0)
		return;

	switch (hubp_inst) {
	case 0: /* DCHUBP0 */
		REG_UPDATE(DOMAIN0_PG_CONFIG,
				DOMAIN0_POWER_GATE, power_gate);

		/* Poll every 1 us, up to 1000 tries, for the target state. */
		REG_WAIT(DOMAIN0_PG_STATUS,
				DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 1: /* DCHUBP1 */
		REG_UPDATE(DOMAIN2_PG_CONFIG,
				DOMAIN2_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN2_PG_STATUS,
				DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 2: /* DCHUBP2 */
		REG_UPDATE(DOMAIN4_PG_CONFIG,
				DOMAIN4_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN4_PG_STATUS,
				DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 3: /* DCHUBP3 */
		REG_UPDATE(DOMAIN6_PG_CONFIG,
				DOMAIN6_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN6_PG_STATUS,
				DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	default:
		BREAK_TO_DEBUGGER();
		break;
	}
}
640 
power_on_plane(struct dce_hwseq * hws,int plane_id)641 static void power_on_plane(
642 	struct dce_hwseq *hws,
643 	int plane_id)
644 {
645 	DC_LOGGER_INIT(hws->ctx->logger);
646 	if (REG(DC_IP_REQUEST_CNTL)) {
647 		REG_SET(DC_IP_REQUEST_CNTL, 0,
648 				IP_REQUEST_EN, 1);
649 
650 		if (hws->funcs.dpp_pg_control)
651 			hws->funcs.dpp_pg_control(hws, plane_id, true);
652 
653 		if (hws->funcs.hubp_pg_control)
654 			hws->funcs.hubp_pg_control(hws, plane_id, true);
655 
656 		REG_SET(DC_IP_REQUEST_CNTL, 0,
657 				IP_REQUEST_EN, 0);
658 		DC_LOG_DEBUG(
659 				"Un-gated front end for pipe %d\n", plane_id);
660 	}
661 }
662 
/*
 * Revert the DEGVIDCN10.253 workaround: re-blank HUBP0 and power-gate it
 * again. No-op if the workaround was never applied.
 */
static void undo_DEGVIDCN10_253_wa(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = dc->res_pool->hubps[0];

	if (!hws->wa_state.DEGVIDCN10_253_applied)
		return;

	/* Blank before gating so the pipe stops fetching. */
	hubp->funcs->set_blank(hubp, true);

	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 1);

	hws->funcs.hubp_pg_control(hws, 0, false);
	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 0);

	hws->wa_state.DEGVIDCN10_253_applied = false;
}
682 
apply_DEGVIDCN10_253_wa(struct dc * dc)683 static void apply_DEGVIDCN10_253_wa(struct dc *dc)
684 {
685 	struct dce_hwseq *hws = dc->hwseq;
686 	struct hubp *hubp = dc->res_pool->hubps[0];
687 	int i;
688 
689 	if (dc->debug.disable_stutter)
690 		return;
691 
692 	if (!hws->wa.DEGVIDCN10_253)
693 		return;
694 
695 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
696 		if (!dc->res_pool->hubps[i]->power_gated)
697 			return;
698 	}
699 
700 	/* all pipe power gated, apply work around to enable stutter. */
701 
702 	REG_SET(DC_IP_REQUEST_CNTL, 0,
703 			IP_REQUEST_EN, 1);
704 
705 	hws->funcs.hubp_pg_control(hws, 0, true);
706 	REG_SET(DC_IP_REQUEST_CNTL, 0,
707 			IP_REQUEST_EN, 0);
708 
709 	hubp->funcs->set_hubp_blank_en(hubp, false);
710 	hws->wa_state.DEGVIDCN10_253_applied = true;
711 }
712 
dcn10_bios_golden_init(struct dc * dc)713 void dcn10_bios_golden_init(struct dc *dc)
714 {
715 	struct dce_hwseq *hws = dc->hwseq;
716 	struct dc_bios *bp = dc->ctx->dc_bios;
717 	int i;
718 	bool allow_self_fresh_force_enable = true;
719 
720 	if (hws->funcs.s0i3_golden_init_wa && hws->funcs.s0i3_golden_init_wa(dc))
721 		return;
722 
723 	if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
724 		allow_self_fresh_force_enable =
725 				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);
726 
727 
728 	/* WA for making DF sleep when idle after resume from S0i3.
729 	 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set to 1 by
730 	 * command table, if DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0
731 	 * before calling command table and it changed to 1 after,
732 	 * it should be set back to 0.
733 	 */
734 
735 	/* initialize dcn global */
736 	bp->funcs->enable_disp_power_gating(bp,
737 			CONTROLLER_ID_D0, ASIC_PIPE_INIT);
738 
739 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
740 		/* initialize dcn per pipe */
741 		bp->funcs->enable_disp_power_gating(bp,
742 				CONTROLLER_ID_D0 + i, ASIC_PIPE_DISABLE);
743 	}
744 
745 	if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
746 		if (allow_self_fresh_force_enable == false &&
747 				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub))
748 			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
749 										!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
750 
751 }
752 
/*
 * Workaround for spurious OTG underflow reports: remember whether an
 * underflow was already pending, wait out the MPCC disconnects for this
 * stream, then clear the sticky bit only if it appeared during the wait
 * (a genuine pre-existing underflow is left visible).
 */
static void false_optc_underflow_wa(
		struct dc *dc,
		const struct dc_stream_state *stream,
		struct timing_generator *tg)
{
	int i;
	bool underflow;

	if (!dc->hwseq->wa.false_optc_underflow)
		return;

	/* Snapshot the sticky status BEFORE the waits below. */
	underflow = tg->funcs->is_optc_underflow_occurred(tg);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

		if (old_pipe_ctx->stream != stream)
			continue;

		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, old_pipe_ctx);
	}

	if (tg->funcs->set_blank_data_double_buffer)
		tg->funcs->set_blank_data_double_buffer(tg, true);

	/* Clear only an underflow that newly appeared during the waits. */
	if (tg->funcs->is_optc_underflow_occurred(tg) && !underflow)
		tg->funcs->clear_optc_underflow(tg);
}
781 
/*
 * Bring up the stream's timing generator: enable the OTG clock, program
 * the pixel clock PLL and timing, set the blank color, and start the
 * CRTC. Returns DC_OK, or DC_ERROR_UNEXPECTED if the PLL or CRTC enable
 * fails.
 */
enum dc_status dcn10_enable_stream_timing(
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context,
		struct dc *dc)
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	enum dc_color_space color_space;
	struct tg_color black_color = {0};

	/* by upper caller loop, pipe0 is parent pipe and be called first.
	 * back end is set up by for pipe0. Other children pipe share back end
	 * with pipe 0. No program is needed.
	 */
	if (pipe_ctx->top_pipe != NULL)
		return DC_OK;

	/* TODO check if timing_changed, disable stream if timing changed */

	/* HW program guide assume display already disable
	 * by unplug sequence. OTG assume stop.
	 */
	pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);

	if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
			pipe_ctx->clock_source,
			&pipe_ctx->stream_res.pix_clk_params,
			&pipe_ctx->pll_settings)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	pipe_ctx->stream_res.tg->funcs->program_timing(
			pipe_ctx->stream_res.tg,
			&stream->timing,
			pipe_ctx->pipe_dlg_param.vready_offset,
			pipe_ctx->pipe_dlg_param.vstartup_start,
			pipe_ctx->pipe_dlg_param.vupdate_offset,
			pipe_ctx->pipe_dlg_param.vupdate_width,
			pipe_ctx->stream->signal,
			true);

#if 0 /* move to after enable_crtc */
	/* TODO: OPP FMT, ABM. etc. should be done here. */
	/* or FPGA now. instance 0 only. TODO: move to opp.c */

	inst_offset = reg_offsets[pipe_ctx->stream_res.tg->inst].fmt;

	pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
				pipe_ctx->stream_res.opp,
				&stream->bit_depth_params,
				&stream->clamping);
#endif
	/* program otg blank color */
	color_space = stream->output_color_space;
	color_space_to_black_color(dc, color_space, &black_color);

	/*
	 * The way 420 is packed, 2 channels carry Y component, 1 channel
	 * alternate between Cb and Cr, so both channels need the pixel
	 * value for Y
	 */
	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
		black_color.color_r_cr = black_color.color_g_y;

	if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
		pipe_ctx->stream_res.tg->funcs->set_blank_color(
				pipe_ctx->stream_res.tg,
				&black_color);

	/* Blank the OTG (waiting for completion) before enabling the CRTC,
	 * applying the false-underflow workaround while blanked.
	 */
	if (pipe_ctx->stream_res.tg->funcs->is_blanked &&
			!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) {
		pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
		hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
		false_optc_underflow_wa(dc, pipe_ctx->stream, pipe_ctx->stream_res.tg);
	}

	/* VTG is  within DCHUB command block. DCFCLK is always on */
	if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	/* TODO program crtc source select for non-virtual signal*/
	/* TODO program FMT */
	/* TODO setup link_enc */
	/* TODO set stream attributes */
	/* TODO program audio */
	/* TODO enable stream if timing changed */
	/* TODO unblank stream if DP */

	return DC_OK;
}
874 
/* Tear down the back-end (stream/OTG) resources of a pipe.
 *
 * Disables the stream link/audio and, for the top (parent) pipe only,
 * shuts down the shared timing generator.  The stream pointer is
 * detached only when pipe_ctx belongs to the current state.
 */
static void dcn10_reset_back_end_for_pipe(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context)
{
	int i;
	struct dc_link *link;
	DC_LOGGER_INIT(dc->ctx->logger);
	/* No stream encoder means the back end was never brought up;
	 * just detach the stream pointer and return.
	 */
	if (pipe_ctx->stream_res.stream_enc == NULL) {
		pipe_ctx->stream = NULL;
		return;
	}

	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
		link = pipe_ctx->stream->link;
		/* DPMS may already disable or */
		/* dpms_off status is incorrect due to fastboot
		 * feature. When system resume from S4 with second
		 * screen only, the dpms_off would be true but
		 * VBIOS lit up eDP, so check link status too.
		 */
		if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
			core_link_disable_stream(pipe_ctx);
		else if (pipe_ctx->stream_res.audio)
			dc->hwss.disable_audio_stream(pipe_ctx);

		if (pipe_ctx->stream_res.audio) {
			/*disable az_endpoint*/
			pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);

			/*free audio*/
			if (dc->caps.dynamic_audio == true) {
				/*we have to dynamic arbitrate the audio endpoints*/
				/*we free the resource, need reset is_audio_acquired*/
				update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
						pipe_ctx->stream_res.audio, false);
				pipe_ctx->stream_res.audio = NULL;
			}
		}
	}

	/* by upper caller loop, parent pipe: pipe0, will be reset last.
	 * back end share by all pipes and will be disable only when disable
	 * parent pipe.
	 */
	if (pipe_ctx->top_pipe == NULL) {

		if (pipe_ctx->stream_res.abm)
			dc->hwss.set_abm_immediate_disable(pipe_ctx);

		pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);

		pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
		/* Disable dynamic refresh rate so the next enable starts clean. */
		if (pipe_ctx->stream_res.tg->funcs->set_drr)
			pipe_ctx->stream_res.tg->funcs->set_drr(
					pipe_ctx->stream_res.tg, NULL);
	}

	/* Only clear the stream pointer when pipe_ctx is part of the
	 * current state; a context being discarded is left untouched.
	 */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
			break;

	if (i == dc->res_pool->pipe_count)
		return;

	pipe_ctx->stream = NULL;
	DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
					pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
}
944 
dcn10_hw_wa_force_recovery(struct dc * dc)945 static bool dcn10_hw_wa_force_recovery(struct dc *dc)
946 {
947 	struct hubp *hubp ;
948 	unsigned int i;
949 	bool need_recover = true;
950 
951 	if (!dc->debug.recovery_enabled)
952 		return false;
953 
954 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
955 		struct pipe_ctx *pipe_ctx =
956 			&dc->current_state->res_ctx.pipe_ctx[i];
957 		if (pipe_ctx != NULL) {
958 			hubp = pipe_ctx->plane_res.hubp;
959 			if (hubp != NULL && hubp->funcs->hubp_get_underflow_status) {
960 				if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) {
961 					/* one pipe underflow, we will reset all the pipes*/
962 					need_recover = true;
963 				}
964 			}
965 		}
966 	}
967 	if (!need_recover)
968 		return false;
969 	/*
970 	DCHUBP_CNTL:HUBP_BLANK_EN=1
971 	DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1
972 	DCHUBP_CNTL:HUBP_DISABLE=1
973 	DCHUBP_CNTL:HUBP_DISABLE=0
974 	DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0
975 	DCSURF_PRIMARY_SURFACE_ADDRESS
976 	DCHUBP_CNTL:HUBP_BLANK_EN=0
977 	*/
978 
979 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
980 		struct pipe_ctx *pipe_ctx =
981 			&dc->current_state->res_ctx.pipe_ctx[i];
982 		if (pipe_ctx != NULL) {
983 			hubp = pipe_ctx->plane_res.hubp;
984 			/*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
985 			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
986 				hubp->funcs->set_hubp_blank_en(hubp, true);
987 		}
988 	}
989 	/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1*/
990 	hubbub1_soft_reset(dc->res_pool->hubbub, true);
991 
992 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
993 		struct pipe_ctx *pipe_ctx =
994 			&dc->current_state->res_ctx.pipe_ctx[i];
995 		if (pipe_ctx != NULL) {
996 			hubp = pipe_ctx->plane_res.hubp;
997 			/*DCHUBP_CNTL:HUBP_DISABLE=1*/
998 			if (hubp != NULL && hubp->funcs->hubp_disable_control)
999 				hubp->funcs->hubp_disable_control(hubp, true);
1000 		}
1001 	}
1002 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1003 		struct pipe_ctx *pipe_ctx =
1004 			&dc->current_state->res_ctx.pipe_ctx[i];
1005 		if (pipe_ctx != NULL) {
1006 			hubp = pipe_ctx->plane_res.hubp;
1007 			/*DCHUBP_CNTL:HUBP_DISABLE=0*/
1008 			if (hubp != NULL && hubp->funcs->hubp_disable_control)
1009 				hubp->funcs->hubp_disable_control(hubp, true);
1010 		}
1011 	}
1012 	/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0*/
1013 	hubbub1_soft_reset(dc->res_pool->hubbub, false);
1014 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1015 		struct pipe_ctx *pipe_ctx =
1016 			&dc->current_state->res_ctx.pipe_ctx[i];
1017 		if (pipe_ctx != NULL) {
1018 			hubp = pipe_ctx->plane_res.hubp;
1019 			/*DCHUBP_CNTL:HUBP_BLANK_EN=0*/
1020 			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
1021 				hubp->funcs->set_hubp_blank_en(hubp, true);
1022 		}
1023 	}
1024 	return true;
1025 
1026 }
1027 
1028 
dcn10_verify_allow_pstate_change_high(struct dc * dc)1029 void dcn10_verify_allow_pstate_change_high(struct dc *dc)
1030 {
1031 	static bool should_log_hw_state; /* prevent hw state log by default */
1032 
1033 	if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub)) {
1034 		if (should_log_hw_state) {
1035 			dcn10_log_hw_state(dc, NULL);
1036 		}
1037 		BREAK_TO_DEBUGGER();
1038 		if (dcn10_hw_wa_force_recovery(dc)) {
1039 		/*check again*/
1040 			if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub))
1041 				BREAK_TO_DEBUGGER();
1042 		}
1043 	}
1044 }
1045 
1046 /* trigger HW to start disconnect plane from stream on the next vsync */
dcn10_plane_atomic_disconnect(struct dc * dc,struct pipe_ctx * pipe_ctx)1047 void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
1048 {
1049 	struct dce_hwseq *hws = dc->hwseq;
1050 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
1051 	int dpp_id = pipe_ctx->plane_res.dpp->inst;
1052 	struct mpc *mpc = dc->res_pool->mpc;
1053 	struct mpc_tree *mpc_tree_params;
1054 	struct mpcc *mpcc_to_remove = NULL;
1055 	struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
1056 
1057 	mpc_tree_params = &(opp->mpc_tree_params);
1058 	mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);
1059 
1060 	/*Already reset*/
1061 	if (mpcc_to_remove == NULL)
1062 		return;
1063 
1064 	mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
1065 	if (opp != NULL)
1066 		opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
1067 
1068 	dc->optimized_required = true;
1069 
1070 	if (hubp->funcs->hubp_disconnect)
1071 		hubp->funcs->hubp_disconnect(hubp);
1072 
1073 	if (dc->debug.sanity_checks)
1074 		hws->funcs.verify_allow_pstate_change_high(dc);
1075 }
1076 
/* Power gate the DPP and HUBP of one front-end pipe.
 *
 * Only runs when this ASIC exposes DC_IP_REQUEST_CNTL; IP_REQUEST_EN is
 * raised around the power-gate programming and cleared again afterwards.
 */
void dcn10_plane_atomic_power_down(struct dc *dc,
		struct dpp *dpp,
		struct hubp *hubp)
{
	struct dce_hwseq *hws = dc->hwseq;
	DC_LOGGER_INIT(dc->ctx->logger);

	if (REG(DC_IP_REQUEST_CNTL)) {
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 1);

		/* Power-gating hooks are optional per hwseq implementation. */
		if (hws->funcs.dpp_pg_control)
			hws->funcs.dpp_pg_control(hws, dpp->inst, false);

		if (hws->funcs.hubp_pg_control)
			hws->funcs.hubp_pg_control(hws, hubp->inst, false);

		/* Reset DPP state so it comes back clean on next power-up. */
		dpp->funcs->dpp_reset(dpp);
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 0);
		DC_LOG_DEBUG(
				"Power gated front end %d\n", hubp->inst);
	}
}
1101 
/* disable HW used by plane.
 * note:  cannot disable until disconnect is complete
 */
void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	int opp_id = hubp->opp_id;

	/* The MPCC disconnect requested earlier must complete before the
	 * front-end clocks can be stopped.
	 */
	dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);

	hubp->funcs->hubp_clk_cntl(hubp, false);

	dpp->funcs->dpp_dppclk_control(dpp, false, false);

	/* Stop the OPP pipe clock once no MPCC remains in its tree.
	 * NOTE(review): opp_id 0xf appears to mean "no OPP assigned" —
	 * confirm against OPP_ID_INVALID.
	 */
	if (opp_id != 0xf && pipe_ctx->stream_res.opp->mpc_tree_params.opp_list == NULL)
		pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
				pipe_ctx->stream_res.opp,
				false);

	hubp->power_gated = true;
	dc->optimized_required = false; /* We're powering off, no need to optimize */

	hws->funcs.plane_atomic_power_down(dc,
			pipe_ctx->plane_res.dpp,
			pipe_ctx->plane_res.hubp);

	/* Detach every resource from this pipe_ctx now that HW is off. */
	pipe_ctx->stream = NULL;
	memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
	memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
	pipe_ctx->top_pipe = NULL;
	pipe_ctx->bottom_pipe = NULL;
	pipe_ctx->plane_state = NULL;
}
1137 
dcn10_disable_plane(struct dc * dc,struct pipe_ctx * pipe_ctx)1138 void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
1139 {
1140 	struct dce_hwseq *hws = dc->hwseq;
1141 	DC_LOGGER_INIT(dc->ctx->logger);
1142 
1143 	if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
1144 		return;
1145 
1146 	hws->funcs.plane_atomic_disable(dc, pipe_ctx);
1147 
1148 	apply_DEGVIDCN10_253_wa(dc);
1149 
1150 	DC_LOG_DC("Power down front end %d\n",
1151 					pipe_ctx->pipe_idx);
1152 }
1153 
/* Bring every pipe to a known disabled state at hardware init.
 *
 * Pipes carrying a seamless-boot stream are left lit so the image set
 * up by VBIOS/firmware stays on screen; all other pipes are blanked,
 * have their MPC muxes reset, and get their front ends disconnected
 * and disabled.
 */
void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
{
	int i;
	struct dce_hwseq *hws = dc->hwseq;
	bool can_apply_seamless_boot = false;

	/* Seamless boot applies if any stream requests the optimization. */
	for (i = 0; i < context->stream_count; i++) {
		if (context->streams[i]->apply_seamless_boot_optimization) {
			can_apply_seamless_boot = true;
			break;
		}
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* There is assumption that pipe_ctx is not mapping irregularly
		 * to non-preferred front end. If pipe_ctx->stream is not NULL,
		 * we will use the pipe, so don't disable
		 */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		/* Blank controller using driver code instead of
		 * command table.
		 */
		if (tg->funcs->is_tg_enabled(tg)) {
			if (hws->funcs.init_blank != NULL) {
				hws->funcs.init_blank(dc, tg);
				tg->funcs->lock(tg);
			} else {
				tg->funcs->lock(tg);
				tg->funcs->set_blank(tg, true);
				hwss_wait_for_blank_complete(tg);
			}
		}
	}

	/* num_opp will be equal to number of mpcc */
	for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* Cannot reset the MPC mux if seamless boot */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		dc->res_pool->mpc->funcs->mpc_init_single_inst(
				dc->res_pool->mpc, i);
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct hubp *hubp = dc->res_pool->hubps[i];
		struct dpp *dpp = dc->res_pool->dpps[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* There is assumption that pipe_ctx is not mapping irregularly
		 * to non-preferred front end. If pipe_ctx->stream is not NULL,
		 * we will use the pipe, so don't disable
		 */
		if (can_apply_seamless_boot &&
			pipe_ctx->stream != NULL &&
			pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
				pipe_ctx->stream_res.tg)) {
			// Enable double buffering for OTG_BLANK no matter if
			// seamless boot is enabled or not to suppress global sync
			// signals when OTG blanked. This is to prevent pipe from
			// requesting data while in PSR.
			tg->funcs->tg_init(tg);
			continue;
		}

		/* Disable on the current state so the new one isn't cleared. */
		pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

		dpp->funcs->dpp_reset(dpp);

		/* Populate pipe_ctx with the default resources for this index
		 * so the disconnect/disable helpers below have valid handles.
		 */
		pipe_ctx->stream_res.tg = tg;
		pipe_ctx->pipe_idx = i;

		pipe_ctx->plane_res.hubp = hubp;
		pipe_ctx->plane_res.dpp = dpp;
		pipe_ctx->plane_res.mpcc_inst = dpp->inst;
		hubp->mpcc_id = dpp->inst;
		hubp->opp_id = OPP_ID_INVALID;
		hubp->power_gated = false;

		dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
		dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
		dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
		pipe_ctx->stream_res.opp = dc->res_pool->opps[i];

		hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);

		if (tg->funcs->is_tg_enabled(tg))
			tg->funcs->unlock(tg);

		dc->hwss.disable_plane(dc, pipe_ctx);

		/* Drop the temporary references and re-init the TG state. */
		pipe_ctx->stream_res.tg = NULL;
		pipe_ctx->plane_res.hubp = NULL;

		tg->funcs->tg_init(tg);
	}
}
1260 
/* One-time hardware bring-up for DCN 1.0.
 *
 * Order: clocks/DCCG init, (FPGA early-out), VGA disable + BIOS golden
 * init, reference-clock discovery, link encoder init, DSC power gating,
 * powering down lit DP displays, pipe init, then audio/backlight/ABM/
 * DMCU init and final clock-gating setup.
 */
void dcn10_init_hw(struct dc *dc)
{
	int i, j;
	struct abm *abm = dc->res_pool->abm;
	struct dmcu *dmcu = dc->res_pool->dmcu;
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *dcb = dc->ctx->dc_bios;
	struct resource_pool *res_pool = dc->res_pool;
	uint32_t backlight = MAX_BACKLIGHT_LEVEL;
	bool   is_optimized_init_done = false;

	if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);

	// Initialize the dccg
	if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init)
		dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg);

	/* FPGA platforms take a reduced init path and return early. */
	if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {

		REG_WRITE(REFCLK_CNTL, 0);
		REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
		REG_WRITE(DIO_MEM_PWR_CTRL, 0);

		if (!dc->debug.disable_clock_gate) {
			/* enable all DCN clock gating */
			REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);

			REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);

			REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
		}

		//Enable ability to power gate / don't force power on permanently
		if (hws->funcs.enable_power_gating_plane)
			hws->funcs.enable_power_gating_plane(hws, true);

		return;
	}

	if (!dcb->funcs->is_accelerated_mode(dcb))
		hws->funcs.disable_vga(dc->hwseq);

	hws->funcs.bios_golden_init(dc);

	/* Derive DCCG/DCHUB reference clocks from the VBIOS crystal
	 * frequency when firmware info is available.
	 */
	if (dc->ctx->dc_bios->fw_info_valid) {
		res_pool->ref_clocks.xtalin_clock_inKhz =
				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;

		if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
			if (res_pool->dccg && res_pool->hubbub) {

				(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
						dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
						&res_pool->ref_clocks.dccg_ref_clock_inKhz);

				(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
						res_pool->ref_clocks.dccg_ref_clock_inKhz,
						&res_pool->ref_clocks.dchub_ref_clock_inKhz);
			} else {
				// Not all ASICs have DCCG sw component
				res_pool->ref_clocks.dccg_ref_clock_inKhz =
						res_pool->ref_clocks.xtalin_clock_inKhz;
				res_pool->ref_clocks.dchub_ref_clock_inKhz =
						res_pool->ref_clocks.xtalin_clock_inKhz;
			}
		}
	} else
		ASSERT_CRITICAL(false);

	for (i = 0; i < dc->link_count; i++) {
		/* Power up AND update implementation according to the
		 * required signal (which may be different from the
		 * default signal on connector).
		 */
		struct dc_link *link = dc->links[i];

		if (!is_optimized_init_done)
			link->link_enc->funcs->hw_init(link->link_enc);

		/* Check for enabled DIG to identify enabled display */
		if (link->link_enc->funcs->is_dig_enabled &&
			link->link_enc->funcs->is_dig_enabled(link->link_enc))
			link->link_status.link_active = true;
	}

	/* Power gate DSCs */
	if (!is_optimized_init_done) {
		for (i = 0; i < res_pool->res_cap->num_dsc; i++)
			if (hws->funcs.dsc_pg_control != NULL)
				hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
	}

	/* we want to turn off all dp displays before doing detection */
	if (dc->config.power_down_display_on_boot) {
		uint8_t dpcd_power_state = '\0';
		enum dc_status status = DC_ERROR_UNEXPECTED;

		for (i = 0; i < dc->link_count; i++) {
			if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)
				continue;

			/*
			 * If any of the displays are lit up turn them off.
			 * The reason is that some MST hubs cannot be turned off
			 * completely until we tell them to do so.
			 * If not turned off, then displays connected to MST hub
			 * won't light up.
			 */
			status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
							&dpcd_power_state, sizeof(dpcd_power_state));
			if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) {
				/* blank dp stream before power off receiver*/
				if (dc->links[i]->link_enc->funcs->get_dig_frontend) {
					unsigned int fe = dc->links[i]->link_enc->funcs->get_dig_frontend(dc->links[i]->link_enc);

					for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
						if (fe == dc->res_pool->stream_enc[j]->id) {
							dc->res_pool->stream_enc[j]->funcs->dp_blank(
										dc->res_pool->stream_enc[j]);
							break;
						}
					}
				}
				dp_receiver_power_ctrl(dc->links[i], false);
			}
		}
	}

	/* If taking control over from VBIOS, we may want to optimize our first
	 * mode set, so we need to skip powering down pipes until we know which
	 * pipes we want to use.
	 * Otherwise, if taking control is not possible, we need to power
	 * everything down.
	 */
	if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) {
		if (!is_optimized_init_done) {
			hws->funcs.init_pipes(dc, dc->current_state);
			if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
				dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
						!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
		}
	}

	if (!is_optimized_init_done) {

		for (i = 0; i < res_pool->audio_count; i++) {
			struct audio *audio = res_pool->audios[i];

			audio->funcs->hw_init(audio);
		}

		for (i = 0; i < dc->link_count; i++) {
			struct dc_link *link = dc->links[i];

			if (link->panel_cntl)
				backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
		}

		if (abm != NULL)
			abm->funcs->abm_init(abm, backlight);

		if (dmcu != NULL && !dmcu->auto_load_dmcu)
			dmcu->funcs->dmcu_init(dmcu);
	}

	if (abm != NULL && dmcu != NULL)
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);

	/* power AFMT HDMI memory TODO: may move to dis/en output save power*/
	if (!is_optimized_init_done)
		REG_WRITE(DIO_MEM_PWR_CTRL, 0);

	if (!dc->debug.disable_clock_gate) {
		/* enable all DCN clock gating */
		REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);

		REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);

		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
	}
	if (hws->funcs.enable_power_gating_plane)
		hws->funcs.enable_power_gating_plane(dc->hwseq, true);

	if (dc->clk_mgr->funcs->notify_wm_ranges)
		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);

#ifdef CONFIG_DRM_AMD_DC_DCN3_0
	if (dc->clk_mgr->funcs->set_hard_max_memclk)
		dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
#endif

}
1454 
1455 /* In headless boot cases, DIG may be turned
1456  * on which causes HW/SW discrepancies.
1457  * To avoid this, power down hardware on boot
1458  * if DIG is turned on and seamless boot not enabled
1459  */
dcn10_power_down_on_boot(struct dc * dc)1460 void dcn10_power_down_on_boot(struct dc *dc)
1461 {
1462 	int i = 0;
1463 	struct dc_link *edp_link;
1464 
1465 	if (!dc->config.power_down_display_on_boot)
1466 		return;
1467 
1468 	edp_link = get_edp_link(dc);
1469 	if (edp_link &&
1470 			edp_link->link_enc->funcs->is_dig_enabled &&
1471 			edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
1472 			dc->hwseq->funcs.edp_backlight_control &&
1473 			dc->hwss.power_down &&
1474 			dc->hwss.edp_power_control) {
1475 		dc->hwseq->funcs.edp_backlight_control(edp_link, false);
1476 		dc->hwss.power_down(dc);
1477 		dc->hwss.edp_power_control(edp_link, false);
1478 	} else {
1479 		for (i = 0; i < dc->link_count; i++) {
1480 			struct dc_link *link = dc->links[i];
1481 
1482 			if (link->link_enc->funcs->is_dig_enabled &&
1483 					link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
1484 					dc->hwss.power_down) {
1485 				dc->hwss.power_down(dc);
1486 				break;
1487 			}
1488 
1489 		}
1490 	}
1491 
1492 	/*
1493 	 * Call update_clocks with empty context
1494 	 * to send DISPLAY_OFF
1495 	 * Otherwise DISPLAY_OFF may not be asserted
1496 	 */
1497 	if (dc->clk_mgr->funcs->set_low_power_state)
1498 		dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);
1499 }
1500 
/* Reset the back ends of pipes whose stream is gone or needs full
 * reprogramming in the new context.  Iterates pipes in reverse index
 * order; only top pipes (which own the shared back end) are reset.
 */
void dcn10_reset_hw_ctx_wrap(
		struct dc *dc,
		struct dc_state *context)
{
	int i;
	struct dce_hwseq *hws = dc->hwseq;

	/* Reset Back End*/
	for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
		struct pipe_ctx *pipe_ctx_old =
			&dc->current_state->res_ctx.pipe_ctx[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		if (!pipe_ctx_old->stream)
			continue;

		if (pipe_ctx_old->top_pipe)
			continue;

		if (!pipe_ctx->stream ||
				pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
			struct clock_source *old_clk = pipe_ctx_old->clock_source;

			dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
			if (hws->funcs.enable_stream_gating)
				hws->funcs.enable_stream_gating(dc, pipe_ctx);
			/* Power down the clock source that drove the old stream. */
			if (old_clk)
				old_clk->funcs->cs_power_down(old_clk);
		}
	}
}
1532 
patch_address_for_sbs_tb_stereo(struct pipe_ctx * pipe_ctx,PHYSICAL_ADDRESS_LOC * addr)1533 static bool patch_address_for_sbs_tb_stereo(
1534 		struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
1535 {
1536 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1537 	bool sec_split = pipe_ctx->top_pipe &&
1538 			pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
1539 	if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
1540 		(pipe_ctx->stream->timing.timing_3d_format ==
1541 		 TIMING_3D_FORMAT_SIDE_BY_SIDE ||
1542 		 pipe_ctx->stream->timing.timing_3d_format ==
1543 		 TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
1544 		*addr = plane_state->address.grph_stereo.left_addr;
1545 		plane_state->address.grph_stereo.left_addr =
1546 		plane_state->address.grph_stereo.right_addr;
1547 		return true;
1548 	} else {
1549 		if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
1550 			plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
1551 			plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
1552 			plane_state->address.grph_stereo.right_addr =
1553 			plane_state->address.grph_stereo.left_addr;
1554 		}
1555 	}
1556 	return false;
1557 }
1558 
dcn10_update_plane_addr(const struct dc * dc,struct pipe_ctx * pipe_ctx)1559 void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
1560 {
1561 	bool addr_patched = false;
1562 	PHYSICAL_ADDRESS_LOC addr;
1563 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1564 
1565 	if (plane_state == NULL)
1566 		return;
1567 
1568 	addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);
1569 
1570 	pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
1571 			pipe_ctx->plane_res.hubp,
1572 			&plane_state->address,
1573 			plane_state->flip_immediate);
1574 
1575 	plane_state->status.requested_address = plane_state->address;
1576 
1577 	if (plane_state->flip_immediate)
1578 		plane_state->status.current_address = plane_state->address;
1579 
1580 	if (addr_patched)
1581 		pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
1582 }
1583 
dcn10_set_input_transfer_func(struct dc * dc,struct pipe_ctx * pipe_ctx,const struct dc_plane_state * plane_state)1584 bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1585 			const struct dc_plane_state *plane_state)
1586 {
1587 	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
1588 	const struct dc_transfer_func *tf = NULL;
1589 	bool result = true;
1590 
1591 	if (dpp_base == NULL)
1592 		return false;
1593 
1594 	if (plane_state->in_transfer_func)
1595 		tf = plane_state->in_transfer_func;
1596 
1597 	if (plane_state->gamma_correction &&
1598 		!dpp_base->ctx->dc->debug.always_use_regamma
1599 		&& !plane_state->gamma_correction->is_identity
1600 			&& dce_use_lut(plane_state->format))
1601 		dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);
1602 
1603 	if (tf == NULL)
1604 		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1605 	else if (tf->type == TF_TYPE_PREDEFINED) {
1606 		switch (tf->tf) {
1607 		case TRANSFER_FUNCTION_SRGB:
1608 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_sRGB);
1609 			break;
1610 		case TRANSFER_FUNCTION_BT709:
1611 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_xvYCC);
1612 			break;
1613 		case TRANSFER_FUNCTION_LINEAR:
1614 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1615 			break;
1616 		case TRANSFER_FUNCTION_PQ:
1617 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
1618 			cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
1619 			dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
1620 			result = true;
1621 			break;
1622 		default:
1623 			result = false;
1624 			break;
1625 		}
1626 	} else if (tf->type == TF_TYPE_BYPASS) {
1627 		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1628 	} else {
1629 		cm_helper_translate_curve_to_degamma_hw_format(tf,
1630 					&dpp_base->degamma_params);
1631 		dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
1632 				&dpp_base->degamma_params);
1633 		result = true;
1634 	}
1635 
1636 	return result;
1637 }
1638 
1639 #define MAX_NUM_HW_POINTS 0x200
1640 
log_tf(struct dc_context * ctx,struct dc_transfer_func * tf,uint32_t hw_points_num)1641 static void log_tf(struct dc_context *ctx,
1642 				struct dc_transfer_func *tf, uint32_t hw_points_num)
1643 {
1644 	// DC_LOG_GAMMA is default logging of all hw points
1645 	// DC_LOG_ALL_GAMMA logs all points, not only hw points
1646 	// DC_LOG_ALL_TF_POINTS logs all channels of the tf
1647 	int i = 0;
1648 
1649 	DC_LOGGER_INIT(ctx->logger);
1650 	DC_LOG_GAMMA("Gamma Correction TF");
1651 	DC_LOG_ALL_GAMMA("Logging all tf points...");
1652 	DC_LOG_ALL_TF_CHANNELS("Logging all channels...");
1653 
1654 	for (i = 0; i < hw_points_num; i++) {
1655 		DC_LOG_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
1656 		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
1657 		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
1658 	}
1659 
1660 	for (i = hw_points_num; i < MAX_NUM_HW_POINTS; i++) {
1661 		DC_LOG_ALL_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
1662 		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
1663 		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
1664 	}
1665 }
1666 
dcn10_set_output_transfer_func(struct dc * dc,struct pipe_ctx * pipe_ctx,const struct dc_stream_state * stream)1667 bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1668 				const struct dc_stream_state *stream)
1669 {
1670 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
1671 
1672 	if (dpp == NULL)
1673 		return false;
1674 
1675 	dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;
1676 
1677 	if (stream->out_transfer_func &&
1678 	    stream->out_transfer_func->type == TF_TYPE_PREDEFINED &&
1679 	    stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB)
1680 		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_SRGB);
1681 
1682 	/* dcn10_translate_regamma_to_hw_format takes 750us, only do it when full
1683 	 * update.
1684 	 */
1685 	else if (cm_helper_translate_curve_to_hw_format(
1686 			stream->out_transfer_func,
1687 			&dpp->regamma_params, false)) {
1688 		dpp->funcs->dpp_program_regamma_pwl(
1689 				dpp,
1690 				&dpp->regamma_params, OPP_REGAMMA_USER);
1691 	} else
1692 		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);
1693 
1694 	if (stream != NULL && stream->ctx != NULL &&
1695 			stream->out_transfer_func != NULL) {
1696 		log_tf(stream->ctx,
1697 				stream->out_transfer_func,
1698 				dpp->regamma_params.hw_points_num);
1699 	}
1700 
1701 	return true;
1702 }
1703 
/* Lock or unlock register double-buffering updates for a pipe's timing
 * generator.  The TG master update lock covers the whole TG, so only the
 * top pipe needs to be locked.
 */
void dcn10_pipe_control_lock(
	struct dc *dc,
	struct pipe_ctx *pipe,
	bool lock)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct timing_generator *tg;

	if (!pipe || pipe->top_pipe)
		return;

	tg = pipe->stream_res.tg;

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);

	if (lock)
		tg->funcs->lock(tg);
	else
		tg->funcs->unlock(tg);

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
}
1728 
1729 /**
1730  * delay_cursor_until_vupdate() - Delay cursor update if too close to VUPDATE.
1731  *
1732  * Software keepout workaround to prevent cursor update locking from stalling
1733  * out cursor updates indefinitely or from old values from being retained in
1734  * the case where the viewport changes in the same frame as the cursor.
1735  *
1736  * The idea is to calculate the remaining time from VPOS to VUPDATE. If it's
1737  * too close to VUPDATE, then stall out until VUPDATE finishes.
1738  *
1739  * TODO: Optimize cursor programming to be once per frame before VUPDATE
1740  *       to avoid the need for this workaround.
1741  */
delay_cursor_until_vupdate(struct dc * dc,struct pipe_ctx * pipe_ctx)1742 static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
1743 {
1744 	struct dc_stream_state *stream = pipe_ctx->stream;
1745 	struct crtc_position position;
1746 	uint32_t vupdate_start, vupdate_end;
1747 	unsigned int lines_to_vupdate, us_to_vupdate, vpos;
1748 	unsigned int us_per_line, us_vupdate;
1749 
1750 	if (!dc->hwss.calc_vupdate_position || !dc->hwss.get_position)
1751 		return;
1752 
1753 	if (!pipe_ctx->stream_res.stream_enc || !pipe_ctx->stream_res.tg)
1754 		return;
1755 
1756 	dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
1757 				       &vupdate_end);
1758 
1759 	dc->hwss.get_position(&pipe_ctx, 1, &position);
1760 	vpos = position.vertical_count;
1761 
1762 	/* Avoid wraparound calculation issues */
1763 	vupdate_start += stream->timing.v_total;
1764 	vupdate_end += stream->timing.v_total;
1765 	vpos += stream->timing.v_total;
1766 
1767 	if (vpos <= vupdate_start) {
1768 		/* VPOS is in VACTIVE or back porch. */
1769 		lines_to_vupdate = vupdate_start - vpos;
1770 	} else if (vpos > vupdate_end) {
1771 		/* VPOS is in the front porch. */
1772 		return;
1773 	} else {
1774 		/* VPOS is in VUPDATE. */
1775 		lines_to_vupdate = 0;
1776 	}
1777 
1778 	/* Calculate time until VUPDATE in microseconds. */
1779 	us_per_line =
1780 		stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
1781 	us_to_vupdate = lines_to_vupdate * us_per_line;
1782 
1783 	/* 70 us is a conservative estimate of cursor update time*/
1784 	if (us_to_vupdate > 70)
1785 		return;
1786 
1787 	/* Stall out until the cursor update completes. */
1788 	if (vupdate_end < vupdate_start)
1789 		vupdate_end += stream->timing.v_total;
1790 	us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
1791 	udelay(us_to_vupdate + us_vupdate);
1792 }
1793 
void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
{
	/* Cursor lock is applied per MPCC tree, so taking it on the top pipe
	 * of a stream covers every pipe blended into it.
	 */
	if (!pipe || pipe->top_pipe)
		return;

	/* Keep the lock from stalling out an in-flight cursor update. */
	if (lock)
		delay_cursor_until_vupdate(dc, pipe);

	if (!pipe->stream || !should_use_dmub_lock(pipe->stream->link)) {
		dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
				pipe->stream_res.opp->inst, lock);
	} else {
		union dmub_hw_lock_flags hw_locks = { 0 };
		struct dmub_hw_lock_inst_flags inst_flags = { 0 };

		hw_locks.bits.lock_cursor = 1;
		inst_flags.opp_inst = pipe->stream_res.opp->inst;

		/* Route the lock through the DMUB firmware lock manager. */
		dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
					lock,
					&hw_locks,
					&inst_flags);
	}
}
1819 
wait_for_reset_trigger_to_occur(struct dc_context * dc_ctx,struct timing_generator * tg)1820 static bool wait_for_reset_trigger_to_occur(
1821 	struct dc_context *dc_ctx,
1822 	struct timing_generator *tg)
1823 {
1824 	bool rc = false;
1825 
1826 	/* To avoid endless loop we wait at most
1827 	 * frames_to_wait_on_triggered_reset frames for the reset to occur. */
1828 	const uint32_t frames_to_wait_on_triggered_reset = 10;
1829 	int i;
1830 
1831 	for (i = 0; i < frames_to_wait_on_triggered_reset; i++) {
1832 
1833 		if (!tg->funcs->is_counter_moving(tg)) {
1834 			DC_ERROR("TG counter is not moving!\n");
1835 			break;
1836 		}
1837 
1838 		if (tg->funcs->did_triggered_reset_occur(tg)) {
1839 			rc = true;
1840 			/* usually occurs at i=1 */
1841 			DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n",
1842 					i);
1843 			break;
1844 		}
1845 
1846 		/* Wait for one frame. */
1847 		tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
1848 		tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
1849 	}
1850 
1851 	if (false == rc)
1852 		DC_ERROR("GSL: Timeout on reset trigger!\n");
1853 
1854 	return rc;
1855 }
1856 
dcn10_enable_timing_synchronization(struct dc * dc,int group_index,int group_size,struct pipe_ctx * grouped_pipes[])1857 void dcn10_enable_timing_synchronization(
1858 	struct dc *dc,
1859 	int group_index,
1860 	int group_size,
1861 	struct pipe_ctx *grouped_pipes[])
1862 {
1863 	struct dc_context *dc_ctx = dc->ctx;
1864 	int i;
1865 
1866 	DC_SYNC_INFO("Setting up OTG reset trigger\n");
1867 
1868 	for (i = 1; i < group_size; i++)
1869 		grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
1870 				grouped_pipes[i]->stream_res.tg,
1871 				grouped_pipes[0]->stream_res.tg->inst);
1872 
1873 	DC_SYNC_INFO("Waiting for trigger\n");
1874 
1875 	/* Need to get only check 1 pipe for having reset as all the others are
1876 	 * synchronized. Look at last pipe programmed to reset.
1877 	 */
1878 
1879 	wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);
1880 	for (i = 1; i < group_size; i++)
1881 		grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
1882 				grouped_pipes[i]->stream_res.tg);
1883 
1884 	DC_SYNC_INFO("Sync complete\n");
1885 }
1886 
dcn10_enable_per_frame_crtc_position_reset(struct dc * dc,int group_size,struct pipe_ctx * grouped_pipes[])1887 void dcn10_enable_per_frame_crtc_position_reset(
1888 	struct dc *dc,
1889 	int group_size,
1890 	struct pipe_ctx *grouped_pipes[])
1891 {
1892 	struct dc_context *dc_ctx = dc->ctx;
1893 	int i;
1894 
1895 	DC_SYNC_INFO("Setting up\n");
1896 	for (i = 0; i < group_size; i++)
1897 		if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
1898 			grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
1899 					grouped_pipes[i]->stream_res.tg,
1900 					0,
1901 					&grouped_pipes[i]->stream->triggered_crtc_reset);
1902 
1903 	DC_SYNC_INFO("Waiting for trigger\n");
1904 
1905 	for (i = 0; i < group_size; i++)
1906 		wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg);
1907 
1908 	DC_SYNC_INFO("Multi-display sync is complete\n");
1909 }
1910 
1911 /*static void print_rq_dlg_ttu(
1912 		struct dc *dc,
1913 		struct pipe_ctx *pipe_ctx)
1914 {
1915 	DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
1916 			"\n============== DML TTU Output parameters [%d] ==============\n"
1917 			"qos_level_low_wm: %d, \n"
1918 			"qos_level_high_wm: %d, \n"
1919 			"min_ttu_vblank: %d, \n"
1920 			"qos_level_flip: %d, \n"
1921 			"refcyc_per_req_delivery_l: %d, \n"
1922 			"qos_level_fixed_l: %d, \n"
1923 			"qos_ramp_disable_l: %d, \n"
1924 			"refcyc_per_req_delivery_pre_l: %d, \n"
1925 			"refcyc_per_req_delivery_c: %d, \n"
1926 			"qos_level_fixed_c: %d, \n"
1927 			"qos_ramp_disable_c: %d, \n"
1928 			"refcyc_per_req_delivery_pre_c: %d\n"
1929 			"=============================================================\n",
1930 			pipe_ctx->pipe_idx,
1931 			pipe_ctx->ttu_regs.qos_level_low_wm,
1932 			pipe_ctx->ttu_regs.qos_level_high_wm,
1933 			pipe_ctx->ttu_regs.min_ttu_vblank,
1934 			pipe_ctx->ttu_regs.qos_level_flip,
1935 			pipe_ctx->ttu_regs.refcyc_per_req_delivery_l,
1936 			pipe_ctx->ttu_regs.qos_level_fixed_l,
1937 			pipe_ctx->ttu_regs.qos_ramp_disable_l,
1938 			pipe_ctx->ttu_regs.refcyc_per_req_delivery_pre_l,
1939 			pipe_ctx->ttu_regs.refcyc_per_req_delivery_c,
1940 			pipe_ctx->ttu_regs.qos_level_fixed_c,
1941 			pipe_ctx->ttu_regs.qos_ramp_disable_c,
1942 			pipe_ctx->ttu_regs.refcyc_per_req_delivery_pre_c
1943 			);
1944 
1945 	DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
1946 			"\n============== DML DLG Output parameters [%d] ==============\n"
1947 			"refcyc_h_blank_end: %d, \n"
1948 			"dlg_vblank_end: %d, \n"
1949 			"min_dst_y_next_start: %d, \n"
1950 			"refcyc_per_htotal: %d, \n"
1951 			"refcyc_x_after_scaler: %d, \n"
1952 			"dst_y_after_scaler: %d, \n"
1953 			"dst_y_prefetch: %d, \n"
1954 			"dst_y_per_vm_vblank: %d, \n"
1955 			"dst_y_per_row_vblank: %d, \n"
1956 			"ref_freq_to_pix_freq: %d, \n"
1957 			"vratio_prefetch: %d, \n"
1958 			"refcyc_per_pte_group_vblank_l: %d, \n"
1959 			"refcyc_per_meta_chunk_vblank_l: %d, \n"
1960 			"dst_y_per_pte_row_nom_l: %d, \n"
1961 			"refcyc_per_pte_group_nom_l: %d, \n",
1962 			pipe_ctx->pipe_idx,
1963 			pipe_ctx->dlg_regs.refcyc_h_blank_end,
1964 			pipe_ctx->dlg_regs.dlg_vblank_end,
1965 			pipe_ctx->dlg_regs.min_dst_y_next_start,
1966 			pipe_ctx->dlg_regs.refcyc_per_htotal,
1967 			pipe_ctx->dlg_regs.refcyc_x_after_scaler,
1968 			pipe_ctx->dlg_regs.dst_y_after_scaler,
1969 			pipe_ctx->dlg_regs.dst_y_prefetch,
1970 			pipe_ctx->dlg_regs.dst_y_per_vm_vblank,
1971 			pipe_ctx->dlg_regs.dst_y_per_row_vblank,
1972 			pipe_ctx->dlg_regs.ref_freq_to_pix_freq,
1973 			pipe_ctx->dlg_regs.vratio_prefetch,
1974 			pipe_ctx->dlg_regs.refcyc_per_pte_group_vblank_l,
1975 			pipe_ctx->dlg_regs.refcyc_per_meta_chunk_vblank_l,
1976 			pipe_ctx->dlg_regs.dst_y_per_pte_row_nom_l,
1977 			pipe_ctx->dlg_regs.refcyc_per_pte_group_nom_l
1978 			);
1979 
1980 	DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
1981 			"\ndst_y_per_meta_row_nom_l: %d, \n"
1982 			"refcyc_per_meta_chunk_nom_l: %d, \n"
1983 			"refcyc_per_line_delivery_pre_l: %d, \n"
1984 			"refcyc_per_line_delivery_l: %d, \n"
1985 			"vratio_prefetch_c: %d, \n"
1986 			"refcyc_per_pte_group_vblank_c: %d, \n"
1987 			"refcyc_per_meta_chunk_vblank_c: %d, \n"
1988 			"dst_y_per_pte_row_nom_c: %d, \n"
1989 			"refcyc_per_pte_group_nom_c: %d, \n"
1990 			"dst_y_per_meta_row_nom_c: %d, \n"
1991 			"refcyc_per_meta_chunk_nom_c: %d, \n"
1992 			"refcyc_per_line_delivery_pre_c: %d, \n"
1993 			"refcyc_per_line_delivery_c: %d \n"
1994 			"========================================================\n",
1995 			pipe_ctx->dlg_regs.dst_y_per_meta_row_nom_l,
1996 			pipe_ctx->dlg_regs.refcyc_per_meta_chunk_nom_l,
1997 			pipe_ctx->dlg_regs.refcyc_per_line_delivery_pre_l,
1998 			pipe_ctx->dlg_regs.refcyc_per_line_delivery_l,
1999 			pipe_ctx->dlg_regs.vratio_prefetch_c,
2000 			pipe_ctx->dlg_regs.refcyc_per_pte_group_vblank_c,
2001 			pipe_ctx->dlg_regs.refcyc_per_meta_chunk_vblank_c,
2002 			pipe_ctx->dlg_regs.dst_y_per_pte_row_nom_c,
2003 			pipe_ctx->dlg_regs.refcyc_per_pte_group_nom_c,
2004 			pipe_ctx->dlg_regs.dst_y_per_meta_row_nom_c,
2005 			pipe_ctx->dlg_regs.refcyc_per_meta_chunk_nom_c,
2006 			pipe_ctx->dlg_regs.refcyc_per_line_delivery_pre_c,
2007 			pipe_ctx->dlg_regs.refcyc_per_line_delivery_c
2008 			);
2009 
2010 	DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
2011 			"\n============== DML RQ Output parameters [%d] ==============\n"
2012 			"chunk_size: %d \n"
2013 			"min_chunk_size: %d \n"
2014 			"meta_chunk_size: %d \n"
2015 			"min_meta_chunk_size: %d \n"
2016 			"dpte_group_size: %d \n"
2017 			"mpte_group_size: %d \n"
2018 			"swath_height: %d \n"
2019 			"pte_row_height_linear: %d \n"
2020 			"========================================================\n",
2021 			pipe_ctx->pipe_idx,
2022 			pipe_ctx->rq_regs.rq_regs_l.chunk_size,
2023 			pipe_ctx->rq_regs.rq_regs_l.min_chunk_size,
2024 			pipe_ctx->rq_regs.rq_regs_l.meta_chunk_size,
2025 			pipe_ctx->rq_regs.rq_regs_l.min_meta_chunk_size,
2026 			pipe_ctx->rq_regs.rq_regs_l.dpte_group_size,
2027 			pipe_ctx->rq_regs.rq_regs_l.mpte_group_size,
2028 			pipe_ctx->rq_regs.rq_regs_l.swath_height,
2029 			pipe_ctx->rq_regs.rq_regs_l.pte_row_height_linear
2030 			);
2031 }
2032 */
2033 
/* Read the MC system-aperture registers and translate them into the
 * vm_system_aperture_param layout consumed by HUBP programming.
 * NOTE(review): goes through the REG_GET macros, which implicitly use
 * @hws for the register map; @hubp1 is not referenced here but is kept
 * for signature symmetry with the context0 reader below.
 */
static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1,
		struct vm_system_aperture_param *apt,
		struct dce_hwseq *hws)
{
	PHYSICAL_ADDRESS_LOC physical_page_number;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;

	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
			PHYSICAL_PAGE_NUMBER_MSB, &physical_page_number.high_part);
	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
			PHYSICAL_PAGE_NUMBER_LSB, &physical_page_number.low_part);

	REG_GET(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			LOGICAL_ADDR, &logical_addr_low);

	REG_GET(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			LOGICAL_ADDR, &logical_addr_high);

	/* Default address is stored as a 4KB page number (<< 12 => bytes). */
	apt->sys_default.quad_part =  physical_page_number.quad_part << 12;
	/* Low/high bounds are shifted by 18 — presumably 256KB granularity;
	 * confirm against the register spec before relying on this.
	 */
	apt->sys_low.quad_part =  (int64_t)logical_addr_low << 18;
	apt->sys_high.quad_part =  (int64_t)logical_addr_high << 18;
}
2057 
2058 /* Temporary read settings, future will get values from kmd directly */
/* Temporary read settings, future will get values from kmd directly */
/* Snapshot the VM context0 page-table registers (base/start/end and the
 * protection-fault default address) into @vm0, then rebase the page-table
 * base address from UMA space into the frame-buffer address space.
 * NOTE(review): REG_GET implicitly uses @hws; @hubp1 is unused here.
 */
static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
		struct vm_context0_param *vm0,
		struct dce_hwseq *hws)
{
	PHYSICAL_ADDRESS_LOC fb_base;
	PHYSICAL_ADDRESS_LOC fb_offset;
	uint32_t fb_base_value;
	uint32_t fb_offset_value;

	REG_GET(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, &fb_base_value);
	REG_GET(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, &fb_offset_value);

	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
			PAGE_DIRECTORY_ENTRY_HI32, &vm0->pte_base.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
			PAGE_DIRECTORY_ENTRY_LO32, &vm0->pte_base.low_part);

	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_start.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_start.low_part);

	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_end.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_end.low_part);

	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
			PHYSICAL_PAGE_ADDR_HI4, &vm0->fault_default.high_part);
	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
			PHYSICAL_PAGE_ADDR_LO32, &vm0->fault_default.low_part);

	/*
	 * The values in VM_CONTEXT0_PAGE_TABLE_BASE_ADDR is in UMA space.
	 * Therefore we need to do
	 * DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR
	 * - DCHUBBUB_SDPIF_FB_OFFSET + DCHUBBUB_SDPIF_FB_BASE
	 */
	/* FB base/offset registers are shifted by 24 to form byte addresses. */
	fb_base.quad_part = (uint64_t)fb_base_value << 24;
	fb_offset.quad_part = (uint64_t)fb_offset_value << 24;
	vm0->pte_base.quad_part += fb_base.quad_part;
	vm0->pte_base.quad_part -= fb_offset.quad_part;
}
2102 
2103 
dcn10_program_pte_vm(struct dce_hwseq * hws,struct hubp * hubp)2104 void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
2105 {
2106 	struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
2107 	struct vm_system_aperture_param apt = { {{ 0 } } };
2108 	struct vm_context0_param vm0 = { { { 0 } } };
2109 
2110 	mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws);
2111 	mmhub_read_vm_context0_settings(hubp1, &vm0, hws);
2112 
2113 	hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
2114 	hubp->funcs->hubp_set_vm_context0_settings(hubp, &vm0);
2115 }
2116 
dcn10_enable_plane(struct dc * dc,struct pipe_ctx * pipe_ctx,struct dc_state * context)2117 static void dcn10_enable_plane(
2118 	struct dc *dc,
2119 	struct pipe_ctx *pipe_ctx,
2120 	struct dc_state *context)
2121 {
2122 	struct dce_hwseq *hws = dc->hwseq;
2123 
2124 	if (dc->debug.sanity_checks) {
2125 		hws->funcs.verify_allow_pstate_change_high(dc);
2126 	}
2127 
2128 	undo_DEGVIDCN10_253_wa(dc);
2129 
2130 	power_on_plane(dc->hwseq,
2131 		pipe_ctx->plane_res.hubp->inst);
2132 
2133 	/* enable DCFCLK current DCHUB */
2134 	pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);
2135 
2136 	/* make sure OPP_PIPE_CLOCK_EN = 1 */
2137 	pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
2138 			pipe_ctx->stream_res.opp,
2139 			true);
2140 
2141 /* TODO: enable/disable in dm as per update type.
2142 	if (plane_state) {
2143 		DC_LOG_DC(dc->ctx->logger,
2144 				"Pipe:%d 0x%x: addr hi:0x%x, "
2145 				"addr low:0x%x, "
2146 				"src: %d, %d, %d,"
2147 				" %d; dst: %d, %d, %d, %d;\n",
2148 				pipe_ctx->pipe_idx,
2149 				plane_state,
2150 				plane_state->address.grph.addr.high_part,
2151 				plane_state->address.grph.addr.low_part,
2152 				plane_state->src_rect.x,
2153 				plane_state->src_rect.y,
2154 				plane_state->src_rect.width,
2155 				plane_state->src_rect.height,
2156 				plane_state->dst_rect.x,
2157 				plane_state->dst_rect.y,
2158 				plane_state->dst_rect.width,
2159 				plane_state->dst_rect.height);
2160 
2161 		DC_LOG_DC(dc->ctx->logger,
2162 				"Pipe %d: width, height, x, y         format:%d\n"
2163 				"viewport:%d, %d, %d, %d\n"
2164 				"recout:  %d, %d, %d, %d\n",
2165 				pipe_ctx->pipe_idx,
2166 				plane_state->format,
2167 				pipe_ctx->plane_res.scl_data.viewport.width,
2168 				pipe_ctx->plane_res.scl_data.viewport.height,
2169 				pipe_ctx->plane_res.scl_data.viewport.x,
2170 				pipe_ctx->plane_res.scl_data.viewport.y,
2171 				pipe_ctx->plane_res.scl_data.recout.width,
2172 				pipe_ctx->plane_res.scl_data.recout.height,
2173 				pipe_ctx->plane_res.scl_data.recout.x,
2174 				pipe_ctx->plane_res.scl_data.recout.y);
2175 		print_rq_dlg_ttu(dc, pipe_ctx);
2176 	}
2177 */
2178 	if (dc->config.gpu_vm_support)
2179 		dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp);
2180 
2181 	if (dc->debug.sanity_checks) {
2182 		hws->funcs.verify_allow_pstate_change_high(dc);
2183 	}
2184 }
2185 
dcn10_program_gamut_remap(struct pipe_ctx * pipe_ctx)2186 void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
2187 {
2188 	int i = 0;
2189 	struct dpp_grph_csc_adjustment adjust;
2190 	memset(&adjust, 0, sizeof(adjust));
2191 	adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
2192 
2193 
2194 	if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
2195 		adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2196 		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2197 			adjust.temperature_matrix[i] =
2198 				pipe_ctx->stream->gamut_remap_matrix.matrix[i];
2199 	} else if (pipe_ctx->plane_state &&
2200 		   pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) {
2201 		adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2202 		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2203 			adjust.temperature_matrix[i] =
2204 				pipe_ctx->plane_state->gamut_remap_matrix.matrix[i];
2205 	}
2206 
2207 	pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
2208 }
2209 
2210 
dcn10_is_rear_mpo_fix_required(struct pipe_ctx * pipe_ctx,enum dc_color_space colorspace)2211 static bool dcn10_is_rear_mpo_fix_required(struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace)
2212 {
2213 	if (pipe_ctx->plane_state && pipe_ctx->plane_state->layer_index > 0 && is_rgb_cspace(colorspace)) {
2214 		if (pipe_ctx->top_pipe) {
2215 			struct pipe_ctx *top = pipe_ctx->top_pipe;
2216 
2217 			while (top->top_pipe)
2218 				top = top->top_pipe; // Traverse to top pipe_ctx
2219 			if (top->plane_state && top->plane_state->layer_index == 0)
2220 				return true; // Front MPO plane not hidden
2221 		}
2222 	}
2223 	return false;
2224 }
2225 
/* Program the OCSC with the RGB bias terms zeroed, then restore them.
 * matrix[3], matrix[7] and matrix[11] all carry the same brightness offset;
 * zeroing them on the rear plane avoids the offset being added twice when
 * MPC blends front and rear planes.
 */
static void dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx *pipe_ctx, uint16_t *matrix)
{
	static const int bias_idx[] = { 3, 7, 11 };
	const uint16_t rgb_bias = matrix[3];
	int i;

	for (i = 0; i < 3; i++)
		matrix[bias_idx[i]] = 0;

	pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(
			pipe_ctx->plane_res.dpp, matrix);

	/* Restore the caller's matrix contents. */
	for (i = 0; i < 3; i++)
		matrix[bias_idx[i]] = rgb_bias;
}
2239 
/* Program the output CSC: either the stream's adjustment matrix (with the
 * DCN1 rear-plane MPO brightness workaround when needed) or the default
 * matrix for the given colorspace.
 */
void dcn10_program_output_csc(struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		enum dc_color_space colorspace,
		uint16_t *matrix,
		int opp_id)
{
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	int16_t rgb_bias;

	if (pipe_ctx->stream->csc_color_matrix.enable_adjustment != true) {
		if (dpp->funcs->dpp_set_csc_default != NULL)
			dpp->funcs->dpp_set_csc_default(dpp, colorspace);
		return;
	}

	if (dpp->funcs->dpp_set_csc_adjustment == NULL)
		return;

	/* MPO is broken with RGB colorspaces when OCSC matrix
	 * brightness offset >= 0 on DCN1 due to OCSC before MPC
	 * Blending adds offsets from front + rear to rear plane
	 *
	 * Fix is to set RGB bias to 0 on rear plane, top plane
	 * black value pixels add offset instead of rear + front
	 */

	/* matrix[3/7/11] are all the same offset value */
	rgb_bias = matrix[3];

	if (rgb_bias > 0 && dcn10_is_rear_mpo_fix_required(pipe_ctx, colorspace))
		dcn10_set_csc_adjustment_rgb_mpo_fix(pipe_ctx, matrix);
	else
		dpp->funcs->dpp_set_csc_adjustment(dpp, matrix);
}
2271 
dcn10_get_surface_visual_confirm_color(const struct pipe_ctx * pipe_ctx,struct tg_color * color)2272 void dcn10_get_surface_visual_confirm_color(
2273 		const struct pipe_ctx *pipe_ctx,
2274 		struct tg_color *color)
2275 {
2276 	uint32_t color_value = MAX_TG_COLOR_VALUE;
2277 
2278 	switch (pipe_ctx->plane_res.scl_data.format) {
2279 	case PIXEL_FORMAT_ARGB8888:
2280 		/* set border color to red */
2281 		color->color_r_cr = color_value;
2282 		break;
2283 
2284 	case PIXEL_FORMAT_ARGB2101010:
2285 		/* set border color to blue */
2286 		color->color_b_cb = color_value;
2287 		break;
2288 	case PIXEL_FORMAT_420BPP8:
2289 		/* set border color to green */
2290 		color->color_g_y = color_value;
2291 		break;
2292 	case PIXEL_FORMAT_420BPP10:
2293 		/* set border color to yellow */
2294 		color->color_g_y = color_value;
2295 		color->color_r_cr = color_value;
2296 		break;
2297 	case PIXEL_FORMAT_FP16:
2298 		/* set border color to white */
2299 		color->color_r_cr = color_value;
2300 		color->color_b_cb = color_value;
2301 		color->color_g_y = color_value;
2302 		break;
2303 	default:
2304 		break;
2305 	}
2306 }
2307 
dcn10_get_hdr_visual_confirm_color(struct pipe_ctx * pipe_ctx,struct tg_color * color)2308 void dcn10_get_hdr_visual_confirm_color(
2309 		struct pipe_ctx *pipe_ctx,
2310 		struct tg_color *color)
2311 {
2312 	uint32_t color_value = MAX_TG_COLOR_VALUE;
2313 
2314 	// Determine the overscan color based on the top-most (desktop) plane's context
2315 	struct pipe_ctx *top_pipe_ctx  = pipe_ctx;
2316 
2317 	while (top_pipe_ctx->top_pipe != NULL)
2318 		top_pipe_ctx = top_pipe_ctx->top_pipe;
2319 
2320 	switch (top_pipe_ctx->plane_res.scl_data.format) {
2321 	case PIXEL_FORMAT_ARGB2101010:
2322 		if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
2323 			/* HDR10, ARGB2101010 - set border color to red */
2324 			color->color_r_cr = color_value;
2325 		} else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
2326 			/* FreeSync 2 ARGB2101010 - set border color to pink */
2327 			color->color_r_cr = color_value;
2328 			color->color_b_cb = color_value;
2329 		}
2330 		break;
2331 	case PIXEL_FORMAT_FP16:
2332 		if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
2333 			/* HDR10, FP16 - set border color to blue */
2334 			color->color_b_cb = color_value;
2335 		} else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
2336 			/* FreeSync 2 HDR - set border color to green */
2337 			color->color_g_y = color_value;
2338 		}
2339 		break;
2340 	default:
2341 		/* SDR - set border color to Gray */
2342 		color->color_r_cr = color_value/2;
2343 		color->color_b_cb = color_value/2;
2344 		color->color_g_y = color_value/2;
2345 		break;
2346 	}
2347 }
2348 
dcn10_update_dpp(struct dpp * dpp,struct dc_plane_state * plane_state)2349 static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
2350 {
2351 	struct dc_bias_and_scale bns_params = {0};
2352 
2353 	// program the input csc
2354 	dpp->funcs->dpp_setup(dpp,
2355 			plane_state->format,
2356 			EXPANSION_MODE_ZERO,
2357 			plane_state->input_csc_color_matrix,
2358 			plane_state->color_space,
2359 			NULL);
2360 
2361 	//set scale and bias registers
2362 	build_prescale_params(&bns_params, plane_state);
2363 	if (dpp->funcs->dpp_program_bias_and_scale)
2364 		dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
2365 }
2366 
/* Build the blend configuration for this pipe's plane and (on full update)
 * rebuild its position in the OPP's MPC tree; on non-full updates only the
 * blend settings are refreshed.
 */
void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct mpcc_blnd_cfg blnd_cfg = {{0}};
	/* Per-pixel alpha only matters when another pipe blends below us. */
	bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
	int mpcc_id;
	struct mpcc *new_mpcc;
	struct mpc *mpc = dc->res_pool->mpc;
	struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);

	/* Background/black color: debug visual-confirm overrides the value
	 * derived from the stream's output colorspace.
	 */
	if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) {
		hws->funcs.get_hdr_visual_confirm_color(
				pipe_ctx, &blnd_cfg.black_color);
	} else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) {
		hws->funcs.get_surface_visual_confirm_color(
				pipe_ctx, &blnd_cfg.black_color);
	} else {
		color_space_to_black_color(
				dc, pipe_ctx->stream->output_color_space,
				&blnd_cfg.black_color);
	}

	blnd_cfg.overlap_only = false;
	blnd_cfg.global_gain = 0xff;

	/* Select the alpha blend mode from the plane's alpha flags. */
	if (per_pixel_alpha && pipe_ctx->plane_state->global_alpha) {
		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
		blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
	} else if (per_pixel_alpha) {
		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
	} else {
		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
	}

	if (pipe_ctx->plane_state->global_alpha)
		blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
	else
		blnd_cfg.global_alpha = 0xff;

	/* DCN1.0 has output CM before MPC which seems to screw with
	 * pre-multiplied alpha.
	 */
	blnd_cfg.pre_multiplied_alpha = is_rgb_cspace(
			pipe_ctx->stream->output_color_space)
					&& per_pixel_alpha;


	/*
	 * TODO: remove hack
	 * Note: currently there is a bug in init_hw such that
	 * on resume from hibernate, BIOS sets up MPCC0, and
	 * we do mpcc_remove but the mpcc cannot go to idle
	 * after remove. This cause us to pick mpcc1 here,
	 * which causes a pstate hang for yet unknown reason.
	 */
	mpcc_id = hubp->inst;

	/* If there is no full update, don't need to touch MPC tree*/
	if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
		mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
		return;
	}

	/* check if this MPCC is already being used */
	new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
	/* remove MPCC if being used */
	if (new_mpcc != NULL)
		mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
	else
		if (dc->debug.sanity_checks)
			mpc->funcs->assert_mpcc_idle_before_connect(
					dc->res_pool->mpc, mpcc_id);

	/* Call MPC to insert new plane */
	new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
			mpc_tree_params,
			&blnd_cfg,
			NULL,
			NULL,
			hubp->inst,
			mpcc_id);

	ASSERT(new_mpcc != NULL);

	/* Record the routing on the HUBP so later updates find it. */
	hubp->opp_id = pipe_ctx->stream_res.opp->inst;
	hubp->mpcc_id = mpcc_id;
}
2455 
update_scaler(struct pipe_ctx * pipe_ctx)2456 static void update_scaler(struct pipe_ctx *pipe_ctx)
2457 {
2458 	bool per_pixel_alpha =
2459 			pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2460 
2461 	pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
2462 	pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
2463 	/* scaler configuration */
2464 	pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
2465 			pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
2466 }
2467 
dcn10_update_dchubp_dpp(struct dc * dc,struct pipe_ctx * pipe_ctx,struct dc_state * context)2468 static void dcn10_update_dchubp_dpp(
2469 	struct dc *dc,
2470 	struct pipe_ctx *pipe_ctx,
2471 	struct dc_state *context)
2472 {
2473 	struct dce_hwseq *hws = dc->hwseq;
2474 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
2475 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
2476 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
2477 	struct plane_size size = plane_state->plane_size;
2478 	unsigned int compat_level = 0;
2479 	bool should_divided_by_2 = false;
2480 
2481 	/* depends on DML calculation, DPP clock value may change dynamically */
2482 	/* If request max dpp clk is lower than current dispclk, no need to
2483 	 * divided by 2
2484 	 */
2485 	if (plane_state->update_flags.bits.full_update) {
2486 
2487 		/* new calculated dispclk, dppclk are stored in
2488 		 * context->bw_ctx.bw.dcn.clk.dispclk_khz / dppclk_khz. current
2489 		 * dispclk, dppclk are from dc->clk_mgr->clks.dispclk_khz.
2490 		 * dcn_validate_bandwidth compute new dispclk, dppclk.
2491 		 * dispclk will put in use after optimize_bandwidth when
2492 		 * ramp_up_dispclk_with_dpp is called.
2493 		 * there are two places for dppclk be put in use. One location
2494 		 * is the same as the location as dispclk. Another is within
2495 		 * update_dchubp_dpp which happens between pre_bandwidth and
2496 		 * optimize_bandwidth.
2497 		 * dppclk updated within update_dchubp_dpp will cause new
2498 		 * clock values of dispclk and dppclk not be in use at the same
2499 		 * time. when clocks are decreased, this may cause dppclk is
2500 		 * lower than previous configuration and let pipe stuck.
2501 		 * for example, eDP + external dp,  change resolution of DP from
2502 		 * 1920x1080x144hz to 1280x960x60hz.
2503 		 * before change: dispclk = 337889 dppclk = 337889
2504 		 * change mode, dcn_validate_bandwidth calculate
2505 		 *                dispclk = 143122 dppclk = 143122
2506 		 * update_dchubp_dpp be executed before dispclk be updated,
2507 		 * dispclk = 337889, but dppclk use new value dispclk /2 =
2508 		 * 168944. this will cause pipe pstate warning issue.
2509 		 * solution: between pre_bandwidth and optimize_bandwidth, while
2510 		 * dispclk is going to be decreased, keep dppclk = dispclk
2511 		 **/
2512 		if (context->bw_ctx.bw.dcn.clk.dispclk_khz <
2513 				dc->clk_mgr->clks.dispclk_khz)
2514 			should_divided_by_2 = false;
2515 		else
2516 			should_divided_by_2 =
2517 					context->bw_ctx.bw.dcn.clk.dppclk_khz <=
2518 					dc->clk_mgr->clks.dispclk_khz / 2;
2519 
2520 		dpp->funcs->dpp_dppclk_control(
2521 				dpp,
2522 				should_divided_by_2,
2523 				true);
2524 
2525 		if (dc->res_pool->dccg)
2526 			dc->res_pool->dccg->funcs->update_dpp_dto(
2527 					dc->res_pool->dccg,
2528 					dpp->inst,
2529 					pipe_ctx->plane_res.bw.dppclk_khz);
2530 		else
2531 			dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
2532 						dc->clk_mgr->clks.dispclk_khz / 2 :
2533 							dc->clk_mgr->clks.dispclk_khz;
2534 	}
2535 
2536 	/* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
2537 	 * VTG is within DCHUBBUB which is commond block share by each pipe HUBP.
2538 	 * VTG is 1:1 mapping with OTG. Each pipe HUBP will select which VTG
2539 	 */
2540 	if (plane_state->update_flags.bits.full_update) {
2541 		hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);
2542 
2543 		hubp->funcs->hubp_setup(
2544 			hubp,
2545 			&pipe_ctx->dlg_regs,
2546 			&pipe_ctx->ttu_regs,
2547 			&pipe_ctx->rq_regs,
2548 			&pipe_ctx->pipe_dlg_param);
2549 		hubp->funcs->hubp_setup_interdependent(
2550 			hubp,
2551 			&pipe_ctx->dlg_regs,
2552 			&pipe_ctx->ttu_regs);
2553 	}
2554 
2555 	size.surface_size = pipe_ctx->plane_res.scl_data.viewport;
2556 
2557 	if (plane_state->update_flags.bits.full_update ||
2558 		plane_state->update_flags.bits.bpp_change)
2559 		dcn10_update_dpp(dpp, plane_state);
2560 
2561 	if (plane_state->update_flags.bits.full_update ||
2562 		plane_state->update_flags.bits.per_pixel_alpha_change ||
2563 		plane_state->update_flags.bits.global_alpha_change)
2564 		hws->funcs.update_mpcc(dc, pipe_ctx);
2565 
2566 	if (plane_state->update_flags.bits.full_update ||
2567 		plane_state->update_flags.bits.per_pixel_alpha_change ||
2568 		plane_state->update_flags.bits.global_alpha_change ||
2569 		plane_state->update_flags.bits.scaling_change ||
2570 		plane_state->update_flags.bits.position_change) {
2571 		update_scaler(pipe_ctx);
2572 	}
2573 
2574 	if (plane_state->update_flags.bits.full_update ||
2575 		plane_state->update_flags.bits.scaling_change ||
2576 		plane_state->update_flags.bits.position_change) {
2577 		hubp->funcs->mem_program_viewport(
2578 			hubp,
2579 			&pipe_ctx->plane_res.scl_data.viewport,
2580 			&pipe_ctx->plane_res.scl_data.viewport_c);
2581 	}
2582 
2583 	if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
2584 		dc->hwss.set_cursor_position(pipe_ctx);
2585 		dc->hwss.set_cursor_attribute(pipe_ctx);
2586 
2587 		if (dc->hwss.set_cursor_sdr_white_level)
2588 			dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
2589 	}
2590 
2591 	if (plane_state->update_flags.bits.full_update) {
2592 		/*gamut remap*/
2593 		dc->hwss.program_gamut_remap(pipe_ctx);
2594 
2595 		dc->hwss.program_output_csc(dc,
2596 				pipe_ctx,
2597 				pipe_ctx->stream->output_color_space,
2598 				pipe_ctx->stream->csc_color_matrix.matrix,
2599 				pipe_ctx->stream_res.opp->inst);
2600 	}
2601 
2602 	if (plane_state->update_flags.bits.full_update ||
2603 		plane_state->update_flags.bits.pixel_format_change ||
2604 		plane_state->update_flags.bits.horizontal_mirror_change ||
2605 		plane_state->update_flags.bits.rotation_change ||
2606 		plane_state->update_flags.bits.swizzle_change ||
2607 		plane_state->update_flags.bits.dcc_change ||
2608 		plane_state->update_flags.bits.bpp_change ||
2609 		plane_state->update_flags.bits.scaling_change ||
2610 		plane_state->update_flags.bits.plane_size_change) {
2611 		hubp->funcs->hubp_program_surface_config(
2612 			hubp,
2613 			plane_state->format,
2614 			&plane_state->tiling_info,
2615 			&size,
2616 			plane_state->rotation,
2617 			&plane_state->dcc,
2618 			plane_state->horizontal_mirror,
2619 			compat_level);
2620 	}
2621 
2622 	hubp->power_gated = false;
2623 
2624 	hws->funcs.update_plane_addr(dc, pipe_ctx);
2625 
2626 	if (is_pipe_tree_visible(pipe_ctx))
2627 		hubp->funcs->set_blank(hubp, false);
2628 }
2629 
void dcn10_blank_pixel_data(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		bool blank)
{
	enum dc_color_space color_space;
	struct tg_color black_color = {0};
	struct stream_resource *stream_res = &pipe_ctx->stream_res;
	struct dc_stream_state *stream = pipe_ctx->stream;

	/* Derive the OTG blank color from the stream's output color space. */
	color_space = stream->output_color_space;
	color_space_to_black_color(dc, color_space, &black_color);

	/*
	 * The way 420 is packed, 2 channels carry Y component, 1 channel
	 * alternate between Cb and Cr, so both channels need the pixel
	 * value for Y.
	 */
	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
		black_color.color_r_cr = black_color.color_g_y;

	if (stream_res->tg->funcs->set_blank_color)
		stream_res->tg->funcs->set_blank_color(
				stream_res->tg,
				&black_color);

	if (blank) {
		/* Blanking: kill ABM first, then blank the OTG once we are
		 * safely inside VBLANK.
		 */
		dc->hwss.set_abm_immediate_disable(pipe_ctx);
		if (stream_res->tg->funcs->set_blank) {
			stream_res->tg->funcs->wait_for_state(stream_res->tg, CRTC_STATE_VBLANK);
			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
		}
	} else {
		/* Unblanking: enable OTG output, then restore the stream's
		 * ABM level if an ABM instance is attached.
		 */
		if (stream_res->tg->funcs->set_blank)
			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
		if (stream_res->abm) {
			dc->hwss.set_pipe(pipe_ctx);
			stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
		}
	}
}
2673 
dcn10_set_hdr_multiplier(struct pipe_ctx * pipe_ctx)2674 void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
2675 {
2676 	struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
2677 	uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
2678 	struct custom_float_format fmt;
2679 
2680 	fmt.exponenta_bits = 6;
2681 	fmt.mantissa_bits = 12;
2682 	fmt.sign = true;
2683 
2684 
2685 	if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0
2686 		convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
2687 
2688 	pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
2689 			pipe_ctx->plane_res.dpp, hw_mult);
2690 }
2691 
/*
 * Program a single pipe's front end: enable the plane on full updates,
 * program HUBP/DPP, HDR multiplier and transfer functions, in that order.
 */
void dcn10_program_pipe(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;

	/* A full update means the plane needs its enable sequence first. */
	if (pipe_ctx->plane_state->update_flags.bits.full_update)
		dcn10_enable_plane(dc, pipe_ctx, context);

	dcn10_update_dchubp_dpp(dc, pipe_ctx, context);

	hws->funcs.set_hdr_multiplier(pipe_ctx);

	/* Degamma/input transfer func: reprogram on full, transfer-func or
	 * gamma changes only.
	 */
	if (pipe_ctx->plane_state->update_flags.bits.full_update ||
			pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
			pipe_ctx->plane_state->update_flags.bits.gamma_change)
		hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);

	/* dcn10_translate_regamma_to_hw_format takes 750us to finish
	 * only do gamma programming for full update.
	 * TODO: This can be further optimized/cleaned up
	 * Always call this for now since it does memcmp inside before
	 * doing heavy calculation and programming
	 */
	if (pipe_ctx->plane_state->update_flags.bits.full_update)
		hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
}
2720 
/*
 * Walk one MPC blending tree top-down: program OTG-level state on the root,
 * then each pipe's front end, recursing through bottom_pipe links.
 */
static void dcn10_program_all_pipe_in_tree(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;

	/* Only the tree root (no MPC parent) programs OTG/VTG-level state. */
	if (pipe_ctx->top_pipe == NULL) {
		bool blank = !is_pipe_tree_visible(pipe_ctx);

		pipe_ctx->stream_res.tg->funcs->program_global_sync(
				pipe_ctx->stream_res.tg,
				pipe_ctx->pipe_dlg_param.vready_offset,
				pipe_ctx->pipe_dlg_param.vstartup_start,
				pipe_ctx->pipe_dlg_param.vupdate_offset,
				pipe_ctx->pipe_dlg_param.vupdate_width);

		pipe_ctx->stream_res.tg->funcs->set_vtg_params(
				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);

		if (hws->funcs.setup_vupdate_interrupt)
			hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);

		/* Blank the OTG when nothing in the tree is visible. */
		hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
	}

	if (pipe_ctx->plane_state != NULL)
		hws->funcs.program_pipe(dc, pipe_ctx, context);

	/* Recurse down the blending chain; the self-reference check guards
	 * against a malformed (cyclic) bottom_pipe link.
	 */
	if (pipe_ctx->bottom_pipe != NULL && pipe_ctx->bottom_pipe != pipe_ctx)
		dcn10_program_all_pipe_in_tree(dc, pipe_ctx->bottom_pipe, context);
}
2753 
dcn10_find_top_pipe_for_stream(struct dc * dc,struct dc_state * context,const struct dc_stream_state * stream)2754 static struct pipe_ctx *dcn10_find_top_pipe_for_stream(
2755 		struct dc *dc,
2756 		struct dc_state *context,
2757 		const struct dc_stream_state *stream)
2758 {
2759 	int i;
2760 
2761 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
2762 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2763 		struct pipe_ctx *old_pipe_ctx =
2764 				&dc->current_state->res_ctx.pipe_ctx[i];
2765 
2766 		if (!pipe_ctx->plane_state && !old_pipe_ctx->plane_state)
2767 			continue;
2768 
2769 		if (pipe_ctx->stream != stream)
2770 			continue;
2771 
2772 		if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe)
2773 			return pipe_ctx;
2774 	}
2775 	return NULL;
2776 }
2777 
/*
 * Early-disconnect MPCCs on pipes that are losing a pipe split before the
 * main programming pass, then reprogram scaler/viewport on the surviving
 * top pipes so half the screen is not left black after unlock.
 * Returns true if at least one MPCC was disconnected.
 */
bool dcn10_disconnect_pipes(
		struct dc *dc,
		struct dc_state *context)
{
		bool found_pipe = false;
		int i, j;
		struct dce_hwseq *hws = dc->hwseq;
		struct dc_state *old_ctx = dc->current_state;
		bool mpcc_disconnected = false;
		struct pipe_ctx *old_pipe;
		struct pipe_ctx *new_pipe;
		DC_LOGGER_INIT(dc->ctx->logger);

		/* Set pipe update flags and lock pipes */
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
			new_pipe = &context->res_ctx.pipe_ctx[i];
			new_pipe->update_flags.raw = 0;

			if (!old_pipe->plane_state && !new_pipe->plane_state)
				continue;

			/* The slot loses its plane in the new context. */
			if (old_pipe->plane_state && !new_pipe->plane_state)
				new_pipe->update_flags.bits.disable = 1;

			/* Check for scl update */
			if (memcmp(&old_pipe->plane_res.scl_data, &new_pipe->plane_res.scl_data, sizeof(struct scaler_data)))
					new_pipe->update_flags.bits.scaler = 1;

			/* Check for vp update */
			if (memcmp(&old_pipe->plane_res.scl_data.viewport, &new_pipe->plane_res.scl_data.viewport, sizeof(struct rect))
					|| memcmp(&old_pipe->plane_res.scl_data.viewport_c,
						&new_pipe->plane_res.scl_data.viewport_c, sizeof(struct rect)))
				new_pipe->update_flags.bits.viewport = 1;

		}

		if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
			/* Disconnect mpcc here only if losing pipe split*/
			for (i = 0; i < dc->res_pool->pipe_count; i++) {
				if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable &&
					old_ctx->res_ctx.pipe_ctx[i].top_pipe) {

					/* Find the top pipe in the new ctx for the bottom pipe that we
					 * want to remove by comparing the streams and planes. If both
					 * pipes are being disabled then do it in the regular pipe
					 * programming sequence
					 */
					for (j = 0; j < dc->res_pool->pipe_count; j++) {
						if (old_ctx->res_ctx.pipe_ctx[i].top_pipe->stream == context->res_ctx.pipe_ctx[j].stream &&
							old_ctx->res_ctx.pipe_ctx[i].top_pipe->plane_state == context->res_ctx.pipe_ctx[j].plane_state &&
							!context->res_ctx.pipe_ctx[j].top_pipe &&
							!context->res_ctx.pipe_ctx[j].update_flags.bits.disable) {
							found_pipe = true;
							break;
						}
					}

					// Disconnect if the top pipe lost it's pipe split
					if (found_pipe && !context->res_ctx.pipe_ctx[j].bottom_pipe) {
						hws->funcs.plane_atomic_disconnect(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
						DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
						mpcc_disconnected = true;
					}
				}
				/* Reset search state for the next pipe slot. */
				found_pipe = false;
			}
		}

		if (mpcc_disconnected) {
			for (i = 0; i < dc->res_pool->pipe_count; i++) {
				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
				struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
				struct dc_plane_state *plane_state = pipe_ctx->plane_state;
				struct hubp *hubp = pipe_ctx->plane_res.hubp;

				/* NOTE(review): pipe_ctx is the address of an array
				 * element and can never be NULL; only the
				 * plane_state/stream checks are meaningful here.
				 */
				if (!pipe_ctx || !plane_state || !pipe_ctx->stream)
					continue;

				// Only update scaler and viewport here if we lose a pipe split.
				// This is to prevent half the screen from being black when we
				// unlock after disconnecting MPCC.
				if (!(old_pipe && !pipe_ctx->top_pipe &&
					!pipe_ctx->bottom_pipe && old_pipe->bottom_pipe))
					continue;

				if (pipe_ctx->update_flags.raw || pipe_ctx->plane_state->update_flags.raw || pipe_ctx->stream->update_flags.raw) {
					if (pipe_ctx->update_flags.bits.scaler ||
						plane_state->update_flags.bits.scaling_change ||
						plane_state->update_flags.bits.position_change ||
						plane_state->update_flags.bits.per_pixel_alpha_change ||
						pipe_ctx->stream->update_flags.bits.scaling) {

						pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->plane_state->per_pixel_alpha;
						ASSERT(pipe_ctx->plane_res.scl_data.lb_params.depth == LB_PIXEL_DEPTH_30BPP);
						/* scaler configuration */
						pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
						pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
					}

					if (pipe_ctx->update_flags.bits.viewport ||
						(context == dc->current_state && plane_state->update_flags.bits.position_change) ||
						(context == dc->current_state && plane_state->update_flags.bits.scaling_change) ||
						(context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) {

						hubp->funcs->mem_program_viewport(
							hubp,
							&pipe_ctx->plane_res.scl_data.viewport,
							&pipe_ctx->plane_res.scl_data.viewport_c);
					}
				}
			}
		}
	return mpcc_disconnected;
}
2893 
dcn10_wait_for_pending_cleared(struct dc * dc,struct dc_state * context)2894 void dcn10_wait_for_pending_cleared(struct dc *dc,
2895 		struct dc_state *context)
2896 {
2897 		struct pipe_ctx *pipe_ctx;
2898 		struct timing_generator *tg;
2899 		int i;
2900 
2901 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
2902 			pipe_ctx = &context->res_ctx.pipe_ctx[i];
2903 			tg = pipe_ctx->stream_res.tg;
2904 
2905 			/*
2906 			 * Only wait for top pipe's tg penindg bit
2907 			 * Also skip if pipe is disabled.
2908 			 */
2909 			if (pipe_ctx->top_pipe ||
2910 			    !pipe_ctx->stream || !pipe_ctx->plane_state ||
2911 			    !tg->funcs->is_tg_enabled(tg))
2912 				continue;
2913 
2914 			/*
2915 			 * Wait for VBLANK then VACTIVE to ensure we get VUPDATE.
2916 			 * For some reason waiting for OTG_UPDATE_PENDING cleared
2917 			 * seems to not trigger the update right away, and if we
2918 			 * lock again before VUPDATE then we don't get a separated
2919 			 * operation.
2920 			 */
2921 			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
2922 			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
2923 		}
2924 }
2925 
/*
 * Apply the new context for one stream's surfaces: blank the OTG when all
 * planes are removed, disconnect MPCCs no longer feeding this TG, program
 * the remaining pipe tree, and refresh DLG/TTU on other active pipes.
 */
void dcn10_apply_ctx_for_surface(
		struct dc *dc,
		const struct dc_stream_state *stream,
		int num_planes,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	int i;
	struct timing_generator *tg;
	uint32_t underflow_check_delay_us;
	bool interdependent_update = false;
	struct pipe_ctx *top_pipe_to_program =
			dcn10_find_top_pipe_for_stream(dc, context, stream);
	DC_LOGGER_INIT(dc->ctx->logger);

	// Clear pipe_ctx flag
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
		pipe_ctx->update_flags.raw = 0;
	}

	/* Nothing to do when the stream has no root pipe in either state. */
	if (!top_pipe_to_program)
		return;

	tg = top_pipe_to_program->stream_res.tg;

	interdependent_update = top_pipe_to_program->plane_state &&
		top_pipe_to_program->plane_state->update_flags.bits.full_update;

	/* Optional underflow debug hook: assert before and after an
	 * artificial delay; 0xFFFFFFFF disables the check entirely.
	 */
	underflow_check_delay_us = dc->debug.underflow_assert_delay_us;

	if (underflow_check_delay_us != 0xFFFFFFFF && hws->funcs.did_underflow_occur)
		ASSERT(hws->funcs.did_underflow_occur(dc, top_pipe_to_program));

	if (underflow_check_delay_us != 0xFFFFFFFF)
		udelay(underflow_check_delay_us);

	if (underflow_check_delay_us != 0xFFFFFFFF && hws->funcs.did_underflow_occur)
		ASSERT(hws->funcs.did_underflow_occur(dc, top_pipe_to_program));

	if (num_planes == 0) {
		/* OTG blank before remove all front end */
		hws->funcs.blank_pixel_data(dc, top_pipe_to_program, true);
	}

	/* Disconnect unused mpcc */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
		struct pipe_ctx *old_pipe_ctx =
				&dc->current_state->res_ctx.pipe_ctx[i];

		/* Disconnect any pipe that was feeding this TG but has lost
		 * its plane or moved to a different TG in the new context.
		 */
		if ((!pipe_ctx->plane_state ||
		     pipe_ctx->stream_res.tg != old_pipe_ctx->stream_res.tg) &&
		    old_pipe_ctx->plane_state &&
		    old_pipe_ctx->stream_res.tg == tg) {

			hws->funcs.plane_atomic_disconnect(dc, old_pipe_ctx);
			pipe_ctx->update_flags.bits.disable = 1;

			DC_LOG_DC("Reset mpcc for pipe %d\n",
					old_pipe_ctx->pipe_idx);
		}
	}

	if (num_planes > 0)
		dcn10_program_all_pipe_in_tree(dc, top_pipe_to_program, context);

	/* Program secondary blending tree and writeback pipes */
	if ((stream->num_wb_info > 0) && (hws->funcs.program_all_writeback_pipes_in_tree))
		hws->funcs.program_all_writeback_pipes_in_tree(dc, stream, context);
	/* On a full update, reprogram DLG/TTU interdependence on every other
	 * active pipe as well.
	 */
	if (interdependent_update)
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
			/* Skip inactive pipes and ones already updated */
			if (!pipe_ctx->stream || pipe_ctx->stream == stream ||
			    !pipe_ctx->plane_state || !tg->funcs->is_tg_enabled(tg))
				continue;

			pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent(
				pipe_ctx->plane_res.hubp,
				&pipe_ctx->dlg_regs,
				&pipe_ctx->ttu_regs);
		}
}
3010 
/*
 * Post-unlock cleanup: apply the false-underflow workaround on plane-less
 * OTGs, power down front ends flagged for disable, then lower bandwidth
 * once if anything was disabled.
 */
void dcn10_post_unlock_program_front_end(
		struct dc *dc,
		struct dc_state *context)
{
	int i;

	DC_LOGGER_INIT(dc->ctx->logger);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		if (!pipe_ctx->top_pipe &&
			!pipe_ctx->prev_odm_pipe &&
			pipe_ctx->stream) {
			struct timing_generator *tg = pipe_ctx->stream_res.tg;

			/* NOTE(review): stream_status is indexed with the pipe
			 * loop index i here — confirm pipe index and stream
			 * status index always coincide for root pipes.
			 */
			if (context->stream_status[i].plane_count == 0)
				false_optc_underflow_wa(dc, pipe_ctx->stream, tg);
		}
	}

	/* Disable all flagged planes first, then run a single
	 * optimize_bandwidth pass if any pipe was actually disabled.
	 */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
			dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);

	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) {
			dc->hwss.optimize_bandwidth(dc, context);
			break;
		}

	/* Hardware workaround DEGVIDCN10-254, when flagged for this hwseq. */
	if (dc->hwseq->wa.DEGVIDCN10_254)
		hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
}
3045 
dcn10_stereo_hw_frame_pack_wa(struct dc * dc,struct dc_state * context)3046 static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *context)
3047 {
3048 	uint8_t i;
3049 
3050 	for (i = 0; i < context->stream_count; i++) {
3051 		if (context->streams[i]->timing.timing_3d_format
3052 				== TIMING_3D_FORMAT_HW_FRAME_PACKING) {
3053 			/*
3054 			 * Disable stutter
3055 			 */
3056 			hubbub1_allow_self_refresh_control(dc->res_pool->hubbub, false);
3057 			break;
3058 		}
3059 	}
3060 }
3061 
/*
 * Pre-mode-set bandwidth programming: update clocks (third argument false
 * here, true in the optimize path — presumably "safe to lower", TODO
 * confirm), then program watermarks and apply the stereo stutter WA.
 */
void dcn10_prepare_bandwidth(
		struct dc *dc,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);

	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
		/* No streams left: PHY clock can be reported as zero. */
		if (context->stream_count == 0)
			context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;

		dc->clk_mgr->funcs->update_clocks(
				dc->clk_mgr,
				context,
				false);
	}

	/* Record whether watermarks still require an optimize pass later. */
	dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
			&context->bw_ctx.bw.dcn.watermarks,
			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
			true);
	dcn10_stereo_hw_frame_pack_wa(dc, context);

	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
		dcn_bw_notify_pplib_of_wm_ranges(dc);

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
}
3094 
/*
 * Post-mode-set bandwidth programming: same sequence as
 * dcn10_prepare_bandwidth but with the update_clocks third argument true
 * (the prepare path passes false), allowing clocks to settle downward.
 */
void dcn10_optimize_bandwidth(
		struct dc *dc,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);

	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
		/* No streams left: PHY clock can be reported as zero. */
		if (context->stream_count == 0)
			context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;

		dc->clk_mgr->funcs->update_clocks(
				dc->clk_mgr,
				context,
				true);
	}

	hubbub->funcs->program_watermarks(hubbub,
			&context->bw_ctx.bw.dcn.watermarks,
			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
			true);

	dcn10_stereo_hw_frame_pack_wa(dc, context);

	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
		dcn_bw_notify_pplib_of_wm_ranges(dc);

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
}
3128 
dcn10_set_drr(struct pipe_ctx ** pipe_ctx,int num_pipes,unsigned int vmin,unsigned int vmax,unsigned int vmid,unsigned int vmid_frame_number)3129 void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
3130 		int num_pipes, unsigned int vmin, unsigned int vmax,
3131 		unsigned int vmid, unsigned int vmid_frame_number)
3132 {
3133 	int i = 0;
3134 	struct drr_params params = {0};
3135 	// DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
3136 	unsigned int event_triggers = 0x800;
3137 	// Note DRR trigger events are generated regardless of whether num frames met.
3138 	unsigned int num_frames = 2;
3139 
3140 	params.vertical_total_max = vmax;
3141 	params.vertical_total_min = vmin;
3142 	params.vertical_total_mid = vmid;
3143 	params.vertical_total_mid_frame_num = vmid_frame_number;
3144 
3145 	/* TODO: If multiple pipes are to be supported, you need
3146 	 * some GSL stuff. Static screen triggers may be programmed differently
3147 	 * as well.
3148 	 */
3149 	for (i = 0; i < num_pipes; i++) {
3150 		pipe_ctx[i]->stream_res.tg->funcs->set_drr(
3151 			pipe_ctx[i]->stream_res.tg, &params);
3152 		if (vmax != 0 && vmin != 0)
3153 			pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
3154 					pipe_ctx[i]->stream_res.tg,
3155 					event_triggers, num_frames);
3156 	}
3157 }
3158 
dcn10_get_position(struct pipe_ctx ** pipe_ctx,int num_pipes,struct crtc_position * position)3159 void dcn10_get_position(struct pipe_ctx **pipe_ctx,
3160 		int num_pipes,
3161 		struct crtc_position *position)
3162 {
3163 	int i = 0;
3164 
3165 	/* TODO: handle pipes > 1
3166 	 */
3167 	for (i = 0; i < num_pipes; i++)
3168 		pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
3169 }
3170 
dcn10_set_static_screen_control(struct pipe_ctx ** pipe_ctx,int num_pipes,const struct dc_static_screen_params * params)3171 void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
3172 		int num_pipes, const struct dc_static_screen_params *params)
3173 {
3174 	unsigned int i;
3175 	unsigned int triggers = 0;
3176 
3177 	if (params->triggers.surface_update)
3178 		triggers |= 0x80;
3179 	if (params->triggers.cursor_update)
3180 		triggers |= 0x2;
3181 	if (params->triggers.force_trigger)
3182 		triggers |= 0x1;
3183 
3184 	for (i = 0; i < num_pipes; i++)
3185 		pipe_ctx[i]->stream_res.tg->funcs->
3186 			set_static_screen_control(pipe_ctx[i]->stream_res.tg,
3187 					triggers, params->num_frames);
3188 }
3189 
dcn10_config_stereo_parameters(struct dc_stream_state * stream,struct crtc_stereo_flags * flags)3190 static void dcn10_config_stereo_parameters(
3191 		struct dc_stream_state *stream, struct crtc_stereo_flags *flags)
3192 {
3193 	enum view_3d_format view_format = stream->view_format;
3194 	enum dc_timing_3d_format timing_3d_format =\
3195 			stream->timing.timing_3d_format;
3196 	bool non_stereo_timing = false;
3197 
3198 	if (timing_3d_format == TIMING_3D_FORMAT_NONE ||
3199 		timing_3d_format == TIMING_3D_FORMAT_SIDE_BY_SIDE ||
3200 		timing_3d_format == TIMING_3D_FORMAT_TOP_AND_BOTTOM)
3201 		non_stereo_timing = true;
3202 
3203 	if (non_stereo_timing == false &&
3204 		view_format == VIEW_3D_FORMAT_FRAME_SEQUENTIAL) {
3205 
3206 		flags->PROGRAM_STEREO         = 1;
3207 		flags->PROGRAM_POLARITY       = 1;
3208 		if (timing_3d_format == TIMING_3D_FORMAT_INBAND_FA ||
3209 			timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA ||
3210 			timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3211 			enum display_dongle_type dongle = \
3212 					stream->link->ddc->dongle_type;
3213 			if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
3214 				dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER ||
3215 				dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
3216 				flags->DISABLE_STEREO_DP_SYNC = 1;
3217 		}
3218 		flags->RIGHT_EYE_POLARITY =\
3219 				stream->timing.flags.RIGHT_EYE_3D_POLARITY;
3220 		if (timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
3221 			flags->FRAME_PACKED = 1;
3222 	}
3223 
3224 	return;
3225 }
3226 
dcn10_setup_stereo(struct pipe_ctx * pipe_ctx,struct dc * dc)3227 void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
3228 {
3229 	struct crtc_stereo_flags flags = { 0 };
3230 	struct dc_stream_state *stream = pipe_ctx->stream;
3231 
3232 	dcn10_config_stereo_parameters(stream, &flags);
3233 
3234 	if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3235 		if (!dc_set_generic_gpio_for_stereo(true, dc->ctx->gpio_service))
3236 			dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3237 	} else {
3238 		dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3239 	}
3240 
3241 	pipe_ctx->stream_res.opp->funcs->opp_program_stereo(
3242 		pipe_ctx->stream_res.opp,
3243 		flags.PROGRAM_STEREO == 1 ? true:false,
3244 		&stream->timing);
3245 
3246 	pipe_ctx->stream_res.tg->funcs->program_stereo(
3247 		pipe_ctx->stream_res.tg,
3248 		&stream->timing,
3249 		&flags);
3250 
3251 	return;
3252 }
3253 
get_hubp_by_inst(struct resource_pool * res_pool,int mpcc_inst)3254 static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
3255 {
3256 	int i;
3257 
3258 	for (i = 0; i < res_pool->pipe_count; i++) {
3259 		if (res_pool->hubps[i]->inst == mpcc_inst)
3260 			return res_pool->hubps[i];
3261 	}
3262 	ASSERT(false);
3263 	return NULL;
3264 }
3265 
dcn10_wait_for_mpcc_disconnect(struct dc * dc,struct resource_pool * res_pool,struct pipe_ctx * pipe_ctx)3266 void dcn10_wait_for_mpcc_disconnect(
3267 		struct dc *dc,
3268 		struct resource_pool *res_pool,
3269 		struct pipe_ctx *pipe_ctx)
3270 {
3271 	struct dce_hwseq *hws = dc->hwseq;
3272 	int mpcc_inst;
3273 
3274 	if (dc->debug.sanity_checks) {
3275 		hws->funcs.verify_allow_pstate_change_high(dc);
3276 	}
3277 
3278 	if (!pipe_ctx->stream_res.opp)
3279 		return;
3280 
3281 	for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
3282 		if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
3283 			struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
3284 
3285 			if (pipe_ctx->stream_res.tg &&
3286 				pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
3287 				res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
3288 			pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
3289 			hubp->funcs->set_blank(hubp, true);
3290 		}
3291 	}
3292 
3293 	if (dc->debug.sanity_checks) {
3294 		hws->funcs.verify_allow_pstate_change_high(dc);
3295 	}
3296 
3297 }
3298 
/*
 * Stub implementation of the display_power_gating hook: performs no
 * hardware access and always reports success. All parameters are
 * intentionally unused.
 */
bool dcn10_dummy_display_power_gating(
	struct dc *dc,
	uint8_t controller_id,
	struct dc_bios *dcb,
	enum pipe_gating_control power_gating)
{
	return true;
}
3307 
dcn10_update_pending_status(struct pipe_ctx * pipe_ctx)3308 void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
3309 {
3310 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
3311 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3312 	bool flip_pending;
3313 	struct dc *dc = plane_state->ctx->dc;
3314 
3315 	if (plane_state == NULL)
3316 		return;
3317 
3318 	flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
3319 					pipe_ctx->plane_res.hubp);
3320 
3321 	plane_state->status.is_flip_pending = plane_state->status.is_flip_pending || flip_pending;
3322 
3323 	if (!flip_pending)
3324 		plane_state->status.current_address = plane_state->status.requested_address;
3325 
3326 	if (plane_state->status.current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
3327 			tg->funcs->is_stereo_left_eye) {
3328 		plane_state->status.is_right_eye =
3329 				!tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
3330 	}
3331 
3332 	if (dc->hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied) {
3333 		struct dce_hwseq *hwseq = dc->hwseq;
3334 		struct timing_generator *tg = dc->res_pool->timing_generators[0];
3335 		unsigned int cur_frame = tg->funcs->get_frame_count(tg);
3336 
3337 		if (cur_frame != hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame) {
3338 			struct hubbub *hubbub = dc->res_pool->hubbub;
3339 
3340 			hubbub->funcs->allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
3341 			hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = false;
3342 		}
3343 	}
3344 }
3345 
dcn10_update_dchub(struct dce_hwseq * hws,struct dchub_init_data * dh_data)3346 void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
3347 {
3348 	struct hubbub *hubbub = hws->ctx->dc->res_pool->hubbub;
3349 
3350 	/* In DCN, this programming sequence is owned by the hubbub */
3351 	hubbub->funcs->update_dchub(hubbub, dh_data);
3352 }
3353 
dcn10_can_pipe_disable_cursor(struct pipe_ctx * pipe_ctx)3354 static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
3355 {
3356 	struct pipe_ctx *test_pipe;
3357 	const struct rect *r1 = &pipe_ctx->plane_res.scl_data.recout, *r2;
3358 	int r1_r = r1->x + r1->width, r1_b = r1->y + r1->height, r2_r, r2_b;
3359 
3360 	/**
3361 	 * Disable the cursor if there's another pipe above this with a
3362 	 * plane that contains this pipe's viewport to prevent double cursor
3363 	 * and incorrect scaling artifacts.
3364 	 */
3365 	for (test_pipe = pipe_ctx->top_pipe; test_pipe;
3366 	     test_pipe = test_pipe->top_pipe) {
3367 		if (!test_pipe->plane_state->visible)
3368 			continue;
3369 
3370 		r2 = &test_pipe->plane_res.scl_data.recout;
3371 		r2_r = r2->x + r2->width;
3372 		r2_b = r2->y + r2->height;
3373 
3374 		if (r1->x >= r2->x && r1->y >= r2->y && r1_r <= r2_r && r1_b <= r2_b)
3375 			return true;
3376 	}
3377 
3378 	return false;
3379 }
3380 
/*
 * Program the HW cursor position for this pipe.
 *
 * Translates the stream-space cursor position kept in dc into the plane-space
 * position the hardware expects, accounting for plane scaling, clipped source
 * viewports, negative positions (via hotspot shifting), plane rotation and
 * pipe splitting, then hands the result to both the HUBP and DPP stages.
 */
void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
{
	/* Work on a copy so the stream's cursor state is never mutated. */
	struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	struct dc_cursor_mi_param param = {
		.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
		.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
		.viewport = pipe_ctx->plane_res.scl_data.viewport,
		.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
		.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
		.rotation = pipe_ctx->plane_state->rotation,
		.mirror = pipe_ctx->plane_state->horizontal_mirror
	};
	/* Either link direction set means this pipe is part of a split. */
	bool pipe_split_on = (pipe_ctx->top_pipe != NULL) ||
		(pipe_ctx->bottom_pipe != NULL);

	int x_plane = pipe_ctx->plane_state->dst_rect.x;
	int y_plane = pipe_ctx->plane_state->dst_rect.y;
	int x_pos = pos_cpy.x;
	int y_pos = pos_cpy.y;

	/**
	 * DC cursor is stream space, HW cursor is plane space and drawn
	 * as part of the framebuffer.
	 *
	 * Cursor position can't be negative, but hotspot can be used to
	 * shift cursor out of the plane bounds. Hotspot must be smaller
	 * than the cursor size.
	 */

	/**
	 * Translate cursor from stream space to plane space.
	 *
	 * If the cursor is scaled then we need to scale the position
	 * to be in the approximately correct place. We can't do anything
	 * about the actual size being incorrect, that's a limitation of
	 * the hardware.
	 */
	x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
			pipe_ctx->plane_state->dst_rect.width;
	y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
			pipe_ctx->plane_state->dst_rect.height;

	/**
	 * If the cursor's source viewport is clipped then we need to
	 * translate the cursor to appear in the correct position on
	 * the screen.
	 *
	 * This translation isn't affected by scaling so it needs to be
	 * done *after* we adjust the position for the scale factor.
	 *
	 * This is only done by opt-in for now since there are still
	 * some usecases like tiled display that might enable the
	 * cursor on both streams while expecting dc to clip it.
	 */
	if (pos_cpy.translate_by_source) {
		x_pos += pipe_ctx->plane_state->src_rect.x;
		y_pos += pipe_ctx->plane_state->src_rect.y;
	}

	/**
	 * If the position is negative then we need to add to the hotspot
	 * to shift the cursor outside the plane.
	 */

	if (x_pos < 0) {
		pos_cpy.x_hotspot -= x_pos;
		x_pos = 0;
	}

	if (y_pos < 0) {
		pos_cpy.y_hotspot -= y_pos;
		y_pos = 0;
	}

	/* Both coordinates are non-negative here, so the cast is safe. */
	pos_cpy.x = (uint32_t)x_pos;
	pos_cpy.y = (uint32_t)y_pos;

	/* No HW cursor over video planes; the OS draws it in SW instead. */
	if (pipe_ctx->plane_state->address.type
			== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
		pos_cpy.enable = false;

	/* An overlapping pipe above will draw the cursor; avoid doubling. */
	if (pos_cpy.enable && dcn10_can_pipe_disable_cursor(pipe_ctx))
		pos_cpy.enable = false;

	// Swap axis and mirror horizontally
	if (param.rotation == ROTATION_ANGLE_90) {
		uint32_t temp_x = pos_cpy.x;

		pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width -
				(pos_cpy.y - pipe_ctx->plane_res.scl_data.viewport.x) + pipe_ctx->plane_res.scl_data.viewport.x;
		pos_cpy.y = temp_x;
	}
	// Swap axis and mirror vertically
	else if (param.rotation == ROTATION_ANGLE_270) {
		uint32_t temp_y = pos_cpy.y;
		int viewport_height =
			pipe_ctx->plane_res.scl_data.viewport.height;

		if (pipe_split_on) {
			/* Fold the position into this half of the split. */
			if (pos_cpy.x > viewport_height) {
				pos_cpy.x = pos_cpy.x - viewport_height;
				/*
				 * NOTE(review): y is computed from the
				 * already-adjusted x on the line above, not
				 * the incoming x — confirm this ordering is
				 * intended for the split case.
				 */
				pos_cpy.y = viewport_height - pos_cpy.x;
			} else {
				pos_cpy.y = 2 * viewport_height - pos_cpy.x;
			}
		} else
			pos_cpy.y = viewport_height - pos_cpy.x;
		pos_cpy.x = temp_y;
	}
	// Mirror horizontally and vertically
	else if (param.rotation == ROTATION_ANGLE_180) {
		int viewport_width =
			pipe_ctx->plane_res.scl_data.viewport.width;
		int viewport_x =
			pipe_ctx->plane_res.scl_data.viewport.x;

		if (pipe_split_on) {
			if (pos_cpy.x >= viewport_width + viewport_x) {
				pos_cpy.x = 2 * viewport_width
						- pos_cpy.x + 2 * viewport_x;
			} else {
				uint32_t temp_x = pos_cpy.x;

				pos_cpy.x = 2 * viewport_x - pos_cpy.x;
				/*
				 * Keep the cursor on this half when it would
				 * otherwise land near the split seam within a
				 * cursor width of the edge.
				 */
				if (temp_x >= viewport_x +
					(int)hubp->curs_attr.width || pos_cpy.x
					<= (int)hubp->curs_attr.width +
					pipe_ctx->plane_state->src_rect.x) {
					pos_cpy.x = temp_x + viewport_width;
				}
			}
		} else {
			pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
		}
		pos_cpy.y = pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;
	}

	/* Program both stages: HUBP fetches the cursor, DPP blends it. */
	hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
	dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
}
3523 
dcn10_set_cursor_attribute(struct pipe_ctx * pipe_ctx)3524 void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
3525 {
3526 	struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
3527 
3528 	pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
3529 			pipe_ctx->plane_res.hubp, attributes);
3530 	pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
3531 		pipe_ctx->plane_res.dpp, attributes);
3532 }
3533 
dcn10_set_cursor_sdr_white_level(struct pipe_ctx * pipe_ctx)3534 void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
3535 {
3536 	uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
3537 	struct fixed31_32 multiplier;
3538 	struct dpp_cursor_attributes opt_attr = { 0 };
3539 	uint32_t hw_scale = 0x3c00; // 1.0 default multiplier
3540 	struct custom_float_format fmt;
3541 
3542 	if (!pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes)
3543 		return;
3544 
3545 	fmt.exponenta_bits = 5;
3546 	fmt.mantissa_bits = 10;
3547 	fmt.sign = true;
3548 
3549 	if (sdr_white_level > 80) {
3550 		multiplier = dc_fixpt_from_fraction(sdr_white_level, 80);
3551 		convert_to_custom_float_format(multiplier, &fmt, &hw_scale);
3552 	}
3553 
3554 	opt_attr.scale = hw_scale;
3555 	opt_attr.bias = 0;
3556 
3557 	pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes(
3558 			pipe_ctx->plane_res.dpp, &opt_attr);
3559 }
3560 
3561 /*
3562  * apply_front_porch_workaround  TODO FPGA still need?
3563  *
3564  * This is a workaround for a bug that has existed since R5xx and has not been
3565  * fixed keep Front porch at minimum 2 for Interlaced mode or 1 for progressive.
3566  */
apply_front_porch_workaround(struct dc_crtc_timing * timing)3567 static void apply_front_porch_workaround(
3568 	struct dc_crtc_timing *timing)
3569 {
3570 	if (timing->flags.INTERLACE == 1) {
3571 		if (timing->v_front_porch < 2)
3572 			timing->v_front_porch = 2;
3573 	} else {
3574 		if (timing->v_front_porch < 1)
3575 			timing->v_front_porch = 1;
3576 	}
3577 }
3578 
dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx * pipe_ctx)3579 int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
3580 {
3581 	const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
3582 	struct dc_crtc_timing patched_crtc_timing;
3583 	int vesa_sync_start;
3584 	int asic_blank_end;
3585 	int interlace_factor;
3586 	int vertical_line_start;
3587 
3588 	patched_crtc_timing = *dc_crtc_timing;
3589 	apply_front_porch_workaround(&patched_crtc_timing);
3590 
3591 	interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;
3592 
3593 	vesa_sync_start = patched_crtc_timing.v_addressable +
3594 			patched_crtc_timing.v_border_bottom +
3595 			patched_crtc_timing.v_front_porch;
3596 
3597 	asic_blank_end = (patched_crtc_timing.v_total -
3598 			vesa_sync_start -
3599 			patched_crtc_timing.v_border_top)
3600 			* interlace_factor;
3601 
3602 	vertical_line_start = asic_blank_end -
3603 			pipe_ctx->pipe_dlg_param.vstartup_start + 1;
3604 
3605 	return vertical_line_start;
3606 }
3607 
/*
 * Compute the two-line vertical-interrupt window anchored at VUPDATE,
 * shifted by the stream's requested periodic-interrupt line offset and
 * wrapped around v_total where necessary.
 */
void dcn10_calc_vupdate_position(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		uint32_t *start_line,
		uint32_t *end_line)
{
	const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
	int lines_offset = pipe_ctx->stream->periodic_interrupt.lines_offset;
	int vupdate_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
	int start;

	/* Pull the requested offset one line toward VUPDATE itself. */
	if (lines_offset > 0)
		lines_offset--;
	else if (lines_offset < 0)
		lines_offset++;

	start = lines_offset + vupdate_line;

	/* A negative start wraps into the previous frame. */
	if (start >= 0)
		*start_line = start;
	else
		*start_line = timing->v_total + start - 1;

	/* Window is two lines wide; wrap the end past v_total. */
	*end_line = *start_line + 2;
	if (*end_line >= timing->v_total)
		*end_line = 2;
}
3637 
/* Resolve the vline interrupt window relative to the configured reference. */
static void dcn10_cal_vline_position(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		uint32_t *start_line,
		uint32_t *end_line)
{
	switch (pipe_ctx->stream->periodic_interrupt.ref_point) {
	case START_V_UPDATE:
		/* Anchor the window at VUPDATE plus the requested offset. */
		dcn10_calc_vupdate_position(dc, pipe_ctx, start_line, end_line);
		break;
	case START_V_SYNC:
		/* vsync is line 0, so the offset is the start line itself. */
		*start_line = pipe_ctx->stream->periodic_interrupt.lines_offset;
		*end_line = *start_line + 2;
		break;
	default:
		/* Unknown reference point: programming error upstream. */
		ASSERT(0);
		break;
	}
}
3662 
dcn10_setup_periodic_interrupt(struct dc * dc,struct pipe_ctx * pipe_ctx)3663 void dcn10_setup_periodic_interrupt(
3664 		struct dc *dc,
3665 		struct pipe_ctx *pipe_ctx)
3666 {
3667 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3668 	uint32_t start_line = 0;
3669 	uint32_t end_line = 0;
3670 
3671 	dcn10_cal_vline_position(dc, pipe_ctx, &start_line, &end_line);
3672 
3673 	tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
3674 }
3675 
dcn10_setup_vupdate_interrupt(struct dc * dc,struct pipe_ctx * pipe_ctx)3676 void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
3677 {
3678 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3679 	int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3680 
3681 	if (start_line < 0) {
3682 		ASSERT(0);
3683 		start_line = 0;
3684 	}
3685 
3686 	if (tg->funcs->setup_vertical_interrupt2)
3687 		tg->funcs->setup_vertical_interrupt2(tg, start_line);
3688 }
3689 
dcn10_unblank_stream(struct pipe_ctx * pipe_ctx,struct dc_link_settings * link_settings)3690 void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
3691 		struct dc_link_settings *link_settings)
3692 {
3693 	struct encoder_unblank_param params = { { 0 } };
3694 	struct dc_stream_state *stream = pipe_ctx->stream;
3695 	struct dc_link *link = stream->link;
3696 	struct dce_hwseq *hws = link->dc->hwseq;
3697 
3698 	/* only 3 items below are used by unblank */
3699 	params.timing = pipe_ctx->stream->timing;
3700 
3701 	params.link_settings.link_rate = link_settings->link_rate;
3702 
3703 	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3704 		if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
3705 			params.timing.pix_clk_100hz /= 2;
3706 		pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, &params);
3707 	}
3708 
3709 	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
3710 		hws->funcs.edp_backlight_control(link, true);
3711 	}
3712 }
3713 
dcn10_send_immediate_sdp_message(struct pipe_ctx * pipe_ctx,const uint8_t * custom_sdp_message,unsigned int sdp_message_size)3714 void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
3715 				const uint8_t *custom_sdp_message,
3716 				unsigned int sdp_message_size)
3717 {
3718 	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3719 		pipe_ctx->stream_res.stream_enc->funcs->send_immediate_sdp_message(
3720 				pipe_ctx->stream_res.stream_enc,
3721 				custom_sdp_message,
3722 				sdp_message_size);
3723 	}
3724 }
/*
 * Validate a requested clock against the clock manager's reported range,
 * record it in the current context, and trigger a clock update.
 */
enum dc_status dcn10_set_clock(struct dc *dc,
			enum dc_clock_type clock_type,
			uint32_t clk_khz,
			uint32_t stepping)
{
	struct dc_state *context = dc->current_state;
	struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;
	struct dc_clock_config clock_cfg = {0};

	if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_clock)
		return DC_FAIL_UNSUPPORTED_1;

	dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type,
			&clock_cfg);

	/* Reject requests outside the range the clock manager reports. */
	if (clk_khz > clock_cfg.max_clock_khz)
		return DC_FAIL_CLK_EXCEED_MAX;

	if (clk_khz < clock_cfg.min_clock_khz)
		return DC_FAIL_CLK_BELOW_MIN;

	/* "requirequired" is the field's declared spelling — keep as-is. */
	if (clk_khz < clock_cfg.bw_requirequired_clock_khz)
		return DC_FAIL_CLK_BELOW_CFG_REQUIRED;

	/* Record the request so the update_clocks pass below can use it. */
	switch (clock_type) {
	case DC_CLOCK_TYPE_DISPCLK:
		current_clocks->dispclk_khz = clk_khz;
		break;
	case DC_CLOCK_TYPE_DPPCLK:
		current_clocks->dppclk_khz = clk_khz;
		break;
	default:
		return DC_ERROR_UNEXPECTED;
	}

	if (dc->clk_mgr->funcs->update_clocks)
		dc->clk_mgr->funcs->update_clocks(dc->clk_mgr, context, true);

	return DC_OK;
}
3763 
dcn10_get_clock(struct dc * dc,enum dc_clock_type clock_type,struct dc_clock_config * clock_cfg)3764 void dcn10_get_clock(struct dc *dc,
3765 			enum dc_clock_type clock_type,
3766 			struct dc_clock_config *clock_cfg)
3767 {
3768 	struct dc_state *context = dc->current_state;
3769 
3770 	if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
3771 				dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg);
3772 
3773 }
3774