• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: MIT
2 //
3 // Copyright 2024 Advanced Micro Devices, Inc.
4 
5 #include "dm_services.h"
6 #include "dm_helpers.h"
7 #include "core_types.h"
8 #include "resource.h"
9 #include "dccg.h"
10 #include "dce/dce_hwseq.h"
11 #include "reg_helper.h"
12 #include "abm.h"
13 #include "hubp.h"
14 #include "dchubbub.h"
15 #include "timing_generator.h"
16 #include "opp.h"
17 #include "ipp.h"
18 #include "mpc.h"
19 #include "mcif_wb.h"
20 #include "dc_dmub_srv.h"
21 #include "link_hwss.h"
22 #include "dpcd_defs.h"
23 #include "clk_mgr.h"
24 #include "dsc.h"
25 #include "link.h"
26 
27 #include "dce/dmub_hw_lock_mgr.h"
28 #include "dcn10/dcn10_cm_common.h"
29 #include "dcn20/dcn20_optc.h"
30 #include "dcn30/dcn30_cm_common.h"
31 #include "dcn32/dcn32_hwseq.h"
32 #include "dcn401_hwseq.h"
33 #include "dcn401/dcn401_resource.h"
34 #include "dc_state_priv.h"
35 #include "link_enc_cfg.h"
36 
37 #define DC_LOGGER_INIT(logger)
38 
39 #define CTX \
40 	hws->ctx
41 #define REG(reg)\
42 	hws->regs->reg
43 #define DC_LOGGER \
44 	dc->ctx->logger
45 
46 
47 #undef FN
48 #define FN(reg_name, field_name) \
49 	hws->shifts->field_name, hws->masks->field_name
50 
dcn401_initialize_min_clocks(struct dc * dc)51 static void dcn401_initialize_min_clocks(struct dc *dc)
52 {
53 	struct dc_clocks *clocks = &dc->current_state->bw_ctx.bw.dcn.clk;
54 
55 	clocks->dcfclk_deep_sleep_khz = DCN3_2_DCFCLK_DS_INIT_KHZ;
56 	clocks->dcfclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz * 1000;
57 	clocks->socclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].socclk_mhz * 1000;
58 	clocks->dramclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].memclk_mhz * 1000;
59 	clocks->dppclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dppclk_mhz * 1000;
60 	if (dc->debug.disable_boot_optimizations) {
61 		clocks->dispclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dispclk_mhz * 1000;
62 	} else {
63 		/* Even though DPG_EN = 1 for the connected display, it still requires the
64 		 * correct timing so we cannot set DISPCLK to min freq or it could cause
65 		 * audio corruption. Read current DISPCLK from DENTIST and request the same
66 		 * freq to ensure that the timing is valid and unchanged.
67 		 */
68 		clocks->dispclk_khz = dc->clk_mgr->funcs->get_dispclk_from_dentist(dc->clk_mgr);
69 	}
70 	clocks->ref_dtbclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dtbclk_mhz * 1000;
71 	clocks->fclk_p_state_change_support = true;
72 	clocks->p_state_change_support = true;
73 
74 	dc->clk_mgr->funcs->update_clocks(
75 			dc->clk_mgr,
76 			dc->current_state,
77 			true);
78 }
79 
dcn401_program_gamut_remap(struct pipe_ctx * pipe_ctx)80 void dcn401_program_gamut_remap(struct pipe_ctx *pipe_ctx)
81 {
82 	unsigned int i = 0;
83 	struct mpc_grph_gamut_adjustment mpc_adjust;
84 	unsigned int mpcc_id = pipe_ctx->plane_res.mpcc_inst;
85 	struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
86 
87 	//For now assert if location is not pre-blend
88 	if (pipe_ctx->plane_state)
89 		ASSERT(pipe_ctx->plane_state->mcm_location == MPCC_MOVABLE_CM_LOCATION_BEFORE);
90 
91 	// program MPCC_MCM_FIRST_GAMUT_REMAP
92 	memset(&mpc_adjust, 0, sizeof(mpc_adjust));
93 	mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
94 	mpc_adjust.mpcc_gamut_remap_block_id = MPCC_MCM_FIRST_GAMUT_REMAP;
95 
96 	if (pipe_ctx->plane_state &&
97 		pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) {
98 		mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
99 		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
100 			mpc_adjust.temperature_matrix[i] =
101 			pipe_ctx->plane_state->gamut_remap_matrix.matrix[i];
102 	}
103 
104 	mpc->funcs->set_gamut_remap(mpc, mpcc_id, &mpc_adjust);
105 
106 	// program MPCC_MCM_SECOND_GAMUT_REMAP for Bypass / Disable for now
107 	mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
108 	mpc_adjust.mpcc_gamut_remap_block_id = MPCC_MCM_SECOND_GAMUT_REMAP;
109 
110 	mpc->funcs->set_gamut_remap(mpc, mpcc_id, &mpc_adjust);
111 
112 	// program MPCC_OGAM_GAMUT_REMAP same as is currently used on DCN3x
113 	memset(&mpc_adjust, 0, sizeof(mpc_adjust));
114 	mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
115 	mpc_adjust.mpcc_gamut_remap_block_id = MPCC_OGAM_GAMUT_REMAP;
116 
117 	if (pipe_ctx->top_pipe == NULL) {
118 		if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
119 			mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
120 			for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
121 				mpc_adjust.temperature_matrix[i] =
122 				pipe_ctx->stream->gamut_remap_matrix.matrix[i];
123 		}
124 	}
125 
126 	mpc->funcs->set_gamut_remap(mpc, mpcc_id, &mpc_adjust);
127 }
128 
/*
 * Return the IPS ONO power state for one power region.
 *
 * Each region index maps to one DOMAIN*_PG_STATUS register; both the
 * desired power state and the currently reported PGFSM power state are
 * read.  An unknown region index returns an all-zero state.
 */
struct ips_ono_region_state dcn401_read_ono_state(struct dc *dc, uint8_t region)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct ips_ono_region_state state = {0, 0};

/* Read desired + current PGFSM power state for one PG domain register. */
#define DCN401_GET_ONO_STATE(domain) \
	REG_GET_2(domain##_PG_STATUS, \
		DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state, \
		DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state)

	switch (region) {
	case 0: /* dccg, dio, dcio */
		DCN401_GET_ONO_STATE(DOMAIN22);
		break;
	case 1: /* dchubbub, dchvm, dchubbubmem */
		DCN401_GET_ONO_STATE(DOMAIN23);
		break;
	case 2: /* mpc, opp, optc, dwb */
		DCN401_GET_ONO_STATE(DOMAIN24);
		break;
	case 3: /* hpo */
		DCN401_GET_ONO_STATE(DOMAIN25);
		break;
	case 4: /* dchubp0, dpp0 */
		DCN401_GET_ONO_STATE(DOMAIN0);
		break;
	case 5: /* dsc0 */
		DCN401_GET_ONO_STATE(DOMAIN16);
		break;
	case 6: /* dchubp1, dpp1 */
		DCN401_GET_ONO_STATE(DOMAIN1);
		break;
	case 7: /* dsc1 */
		DCN401_GET_ONO_STATE(DOMAIN17);
		break;
	case 8: /* dchubp2, dpp2 */
		DCN401_GET_ONO_STATE(DOMAIN2);
		break;
	case 9: /* dsc2 */
		DCN401_GET_ONO_STATE(DOMAIN18);
		break;
	case 10: /* dchubp3, dpp3 */
		DCN401_GET_ONO_STATE(DOMAIN3);
		break;
	case 11: /* dsc3 */
		DCN401_GET_ONO_STATE(DOMAIN19);
		break;
	default:
		break;
	}

#undef DCN401_GET_ONO_STATE

	return state;
}
213 
/*
 * One-time hardware bring-up for DCN4.01.
 *
 * The sequence is order-sensitive: clocks first, then DCCG, memory power
 * states, link encoders, plane power gating, pipe init, audio/backlight,
 * clock gating, watermarks, and finally the DMCUB capability query.  Do
 * not reorder steps without consulting the HW sequencing requirements.
 */
void dcn401_init_hw(struct dc *dc)
{
	struct abm **abms = dc->res_pool->multiple_abms;
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *dcb = dc->ctx->dc_bios;
	struct resource_pool *res_pool = dc->res_pool;
	int i;
	int edp_num;
	uint32_t backlight = MAX_BACKLIGHT_LEVEL;
	uint32_t user_level = MAX_BACKLIGHT_LEVEL;
	int current_dchub_ref_freq = 0;

	/* Bring clocks up before any register programming below. */
	if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->init_clocks) {
		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);

		// mark dcmode limits present if any clock has distinct AC and DC values from SMU
		dc->caps.dcmode_power_limits_present =
				(dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.dcfclk_mhz) ||
				(dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.dispclk_mhz) ||
				(dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.dtbclk_mhz) ||
				(dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_fclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.fclk_mhz) ||
				(dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.memclk_mhz) ||
				(dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_socclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.socclk_mhz);
	}

	// Initialize the dccg
	if (res_pool->dccg->funcs->dccg_init)
		res_pool->dccg->funcs->dccg_init(res_pool->dccg);

	// Disable DMUB Initialization until IPS state programming is finalized
	//if (!dcb->funcs->is_accelerated_mode(dcb)) {
	//	hws->funcs.bios_golden_init(dc);
	//}

	// Set default OPTC memory power states
	if (dc->debug.enable_mem_low_power.bits.optc) {
		// Shutdown when unassigned and light sleep in VBLANK
		REG_SET_2(ODM_MEM_PWR_CTRL3, 0, ODM_MEM_UNASSIGNED_PWR_MODE, 3, ODM_MEM_VBLANK_PWR_MODE, 1);
	}

	if (dc->debug.enable_mem_low_power.bits.vga) {
		// Power down VGA memory
		REG_UPDATE(MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, 1);
	}

	/* Derive DCCG/DCHUB reference clocks from the VBIOS crystal frequency. */
	if (dc->ctx->dc_bios->fw_info_valid) {
		res_pool->ref_clocks.xtalin_clock_inKhz =
				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;

		if (res_pool->hubbub) {
			(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
					dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
					&res_pool->ref_clocks.dccg_ref_clock_inKhz);

			/* Snapshot the previous DCHUB ref freq (MHz) so a change
			 * can trigger a bounding-box update at the end of init.
			 */
			current_dchub_ref_freq = res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;

			(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
					res_pool->ref_clocks.dccg_ref_clock_inKhz,
					&res_pool->ref_clocks.dchub_ref_clock_inKhz);
		} else {
			// Not all ASICs have DCCG sw component
			res_pool->ref_clocks.dccg_ref_clock_inKhz =
					res_pool->ref_clocks.xtalin_clock_inKhz;
			res_pool->ref_clocks.dchub_ref_clock_inKhz =
					res_pool->ref_clocks.xtalin_clock_inKhz;
		}
	} else
		ASSERT_CRITICAL(false);

	for (i = 0; i < dc->link_count; i++) {
		/* Power up AND update implementation according to the
		 * required signal (which may be different from the
		 * default signal on connector).
		 */
		struct dc_link *link = dc->links[i];

		link->link_enc->funcs->hw_init(link->link_enc);

		/* Check for enabled DIG to identify enabled display */
		if (link->link_enc->funcs->is_dig_enabled &&
			link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
			link->link_status.link_active = true;
			link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
			if (link->link_enc->funcs->fec_is_active &&
					link->link_enc->funcs->fec_is_active(link->link_enc))
				link->fec_state = dc_link_fec_enabled;
		}
	}

	/* enable_power_gating_plane before dsc_pg_control because
	 * FORCEON = 1 with hw default value on bootup, resume from s3
	 */
	if (hws->funcs.enable_power_gating_plane)
		hws->funcs.enable_power_gating_plane(dc->hwseq, true);

	/* we want to turn off all dp displays before doing detection */
	dc->link_srv->blank_all_dp_displays(dc);

	/* If taking control over from VBIOS, we may want to optimize our first
	 * mode set, so we need to skip powering down pipes until we know which
	 * pipes we want to use.
	 * Otherwise, if taking control is not possible, we need to power
	 * everything down.
	 */
	if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) {
		/* Disable boot optimizations means power down everything including PHY, DIG,
		 * and OTG (i.e. the boot is not optimized because we do a full power down).
		 */
		if (dc->hwss.enable_accelerated_mode && dc->debug.disable_boot_optimizations)
			dc->hwss.enable_accelerated_mode(dc, dc->current_state);
		else
			hws->funcs.init_pipes(dc, dc->current_state);

		if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
					!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);

		dcn401_initialize_min_clocks(dc);

		/* On HW init, allow idle optimizations after pipes have been turned off.
		 *
		 * In certain D3 cases (i.e. BOCO / BOMACO) it's possible that hardware state
		 * is reset (i.e. not in idle at the time hw init is called), but software state
		 * still has idle_optimizations = true, so we must disable idle optimizations first
		 * (i.e. set false), then re-enable (set true).
		 */
		dc_allow_idle_optimizations(dc, false);
		dc_allow_idle_optimizations(dc, true);
	}

	/* In headless boot cases, DIG may be turned
	 * on which causes HW/SW discrepancies.
	 * To avoid this, power down hardware on boot
	 * if DIG is turned on and seamless boot not enabled
	 */
	if (!dc->config.seamless_boot_edp_requested) {
		struct dc_link *edp_links[MAX_NUM_EDP];
		struct dc_link *edp_link;

		dc_get_edp_links(dc, edp_links, &edp_num);
		if (edp_num) {
			/* eDP present: back off backlight and power before power_down. */
			for (i = 0; i < edp_num; i++) {
				edp_link = edp_links[i];
				if (edp_link->link_enc->funcs->is_dig_enabled &&
						edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
						dc->hwss.edp_backlight_control &&
						hws->funcs.power_down &&
						dc->hwss.edp_power_control) {
					dc->hwss.edp_backlight_control(edp_link, false);
					hws->funcs.power_down(dc);
					dc->hwss.edp_power_control(edp_link, false);
				}
			}
		} else {
			/* No eDP: a single power_down suffices if any DIG is on. */
			for (i = 0; i < dc->link_count; i++) {
				struct dc_link *link = dc->links[i];

				if (link->link_enc->funcs->is_dig_enabled &&
						link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
						hws->funcs.power_down) {
					hws->funcs.power_down(dc);
					break;
				}

			}
		}
	}

	for (i = 0; i < res_pool->audio_count; i++) {
		struct audio *audio = res_pool->audios[i];

		audio->funcs->hw_init(audio);
	}

	/* Capture panel backlight levels so ABM init below can restore them. */
	for (i = 0; i < dc->link_count; i++) {
		struct dc_link *link = dc->links[i];

		if (link->panel_cntl) {
			backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
			user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
		}
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (abms[i] != NULL && abms[i]->funcs != NULL)
			abms[i]->funcs->abm_init(abms[i], backlight, user_level);
	}

	/* power AFMT HDMI memory TODO: may move to dis/en output save power*/
	REG_WRITE(DIO_MEM_PWR_CTRL, 0);

	if (!dc->debug.disable_clock_gate) {
		/* enable all DCN clock gating */
		REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);

		REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);

		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
	}

	dcn401_setup_hpo_hw_control(hws, true);

	if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
		dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);

	if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->notify_wm_ranges)
		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);

	if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
		dc->res_pool->hubbub->funcs->force_pstate_change_control(
				dc->res_pool->hubbub, false, false);

	if (dc->res_pool->hubbub->funcs->init_crb)
		dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub);

	if (dc->res_pool->hubbub->funcs->set_request_limit && dc->config.sdpif_request_limit_words_per_umc > 0)
		dc->res_pool->hubbub->funcs->set_request_limit(dc->res_pool->hubbub, dc->ctx->dc_bios->vram_info.num_chans, dc->config.sdpif_request_limit_words_per_umc);

	// Get DMCUB capabilities
	if (dc->ctx->dmub_srv) {
		dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv);
		dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
		dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver > 0;
		dc->caps.dmub_caps.fams_ver = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver;
		/* FAMS2 requires firmware version 2 exactly; otherwise keep it off. */
		dc->debug.fams2_config.bits.enable &= dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver == 2;
		if ((!dc->debug.fams2_config.bits.enable && dc->res_pool->funcs->update_bw_bounding_box)
			|| res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000 != current_dchub_ref_freq) {
			/* update bounding box if FAMS2 disabled, or if dchub clk has changed */
			if (dc->clk_mgr)
				dc->res_pool->funcs->update_bw_bounding_box(dc,
									    dc->clk_mgr->bw_params);
		}
	}
}
448 
dcn401_get_mcm_lut_xable_from_pipe_ctx(struct dc * dc,struct pipe_ctx * pipe_ctx,enum MCM_LUT_XABLE * shaper_xable,enum MCM_LUT_XABLE * lut3d_xable,enum MCM_LUT_XABLE * lut1d_xable)449 static void dcn401_get_mcm_lut_xable_from_pipe_ctx(struct dc *dc, struct pipe_ctx *pipe_ctx,
450 		enum MCM_LUT_XABLE *shaper_xable,
451 		enum MCM_LUT_XABLE *lut3d_xable,
452 		enum MCM_LUT_XABLE *lut1d_xable)
453 {
454 	enum dc_cm2_shaper_3dlut_setting shaper_3dlut_setting = DC_CM2_SHAPER_3DLUT_SETTING_BYPASS_ALL;
455 	bool lut1d_enable = false;
456 	struct mpc *mpc = dc->res_pool->mpc;
457 	int mpcc_id = pipe_ctx->plane_res.hubp->inst;
458 
459 	if (!pipe_ctx->plane_state)
460 		return;
461 	shaper_3dlut_setting = pipe_ctx->plane_state->mcm_shaper_3dlut_setting;
462 	lut1d_enable = pipe_ctx->plane_state->mcm_lut1d_enable;
463 	mpc->funcs->set_movable_cm_location(mpc, MPCC_MOVABLE_CM_LOCATION_BEFORE, mpcc_id);
464 	pipe_ctx->plane_state->mcm_location = MPCC_MOVABLE_CM_LOCATION_BEFORE;
465 
466 	*lut1d_xable = lut1d_enable ? MCM_LUT_ENABLE : MCM_LUT_DISABLE;
467 
468 	switch (shaper_3dlut_setting) {
469 	case DC_CM2_SHAPER_3DLUT_SETTING_BYPASS_ALL:
470 		*lut3d_xable = *shaper_xable = MCM_LUT_DISABLE;
471 		break;
472 	case DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER:
473 		*lut3d_xable = MCM_LUT_DISABLE;
474 		*shaper_xable = MCM_LUT_ENABLE;
475 		break;
476 	case DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER_3DLUT:
477 		*lut3d_xable = *shaper_xable = MCM_LUT_ENABLE;
478 		break;
479 	}
480 }
481 
/*
 * Program the MPCC MCM 1D LUT, shaper and 3DLUT for one pipe.
 *
 * The 3DLUT may come from system memory (programmed through the MPC) or
 * from video memory (fast-loaded by the HUBP "3dlut_fl" DMA path, with
 * format/layout/crossbar configuration taken from mcm_luts.lut3d_data).
 * lut_bank_a selects which of the double-buffered LUT banks to write.
 */
void dcn401_populate_mcm_luts(struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_cm2_func_luts mcm_luts,
		bool lut_bank_a)
{
	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	int mpcc_id = hubp->inst;
	struct mpc *mpc = dc->res_pool->mpc;
	union mcm_lut_params m_lut_params;
	enum dc_cm2_transfer_func_source lut3d_src = mcm_luts.lut3d_data.lut3d_src;
	enum hubp_3dlut_fl_format format;
	enum hubp_3dlut_fl_mode mode;
	enum hubp_3dlut_fl_width width;
	enum hubp_3dlut_fl_addressing_mode addr_mode;
	enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g;
	enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b;
	enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r;
	enum MCM_LUT_XABLE shaper_xable = MCM_LUT_DISABLE;
	enum MCM_LUT_XABLE lut3d_xable = MCM_LUT_DISABLE;
	enum MCM_LUT_XABLE lut1d_xable = MCM_LUT_DISABLE;
	bool is_17x17x17 = true;
	bool rval;

	dcn401_get_mcm_lut_xable_from_pipe_ctx(dc, pipe_ctx, &shaper_xable, &lut3d_xable, &lut1d_xable);

	/* 1D LUT */
	/* NOTE(review): the 1D LUT path is gated on lut3d_xable rather than
	 * lut1d_xable — confirm this coupling to the 3DLUT state is intended.
	 */
	if (mcm_luts.lut1d_func && lut3d_xable != MCM_LUT_DISABLE) {
		memset(&m_lut_params, 0, sizeof(m_lut_params));
		if (mcm_luts.lut1d_func->type == TF_TYPE_HWPWL)
			m_lut_params.pwl = &mcm_luts.lut1d_func->pwl;
		else if (mcm_luts.lut1d_func->type == TF_TYPE_DISTRIBUTED_POINTS) {
			/* Translate distributed points into the HW PWL format. */
			rval = cm3_helper_translate_curve_to_hw_format(
					mcm_luts.lut1d_func,
					&dpp_base->regamma_params, false);
			m_lut_params.pwl = rval ? &dpp_base->regamma_params : NULL;
		}
		if (m_lut_params.pwl) {
			if (mpc->funcs->populate_lut)
				mpc->funcs->populate_lut(mpc, MCM_LUT_1DLUT, m_lut_params, lut_bank_a, mpcc_id);
		}
		if (mpc->funcs->program_lut_mode)
			mpc->funcs->program_lut_mode(mpc, MCM_LUT_1DLUT, lut1d_xable, lut_bank_a, mpcc_id);
	}

	/* Shaper */
	if (mcm_luts.shaper) {
		memset(&m_lut_params, 0, sizeof(m_lut_params));
		if (mcm_luts.shaper->type == TF_TYPE_HWPWL)
			m_lut_params.pwl = &mcm_luts.shaper->pwl;
		else if (mcm_luts.shaper->type == TF_TYPE_DISTRIBUTED_POINTS) {
			/* NOTE(review): distributed-points shapers appear to be
			 * unexpected here (hence the ASSERT), but are still
			 * translated as a fallback — confirm intent.
			 */
			ASSERT(false);
			rval = cm3_helper_translate_curve_to_hw_format(
					mcm_luts.shaper,
					&dpp_base->regamma_params, true);
			m_lut_params.pwl = rval ? &dpp_base->regamma_params : NULL;
		}
		if (m_lut_params.pwl) {
			if (mpc->funcs->populate_lut)
				mpc->funcs->populate_lut(mpc, MCM_LUT_SHAPER, m_lut_params, lut_bank_a, mpcc_id);
		}
		if (mpc->funcs->program_lut_mode)
			mpc->funcs->program_lut_mode(mpc, MCM_LUT_SHAPER, shaper_xable, lut_bank_a, mpcc_id);
	}

	/* 3DLUT */
	switch (lut3d_src) {
	case DC_CM2_TRANSFER_FUNC_SOURCE_SYSMEM:
		memset(&m_lut_params, 0, sizeof(m_lut_params));
		/* SYSMEM path does not use the fast-load DMA engine. */
		if (hubp->funcs->hubp_enable_3dlut_fl)
			hubp->funcs->hubp_enable_3dlut_fl(hubp, false);
		if (mcm_luts.lut3d_data.lut3d_func && mcm_luts.lut3d_data.lut3d_func->state.bits.initialized) {
			m_lut_params.lut3d = &mcm_luts.lut3d_data.lut3d_func->lut_3d;
			if (mpc->funcs->populate_lut)
				mpc->funcs->populate_lut(mpc, MCM_LUT_3DLUT, m_lut_params, lut_bank_a, mpcc_id);
			if (mpc->funcs->program_lut_mode)
				mpc->funcs->program_lut_mode(mpc, MCM_LUT_3DLUT, lut3d_xable, lut_bank_a,
						mpcc_id);
		}
		break;
	case DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM:

		if (mpc->funcs->program_lut_read_write_control)
			mpc->funcs->program_lut_read_write_control(mpc, MCM_LUT_3DLUT, lut_bank_a, mpcc_id);
		if (mpc->funcs->program_lut_mode)
			mpc->funcs->program_lut_mode(mpc, MCM_LUT_3DLUT, lut3d_xable, lut_bank_a, mpcc_id);
		if (mpc->funcs->program_3dlut_size)
			mpc->funcs->program_3dlut_size(mpc, is_17x17x17, mpcc_id);
		if (hubp->funcs->hubp_program_3dlut_fl_addr)
			hubp->funcs->hubp_program_3dlut_fl_addr(hubp, mcm_luts.lut3d_data.gpu_mem_params.addr);
		/* Map the GPU memory layout onto the HUBP fast-load mode. */
		switch (mcm_luts.lut3d_data.gpu_mem_params.layout) {
		case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_RGB:
			mode = hubp_3dlut_fl_mode_native_1;
			addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
			break;
		case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_BGR:
			mode = hubp_3dlut_fl_mode_native_2;
			addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
			break;
		case DC_CM2_GPU_MEM_LAYOUT_1D_PACKED_LINEAR:
			mode = hubp_3dlut_fl_mode_transform;
			addr_mode = hubp_3dlut_fl_addressing_mode_simple_linear;
			break;
		default:
			mode = hubp_3dlut_fl_mode_disable;
			addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
			break;
		}
		if (hubp->funcs->hubp_program_3dlut_fl_mode)
			hubp->funcs->hubp_program_3dlut_fl_mode(hubp, mode);

		if (hubp->funcs->hubp_program_3dlut_fl_addressing_mode)
			hubp->funcs->hubp_program_3dlut_fl_addressing_mode(hubp, addr_mode);

		/* Select the pixel format of the LUT data in GPU memory. */
		switch (mcm_luts.lut3d_data.gpu_mem_params.format_params.format) {
		case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12MSB:
		default:
			format = hubp_3dlut_fl_format_unorm_12msb_bitslice;
			break;
		case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12LSB:
			format = hubp_3dlut_fl_format_unorm_12lsb_bitslice;
			break;
		case DC_CM2_GPU_MEM_FORMAT_16161616_FLOAT_FP1_5_10:
			format = hubp_3dlut_fl_format_float_fp1_5_10;
			break;
		}
		if (hubp->funcs->hubp_program_3dlut_fl_format)
			hubp->funcs->hubp_program_3dlut_fl_format(hubp, format);
		if (hubp->funcs->hubp_update_3dlut_fl_bias_scale)
			hubp->funcs->hubp_update_3dlut_fl_bias_scale(hubp,
					mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.bias,
					mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.scale);

		/* Crossbar: route 16-bit slices of the 48-bit word to Y/G, Cb/B, Cr/R. */
		switch (mcm_luts.lut3d_data.gpu_mem_params.component_order) {
		case DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_RGBA:
		default:
			crossbar_bit_slice_cr_r = hubp_3dlut_fl_crossbar_bit_slice_0_15;
			crossbar_bit_slice_y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31;
			crossbar_bit_slice_cb_b = hubp_3dlut_fl_crossbar_bit_slice_32_47;
			break;
		}

		if (hubp->funcs->hubp_program_3dlut_fl_crossbar)
			hubp->funcs->hubp_program_3dlut_fl_crossbar(hubp,
					crossbar_bit_slice_y_g,
					crossbar_bit_slice_cb_b,
					crossbar_bit_slice_cr_r);

		switch (mcm_luts.lut3d_data.gpu_mem_params.size) {
		case DC_CM2_GPU_MEM_SIZE_171717:
		default:
			width = hubp_3dlut_fl_width_17;
			break;
		case DC_CM2_GPU_MEM_SIZE_TRANSFORMED:
			width = hubp_3dlut_fl_width_transformed;
			break;
		}
		if (hubp->funcs->hubp_program_3dlut_fl_width)
			hubp->funcs->hubp_program_3dlut_fl_width(hubp, width);
		if (mpc->funcs->update_3dlut_fast_load_select)
			mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, hubp->inst);

		/* NOTE(review): if fast-load is not implemented by this HUBP, all
		 * three MCM LUT stages are forced to DISABLE below — confirm the
		 * intended fallback is full bypass rather than SYSMEM programming.
		 */
		if (hubp->funcs->hubp_enable_3dlut_fl)
			hubp->funcs->hubp_enable_3dlut_fl(hubp, true);
		else {
			if (mpc->funcs->program_lut_mode) {
				mpc->funcs->program_lut_mode(mpc, MCM_LUT_SHAPER, MCM_LUT_DISABLE, lut_bank_a, mpcc_id);
				mpc->funcs->program_lut_mode(mpc, MCM_LUT_3DLUT, MCM_LUT_DISABLE, lut_bank_a, mpcc_id);
				mpc->funcs->program_lut_mode(mpc, MCM_LUT_1DLUT, MCM_LUT_DISABLE, lut_bank_a, mpcc_id);
			}
		}
		break;

	}
}
657 
dcn401_trigger_3dlut_dma_load(struct dc * dc,struct pipe_ctx * pipe_ctx)658 void dcn401_trigger_3dlut_dma_load(struct dc *dc, struct pipe_ctx *pipe_ctx)
659 {
660 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
661 
662 	if (hubp->funcs->hubp_enable_3dlut_fl) {
663 		hubp->funcs->hubp_enable_3dlut_fl(hubp, true);
664 	}
665 }
666 
dcn401_set_mcm_luts(struct pipe_ctx * pipe_ctx,const struct dc_plane_state * plane_state)667 bool dcn401_set_mcm_luts(struct pipe_ctx *pipe_ctx,
668 				const struct dc_plane_state *plane_state)
669 {
670 	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
671 	int mpcc_id = pipe_ctx->plane_res.hubp->inst;
672 	struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
673 	bool result;
674 	const struct pwl_params *lut_params = NULL;
675 	bool rval;
676 
677 	mpc->funcs->set_movable_cm_location(mpc, MPCC_MOVABLE_CM_LOCATION_BEFORE, mpcc_id);
678 	pipe_ctx->plane_state->mcm_location = MPCC_MOVABLE_CM_LOCATION_BEFORE;
679 	// 1D LUT
680 	if (plane_state->blend_tf.type == TF_TYPE_HWPWL)
681 		lut_params = &plane_state->blend_tf.pwl;
682 	else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) {
683 		rval = cm3_helper_translate_curve_to_hw_format(&plane_state->blend_tf,
684 				&dpp_base->regamma_params, false);
685 		lut_params = rval ? &dpp_base->regamma_params : NULL;
686 	}
687 	result = mpc->funcs->program_1dlut(mpc, lut_params, mpcc_id);
688 	lut_params = NULL;
689 
690 	// Shaper
691 	if (plane_state->in_shaper_func.type == TF_TYPE_HWPWL)
692 		lut_params = &plane_state->in_shaper_func.pwl;
693 	else if (plane_state->in_shaper_func.type == TF_TYPE_DISTRIBUTED_POINTS) {
694 		// TODO: dpp_base replace
695 		rval = cm3_helper_translate_curve_to_hw_format(&plane_state->in_shaper_func,
696 				&dpp_base->shaper_params, true);
697 		lut_params = rval ? &dpp_base->shaper_params : NULL;
698 	}
699 	result &= mpc->funcs->program_shaper(mpc, lut_params, mpcc_id);
700 
701 	// 3D
702 	if (mpc->funcs->program_3dlut) {
703 		if (plane_state->lut3d_func.state.bits.initialized == 1)
704 			result &= mpc->funcs->program_3dlut(mpc, &plane_state->lut3d_func.lut_3d, mpcc_id);
705 		else
706 			result &= mpc->funcs->program_3dlut(mpc, NULL, mpcc_id);
707 	}
708 
709 	return result;
710 }
711 
dcn401_set_output_transfer_func(struct dc * dc,struct pipe_ctx * pipe_ctx,const struct dc_stream_state * stream)712 bool dcn401_set_output_transfer_func(struct dc *dc,
713 				struct pipe_ctx *pipe_ctx,
714 				const struct dc_stream_state *stream)
715 {
716 	int mpcc_id = pipe_ctx->plane_res.hubp->inst;
717 	struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
718 	const struct pwl_params *params = NULL;
719 	bool ret = false;
720 
721 	/* program OGAM or 3DLUT only for the top pipe*/
722 	if (resource_is_pipe_type(pipe_ctx, OPP_HEAD)) {
723 		/*program shaper and 3dlut in MPC*/
724 		ret = dcn32_set_mpc_shaper_3dlut(pipe_ctx, stream);
725 		if (ret == false && mpc->funcs->set_output_gamma) {
726 			if (stream->out_transfer_func.type == TF_TYPE_HWPWL)
727 				params = &stream->out_transfer_func.pwl;
728 			else if (pipe_ctx->stream->out_transfer_func.type ==
729 					TF_TYPE_DISTRIBUTED_POINTS &&
730 					cm3_helper_translate_curve_to_hw_format(
731 					&stream->out_transfer_func,
732 					&mpc->blender_params, false))
733 				params = &mpc->blender_params;
734 			/* there are no ROM LUTs in OUTGAM */
735 			if (stream->out_transfer_func.type == TF_TYPE_PREDEFINED)
736 				BREAK_TO_DEBUGGER();
737 		}
738 	}
739 
740 	if (mpc->funcs->set_output_gamma)
741 		mpc->funcs->set_output_gamma(mpc, mpcc_id, params);
742 
743 	return ret;
744 }
745 
dcn401_calculate_dccg_tmds_div_value(struct pipe_ctx * pipe_ctx,unsigned int * tmds_div)746 void dcn401_calculate_dccg_tmds_div_value(struct pipe_ctx *pipe_ctx,
747 				unsigned int *tmds_div)
748 {
749 	struct dc_stream_state *stream = pipe_ctx->stream;
750 
751 	if (dc_is_tmds_signal(stream->signal) || dc_is_virtual_signal(stream->signal)) {
752 		if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
753 			*tmds_div = PIXEL_RATE_DIV_BY_2;
754 		else
755 			*tmds_div = PIXEL_RATE_DIV_BY_4;
756 	} else {
757 		*tmds_div = PIXEL_RATE_DIV_BY_1;
758 	}
759 
760 	if (*tmds_div == PIXEL_RATE_DIV_NA)
761 		ASSERT(false);
762 
763 }
764 
/*
 * Gather everything dcn401_enable_stream_timing() needs before touching
 * the OTG: the TMDS pixel-rate divider, the OPP instances served by this
 * OTG master, the link symclk software state, and the DRR parameters.
 */
static void enable_stream_timing_calc(
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context,
		struct dc *dc,
		unsigned int *tmds_div,
		int *opp_inst,
		int *opp_cnt,
		struct pipe_ctx *opp_heads[MAX_PIPES],
		bool *manual_mode,
		struct drr_params *params,
		unsigned int *event_triggers)
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	int idx;

	/* Only TMDS-like signals need a non-default pixel-rate divider. */
	if (dc_is_tmds_signal(stream->signal) || dc_is_virtual_signal(stream->signal))
		dcn401_calculate_dccg_tmds_div_value(pipe_ctx, tmds_div);

	/* Collect the OPP instance for every OPP head driven by this OTG. */
	*opp_cnt = resource_get_opp_heads_for_otg_master(pipe_ctx, &context->res_ctx, opp_heads);
	for (idx = 0; idx < *opp_cnt; idx++)
		opp_inst[idx] = opp_heads[idx]->stream_res.opp->inst;

	/* Track symclk SW state: enabling the OTG turns symclk on. */
	if (dc_is_tmds_signal(stream->signal)) {
		stream->link->phy_state.symclk_ref_cnts.otg = 1;
		stream->link->phy_state.symclk_state =
				(stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF) ?
						SYMCLK_ON_TX_OFF : SYMCLK_ON_TX_ON;
	}

	params->vertical_total_min = stream->adjust.v_total_min;
	params->vertical_total_max = stream->adjust.v_total_max;
	params->vertical_total_mid = stream->adjust.v_total_mid;
	params->vertical_total_mid_frame_num = stream->adjust.v_total_mid_frame_num;

	/* DRR should set trigger event to monitor surface update event. */
	if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0)
		*event_triggers = 0x80;
}
804 
dcn401_enable_stream_timing(struct pipe_ctx * pipe_ctx,struct dc_state * context,struct dc * dc)805 enum dc_status dcn401_enable_stream_timing(
806 		struct pipe_ctx *pipe_ctx,
807 		struct dc_state *context,
808 		struct dc *dc)
809 {
810 	struct dce_hwseq *hws = dc->hwseq;
811 	struct dc_stream_state *stream = pipe_ctx->stream;
812 	struct drr_params params = {0};
813 	unsigned int event_triggers = 0;
814 	int opp_cnt = 1;
815 	int opp_inst[MAX_PIPES] = {0};
816 	struct pipe_ctx *opp_heads[MAX_PIPES] = {0};
817 	bool manual_mode;
818 	unsigned int tmds_div = PIXEL_RATE_DIV_NA;
819 	unsigned int unused_div = PIXEL_RATE_DIV_NA;
820 	int odm_slice_width;
821 	int last_odm_slice_width;
822 	int i;
823 
824 	if (!resource_is_pipe_type(pipe_ctx, OTG_MASTER))
825 		return DC_OK;
826 
827 	enable_stream_timing_calc(pipe_ctx, context, dc, &tmds_div, opp_inst,
828 			&opp_cnt, opp_heads, &manual_mode, &params, &event_triggers);
829 
830 	if (dc->res_pool->dccg->funcs->set_pixel_rate_div) {
831 		dc->res_pool->dccg->funcs->set_pixel_rate_div(
832 			dc->res_pool->dccg, pipe_ctx->stream_res.tg->inst,
833 			tmds_div, unused_div);
834 	}
835 
836 	/* TODO check if timing_changed, disable stream if timing changed */
837 
838 	if (opp_cnt > 1) {
839 		odm_slice_width = resource_get_odm_slice_dst_width(pipe_ctx, false);
840 		last_odm_slice_width = resource_get_odm_slice_dst_width(pipe_ctx, true);
841 		pipe_ctx->stream_res.tg->funcs->set_odm_combine(
842 				pipe_ctx->stream_res.tg,
843 				opp_inst, opp_cnt,
844 				odm_slice_width, last_odm_slice_width);
845 	}
846 
847 	/* set DTBCLK_P */
848 	if (dc->res_pool->dccg->funcs->set_dtbclk_p_src) {
849 		if (dc_is_dp_signal(stream->signal) || dc_is_virtual_signal(stream->signal)) {
850 			dc->res_pool->dccg->funcs->set_dtbclk_p_src(dc->res_pool->dccg, DPREFCLK, pipe_ctx->stream_res.tg->inst);
851 		}
852 	}
853 
854 	/* HW program guide assume display already disable
855 	 * by unplug sequence. OTG assume stop.
856 	 */
857 	pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);
858 
859 	if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
860 			pipe_ctx->clock_source,
861 			&pipe_ctx->stream_res.pix_clk_params,
862 			dc->link_srv->dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
863 			&pipe_ctx->pll_settings)) {
864 		BREAK_TO_DEBUGGER();
865 		return DC_ERROR_UNEXPECTED;
866 	}
867 
868 	if (dc->hwseq->funcs.PLAT_58856_wa && (!dc_is_dp_signal(stream->signal)))
869 		dc->hwseq->funcs.PLAT_58856_wa(context, pipe_ctx);
870 
871 	pipe_ctx->stream_res.tg->funcs->program_timing(
872 			pipe_ctx->stream_res.tg,
873 			&stream->timing,
874 			pipe_ctx->pipe_dlg_param.vready_offset,
875 			pipe_ctx->pipe_dlg_param.vstartup_start,
876 			pipe_ctx->pipe_dlg_param.vupdate_offset,
877 			pipe_ctx->pipe_dlg_param.vupdate_width,
878 			pipe_ctx->pipe_dlg_param.pstate_keepout,
879 			pipe_ctx->stream->signal,
880 			true);
881 
882 	for (i = 0; i < opp_cnt; i++) {
883 		opp_heads[i]->stream_res.opp->funcs->opp_pipe_clock_control(
884 				opp_heads[i]->stream_res.opp,
885 				true);
886 		opp_heads[i]->stream_res.opp->funcs->opp_program_left_edge_extra_pixel(
887 				opp_heads[i]->stream_res.opp,
888 				stream->timing.pixel_encoding,
889 				resource_is_pipe_type(opp_heads[i], OTG_MASTER));
890 	}
891 
892 	pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
893 			pipe_ctx->stream_res.opp,
894 			true);
895 
896 	hws->funcs.blank_pixel_data(dc, pipe_ctx, true);
897 
898 	/* VTG is  within DCHUB command block. DCFCLK is always on */
899 	if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
900 		BREAK_TO_DEBUGGER();
901 		return DC_ERROR_UNEXPECTED;
902 	}
903 
904 	hws->funcs.wait_for_blank_complete(pipe_ctx->stream_res.opp);
905 	set_drr_and_clear_adjust_pending(pipe_ctx, stream, &params);
906 
907 	/* Event triggers and num frames initialized for DRR, but can be
908 	 * later updated for PSR use. Note DRR trigger events are generated
909 	 * regardless of whether num frames met.
910 	 */
911 	if (pipe_ctx->stream_res.tg->funcs->set_static_screen_control)
912 		pipe_ctx->stream_res.tg->funcs->set_static_screen_control(
913 				pipe_ctx->stream_res.tg, event_triggers, 2);
914 
915 	/* TODO program crtc source select for non-virtual signal*/
916 	/* TODO program FMT */
917 	/* TODO setup link_enc */
918 	/* TODO set stream attributes */
919 	/* TODO program audio */
920 	/* TODO enable stream if timing changed */
921 	/* TODO unblank stream if DP */
922 
923 	if (dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM) {
924 		if (pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable)
925 			pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable(pipe_ctx->stream_res.tg);
926 	}
927 
928 	return DC_OK;
929 }
930 
get_phyd32clk_src(struct dc_link * link)931 static enum phyd32clk_clock_source get_phyd32clk_src(struct dc_link *link)
932 {
933 	switch (link->link_enc->transmitter) {
934 	case TRANSMITTER_UNIPHY_A:
935 		return PHYD32CLKA;
936 	case TRANSMITTER_UNIPHY_B:
937 		return PHYD32CLKB;
938 	case TRANSMITTER_UNIPHY_C:
939 		return PHYD32CLKC;
940 	case TRANSMITTER_UNIPHY_D:
941 		return PHYD32CLKD;
942 	case TRANSMITTER_UNIPHY_E:
943 		return PHYD32CLKE;
944 	default:
945 		return PHYD32CLKA;
946 	}
947 }
948 
dcn401_enable_stream_calc(struct pipe_ctx * pipe_ctx,int * dp_hpo_inst,enum phyd32clk_clock_source * phyd32clk,unsigned int * tmds_div,uint32_t * early_control)949 static void dcn401_enable_stream_calc(
950 		struct pipe_ctx *pipe_ctx,
951 		int *dp_hpo_inst,
952 		enum phyd32clk_clock_source *phyd32clk,
953 		unsigned int *tmds_div,
954 		uint32_t *early_control)
955 {
956 
957 	struct dc *dc = pipe_ctx->stream->ctx->dc;
958 	struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
959 	enum dc_lane_count lane_count =
960 			pipe_ctx->stream->link->cur_link_settings.lane_count;
961 	uint32_t active_total_with_borders;
962 
963 	if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx))
964 		*dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
965 
966 	*phyd32clk = get_phyd32clk_src(pipe_ctx->stream->link);
967 
968 	if (dc_is_tmds_signal(pipe_ctx->stream->signal))
969 		dcn401_calculate_dccg_tmds_div_value(pipe_ctx, tmds_div);
970 	else
971 		*tmds_div = PIXEL_RATE_DIV_BY_1;
972 
973 	/* enable early control to avoid corruption on DP monitor*/
974 	active_total_with_borders =
975 			timing->h_addressable
976 				+ timing->h_border_left
977 				+ timing->h_border_right;
978 
979 	if (lane_count != 0)
980 		*early_control = active_total_with_borders % lane_count;
981 
982 	if (*early_control == 0)
983 		*early_control = lane_count;
984 
985 }
986 
dcn401_enable_stream(struct pipe_ctx * pipe_ctx)987 void dcn401_enable_stream(struct pipe_ctx *pipe_ctx)
988 {
989 	uint32_t early_control = 0;
990 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
991 	struct dc_link *link = pipe_ctx->stream->link;
992 	const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
993 	struct dc *dc = pipe_ctx->stream->ctx->dc;
994 	struct dccg *dccg = dc->res_pool->dccg;
995 	enum phyd32clk_clock_source phyd32clk;
996 	int dp_hpo_inst = 0;
997 	unsigned int tmds_div = PIXEL_RATE_DIV_NA;
998 	unsigned int unused_div = PIXEL_RATE_DIV_NA;
999 	struct link_encoder *link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link);
1000 	struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
1001 
1002 	dcn401_enable_stream_calc(pipe_ctx, &dp_hpo_inst, &phyd32clk,
1003 				&tmds_div, &early_control);
1004 
1005 	if (dc_is_dp_signal(pipe_ctx->stream->signal) || dc_is_virtual_signal(pipe_ctx->stream->signal)) {
1006 		if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
1007 			dccg->funcs->set_dpstreamclk(dccg, DPREFCLK, tg->inst, dp_hpo_inst);
1008 			if (link->cur_link_settings.link_rate == LINK_RATE_UNKNOWN) {
1009 				dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst);
1010 			} else {
1011 				dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk);
1012 			}
1013 		} else {
1014 			dccg->funcs->enable_symclk_se(dccg, stream_enc->stream_enc_inst,
1015 					link_enc->transmitter - TRANSMITTER_UNIPHY_A);
1016 		}
1017 	}
1018 
1019 	if (dc->res_pool->dccg->funcs->set_pixel_rate_div) {
1020 		dc->res_pool->dccg->funcs->set_pixel_rate_div(
1021 			dc->res_pool->dccg,
1022 			pipe_ctx->stream_res.tg->inst,
1023 			tmds_div,
1024 			unused_div);
1025 	}
1026 
1027 	link_hwss->setup_stream_encoder(pipe_ctx);
1028 
1029 	if (pipe_ctx->plane_state && pipe_ctx->plane_state->flip_immediate != 1) {
1030 		if (dc->hwss.program_dmdata_engine)
1031 			dc->hwss.program_dmdata_engine(pipe_ctx);
1032 	}
1033 
1034 	dc->hwss.update_info_frame(pipe_ctx);
1035 
1036 	if (dc_is_dp_signal(pipe_ctx->stream->signal))
1037 		dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
1038 
1039 	tg->funcs->set_early_control(tg, early_control);
1040 }
1041 
dcn401_setup_hpo_hw_control(const struct dce_hwseq * hws,bool enable)1042 void dcn401_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable)
1043 {
1044 	REG_UPDATE(HPO_TOP_HW_CONTROL, HPO_IO_EN, enable);
1045 }
1046 
dcn401_can_pipe_disable_cursor(struct pipe_ctx * pipe_ctx)1047 static bool dcn401_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
1048 {
1049 	struct pipe_ctx *test_pipe, *split_pipe;
1050 	const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data;
1051 	struct rect r1 = scl_data->recout, r2, r2_half;
1052 	int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b;
1053 	int cur_layer = pipe_ctx->plane_state->layer_index;
1054 
1055 	/**
1056 	 * Disable the cursor if there's another pipe above this with a
1057 	 * plane that contains this pipe's viewport to prevent double cursor
1058 	 * and incorrect scaling artifacts.
1059 	 */
1060 	for (test_pipe = pipe_ctx->top_pipe; test_pipe;
1061 		test_pipe = test_pipe->top_pipe) {
1062 		// Skip invisible layer and pipe-split plane on same layer
1063 		if (!test_pipe->plane_state ||
1064 			!test_pipe->plane_state->visible ||
1065 			test_pipe->plane_state->layer_index == cur_layer)
1066 			continue;
1067 
1068 		r2 = test_pipe->plane_res.scl_data.recout;
1069 		r2_r = r2.x + r2.width;
1070 		r2_b = r2.y + r2.height;
1071 		split_pipe = test_pipe;
1072 
1073 		/**
1074 		 * There is another half plane on same layer because of
1075 		 * pipe-split, merge together per same height.
1076 		 */
1077 		for (split_pipe = pipe_ctx->top_pipe; split_pipe;
1078 			split_pipe = split_pipe->top_pipe)
1079 			if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) {
1080 				r2_half = split_pipe->plane_res.scl_data.recout;
1081 				r2.x = (r2_half.x < r2.x) ? r2_half.x : r2.x;
1082 				r2.width = r2.width + r2_half.width;
1083 				r2_r = r2.x + r2.width;
1084 				break;
1085 			}
1086 
1087 		if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= r2_b)
1088 			return true;
1089 	}
1090 
1091 	return false;
1092 }
1093 
adjust_hotspot_between_slices_for_2x_magnify(uint32_t cursor_width,struct dc_cursor_position * pos_cpy)1094 void adjust_hotspot_between_slices_for_2x_magnify(uint32_t cursor_width, struct dc_cursor_position *pos_cpy)
1095 {
1096 	if (cursor_width <= 128) {
1097 		pos_cpy->x_hotspot /= 2;
1098 		pos_cpy->x_hotspot += 1;
1099 	} else {
1100 		pos_cpy->x_hotspot /= 2;
1101 		pos_cpy->x_hotspot += 2;
1102 	}
1103 }
1104 
dcn401_set_cursor_position(struct pipe_ctx * pipe_ctx)1105 void dcn401_set_cursor_position(struct pipe_ctx *pipe_ctx)
1106 {
1107 	struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
1108 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
1109 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
1110 	struct dc_cursor_mi_param param = {
1111 		.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
1112 		.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
1113 		.viewport = pipe_ctx->plane_res.scl_data.viewport,
1114 		.recout = pipe_ctx->plane_res.scl_data.recout,
1115 		.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
1116 		.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
1117 		.rotation = pipe_ctx->plane_state->rotation,
1118 		.mirror = pipe_ctx->plane_state->horizontal_mirror,
1119 		.stream = pipe_ctx->stream
1120 	};
1121 	struct rect odm_slice_src = { 0 };
1122 	bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
1123 		(pipe_ctx->prev_odm_pipe != NULL);
1124 	int prev_odm_width = 0;
1125 	struct pipe_ctx *prev_odm_pipe = NULL;
1126 	bool mpc_combine_on = false;
1127 	int  bottom_pipe_x_pos = 0;
1128 
1129 	int x_pos = pos_cpy.x;
1130 	int y_pos = pos_cpy.y;
1131 	int recout_x_pos = 0;
1132 	int recout_y_pos = 0;
1133 
1134 	if ((pipe_ctx->top_pipe != NULL) || (pipe_ctx->bottom_pipe != NULL)) {
1135 		if ((pipe_ctx->plane_state->src_rect.width != pipe_ctx->plane_res.scl_data.viewport.width) ||
1136 			(pipe_ctx->plane_state->src_rect.height != pipe_ctx->plane_res.scl_data.viewport.height)) {
1137 			mpc_combine_on = true;
1138 		}
1139 	}
1140 
1141 	/* DCN4 moved cursor composition after Scaler, so in HW it is in
1142 	 * recout space and for HW Cursor position programming need to
1143 	 * translate to recout space.
1144 	 *
1145 	 * Cursor X and Y position programmed into HW can't be negative,
1146 	 * in fact it is X, Y coordinate shifted for the HW Cursor Hot spot
1147 	 * position that goes into HW X and Y coordinates while HW Hot spot
1148 	 * X and Y coordinates are length relative to the cursor top left
1149 	 * corner, hotspot must be smaller than the cursor size.
1150 	 *
1151 	 * DMs/DC interface for Cursor position is in stream->src space, and
1152 	 * DMs supposed to transform Cursor coordinates to stream->src space,
1153 	 * then here we need to translate Cursor coordinates to stream->dst
1154 	 * space, as now in HW, Cursor coordinates are in per pipe recout
1155 	 * space, and for the given pipe valid coordinates are only in range
1156 	 * from 0,0 - recout width, recout height space.
1157 	 * If certain pipe combining is in place, need to further adjust per
1158 	 * pipe to make sure each pipe enabling cursor on its part of the
1159 	 * screen.
1160 	 */
1161 	x_pos = pipe_ctx->stream->dst.x + x_pos * pipe_ctx->stream->dst.width /
1162 		pipe_ctx->stream->src.width;
1163 	y_pos = pipe_ctx->stream->dst.y + y_pos * pipe_ctx->stream->dst.height /
1164 		pipe_ctx->stream->src.height;
1165 
1166 	/* If the cursor's source viewport is clipped then we need to
1167 	 * translate the cursor to appear in the correct position on
1168 	 * the screen.
1169 	 *
1170 	 * This translation isn't affected by scaling so it needs to be
1171 	 * done *after* we adjust the position for the scale factor.
1172 	 *
1173 	 * This is only done by opt-in for now since there are still
1174 	 * some usecases like tiled display that might enable the
1175 	 * cursor on both streams while expecting dc to clip it.
1176 	 */
1177 	if (pos_cpy.translate_by_source) {
1178 		x_pos += pipe_ctx->plane_state->src_rect.x;
1179 		y_pos += pipe_ctx->plane_state->src_rect.y;
1180 	}
1181 
1182 	/* Adjust for ODM Combine
1183 	 * next/prev_odm_offset is to account for scaled modes that have underscan
1184 	 */
1185 	if (odm_combine_on) {
1186 		prev_odm_pipe = pipe_ctx->prev_odm_pipe;
1187 
1188 		while (prev_odm_pipe != NULL) {
1189 			odm_slice_src = resource_get_odm_slice_src_rect(prev_odm_pipe);
1190 			prev_odm_width += odm_slice_src.width;
1191 			prev_odm_pipe = prev_odm_pipe->prev_odm_pipe;
1192 		}
1193 
1194 		x_pos -= (prev_odm_width);
1195 	}
1196 
1197 	/* If the position is negative then we need to add to the hotspot
1198 	 * to fix cursor size between ODM slices
1199 	 */
1200 
1201 	if (x_pos < 0) {
1202 		pos_cpy.x_hotspot -= x_pos;
1203 		if (hubp->curs_attr.attribute_flags.bits.ENABLE_MAGNIFICATION)
1204 			adjust_hotspot_between_slices_for_2x_magnify(hubp->curs_attr.width, &pos_cpy);
1205 		x_pos = 0;
1206 	}
1207 
1208 	if (y_pos < 0) {
1209 		pos_cpy.y_hotspot -= y_pos;
1210 		y_pos = 0;
1211 	}
1212 
1213 	/* If the position on bottom MPC pipe is negative then we need to add to the hotspot and
1214 	 * adjust x_pos on bottom pipe to make cursor visible when crossing between MPC slices.
1215 	 */
1216 	if (mpc_combine_on &&
1217 		pipe_ctx->top_pipe &&
1218 		(pipe_ctx == pipe_ctx->top_pipe->bottom_pipe)) {
1219 
1220 		bottom_pipe_x_pos = x_pos - pipe_ctx->plane_res.scl_data.recout.x;
1221 		if (bottom_pipe_x_pos < 0) {
1222 			x_pos = pipe_ctx->plane_res.scl_data.recout.x;
1223 			pos_cpy.x_hotspot -= bottom_pipe_x_pos;
1224 			if (hubp->curs_attr.attribute_flags.bits.ENABLE_MAGNIFICATION)
1225 				adjust_hotspot_between_slices_for_2x_magnify(hubp->curs_attr.width, &pos_cpy);
1226 		}
1227 	}
1228 
1229 	pos_cpy.x = (uint32_t)x_pos;
1230 	pos_cpy.y = (uint32_t)y_pos;
1231 
1232 	if (pos_cpy.enable && dcn401_can_pipe_disable_cursor(pipe_ctx))
1233 		pos_cpy.enable = false;
1234 
1235 	x_pos = pos_cpy.x - param.recout.x;
1236 	y_pos = pos_cpy.y - param.recout.y;
1237 
1238 	recout_x_pos = x_pos - pos_cpy.x_hotspot;
1239 	recout_y_pos = y_pos - pos_cpy.y_hotspot;
1240 
1241 	if (recout_x_pos >= (int)param.recout.width)
1242 		pos_cpy.enable = false;  /* not visible beyond right edge*/
1243 
1244 	if (recout_y_pos >= (int)param.recout.height)
1245 		pos_cpy.enable = false;  /* not visible beyond bottom edge*/
1246 
1247 	if (recout_x_pos + (int)hubp->curs_attr.width <= 0)
1248 		pos_cpy.enable = false;  /* not visible beyond left edge*/
1249 
1250 	if (recout_y_pos + (int)hubp->curs_attr.height <= 0)
1251 		pos_cpy.enable = false;  /* not visible beyond top edge*/
1252 
1253 	hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
1254 	dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
1255 }
1256 
dcn401_check_no_memory_request_for_cab(struct dc * dc)1257 static bool dcn401_check_no_memory_request_for_cab(struct dc *dc)
1258 {
1259 	int i;
1260 
1261 	/* First, check no-memory-request case */
1262 	for (i = 0; i < dc->current_state->stream_count; i++) {
1263 		if ((dc->current_state->stream_status[i].plane_count) &&
1264 			(dc->current_state->streams[i]->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED))
1265 			/* Fail eligibility on a visible stream */
1266 			return false;
1267 	}
1268 
1269 	return true;
1270 }
1271 
dcn401_calculate_cab_allocation(struct dc * dc,struct dc_state * ctx)1272 static uint32_t dcn401_calculate_cab_allocation(struct dc *dc, struct dc_state *ctx)
1273 {
1274 	int i;
1275 	uint8_t num_ways = 0;
1276 	uint32_t mall_ss_size_bytes = 0;
1277 
1278 	mall_ss_size_bytes = ctx->bw_ctx.bw.dcn.mall_ss_size_bytes;
1279 	// TODO add additional logic for PSR active stream exclusion optimization
1280 	// mall_ss_psr_active_size_bytes = ctx->bw_ctx.bw.dcn.mall_ss_psr_active_size_bytes;
1281 
1282 	// Include cursor size for CAB allocation
1283 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1284 		struct pipe_ctx *pipe = &ctx->res_ctx.pipe_ctx[i];
1285 
1286 		if (!pipe->stream || !pipe->plane_state)
1287 			continue;
1288 
1289 		mall_ss_size_bytes += dcn32_helper_calculate_mall_bytes_for_cursor(dc, pipe, false);
1290 	}
1291 
1292 	// Convert number of cache lines required to number of ways
1293 	if (dc->debug.force_mall_ss_num_ways > 0)
1294 		num_ways = dc->debug.force_mall_ss_num_ways;
1295 	else if (dc->res_pool->funcs->calculate_mall_ways_from_bytes)
1296 		num_ways = dc->res_pool->funcs->calculate_mall_ways_from_bytes(dc, mall_ss_size_bytes);
1297 	else
1298 		num_ways = 0;
1299 
1300 	return num_ways;
1301 }
1302 
dcn401_apply_idle_power_optimizations(struct dc * dc,bool enable)1303 bool dcn401_apply_idle_power_optimizations(struct dc *dc, bool enable)
1304 {
1305 	union dmub_rb_cmd cmd;
1306 	uint8_t ways, i;
1307 	int j;
1308 	bool mall_ss_unsupported = false;
1309 	struct dc_plane_state *plane = NULL;
1310 
1311 	if (!dc->ctx->dmub_srv || !dc->current_state)
1312 		return false;
1313 
1314 	for (i = 0; i < dc->current_state->stream_count; i++) {
1315 		/* MALL SS messaging is not supported with PSR at this time */
1316 		if (dc->current_state->streams[i] != NULL &&
1317 				dc->current_state->streams[i]->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED) {
1318 			DC_LOG_MALL("MALL SS not supported with PSR at this time\n");
1319 			return false;
1320 		}
1321 	}
1322 
1323 	memset(&cmd, 0, sizeof(cmd));
1324 	cmd.cab.header.type = DMUB_CMD__CAB_FOR_SS;
1325 	cmd.cab.header.payload_bytes = sizeof(cmd.cab) - sizeof(cmd.cab.header);
1326 
1327 	if (enable) {
1328 		if (dcn401_check_no_memory_request_for_cab(dc)) {
1329 			/* 1. Check no memory request case for CAB.
1330 			 * If no memory request case, send CAB_ACTION NO_DCN_REQ DMUB message
1331 			 */
1332 			DC_LOG_MALL("sending CAB action NO_DCN_REQ\n");
1333 			cmd.cab.header.sub_type = DMUB_CMD__CAB_NO_DCN_REQ;
1334 		} else {
1335 			/* 2. Check if all surfaces can fit in CAB.
1336 			 * If surfaces can fit into CAB, send CAB_ACTION_ALLOW DMUB message
1337 			 * and configure HUBP's to fetch from MALL
1338 			 */
1339 			ways = dcn401_calculate_cab_allocation(dc, dc->current_state);
1340 
1341 			/* MALL not supported with Stereo3D or TMZ surface. If any plane is using stereo,
1342 			 * or TMZ surface, don't try to enter MALL.
1343 			 */
1344 			for (i = 0; i < dc->current_state->stream_count; i++) {
1345 				for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
1346 					plane = dc->current_state->stream_status[i].plane_states[j];
1347 
1348 					if (plane->address.type == PLN_ADDR_TYPE_GRPH_STEREO ||
1349 							plane->address.tmz_surface) {
1350 						mall_ss_unsupported = true;
1351 						break;
1352 					}
1353 				}
1354 				if (mall_ss_unsupported)
1355 					break;
1356 			}
1357 			if (ways <= dc->caps.cache_num_ways && !mall_ss_unsupported) {
1358 				cmd.cab.header.sub_type = DMUB_CMD__CAB_DCN_SS_FIT_IN_CAB;
1359 				cmd.cab.cab_alloc_ways = ways;
1360 				DC_LOG_MALL("cab allocation: %d ways. CAB action: DCN_SS_FIT_IN_CAB\n", ways);
1361 			} else {
1362 				cmd.cab.header.sub_type = DMUB_CMD__CAB_DCN_SS_NOT_FIT_IN_CAB;
1363 				DC_LOG_MALL("frame does not fit in CAB: %d ways required. CAB action: DCN_SS_NOT_FIT_IN_CAB\n", ways);
1364 			}
1365 		}
1366 	} else {
1367 		/* Disable CAB */
1368 		cmd.cab.header.sub_type = DMUB_CMD__CAB_NO_IDLE_OPTIMIZATION;
1369 		DC_LOG_MALL("idle optimization disabled\n");
1370 	}
1371 
1372 	dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
1373 
1374 	return true;
1375 }
1376 
dcn401_wait_for_dcc_meta_propagation(const struct dc * dc,const struct pipe_ctx * top_pipe)1377 void dcn401_wait_for_dcc_meta_propagation(const struct dc *dc,
1378 		const struct pipe_ctx *top_pipe)
1379 {
1380 	bool is_wait_needed = false;
1381 	const struct pipe_ctx *pipe_ctx = top_pipe;
1382 
1383 	/* check if any surfaces are updating address while using flip immediate and dcc */
1384 	while (pipe_ctx != NULL) {
1385 		if (pipe_ctx->plane_state &&
1386 				pipe_ctx->plane_state->dcc.enable &&
1387 				pipe_ctx->plane_state->flip_immediate &&
1388 				pipe_ctx->plane_state->update_flags.bits.addr_update) {
1389 			is_wait_needed = true;
1390 			break;
1391 		}
1392 
1393 		/* check next pipe */
1394 		pipe_ctx = pipe_ctx->bottom_pipe;
1395 	}
1396 
1397 	if (is_wait_needed && dc->debug.dcc_meta_propagation_delay_us > 0) {
1398 		udelay(dc->debug.dcc_meta_propagation_delay_us);
1399 	}
1400 }
1401 
dcn401_prepare_bandwidth(struct dc * dc,struct dc_state * context)1402 void dcn401_prepare_bandwidth(struct dc *dc,
1403 	struct dc_state *context)
1404 {
1405 	struct hubbub *hubbub = dc->res_pool->hubbub;
1406 	bool p_state_change_support = context->bw_ctx.bw.dcn.clk.p_state_change_support;
1407 	unsigned int compbuf_size = 0;
1408 
1409 	/* Any transition into P-State support should disable MCLK switching first to avoid hangs */
1410 	if (p_state_change_support) {
1411 		dc->optimized_required = true;
1412 		context->bw_ctx.bw.dcn.clk.p_state_change_support = false;
1413 	}
1414 
1415 	if (dc->clk_mgr->dc_mode_softmax_enabled)
1416 		if (dc->clk_mgr->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 &&
1417 				context->bw_ctx.bw.dcn.clk.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
1418 			dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz);
1419 
1420 	/* Increase clocks */
1421 	dc->clk_mgr->funcs->update_clocks(
1422 			dc->clk_mgr,
1423 			context,
1424 			false);
1425 
1426 	/* program dchubbub watermarks:
1427 	 * For assigning wm_optimized_required, use |= operator since we don't want
1428 	 * to clear the value if the optimize has not happened yet
1429 	 */
1430 	dc->wm_optimized_required |= hubbub->funcs->program_watermarks(hubbub,
1431 					&context->bw_ctx.bw.dcn.watermarks,
1432 					dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
1433 					false);
1434 
1435 	/* decrease compbuf size */
1436 	if (hubbub->funcs->program_compbuf_segments) {
1437 		compbuf_size = context->bw_ctx.bw.dcn.arb_regs.compbuf_size;
1438 		dc->wm_optimized_required |= (compbuf_size != dc->current_state->bw_ctx.bw.dcn.arb_regs.compbuf_size);
1439 
1440 		hubbub->funcs->program_compbuf_segments(hubbub, compbuf_size, false);
1441 	}
1442 
1443 	if (dc->debug.fams2_config.bits.enable) {
1444 		dcn401_fams2_global_control_lock(dc, context, true);
1445 		dcn401_fams2_update_config(dc, context, false);
1446 		dcn401_fams2_global_control_lock(dc, context, false);
1447 	}
1448 
1449 	if (p_state_change_support != context->bw_ctx.bw.dcn.clk.p_state_change_support) {
1450 		/* After disabling P-State, restore the original value to ensure we get the correct P-State
1451 		 * on the next optimize. */
1452 		context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support;
1453 	}
1454 }
1455 
dcn401_optimize_bandwidth(struct dc * dc,struct dc_state * context)1456 void dcn401_optimize_bandwidth(
1457 		struct dc *dc,
1458 		struct dc_state *context)
1459 {
1460 	int i;
1461 	struct hubbub *hubbub = dc->res_pool->hubbub;
1462 
1463 	/* enable fams2 if needed */
1464 	if (dc->debug.fams2_config.bits.enable) {
1465 		dcn401_fams2_global_control_lock(dc, context, true);
1466 		dcn401_fams2_update_config(dc, context, true);
1467 		dcn401_fams2_global_control_lock(dc, context, false);
1468 	}
1469 
1470 	/* program dchubbub watermarks */
1471 	hubbub->funcs->program_watermarks(hubbub,
1472 					&context->bw_ctx.bw.dcn.watermarks,
1473 					dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
1474 					true);
1475 
1476 	if (dc->clk_mgr->dc_mode_softmax_enabled)
1477 		if (dc->clk_mgr->clks.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 &&
1478 				context->bw_ctx.bw.dcn.clk.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
1479 			dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->dc_mode_softmax_memclk);
1480 
1481 	/* increase compbuf size */
1482 	if (hubbub->funcs->program_compbuf_segments)
1483 		hubbub->funcs->program_compbuf_segments(hubbub, context->bw_ctx.bw.dcn.arb_regs.compbuf_size, true);
1484 
1485 	dc->clk_mgr->funcs->update_clocks(
1486 			dc->clk_mgr,
1487 			context,
1488 			true);
1489 	if (context->bw_ctx.bw.dcn.clk.zstate_support == DCN_ZSTATE_SUPPORT_ALLOW) {
1490 		for (i = 0; i < dc->res_pool->pipe_count; ++i) {
1491 			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1492 
1493 			if (pipe_ctx->stream && pipe_ctx->plane_res.hubp->funcs->program_extended_blank
1494 				&& pipe_ctx->stream->adjust.v_total_min == pipe_ctx->stream->adjust.v_total_max
1495 				&& pipe_ctx->stream->adjust.v_total_max > pipe_ctx->stream->timing.v_total)
1496 					pipe_ctx->plane_res.hubp->funcs->program_extended_blank(pipe_ctx->plane_res.hubp,
1497 						pipe_ctx->dlg_regs.min_dst_y_next_start);
1498 		}
1499 	}
1500 }
1501 
dcn401_fams2_global_control_lock(struct dc * dc,struct dc_state * context,bool lock)1502 void dcn401_fams2_global_control_lock(struct dc *dc,
1503 		struct dc_state *context,
1504 		bool lock)
1505 {
1506 	/* use always for now */
1507 	union dmub_inbox0_cmd_lock_hw hw_lock_cmd = { 0 };
1508 
1509 	if (!dc->ctx || !dc->ctx->dmub_srv || !dc->debug.fams2_config.bits.enable)
1510 		return;
1511 
1512 	hw_lock_cmd.bits.command_code = DMUB_INBOX0_CMD__HW_LOCK;
1513 	hw_lock_cmd.bits.hw_lock_client = HW_LOCK_CLIENT_DRIVER;
1514 	hw_lock_cmd.bits.lock = lock;
1515 	hw_lock_cmd.bits.should_release = !lock;
1516 	dmub_hw_lock_mgr_inbox0_cmd(dc->ctx->dmub_srv, hw_lock_cmd);
1517 }
1518 
dcn401_fams2_global_control_lock_fast(union block_sequence_params * params)1519 void dcn401_fams2_global_control_lock_fast(union block_sequence_params *params)
1520 {
1521 	struct dc *dc = params->fams2_global_control_lock_fast_params.dc;
1522 	bool lock = params->fams2_global_control_lock_fast_params.lock;
1523 
1524 	if (params->fams2_global_control_lock_fast_params.is_required) {
1525 		union dmub_inbox0_cmd_lock_hw hw_lock_cmd = { 0 };
1526 
1527 		hw_lock_cmd.bits.command_code = DMUB_INBOX0_CMD__HW_LOCK;
1528 		hw_lock_cmd.bits.hw_lock_client = HW_LOCK_CLIENT_DRIVER;
1529 		hw_lock_cmd.bits.lock = lock;
1530 		hw_lock_cmd.bits.should_release = !lock;
1531 		dmub_hw_lock_mgr_inbox0_cmd(dc->ctx->dmub_srv, hw_lock_cmd);
1532 	}
1533 }
1534 
dcn401_fams2_update_config(struct dc * dc,struct dc_state * context,bool enable)1535 void dcn401_fams2_update_config(struct dc *dc, struct dc_state *context, bool enable)
1536 {
1537 	bool fams2_required;
1538 
1539 	if (!dc->ctx || !dc->ctx->dmub_srv || !dc->debug.fams2_config.bits.enable)
1540 		return;
1541 
1542 	fams2_required = context->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable;
1543 
1544 	dc_dmub_srv_fams2_update_config(dc, context, enable && fams2_required);
1545 }
1546 
update_dsc_for_odm_change(struct dc * dc,struct dc_state * context,struct pipe_ctx * otg_master)1547 static void update_dsc_for_odm_change(struct dc *dc, struct dc_state *context,
1548 		struct pipe_ctx *otg_master)
1549 {
1550 	int i;
1551 	struct pipe_ctx *old_pipe;
1552 	struct pipe_ctx *new_pipe;
1553 	struct pipe_ctx *old_opp_heads[MAX_PIPES];
1554 	struct pipe_ctx *old_otg_master;
1555 	int old_opp_head_count = 0;
1556 
1557 	old_otg_master = &dc->current_state->res_ctx.pipe_ctx[otg_master->pipe_idx];
1558 
1559 	if (resource_is_pipe_type(old_otg_master, OTG_MASTER)) {
1560 		old_opp_head_count = resource_get_opp_heads_for_otg_master(old_otg_master,
1561 									   &dc->current_state->res_ctx,
1562 									   old_opp_heads);
1563 	} else {
1564 		// DC cannot assume that the current state and the new state
1565 		// share the same OTG pipe since this is not true when called
1566 		// in the context of a commit stream not checked. Hence, set
1567 		// old_otg_master to NULL to skip the DSC configuration.
1568 		old_otg_master = NULL;
1569 	}
1570 
1571 
1572 	if (otg_master->stream_res.dsc)
1573 		dcn32_update_dsc_on_stream(otg_master,
1574 				otg_master->stream->timing.flags.DSC);
1575 	if (old_otg_master && old_otg_master->stream_res.dsc) {
1576 		for (i = 0; i < old_opp_head_count; i++) {
1577 			old_pipe = old_opp_heads[i];
1578 			new_pipe = &context->res_ctx.pipe_ctx[old_pipe->pipe_idx];
1579 			if (old_pipe->stream_res.dsc && !new_pipe->stream_res.dsc)
1580 				old_pipe->stream_res.dsc->funcs->dsc_disconnect(
1581 						old_pipe->stream_res.dsc);
1582 		}
1583 	}
1584 }
1585 
dcn401_update_odm(struct dc * dc,struct dc_state * context,struct pipe_ctx * otg_master)1586 void dcn401_update_odm(struct dc *dc, struct dc_state *context,
1587 		struct pipe_ctx *otg_master)
1588 {
1589 	struct pipe_ctx *opp_heads[MAX_PIPES];
1590 	int opp_inst[MAX_PIPES] = {0};
1591 	int opp_head_count;
1592 	int odm_slice_width = resource_get_odm_slice_dst_width(otg_master, false);
1593 	int last_odm_slice_width = resource_get_odm_slice_dst_width(otg_master, true);
1594 	int i;
1595 
1596 	opp_head_count = resource_get_opp_heads_for_otg_master(
1597 			otg_master, &context->res_ctx, opp_heads);
1598 
1599 	for (i = 0; i < opp_head_count; i++)
1600 		opp_inst[i] = opp_heads[i]->stream_res.opp->inst;
1601 	if (opp_head_count > 1)
1602 		otg_master->stream_res.tg->funcs->set_odm_combine(
1603 				otg_master->stream_res.tg,
1604 				opp_inst, opp_head_count,
1605 				odm_slice_width, last_odm_slice_width);
1606 	else
1607 		otg_master->stream_res.tg->funcs->set_odm_bypass(
1608 				otg_master->stream_res.tg,
1609 				&otg_master->stream->timing);
1610 
1611 	for (i = 0; i < opp_head_count; i++) {
1612 		opp_heads[i]->stream_res.opp->funcs->opp_pipe_clock_control(
1613 				opp_heads[i]->stream_res.opp,
1614 				true);
1615 		opp_heads[i]->stream_res.opp->funcs->opp_program_left_edge_extra_pixel(
1616 				opp_heads[i]->stream_res.opp,
1617 				opp_heads[i]->stream->timing.pixel_encoding,
1618 				resource_is_pipe_type(opp_heads[i], OTG_MASTER));
1619 	}
1620 
1621 	update_dsc_for_odm_change(dc, context, otg_master);
1622 
1623 	if (!resource_is_pipe_type(otg_master, DPP_PIPE))
1624 		/*
1625 		 * blank pattern is generated by OPP, reprogram blank pattern
1626 		 * due to OPP count change
1627 		 */
1628 		dc->hwseq->funcs.blank_pixel_data(dc, otg_master, true);
1629 }
1630 
dcn401_unblank_stream(struct pipe_ctx * pipe_ctx,struct dc_link_settings * link_settings)1631 void dcn401_unblank_stream(struct pipe_ctx *pipe_ctx,
1632 		struct dc_link_settings *link_settings)
1633 {
1634 	struct encoder_unblank_param params = {0};
1635 	struct dc_stream_state *stream = pipe_ctx->stream;
1636 	struct dc_link *link = stream->link;
1637 	struct dce_hwseq *hws = link->dc->hwseq;
1638 
1639 	/* calculate parameters for unblank */
1640 	params.opp_cnt = resource_get_odm_slice_count(pipe_ctx);
1641 
1642 	params.timing = pipe_ctx->stream->timing;
1643 	params.link_settings.link_rate = link_settings->link_rate;
1644 	params.pix_per_cycle = pipe_ctx->stream_res.pix_clk_params.dio_se_pix_per_cycle;
1645 
1646 	if (link->dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
1647 		pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_unblank(
1648 				pipe_ctx->stream_res.hpo_dp_stream_enc,
1649 				pipe_ctx->stream_res.tg->inst);
1650 	} else if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
1651 		pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, &params);
1652 	}
1653 
1654 	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP)
1655 		hws->funcs.edp_backlight_control(link, true);
1656 }
1657 
dcn401_hardware_release(struct dc * dc)1658 void dcn401_hardware_release(struct dc *dc)
1659 {
1660 	dc_dmub_srv_fams2_update_config(dc, dc->current_state, false);
1661 
1662 	/* If pstate unsupported, or still supported
1663 	 * by firmware, force it supported by dcn
1664 	 */
1665 	if (dc->current_state) {
1666 		if ((!dc->clk_mgr->clks.p_state_change_support ||
1667 				dc->current_state->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) &&
1668 				dc->res_pool->hubbub->funcs->force_pstate_change_control)
1669 			dc->res_pool->hubbub->funcs->force_pstate_change_control(
1670 					dc->res_pool->hubbub, true, true);
1671 
1672 		dc->current_state->bw_ctx.bw.dcn.clk.p_state_change_support = true;
1673 		dc->clk_mgr->funcs->update_clocks(dc->clk_mgr, dc->current_state, true);
1674 	}
1675 }
1676 
dcn401_wait_for_det_buffer_update(struct dc * dc,struct dc_state * context,struct pipe_ctx * otg_master)1677 void dcn401_wait_for_det_buffer_update(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master)
1678 {
1679 	struct pipe_ctx *opp_heads[MAX_PIPES];
1680 	struct pipe_ctx *dpp_pipes[MAX_PIPES];
1681 	struct hubbub *hubbub = dc->res_pool->hubbub;
1682 	int dpp_count = 0;
1683 
1684 	if (!otg_master->stream)
1685 		return;
1686 
1687 	int slice_count = resource_get_opp_heads_for_otg_master(otg_master,
1688 			&context->res_ctx, opp_heads);
1689 
1690 	for (int slice_idx = 0; slice_idx < slice_count; slice_idx++) {
1691 		if (opp_heads[slice_idx]->plane_state) {
1692 			dpp_count = resource_get_dpp_pipes_for_opp_head(
1693 					opp_heads[slice_idx],
1694 					&context->res_ctx,
1695 					dpp_pipes);
1696 			for (int dpp_idx = 0; dpp_idx < dpp_count; dpp_idx++) {
1697 				struct pipe_ctx *dpp_pipe = dpp_pipes[dpp_idx];
1698 					if (dpp_pipe && hubbub &&
1699 						dpp_pipe->plane_res.hubp &&
1700 						hubbub->funcs->wait_for_det_update)
1701 						hubbub->funcs->wait_for_det_update(hubbub, dpp_pipe->plane_res.hubp->inst);
1702 			}
1703 		}
1704 	}
1705 }
1706 
/* Lock or unlock all active OTG master pipes for an interdependent update.
 *
 * Locking is straightforward: every enabled, non-phantom OTG master is locked.
 * Unlocking is ordered by DET allocation change (shrinking streams first) so
 * the total DET allocation never transiently exceeds the hardware budget —
 * see the inline example below.
 */
void dcn401_interdependent_update_lock(struct dc *dc,
		struct dc_state *context, bool lock)
{
	unsigned int i = 0;
	struct pipe_ctx *pipe = NULL;
	struct timing_generator *tg = NULL;
	bool pipe_unlocked[MAX_PIPES] = {0};

	if (lock) {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			pipe = &context->res_ctx.pipe_ctx[i];
			tg = pipe->stream_res.tg;

			/* Only lock enabled OTG masters; phantom (SubVP) pipes
			 * are managed separately.
			 */
			if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
					!tg->funcs->is_tg_enabled(tg) ||
					dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)
				continue;
			dc->hwss.pipe_control_lock(dc, pipe, true);
		}
	} else {
		/* Unlock pipes based on the change in DET allocation instead of pipe index
		 * Prevents over allocation of DET during unlock process
		 * e.g. 2 pipe config with different streams with a max of 20 DET segments
		 *	Before:								After:
		 *		- Pipe0: 10 DET segments			- Pipe0: 12 DET segments
		 *		- Pipe1: 10 DET segments			- Pipe1: 8 DET segments
		 * If Pipe0 gets updated first, 22 DET segments will be allocated
		 */
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			pipe = &context->res_ctx.pipe_ctx[i];
			tg = pipe->stream_res.tg;
			int current_pipe_idx = i;

			/* Pipes that were never locked are marked unlocked so the
			 * final pass below skips them.
			 */
			if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
					!tg->funcs->is_tg_enabled(tg) ||
					dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
				pipe_unlocked[i] = true;
				continue;
			}

			// If the same stream exists in old context, ensure the OTG_MASTER pipes for the same stream get compared
			struct pipe_ctx *old_otg_master = resource_get_otg_master_for_stream(&dc->current_state->res_ctx, pipe->stream);

			if (old_otg_master)
				current_pipe_idx = old_otg_master->pipe_idx;
			/* First pass: unlock streams whose DET demand shrinks, and
			 * wait for the DET update to land before moving on.
			 */
			if (resource_calculate_det_for_stream(context, pipe) <
					resource_calculate_det_for_stream(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[current_pipe_idx])) {
				dc->hwss.pipe_control_lock(dc, pipe, false);
				pipe_unlocked[i] = true;
				dcn401_wait_for_det_buffer_update(dc, context, pipe);
			}
		}

		/* Second pass: unlock everything that remains (DET grows or is
		 * unchanged) — the freed segments are now available.
		 */
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			if (pipe_unlocked[i])
				continue;
			pipe = &context->res_ctx.pipe_ctx[i];
			dc->hwss.pipe_control_lock(dc, pipe, false);
		}
	}
}
1768 
dcn401_program_outstanding_updates(struct dc * dc,struct dc_state * context)1769 void dcn401_program_outstanding_updates(struct dc *dc,
1770 		struct dc_state *context)
1771 {
1772 	struct hubbub *hubbub = dc->res_pool->hubbub;
1773 
1774 	/* update compbuf if required */
1775 	if (hubbub->funcs->program_compbuf_segments)
1776 		hubbub->funcs->program_compbuf_segments(hubbub, context->bw_ctx.bw.dcn.arb_regs.compbuf_size, true);
1777 }
1778 
/* Tear down the back end (stream/link side) resources of a pipe being reset.
 *
 * Disables the stream output (DPMS / audio), frees the audio endpoint, and —
 * for the parent pipe only — shuts down ABM, the CRTC, OPTC clock, PHY output
 * and DTBCLK_P source. Finally detaches the stream from the pipe context.
 * Ordering here is hardware-mandated; do not reorder the disable steps.
 */
void dcn401_reset_back_end_for_pipe(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context)
{
	int i;
	struct dc_link *link = pipe_ctx->stream->link;
	const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);

	DC_LOGGER_INIT(dc->ctx->logger);
	/* No stream encoder means no back end to reset; just detach the stream. */
	if (pipe_ctx->stream_res.stream_enc == NULL) {
		pipe_ctx->stream = NULL;
		return;
	}

	/* DPMS may already disable or */
	/* dpms_off status is incorrect due to fastboot
	 * feature. When system resume from S4 with second
	 * screen only, the dpms_off would be true but
	 * VBIOS lit up eDP, so check link status too.
	 */
	if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
		dc->link_srv->set_dpms_off(pipe_ctx);
	else if (pipe_ctx->stream_res.audio)
		dc->hwss.disable_audio_stream(pipe_ctx);

	/* free acquired resources */
	if (pipe_ctx->stream_res.audio) {
		/*disable az_endpoint*/
		pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);

		/*free audio*/
		if (dc->caps.dynamic_audio == true) {
			/*we have to dynamic arbitrate the audio endpoints*/
			/*we free the resource, need reset is_audio_acquired*/
			update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
					pipe_ctx->stream_res.audio, false);
			pipe_ctx->stream_res.audio = NULL;
		}
	}

	/* by upper caller loop, parent pipe: pipe0, will be reset last.
	 * back end share by all pipes and will be disable only when disable
	 * parent pipe.
	 */
	if (pipe_ctx->top_pipe == NULL) {

		dc->hwss.set_abm_immediate_disable(pipe_ctx);

		pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);

		pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
		/* Drop any ODM combine so the OTG is back to single-OPP bypass. */
		if (pipe_ctx->stream_res.tg->funcs->set_odm_bypass)
			pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
					pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);

		/* Clear DRR (variable refresh) adjustment on the disabled OTG. */
		set_drr_and_clear_adjust_pending(pipe_ctx, pipe_ctx->stream, NULL);

		/* TODO - convert symclk_ref_cnts for otg to a bit map to solve
		 * the case where the same symclk is shared across multiple otg
		 * instances
		 */
		if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
			link->phy_state.symclk_ref_cnts.otg = 0;
		if (link->phy_state.symclk_state == SYMCLK_ON_TX_OFF) {
			link_hwss->disable_link_output(link,
					&pipe_ctx->link_res, pipe_ctx->stream->signal);
			link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
		}

		/* reset DTBCLK_P */
		if (dc->res_pool->dccg->funcs->set_dtbclk_p_src)
			dc->res_pool->dccg->funcs->set_dtbclk_p_src(dc->res_pool->dccg, REFCLK, pipe_ctx->stream_res.tg->inst);
	}

	/* Only clear the stream pointer if this pipe_ctx belongs to the
	 * current state's res_ctx (i.e. it is not a dangling copy).
	 */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
			break;

	if (i == dc->res_pool->pipe_count)
		return;

/*
 * In case of a dangling plane, setting this to NULL unconditionally
 * causes failures during reset hw ctx where, if stream is NULL,
 * it is expected that the pipe_ctx pointers to pipes and plane are NULL.
 */
	pipe_ctx->stream = NULL;
	DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
					pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
}
1870 
dcn401_reset_hw_ctx_wrap(struct dc * dc,struct dc_state * context)1871 void dcn401_reset_hw_ctx_wrap(
1872 		struct dc *dc,
1873 		struct dc_state *context)
1874 {
1875 	int i;
1876 	struct dce_hwseq *hws = dc->hwseq;
1877 
1878 	/* Reset Back End*/
1879 	for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
1880 		struct pipe_ctx *pipe_ctx_old =
1881 			&dc->current_state->res_ctx.pipe_ctx[i];
1882 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1883 
1884 		if (!pipe_ctx_old->stream)
1885 			continue;
1886 
1887 		if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe)
1888 			continue;
1889 
1890 		if (!pipe_ctx->stream ||
1891 				pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
1892 			struct clock_source *old_clk = pipe_ctx_old->clock_source;
1893 
1894 			if (hws->funcs.reset_back_end_for_pipe)
1895 				hws->funcs.reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
1896 			if (hws->funcs.enable_stream_gating)
1897 				hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
1898 			if (old_clk)
1899 				old_clk->funcs->cs_power_down(old_clk);
1900 		}
1901 	}
1902 }
1903