/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dcn35_clk_mgr.h"

#include "dccg.h"
#include "clk_mgr_internal.h"

// For dce12_get_dp_ref_freq_khz
#include "dce100/dce_clk_mgr.h"

// For dcn20_update_clocks_update_dpp_dto
#include "dcn20/dcn20_clk_mgr.h"

#include "reg_helper.h"
#include "core_types.h"
#include "dcn35_smu.h"
#include "dm_helpers.h"

#include "dcn31/dcn31_clk_mgr.h"

#include "dc_dmub_srv.h"
#include "link.h"
#include "logger_types.h"

#undef DC_LOGGER
#define DC_LOGGER \
	clk_mgr->base.base.ctx->logger

#define DCN_BASE__INST0_SEG1 0x000000C0
#define mmCLK1_CLK_PLL_REQ 0x16E37

#define mmCLK1_CLK0_DFS_CNTL 0x16E69
#define mmCLK1_CLK1_DFS_CNTL 0x16E6C
#define mmCLK1_CLK2_DFS_CNTL 0x16E6F
#define mmCLK1_CLK3_DFS_CNTL 0x16E72
#define mmCLK1_CLK4_DFS_CNTL 0x16E75
#define mmCLK1_CLK5_DFS_CNTL 0x16E78

#define mmCLK1_CLK0_CURRENT_CNT 0x16EFB
#define mmCLK1_CLK1_CURRENT_CNT 0x16EFC
#define mmCLK1_CLK2_CURRENT_CNT 0x16EFD
#define mmCLK1_CLK3_CURRENT_CNT 0x16EFE
#define mmCLK1_CLK4_CURRENT_CNT 0x16EFF
#define mmCLK1_CLK5_CURRENT_CNT 0x16F00

#define mmCLK1_CLK0_BYPASS_CNTL 0x16E8A
#define mmCLK1_CLK1_BYPASS_CNTL 0x16E93
#define mmCLK1_CLK2_BYPASS_CNTL 0x16E9C
#define mmCLK1_CLK3_BYPASS_CNTL 0x16EA5
#define mmCLK1_CLK4_BYPASS_CNTL 0x16EAE
#define mmCLK1_CLK5_BYPASS_CNTL 0x16EB7

#define mmCLK1_CLK0_DS_CNTL 0x16E83
#define mmCLK1_CLK1_DS_CNTL 0x16E8C
#define mmCLK1_CLK2_DS_CNTL 0x16E95
#define mmCLK1_CLK3_DS_CNTL 0x16E9E
#define mmCLK1_CLK4_DS_CNTL 0x16EA7
#define mmCLK1_CLK5_DS_CNTL 0x16EB0

#define mmCLK1_CLK0_ALLOW_DS 0x16E84
#define mmCLK1_CLK1_ALLOW_DS 0x16E8D
#define mmCLK1_CLK2_ALLOW_DS 0x16E96
#define mmCLK1_CLK3_ALLOW_DS 0x16E9F
#define mmCLK1_CLK4_ALLOW_DS 0x16EA8
#define mmCLK1_CLK5_ALLOW_DS 0x16EB1

#define mmCLK5_spll_field_8 0x1B24B
#define mmDENTIST_DISPCLK_CNTL 0x0124
#define regDENTIST_DISPCLK_CNTL 0x0064
#define regDENTIST_DISPCLK_CNTL_BASE_IDX 1

#define CLK1_CLK_PLL_REQ__FbMult_int__SHIFT 0x0
#define CLK1_CLK_PLL_REQ__PllSpineDiv__SHIFT 0xc
#define CLK1_CLK_PLL_REQ__FbMult_frac__SHIFT 0x10
#define CLK1_CLK_PLL_REQ__FbMult_int_MASK 0x000001FFL
#define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L
#define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L

#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK 0x00000007L
#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV_MASK 0x000F0000L
// DENTIST_DISPCLK_CNTL
#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER__SHIFT 0x0
#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER__SHIFT 0x8
#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE__SHIFT 0x13
#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHG_DONE__SHIFT 0x14
#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER__SHIFT 0x18
#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER_MASK 0x0000007FL
#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER_MASK 0x00007F00L
#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE_MASK 0x00080000L
#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHG_DONE_MASK 0x00100000L
#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER_MASK 0x7F000000L

#define CLK5_spll_field_8__spll_ssc_en_MASK 0x00002000L

#define SMU_VER_THRESHOLD 0x5D4A00 //93.74.0

#undef FN
#define FN(reg_name, field_name) \
	clk_mgr->clk_mgr_shift->field_name, clk_mgr->clk_mgr_mask->field_name

#define REG(reg) \
	(clk_mgr->regs->reg)

#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg

#define BASE(seg) BASE_INNER(seg)

#define SR(reg_name)\
		.reg_name = BASE(reg ## reg_name ## _BASE_IDX) +  \
					reg ## reg_name

#define CLK_SR_DCN35(reg_name)\
	.reg_name = mm ## reg_name

static const struct clk_mgr_registers clk_mgr_regs_dcn35 = {
	CLK_REG_LIST_DCN35()
};

static const struct clk_mgr_shift clk_mgr_shift_dcn35 = {
	CLK_COMMON_MASK_SH_LIST_DCN32(__SHIFT)
};

static const struct clk_mgr_mask clk_mgr_mask_dcn35 = {
	CLK_COMMON_MASK_SH_LIST_DCN32(_MASK)
};

#define TO_CLK_MGR_DCN35(clk_mgr)\
	container_of(clk_mgr, struct clk_mgr_dcn35, base)

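/*
 * Count the displays that are actively driven, i.e. links whose DIG is
 * enabled. If no display is active but a TMDS (HDMI/DVI) stream exists in
 * the context, report one display anyway; this is the WA below for the
 * hang seen on HDMI after a display is turned off and then back on.
 */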
static int dcn35_get_active_display_cnt_wa(
		struct dc *dc,
		struct dc_state *context,
		int *all_active_disps)
{
	int i, display_count = 0;
	bool tmds_present = false;

	for (i = 0; i < context->stream_count; i++) {
		const struct dc_stream_state *stream = context->streams[i];

		if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A ||
				stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
				stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
			tmds_present = true;
	}

	for (i = 0; i < dc->link_count; i++) {
		const struct dc_link *link = dc->links[i];

		/* abusing the fact that the dig and phy are coupled to see if the phy is enabled */
		if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
				link->link_enc->funcs->is_dig_enabled(link->link_enc))
			display_count++;
	}
	if (all_active_disps != NULL)
		*all_active_disps = display_count;
	/* WA for hang on HDMI after a display is turned off, then back on */
	if (display_count == 0 && tmds_present)
		display_count = 1;

	return display_count;
}
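
/*
 * WA applied around DISPCLK changes: for pipes whose stream is effectively
 * off (dpms_off, a virtual signal, or no link encoder) and that have no
 * active DIG or 128b/132b (HPO) link, the CRTC is gated so the raster is
 * not left running while DISPCLK ramps. A call with disable == true turns
 * the CRTC off before the clock change; the matching call with
 * disable == false re-enables it afterwards.
 */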
static void dcn35_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context,
		bool safe_to_lower, bool disable)
{
	struct dc *dc = clk_mgr_base->ctx->dc;
	int i;

	if (dc->ctx->dce_environment == DCE_ENV_DIAG)
		return;

	for (i = 0; i < dc->res_pool->pipe_count; ++i) {
		struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
		struct clk_mgr_internal *clk_mgr_internal = TO_CLK_MGR_INTERNAL(clk_mgr_base);
		struct dccg *dccg = clk_mgr_internal->dccg;
		struct pipe_ctx *pipe = safe_to_lower
			? &context->res_ctx.pipe_ctx[i]
			: &dc->current_state->res_ctx.pipe_ctx[i];
		bool stream_changed_otg_dig_on = false;
		bool has_active_hpo = false;

		if (pipe->top_pipe || pipe->prev_odm_pipe)
			continue;

		stream_changed_otg_dig_on = old_pipe->stream && new_pipe->stream &&
			old_pipe->stream != new_pipe->stream &&
			old_pipe->stream_res.tg == new_pipe->stream_res.tg &&
			new_pipe->stream->link_enc && !new_pipe->stream->dpms_off &&
			new_pipe->stream->link_enc->funcs->is_dig_enabled &&
			new_pipe->stream->link_enc->funcs->is_dig_enabled(
				new_pipe->stream->link_enc) &&
			new_pipe->stream_res.stream_enc &&
			new_pipe->stream_res.stream_enc->funcs->is_fifo_enabled &&
			new_pipe->stream_res.stream_enc->funcs->is_fifo_enabled(new_pipe->stream_res.stream_enc);

		has_active_hpo = dccg->ctx->dc->link_srv->dp_is_128b_132b_signal(old_pipe) &&
			dccg->ctx->dc->link_srv->dp_is_128b_132b_signal(new_pipe);

		if (!has_active_hpo && !dccg->ctx->dc->link_srv->dp_is_128b_132b_signal(pipe) &&
				(pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) ||
				!pipe->stream->link_enc) && !stream_changed_otg_dig_on)) {

			/* This w/a should not trigger when we have a dig active */
			if (disable) {
				if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc)
					pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);

				reset_sync_context_for_pipe(dc, context, i);
			} else {
				pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
			}
		}
	}
}

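/*
 * Program the DTBCLK DTO once per timing generator: tg_mask accumulates a
 * bit per TG instance already visited, so pipes sharing a TG (e.g. under
 * ODM combine) do not reprogram the same DTO.
 */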
static void dcn35_update_clocks_update_dtb_dto(struct clk_mgr_internal *clk_mgr,
			struct dc_state *context,
			int ref_dtbclk_khz)
{
	struct dccg *dccg = clk_mgr->dccg;
	uint32_t tg_mask = 0;
	int i;

	for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
		struct dtbclk_dto_params dto_params = {0};

		/* use mask to program DTO once per tg */
		if (pipe_ctx->stream_res.tg &&
				!(tg_mask & (1 << pipe_ctx->stream_res.tg->inst))) {
			tg_mask |= (1 << pipe_ctx->stream_res.tg->inst);

			dto_params.otg_inst = pipe_ctx->stream_res.tg->inst;
			dto_params.ref_dtbclk_khz = ref_dtbclk_khz;

			dccg->funcs->set_dtbclk_dto(clk_mgr->dccg, &dto_params);
			//dccg->funcs->set_audio_dtbclk_dto(clk_mgr->dccg, &dto_params);
		}
	}
}

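/*
 * Two passes over the pipes: first program a DTO for every DPP in the new
 * context (only raising values unless safe_to_lower), then, when lowering
 * is safe, zero the DTOs of DPP instances that were active in the current
 * state but are unused in the new one.
 */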
static void dcn35_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
		struct dc_state *context, bool safe_to_lower)
{
	int i;
	bool dppclk_active[MAX_PIPES] = {0};

	clk_mgr->dccg->ref_dppclk = clk_mgr->base.clks.dppclk_khz;
	for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
		int dpp_inst = 0, dppclk_khz, prev_dppclk_khz;

		dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;

		if (context->res_ctx.pipe_ctx[i].plane_res.dpp)
			dpp_inst = context->res_ctx.pipe_ctx[i].plane_res.dpp->inst;
		else if (!context->res_ctx.pipe_ctx[i].plane_res.dpp && dppclk_khz == 0) {
			/* dpp == NULL && dppclk_khz == 0 is valid because of pipe harvesting.
			 * In this case just continue in loop
			 */
			continue;
		} else if (!context->res_ctx.pipe_ctx[i].plane_res.dpp && dppclk_khz > 0) {
			/* The software state is not valid if dpp resource is NULL and
			 * dppclk_khz > 0.
			 */
			ASSERT(false);
			continue;
		}

		prev_dppclk_khz = clk_mgr->dccg->pipe_dppclk_khz[i];

		if (safe_to_lower || prev_dppclk_khz < dppclk_khz)
			clk_mgr->dccg->funcs->update_dpp_dto(
							clk_mgr->dccg, dpp_inst, dppclk_khz);
		dppclk_active[dpp_inst] = true;
	}
	if (safe_to_lower)
		for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
			struct dpp *old_dpp = clk_mgr->base.ctx->dc->current_state->res_ctx.pipe_ctx[i].plane_res.dpp;

			if (old_dpp && !dppclk_active[old_dpp->inst])
				clk_mgr->dccg->funcs->update_dpp_dto(clk_mgr->dccg, old_dpp->inst, 0);
		}
}

static uint8_t get_lowest_dpia_index(const struct dc_link *link)
{
	const struct dc *dc_struct = link->dc;
	uint8_t idx = 0xFF;
	int i;

	for (i = 0; i < MAX_PIPES * 2; ++i) {
		if (!dc_struct->links[i] || dc_struct->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
			continue;

		if (idx > dc_struct->links[i]->link_index)
			idx = dc_struct->links[i]->link_index;
	}

	return idx;
}

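/*
 * Sum the stream bandwidth behind each USB4 host router and report it to
 * the SMU. The hr_index math assumes two DPIA links per host router: e.g.
 * with lowest_dpia_index == 0, links 0 and 1 map to router 0 and links 2
 * and 3 map to router 1.
 */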
static void dcn35_notify_host_router_bw(struct clk_mgr *clk_mgr_base, struct dc_state *context,
					bool safe_to_lower)
{
	struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
	uint32_t host_router_bw_kbps[MAX_HOST_ROUTERS_NUM] = { 0 };
	int i;

	for (i = 0; i < context->stream_count; ++i) {
		const struct dc_stream_state *stream = context->streams[i];
		const struct dc_link *link = stream->link;
		uint8_t lowest_dpia_index = 0;
		unsigned int hr_index = 0;

		if (!link)
			continue;

		lowest_dpia_index = get_lowest_dpia_index(link);
		if (link->link_index < lowest_dpia_index)
			continue;

		hr_index = (link->link_index - lowest_dpia_index) / 2;
		if (hr_index >= MAX_HOST_ROUTERS_NUM)
			continue;

		host_router_bw_kbps[hr_index] += dc_bandwidth_in_kbps_from_timing(
			&stream->timing, dc_link_get_highest_encoding_format(link));
	}

	for (i = 0; i < MAX_HOST_ROUTERS_NUM; ++i) {
		new_clocks->host_router_bw_kbps[i] = host_router_bw_kbps[i];
		if (should_set_clock(safe_to_lower, new_clocks->host_router_bw_kbps[i], clk_mgr_base->clks.host_router_bw_kbps[i])) {
			clk_mgr_base->clks.host_router_bw_kbps[i] = new_clocks->host_router_bw_kbps[i];
			dcn35_smu_notify_host_router_bw(clk_mgr, i, new_clocks->host_router_bw_kbps[i]);
		}
	}
}

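/*
 * Main clock programming entry point. Ordering matters here: when raising
 * clocks (!safe_to_lower), DTBCLK and the power state are brought up
 * before the hard-min requests; DISPCLK changes are bracketed by the OTG
 * WA; and when lowering DPPCLK the per-DPP DTOs are reprogrammed before
 * the global DPPCLK drops (and the reverse when raising), so the
 * effective per-pipe clock never dips below what the pipes need
 * mid-sequence.
 */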
void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
			struct dc_state *context,
			bool safe_to_lower)
{
	union dmub_rb_cmd cmd;
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
	struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
	struct dc *dc = clk_mgr_base->ctx->dc;
	int display_count = 0;
	bool update_dppclk = false;
	bool update_dispclk = false;
	bool dpp_clock_lowered = false;
	int all_active_disps = 0;

	if (dc->work_arounds.skip_clock_update)
		return;

	display_count = dcn35_get_active_display_cnt_wa(dc, context, &all_active_disps);
	if (new_clocks->dtbclk_en && !new_clocks->ref_dtbclk_khz)
		new_clocks->ref_dtbclk_khz = 600000;

	/*
	 * If it is safe to lower but we are already in the lower state, we don't have to do anything.
	 * Also, if safe_to_lower is false, we just go to the higher state.
	 */
	if (safe_to_lower) {
		if (new_clocks->zstate_support != DCN_ZSTATE_SUPPORT_DISALLOW &&
				new_clocks->zstate_support != clk_mgr_base->clks.zstate_support) {
			dcn35_smu_set_zstate_support(clk_mgr, new_clocks->zstate_support);
			dm_helpers_enable_periodic_detection(clk_mgr_base->ctx, true);
			clk_mgr_base->clks.zstate_support = new_clocks->zstate_support;
		}

		if (clk_mgr_base->clks.dtbclk_en && !new_clocks->dtbclk_en) {
			if (clk_mgr->base.ctx->dc->config.allow_0_dtb_clk)
				dcn35_smu_set_dtbclk(clk_mgr, false);
			clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
		}
		/* check that we're not already in lower */
		if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
			/* if we can go lower, go lower */
			if (display_count == 0)
				clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
		}
	} else {
		if (new_clocks->zstate_support == DCN_ZSTATE_SUPPORT_DISALLOW &&
				new_clocks->zstate_support != clk_mgr_base->clks.zstate_support) {
			dcn35_smu_set_zstate_support(clk_mgr, DCN_ZSTATE_SUPPORT_DISALLOW);
			dm_helpers_enable_periodic_detection(clk_mgr_base->ctx, false);
			clk_mgr_base->clks.zstate_support = new_clocks->zstate_support;
		}

		if (!clk_mgr_base->clks.dtbclk_en && new_clocks->dtbclk_en) {
			dcn35_smu_set_dtbclk(clk_mgr, true);
			clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;

			dcn35_update_clocks_update_dtb_dto(clk_mgr, context, new_clocks->ref_dtbclk_khz);
			clk_mgr_base->clks.ref_dtbclk_khz = new_clocks->ref_dtbclk_khz;
		}

		/* check that we're not already in D0 */
		if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_MISSION_MODE) {
			union display_idle_optimization_u idle_info = { 0 };

			dcn35_smu_set_display_idle_optimization(clk_mgr, idle_info.data);
			/* update power state */
			clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_MISSION_MODE;
		}
	}
	if (dc->debug.force_min_dcfclk_mhz > 0)
		new_clocks->dcfclk_khz = (new_clocks->dcfclk_khz > (dc->debug.force_min_dcfclk_mhz * 1000)) ?
				new_clocks->dcfclk_khz : (dc->debug.force_min_dcfclk_mhz * 1000);

	if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) {
		clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
		dcn35_smu_set_hard_min_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_khz);
	}

	if (should_set_clock(safe_to_lower,
			new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) {
		clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
		dcn35_smu_set_min_deep_sleep_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_deep_sleep_khz);
	}

	// workaround: limit DPPCLK to at least 100 MHz to avoid underflow when a
	// low-clock eDP panel switches to driving an additional 4K monitor
	if (new_clocks->dppclk_khz < 100000)
		new_clocks->dppclk_khz = 100000;

	if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
		if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
			dpp_clock_lowered = true;
		clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz;
		update_dppclk = true;
	}

	if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz) &&
	    (new_clocks->dispclk_khz > 0 || (safe_to_lower && display_count == 0))) {
		int requested_dispclk_khz = new_clocks->dispclk_khz;

		dcn35_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);

		/* Clamp the requested clock to the PMFW-imposed minimum. */
		if (dc->debug.min_disp_clk_khz > 0 && requested_dispclk_khz < dc->debug.min_disp_clk_khz)
			requested_dispclk_khz = dc->debug.min_disp_clk_khz;

		dcn35_smu_set_dispclk(clk_mgr, requested_dispclk_khz);
		clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;

		dcn35_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);

		update_dispclk = true;
	}

	/* clock limits are received with MHz precision, divide by 1000 to prevent setting clocks at every call */
	if (!dc->debug.disable_dtb_ref_clk_switch &&
	    should_set_clock(safe_to_lower, new_clocks->ref_dtbclk_khz / 1000,
			     clk_mgr_base->clks.ref_dtbclk_khz / 1000)) {
		dcn35_update_clocks_update_dtb_dto(clk_mgr, context, new_clocks->ref_dtbclk_khz);
		clk_mgr_base->clks.ref_dtbclk_khz = new_clocks->ref_dtbclk_khz;
	}

	if (dpp_clock_lowered) {
		// increase per DPP DTO before lowering global dppclk
		dcn35_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
		dcn35_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
	} else {
		// increase global DPPCLK before lowering per DPP DTO
		if (update_dppclk || update_dispclk)
			dcn35_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
		dcn35_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
	}

	// notify PMFW of bandwidth per DPIA tunnel
	if (dc->debug.notify_dpia_hr_bw)
		dcn35_notify_host_router_bw(clk_mgr_base, context, safe_to_lower);

	// notify DMCUB of latest clocks
	memset(&cmd, 0, sizeof(cmd));
	cmd.notify_clocks.header.type = DMUB_CMD__CLK_MGR;
	cmd.notify_clocks.header.sub_type = DMUB_CMD__CLK_MGR_NOTIFY_CLOCKS;
	cmd.notify_clocks.clocks.dcfclk_khz = clk_mgr_base->clks.dcfclk_khz;
	cmd.notify_clocks.clocks.dcfclk_deep_sleep_khz =
		clk_mgr_base->clks.dcfclk_deep_sleep_khz;
	cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz;
	cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz;

	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}

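/*
 * The PLL VCO frequency is the FbMult feedback multiplier (fixed-point,
 * read from CLK1_CLK_PLL_REQ) times the DFS reference clock. As a worked
 * example, assuming the 48 MHz dfs_ref_freq_khz set in
 * dcn35_clk_mgr_construct: FbMult_int = 100 and FbMult_frac = 0x8000
 * (i.e. 0.5) give 100.5 * 48000 = 4824000 kHz.
 */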
static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
{
	/* get FbMult value */
	struct fixed31_32 pll_req;
	unsigned int fbmult_frac_val = 0;
	unsigned int fbmult_int_val = 0;

	/*
	 * Register value of fbmult is in 8.16 format, we are converting to 31.32
	 * to leverage the fixed-point operations available in the driver
	 */

	REG_GET(CLK1_CLK_PLL_REQ, FbMult_frac, &fbmult_frac_val); /* 16 bit fractional part */
	REG_GET(CLK1_CLK_PLL_REQ, FbMult_int, &fbmult_int_val); /* 8 bit integer part */

	pll_req = dc_fixpt_from_int(fbmult_int_val);

	/*
	 * since the fractional part is only 16 bits in the register definition but is 32 bits
	 * in our fixed-point definition, shift left by 16 to obtain the correct value
	 */
	pll_req.value |= fbmult_frac_val << 16;

	/* multiply by REFCLK period */
	pll_req = dc_fixpt_mul_int(pll_req, clk_mgr->dfs_ref_freq_khz);

	/* integer part is now VCO frequency in kHz */
	return dc_fixpt_floor(pll_req);
}

static void dcn35_enable_pme_wa(struct clk_mgr *clk_mgr_base)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);

	dcn35_smu_enable_pme_wa(clk_mgr);
}

bool dcn35_are_clock_states_equal(struct dc_clocks *a,
		struct dc_clocks *b)
{
	if (a->dispclk_khz != b->dispclk_khz)
		return false;
	else if (a->dppclk_khz != b->dppclk_khz)
		return false;
	else if (a->dcfclk_khz != b->dcfclk_khz)
		return false;
	else if (a->dcfclk_deep_sleep_khz != b->dcfclk_deep_sleep_khz)
		return false;
	else if (a->zstate_support != b->zstate_support)
		return false;
	else if (a->dtbclk_en != b->dtbclk_en)
		return false;

	return true;
}

static void dcn35_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
		struct clk_mgr_dcn35 *clk_mgr)
{
}

static bool dcn35_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
	uint32_t ssc_enable;

	ssc_enable = REG_READ(CLK5_spll_field_8) & CLK5_spll_field_8__spll_ssc_en_MASK;

	return ssc_enable != 0;
}

static void init_clk_states(struct clk_mgr *clk_mgr)
{
	struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
	uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz;

	memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));

	if (clk_mgr_int->smu_ver >= SMU_VER_THRESHOLD)
		clk_mgr->clks.dtbclk_en = true; // request DTBCLK disable on first commit
	clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk;	// restore ref_dtbclk
	clk_mgr->clks.p_state_change_support = true;
	clk_mgr->clks.prev_p_state_change_support = true;
	clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
	clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN;
}

void dcn35_init_clocks(struct clk_mgr *clk_mgr)
{
	struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);

	init_clk_states(clk_mgr);

	// adjust the dp_dto reference clock if SSC is enabled; otherwise use dprefclk as-is
	if (dcn35_is_spll_ssc_enabled(clk_mgr))
		clk_mgr->dp_dto_source_clock_in_khz =
			dce_adjust_dp_ref_freq_for_ss(clk_mgr_int, clk_mgr->dprefclk_khz);
	else
		clk_mgr->dp_dto_source_clock_in_khz = clk_mgr->dprefclk_khz;
}

static struct clk_bw_params dcn35_bw_params = {
	.vram_type = Ddr4MemType,
	.num_channels = 1,
	.clk_table = {
		.num_entries = 4,
	},
};

static struct wm_table ddr5_wm_table = {
	.entries = {
		{
			.wm_inst = WM_A,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.72,
			.sr_exit_time_us = 28.0,
			.sr_enter_plus_exit_time_us = 30.0,
			.valid = true,
		},
		{
			.wm_inst = WM_B,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.72,
			.sr_exit_time_us = 28.0,
			.sr_enter_plus_exit_time_us = 30.0,
			.valid = true,
		},
		{
			.wm_inst = WM_C,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.72,
			.sr_exit_time_us = 28.0,
			.sr_enter_plus_exit_time_us = 30.0,
			.valid = true,
		},
		{
			.wm_inst = WM_D,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.72,
			.sr_exit_time_us = 28.0,
			.sr_enter_plus_exit_time_us = 30.0,
			.valid = true,
		},
	}
};

static struct wm_table lpddr5_wm_table = {
	.entries = {
		{
			.wm_inst = WM_A,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.65333,
			.sr_exit_time_us = 28.0,
			.sr_enter_plus_exit_time_us = 30.0,
			.valid = true,
		},
		{
			.wm_inst = WM_B,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.65333,
			.sr_exit_time_us = 28.0,
			.sr_enter_plus_exit_time_us = 30.0,
			.valid = true,
		},
		{
			.wm_inst = WM_C,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.65333,
			.sr_exit_time_us = 28.0,
			.sr_enter_plus_exit_time_us = 30.0,
			.valid = true,
		},
		{
			.wm_inst = WM_D,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.65333,
			.sr_exit_time_us = 28.0,
			.sr_enter_plus_exit_time_us = 30.0,
			.valid = true,
		},
	}
};

static DpmClocks_t_dcn35 dummy_clocks;

static struct dcn35_watermarks dummy_wms = { 0 };

static struct dcn35_ss_info_table ss_info_table = {
	.ss_divider = 1000,
	.ss_percentage = {0, 0, 375, 375, 375}
};

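/*
 * Look up DPREFCLK spread-spectrum parameters from the table above,
 * indexed by the bypass clock source read from CLK1_CLK2_BYPASS_CNTL.
 * The units are an assumption from ss_divider = 1000: an ss_percentage
 * entry of 375 would read as 375/1000 = 0.375% downspread, and entry 0
 * (DFS mode) carries no spread.
 */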
static void dcn35_read_ss_info_from_lut(struct clk_mgr_internal *clk_mgr)
{
	uint32_t clock_source = 0;

	clock_source = REG_READ(CLK1_CLK2_BYPASS_CNTL) & CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK;

	// If it's DFS mode, clock_source is 0.
	if (dcn35_is_spll_ssc_enabled(&clk_mgr->base) && (clock_source < ARRAY_SIZE(ss_info_table.ss_percentage))) {
		clk_mgr->dprefclk_ss_percentage = ss_info_table.ss_percentage[clock_source];

		if (clk_mgr->dprefclk_ss_percentage != 0) {
			clk_mgr->ss_on_dprefclk = true;
			clk_mgr->dprefclk_ss_divider = ss_info_table.ss_divider;
		}
	}
}

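/*
 * Translate the driver's wm_table into the SMU's WatermarkRow layout.
 * Valid entries are packed densely (the SMU array has no holes), and each
 * P-state-change row gets a [MinMclk, MaxMclk] range derived from the
 * dcfclk entries, offset by 1 MHz so consecutive ranges do not overlap.
 */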
static void dcn35_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn35_watermarks *table)
{
	int i, num_valid_sets;

	num_valid_sets = 0;

	for (i = 0; i < WM_SET_COUNT; i++) {
		/* skip empty entries, the smu array has no holes */
		if (!bw_params->wm_table.entries[i].valid)
			continue;

		table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmSetting = bw_params->wm_table.entries[i].wm_inst;
		table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType = bw_params->wm_table.entries[i].wm_type;
		/* We will not select WM based on fclk, so leave it as unconstrained */
		table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinClock = 0;
		table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxClock = 0xFFFF;

		if (table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType == WM_TYPE_PSTATE_CHG) {
			if (i == 0)
				table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinMclk = 0;
			else {
				/* add 1 to make it non-overlapping with next lvl */
				table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinMclk =
						bw_params->clk_table.entries[i - 1].dcfclk_mhz + 1;
			}
			table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxMclk =
					bw_params->clk_table.entries[i].dcfclk_mhz;

		} else {
			/* unconstrained for memory retraining */
			table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinClock = 0;
			table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxClock = 0xFFFF;

			/* Modify previous watermark range to cover up to max */
			table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxClock = 0xFFFF;
		}
		num_valid_sets++;
	}

	ASSERT(num_valid_sets != 0); /* Must have at least one set of valid watermarks */

	/* modify the min and max to make sure we cover the whole range */
	table->WatermarkRow[WM_DCFCLK][0].MinMclk = 0;
	table->WatermarkRow[WM_DCFCLK][0].MinClock = 0;
	table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxMclk = 0xFFFF;
	table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxClock = 0xFFFF;

	/* This is for writeback only; it does not matter currently as there is no writeback support */
	table->WatermarkRow[WM_SOCCLK][0].WmSetting = WM_A;
	table->WatermarkRow[WM_SOCCLK][0].MinClock = 0;
	table->WatermarkRow[WM_SOCCLK][0].MaxClock = 0xFFFF;
	table->WatermarkRow[WM_SOCCLK][0].MinMclk = 0;
	table->WatermarkRow[WM_SOCCLK][0].MaxMclk = 0xFFFF;
}

static void dcn35_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
	struct clk_mgr_dcn35 *clk_mgr_dcn35 = TO_CLK_MGR_DCN35(clk_mgr);
	struct dcn35_watermarks *table = clk_mgr_dcn35->smu_wm_set.wm_set;

	if (!clk_mgr->smu_ver)
		return;

	if (!table || clk_mgr_dcn35->smu_wm_set.mc_address.quad_part == 0)
		return;

	memset(table, 0, sizeof(*table));

	dcn35_build_watermark_ranges(clk_mgr_base->bw_params, table);

	dcn35_smu_set_dram_addr_high(clk_mgr,
			clk_mgr_dcn35->smu_wm_set.mc_address.high_part);
	dcn35_smu_set_dram_addr_low(clk_mgr,
			clk_mgr_dcn35->smu_wm_set.mc_address.low_part);
	dcn35_smu_transfer_wm_table_dram_2_smu(clk_mgr);
}

static void dcn35_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr,
		struct dcn35_smu_dpm_clks *smu_dpm_clks)
{
	DpmClocks_t_dcn35 *table = smu_dpm_clks->dpm_clks;

	if (!clk_mgr->smu_ver)
		return;

	if (!table || smu_dpm_clks->mc_address.quad_part == 0)
		return;

	memset(table, 0, sizeof(*table));

	dcn35_smu_set_dram_addr_high(clk_mgr,
			smu_dpm_clks->mc_address.high_part);
	dcn35_smu_set_dram_addr_low(clk_mgr,
			smu_dpm_clks->mc_address.low_part);
	dcn35_smu_transfer_dpm_table_smu_2_dram(clk_mgr);
}

static uint32_t find_max_clk_value(const uint32_t clocks[], uint32_t num_clocks)
{
	uint32_t max = 0;
	int i;

	for (i = 0; i < num_clocks; ++i) {
		if (clocks[i] > max)
			max = clocks[i];
	}

	return max;
}

static inline bool is_valid_clock_value(uint32_t clock_value)
{
	return clock_value > 1 && clock_value < 100000;
}

static unsigned int convert_wck_ratio(uint8_t wck_ratio)
{
	switch (wck_ratio) {
	case WCK_RATIO_1_2:
		return 2;

	case WCK_RATIO_1_4:
		return 4;
	/* Find lowest DPM, FCLK is filled in reverse order*/

	default:
		break;
	}

	return 1;
}

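/*
 * Effective DRAM speed in MT/s: UClk scaled by the WCK ratio, doubled for
 * double data rate. E.g. a memory P-state with UClk = 1000 MHz and a 1:2
 * WCK ratio yields 1000 * 2 * 2 = 4000 MT/s.
 */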
static inline uint32_t calc_dram_speed_mts(const MemPstateTable_t *entry)
{
	return entry->UClk * convert_wck_ratio(entry->WckRatio) * 2;
}

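/*
 * Build bw_params->clk_table from the PMFW DPM table. The table is keyed
 * on the DCFCLK levels: each level inherits memclk/voltage from the
 * lowest memory P-state, one extra entry is appended at the highest
 * P-state, and any zero clocks are then backfilled from the default
 * maximums so validation never sees a hole.
 */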
static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk_mgr,
						    struct integrated_info *bios_info,
						    DpmClocks_t_dcn35 *clock_table)
{
	struct clk_bw_params *bw_params = clk_mgr->base.bw_params;
	struct clk_limit_table_entry def_max = bw_params->clk_table.entries[bw_params->clk_table.num_entries - 1];
	uint32_t max_fclk = 0, min_pstate = 0, max_dispclk = 0, max_dppclk = 0;
	uint32_t max_pstate = 0, max_dram_speed_mts = 0, min_dram_speed_mts = 0;
	uint32_t num_memps, num_fclk, num_dcfclk;
	int i;

	/* Determine min/max p-state values. */
	num_memps = (clock_table->NumMemPstatesEnabled > NUM_MEM_PSTATE_LEVELS) ? NUM_MEM_PSTATE_LEVELS :
		clock_table->NumMemPstatesEnabled;
	for (i = 0; i < num_memps; i++) {
		uint32_t dram_speed_mts = calc_dram_speed_mts(&clock_table->MemPstateTable[i]);

		if (is_valid_clock_value(dram_speed_mts) && dram_speed_mts > max_dram_speed_mts) {
			max_dram_speed_mts = dram_speed_mts;
			max_pstate = i;
		}
	}

	min_dram_speed_mts = max_dram_speed_mts;
	min_pstate = max_pstate;

	for (i = 0; i < num_memps; i++) {
		uint32_t dram_speed_mts = calc_dram_speed_mts(&clock_table->MemPstateTable[i]);

		if (is_valid_clock_value(dram_speed_mts) && dram_speed_mts < min_dram_speed_mts) {
			min_dram_speed_mts = dram_speed_mts;
			min_pstate = i;
		}
	}

	/* We expect the table to contain at least one valid P-state entry. */
	ASSERT(clock_table->NumMemPstatesEnabled &&
	       is_valid_clock_value(max_dram_speed_mts) &&
	       is_valid_clock_value(min_dram_speed_mts));

	/* dispclk and dppclk can be max at any voltage, same number of levels for both */
	if (clock_table->NumDispClkLevelsEnabled <= NUM_DISPCLK_DPM_LEVELS &&
	    clock_table->NumDispClkLevelsEnabled <= NUM_DPPCLK_DPM_LEVELS) {
		max_dispclk = find_max_clk_value(clock_table->DispClocks,
			clock_table->NumDispClkLevelsEnabled);
		max_dppclk = find_max_clk_value(clock_table->DppClocks,
			clock_table->NumDispClkLevelsEnabled);
	} else {
		/* Invalid number of entries in the table from PMFW. */
		ASSERT(0);
	}

	/* Base the clock table on dcfclk, need at least one entry regardless of pmfw table */
	ASSERT(clock_table->NumDcfClkLevelsEnabled > 0);

	num_fclk = (clock_table->NumFclkLevelsEnabled > NUM_FCLK_DPM_LEVELS) ? NUM_FCLK_DPM_LEVELS :
		clock_table->NumFclkLevelsEnabled;
	max_fclk = find_max_clk_value(clock_table->FclkClocks_Freq, num_fclk);

	num_dcfclk = (clock_table->NumDcfClkLevelsEnabled > NUM_DCFCLK_DPM_LEVELS) ? NUM_DCFCLK_DPM_LEVELS :
		clock_table->NumDcfClkLevelsEnabled;
	for (i = 0; i < num_dcfclk; i++) {
		int j;

		/* First search defaults for the clocks we don't read using closest lower or equal default dcfclk */
		for (j = bw_params->clk_table.num_entries - 1; j > 0; j--)
			if (bw_params->clk_table.entries[j].dcfclk_mhz <= clock_table->DcfClocks[i])
				break;

		bw_params->clk_table.entries[i].phyclk_mhz = bw_params->clk_table.entries[j].phyclk_mhz;
		bw_params->clk_table.entries[i].phyclk_d18_mhz = bw_params->clk_table.entries[j].phyclk_d18_mhz;
		bw_params->clk_table.entries[i].dtbclk_mhz = bw_params->clk_table.entries[j].dtbclk_mhz;

		/* Now update clocks we do read */
		bw_params->clk_table.entries[i].memclk_mhz = clock_table->MemPstateTable[min_pstate].MemClk;
		bw_params->clk_table.entries[i].voltage = clock_table->MemPstateTable[min_pstate].Voltage;
		bw_params->clk_table.entries[i].dcfclk_mhz = clock_table->DcfClocks[i];
		bw_params->clk_table.entries[i].socclk_mhz = clock_table->SocClocks[i];
		bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
		bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
		bw_params->clk_table.entries[i].wck_ratio =
			convert_wck_ratio(clock_table->MemPstateTable[min_pstate].WckRatio);

		/* Dcfclk and Fclk are tied, but at a different ratio */
		bw_params->clk_table.entries[i].fclk_mhz = min(max_fclk, 2 * clock_table->DcfClocks[i]);
	}

	/* Make sure to include at least one entry at highest pstate */
	if (max_pstate != min_pstate || i == 0) {
		if (i > MAX_NUM_DPM_LVL - 1)
			i = MAX_NUM_DPM_LVL - 1;

		bw_params->clk_table.entries[i].fclk_mhz = max_fclk;
		bw_params->clk_table.entries[i].memclk_mhz = clock_table->MemPstateTable[max_pstate].MemClk;
		bw_params->clk_table.entries[i].voltage = clock_table->MemPstateTable[max_pstate].Voltage;
		bw_params->clk_table.entries[i].dcfclk_mhz =
			find_max_clk_value(clock_table->DcfClocks, NUM_DCFCLK_DPM_LEVELS);
		bw_params->clk_table.entries[i].socclk_mhz =
			find_max_clk_value(clock_table->SocClocks, NUM_SOCCLK_DPM_LEVELS);
		bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
		bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
		bw_params->clk_table.entries[i].wck_ratio = convert_wck_ratio(
			clock_table->MemPstateTable[max_pstate].WckRatio);
		i++;
	}
	bw_params->clk_table.num_entries = i--;

	/* Make sure all highest clocks are included */
	bw_params->clk_table.entries[i].socclk_mhz =
		find_max_clk_value(clock_table->SocClocks, NUM_SOCCLK_DPM_LEVELS);
	bw_params->clk_table.entries[i].dispclk_mhz =
		find_max_clk_value(clock_table->DispClocks, NUM_DISPCLK_DPM_LEVELS);
	bw_params->clk_table.entries[i].dppclk_mhz =
		find_max_clk_value(clock_table->DppClocks, NUM_DPPCLK_DPM_LEVELS);
	bw_params->clk_table.entries[i].fclk_mhz =
		find_max_clk_value(clock_table->FclkClocks_Freq, NUM_FCLK_DPM_LEVELS);
	ASSERT(clock_table->DcfClocks[i] == find_max_clk_value(clock_table->DcfClocks, NUM_DCFCLK_DPM_LEVELS));
	bw_params->clk_table.entries[i].phyclk_mhz = def_max.phyclk_mhz;
	bw_params->clk_table.entries[i].phyclk_d18_mhz = def_max.phyclk_d18_mhz;
	bw_params->clk_table.entries[i].dtbclk_mhz = def_max.dtbclk_mhz;
	bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels = clock_table->NumDcfClkLevelsEnabled;
	bw_params->clk_table.num_entries_per_clk.num_dispclk_levels = clock_table->NumDispClkLevelsEnabled;
	bw_params->clk_table.num_entries_per_clk.num_dppclk_levels = clock_table->NumDispClkLevelsEnabled;
	bw_params->clk_table.num_entries_per_clk.num_fclk_levels = clock_table->NumFclkLevelsEnabled;
	bw_params->clk_table.num_entries_per_clk.num_memclk_levels = clock_table->NumMemPstatesEnabled;
	bw_params->clk_table.num_entries_per_clk.num_socclk_levels = clock_table->NumSocClkLevelsEnabled;

	/*
	 * Set any 0 clocks to max default setting. Not an issue for
	 * power since we aren't doing switching in such case anyway
	 */
	for (i = 0; i < bw_params->clk_table.num_entries; i++) {
		if (!bw_params->clk_table.entries[i].fclk_mhz) {
			bw_params->clk_table.entries[i].fclk_mhz = def_max.fclk_mhz;
			bw_params->clk_table.entries[i].memclk_mhz = def_max.memclk_mhz;
			bw_params->clk_table.entries[i].voltage = def_max.voltage;
		}
		if (!bw_params->clk_table.entries[i].dcfclk_mhz)
			bw_params->clk_table.entries[i].dcfclk_mhz = def_max.dcfclk_mhz;
		if (!bw_params->clk_table.entries[i].socclk_mhz)
			bw_params->clk_table.entries[i].socclk_mhz = def_max.socclk_mhz;
		if (!bw_params->clk_table.entries[i].dispclk_mhz)
			bw_params->clk_table.entries[i].dispclk_mhz = def_max.dispclk_mhz;
		if (!bw_params->clk_table.entries[i].dppclk_mhz)
			bw_params->clk_table.entries[i].dppclk_mhz = def_max.dppclk_mhz;
		if (!bw_params->clk_table.entries[i].phyclk_mhz)
			bw_params->clk_table.entries[i].phyclk_mhz = def_max.phyclk_mhz;
		if (!bw_params->clk_table.entries[i].phyclk_d18_mhz)
			bw_params->clk_table.entries[i].phyclk_d18_mhz = def_max.phyclk_d18_mhz;
		if (!bw_params->clk_table.entries[i].dtbclk_mhz)
			bw_params->clk_table.entries[i].dtbclk_mhz = def_max.dtbclk_mhz;
	}
	ASSERT(bw_params->clk_table.entries[i-1].dcfclk_mhz);
	bw_params->vram_type = bios_info->memory_type;
	bw_params->dram_channel_width_bytes = bios_info->memory_type == 0x22 ? 8 : 4;
	bw_params->num_channels = bios_info->ma_channel_number ? bios_info->ma_channel_number : 4;

	for (i = 0; i < WM_SET_COUNT; i++) {
		bw_params->wm_table.entries[i].wm_inst = i;

		if (i >= bw_params->clk_table.num_entries) {
			bw_params->wm_table.entries[i].valid = false;
			continue;
		}

		bw_params->wm_table.entries[i].wm_type = WM_TYPE_PSTATE_CHG;
		bw_params->wm_table.entries[i].valid = true;
	}
}

static void dcn35_set_low_power_state(struct clk_mgr *clk_mgr_base)
{
	int display_count;
	struct dc *dc = clk_mgr_base->ctx->dc;
	struct dc_state *context = dc->current_state;

	if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
		display_count = dcn35_get_active_display_cnt_wa(dc, context, NULL);
		/* if we can go lower, go lower */
		if (display_count == 0)
			clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
	}
}

static void dcn35_exit_low_power_state(struct clk_mgr *clk_mgr_base)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);

	/* SMU optimization is performed as part of the low power state exit. */
	dcn35_smu_exit_low_power_state(clk_mgr);
}

static bool dcn35_is_ips_supported(struct clk_mgr *clk_mgr_base)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);

	return dcn35_smu_get_ips_supported(clk_mgr);
}

static void dcn35_init_clocks_fpga(struct clk_mgr *clk_mgr)
{
	init_clk_states(clk_mgr);

	/* TODO: Implement the functions and remove the ifndef guard */
}

static void dcn35_update_clocks_fpga(struct clk_mgr *clk_mgr,
		struct dc_state *context,
		bool safe_to_lower)
{
	struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
	struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
	int fclk_adj = new_clocks->fclk_khz;

	/* TODO: remove this after correctly set by DML */
	new_clocks->dcfclk_khz = 400000;
	new_clocks->socclk_khz = 400000;

	/* Min fclk = 1.2GHz since all the extra scemi logic seems to run off of it */
	//int fclk_adj = new_clocks->fclk_khz > 1200000 ? new_clocks->fclk_khz : 1200000;
	new_clocks->fclk_khz = 4320000;

	if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr->clks.phyclk_khz)) {
		clk_mgr->clks.phyclk_khz = new_clocks->phyclk_khz;
	}

	if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr->clks.dcfclk_khz)) {
		clk_mgr->clks.dcfclk_khz = new_clocks->dcfclk_khz;
	}

	if (should_set_clock(safe_to_lower,
			new_clocks->dcfclk_deep_sleep_khz, clk_mgr->clks.dcfclk_deep_sleep_khz)) {
		clk_mgr->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
	}

	if (should_set_clock(safe_to_lower, new_clocks->socclk_khz, clk_mgr->clks.socclk_khz)) {
		clk_mgr->clks.socclk_khz = new_clocks->socclk_khz;
	}

	if (should_set_clock(safe_to_lower, new_clocks->dramclk_khz, clk_mgr->clks.dramclk_khz)) {
		clk_mgr->clks.dramclk_khz = new_clocks->dramclk_khz;
	}

	if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->clks.dppclk_khz)) {
		clk_mgr->clks.dppclk_khz = new_clocks->dppclk_khz;
	}

	if (should_set_clock(safe_to_lower, fclk_adj, clk_mgr->clks.fclk_khz)) {
		clk_mgr->clks.fclk_khz = fclk_adj;
	}

	if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr->clks.dispclk_khz)) {
		clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz;
	}

	/* Both fclk and ref_dppclk run on the same scemi clock.
	 * So take the higher value since the DPP DTO is typically programmed
	 * such that max dppclk is 1:1 with ref_dppclk.
	 */
	if (clk_mgr->clks.fclk_khz > clk_mgr->clks.dppclk_khz)
		clk_mgr->clks.dppclk_khz = clk_mgr->clks.fclk_khz;
	if (clk_mgr->clks.dppclk_khz > clk_mgr->clks.fclk_khz)
		clk_mgr->clks.fclk_khz = clk_mgr->clks.dppclk_khz;

	// Both fclk and ref_dppclk run on the same scemi clock.
	clk_mgr_int->dccg->ref_dppclk = clk_mgr->clks.fclk_khz;

	/* TODO: set dtbclk in correct place */
	clk_mgr->clks.dtbclk_en = true;
	dm_set_dcn_clocks(clk_mgr->ctx, &clk_mgr->clks);
	dcn35_update_clocks_update_dpp_dto(clk_mgr_int, context, safe_to_lower);

	dcn35_update_clocks_update_dtb_dto(clk_mgr_int, context, clk_mgr->clks.ref_dtbclk_khz);
}

static struct clk_mgr_funcs dcn35_funcs = {
	.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
	.get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
	.update_clocks = dcn35_update_clocks,
	.init_clocks = dcn35_init_clocks,
	.enable_pme_wa = dcn35_enable_pme_wa,
	.are_clock_states_equal = dcn35_are_clock_states_equal,
	.notify_wm_ranges = dcn35_notify_wm_ranges,
	.set_low_power_state = dcn35_set_low_power_state,
	.exit_low_power_state = dcn35_exit_low_power_state,
	.is_ips_supported = dcn35_is_ips_supported,
};

struct clk_mgr_funcs dcn35_fpga_funcs = {
	.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
	.update_clocks = dcn35_update_clocks_fpga,
	.init_clocks = dcn35_init_clocks_fpga,
	.get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
};

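/*
 * One-time construction: hook up the register/shift/mask tables, allocate
 * the GART buffers shared with the SMU (watermark set and DPM table, with
 * the static dummies as fallback), read back the boot clock state, and
 * derive bw_params from the PMFW DPM table when p-states are enabled.
 */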
void dcn35_clk_mgr_construct(
		struct dc_context *ctx,
		struct clk_mgr_dcn35 *clk_mgr,
		struct pp_smu_funcs *pp_smu,
		struct dccg *dccg)
{
	struct dcn35_smu_dpm_clks smu_dpm_clks = { 0 };

	clk_mgr->base.base.ctx = ctx;
	clk_mgr->base.base.funcs = &dcn35_funcs;

	clk_mgr->base.pp_smu = pp_smu;

	clk_mgr->base.dccg = dccg;
	clk_mgr->base.dfs_bypass_disp_clk = 0;

	clk_mgr->base.dprefclk_ss_percentage = 0;
	clk_mgr->base.dprefclk_ss_divider = 1000;
	clk_mgr->base.ss_on_dprefclk = false;
	clk_mgr->base.dfs_ref_freq_khz = 48000;

	if (ctx->dce_version == DCN_VERSION_3_5) {
		clk_mgr->base.regs = &clk_mgr_regs_dcn35;
		clk_mgr->base.clk_mgr_shift = &clk_mgr_shift_dcn35;
		clk_mgr->base.clk_mgr_mask = &clk_mgr_mask_dcn35;
	}

	clk_mgr->smu_wm_set.wm_set = (struct dcn35_watermarks *)dm_helpers_allocate_gpu_mem(
				clk_mgr->base.base.ctx,
				DC_MEM_ALLOC_TYPE_GART,
				sizeof(struct dcn35_watermarks),
				&clk_mgr->smu_wm_set.mc_address.quad_part);

	if (!clk_mgr->smu_wm_set.wm_set) {
		clk_mgr->smu_wm_set.wm_set = &dummy_wms;
		clk_mgr->smu_wm_set.mc_address.quad_part = 0;
	}
	ASSERT(clk_mgr->smu_wm_set.wm_set);

	smu_dpm_clks.dpm_clks = (DpmClocks_t_dcn35 *)dm_helpers_allocate_gpu_mem(
				clk_mgr->base.base.ctx,
				DC_MEM_ALLOC_TYPE_GART,
				sizeof(DpmClocks_t_dcn35),
				&smu_dpm_clks.mc_address.quad_part);

	if (smu_dpm_clks.dpm_clks == NULL) {
		smu_dpm_clks.dpm_clks = &dummy_clocks;
		smu_dpm_clks.mc_address.quad_part = 0;
	}

	ASSERT(smu_dpm_clks.dpm_clks);

	clk_mgr->base.smu_ver = dcn35_smu_get_smu_version(&clk_mgr->base);

	if (clk_mgr->base.smu_ver)
		clk_mgr->base.smu_present = true;

	/* TODO: Check we get what we expect during bringup */
	clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base);

	if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType)
		dcn35_bw_params.wm_table = lpddr5_wm_table;
	else
		dcn35_bw_params.wm_table = ddr5_wm_table;

	/* Save clocks configured at boot for debug purposes */
	dcn35_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, clk_mgr);

	clk_mgr->base.base.dprefclk_khz = dcn35_smu_get_dprefclk(&clk_mgr->base);
	clk_mgr->base.base.clks.ref_dtbclk_khz = 600000;

	dce_clock_read_ss_info(&clk_mgr->base);
	/* when the clock source is from the FCH it can have SS; it is the same clock source as the DPREF clock */
	dcn35_read_ss_info_from_lut(&clk_mgr->base);

	clk_mgr->base.base.bw_params = &dcn35_bw_params;

	if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {
		int i;

		dcn35_get_dpm_table_from_smu(&clk_mgr->base, &smu_dpm_clks);
		DC_LOG_SMU("NumDcfClkLevelsEnabled: %d\n"
				   "NumDispClkLevelsEnabled: %d\n"
				   "NumSocClkLevelsEnabled: %d\n"
				   "VcnClkLevelsEnabled: %d\n"
				   "FClkLevelsEnabled: %d\n"
				   "NumMemPstatesEnabled: %d\n"
				   "MinGfxClk: %d\n"
				   "MaxGfxClk: %d\n",
				   smu_dpm_clks.dpm_clks->NumDcfClkLevelsEnabled,
				   smu_dpm_clks.dpm_clks->NumDispClkLevelsEnabled,
				   smu_dpm_clks.dpm_clks->NumSocClkLevelsEnabled,
				   smu_dpm_clks.dpm_clks->VcnClkLevelsEnabled,
				   smu_dpm_clks.dpm_clks->NumFclkLevelsEnabled,
				   smu_dpm_clks.dpm_clks->NumMemPstatesEnabled,
				   smu_dpm_clks.dpm_clks->MinGfxClk,
				   smu_dpm_clks.dpm_clks->MaxGfxClk);
		for (i = 0; i < smu_dpm_clks.dpm_clks->NumDcfClkLevelsEnabled; i++) {
			DC_LOG_SMU("smu_dpm_clks.dpm_clks->DcfClocks[%d] = %d\n",
					   i,
					   smu_dpm_clks.dpm_clks->DcfClocks[i]);
		}
		for (i = 0; i < smu_dpm_clks.dpm_clks->NumDispClkLevelsEnabled; i++) {
			DC_LOG_SMU("smu_dpm_clks.dpm_clks->DispClocks[%d] = %d\n",
					   i, smu_dpm_clks.dpm_clks->DispClocks[i]);
		}
		for (i = 0; i < smu_dpm_clks.dpm_clks->NumSocClkLevelsEnabled; i++) {
			DC_LOG_SMU("smu_dpm_clks.dpm_clks->SocClocks[%d] = %d\n",
					   i, smu_dpm_clks.dpm_clks->SocClocks[i]);
		}
		for (i = 0; i < smu_dpm_clks.dpm_clks->NumFclkLevelsEnabled; i++) {
			DC_LOG_SMU("smu_dpm_clks.dpm_clks->FclkClocks_Freq[%d] = %d\n",
					   i, smu_dpm_clks.dpm_clks->FclkClocks_Freq[i]);
			DC_LOG_SMU("smu_dpm_clks.dpm_clks->FclkClocks_Voltage[%d] = %d\n",
					   i, smu_dpm_clks.dpm_clks->FclkClocks_Voltage[i]);
		}
		for (i = 0; i < smu_dpm_clks.dpm_clks->NumSocClkLevelsEnabled; i++)
			DC_LOG_SMU("smu_dpm_clks.dpm_clks->SocVoltage[%d] = %d\n",
					   i, smu_dpm_clks.dpm_clks->SocVoltage[i]);

		for (i = 0; i < smu_dpm_clks.dpm_clks->NumMemPstatesEnabled; i++) {
			DC_LOG_SMU("smu_dpm_clks.dpm_clks->MemPstateTable[%d].UClk = %d\n"
					   "smu_dpm_clks.dpm_clks->MemPstateTable[%d].MemClk = %d\n"
					   "smu_dpm_clks.dpm_clks->MemPstateTable[%d].Voltage = %d\n",
					   i, smu_dpm_clks.dpm_clks->MemPstateTable[i].UClk,
					   i, smu_dpm_clks.dpm_clks->MemPstateTable[i].MemClk,
					   i, smu_dpm_clks.dpm_clks->MemPstateTable[i].Voltage);
		}

		if (ctx->dc_bios->integrated_info && ctx->dc->config.use_default_clock_table == false) {
			dcn35_clk_mgr_helper_populate_bw_params(
					&clk_mgr->base,
					ctx->dc_bios->integrated_info,
					smu_dpm_clks.dpm_clks);
		}
	}

	if (smu_dpm_clks.dpm_clks && smu_dpm_clks.mc_address.quad_part != 0)
		dm_helpers_free_gpu_mem(clk_mgr->base.base.ctx, DC_MEM_ALLOC_TYPE_GART,
				smu_dpm_clks.dpm_clks);

	if (ctx->dc->config.disable_ips != DMUB_IPS_DISABLE_ALL) {
		bool ips_support = false;

		/* avoid calling PMFW at init */
		ips_support = dcn35_smu_get_ips_supported(&clk_mgr->base);
		if (ips_support) {
			ctx->dc->debug.ignore_pg = false;
			ctx->dc->debug.disable_dpp_power_gate = false;
			ctx->dc->debug.disable_hubp_power_gate = false;
			ctx->dc->debug.disable_dsc_power_gate = false;

			/* Disable dynamic IPS2 in older PMFW (93.12) for Z8 interop. */
			if (ctx->dc->config.disable_ips == DMUB_IPS_ENABLE &&
			    ctx->dce_version == DCN_VERSION_3_5 &&
			    ((clk_mgr->base.smu_ver & 0x00FFFFFF) <= 0x005d0c00))
				ctx->dc->config.disable_ips = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
		} else {
			/* PMFW does not support IPS; reset the config control flag and disable it entirely. */
			ctx->dc->config.disable_ips = DMUB_IPS_DISABLE_ALL;
		}
	}
}

void dcn35_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int)
{
	struct clk_mgr_dcn35 *clk_mgr = TO_CLK_MGR_DCN35(clk_mgr_int);

	/* Free with the same allocation type used in construct (GART, not frame buffer). */
	if (clk_mgr->smu_wm_set.wm_set && clk_mgr->smu_wm_set.mc_address.quad_part != 0)
		dm_helpers_free_gpu_mem(clk_mgr_int->base.ctx, DC_MEM_ALLOC_TYPE_GART,
				clk_mgr->smu_wm_set.wm_set);
}