• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright 2018 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 #include "dccg.h"
27 #include "clk_mgr_internal.h"
28 
29 
30 #include "dcn20/dcn20_clk_mgr.h"
31 #include "rn_clk_mgr.h"
32 
33 
34 #include "dce100/dce_clk_mgr.h"
35 #include "rn_clk_mgr_vbios_smu.h"
36 #include "reg_helper.h"
37 #include "core_types.h"
38 #include "dm_helpers.h"
39 
40 #include "atomfirmware.h"
41 #include "clk/clk_10_0_2_offset.h"
42 #include "clk/clk_10_0_2_sh_mask.h"
43 #include "renoir_ip_offset.h"
44 
45 
46 /* Constants */
47 
48 #define LPDDR_MEM_RETRAIN_LATENCY 4.977 /* Number obtained from LPDDR4 Training Counter Requirement doc */
49 
50 /* Macros */
51 
52 #define REG(reg_name) \
53 	(CLK_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
54 
/*
 * Program the display clocks computed in @context into hardware via
 * VBIOS/SMU messages, updating only clocks whose cached value changes.
 *
 * @clk_mgr_base:  clock manager whose cached clks mirror hardware state
 * @context:       DC state carrying the newly computed clock values
 * @safe_to_lower: true when clocks may be lowered; false when only
 *                 raises are permitted
 */
void rn_update_clocks(struct clk_mgr *clk_mgr_base,
			struct dc_state *context,
			bool safe_to_lower)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
	struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
	struct dc *dc = clk_mgr_base->ctx->dc;
	int display_count;
	bool update_dppclk = false;
	bool update_dispclk = false;
	bool enter_display_off = false;
	bool dpp_clock_lowered = false;
	struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;

	display_count = clk_mgr_helper_get_active_display_cnt(dc, context);

	if (display_count == 0)
		enter_display_off = true;

	/*
	 * Notify SMU of the display count only when the direction of the
	 * change agrees with safe_to_lower: dropping to zero displays is a
	 * lowering action, reporting a non-zero count is a raise.
	 */
	if (enter_display_off == safe_to_lower) {
		rn_vbios_smu_set_display_count(clk_mgr, display_count);
	}

	if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr_base->clks.phyclk_khz)) {
		clk_mgr_base->clks.phyclk_khz = new_clocks->phyclk_khz;
		rn_vbios_smu_set_phyclk(clk_mgr, clk_mgr_base->clks.phyclk_khz);
	}

	if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) {
		clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
		rn_vbios_smu_set_hard_min_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_khz);
	}

	if (should_set_clock(safe_to_lower,
			new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) {
		clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
		rn_vbios_smu_set_min_deep_sleep_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_deep_sleep_khz);
	}

	/* Record whether dppclk is going down so the DTO/refclk ordering
	 * below can be chosen accordingly. */
	if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
		if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
			dpp_clock_lowered = true;
		clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz;
		update_dppclk = true;
	}

	if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
		clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
		rn_vbios_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);

		update_dispclk = true;
	}

	/* The DTO must always be updated on the side that keeps the
	 * effective pixel clock within limits during the transition. */
	if (dpp_clock_lowered) {
		// if clock is being lowered, increase DTO before lowering refclk
		dcn20_update_clocks_update_dpp_dto(clk_mgr, context);
		rn_vbios_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
	} else {
		// if clock is being raised, increase refclk before lowering DTO
		if (update_dppclk || update_dispclk)
			rn_vbios_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
		if (update_dppclk)
			dcn20_update_clocks_update_dpp_dto(clk_mgr, context);
	}

	/* PSR wait loop is derived from dispclk, so refresh DMCU whenever
	 * dispclk changed (only if DMCU exists and is initialized). */
	if (update_dispclk &&
			dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
		/*update dmcu for wait_loop count*/
		dmcu->funcs->set_psr_wait_loop(dmcu,
			clk_mgr_base->clks.dispclk_khz / 1000 / 7);
	}
}
127 
128 
/*
 * Read the PLL feedback multiplier from CLK1_CLK_PLL_REQ and return the
 * resulting VCO frequency in kHz (fbmult * dfs_ref_freq_khz).
 */
static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
{
	/* get FbMult value */
	struct fixed31_32 pll_req;
	unsigned int fbmult_frac_val = 0;
	unsigned int fbmult_int_val = 0;


	/*
	 * Register value of fbmult is in 8.16 format, we are converting to 31.32
	 * to leverage the fix point operations available in driver
	 */

	REG_GET(CLK1_CLK_PLL_REQ, FbMult_frac, &fbmult_frac_val); /* 16 bit fractional part*/
	REG_GET(CLK1_CLK_PLL_REQ, FbMult_int, &fbmult_int_val); /* 8 bit integer part */

	pll_req = dc_fixpt_from_int(fbmult_int_val);

	/*
	 * since fractional part is only 16 bit in register definition but is 32 bit
	 * in our fix point definiton, need to shift left by 16 to obtain correct value
	 */
	pll_req.value |= fbmult_frac_val << 16;

	/* multiply by REFCLK period */
	pll_req = dc_fixpt_mul_int(pll_req, clk_mgr->dfs_ref_freq_khz);

	/* integer part is now VCO frequency in kHz */
	return dc_fixpt_floor(pll_req);
}
159 
/*
 * Snapshot the raw CLK1 current-count and bypass-control registers into
 * @internal. CLK0=dispclk, CLK1=dppclk, CLK2=dprefclk, CLK3=dcfclk.
 */
static void rn_dump_clk_registers_internal(struct rn_clk_internal *internal, struct clk_mgr *clk_mgr_base)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);

	internal->CLK1_CLK3_CURRENT_CNT = REG_READ(CLK1_CLK3_CURRENT_CNT);
	internal->CLK1_CLK3_BYPASS_CNTL = REG_READ(CLK1_CLK3_BYPASS_CNTL);

	internal->CLK1_CLK3_DS_CNTL = REG_READ(CLK1_CLK3_DS_CNTL);	//dcf deep sleep divider
	internal->CLK1_CLK3_ALLOW_DS = REG_READ(CLK1_CLK3_ALLOW_DS);

	internal->CLK1_CLK1_CURRENT_CNT = REG_READ(CLK1_CLK1_CURRENT_CNT);
	internal->CLK1_CLK1_BYPASS_CNTL = REG_READ(CLK1_CLK1_BYPASS_CNTL);

	internal->CLK1_CLK2_CURRENT_CNT = REG_READ(CLK1_CLK2_CURRENT_CNT);
	internal->CLK1_CLK2_BYPASS_CNTL = REG_READ(CLK1_CLK2_BYPASS_CNTL);

	internal->CLK1_CLK0_CURRENT_CNT = REG_READ(CLK1_CLK0_CURRENT_CNT);
	internal->CLK1_CLK0_BYPASS_CNTL = REG_READ(CLK1_CLK0_BYPASS_CNTL);
}
179 
180 /* This function collect raw clk register values */
rn_dump_clk_registers(struct clk_state_registers_and_bypass * regs_and_bypass,struct clk_mgr * clk_mgr_base,struct clk_log_info * log_info)181 static void rn_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
182 		struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info)
183 {
184 	struct rn_clk_internal internal = {0};
185 	char *bypass_clks[5] = {"0x0 DFS", "0x1 REFCLK", "0x2 ERROR", "0x3 400 FCH", "0x4 600 FCH"};
186 	unsigned int chars_printed = 0;
187 	unsigned int remaining_buffer = log_info->bufSize;
188 
189 	rn_dump_clk_registers_internal(&internal, clk_mgr_base);
190 
191 	regs_and_bypass->dcfclk = internal.CLK1_CLK3_CURRENT_CNT / 10;
192 	regs_and_bypass->dcf_deep_sleep_divider = internal.CLK1_CLK3_DS_CNTL / 10;
193 	regs_and_bypass->dcf_deep_sleep_allow = internal.CLK1_CLK3_ALLOW_DS;
194 	regs_and_bypass->dprefclk = internal.CLK1_CLK2_CURRENT_CNT / 10;
195 	regs_and_bypass->dispclk = internal.CLK1_CLK0_CURRENT_CNT / 10;
196 	regs_and_bypass->dppclk = internal.CLK1_CLK1_CURRENT_CNT / 10;
197 
198 	regs_and_bypass->dppclk_bypass = internal.CLK1_CLK1_BYPASS_CNTL & 0x0007;
199 	if (regs_and_bypass->dppclk_bypass < 0 || regs_and_bypass->dppclk_bypass > 4)
200 		regs_and_bypass->dppclk_bypass = 0;
201 	regs_and_bypass->dcfclk_bypass = internal.CLK1_CLK3_BYPASS_CNTL & 0x0007;
202 	if (regs_and_bypass->dcfclk_bypass < 0 || regs_and_bypass->dcfclk_bypass > 4)
203 		regs_and_bypass->dcfclk_bypass = 0;
204 	regs_and_bypass->dispclk_bypass = internal.CLK1_CLK0_BYPASS_CNTL & 0x0007;
205 	if (regs_and_bypass->dispclk_bypass < 0 || regs_and_bypass->dispclk_bypass > 4)
206 		regs_and_bypass->dispclk_bypass = 0;
207 	regs_and_bypass->dprefclk_bypass = internal.CLK1_CLK2_BYPASS_CNTL & 0x0007;
208 	if (regs_and_bypass->dprefclk_bypass < 0 || regs_and_bypass->dprefclk_bypass > 4)
209 		regs_and_bypass->dprefclk_bypass = 0;
210 
211 	if (log_info->enabled) {
212 		chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "clk_type,clk_value,deepsleep_cntl,deepsleep_allow,bypass\n");
213 		remaining_buffer -= chars_printed;
214 		*log_info->sum_chars_printed += chars_printed;
215 		log_info->pBuf += chars_printed;
216 
217 		chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "dcfclk,%d,%d,%d,%s\n",
218 			regs_and_bypass->dcfclk,
219 			regs_and_bypass->dcf_deep_sleep_divider,
220 			regs_and_bypass->dcf_deep_sleep_allow,
221 			bypass_clks[(int) regs_and_bypass->dcfclk_bypass]);
222 		remaining_buffer -= chars_printed;
223 		*log_info->sum_chars_printed += chars_printed;
224 		log_info->pBuf += chars_printed;
225 
226 		chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "dprefclk,%d,N/A,N/A,%s\n",
227 			regs_and_bypass->dprefclk,
228 			bypass_clks[(int) regs_and_bypass->dprefclk_bypass]);
229 		remaining_buffer -= chars_printed;
230 		*log_info->sum_chars_printed += chars_printed;
231 		log_info->pBuf += chars_printed;
232 
233 		chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "dispclk,%d,N/A,N/A,%s\n",
234 			regs_and_bypass->dispclk,
235 			bypass_clks[(int) regs_and_bypass->dispclk_bypass]);
236 		remaining_buffer -= chars_printed;
237 		*log_info->sum_chars_printed += chars_printed;
238 		log_info->pBuf += chars_printed;
239 
240 		//split
241 		chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "SPLIT\n");
242 		remaining_buffer -= chars_printed;
243 		*log_info->sum_chars_printed += chars_printed;
244 		log_info->pBuf += chars_printed;
245 
246 		// REGISTER VALUES
247 		chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "reg_name,value,clk_type\n");
248 		remaining_buffer -= chars_printed;
249 		*log_info->sum_chars_printed += chars_printed;
250 		log_info->pBuf += chars_printed;
251 
252 		chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK3_CURRENT_CNT,%d,dcfclk\n",
253 				internal.CLK1_CLK3_CURRENT_CNT);
254 		remaining_buffer -= chars_printed;
255 		*log_info->sum_chars_printed += chars_printed;
256 		log_info->pBuf += chars_printed;
257 
258 		chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK3_DS_CNTL,%d,dcf_deep_sleep_divider\n",
259 					internal.CLK1_CLK3_DS_CNTL);
260 		remaining_buffer -= chars_printed;
261 		*log_info->sum_chars_printed += chars_printed;
262 		log_info->pBuf += chars_printed;
263 
264 		chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK3_ALLOW_DS,%d,dcf_deep_sleep_allow\n",
265 					internal.CLK1_CLK3_ALLOW_DS);
266 		remaining_buffer -= chars_printed;
267 		*log_info->sum_chars_printed += chars_printed;
268 		log_info->pBuf += chars_printed;
269 
270 		chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK2_CURRENT_CNT,%d,dprefclk\n",
271 					internal.CLK1_CLK2_CURRENT_CNT);
272 		remaining_buffer -= chars_printed;
273 		*log_info->sum_chars_printed += chars_printed;
274 		log_info->pBuf += chars_printed;
275 
276 		chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK0_CURRENT_CNT,%d,dispclk\n",
277 					internal.CLK1_CLK0_CURRENT_CNT);
278 		remaining_buffer -= chars_printed;
279 		*log_info->sum_chars_printed += chars_printed;
280 		log_info->pBuf += chars_printed;
281 
282 		chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK1_CURRENT_CNT,%d,dppclk\n",
283 					internal.CLK1_CLK1_CURRENT_CNT);
284 		remaining_buffer -= chars_printed;
285 		*log_info->sum_chars_printed += chars_printed;
286 		log_info->pBuf += chars_printed;
287 
288 		chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK3_BYPASS_CNTL,%d,dcfclk_bypass\n",
289 					internal.CLK1_CLK3_BYPASS_CNTL);
290 		remaining_buffer -= chars_printed;
291 		*log_info->sum_chars_printed += chars_printed;
292 		log_info->pBuf += chars_printed;
293 
294 		chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK2_BYPASS_CNTL,%d,dprefclk_bypass\n",
295 					internal.CLK1_CLK2_BYPASS_CNTL);
296 		remaining_buffer -= chars_printed;
297 		*log_info->sum_chars_printed += chars_printed;
298 		log_info->pBuf += chars_printed;
299 
300 		chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK0_BYPASS_CNTL,%d,dispclk_bypass\n",
301 					internal.CLK1_CLK0_BYPASS_CNTL);
302 		remaining_buffer -= chars_printed;
303 		*log_info->sum_chars_printed += chars_printed;
304 		log_info->pBuf += chars_printed;
305 
306 		chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK1_BYPASS_CNTL,%d,dppclk_bypass\n",
307 					internal.CLK1_CLK1_BYPASS_CNTL);
308 		remaining_buffer -= chars_printed;
309 		*log_info->sum_chars_printed += chars_printed;
310 		log_info->pBuf += chars_printed;
311 	}
312 }
313 
314 /* This function produce translated logical clk state values*/
rn_get_clk_states(struct clk_mgr * clk_mgr_base,struct clk_states * s)315 void rn_get_clk_states(struct clk_mgr *clk_mgr_base, struct clk_states *s)
316 {
317 	struct clk_state_registers_and_bypass sb = { 0 };
318 	struct clk_log_info log_info = { 0 };
319 
320 	rn_dump_clk_registers(&sb, clk_mgr_base, &log_info);
321 
322 	s->dprefclk_khz = sb.dprefclk;
323 }
324 
/* Request the PME workaround from the SMU via a VBIOS message. */
void rn_enable_pme_wa(struct clk_mgr *clk_mgr_base)
{
	rn_vbios_smu_enable_pme_wa(TO_CLK_MGR_INTERNAL(clk_mgr_base));
}
331 
/*
 * Function table handed to the DC core by rn_clk_mgr_construct().
 * Not const: the FPGA path in rn_clk_mgr_construct() overrides
 * update_clocks with dcn2_update_clocks_fpga.
 */
static struct clk_mgr_funcs dcn21_funcs = {
	.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
	.update_clocks = rn_update_clocks,
	.init_clocks = dcn2_init_clocks,
	.enable_pme_wa = rn_enable_pme_wa,
	/* .dump_clk_registers = rn_dump_clk_registers */
};
339 
/*
 * Default bandwidth parameters (DDR4, single channel, 4 DPM levels).
 * These act as a fallback: when pp_smu is available, rn_clk_mgr_construct()
 * overwrites the clock table and watermark validity via
 * clk_mgr_helper_populate_bw_params() using the SMU's DPM clock table.
 */
struct clk_bw_params rn_bw_params = {
	.vram_type = Ddr4MemType,
	.num_channels = 1,
	.clk_table = {
		.entries = {
			{
				.voltage = 0,
				.dcfclk_mhz = 400,
				.fclk_mhz = 400,
				.memclk_mhz = 800,
				.socclk_mhz = 0,
			},
			{
				.voltage = 0,
				.dcfclk_mhz = 483,
				.fclk_mhz = 800,
				.memclk_mhz = 1600,
				.socclk_mhz = 0,
			},
			{
				.voltage = 0,
				.dcfclk_mhz = 602,
				.fclk_mhz = 1067,
				.memclk_mhz = 1067,
				.socclk_mhz = 0,
			},
			{
				.voltage = 0,
				.dcfclk_mhz = 738,
				.fclk_mhz = 1333,
				.memclk_mhz = 1600,
				.socclk_mhz = 0,
			},
		},

		.num_entries = 4,
	},

	/* All four watermark sets default to p-state-change type with the
	 * same latency; WM_D may be re-purposed for LPDDR4 retraining in
	 * clk_mgr_helper_populate_bw_params(). */
	.wm_table = {
		.entries = {
			{
				.wm_inst = WM_A,
				.wm_type = WM_TYPE_PSTATE_CHG,
				.pstate_latency_us = 23.84,
				.valid = true,
			},
			{
				.wm_inst = WM_B,
				.wm_type = WM_TYPE_PSTATE_CHG,
				.pstate_latency_us = 23.84,
				.valid = true,
			},
			{
				.wm_inst = WM_C,
				.wm_type = WM_TYPE_PSTATE_CHG,
				.pstate_latency_us = 23.84,
				.valid = true,
			},
			{
				.wm_inst = WM_D,
				.wm_type = WM_TYPE_PSTATE_CHG,
				.pstate_latency_us = 23.84,
				.valid = true,
			},
		},
	}
};
407 
build_watermark_ranges(struct clk_bw_params * bw_params,struct pp_smu_wm_range_sets * ranges)408 void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_range_sets *ranges)
409 {
410 	int i, num_valid_sets;
411 
412 	num_valid_sets = 0;
413 
414 	for (i = 0; i < WM_SET_COUNT; i++) {
415 		/* skip empty entries, the smu array has no holes*/
416 		if (!bw_params->wm_table.entries[i].valid)
417 			continue;
418 
419 		ranges->reader_wm_sets[num_valid_sets].wm_inst = bw_params->wm_table.entries[i].wm_inst;
420 		ranges->reader_wm_sets[num_valid_sets].wm_type = bw_params->wm_table.entries[i].wm_type;;
421 		/* We will not select WM based on dcfclk, so leave it as unconstrained */
422 		ranges->reader_wm_sets[num_valid_sets].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
423 		ranges->reader_wm_sets[num_valid_sets].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
424 		/* fclk wil be used to select WM*/
425 
426 		if (ranges->reader_wm_sets[num_valid_sets].wm_type == WM_TYPE_PSTATE_CHG) {
427 			if (i == 0)
428 				ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = 0;
429 			else {
430 				/* add 1 to make it non-overlapping with next lvl */
431 				ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = bw_params->clk_table.entries[i - 1].fclk_mhz + 1;
432 			}
433 			ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz = bw_params->clk_table.entries[i].fclk_mhz;
434 
435 		} else {
436 			/* unconstrained for memory retraining */
437 			ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
438 			ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
439 
440 			/* Modify previous watermark range to cover up to max */
441 			ranges->reader_wm_sets[num_valid_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
442 		}
443 		num_valid_sets++;
444 	}
445 
446 	ASSERT(num_valid_sets != 0); /* Must have at least one set of valid watermarks */
447 	ranges->num_reader_wm_sets = num_valid_sets;
448 
449 	/* modify the min and max to make sure we cover the whole range*/
450 	ranges->reader_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
451 	ranges->reader_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
452 	ranges->reader_wm_sets[ranges->num_reader_wm_sets - 1].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
453 	ranges->reader_wm_sets[ranges->num_reader_wm_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
454 
455 	/* This is for writeback only, does not matter currently as no writeback support*/
456 	ranges->num_writer_wm_sets = 1;
457 	ranges->writer_wm_sets[0].wm_inst = WM_A;
458 	ranges->writer_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
459 	ranges->writer_wm_sets[0].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
460 	ranges->writer_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
461 	ranges->writer_wm_sets[0].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
462 
463 }
464 
/*
 * Overwrite the default bw_params with the DPM clock table reported by
 * the SMU: copy each fclk DPM level (stopping at the first zero-frequency
 * entry), derive vram type/channel count from the ASIC id, and mark
 * watermark-table entries valid only where a matching fclk level exists.
 * For LPDDR4, WM set D is re-purposed as a memory-retraining watermark.
 */
void clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params, struct dpm_clocks *clock_table, struct hw_asic_id *asic_id)
{
	int i;

	ASSERT(PP_SMU_NUM_FCLK_DPM_LEVELS <= MAX_NUM_DPM_LVL);

	for (i = 0; i < PP_SMU_NUM_FCLK_DPM_LEVELS; i++) {
		/* a zero frequency marks the end of the populated levels */
		if (clock_table->FClocks[i].Freq == 0)
			break;

		bw_params->clk_table.entries[i].dcfclk_mhz = clock_table->DcfClocks[i].Freq;
		bw_params->clk_table.entries[i].fclk_mhz = clock_table->FClocks[i].Freq;
		bw_params->clk_table.entries[i].memclk_mhz = clock_table->MemClocks[i].Freq;
		bw_params->clk_table.entries[i].socclk_mhz = clock_table->SocClocks[i].Freq;
		bw_params->clk_table.entries[i].voltage = clock_table->FClocks[i].Vol;
	}
	bw_params->clk_table.num_entries = i;

	bw_params->vram_type = asic_id->vram_type;
	/* NOTE(review): channel count is derived from DDR4_DRAM_WIDTH even
	 * when vram_type turns out to be LPDDR4 below — confirm the widths
	 * match for all supported memory configurations. */
	bw_params->num_channels = asic_id->vram_width / DDR4_DRAM_WIDTH;

	/* Validate watermark sets against the fclk levels; assumes
	 * WM_SET_COUNT does not exceed PP_SMU_NUM_FCLK_DPM_LEVELS —
	 * TODO confirm. */
	for (i = 0; i < WM_SET_COUNT; i++) {
		bw_params->wm_table.entries[i].wm_inst = i;

		if (clock_table->FClocks[i].Freq == 0) {
			bw_params->wm_table.entries[i].valid = false;
			continue;
		}

		bw_params->wm_table.entries[i].wm_type = WM_TYPE_PSTATE_CHG;
		bw_params->wm_table.entries[i].valid = true;
	}

	if (bw_params->vram_type == LpDdr4MemType) {
		/*
		 * WM set D will be re-purposed for memory retraining
		 */
		bw_params->wm_table.entries[WM_D].pstate_latency_us = LPDDR_MEM_RETRAIN_LATENCY;
		bw_params->wm_table.entries[WM_D].wm_inst = WM_D;
		bw_params->wm_table.entries[WM_D].wm_type = WM_TYPE_RETRAINING;
		bw_params->wm_table.entries[WM_D].valid = true;
	}

}
509 
/*
 * Initialize the Renoir clock manager: wire up the function table, read
 * the DENTIST VCO and dprefclk from hardware (or use FPGA defaults),
 * populate bandwidth parameters from the SMU's DPM table when pp_smu is
 * available, and program the SMU watermark ranges.
 */
void rn_clk_mgr_construct(
		struct dc_context *ctx,
		struct clk_mgr_internal *clk_mgr,
		struct pp_smu_funcs *pp_smu,
		struct dccg *dccg)
{
	struct dc_debug_options *debug = &ctx->dc->debug;
	struct dpm_clocks clock_table = { 0 };
	struct clk_state_registers_and_bypass s = { 0 };

	clk_mgr->base.ctx = ctx;
	clk_mgr->base.funcs = &dcn21_funcs;

	clk_mgr->pp_smu = pp_smu;

	clk_mgr->dccg = dccg;
	clk_mgr->dfs_bypass_disp_clk = 0;

	clk_mgr->dprefclk_ss_percentage = 0;
	clk_mgr->dprefclk_ss_divider = 1000;
	clk_mgr->ss_on_dprefclk = false;
	clk_mgr->dfs_ref_freq_khz = 48000;

	clk_mgr->smu_ver = rn_vbios_smu_get_smu_version(clk_mgr);

	if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) {
		/* NOTE(review): this mutates the file-scope dcn21_funcs table,
		 * affecting every clk_mgr instance constructed afterwards —
		 * presumably acceptable because all instances in an FPGA
		 * environment want the FPGA path; confirm. */
		dcn21_funcs.update_clocks = dcn2_update_clocks_fpga;
		clk_mgr->dentist_vco_freq_khz = 3600000;
		clk_mgr->base.dprefclk_khz = 600000;
	} else {
		struct clk_log_info log_info = {0};

		/* TODO: Check we get what we expect during bringup */
		clk_mgr->dentist_vco_freq_khz = get_vco_frequency_from_reg(clk_mgr);

		/* in case we don't get a value from the register, use default */
		if (clk_mgr->dentist_vco_freq_khz == 0)
			clk_mgr->dentist_vco_freq_khz = 3600000;

		rn_dump_clk_registers(&s, &clk_mgr->base, &log_info);
		clk_mgr->base.dprefclk_khz = s.dprefclk;

		/* NOTE(review): ASSERT(1) is a no-op — looks like ASSERT(0)
		 * was intended to flag an unexpected register value; confirm
		 * before changing, as it would fire on affected hardware. */
		if (clk_mgr->base.dprefclk_khz != 600000) {
			clk_mgr->base.dprefclk_khz = 600000;
			ASSERT(1); //TODO: Renoir follow up.
		}

		/* in case we don't get a value from the register, use default */
		/* NOTE(review): unreachable — the block above already forces
		 * any value != 600000 (including 0) to 600000. */
		if (clk_mgr->base.dprefclk_khz == 0)
			clk_mgr->base.dprefclk_khz = 600000;
	}

	dce_clock_read_ss_info(clk_mgr);

	clk_mgr->base.bw_params = &rn_bw_params;

	/* Replace the static defaults with the SMU's real DPM clock table. */
	if (pp_smu) {
		pp_smu->rn_funcs.get_dpm_clock_table(&pp_smu->rn_funcs.pp_smu, &clock_table);
		clk_mgr_helper_populate_bw_params(clk_mgr->base.bw_params, &clock_table, &ctx->asic_id);
	}

	/*
	 * Notify SMU which set of WM should be selected for different ranges of fclk
	 * On Renoir there is a maximumum of 4 DF pstates supported, could be less
	 * depending on DDR speed and fused maximum fclk.
	 */
	if (!debug->disable_pplib_wm_range) {
		struct pp_smu_wm_range_sets ranges = {0};

		build_watermark_ranges(clk_mgr->base.bw_params, &ranges);

		/* Notify PP Lib/SMU which Watermarks to use for which clock ranges */
		if (pp_smu && pp_smu->rn_funcs.set_wm_ranges)
			pp_smu->rn_funcs.set_wm_ranges(&pp_smu->rn_funcs.pp_smu, &ranges);
	}

	/* enable powerfeatures when displaycount goes to 0 */
	if (!debug->disable_48mhz_pwrdwn)
		rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(clk_mgr);
}
590 
591