1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include <linux/module.h>
24 #include <linux/slab.h>
25 #include <linux/fb.h>
26 #include <asm/div64.h>
27 #include <linux/delay.h>
28 #include "pp_acpi.h"
29 #include "pp_debug.h"
30 #include "ppatomctrl.h"
31 #include "atombios.h"
32 #include "pptable_v1_0.h"
33 #include "pppcielanes.h"
34 #include "amd_pcie_helpers.h"
35 #include "hardwaremanager.h"
36 #include "process_pptables_v1_0.h"
37 #include "cgs_common.h"
38 
39 #include "smu7_common.h"
40 
41 #include "hwmgr.h"
42 #include "smu7_hwmgr.h"
43 #include "smu7_powertune.h"
44 #include "smu7_dyn_defaults.h"
45 #include "smu7_thermal.h"
46 #include "smu7_clockpowergating.h"
47 #include "processpptables.h"
48 
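/*
 * PP_ASSERT_WITH_CODE() (from pp_debug.h) is used throughout this file as a
 * guarded-error pattern: when the condition is false it logs the message and
 * executes the recovery statement (often "return -EINVAL" or
 * "result = tmp_result").  A rough sketch of its shape, assuming the
 * pp_debug.h definition of this kernel generation:
 *
 *	#define PP_ASSERT_WITH_CODE(cond, msg, code)	\
 *		do {					\
 *			if (!(cond)) {			\
 *				printk("%s\n", msg);	\
 *				code;			\
 *			}				\
 *		} while (0)
 *
 * See pp_debug.h for the authoritative definition.
 */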
49 #define MC_CG_ARB_FREQ_F0           0x0a
50 #define MC_CG_ARB_FREQ_F1           0x0b
51 #define MC_CG_ARB_FREQ_F2           0x0c
52 #define MC_CG_ARB_FREQ_F3           0x0d
53 
54 #define MC_CG_SEQ_DRAMCONF_S0       0x05
55 #define MC_CG_SEQ_DRAMCONF_S1       0x06
56 #define MC_CG_SEQ_YCLK_SUSPEND      0x04
57 #define MC_CG_SEQ_YCLK_RESUME       0x0a
58 
59 #define SMC_CG_IND_START            0xc0030000
60 #define SMC_CG_IND_END              0xc0040000
61 
62 #define VOLTAGE_SCALE               4
63 #define VOLTAGE_VID_OFFSET_SCALE1   625
64 #define VOLTAGE_VID_OFFSET_SCALE2   100
65 
66 #define MEM_FREQ_LOW_LATENCY        25000
67 #define MEM_FREQ_HIGH_LATENCY       80000
68 
69 #define MEM_LATENCY_HIGH            45
70 #define MEM_LATENCY_LOW             35
71 #define MEM_LATENCY_ERR             0xFFFF
72 
73 #define MC_SEQ_MISC0_GDDR5_SHIFT 28
74 #define MC_SEQ_MISC0_GDDR5_MASK  0xf0000000
75 #define MC_SEQ_MISC0_GDDR5_VALUE 5
76 
77 #define PCIE_BUS_CLK                10000
78 #define TCLK                        (PCIE_BUS_CLK / 10)
79 
80 
81 /** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
82 enum DPM_EVENT_SRC {
83 	DPM_EVENT_SRC_ANALOG = 0,
84 	DPM_EVENT_SRC_EXTERNAL = 1,
85 	DPM_EVENT_SRC_DIGITAL = 2,
86 	DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
87 	DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4
88 };
89 
90 static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic);
91 
92 struct smu7_power_state *cast_phw_smu7_power_state(
93 				  struct pp_hw_power_state *hw_ps)
94 {
95 	PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
96 				"Invalid Powerstate Type!",
97 				 return NULL);
98 
99 	return (struct smu7_power_state *)hw_ps;
100 }
101 
102 const struct smu7_power_state *cast_const_phw_smu7_power_state(
103 				 const struct pp_hw_power_state *hw_ps)
104 {
105 	PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
106 				"Invalid Powerstate Type!",
107 				 return NULL);
108 
109 	return (const struct smu7_power_state *)hw_ps;
110 }
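/*
 * The two cast helpers above implement a checked downcast: every SMU7 power
 * state carries PhwVIslands_Magic in pp_hw_power_state.magic, so a generic
 * state handed in by the framework is only narrowed to struct
 * smu7_power_state after the magic value has been verified.  A hypothetical
 * caller (names are illustrative, not from this file):
 *
 *	struct smu7_power_state *ps = cast_phw_smu7_power_state(hw_ps);
 *
 *	if (ps == NULL)
 *		return -EINVAL;
 */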
111 
112 /**
113  * Find the MC microcode version and store it in the HwMgr struct
114  *
115  * @param    hwmgr  the address of the powerplay hardware manager.
116  * @return   always 0
117  */
118 int smu7_get_mc_microcode_version(struct pp_hwmgr *hwmgr)
119 {
120 	cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
121 
122 	hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);
123 
124 	return 0;
125 }
126 
127 uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
128 {
129 	uint32_t speedCntl = 0;
130 
131 	/* mmPCIE_PORT_INDEX renamed to mmPCIE_INDEX */
132 	speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
133 			ixPCIE_LC_SPEED_CNTL);
134 	return((uint16_t)PHM_GET_FIELD(speedCntl,
135 			PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
136 }
137 
138 int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
139 {
140 	uint32_t link_width;
141 
142 	/* mmPCIE_PORT_INDEX renamed to mmPCIE_INDEX */
143 	link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
144 			PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD);
145 
146 	PP_ASSERT_WITH_CODE((7 >= link_width),
147 			"Invalid PCIe lane width!", return 0);
148 
149 	return decode_pcie_lane_width(link_width);
150 }
151 
152 /**
153 * Enable the SMC voltage controller
154 *
155 * @param    hwmgr  the address of the powerplay hardware manager.
156 * @return   always 0
157 */
158 int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
159 {
160 	if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK)
161 		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Enable);
162 
163 	return 0;
164 }
165 
166 /**
167 * Checks if we want to support voltage control
168 *
169 * @param    hwmgr  the address of the powerplay hardware manager.
170 */
171 static bool smu7_voltage_control(const struct pp_hwmgr *hwmgr)
172 {
173 	const struct smu7_hwmgr *data =
174 			(const struct smu7_hwmgr *)(hwmgr->backend);
175 
176 	return (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control);
177 }
178 
179 /**
180 * Enable voltage control
181 *
182 * @param    hwmgr  the address of the powerplay hardware manager.
183 * @return   always 0
184 */
185 static int smu7_enable_voltage_control(struct pp_hwmgr *hwmgr)
186 {
187 	/* enable voltage control */
188 	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
189 			GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);
190 
191 	return 0;
192 }
193 
194 static int phm_get_svi2_voltage_table_v0(pp_atomctrl_voltage_table *voltage_table,
195 		struct phm_clock_voltage_dependency_table *voltage_dependency_table
196 		)
197 {
198 	uint32_t i;
199 
200 	PP_ASSERT_WITH_CODE((NULL != voltage_table),
201 			"Voltage Dependency Table empty.", return -EINVAL;);
202 
203 	voltage_table->mask_low = 0;
204 	voltage_table->phase_delay = 0;
205 	voltage_table->count = voltage_dependency_table->count;
206 
207 	for (i = 0; i < voltage_dependency_table->count; i++) {
208 		voltage_table->entries[i].value =
209 			voltage_dependency_table->entries[i].v;
210 		voltage_table->entries[i].smio_low = 0;
211 	}
212 
213 	return 0;
214 }
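/*
 * phm_get_svi2_voltage_table_v0() flattens a v0 clock/voltage dependency
 * table into a plain SVI2 voltage table: only the voltage values are copied,
 * while the GPIO-oriented fields (SMIO mask, phase delay) are zeroed because
 * SVI2 control does not use them.
 */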
215 
216 
217 /**
218 * Create Voltage Tables.
219 *
220 * @param    hwmgr  the address of the powerplay hardware manager.
221 * @return   0 on success; an error code otherwise.
222 */
223 static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr)
224 {
225 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
226 	struct phm_ppt_v1_information *table_info =
227 			(struct phm_ppt_v1_information *)hwmgr->pptable;
228 	int result = 0;
229 	uint32_t tmp;
230 
231 	if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
232 		result = atomctrl_get_voltage_table_v3(hwmgr,
233 				VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT,
234 				&(data->mvdd_voltage_table));
235 		PP_ASSERT_WITH_CODE((0 == result),
236 				"Failed to retrieve MVDD table.",
237 				return result);
238 	} else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
239 		if (hwmgr->pp_table_version == PP_TABLE_V1)
240 			result = phm_get_svi2_mvdd_voltage_table(&(data->mvdd_voltage_table),
241 					table_info->vdd_dep_on_mclk);
242 		else if (hwmgr->pp_table_version == PP_TABLE_V0)
243 			result = phm_get_svi2_voltage_table_v0(&(data->mvdd_voltage_table),
244 					hwmgr->dyn_state.mvdd_dependency_on_mclk);
245 
246 		PP_ASSERT_WITH_CODE((0 == result),
247 				"Failed to retrieve SVI2 MVDD table from dependency table.",
248 				return result;);
249 	}
250 
251 	if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
252 		result = atomctrl_get_voltage_table_v3(hwmgr,
253 				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT,
254 				&(data->vddci_voltage_table));
255 		PP_ASSERT_WITH_CODE((0 == result),
256 				"Failed to retrieve VDDCI table.",
257 				return result);
258 	} else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
259 		if (hwmgr->pp_table_version == PP_TABLE_V1)
260 			result = phm_get_svi2_vddci_voltage_table(&(data->vddci_voltage_table),
261 					table_info->vdd_dep_on_mclk);
262 		else if (hwmgr->pp_table_version == PP_TABLE_V0)
263 			result = phm_get_svi2_voltage_table_v0(&(data->vddci_voltage_table),
264 					hwmgr->dyn_state.vddci_dependency_on_mclk);
265 		PP_ASSERT_WITH_CODE((0 == result),
266 				"Failed to retrieve SVI2 VDDCI table from dependency table.",
267 				return result);
268 	}
269 
270 	if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
271 		/* VDDGFX has only SVI2 voltage control */
272 		result = phm_get_svi2_vdd_voltage_table(&(data->vddgfx_voltage_table),
273 					table_info->vddgfx_lookup_table);
274 		PP_ASSERT_WITH_CODE((0 == result),
275 			"Failed to retrieve SVI2 VDDGFX table from lookup table.", return result;);
276 	}
277 
278 
279 	if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) {
280 		result = atomctrl_get_voltage_table_v3(hwmgr,
281 					VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT,
282 					&data->vddc_voltage_table);
283 		PP_ASSERT_WITH_CODE((0 == result),
284 			"Failed to retrieve VDDC table.", return result;);
285 	} else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
286 
287 		if (hwmgr->pp_table_version == PP_TABLE_V0)
288 			result = phm_get_svi2_voltage_table_v0(&data->vddc_voltage_table,
289 					hwmgr->dyn_state.vddc_dependency_on_mclk);
290 		else if (hwmgr->pp_table_version == PP_TABLE_V1)
291 			result = phm_get_svi2_vdd_voltage_table(&(data->vddc_voltage_table),
292 				table_info->vddc_lookup_table);
293 
294 		PP_ASSERT_WITH_CODE((0 == result),
295 			"Failed to retrieve SVI2 VDDC table from dependency table.", return result;);
296 	}
297 
298 	tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDC);
299 	PP_ASSERT_WITH_CODE(
300 			(data->vddc_voltage_table.count <= tmp),
301 		"Too many voltage values for VDDC. Trimming to fit state table.",
302 			phm_trim_voltage_table_to_fit_state_table(tmp,
303 						&(data->vddc_voltage_table)));
304 
305 	tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDGFX);
306 	PP_ASSERT_WITH_CODE(
307 			(data->vddgfx_voltage_table.count <= tmp),
308 		"Too many voltage values for VDDGFX. Trimming to fit state table.",
309 			phm_trim_voltage_table_to_fit_state_table(tmp,
310 						&(data->vddgfx_voltage_table)));
311 
312 	tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDCI);
313 	PP_ASSERT_WITH_CODE(
314 			(data->vddci_voltage_table.count <= tmp),
315 		"Too many voltage values for VDDCI. Trimming to fit state table.",
316 			phm_trim_voltage_table_to_fit_state_table(tmp,
317 					&(data->vddci_voltage_table)));
318 
319 	tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_MVDD);
320 	PP_ASSERT_WITH_CODE(
321 			(data->mvdd_voltage_table.count <= tmp),
322 		"Too many voltage values for MVDD. Trimming to fit state table.",
323 			phm_trim_voltage_table_to_fit_state_table(tmp,
324 						&(data->mvdd_voltage_table)));
325 
326 	return 0;
327 }
328 
329 /**
330 * Programs static screen detection parameters
331 *
332 * @param    hwmgr  the address of the powerplay hardware manager.
333 * @return   always 0
334 */
335 static int smu7_program_static_screen_threshold_parameters(
336 							struct pp_hwmgr *hwmgr)
337 {
338 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
339 
340 	/* Set static screen threshold unit */
341 	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
342 			CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
343 			data->static_screen_threshold_unit);
344 	/* Set static screen threshold */
345 	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
346 			CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
347 			data->static_screen_threshold);
348 
349 	return 0;
350 }
351 
352 /**
353 * Setup display gap for glitch free memory clock switching.
354 *
355 * @param    hwmgr  the address of the powerplay hardware manager.
356 * @return   always  0
357 */
358 static int smu7_enable_display_gap(struct pp_hwmgr *hwmgr)
359 {
360 	uint32_t display_gap =
361 			cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
362 					ixCG_DISPLAY_GAP_CNTL);
363 
364 	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
365 			DISP_GAP, DISPLAY_GAP_IGNORE);
366 
367 	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
368 			DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);
369 
370 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
371 			ixCG_DISPLAY_GAP_CNTL, display_gap);
372 
373 	return 0;
374 }
375 
376 /**
377 * Programs activity state transition voting clients
378 *
379 * @param    hwmgr  the address of the powerplay hardware manager.
380 * @return   always  0
381 */
382 static int smu7_program_voting_clients(struct pp_hwmgr *hwmgr)
383 {
384 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
385 
386 	/* Clear reset for voting clients before enabling DPM */
387 	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
388 			SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
389 	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
390 			SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);
391 
392 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
393 			ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0);
394 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
395 			ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1);
396 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
397 			ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2);
398 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
399 			ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3);
400 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
401 			ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4);
402 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
403 			ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5);
404 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
405 			ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6);
406 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
407 			ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7);
408 
409 	return 0;
410 }
411 
412 static int smu7_clear_voting_clients(struct pp_hwmgr *hwmgr)
413 {
414 	/* Reset voting clients before disabling DPM */
415 	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
416 			SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
417 	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
418 			SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);
419 
420 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
421 			ixCG_FREQ_TRAN_VOTING_0, 0);
422 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
423 			ixCG_FREQ_TRAN_VOTING_1, 0);
424 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
425 			ixCG_FREQ_TRAN_VOTING_2, 0);
426 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
427 			ixCG_FREQ_TRAN_VOTING_3, 0);
428 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
429 			ixCG_FREQ_TRAN_VOTING_4, 0);
430 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
431 			ixCG_FREQ_TRAN_VOTING_5, 0);
432 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
433 			ixCG_FREQ_TRAN_VOTING_6, 0);
434 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
435 			ixCG_FREQ_TRAN_VOTING_7, 0);
436 
437 	return 0;
438 }
439 
440 /* Copy one arb setting to another and then switch the active set.
441  * arb_src and arb_dest are each one of the MC_CG_ARB_FREQ_Fx constants.
442  */
443 static int smu7_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
444 		uint32_t arb_src, uint32_t arb_dest)
445 {
446 	uint32_t mc_arb_dram_timing;
447 	uint32_t mc_arb_dram_timing2;
448 	uint32_t burst_time;
449 	uint32_t mc_cg_config;
450 
451 	switch (arb_src) {
452 	case MC_CG_ARB_FREQ_F0:
453 		mc_arb_dram_timing  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
454 		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
455 		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
456 		break;
457 	case MC_CG_ARB_FREQ_F1:
458 		mc_arb_dram_timing  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
459 		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
460 		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
461 		break;
462 	default:
463 		return -EINVAL;
464 	}
465 
466 	switch (arb_dest) {
467 	case MC_CG_ARB_FREQ_F0:
468 		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
469 		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
470 		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
471 		break;
472 	case MC_CG_ARB_FREQ_F1:
473 		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
474 		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
475 		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
476 		break;
477 	default:
478 		return -EINVAL;
479 	}
480 
481 	mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
482 	mc_cg_config |= 0x0000000F;
483 	cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
484 	PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest);
485 
486 	return 0;
487 }
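/*
 * smu7_copy_and_switch_arb_sets() copies the DRAM timing registers and the
 * per-state burst time from the source arbitration set to the destination
 * set, then asks the memory controller to activate the destination set via
 * MC_ARB_CG.  For example, the initial F0->F1 switch further below reduces
 * to:
 *
 *	smu7_copy_and_switch_arb_sets(hwmgr, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
 */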
488 
489 static int smu7_reset_to_default(struct pp_hwmgr *hwmgr)
490 {
491 	return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults);
492 }
493 
494 /**
495 * Initial switch from ARB F0->F1
496 *
497 * This function is to be called from the SetPowerState table.
498 * @param    hwmgr  the address of the powerplay hardware manager.
499 * @return   always 0
500 */
501 static int smu7_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
502 {
503 	return smu7_copy_and_switch_arb_sets(hwmgr,
504 			MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
505 }
506 
507 static int smu7_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
508 {
509 	uint32_t tmp;
510 
511 	tmp = (cgs_read_ind_register(hwmgr->device,
512 			CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
513 			0x0000ff00) >> 8;
514 
515 	if (tmp == MC_CG_ARB_FREQ_F0)
516 		return 0;
517 
518 	return smu7_copy_and_switch_arb_sets(hwmgr,
519 			tmp, MC_CG_ARB_FREQ_F0);
520 }
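/*
 * As read above, bits 15:8 of SMC_SCRATCH9 are taken to report the currently
 * active MC arbitration set; if it is already F0 nothing needs to be done,
 * otherwise the active set is copied back into F0 and F0 is re-selected.
 */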
521 
522 static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
523 {
524 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
525 
526 	struct phm_ppt_v1_information *table_info =
527 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
528 	struct phm_ppt_v1_pcie_table *pcie_table = NULL;
529 
530 	uint32_t i, max_entry;
531 	uint32_t tmp;
532 
533 	PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels ||
534 			data->use_pcie_power_saving_levels), "No pcie performance levels!",
535 			return -EINVAL);
536 
537 	if (table_info != NULL)
538 		pcie_table = table_info->pcie_table;
539 
540 	if (data->use_pcie_performance_levels &&
541 			!data->use_pcie_power_saving_levels) {
542 		data->pcie_gen_power_saving = data->pcie_gen_performance;
543 		data->pcie_lane_power_saving = data->pcie_lane_performance;
544 	} else if (!data->use_pcie_performance_levels &&
545 			data->use_pcie_power_saving_levels) {
546 		data->pcie_gen_performance = data->pcie_gen_power_saving;
547 		data->pcie_lane_performance = data->pcie_lane_power_saving;
548 	}
549 	tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_LINK);
550 	phm_reset_single_dpm_table(&data->dpm_table.pcie_speed_table,
551 					tmp,
552 					MAX_REGULAR_DPM_NUMBER);
553 
554 	if (pcie_table != NULL) {
555 		/* max_entry is used to make sure we reserve one PCIE level
556 		 * for boot level (fix for A+A PSPP issue).
557 		 * If the PCIE table from the PPTable has a ULV entry + 8 entries,
558 		 * then ignore the last entry. */
559 		max_entry = (tmp < pcie_table->count) ? tmp : pcie_table->count;
560 		for (i = 1; i < max_entry; i++) {
561 			phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1,
562 					get_pcie_gen_support(data->pcie_gen_cap,
563 							pcie_table->entries[i].gen_speed),
564 					get_pcie_lane_support(data->pcie_lane_cap,
565 							pcie_table->entries[i].lane_width));
566 		}
567 		data->dpm_table.pcie_speed_table.count = max_entry - 1;
568 		smum_update_smc_table(hwmgr, SMU_BIF_TABLE);
569 	} else {
570 		/* Hardcode Pcie Table */
571 		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
572 				get_pcie_gen_support(data->pcie_gen_cap,
573 						PP_Min_PCIEGen),
574 				get_pcie_lane_support(data->pcie_lane_cap,
575 						PP_Max_PCIELane));
576 		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
577 				get_pcie_gen_support(data->pcie_gen_cap,
578 						PP_Min_PCIEGen),
579 				get_pcie_lane_support(data->pcie_lane_cap,
580 						PP_Max_PCIELane));
581 		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
582 				get_pcie_gen_support(data->pcie_gen_cap,
583 						PP_Max_PCIEGen),
584 				get_pcie_lane_support(data->pcie_lane_cap,
585 						PP_Max_PCIELane));
586 		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
587 				get_pcie_gen_support(data->pcie_gen_cap,
588 						PP_Max_PCIEGen),
589 				get_pcie_lane_support(data->pcie_lane_cap,
590 						PP_Max_PCIELane));
591 		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
592 				get_pcie_gen_support(data->pcie_gen_cap,
593 						PP_Max_PCIEGen),
594 				get_pcie_lane_support(data->pcie_lane_cap,
595 						PP_Max_PCIELane));
596 		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
597 				get_pcie_gen_support(data->pcie_gen_cap,
598 						PP_Max_PCIEGen),
599 				get_pcie_lane_support(data->pcie_lane_cap,
600 						PP_Max_PCIELane));
601 
602 		data->dpm_table.pcie_speed_table.count = 6;
603 	}
604 	/* Populate last level for boot PCIE level, but do not increment count. */
605 	phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
606 			data->dpm_table.pcie_speed_table.count,
607 			get_pcie_gen_support(data->pcie_gen_cap,
608 					PP_Min_PCIEGen),
609 			get_pcie_lane_support(data->pcie_lane_cap,
610 					PP_Max_PCIELane));
611 
612 	return 0;
613 }
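/*
 * When the pptable provides a PCIe table, entries 1..N are copied into the
 * DPM PCIe speed table (entry 0 of the source is skipped so one level stays
 * reserved for the boot level); otherwise a fixed six-level default table is
 * built from the reported gen/lane caps.  In both cases the boot level is
 * written one slot past .count and deliberately not counted.
 */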
614 
615 static int smu7_reset_dpm_tables(struct pp_hwmgr *hwmgr)
616 {
617 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
618 
619 	memset(&(data->dpm_table), 0x00, sizeof(data->dpm_table));
620 
621 	phm_reset_single_dpm_table(
622 			&data->dpm_table.sclk_table,
623 				smum_get_mac_definition(hwmgr->smumgr,
624 					SMU_MAX_LEVELS_GRAPHICS),
625 					MAX_REGULAR_DPM_NUMBER);
626 	phm_reset_single_dpm_table(
627 			&data->dpm_table.mclk_table,
628 			smum_get_mac_definition(hwmgr->smumgr,
629 				SMU_MAX_LEVELS_MEMORY), MAX_REGULAR_DPM_NUMBER);
630 
631 	phm_reset_single_dpm_table(
632 			&data->dpm_table.vddc_table,
633 				smum_get_mac_definition(hwmgr->smumgr,
634 					SMU_MAX_LEVELS_VDDC),
635 					MAX_REGULAR_DPM_NUMBER);
636 	phm_reset_single_dpm_table(
637 			&data->dpm_table.vddci_table,
638 			smum_get_mac_definition(hwmgr->smumgr,
639 				SMU_MAX_LEVELS_VDDCI), MAX_REGULAR_DPM_NUMBER);
640 
641 	phm_reset_single_dpm_table(
642 			&data->dpm_table.mvdd_table,
643 				smum_get_mac_definition(hwmgr->smumgr,
644 					SMU_MAX_LEVELS_MVDD),
645 					MAX_REGULAR_DPM_NUMBER);
646 	return 0;
647 }
648 /*
649  * Initialize all DPM state tables for SMU7 based on the
650  * dependency tables.
651  * The dynamic state patching function will then trim these
652  * state tables to the allowed range based
653  * on the power policy or external client requests,
654  * such as UVD requests, etc.
655  */
656 
657 static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
658 {
659 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
660 	struct phm_clock_voltage_dependency_table *allowed_vdd_sclk_table =
661 		hwmgr->dyn_state.vddc_dependency_on_sclk;
662 	struct phm_clock_voltage_dependency_table *allowed_vdd_mclk_table =
663 		hwmgr->dyn_state.vddc_dependency_on_mclk;
664 	struct phm_cac_leakage_table *std_voltage_table =
665 		hwmgr->dyn_state.cac_leakage_table;
666 	uint32_t i;
667 
668 	PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
669 		"SCLK dependency table is missing. This table is mandatory", return -EINVAL);
670 	PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table->count >= 1,
671 		"SCLK dependency table must have at least one entry. This table is mandatory", return -EINVAL);
672 
673 	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
674 		"MCLK dependency table is missing. This table is mandatory", return -EINVAL);
675 	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table->count >= 1,
676 		"MCLK dependency table must have at least one entry. This table is mandatory", return -EINVAL);
677 
678 
679 	/* Initialize Sclk DPM table based on allowed Sclk values */
680 	data->dpm_table.sclk_table.count = 0;
681 
682 	for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
683 		if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value !=
684 				allowed_vdd_sclk_table->entries[i].clk) {
685 			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
686 				allowed_vdd_sclk_table->entries[i].clk;
687 			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; to do */
688 			data->dpm_table.sclk_table.count++;
689 		}
690 	}
691 
692 	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
693 		"MCLK dependency table is missing. This table is mandatory", return -EINVAL);
694 	/* Initialize Mclk DPM table based on allowed Mclk values */
695 	data->dpm_table.mclk_table.count = 0;
696 	for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
697 		if (i == 0 || data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count-1].value !=
698 			allowed_vdd_mclk_table->entries[i].clk) {
699 			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
700 				allowed_vdd_mclk_table->entries[i].clk;
701 			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; */
702 			data->dpm_table.mclk_table.count++;
703 		}
704 	}
705 
706 	/* Initialize Vddc DPM table based on allowed Vddc values and populate corresponding std values. */
707 	for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
708 		data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
709 		data->dpm_table.vddc_table.dpm_levels[i].param1 = std_voltage_table->entries[i].Leakage;
710 		/* param1 is for corresponding std voltage */
711 		data->dpm_table.vddc_table.dpm_levels[i].enabled = 1;
712 	}
713 
714 	data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count;
715 	allowed_vdd_mclk_table = hwmgr->dyn_state.vddci_dependency_on_mclk;
716 
717 	if (NULL != allowed_vdd_mclk_table) {
718 		/* Initialize Vddci DPM table based on allowed Mclk values */
719 		for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
720 			data->dpm_table.vddci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
721 			data->dpm_table.vddci_table.dpm_levels[i].enabled = 1;
722 		}
723 		data->dpm_table.vddci_table.count = allowed_vdd_mclk_table->count;
724 	}
725 
726 	allowed_vdd_mclk_table = hwmgr->dyn_state.mvdd_dependency_on_mclk;
727 
728 	if (NULL != allowed_vdd_mclk_table) {
729 		/*
730 		 * Initialize MVDD DPM table based on allowed Mclk
731 		 * values
732 		 */
733 		for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
734 			data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
735 			data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1;
736 		}
737 		data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count;
738 	}
739 
740 	return 0;
741 }
742 
743 static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
744 {
745 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
746 	struct phm_ppt_v1_information *table_info =
747 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
748 	uint32_t i;
749 
750 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
751 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
752 
753 	if (table_info == NULL)
754 		return -EINVAL;
755 
756 	dep_sclk_table = table_info->vdd_dep_on_sclk;
757 	dep_mclk_table = table_info->vdd_dep_on_mclk;
758 
759 	PP_ASSERT_WITH_CODE(dep_sclk_table != NULL,
760 			"SCLK dependency table is missing.",
761 			return -EINVAL);
762 	PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1,
763 			"SCLK dependency table count is 0.",
764 			return -EINVAL);
765 
766 	PP_ASSERT_WITH_CODE(dep_mclk_table != NULL,
767 			"MCLK dependency table is missing.",
768 			return -EINVAL);
769 	PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
770 			"MCLK dependency table count is 0",
771 			return -EINVAL);
772 
773 	/* Initialize Sclk DPM table based on allowed Sclk values */
774 	data->dpm_table.sclk_table.count = 0;
775 	for (i = 0; i < dep_sclk_table->count; i++) {
776 		if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count - 1].value !=
777 						dep_sclk_table->entries[i].clk) {
778 
779 			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
780 					dep_sclk_table->entries[i].clk;
781 
782 			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled =
783 					(i == 0) ? true : false;
784 			data->dpm_table.sclk_table.count++;
785 		}
786 	}
787 
788 	/* Initialize Mclk DPM table based on allowed Mclk values */
789 	data->dpm_table.mclk_table.count = 0;
790 	for (i = 0; i < dep_mclk_table->count; i++) {
791 		if (i == 0 || data->dpm_table.mclk_table.dpm_levels
792 				[data->dpm_table.mclk_table.count - 1].value !=
793 						dep_mclk_table->entries[i].clk) {
794 			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
795 							dep_mclk_table->entries[i].clk;
796 			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled =
797 							(i == 0) ? true : false;
798 			data->dpm_table.mclk_table.count++;
799 		}
800 	}
801 
802 	return 0;
803 }
804 
805 int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
806 {
807 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
808 
809 	smu7_reset_dpm_tables(hwmgr);
810 
811 	if (hwmgr->pp_table_version == PP_TABLE_V1)
812 		smu7_setup_dpm_tables_v1(hwmgr);
813 	else if (hwmgr->pp_table_version == PP_TABLE_V0)
814 		smu7_setup_dpm_tables_v0(hwmgr);
815 
816 	smu7_setup_default_pcie_table(hwmgr);
817 
818 	/* save a copy of the default DPM table */
819 	memcpy(&(data->golden_dpm_table), &(data->dpm_table),
820 			sizeof(struct smu7_dpm_table));
821 	return 0;
822 }
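/*
 * golden_dpm_table keeps a pristine copy of the freshly constructed DPM
 * tables; later clock adjustments (forced levels, overdrive) can be applied
 * relative to, or restored from, this golden copy.
 */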
823 
824 uint32_t smu7_get_xclk(struct pp_hwmgr *hwmgr)
825 {
826 	uint32_t reference_clock, tmp;
827 	struct cgs_display_info info = {0};
828 	struct cgs_mode_info mode_info = {0};
829 
830 	info.mode_info = &mode_info;
831 
832 	tmp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK);
833 
834 	if (tmp)
835 		return TCLK;
836 
837 	cgs_get_active_displays_info(hwmgr->device, &info);
838 	reference_clock = mode_info.ref_clock;
839 
840 	tmp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL, XTALIN_DIVIDE);
841 
842 	if (0 != tmp)
843 		return reference_clock / 4;
844 
845 	return reference_clock;
846 }
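/*
 * smu7_get_xclk() resolves the reference clock in three steps: if the clock
 * pin is muxed to TCLK the fixed TCLK value is returned; otherwise the
 * display reference clock is used, divided by four when XTALIN_DIVIDE is set.
 */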
847 
848 static int smu7_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
849 {
850 
851 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
852 			PHM_PlatformCaps_RegulatorHot))
853 		return smum_send_msg_to_smc(hwmgr->smumgr,
854 				PPSMC_MSG_EnableVRHotGPIOInterrupt);
855 
856 	return 0;
857 }
858 
859 static int smu7_enable_sclk_control(struct pp_hwmgr *hwmgr)
860 {
861 	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
862 			SCLK_PWRMGT_OFF, 0);
863 	return 0;
864 }
865 
866 static int smu7_enable_ulv(struct pp_hwmgr *hwmgr)
867 {
868 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
869 
870 	if (data->ulv_supported)
871 		return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_EnableULV);
872 
873 	return 0;
874 }
875 
876 static int smu7_disable_ulv(struct pp_hwmgr *hwmgr)
877 {
878 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
879 
880 	if (data->ulv_supported)
881 		return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableULV);
882 
883 	return 0;
884 }
885 
886 static int smu7_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
887 {
888 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
889 			PHM_PlatformCaps_SclkDeepSleep)) {
890 		if (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MASTER_DeepSleep_ON))
891 			PP_ASSERT_WITH_CODE(false,
892 					"Attempt to enable Master Deep Sleep switch failed!",
893 					return -EINVAL);
894 	} else {
895 		if (smum_send_msg_to_smc(hwmgr->smumgr,
896 				PPSMC_MSG_MASTER_DeepSleep_OFF)) {
897 			PP_ASSERT_WITH_CODE(false,
898 					"Attempt to disable Master Deep Sleep switch failed!",
899 					return -EINVAL);
900 		}
901 	}
902 
903 	return 0;
904 }
905 
906 static int smu7_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
907 {
908 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
909 			PHM_PlatformCaps_SclkDeepSleep)) {
910 		if (smum_send_msg_to_smc(hwmgr->smumgr,
911 				PPSMC_MSG_MASTER_DeepSleep_OFF)) {
912 			PP_ASSERT_WITH_CODE(false,
913 					"Attempt to disable Master Deep Sleep switch failed!",
914 					return -EINVAL);
915 		}
916 	}
917 
918 	return 0;
919 }
920 
921 static int smu7_disable_handshake_uvd(struct pp_hwmgr *hwmgr)
922 {
923 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
924 	uint32_t soft_register_value = 0;
925 	uint32_t handshake_disables_offset = data->soft_regs_start
926 				+ smum_get_offsetof(hwmgr->smumgr,
927 					SMU_SoftRegisters, HandshakeDisables);
928 
929 	soft_register_value = cgs_read_ind_register(hwmgr->device,
930 				CGS_IND_REG__SMC, handshake_disables_offset);
931 	soft_register_value |= smum_get_mac_definition(hwmgr->smumgr,
932 					SMU_UVD_MCLK_HANDSHAKE_DISABLE);
933 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
934 			handshake_disables_offset, soft_register_value);
935 	return 0;
936 }
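/*
 * smu7_disable_handshake_uvd() sets the UVD MCLK-handshake disable bit in
 * the HandshakeDisables soft register maintained by the SMU firmware, so
 * that MCLK DPM does not wait on a UVD handshake; it is used below when
 * PP_UVD_HANDSHAKE_MASK is not set in the feature mask.
 */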
937 
938 static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
939 {
940 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
941 
942 	/* enable SCLK dpm */
943 	if (!data->sclk_dpm_key_disabled)
944 		PP_ASSERT_WITH_CODE(
945 		(0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Enable)),
946 		"Failed to enable SCLK DPM during DPM Start Function!",
947 		return -EINVAL);
948 
949 	/* enable MCLK dpm */
950 	if (0 == data->mclk_dpm_key_disabled) {
951 		if (!(hwmgr->feature_mask & PP_UVD_HANDSHAKE_MASK))
952 			smu7_disable_handshake_uvd(hwmgr);
953 		PP_ASSERT_WITH_CODE(
954 				(0 == smum_send_msg_to_smc(hwmgr->smumgr,
955 						PPSMC_MSG_MCLKDPM_Enable)),
956 				"Failed to enable MCLK DPM during DPM Start Function!",
957 				return -EINVAL);
958 
959 		PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
960 
961 		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5);
962 		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5);
963 		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005);
964 		udelay(10);
965 		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005);
966 		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005);
967 		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005);
968 	}
969 
970 	return 0;
971 }
972 
973 static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
974 {
975 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
976 
977 	/* enable general power management */
978 
979 	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
980 			GLOBAL_PWRMGT_EN, 1);
981 
982 	/* enable sclk deep sleep */
983 
984 	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
985 			DYNAMIC_PM_EN, 1);
986 
987 	/* prepare for PCIE DPM */
988 
989 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
990 			data->soft_regs_start +
991 			smum_get_offsetof(hwmgr->smumgr, SMU_SoftRegisters,
992 						VoltageChangeTimeout), 0x1000);
993 	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
994 			SWRST_COMMAND_1, RESETLC, 0x0);
995 
996 	PP_ASSERT_WITH_CODE(
997 			(0 == smum_send_msg_to_smc(hwmgr->smumgr,
998 					PPSMC_MSG_Voltage_Cntl_Enable)),
999 			"Failed to enable voltage DPM during DPM Start Function!",
1000 			return -EINVAL);
1001 
1002 
1003 	if (smu7_enable_sclk_mclk_dpm(hwmgr)) {
1004 		printk(KERN_ERR "Failed to enable Sclk DPM and Mclk DPM!");
1005 		return -EINVAL;
1006 	}
1007 
1008 	/* enable PCIE dpm */
1009 	if (0 == data->pcie_dpm_key_disabled) {
1010 		PP_ASSERT_WITH_CODE(
1011 				(0 == smum_send_msg_to_smc(hwmgr->smumgr,
1012 						PPSMC_MSG_PCIeDPM_Enable)),
1013 				"Failed to enable pcie DPM during DPM Start Function!",
1014 				return -EINVAL);
1015 	}
1016 
1017 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1018 				PHM_PlatformCaps_Falcon_QuickTransition)) {
1019 		PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr->smumgr,
1020 				PPSMC_MSG_EnableACDCGPIOInterrupt)),
1021 				"Failed to enable AC DC GPIO Interrupt!",
1022 				);
1023 	}
1024 
1025 	return 0;
1026 }
1027 
1028 static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
1029 {
1030 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1031 
1032 	/* disable SCLK dpm */
1033 	if (!data->sclk_dpm_key_disabled) {
1034 		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
1035 				"Trying to disable SCLK DPM when DPM is disabled",
1036 				return 0);
1037 		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Disable);
1038 	}
1039 
1040 	/* disable MCLK dpm */
1041 	if (!data->mclk_dpm_key_disabled) {
1042 		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
1043 				"Trying to disable MCLK DPM when DPM is disabled",
1044 				return 0);
1045 		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MCLKDPM_Disable);
1046 	}
1047 
1048 	return 0;
1049 }
1050 
1051 static int smu7_stop_dpm(struct pp_hwmgr *hwmgr)
1052 {
1053 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1054 
1055 	/* disable general power management */
1056 	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
1057 			GLOBAL_PWRMGT_EN, 0);
1058 	/* disable sclk deep sleep */
1059 	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
1060 			DYNAMIC_PM_EN, 0);
1061 
1062 	/* disable PCIE dpm */
1063 	if (!data->pcie_dpm_key_disabled) {
1064 		PP_ASSERT_WITH_CODE(
1065 				(smum_send_msg_to_smc(hwmgr->smumgr,
1066 						PPSMC_MSG_PCIeDPM_Disable) == 0),
1067 				"Failed to disable pcie DPM during DPM Stop Function!",
1068 				return -EINVAL);
1069 	}
1070 
1071 	smu7_disable_sclk_mclk_dpm(hwmgr);
1072 
1073 	PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
1074 			"Trying to disable voltage DPM when DPM is disabled",
1075 			return 0);
1076 
1077 	smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Disable);
1078 
1079 	return 0;
1080 }
1081 
1082 static void smu7_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
1083 {
1084 	bool protection;
1085 	enum DPM_EVENT_SRC src;
1086 
1087 	switch (sources) {
1088 	default:
1089 		printk(KERN_ERR "Unknown throttling event sources.");
1090 		/* fall through */
1091 	case 0:
1092 		protection = false;
1093 		/* src is unused */
1094 		break;
1095 	case (1 << PHM_AutoThrottleSource_Thermal):
1096 		protection = true;
1097 		src = DPM_EVENT_SRC_DIGITAL;
1098 		break;
1099 	case (1 << PHM_AutoThrottleSource_External):
1100 		protection = true;
1101 		src = DPM_EVENT_SRC_EXTERNAL;
1102 		break;
1103 	case (1 << PHM_AutoThrottleSource_External) |
1104 			(1 << PHM_AutoThrottleSource_Thermal):
1105 		protection = true;
1106 		src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
1107 		break;
1108 	}
1109 	/* Order matters - don't enable thermal protection for the wrong source. */
1110 	if (protection) {
1111 		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
1112 				DPM_EVENT_SRC, src);
1113 		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
1114 				THERMAL_PROTECTION_DIS,
1115 				!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1116 						PHM_PlatformCaps_ThermalController));
1117 	} else
1118 		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
1119 				THERMAL_PROTECTION_DIS, 1);
1120 }
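/*
 * The "sources" argument is a bitmask of PHM_AutoThrottleSource_* bits; for
 * example thermal-only throttling (1 << PHM_AutoThrottleSource_Thermal) maps
 * to DPM_EVENT_SRC_DIGITAL.  Thermal protection is only left enabled when at
 * least one known source is active and a thermal controller is present.
 */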
1121 
1122 static int smu7_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
1123 		PHM_AutoThrottleSource source)
1124 {
1125 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1126 
1127 	if (!(data->active_auto_throttle_sources & (1 << source))) {
1128 		data->active_auto_throttle_sources |= 1 << source;
1129 		smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
1130 	}
1131 	return 0;
1132 }
1133 
1134 static int smu7_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
1135 {
1136 	return smu7_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
1137 }
1138 
1139 static int smu7_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
1140 		PHM_AutoThrottleSource source)
1141 {
1142 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1143 
1144 	if (data->active_auto_throttle_sources & (1 << source)) {
1145 		data->active_auto_throttle_sources &= ~(1 << source);
1146 		smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
1147 	}
1148 	return 0;
1149 }
1150 
1151 static int smu7_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
1152 {
1153 	return smu7_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
1154 }
1155 
1156 int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr)
1157 {
1158 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1159 	data->pcie_performance_request = true;
1160 
1161 	return 0;
1162 }
1163 
1164 int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
1165 {
1166 	int tmp_result = 0;
1167 	int result = 0;
1168 
1169 	tmp_result = (!smum_is_dpm_running(hwmgr)) ? 0 : -1;
1170 	PP_ASSERT_WITH_CODE(tmp_result == 0,
1171 			"DPM is already running",
1172 			);
1173 
1174 	if (smu7_voltage_control(hwmgr)) {
1175 		tmp_result = smu7_enable_voltage_control(hwmgr);
1176 		PP_ASSERT_WITH_CODE(tmp_result == 0,
1177 				"Failed to enable voltage control!",
1178 				result = tmp_result);
1179 
1180 		tmp_result = smu7_construct_voltage_tables(hwmgr);
1181 		PP_ASSERT_WITH_CODE((0 == tmp_result),
1182 				"Failed to construct voltage tables!",
1183 				result = tmp_result);
1184 	}
1185 	smum_initialize_mc_reg_table(hwmgr);
1186 
1187 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1188 			PHM_PlatformCaps_EngineSpreadSpectrumSupport))
1189 		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1190 				GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1);
1191 
1192 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1193 			PHM_PlatformCaps_ThermalController))
1194 		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1195 				GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0);
1196 
1197 	tmp_result = smu7_program_static_screen_threshold_parameters(hwmgr);
1198 	PP_ASSERT_WITH_CODE((0 == tmp_result),
1199 			"Failed to program static screen threshold parameters!",
1200 			result = tmp_result);
1201 
1202 	tmp_result = smu7_enable_display_gap(hwmgr);
1203 	PP_ASSERT_WITH_CODE((0 == tmp_result),
1204 			"Failed to enable display gap!", result = tmp_result);
1205 
1206 	tmp_result = smu7_program_voting_clients(hwmgr);
1207 	PP_ASSERT_WITH_CODE((0 == tmp_result),
1208 			"Failed to program voting clients!", result = tmp_result);
1209 
1210 	tmp_result = smum_process_firmware_header(hwmgr);
1211 	PP_ASSERT_WITH_CODE((0 == tmp_result),
1212 			"Failed to process firmware header!", result = tmp_result);
1213 
1214 	tmp_result = smu7_initial_switch_from_arbf0_to_f1(hwmgr);
1215 	PP_ASSERT_WITH_CODE((0 == tmp_result),
1216 			"Failed to initialize switch from ArbF0 to F1!",
1217 			result = tmp_result);
1218 
1219 	result = smu7_setup_default_dpm_tables(hwmgr);
1220 	PP_ASSERT_WITH_CODE(0 == result,
1221 			"Failed to setup default DPM tables!", return result);
1222 
1223 	tmp_result = smum_init_smc_table(hwmgr);
1224 	PP_ASSERT_WITH_CODE((0 == tmp_result),
1225 			"Failed to initialize SMC table!", result = tmp_result);
1226 
1227 	tmp_result = smu7_enable_vrhot_gpio_interrupt(hwmgr);
1228 	PP_ASSERT_WITH_CODE((0 == tmp_result),
1229 			"Failed to enable VR hot GPIO interrupt!", result = tmp_result);
1230 
1231 	smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_NoDisplay);
1232 
1233 	tmp_result = smu7_enable_sclk_control(hwmgr);
1234 	PP_ASSERT_WITH_CODE((0 == tmp_result),
1235 			"Failed to enable SCLK control!", result = tmp_result);
1236 
1237 	tmp_result = smu7_enable_smc_voltage_controller(hwmgr);
1238 	PP_ASSERT_WITH_CODE((0 == tmp_result),
1239 			"Failed to enable voltage control!", result = tmp_result);
1240 
1241 	tmp_result = smu7_enable_ulv(hwmgr);
1242 	PP_ASSERT_WITH_CODE((0 == tmp_result),
1243 			"Failed to enable ULV!", result = tmp_result);
1244 
1245 	tmp_result = smu7_enable_deep_sleep_master_switch(hwmgr);
1246 	PP_ASSERT_WITH_CODE((0 == tmp_result),
1247 			"Failed to enable deep sleep master switch!", result = tmp_result);
1248 
1249 	tmp_result = smu7_enable_didt_config(hwmgr);
1250 	PP_ASSERT_WITH_CODE((tmp_result == 0),
1251 			"Failed to enable DIDT config!", result = tmp_result);
1252 
1253 	tmp_result = smu7_start_dpm(hwmgr);
1254 	PP_ASSERT_WITH_CODE((0 == tmp_result),
1255 			"Failed to start DPM!", result = tmp_result);
1256 
1257 	tmp_result = smu7_enable_smc_cac(hwmgr);
1258 	PP_ASSERT_WITH_CODE((0 == tmp_result),
1259 			"Failed to enable SMC CAC!", result = tmp_result);
1260 
1261 	tmp_result = smu7_enable_power_containment(hwmgr);
1262 	PP_ASSERT_WITH_CODE((0 == tmp_result),
1263 			"Failed to enable power containment!", result = tmp_result);
1264 
1265 	tmp_result = smu7_power_control_set_level(hwmgr);
1266 	PP_ASSERT_WITH_CODE((0 == tmp_result),
1267 			"Failed to power control set level!", result = tmp_result);
1268 
1269 	tmp_result = smu7_enable_thermal_auto_throttle(hwmgr);
1270 	PP_ASSERT_WITH_CODE((0 == tmp_result),
1271 			"Failed to enable thermal auto throttle!", result = tmp_result);
1272 
1273 	tmp_result = smu7_pcie_performance_request(hwmgr);
1274 	PP_ASSERT_WITH_CODE((0 == tmp_result),
1275 			"pcie performance request failed!", result = tmp_result);
1276 
1277 	return 0;
1278 }
1279 
1280 int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
1281 {
1282 	int tmp_result, result = 0;
1283 
1284 	tmp_result = (smum_is_dpm_running(hwmgr)) ? 0 : -1;
1285 	PP_ASSERT_WITH_CODE(tmp_result == 0,
1286 			"DPM is not running right now, no need to disable DPM!",
1287 			return 0);
1288 
1289 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1290 			PHM_PlatformCaps_ThermalController))
1291 		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1292 				GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1);
1293 
1294 	tmp_result = smu7_disable_power_containment(hwmgr);
1295 	PP_ASSERT_WITH_CODE((tmp_result == 0),
1296 			"Failed to disable power containment!", result = tmp_result);
1297 
1298 	tmp_result = smu7_disable_smc_cac(hwmgr);
1299 	PP_ASSERT_WITH_CODE((tmp_result == 0),
1300 			"Failed to disable SMC CAC!", result = tmp_result);
1301 
1302 	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1303 			CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
1304 	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1305 			GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0);
1306 
1307 	tmp_result = smu7_disable_thermal_auto_throttle(hwmgr);
1308 	PP_ASSERT_WITH_CODE((tmp_result == 0),
1309 			"Failed to disable thermal auto throttle!", result = tmp_result);
1310 
1311 	if (1 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
1312 		PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableAvfs)),
1313 					"Failed to disable AVFS!",
1314 					return -EINVAL);
1315 	}
1316 
1317 	tmp_result = smu7_stop_dpm(hwmgr);
1318 	PP_ASSERT_WITH_CODE((tmp_result == 0),
1319 			"Failed to stop DPM!", result = tmp_result);
1320 
1321 	tmp_result = smu7_disable_deep_sleep_master_switch(hwmgr);
1322 	PP_ASSERT_WITH_CODE((tmp_result == 0),
1323 			"Failed to disable deep sleep master switch!", result = tmp_result);
1324 
1325 	tmp_result = smu7_disable_ulv(hwmgr);
1326 	PP_ASSERT_WITH_CODE((tmp_result == 0),
1327 			"Failed to disable ULV!", result = tmp_result);
1328 
1329 	tmp_result = smu7_clear_voting_clients(hwmgr);
1330 	PP_ASSERT_WITH_CODE((tmp_result == 0),
1331 			"Failed to clear voting clients!", result = tmp_result);
1332 
1333 	tmp_result = smu7_reset_to_default(hwmgr);
1334 	PP_ASSERT_WITH_CODE((tmp_result == 0),
1335 			"Failed to reset to default!", result = tmp_result);
1336 
1337 	tmp_result = smu7_force_switch_to_arbf0(hwmgr);
1338 	PP_ASSERT_WITH_CODE((tmp_result == 0),
1339 			"Failed to force switch to ARB F0!", result = tmp_result);
1340 
1341 	return result;
1342 }
1343 
1344 int smu7_reset_asic_tasks(struct pp_hwmgr *hwmgr)
1345 {
1346 
1347 	return 0;
1348 }
1349 
1350 static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
1351 {
1352 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1353 	struct phm_ppt_v1_information *table_info =
1354 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
1355 
1356 	data->dll_default_on = false;
1357 	data->mclk_dpm0_activity_target = 0xa;
1358 	data->mclk_activity_target = SMU7_MCLK_TARGETACTIVITY_DFLT;
1359 	data->vddc_vddgfx_delta = 300;
1360 	data->static_screen_threshold = SMU7_STATICSCREENTHRESHOLD_DFLT;
1361 	data->static_screen_threshold_unit = SMU7_STATICSCREENTHRESHOLDUNIT_DFLT;
1362 	data->voting_rights_clients0 = SMU7_VOTINGRIGHTSCLIENTS_DFLT0;
1363 	data->voting_rights_clients1 = SMU7_VOTINGRIGHTSCLIENTS_DFLT1;
1364 	data->voting_rights_clients2 = SMU7_VOTINGRIGHTSCLIENTS_DFLT2;
1365 	data->voting_rights_clients3 = SMU7_VOTINGRIGHTSCLIENTS_DFLT3;
1366 	data->voting_rights_clients4 = SMU7_VOTINGRIGHTSCLIENTS_DFLT4;
1367 	data->voting_rights_clients5 = SMU7_VOTINGRIGHTSCLIENTS_DFLT5;
1368 	data->voting_rights_clients6 = SMU7_VOTINGRIGHTSCLIENTS_DFLT6;
1369 	data->voting_rights_clients7 = SMU7_VOTINGRIGHTSCLIENTS_DFLT7;
1370 
1371 	data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
1372 	data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
1373 	data->pcie_dpm_key_disabled = hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true;
1374 	/* need to set voltage control types before EVV patching */
1375 	data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE;
1376 	data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE;
1377 	data->mvdd_control = SMU7_VOLTAGE_CONTROL_NONE;
1378 	data->enable_tdc_limit_feature = true;
1379 	data->enable_pkg_pwr_tracking_feature = true;
1380 	data->force_pcie_gen = PP_PCIEGenInvalid;
1381 	data->ulv_supported = hwmgr->feature_mask & PP_ULV_MASK ? true : false;
1382 
1383 	data->fast_watermark_threshold = 100;
1384 	if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
1385 			VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
1386 		data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
1387 
1388 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1389 			PHM_PlatformCaps_ControlVDDGFX)) {
1390 		if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
1391 			VOLTAGE_TYPE_VDDGFX, VOLTAGE_OBJ_SVID2)) {
1392 			data->vdd_gfx_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
1393 		}
1394 	}
1395 
1396 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1397 			PHM_PlatformCaps_EnableMVDDControl)) {
1398 		if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
1399 				VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
1400 			data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
1401 		else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
1402 				VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
1403 			data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
1404 	}
1405 
1406 	if (SMU7_VOLTAGE_CONTROL_NONE == data->vdd_gfx_control) {
1407 		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1408 			PHM_PlatformCaps_ControlVDDGFX);
1409 	}
1410 
1411 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1412 			PHM_PlatformCaps_ControlVDDCI)) {
1413 		if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
1414 				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
1415 			data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
1416 		else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
1417 				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
1418 			data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
1419 	}
1420 
1421 	if (data->mvdd_control == SMU7_VOLTAGE_CONTROL_NONE)
1422 		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1423 				PHM_PlatformCaps_EnableMVDDControl);
1424 
1425 	if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE)
1426 		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1427 				PHM_PlatformCaps_ControlVDDCI);
1428 
1429 	if ((hwmgr->pp_table_version != PP_TABLE_V0)
1430 		&& (table_info->cac_dtp_table->usClockStretchAmount != 0))
1431 		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1432 					PHM_PlatformCaps_ClockStretcher);
1433 
1434 	data->pcie_gen_performance.max = PP_PCIEGen1;
1435 	data->pcie_gen_performance.min = PP_PCIEGen3;
1436 	data->pcie_gen_power_saving.max = PP_PCIEGen1;
1437 	data->pcie_gen_power_saving.min = PP_PCIEGen3;
1438 	data->pcie_lane_performance.max = 0;
1439 	data->pcie_lane_performance.min = 16;
1440 	data->pcie_lane_power_saving.max = 0;
1441 	data->pcie_lane_power_saving.min = 16;
1442 }
1443 
1444 /**
1445  * Get leakage VDDC based on leakage ID.
1446  *
1447  * @param    hwmgr  the address of the powerplay hardware manager.
1448  * @return   0 on success; -EINVAL if an invalid EVV voltage is read.
1449  */
1450 static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
1451 {
1452 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1453 	uint16_t vv_id;
1454 	uint16_t vddc = 0;
1455 	uint16_t vddgfx = 0;
1456 	uint16_t i, j;
1457 	uint32_t sclk = 0;
1458 	struct phm_ppt_v1_information *table_info =
1459 			(struct phm_ppt_v1_information *)hwmgr->pptable;
1460 	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL;
1461 
1462 
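	/*
	 * Walk the virtual voltage IDs (ATOM_VIRTUAL_VOLTAGE_ID0 + i), look up
	 * an SCLK that uses each ID, query the real EVV voltage behind it and
	 * record the (leakage ID, actual voltage) pair for later table patching.
	 */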
1463 	for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
1464 		vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
1465 
1466 		if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
1467 			if ((hwmgr->pp_table_version == PP_TABLE_V1)
1468 			    && !phm_get_sclk_for_voltage_evv(hwmgr,
1469 						table_info->vddgfx_lookup_table, vv_id, &sclk)) {
1470 				if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1471 							PHM_PlatformCaps_ClockStretcher)) {
1472 					sclk_table = table_info->vdd_dep_on_sclk;
1473 
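					/*
					 * With clock stretching enabled, bump the
					 * query SCLK by 5000 if it matches a
					 * dependency entry that has CKS disabled.
					 */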
1474 					for (j = 1; j < sclk_table->count; j++) {
1475 						if (sclk_table->entries[j].clk == sclk &&
1476 								sclk_table->entries[j].cks_enable == 0) {
1477 							sclk += 5000;
1478 							break;
1479 						}
1480 					}
1481 				}
1482 				if (0 == atomctrl_get_voltage_evv_on_sclk
1483 				    (hwmgr, VOLTAGE_TYPE_VDDGFX, sclk,
1484 				     vv_id, &vddgfx)) {
1485 					/* need to make sure vddgfx is less than 2v or else, it could burn the ASIC. */
1486 					PP_ASSERT_WITH_CODE((vddgfx < 2000 && vddgfx != 0), "Invalid VDDGFX value!", return -EINVAL);
1487 
1488 					/* the voltage should not be zero nor equal to leakage ID */
1489 					if (vddgfx != 0 && vddgfx != vv_id) {
1490 						data->vddcgfx_leakage.actual_voltage[data->vddcgfx_leakage.count] = vddgfx;
1491 						data->vddcgfx_leakage.leakage_id[data->vddcgfx_leakage.count] = vv_id;
1492 						data->vddcgfx_leakage.count++;
1493 					}
1494 				} else {
1495 					printk("Error retrieving EVV voltage value!\n");
1496 				}
1497 			}
1498 		} else {
1499 			if ((hwmgr->pp_table_version == PP_TABLE_V0)
1500 				|| !phm_get_sclk_for_voltage_evv(hwmgr,
1501 					table_info->vddc_lookup_table, vv_id, &sclk)) {
1502 				if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1503 						PHM_PlatformCaps_ClockStretcher)) {
1504 					if (table_info == NULL)
1505 						return -EINVAL;
1506 					sclk_table = table_info->vdd_dep_on_sclk;
1507 
1508 					for (j = 1; j < sclk_table->count; j++) {
1509 						if (sclk_table->entries[j].clk == sclk &&
1510 								sclk_table->entries[j].cks_enable == 0) {
1511 							sclk += 5000;
1512 							break;
1513 						}
1514 					}
1515 				}
1516 
1517 				if (phm_get_voltage_evv_on_sclk(hwmgr,
1518 							VOLTAGE_TYPE_VDDC,
1519 							sclk, vv_id, &vddc) == 0) {
1520 					if (vddc >= 2000 || vddc == 0)
1521 						return -EINVAL;
1522 				} else {
1523 					printk(KERN_WARNING "Failed to retrieve EVV voltage!\n");
1524 					continue;
1525 				}
1526 
1527 				/* the voltage should not be zero nor equal to leakage ID */
1528 				if (vddc != 0 && vddc != vv_id) {
1529 					data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc);
1530 					data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
1531 					data->vddc_leakage.count++;
1532 				}
1533 			}
1534 		}
1535 	}
1536 
1537 	return 0;
1538 }
1539 
1540 /**
1541  * Change virtual leakage voltage to actual value.
1542  *
1543  * @param     hwmgr  the address of the powerplay hardware manager.
1544  * @param     voltage  pointer to the voltage to be patched with the actual value.
1545  * @param     leakage_table  pointer to the leakage table.
1546  */
1547 static void smu7_patch_ppt_v1_with_vdd_leakage(struct pp_hwmgr *hwmgr,
1548 		uint16_t *voltage, struct smu7_leakage_voltage *leakage_table)
1549 {
1550 	uint32_t index;
1551 
1552 	/* search for leakage voltage ID 0xff01 ~ 0xff08 */
1553 	for (index = 0; index < leakage_table->count; index++) {
1554 		/* if this voltage matches a leakage voltage ID */
1555 		/* patch with actual leakage voltage */
1556 		if (leakage_table->leakage_id[index] == *voltage) {
1557 			*voltage = leakage_table->actual_voltage[index];
1558 			break;
1559 		}
1560 	}
1561 
1562 	if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
1563 		printk(KERN_ERR "Voltage value looks like a leakage ID but was not patched!\n");
1564 }
1565 
1566 /**
1567  * Patch voltage lookup table by EVV leakages.
1568  *
1569  * @param     hwmgr  the address of the powerplay hardware manager.
1570  * @param     lookup_table  pointer to the voltage lookup table.
1571  * @param     leakage_table  pointer to the leakage table.
1572  * @return    always 0
1573  */
1574 static int smu7_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
1575 		phm_ppt_v1_voltage_lookup_table *lookup_table,
1576 		struct smu7_leakage_voltage *leakage_table)
1577 {
1578 	uint32_t i;
1579 
1580 	for (i = 0; i < lookup_table->count; i++)
1581 		smu7_patch_ppt_v1_with_vdd_leakage(hwmgr,
1582 				&lookup_table->entries[i].us_vdd, leakage_table);
1583 
1584 	return 0;
1585 }
1586 
1587 static int smu7_patch_clock_voltage_limits_with_vddc_leakage(
1588 		struct pp_hwmgr *hwmgr, struct smu7_leakage_voltage *leakage_table,
1589 		uint16_t *vddc)
1590 {
1591 	struct phm_ppt_v1_information *table_info =
1592 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
1593 	smu7_patch_ppt_v1_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
1594 	hwmgr->dyn_state.max_clock_voltage_on_dc.vddc =
1595 			table_info->max_clock_voltage_on_dc.vddc;
1596 	return 0;
1597 }
1598 
1599 static int smu7_patch_voltage_dependency_tables_with_lookup_table(
1600 		struct pp_hwmgr *hwmgr)
1601 {
1602 	uint8_t entry_id;
1603 	uint8_t voltage_id;
1604 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1605 	struct phm_ppt_v1_information *table_info =
1606 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
1607 
1608 	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
1609 			table_info->vdd_dep_on_sclk;
1610 	struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
1611 			table_info->vdd_dep_on_mclk;
1612 	struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1613 			table_info->mm_dep_table;
1614 
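	/*
	 * Resolve each dependency entry's vddInd index into a real voltage from
	 * the lookup tables: with SVID2 VDDGFX control the SCLK table gets vddgfx,
	 * otherwise it gets vddc; the MCLK and MM tables always get vddc.
	 */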
1615 	if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
1616 		for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
1617 			voltage_id = sclk_table->entries[entry_id].vddInd;
1618 			sclk_table->entries[entry_id].vddgfx =
1619 				table_info->vddgfx_lookup_table->entries[voltage_id].us_vdd;
1620 		}
1621 	} else {
1622 		for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
1623 			voltage_id = sclk_table->entries[entry_id].vddInd;
1624 			sclk_table->entries[entry_id].vddc =
1625 				table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
1626 		}
1627 	}
1628 
1629 	for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
1630 		voltage_id = mclk_table->entries[entry_id].vddInd;
1631 		mclk_table->entries[entry_id].vddc =
1632 			table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
1633 	}
1634 
1635 	for (entry_id = 0; entry_id < mm_table->count; ++entry_id) {
1636 		voltage_id = mm_table->entries[entry_id].vddcInd;
1637 		mm_table->entries[entry_id].vddc =
1638 			table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
1639 	}
1640 
1641 	return 0;
1642 
1643 }
1644 
1645 static int phm_add_voltage(struct pp_hwmgr *hwmgr,
1646 			phm_ppt_v1_voltage_lookup_table *look_up_table,
1647 			phm_ppt_v1_voltage_lookup_record *record)
1648 {
1649 	uint32_t i;
1650 
1651 	PP_ASSERT_WITH_CODE((NULL != look_up_table),
1652 		"Lookup Table empty.", return -EINVAL);
1653 	PP_ASSERT_WITH_CODE((0 != look_up_table->count),
1654 		"Lookup Table empty.", return -EINVAL);
1655 
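	/* Reject the add if the table already holds the SMU's maximum number of VDDGFX levels. */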
1656 	i = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDGFX);
1657 	PP_ASSERT_WITH_CODE((i >= look_up_table->count),
1658 		"Lookup Table is full.", return -EINVAL);
1659 
1660 	/* This is to avoid entering duplicate calculated records. */
1661 	for (i = 0; i < look_up_table->count; i++) {
1662 		if (look_up_table->entries[i].us_vdd == record->us_vdd) {
1663 			if (look_up_table->entries[i].us_calculated == 1)
1664 				return 0;
1665 			break;
1666 		}
1667 	}
1668 
1669 	look_up_table->entries[i].us_calculated = 1;
1670 	look_up_table->entries[i].us_vdd = record->us_vdd;
1671 	look_up_table->entries[i].us_cac_low = record->us_cac_low;
1672 	look_up_table->entries[i].us_cac_mid = record->us_cac_mid;
1673 	look_up_table->entries[i].us_cac_high = record->us_cac_high;
1674 	/* Only increment the count when we're appending, not replacing duplicate entry. */
1675 	if (i == look_up_table->count)
1676 		look_up_table->count++;
1677 
1678 	return 0;
1679 }
1680 
1681 
1682 static int smu7_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr)
1683 {
1684 	uint8_t entry_id;
1685 	struct phm_ppt_v1_voltage_lookup_record v_record;
1686 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1687 	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
1688 
1689 	phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk;
1690 	phm_ppt_v1_clock_voltage_dependency_table *mclk_table = pptable_info->vdd_dep_on_mclk;
1691 
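	/*
	 * When VDDGFX is controlled via SVID2, derive VDDC for the SCLK table and
	 * VDDGFX for the MCLK table by applying vdd_offset (bit 15 marks a negative
	 * delta, handled by subtracting 0xFFFF), then append the derived voltages
	 * to the corresponding lookup tables via phm_add_voltage().
	 */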
1692 	if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
1693 		for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
1694 			if (sclk_table->entries[entry_id].vdd_offset & (1 << 15))
1695 				v_record.us_vdd = sclk_table->entries[entry_id].vddgfx +
1696 					sclk_table->entries[entry_id].vdd_offset - 0xFFFF;
1697 			else
1698 				v_record.us_vdd = sclk_table->entries[entry_id].vddgfx +
1699 					sclk_table->entries[entry_id].vdd_offset;
1700 
1701 			sclk_table->entries[entry_id].vddc =
1702 				v_record.us_cac_low = v_record.us_cac_mid =
1703 				v_record.us_cac_high = v_record.us_vdd;
1704 
1705 			phm_add_voltage(hwmgr, pptable_info->vddc_lookup_table, &v_record);
1706 		}
1707 
1708 		for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
1709 			if (mclk_table->entries[entry_id].vdd_offset & (1 << 15))
1710 				v_record.us_vdd = mclk_table->entries[entry_id].vddc +
1711 					mclk_table->entries[entry_id].vdd_offset - 0xFFFF;
1712 			else
1713 				v_record.us_vdd = mclk_table->entries[entry_id].vddc +
1714 					mclk_table->entries[entry_id].vdd_offset;
1715 
1716 			mclk_table->entries[entry_id].vddgfx = v_record.us_cac_low =
1717 				v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
1718 			phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
1719 		}
1720 	}
1721 	return 0;
1722 }
1723 
1724 static int smu7_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr)
1725 {
1726 	uint8_t entry_id;
1727 	struct phm_ppt_v1_voltage_lookup_record v_record;
1728 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1729 	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
1730 	phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
1731 
1732 	if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
1733 		for (entry_id = 0; entry_id < mm_table->count; entry_id++) {
1734 			if (mm_table->entries[entry_id].vddgfx_offset & (1 << 15))
1735 				v_record.us_vdd = mm_table->entries[entry_id].vddc +
1736 					mm_table->entries[entry_id].vddgfx_offset - 0xFFFF;
1737 			else
1738 				v_record.us_vdd = mm_table->entries[entry_id].vddc +
1739 					mm_table->entries[entry_id].vddgfx_offset;
1740 
1741 			/* Add the calculated VDDGFX to the VDDGFX lookup table */
1742 			mm_table->entries[entry_id].vddgfx = v_record.us_cac_low =
1743 				v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
1744 			phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
1745 		}
1746 	}
1747 	return 0;
1748 }
1749 
1750 static int smu7_sort_lookup_table(struct pp_hwmgr *hwmgr,
1751 		struct phm_ppt_v1_voltage_lookup_table *lookup_table)
1752 {
1753 	uint32_t table_size, i, j;
1754 	struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
1755 	table_size = lookup_table->count;
1756 
1757 	PP_ASSERT_WITH_CODE(0 != lookup_table->count,
1758 		"Lookup table is empty", return -EINVAL);
1759 
1760 	/* Sort entries in ascending order of us_vdd. */
1761 	for (i = 0; i < table_size - 1; i++) {
1762 		for (j = i + 1; j > 0; j--) {
1763 			if (lookup_table->entries[j].us_vdd <
1764 					lookup_table->entries[j - 1].us_vdd) {
1765 				tmp_voltage_lookup_record = lookup_table->entries[j - 1];
1766 				lookup_table->entries[j - 1] = lookup_table->entries[j];
1767 				lookup_table->entries[j] = tmp_voltage_lookup_record;
1768 			}
1769 		}
1770 	}
1771 
1772 	return 0;
1773 }
1774 
1775 static int smu7_complete_dependency_tables(struct pp_hwmgr *hwmgr)
1776 {
1777 	int result = 0;
1778 	int tmp_result;
1779 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1780 	struct phm_ppt_v1_information *table_info =
1781 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
1782 
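	/*
	 * Patch leakage IDs out of the voltage lookup tables first, then rebuild
	 * the clock/voltage dependency tables from the patched lookup tables and
	 * re-sort the lookup tables by voltage.
	 */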
1783 	if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
1784 		tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr,
1785 			table_info->vddgfx_lookup_table, &(data->vddcgfx_leakage));
1786 		if (tmp_result != 0)
1787 			result = tmp_result;
1788 
1789 		smu7_patch_ppt_v1_with_vdd_leakage(hwmgr,
1790 			&table_info->max_clock_voltage_on_dc.vddgfx, &(data->vddcgfx_leakage));
1791 	} else {
1792 
1793 		tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr,
1794 				table_info->vddc_lookup_table, &(data->vddc_leakage));
1795 		if (tmp_result)
1796 			result = tmp_result;
1797 
1798 		tmp_result = smu7_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
1799 				&(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
1800 		if (tmp_result)
1801 			result = tmp_result;
1802 	}
1803 
1804 	tmp_result = smu7_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
1805 	if (tmp_result)
1806 		result = tmp_result;
1807 
1808 	tmp_result = smu7_calc_voltage_dependency_tables(hwmgr);
1809 	if (tmp_result)
1810 		result = tmp_result;
1811 
1812 	tmp_result = smu7_calc_mm_voltage_dependency_table(hwmgr);
1813 	if (tmp_result)
1814 		result = tmp_result;
1815 
1816 	tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddgfx_lookup_table);
1817 	if (tmp_result)
1818 		result = tmp_result;
1819 
1820 	tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
1821 	if (tmp_result)
1822 		result = tmp_result;
1823 
1824 	return result;
1825 }
1826 
1827 static int smu7_set_private_data_based_on_pptable_v1(struct pp_hwmgr *hwmgr)
1828 {
1829 	struct phm_ppt_v1_information *table_info =
1830 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
1831 
1832 	struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
1833 						table_info->vdd_dep_on_sclk;
1834 	struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
1835 						table_info->vdd_dep_on_mclk;
1836 
1837 	PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
1838 		"VDD dependency on SCLK table is missing.",
1839 		return -EINVAL);
1840 	PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
1841 		"VDD dependency on SCLK table has to have at least 1 entry.",
1842 		return -EINVAL);
1843 
1844 	PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
1845 		"VDD dependency on MCLK table is missing",
1846 		return -EINVAL);
1847 	PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
1848 		"VDD dependency on MCLK table has to have at least 1 entry.",
1849 		return -EINVAL);
1850 
1851 	table_info->max_clock_voltage_on_ac.sclk =
1852 		allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
1853 	table_info->max_clock_voltage_on_ac.mclk =
1854 		allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
1855 	table_info->max_clock_voltage_on_ac.vddc =
1856 		allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
1857 	table_info->max_clock_voltage_on_ac.vddci =
1858 		allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
1859 
1860 	hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = table_info->max_clock_voltage_on_ac.sclk;
1861 	hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = table_info->max_clock_voltage_on_ac.mclk;
1862 	hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = table_info->max_clock_voltage_on_ac.vddc;
1863 	hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = table_info->max_clock_voltage_on_ac.vddci;
1864 
1865 	return 0;
1866 }
1867 
1868 int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
1869 {
1870 	struct phm_ppt_v1_information *table_info =
1871 		       (struct phm_ppt_v1_information *)(hwmgr->pptable);
1872 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
1873 	struct phm_ppt_v1_voltage_lookup_table *lookup_table;
1874 	uint32_t i;
1875 	uint32_t hw_revision, sub_vendor_id, sub_sys_id;
1876 	struct cgs_system_info sys_info = {0};
1877 
1878 	if (table_info != NULL) {
1879 		dep_mclk_table = table_info->vdd_dep_on_mclk;
1880 		lookup_table = table_info->vddc_lookup_table;
1881 	} else
1882 		return 0;
1883 
1884 	sys_info.size = sizeof(struct cgs_system_info);
1885 
1886 	sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV;
1887 	cgs_query_system_info(hwmgr->device, &sys_info);
1888 	hw_revision = (uint32_t)sys_info.value;
1889 
1890 	sys_info.info_id = CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID;
1891 	cgs_query_system_info(hwmgr->device, &sys_info);
1892 	sub_sys_id = (uint32_t)sys_info.value;
1893 
1894 	sys_info.info_id = CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID;
1895 	cgs_query_system_info(hwmgr->device, &sys_info);
1896 	sub_vendor_id = (uint32_t)sys_info.value;
1897 
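	/*
	 * Workaround for specific Polaris10 (rev 0xC7) boards, matched by PCIe
	 * subsystem IDs: if the top MCLK DPM level points at a VDDC entry below
	 * 1000, re-point it at the first non-leakage lookup entry of at least 1000.
	 */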
1898 	if (hwmgr->chip_id == CHIP_POLARIS10 && hw_revision == 0xC7 &&
1899 			((sub_sys_id == 0xb37 && sub_vendor_id == 0x1002) ||
1900 		    (sub_sys_id == 0x4a8 && sub_vendor_id == 0x1043) ||
1901 		    (sub_sys_id == 0x9480 && sub_vendor_id == 0x1682))) {
1902 		if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000)
1903 			return 0;
1904 
1905 		for (i = 0; i < lookup_table->count; i++) {
1906 			if (lookup_table->entries[i].us_vdd < 0xff01 && lookup_table->entries[i].us_vdd >= 1000) {
1907 				dep_mclk_table->entries[dep_mclk_table->count-1].vddInd = (uint8_t) i;
1908 				return 0;
1909 			}
1910 		}
1911 	}
1912 	return 0;
1913 }
1914 
1915 static int smu7_thermal_parameter_init(struct pp_hwmgr *hwmgr)
1916 {
1917 	struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
1918 	uint32_t temp_reg;
1919 	struct phm_ppt_v1_information *table_info =
1920 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
1921 
1922 
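	/*
	 * If a GPIO pin is assigned for VDDC PCC, program the CNB_PWRMGT_CNTL
	 * field that corresponds to the pin's bit shift.
	 */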
1923 	if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) {
1924 		temp_reg = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL);
1925 		switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) {
1926 		case 0:
1927 			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1);
1928 			break;
1929 		case 1:
1930 			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2);
1931 			break;
1932 		case 2:
1933 			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW, 0x1);
1934 			break;
1935 		case 3:
1936 			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1);
1937 			break;
1938 		case 4:
1939 			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1);
1940 			break;
1941 		default:
1942 			PP_ASSERT_WITH_CODE(0,
1943 			"Failed to setup PCC HW register! Wrong GPIO assigned for VDDC_PCC_GPIO_PINID!",
1944 			);
1945 			break;
1946 		}
1947 		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL, temp_reg);
1948 	}
1949 
1950 	if (table_info == NULL)
1951 		return 0;
1952 
1953 	if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp != 0 &&
1954 		hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode) {
1955 		hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit =
1956 			(uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;
1957 
1958 		hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMaxLimit =
1959 			(uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
1960 
1961 		hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMStep = 1;
1962 
1963 		hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit = 100;
1964 
1965 		hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMinLimit =
1966 			(uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;
1967 
1968 		hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMStep = 1;
1969 
1970 		table_info->cac_dtp_table->usDefaultTargetOperatingTemp = (table_info->cac_dtp_table->usDefaultTargetOperatingTemp >= 50) ?
1971 								(table_info->cac_dtp_table->usDefaultTargetOperatingTemp - 50) : 0;
1972 
1973 		table_info->cac_dtp_table->usOperatingTempMaxLimit = table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
1974 		table_info->cac_dtp_table->usOperatingTempStep = 1;
1975 		table_info->cac_dtp_table->usOperatingTempHyst = 1;
1976 
1977 		hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM =
1978 			       hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
1979 
1980 		hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
1981 			       hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM;
1982 
1983 		hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit =
1984 			       table_info->cac_dtp_table->usOperatingTempMinLimit;
1985 
1986 		hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit =
1987 			       table_info->cac_dtp_table->usOperatingTempMaxLimit;
1988 
1989 		hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp =
1990 			       table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
1991 
1992 		hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep =
1993 			       table_info->cac_dtp_table->usOperatingTempStep;
1994 
1995 		hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp =
1996 			       table_info->cac_dtp_table->usTargetOperatingTemp;
1997 		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1998 						PHM_PlatformCaps_ODFuzzyFanControlSupport);
1999 	}
2000 
2001 	return 0;
2002 }
2003 
2004 /**
2005  * Change virtual leakage voltage to actual value.
2006  *
2007  * @param     hwmgr  the address of the powerplay hardware manager.
2008  * @param     voltage  pointer to the voltage to be patched with the actual value.
2009  * @param     leakage_table  pointer to the leakage table.
2010  */
2011 static void smu7_patch_ppt_v0_with_vdd_leakage(struct pp_hwmgr *hwmgr,
2012 		uint32_t *voltage, struct smu7_leakage_voltage *leakage_table)
2013 {
2014 	uint32_t index;
2015 
2016 	/* search for leakage voltage ID 0xff01 ~ 0xff08 */
2017 	for (index = 0; index < leakage_table->count; index++) {
2018 		/* if this voltage matches a leakage voltage ID */
2019 		/* patch with actual leakage voltage */
2020 		if (leakage_table->leakage_id[index] == *voltage) {
2021 			*voltage = leakage_table->actual_voltage[index];
2022 			break;
2023 		}
2024 	}
2025 
2026 	if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
2027 		printk(KERN_ERR "Voltage value looks like a leakage ID but was not patched!\n");
2028 }
2029 
2030 
2031 static int smu7_patch_vddc(struct pp_hwmgr *hwmgr,
2032 			      struct phm_clock_voltage_dependency_table *tab)
2033 {
2034 	uint16_t i;
2035 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2036 
2037 	if (tab)
2038 		for (i = 0; i < tab->count; i++)
2039 			smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2040 						&data->vddc_leakage);
2041 
2042 	return 0;
2043 }
2044 
2045 static int smu7_patch_vddci(struct pp_hwmgr *hwmgr,
2046 			       struct phm_clock_voltage_dependency_table *tab)
2047 {
2048 	uint16_t i;
2049 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2050 
2051 	if (tab)
2052 		for (i = 0; i < tab->count; i++)
2053 			smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2054 							&data->vddci_leakage);
2055 
2056 	return 0;
2057 }
2058 
2059 static int smu7_patch_vce_vddc(struct pp_hwmgr *hwmgr,
2060 				  struct phm_vce_clock_voltage_dependency_table *tab)
2061 {
2062 	uint16_t i;
2063 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2064 
2065 	if (tab)
2066 		for (i = 0; i < tab->count; i++)
2067 			smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2068 							&data->vddc_leakage);
2069 
2070 	return 0;
2071 }
2072 
2073 
2074 static int smu7_patch_uvd_vddc(struct pp_hwmgr *hwmgr,
2075 				  struct phm_uvd_clock_voltage_dependency_table *tab)
2076 {
2077 	uint16_t i;
2078 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2079 
2080 	if (tab)
2081 		for (i = 0; i < tab->count; i++)
2082 			smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2083 							&data->vddc_leakage);
2084 
2085 	return 0;
2086 }
2087 
2088 static int smu7_patch_vddc_shed_limit(struct pp_hwmgr *hwmgr,
2089 					 struct phm_phase_shedding_limits_table *tab)
2090 {
2091 	uint16_t i;
2092 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2093 
2094 	if (tab)
2095 		for (i = 0; i < tab->count; i++)
2096 			smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].Voltage,
2097 							&data->vddc_leakage);
2098 
2099 	return 0;
2100 }
2101 
2102 static int smu7_patch_samu_vddc(struct pp_hwmgr *hwmgr,
2103 				   struct phm_samu_clock_voltage_dependency_table *tab)
2104 {
2105 	uint16_t i;
2106 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2107 
2108 	if (tab)
2109 		for (i = 0; i < tab->count; i++)
2110 			smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2111 							&data->vddc_leakage);
2112 
2113 	return 0;
2114 }
2115 
2116 static int smu7_patch_acp_vddc(struct pp_hwmgr *hwmgr,
2117 				  struct phm_acp_clock_voltage_dependency_table *tab)
2118 {
2119 	uint16_t i;
2120 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2121 
2122 	if (tab)
2123 		for (i = 0; i < tab->count; i++)
2124 			smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2125 					&data->vddc_leakage);
2126 
2127 	return 0;
2128 }
2129 
2130 static int smu7_patch_limits_vddc(struct pp_hwmgr *hwmgr,
2131 				  struct phm_clock_and_voltage_limits *tab)
2132 {
2133 	uint32_t vddc, vddci;
2134 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2135 
2136 	if (tab) {
2137 		vddc = tab->vddc;
2138 		smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc,
2139 						   &data->vddc_leakage);
2140 		tab->vddc = vddc;
2141 		vddci = tab->vddci;
2142 		smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddci,
2143 						   &data->vddci_leakage);
2144 		tab->vddci = vddci;
2145 	}
2146 
2147 	return 0;
2148 }
2149 
2150 static int smu7_patch_cac_vddc(struct pp_hwmgr *hwmgr, struct phm_cac_leakage_table *tab)
2151 {
2152 	uint32_t i;
2153 	uint32_t vddc;
2154 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2155 
2156 	if (tab) {
2157 		for (i = 0; i < tab->count; i++) {
2158 			vddc = (uint32_t)(tab->entries[i].Vddc);
2159 			smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc, &data->vddc_leakage);
2160 			tab->entries[i].Vddc = (uint16_t)vddc;
2161 		}
2162 	}
2163 
2164 	return 0;
2165 }
2166 
2167 static int smu7_patch_dependency_tables_with_leakage(struct pp_hwmgr *hwmgr)
2168 {
2169 	int tmp;
2170 
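	/*
	 * Replace any virtual leakage voltage IDs left in the legacy (v0)
	 * dependency tables with the actual voltages recorded earlier.
	 */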
2171 	tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_sclk);
2172 	if (tmp)
2173 		return -EINVAL;
2174 
2175 	tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_mclk);
2176 	if (tmp)
2177 		return -EINVAL;
2178 
2179 	tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
2180 	if (tmp)
2181 		return -EINVAL;
2182 
2183 	tmp = smu7_patch_vddci(hwmgr, hwmgr->dyn_state.vddci_dependency_on_mclk);
2184 	if (tmp)
2185 		return -EINVAL;
2186 
2187 	tmp = smu7_patch_vce_vddc(hwmgr, hwmgr->dyn_state.vce_clock_voltage_dependency_table);
2188 	if (tmp)
2189 		return -EINVAL;
2190 
2191 	tmp = smu7_patch_uvd_vddc(hwmgr, hwmgr->dyn_state.uvd_clock_voltage_dependency_table);
2192 	if (tmp)
2193 		return -EINVAL;
2194 
2195 	tmp = smu7_patch_samu_vddc(hwmgr, hwmgr->dyn_state.samu_clock_voltage_dependency_table);
2196 	if (tmp)
2197 		return -EINVAL;
2198 
2199 	tmp = smu7_patch_acp_vddc(hwmgr, hwmgr->dyn_state.acp_clock_voltage_dependency_table);
2200 	if (tmp)
2201 		return -EINVAL;
2202 
2203 	tmp = smu7_patch_vddc_shed_limit(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table);
2204 	if (tmp)
2205 		return -EINVAL;
2206 
2207 	tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_ac);
2208 	if (tmp)
2209 		return -EINVAL;
2210 
2211 	tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_dc);
2212 	if (tmp)
2213 		return -EINVAL;
2214 
2215 	tmp = smu7_patch_cac_vddc(hwmgr, hwmgr->dyn_state.cac_leakage_table);
2216 	if (tmp)
2217 		return -EINVAL;
2218 
2219 	return 0;
2220 }
2221 
2222 
2223 static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr)
2224 {
2225 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2226 
2227 	struct phm_clock_voltage_dependency_table *allowed_sclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
2228 	struct phm_clock_voltage_dependency_table *allowed_mclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
2229 	struct phm_clock_voltage_dependency_table *allowed_mclk_vddci_table = hwmgr->dyn_state.vddci_dependency_on_mclk;
2230 
2231 	PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table != NULL,
2232 		"VDDC dependency on SCLK table is missing. This table is mandatory\n", return -EINVAL);
2233 	PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table->count >= 1,
2234 		"VDDC dependency on SCLK table has to have at least 1 entry. This table is mandatory\n", return -EINVAL);
2235 
2236 	PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table != NULL,
2237 		"VDDC dependency on MCLK table is missing. This table is mandatory\n", return -EINVAL);
2238 	PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table->count >= 1,
2239 		"VDD dependency on MCLK table has to have at least 1 entry. This table is mandatory\n", return -EINVAL);
2240 
2241 	data->min_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[0].v;
2242 	data->max_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
2243 
2244 	hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
2245 		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
2246 	hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
2247 		allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk;
2248 	hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
2249 		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
2250 
2251 	if (allowed_mclk_vddci_table != NULL && allowed_mclk_vddci_table->count >= 1) {
2252 		data->min_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[0].v;
2253 		data->max_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
2254 	}
2255 
2256 	if (hwmgr->dyn_state.vddci_dependency_on_mclk != NULL && hwmgr->dyn_state.vddci_dependency_on_mclk->count > 1)
2257 		hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = hwmgr->dyn_state.vddci_dependency_on_mclk->entries[hwmgr->dyn_state.vddci_dependency_on_mclk->count - 1].v;
2258 
2259 	return 0;
2260 }
2261 
2262 int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
2263 {
2264 	struct smu7_hwmgr *data;
2265 	int result;
2266 
2267 	data = kzalloc(sizeof(struct smu7_hwmgr), GFP_KERNEL);
2268 	if (data == NULL)
2269 		return -ENOMEM;
2270 
2271 	hwmgr->backend = data;
2272 
2273 	smu7_patch_voltage_workaround(hwmgr);
2274 	smu7_init_dpm_defaults(hwmgr);
2275 
2276 	/* Get leakage voltage based on leakage ID. */
2277 	result = smu7_get_evv_voltages(hwmgr);
2278 
2279 	if (result) {
2280 		printk("Get EVV voltage failed, aborting driver load!\n");
2281 		return -EINVAL;
2282 	}
2283 
2284 	if (hwmgr->pp_table_version == PP_TABLE_V1) {
2285 		smu7_complete_dependency_tables(hwmgr);
2286 		smu7_set_private_data_based_on_pptable_v1(hwmgr);
2287 	} else if (hwmgr->pp_table_version == PP_TABLE_V0) {
2288 		smu7_patch_dependency_tables_with_leakage(hwmgr);
2289 		smu7_set_private_data_based_on_pptable_v0(hwmgr);
2290 	}
2291 
2292 	/* Initialize Dynamic State Adjustment Rule Settings */
2293 	result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
2294 
2295 	if (0 == result) {
2296 		struct cgs_system_info sys_info = {0};
2297 
2298 		data->is_tlu_enabled = false;
2299 
2300 		hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
2301 							SMU7_MAX_HARDWARE_POWERLEVELS;
2302 		hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
2303 		hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
2304 
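		/*
		 * Query the platform's PCIe gen and lane-width capabilities
		 * through CGS; fall back to the AMDGPU defaults if the query fails.
		 */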
2305 		sys_info.size = sizeof(struct cgs_system_info);
2306 		sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
2307 		result = cgs_query_system_info(hwmgr->device, &sys_info);
2308 		if (result)
2309 			data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK;
2310 		else
2311 			data->pcie_gen_cap = (uint32_t)sys_info.value;
2312 		if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
2313 			data->pcie_spc_cap = 20;
2314 		sys_info.size = sizeof(struct cgs_system_info);
2315 		sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
2316 		result = cgs_query_system_info(hwmgr->device, &sys_info);
2317 		if (result)
2318 			data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK;
2319 		else
2320 			data->pcie_lane_cap = (uint32_t)sys_info.value;
2321 
2322 		hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
2323 /* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
2324 		hwmgr->platform_descriptor.clockStep.engineClock = 500;
2325 		hwmgr->platform_descriptor.clockStep.memoryClock = 500;
2326 		smu7_thermal_parameter_init(hwmgr);
2327 	} else {
2328 		/* Ignore return value in here, we are cleaning up a mess. */
2329 		phm_hwmgr_backend_fini(hwmgr);
2330 	}
2331 
2332 	return 0;
2333 }
2334 
2335 static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
2336 {
2337 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2338 	uint32_t level, tmp;
2339 
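	/*
	 * For each DPM type that is not key-disabled, find the highest enabled
	 * level (the most significant set bit of the enable mask) and force it.
	 */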
2340 	if (!data->pcie_dpm_key_disabled) {
2341 		if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
2342 			level = 0;
2343 			tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask;
2344 			while (tmp >>= 1)
2345 				level++;
2346 
2347 			if (level)
2348 				smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2349 						PPSMC_MSG_PCIeDPM_ForceLevel, level);
2350 		}
2351 	}
2352 
2353 	if (!data->sclk_dpm_key_disabled) {
2354 		if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
2355 			level = 0;
2356 			tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
2357 			while (tmp >>= 1)
2358 				level++;
2359 
2360 			if (level)
2361 				smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2362 						PPSMC_MSG_SCLKDPM_SetEnabledMask,
2363 						(1 << level));
2364 		}
2365 	}
2366 
2367 	if (!data->mclk_dpm_key_disabled) {
2368 		if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
2369 			level = 0;
2370 			tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
2371 			while (tmp >>= 1)
2372 				level++;
2373 
2374 			if (level)
2375 				smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2376 						PPSMC_MSG_MCLKDPM_SetEnabledMask,
2377 						(1 << level));
2378 		}
2379 	}
2380 
2381 	return 0;
2382 }
2383 
2384 static int smu7_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
2385 {
2386 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2387 
2388 	if (hwmgr->pp_table_version == PP_TABLE_V1)
2389 		phm_apply_dal_min_voltage_request(hwmgr);
2390 /* TODO: for v0 Iceland and CI */
2391 
2392 	if (!data->sclk_dpm_key_disabled) {
2393 		if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
2394 			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2395 					PPSMC_MSG_SCLKDPM_SetEnabledMask,
2396 					data->dpm_level_enable_mask.sclk_dpm_enable_mask);
2397 	}
2398 
2399 	if (!data->mclk_dpm_key_disabled) {
2400 		if (data->dpm_level_enable_mask.mclk_dpm_enable_mask)
2401 			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2402 					PPSMC_MSG_MCLKDPM_SetEnabledMask,
2403 					data->dpm_level_enable_mask.mclk_dpm_enable_mask);
2404 	}
2405 
2406 	return 0;
2407 }
2408 
2409 static int smu7_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
2410 {
2411 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2412 
2413 	if (!smum_is_dpm_running(hwmgr))
2414 		return -EINVAL;
2415 
2416 	if (!data->pcie_dpm_key_disabled) {
2417 		smum_send_msg_to_smc(hwmgr->smumgr,
2418 				PPSMC_MSG_PCIeDPM_UnForceLevel);
2419 	}
2420 
2421 	return smu7_upload_dpm_level_enable_mask(hwmgr);
2422 }
2423 
2424 static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr)
2425 {
2426 	struct smu7_hwmgr *data =
2427 			(struct smu7_hwmgr *)(hwmgr->backend);
2428 	uint32_t level;
2429 
2430 	if (!data->sclk_dpm_key_disabled) {
2431 		if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
2432 			level = phm_get_lowest_enabled_level(hwmgr,
2433 							      data->dpm_level_enable_mask.sclk_dpm_enable_mask);
2434 			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2435 							    PPSMC_MSG_SCLKDPM_SetEnabledMask,
2436 							    (1 << level));
2437 		}
2438 	}
2439 
2440 	if (!data->mclk_dpm_key_disabled) {
2441 		if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
2442 			level = phm_get_lowest_enabled_level(hwmgr,
2443 							      data->dpm_level_enable_mask.mclk_dpm_enable_mask);
2444 			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2445 							    PPSMC_MSG_MCLKDPM_SetEnabledMask,
2446 							    (1 << level));
2447 		}
2448 	}
2449 
2450 	if (!data->pcie_dpm_key_disabled) {
2451 		if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
2452 			level = phm_get_lowest_enabled_level(hwmgr,
2453 							      data->dpm_level_enable_mask.pcie_dpm_enable_mask);
2454 			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2455 							    PPSMC_MSG_PCIeDPM_ForceLevel,
2456 							    (level));
2457 		}
2458 	}
2459 
2460 	return 0;
2461 
2462 }
2463 static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
2464 				enum amd_dpm_forced_level level)
2465 {
2466 	int ret = 0;
2467 
2468 	switch (level) {
2469 	case AMD_DPM_FORCED_LEVEL_HIGH:
2470 		ret = smu7_force_dpm_highest(hwmgr);
2471 		if (ret)
2472 			return ret;
2473 		break;
2474 	case AMD_DPM_FORCED_LEVEL_LOW:
2475 		ret = smu7_force_dpm_lowest(hwmgr);
2476 		if (ret)
2477 			return ret;
2478 		break;
2479 	case AMD_DPM_FORCED_LEVEL_AUTO:
2480 		ret = smu7_unforce_dpm_levels(hwmgr);
2481 		if (ret)
2482 			return ret;
2483 		break;
2484 	default:
2485 		break;
2486 	}
2487 
2488 	hwmgr->dpm_level = level;
2489 
2490 	return ret;
2491 }
2492 
2493 static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr)
2494 {
2495 	return sizeof(struct smu7_power_state);
2496 }
2497 
2498 
2499 static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
2500 				struct pp_power_state *request_ps,
2501 			const struct pp_power_state *current_ps)
2502 {
2503 
2504 	struct smu7_power_state *smu7_ps =
2505 				cast_phw_smu7_power_state(&request_ps->hardware);
2506 	uint32_t sclk;
2507 	uint32_t mclk;
2508 	struct PP_Clocks minimum_clocks = {0};
2509 	bool disable_mclk_switching;
2510 	bool disable_mclk_switching_for_frame_lock;
2511 	struct cgs_display_info info = {0};
2512 	const struct phm_clock_and_voltage_limits *max_limits;
2513 	uint32_t i;
2514 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2515 	struct phm_ppt_v1_information *table_info =
2516 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
2517 	int32_t count;
2518 	int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
2519 
2520 	data->battery_state = (PP_StateUILabel_Battery ==
2521 			request_ps->classification.ui_label);
2522 
2523 	PP_ASSERT_WITH_CODE(smu7_ps->performance_level_count == 2,
2524 				 "VI should always have 2 performance levels",
2525 				);
2526 
2527 	max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
2528 			&(hwmgr->dyn_state.max_clock_voltage_on_ac) :
2529 			&(hwmgr->dyn_state.max_clock_voltage_on_dc);
2530 
2531 	/* Cap clock DPM tables at DC MAX if it is in DC. */
2532 	if (PP_PowerSource_DC == hwmgr->power_source) {
2533 		for (i = 0; i < smu7_ps->performance_level_count; i++) {
2534 			if (smu7_ps->performance_levels[i].memory_clock > max_limits->mclk)
2535 				smu7_ps->performance_levels[i].memory_clock = max_limits->mclk;
2536 			if (smu7_ps->performance_levels[i].engine_clock > max_limits->sclk)
2537 				smu7_ps->performance_levels[i].engine_clock = max_limits->sclk;
2538 		}
2539 	}
2540 
2541 	smu7_ps->vce_clks.evclk = hwmgr->vce_arbiter.evclk;
2542 	smu7_ps->vce_clks.ecclk = hwmgr->vce_arbiter.ecclk;
2543 
2544 	cgs_get_active_displays_info(hwmgr->device, &info);
2545 
2546 	/*TO DO result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
2547 
2548 	minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock;
2549 	minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock;
2550 
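	/*
	 * StablePState: pin the engine clock to 75% of the AC maximum, snapped
	 * down to the nearest SCLK dependency entry, and the memory clock to the
	 * AC maximum.
	 */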
2551 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2552 			PHM_PlatformCaps_StablePState)) {
2553 		max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
2554 		stable_pstate_sclk = (max_limits->sclk * 75) / 100;
2555 
2556 		for (count = table_info->vdd_dep_on_sclk->count - 1;
2557 				count >= 0; count--) {
2558 			if (stable_pstate_sclk >=
2559 					table_info->vdd_dep_on_sclk->entries[count].clk) {
2560 				stable_pstate_sclk =
2561 						table_info->vdd_dep_on_sclk->entries[count].clk;
2562 				break;
2563 			}
2564 		}
2565 
2566 		if (count < 0)
2567 			stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
2568 
2569 		stable_pstate_mclk = max_limits->mclk;
2570 
2571 		minimum_clocks.engineClock = stable_pstate_sclk;
2572 		minimum_clocks.memoryClock = stable_pstate_mclk;
2573 	}
2574 
2575 	if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk)
2576 		minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk;
2577 
2578 	if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
2579 		minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
2580 
2581 	smu7_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold;
2582 
2583 	if (0 != hwmgr->gfx_arbiter.sclk_over_drive) {
2584 		PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <=
2585 				hwmgr->platform_descriptor.overdriveLimit.engineClock),
2586 				"Overdrive sclk exceeds limit",
2587 				hwmgr->gfx_arbiter.sclk_over_drive =
2588 						hwmgr->platform_descriptor.overdriveLimit.engineClock);
2589 
2590 		if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
2591 			smu7_ps->performance_levels[1].engine_clock =
2592 					hwmgr->gfx_arbiter.sclk_over_drive;
2593 	}
2594 
2595 	if (0 != hwmgr->gfx_arbiter.mclk_over_drive) {
2596 		PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <=
2597 				hwmgr->platform_descriptor.overdriveLimit.memoryClock),
2598 				"Overdrive mclk exceeds limit",
2599 				hwmgr->gfx_arbiter.mclk_over_drive =
2600 						hwmgr->platform_descriptor.overdriveLimit.memoryClock);
2601 
2602 		if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
2603 			smu7_ps->performance_levels[1].memory_clock =
2604 					hwmgr->gfx_arbiter.mclk_over_drive;
2605 	}
2606 
2607 	disable_mclk_switching_for_frame_lock = phm_cap_enabled(
2608 				    hwmgr->platform_descriptor.platformCaps,
2609 				    PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
2610 
2611 
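	/*
	 * Disable MCLK switching when more than one display is active or when
	 * frame lock requires it; the state then runs at its highest memory clock.
	 */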
2612 	disable_mclk_switching = (1 < info.display_count) ||
2613 				    disable_mclk_switching_for_frame_lock;
2614 
2615 	sclk = smu7_ps->performance_levels[0].engine_clock;
2616 	mclk = smu7_ps->performance_levels[0].memory_clock;
2617 
2618 	if (disable_mclk_switching)
2619 		mclk = smu7_ps->performance_levels
2620 		[smu7_ps->performance_level_count - 1].memory_clock;
2621 
2622 	if (sclk < minimum_clocks.engineClock)
2623 		sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
2624 				max_limits->sclk : minimum_clocks.engineClock;
2625 
2626 	if (mclk < minimum_clocks.memoryClock)
2627 		mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
2628 				max_limits->mclk : minimum_clocks.memoryClock;
2629 
2630 	smu7_ps->performance_levels[0].engine_clock = sclk;
2631 	smu7_ps->performance_levels[0].memory_clock = mclk;
2632 
2633 	smu7_ps->performance_levels[1].engine_clock =
2634 		(smu7_ps->performance_levels[1].engine_clock >=
2635 				smu7_ps->performance_levels[0].engine_clock) ?
2636 						smu7_ps->performance_levels[1].engine_clock :
2637 						smu7_ps->performance_levels[0].engine_clock;
2638 
2639 	if (disable_mclk_switching) {
2640 		if (mclk < smu7_ps->performance_levels[1].memory_clock)
2641 			mclk = smu7_ps->performance_levels[1].memory_clock;
2642 
2643 		smu7_ps->performance_levels[0].memory_clock = mclk;
2644 		smu7_ps->performance_levels[1].memory_clock = mclk;
2645 	} else {
2646 		if (smu7_ps->performance_levels[1].memory_clock <
2647 				smu7_ps->performance_levels[0].memory_clock)
2648 			smu7_ps->performance_levels[1].memory_clock =
2649 					smu7_ps->performance_levels[0].memory_clock;
2650 	}
2651 
2652 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2653 			PHM_PlatformCaps_StablePState)) {
2654 		for (i = 0; i < smu7_ps->performance_level_count; i++) {
2655 			smu7_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
2656 			smu7_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
2657 			smu7_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
2658 			smu7_ps->performance_levels[i].pcie_lane = data->pcie_lane_performance.max;
2659 		}
2660 	}
2661 	return 0;
2662 }
2663 
2664 
2665 static int smu7_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
2666 {
2667 	struct pp_power_state  *ps;
2668 	struct smu7_power_state  *smu7_ps;
2669 
2670 	if (hwmgr == NULL)
2671 		return -EINVAL;
2672 
2673 	ps = hwmgr->request_ps;
2674 
2675 	if (ps == NULL)
2676 		return -EINVAL;
2677 
2678 	smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
2679 
2680 	if (low)
2681 		return smu7_ps->performance_levels[0].memory_clock;
2682 	else
2683 		return smu7_ps->performance_levels
2684 				[smu7_ps->performance_level_count-1].memory_clock;
2685 }
2686 
2687 static int smu7_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
2688 {
2689 	struct pp_power_state  *ps;
2690 	struct smu7_power_state  *smu7_ps;
2691 
2692 	if (hwmgr == NULL)
2693 		return -EINVAL;
2694 
2695 	ps = hwmgr->request_ps;
2696 
2697 	if (ps == NULL)
2698 		return -EINVAL;
2699 
2700 	smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
2701 
2702 	if (low)
2703 		return smu7_ps->performance_levels[0].engine_clock;
2704 	else
2705 		return smu7_ps->performance_levels
2706 				[smu7_ps->performance_level_count-1].engine_clock;
2707 }
2708 
2709 static int smu7_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
2710 					struct pp_hw_power_state *hw_ps)
2711 {
2712 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2713 	struct smu7_power_state *ps = (struct smu7_power_state *)hw_ps;
2714 	ATOM_FIRMWARE_INFO_V2_2 *fw_info;
2715 	uint16_t size;
2716 	uint8_t frev, crev;
2717 	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
2718 
2719 	/* First retrieve the Boot clocks and VDDC from the firmware info table.
2720 	 * We assume here that fw_info is unchanged if this call fails.
2721 	 */
2722 	fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table(
2723 			hwmgr->device, index,
2724 			&size, &frev, &crev);
2725 	if (!fw_info)
2726 		/* During a test, there is no firmware info table. */
2727 		return 0;
2728 
2729 	/* Patch the state. */
2730 	data->vbios_boot_state.sclk_bootup_value =
2731 			le32_to_cpu(fw_info->ulDefaultEngineClock);
2732 	data->vbios_boot_state.mclk_bootup_value =
2733 			le32_to_cpu(fw_info->ulDefaultMemoryClock);
2734 	data->vbios_boot_state.mvdd_bootup_value =
2735 			le16_to_cpu(fw_info->usBootUpMVDDCVoltage);
2736 	data->vbios_boot_state.vddc_bootup_value =
2737 			le16_to_cpu(fw_info->usBootUpVDDCVoltage);
2738 	data->vbios_boot_state.vddci_bootup_value =
2739 			le16_to_cpu(fw_info->usBootUpVDDCIVoltage);
2740 	data->vbios_boot_state.pcie_gen_bootup_value =
2741 			smu7_get_current_pcie_speed(hwmgr);
2742 
2743 	data->vbios_boot_state.pcie_lane_bootup_value =
2744 			(uint16_t)smu7_get_current_pcie_lane_number(hwmgr);
2745 
2746 	/* set boot power state */
2747 	ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value;
2748 	ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value;
2749 	ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value;
2750 	ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value;
2751 
2752 	return 0;
2753 }
2754 
2755 static int smu7_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr)
2756 {
2757 	int result;
2758 	unsigned long ret = 0;
2759 
2760 	if (hwmgr->pp_table_version == PP_TABLE_V0) {
2761 		result = pp_tables_get_num_of_entries(hwmgr, &ret);
2762 		return result ? 0 : ret;
2763 	} else if (hwmgr->pp_table_version == PP_TABLE_V1) {
2764 		result = get_number_of_powerplay_table_entries_v1_0(hwmgr);
2765 		return result;
2766 	}
2767 	return 0;
2768 }
2769 
2770 static int smu7_get_pp_table_entry_callback_func_v1(struct pp_hwmgr *hwmgr,
2771 		void *state, struct pp_power_state *power_state,
2772 		void *pp_table, uint32_t classification_flag)
2773 {
2774 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2775 	struct smu7_power_state  *smu7_power_state =
2776 			(struct smu7_power_state *)(&(power_state->hardware));
2777 	struct smu7_performance_level *performance_level;
2778 	ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
2779 	ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
2780 			(ATOM_Tonga_POWERPLAYTABLE *)pp_table;
2781 	PPTable_Generic_SubTable_Header *sclk_dep_table =
2782 			(PPTable_Generic_SubTable_Header *)
2783 			(((unsigned long)powerplay_table) +
2784 				le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
2785 
2786 	ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
2787 			(ATOM_Tonga_MCLK_Dependency_Table *)
2788 			(((unsigned long)powerplay_table) +
2789 				le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
2790 
2791 	/* The following fields are not initialized here: id orderedList allStatesList */
2792 	power_state->classification.ui_label =
2793 			(le16_to_cpu(state_entry->usClassification) &
2794 			ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
2795 			ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
2796 	power_state->classification.flags = classification_flag;
2797 	/* NOTE: There is a classification2 flag in BIOS that is not being used right now */
2798 
2799 	power_state->classification.temporary_state = false;
2800 	power_state->classification.to_be_deleted = false;
2801 
2802 	power_state->validation.disallowOnDC =
2803 			(0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
2804 					ATOM_Tonga_DISALLOW_ON_DC));
2805 
2806 	power_state->pcie.lanes = 0;
2807 
2808 	power_state->display.disableFrameModulation = false;
2809 	power_state->display.limitRefreshrate = false;
2810 	power_state->display.enableVariBright =
2811 			(0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
2812 					ATOM_Tonga_ENABLE_VARIBRIGHT));
2813 
2814 	power_state->validation.supportedPowerLevels = 0;
2815 	power_state->uvd_clocks.VCLK = 0;
2816 	power_state->uvd_clocks.DCLK = 0;
2817 	power_state->temperatures.min = 0;
2818 	power_state->temperatures.max = 0;
2819 
2820 	performance_level = &(smu7_power_state->performance_levels
2821 			[smu7_power_state->performance_level_count++]);
2822 
2823 	PP_ASSERT_WITH_CODE(
2824 			(smu7_power_state->performance_level_count < smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_GRAPHICS)),
2825 			"Performance levels exceeds SMC limit!",
2826 			return -EINVAL);
2827 
2828 	PP_ASSERT_WITH_CODE(
2829 			(smu7_power_state->performance_level_count <=
2830 					hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
2831 			"Performance levels exceeds Driver limit!",
2832 			return -EINVAL);
2833 
2834 	/* Performance levels are arranged from low to high. */
2835 	performance_level->memory_clock = mclk_dep_table->entries
2836 			[state_entry->ucMemoryClockIndexLow].ulMclk;
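	/* ucRevId selects the SCLK dependency table layout: 0 = Tonga format, 1 = Polaris format. */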
2837 	if (sclk_dep_table->ucRevId == 0)
2838 		performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
2839 			[state_entry->ucEngineClockIndexLow].ulSclk;
2840 	else if (sclk_dep_table->ucRevId == 1)
2841 		performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
2842 			[state_entry->ucEngineClockIndexLow].ulSclk;
2843 	performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
2844 			state_entry->ucPCIEGenLow);
2845 	performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
2846 			state_entry->ucPCIELaneHigh);
2847 
2848 	performance_level = &(smu7_power_state->performance_levels
2849 			[smu7_power_state->performance_level_count++]);
2850 	performance_level->memory_clock = mclk_dep_table->entries
2851 			[state_entry->ucMemoryClockIndexHigh].ulMclk;
2852 
2853 	if (sclk_dep_table->ucRevId == 0)
2854 		performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
2855 			[state_entry->ucEngineClockIndexHigh].ulSclk;
2856 	else if (sclk_dep_table->ucRevId == 1)
2857 		performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
2858 			[state_entry->ucEngineClockIndexHigh].ulSclk;
2859 
2860 	performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
2861 			state_entry->ucPCIEGenHigh);
2862 	performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
2863 			state_entry->ucPCIELaneHigh);
2864 
2865 	return 0;
2866 }
2867 
2868 static int smu7_get_pp_table_entry_v1(struct pp_hwmgr *hwmgr,
2869 		unsigned long entry_index, struct pp_power_state *state)
2870 {
2871 	int result;
2872 	struct smu7_power_state *ps;
2873 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2874 	struct phm_ppt_v1_information *table_info =
2875 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
2876 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
2877 			table_info->vdd_dep_on_mclk;
2878 
2879 	state->hardware.magic = PHM_VIslands_Magic;
2880 
2881 	ps = (struct smu7_power_state *)(&state->hardware);
2882 
2883 	result = get_powerplay_table_entry_v1_0(hwmgr, entry_index, state,
2884 			smu7_get_pp_table_entry_callback_func_v1);
2885 
2886 	/* This is the earliest point at which we have both the dependency tables and the
2887 	 * VBIOS boot state, since the powerplay table entry retrieval above provides it.
2888 	 * If there is only one VDDCI/MCLK level, check that it matches the VBIOS boot state.
2889 	 */
2890 	if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
2891 		if (dep_mclk_table->entries[0].clk !=
2892 				data->vbios_boot_state.mclk_bootup_value)
2893 			printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table "
2894 					"does not match VBIOS boot MCLK level");
2895 		if (dep_mclk_table->entries[0].vddci !=
2896 				data->vbios_boot_state.vddci_bootup_value)
2897 			printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table "
2898 					"does not match VBIOS boot VDDCI level");
2899 	}
2900 
2901 	/* set DC compatible flag if this state supports DC */
2902 	if (!state->validation.disallowOnDC)
2903 		ps->dc_compatible = true;
2904 
2905 	if (state->classification.flags & PP_StateClassificationFlag_ACPI)
2906 		data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;
2907 
2908 	ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
2909 	ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
2910 
2911 	if (!result) {
2912 		uint32_t i;
2913 
2914 		switch (state->classification.ui_label) {
2915 		case PP_StateUILabel_Performance:
2916 			data->use_pcie_performance_levels = true;
2917 			for (i = 0; i < ps->performance_level_count; i++) {
2918 				if (data->pcie_gen_performance.max <
2919 						ps->performance_levels[i].pcie_gen)
2920 					data->pcie_gen_performance.max =
2921 							ps->performance_levels[i].pcie_gen;
2922 
2923 				if (data->pcie_gen_performance.min >
2924 						ps->performance_levels[i].pcie_gen)
2925 					data->pcie_gen_performance.min =
2926 							ps->performance_levels[i].pcie_gen;
2927 
2928 				if (data->pcie_lane_performance.max <
2929 						ps->performance_levels[i].pcie_lane)
2930 					data->pcie_lane_performance.max =
2931 							ps->performance_levels[i].pcie_lane;
2932 				if (data->pcie_lane_performance.min >
2933 						ps->performance_levels[i].pcie_lane)
2934 					data->pcie_lane_performance.min =
2935 							ps->performance_levels[i].pcie_lane;
2936 			}
2937 			break;
2938 		case PP_StateUILabel_Battery:
2939 			data->use_pcie_power_saving_levels = true;
2940 
2941 			for (i = 0; i < ps->performance_level_count; i++) {
2942 				if (data->pcie_gen_power_saving.max <
2943 						ps->performance_levels[i].pcie_gen)
2944 					data->pcie_gen_power_saving.max =
2945 							ps->performance_levels[i].pcie_gen;
2946 
2947 				if (data->pcie_gen_power_saving.min >
2948 						ps->performance_levels[i].pcie_gen)
2949 					data->pcie_gen_power_saving.min =
2950 							ps->performance_levels[i].pcie_gen;
2951 
2952 				if (data->pcie_lane_power_saving.max <
2953 						ps->performance_levels[i].pcie_lane)
2954 					data->pcie_lane_power_saving.max =
2955 							ps->performance_levels[i].pcie_lane;
2956 
2957 				if (data->pcie_lane_power_saving.min >
2958 						ps->performance_levels[i].pcie_lane)
2959 					data->pcie_lane_power_saving.min =
2960 							ps->performance_levels[i].pcie_lane;
2961 			}
2962 			break;
2963 		default:
2964 			break;
2965 		}
2966 	}
2967 	return 0;
2968 }
2969 
2970 static int smu7_get_pp_table_entry_callback_func_v0(struct pp_hwmgr *hwmgr,
2971 					struct pp_hw_power_state *power_state,
2972 					unsigned int index, const void *clock_info)
2973 {
2974 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2975 	struct smu7_power_state  *ps = cast_phw_smu7_power_state(power_state);
2976 	const ATOM_PPLIB_CI_CLOCK_INFO *visland_clk_info = clock_info;
2977 	struct smu7_performance_level *performance_level;
2978 	uint32_t engine_clock, memory_clock;
2979 	uint16_t pcie_gen_from_bios;
2980 
2981 	engine_clock = visland_clk_info->ucEngineClockHigh << 16 | visland_clk_info->usEngineClockLow;
2982 	memory_clock = visland_clk_info->ucMemoryClockHigh << 16 | visland_clk_info->usMemoryClockLow;
2983 
2984 	if (!(data->mc_micro_code_feature & DISABLE_MC_LOADMICROCODE) && memory_clock > data->highest_mclk)
2985 		data->highest_mclk = memory_clock;
2986 
2987 	PP_ASSERT_WITH_CODE(
2988 			(ps->performance_level_count < smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_GRAPHICS)),
2989 			"Performance levels exceeds SMC limit!",
2990 			return -EINVAL);
2991 
2992 	PP_ASSERT_WITH_CODE(
2993 			(ps->performance_level_count <
2994 					hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
2995 			"Performance levels exceeds Driver limit, Skip!",
2996 			return 0);
2997 
2998 	performance_level = &(ps->performance_levels
2999 			[ps->performance_level_count++]);
3000 
3001 	/* Performance levels are arranged from low to high. */
3002 	performance_level->memory_clock = memory_clock;
3003 	performance_level->engine_clock = engine_clock;
3004 
3005 	pcie_gen_from_bios = visland_clk_info->ucPCIEGen;
3006 
3007 	performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, pcie_gen_from_bios);
3008 	performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, visland_clk_info->usPCIELane);
3009 
3010 	return 0;
3011 }
3012 
3013 static int smu7_get_pp_table_entry_v0(struct pp_hwmgr *hwmgr,
3014 		unsigned long entry_index, struct pp_power_state *state)
3015 {
3016 	int result;
3017 	struct smu7_power_state *ps;
3018 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3019 	struct phm_clock_voltage_dependency_table *dep_mclk_table =
3020 			hwmgr->dyn_state.vddci_dependency_on_mclk;
3021 
3022 	memset(&state->hardware, 0x00, sizeof(struct pp_hw_power_state));
3023 
3024 	state->hardware.magic = PHM_VIslands_Magic;
3025 
3026 	ps = (struct smu7_power_state *)(&state->hardware);
3027 
3028 	result = pp_tables_get_entry(hwmgr, entry_index, state,
3029 			smu7_get_pp_table_entry_callback_func_v0);
3030 
3031 	/*
3032 	 * This is the earliest point at which we have both the dependency
3033 	 * table and the VBIOS boot state, since the powerplay table entry
3034 	 * retrieval above provides the VBIOS boot state. If there is only
3035 	 * one VDDCI/MCLK level, check that it matches the VBIOS boot
3036 	 * state.
3037 	 */
3038 	if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
3039 		if (dep_mclk_table->entries[0].clk !=
3040 				data->vbios_boot_state.mclk_bootup_value)
3041 			printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table "
3042 					"does not match VBIOS boot MCLK level");
3043 		if (dep_mclk_table->entries[0].v !=
3044 				data->vbios_boot_state.vddci_bootup_value)
3045 			printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table "
3046 					"does not match VBIOS boot VDDCI level");
3047 	}
3048 
3049 	/* set DC compatible flag if this state supports DC */
3050 	if (!state->validation.disallowOnDC)
3051 		ps->dc_compatible = true;
3052 
3053 	if (state->classification.flags & PP_StateClassificationFlag_ACPI)
3054 		data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;
3055 
3056 	ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
3057 	ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
3058 
3059 	if (!result) {
3060 		uint32_t i;
3061 
3062 		switch (state->classification.ui_label) {
3063 		case PP_StateUILabel_Performance:
3064 			data->use_pcie_performance_levels = true;
3065 
3066 			for (i = 0; i < ps->performance_level_count; i++) {
3067 				if (data->pcie_gen_performance.max <
3068 						ps->performance_levels[i].pcie_gen)
3069 					data->pcie_gen_performance.max =
3070 							ps->performance_levels[i].pcie_gen;
3071 
3072 				if (data->pcie_gen_performance.min >
3073 						ps->performance_levels[i].pcie_gen)
3074 					data->pcie_gen_performance.min =
3075 							ps->performance_levels[i].pcie_gen;
3076 
3077 				if (data->pcie_lane_performance.max <
3078 						ps->performance_levels[i].pcie_lane)
3079 					data->pcie_lane_performance.max =
3080 							ps->performance_levels[i].pcie_lane;
3081 
3082 				if (data->pcie_lane_performance.min >
3083 						ps->performance_levels[i].pcie_lane)
3084 					data->pcie_lane_performance.min =
3085 							ps->performance_levels[i].pcie_lane;
3086 			}
3087 			break;
3088 		case PP_StateUILabel_Battery:
3089 			data->use_pcie_power_saving_levels = true;
3090 
3091 			for (i = 0; i < ps->performance_level_count; i++) {
3092 				if (data->pcie_gen_power_saving.max <
3093 						ps->performance_levels[i].pcie_gen)
3094 					data->pcie_gen_power_saving.max =
3095 							ps->performance_levels[i].pcie_gen;
3096 
3097 				if (data->pcie_gen_power_saving.min >
3098 						ps->performance_levels[i].pcie_gen)
3099 					data->pcie_gen_power_saving.min =
3100 							ps->performance_levels[i].pcie_gen;
3101 
3102 				if (data->pcie_lane_power_saving.max <
3103 						ps->performance_levels[i].pcie_lane)
3104 					data->pcie_lane_power_saving.max =
3105 							ps->performance_levels[i].pcie_lane;
3106 
3107 				if (data->pcie_lane_power_saving.min >
3108 						ps->performance_levels[i].pcie_lane)
3109 					data->pcie_lane_power_saving.min =
3110 							ps->performance_levels[i].pcie_lane;
3111 			}
3112 			break;
3113 		default:
3114 			break;
3115 		}
3116 	}
3117 	return 0;
3118 }
3119 
3120 static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr,
3121 		unsigned long entry_index, struct pp_power_state *state)
3122 {
3123 	if (hwmgr->pp_table_version == PP_TABLE_V0)
3124 		return smu7_get_pp_table_entry_v0(hwmgr, entry_index, state);
3125 	else if (hwmgr->pp_table_version == PP_TABLE_V1)
3126 		return smu7_get_pp_table_entry_v1(hwmgr, entry_index, state);
3127 
3128 	return 0;
3129 }
3130 
3131 static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx, int32_t *value)
3132 {
3133 	uint32_t sclk, mclk, activity_percent;
3134 	uint32_t offset;
3135 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3136 
3137 	switch (idx) {
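	/* Clock queries are answered by the SMC: send the query message and
	 * read the reply back from the SMC message argument register.
	 */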
3138 	case AMDGPU_PP_SENSOR_GFX_SCLK:
3139 		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
3140 		sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
3141 		*value = sclk;
3142 		return 0;
3143 	case AMDGPU_PP_SENSOR_GFX_MCLK:
3144 		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
3145 		mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
3146 		*value = mclk;
3147 		return 0;
3148 	case AMDGPU_PP_SENSOR_GPU_LOAD:
3149 		offset = data->soft_regs_start + smum_get_offsetof(hwmgr->smumgr,
3150 								SMU_SoftRegisters,
3151 								AverageGraphicsActivity);
3152 
3153 		activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
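		/* The activity counter is treated here as an 8.8 fixed-point
		 * percentage: add 0x80 to round, drop the fractional bits,
		 * then clamp to 100.
		 */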
3154 		activity_percent += 0x80;
3155 		activity_percent >>= 8;
3156 		*value = activity_percent > 100 ? 100 : activity_percent;
3157 		return 0;
3158 	case AMDGPU_PP_SENSOR_GPU_TEMP:
3159 		*value = smu7_thermal_get_temperature(hwmgr);
3160 		return 0;
3161 	case AMDGPU_PP_SENSOR_UVD_POWER:
3162 		*value = data->uvd_power_gated ? 0 : 1;
3163 		return 0;
3164 	case AMDGPU_PP_SENSOR_VCE_POWER:
3165 		*value = data->vce_power_gated ? 0 : 1;
3166 		return 0;
3167 	default:
3168 		return -EINVAL;
3169 	}
3170 }
3171 
3172 static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
3173 {
3174 	const struct phm_set_power_state_input *states =
3175 			(const struct phm_set_power_state_input *)input;
3176 	const struct smu7_power_state *smu7_ps =
3177 			cast_const_phw_smu7_power_state(states->pnew_state);
3178 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3179 	struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
3180 	uint32_t sclk = smu7_ps->performance_levels
3181 			[smu7_ps->performance_level_count - 1].engine_clock;
3182 	struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
3183 	uint32_t mclk = smu7_ps->performance_levels
3184 			[smu7_ps->performance_level_count - 1].memory_clock;
3185 	struct PP_Clocks min_clocks = {0};
3186 	uint32_t i;
3187 	struct cgs_display_info info = {0};
3188 
3189 	data->need_update_smu7_dpm_table = 0;
3190 
3191 	for (i = 0; i < sclk_table->count; i++) {
3192 		if (sclk == sclk_table->dpm_levels[i].value)
3193 			break;
3194 	}
3195 
3196 	if (i >= sclk_table->count)
3197 		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
3198 	else {
3199 	/* TODO: Check SCLK in DAL's minimum clocks
3200 	 * in case DeepSleep divider update is required.
3201 	 */
3202 		if (data->display_timing.min_clock_in_sr != min_clocks.engineClockInSR &&
3203 			(min_clocks.engineClockInSR >= SMU7_MINIMUM_ENGINE_CLOCK ||
3204 				data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK))
3205 			data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
3206 	}
3207 
3208 	for (i = 0; i < mclk_table->count; i++) {
3209 		if (mclk == mclk_table->dpm_levels[i].value)
3210 			break;
3211 	}
3212 
3213 	if (i >= mclk_table->count)
3214 		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
3215 
3216 	cgs_get_active_displays_info(hwmgr->device, &info);
3217 
3218 	if (data->display_timing.num_existing_displays != info.display_count)
3219 		data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
3220 
3221 	return 0;
3222 }
3223 
3224 static uint16_t smu7_get_maximum_link_speed(struct pp_hwmgr *hwmgr,
3225 		const struct smu7_power_state *smu7_ps)
3226 {
3227 	uint32_t i;
3228 	uint32_t sclk, max_sclk = 0;
3229 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3230 	struct smu7_dpm_table *dpm_table = &data->dpm_table;
3231 
3232 	for (i = 0; i < smu7_ps->performance_level_count; i++) {
3233 		sclk = smu7_ps->performance_levels[i].engine_clock;
3234 		if (max_sclk < sclk)
3235 			max_sclk = sclk;
3236 	}
3237 
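	/* The PCIe speed table is indexed in step with the SCLK DPM levels:
	 * use the level matching the highest requested SCLK, clamping to the
	 * last PCIe entry when the SCLK table is longer.
	 */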
3238 	for (i = 0; i < dpm_table->sclk_table.count; i++) {
3239 		if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk)
3240 			return (uint16_t) ((i >= dpm_table->pcie_speed_table.count) ?
3241 					dpm_table->pcie_speed_table.dpm_levels
3242 					[dpm_table->pcie_speed_table.count - 1].value :
3243 					dpm_table->pcie_speed_table.dpm_levels[i].value);
3244 	}
3245 
3246 	return 0;
3247 }
3248 
3249 static int smu7_request_link_speed_change_before_state_change(
3250 		struct pp_hwmgr *hwmgr, const void *input)
3251 {
3252 	const struct phm_set_power_state_input *states =
3253 			(const struct phm_set_power_state_input *)input;
3254 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3255 	const struct smu7_power_state *smu7_nps =
3256 			cast_const_phw_smu7_power_state(states->pnew_state);
3257 	const struct smu7_power_state *polaris10_cps =
3258 			cast_const_phw_smu7_power_state(states->pcurrent_state);
3259 
3260 	uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_nps);
3261 	uint16_t current_link_speed;
3262 
3263 	if (data->force_pcie_gen == PP_PCIEGenInvalid)
3264 		current_link_speed = smu7_get_maximum_link_speed(hwmgr, polaris10_cps);
3265 	else
3266 		current_link_speed = data->force_pcie_gen;
3267 
3268 	data->force_pcie_gen = PP_PCIEGenInvalid;
3269 	data->pspp_notify_required = false;
3270 
3271 	if (target_link_speed > current_link_speed) {
3272 		switch (target_link_speed) {
3273 		case PP_PCIEGen3:
3274 			if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false))
3275 				break;
3276 			data->force_pcie_gen = PP_PCIEGen2;
3277 			if (current_link_speed == PP_PCIEGen2)
3278 				break;
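			/* fall through - retry with a Gen2 request */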
3279 		case PP_PCIEGen2:
3280 			if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false))
3281 				break;
3282 		default:
3283 			data->force_pcie_gen = smu7_get_current_pcie_speed(hwmgr);
3284 			break;
3285 		}
3286 	} else {
3287 		if (target_link_speed < current_link_speed)
3288 			data->pspp_notify_required = true;
3289 	}
3290 
3291 	return 0;
3292 }
3293 
3294 static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
3295 {
3296 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3297 
3298 	if (0 == data->need_update_smu7_dpm_table)
3299 		return 0;
3300 
3301 	if ((0 == data->sclk_dpm_key_disabled) &&
3302 		(data->need_update_smu7_dpm_table &
3303 			(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
3304 		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
3305 				"Trying to freeze SCLK DPM when DPM is disabled",
3306 				);
3307 		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
3308 				PPSMC_MSG_SCLKDPM_FreezeLevel),
3309 				"Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
3310 				return -EINVAL);
3311 	}
3312 
3313 	if ((0 == data->mclk_dpm_key_disabled) &&
3314 		(data->need_update_smu7_dpm_table &
3315 		 DPMTABLE_OD_UPDATE_MCLK)) {
3316 		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
3317 				"Trying to freeze MCLK DPM when DPM is disabled",
3318 				);
3319 		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
3320 				PPSMC_MSG_MCLKDPM_FreezeLevel),
3321 				"Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
3322 				return -EINVAL);
3323 	}
3324 
3325 	return 0;
3326 }
3327 
3328 static int smu7_populate_and_upload_sclk_mclk_dpm_levels(
3329 		struct pp_hwmgr *hwmgr, const void *input)
3330 {
3331 	int result = 0;
3332 	const struct phm_set_power_state_input *states =
3333 			(const struct phm_set_power_state_input *)input;
3334 	const struct smu7_power_state *smu7_ps =
3335 			cast_const_phw_smu7_power_state(states->pnew_state);
3336 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3337 	uint32_t sclk = smu7_ps->performance_levels
3338 			[smu7_ps->performance_level_count - 1].engine_clock;
3339 	uint32_t mclk = smu7_ps->performance_levels
3340 			[smu7_ps->performance_level_count - 1].memory_clock;
3341 	struct smu7_dpm_table *dpm_table = &data->dpm_table;
3342 
3343 	struct smu7_dpm_table *golden_dpm_table = &data->golden_dpm_table;
3344 	uint32_t dpm_count, clock_percent;
3345 	uint32_t i;
3346 
3347 	if (0 == data->need_update_smu7_dpm_table)
3348 		return 0;
3349 
3350 	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
3351 		dpm_table->sclk_table.dpm_levels
3352 		[dpm_table->sclk_table.count - 1].value = sclk;
3353 
3354 		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
3355 		    phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
3356 		/* Need to do calculation based on the golden DPM table
3357 		 * as the Heatmap GPU Clock axis is also based on the default values
3358 		 */
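		/* e.g. if the requested sclk sits 10% above the golden top level,
		 * each intermediate level below it is scaled up by the same 10%.
		 */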
3359 			PP_ASSERT_WITH_CODE(
3360 				(golden_dpm_table->sclk_table.dpm_levels
3361 						[golden_dpm_table->sclk_table.count - 1].value != 0),
3362 				"Divide by 0!",
3363 				return -EINVAL);
3364 			dpm_count = dpm_table->sclk_table.count < 2 ? 0 : dpm_table->sclk_table.count - 2;
3365 
3366 			for (i = dpm_count; i > 1; i--) {
3367 				if (sclk > golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value) {
3368 					clock_percent =
3369 					      ((sclk
3370 						- golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value
3371 						) * 100)
3372 						/ golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value;
3373 
3374 					dpm_table->sclk_table.dpm_levels[i].value =
3375 							golden_dpm_table->sclk_table.dpm_levels[i].value +
3376 							(golden_dpm_table->sclk_table.dpm_levels[i].value *
3377 								clock_percent)/100;
3378 
3379 				} else if (golden_dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value > sclk) {
3380 					clock_percent =
3381 						((golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value
3382 						- sclk) * 100)
3383 						/ golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value;
3384 
3385 					dpm_table->sclk_table.dpm_levels[i].value =
3386 							golden_dpm_table->sclk_table.dpm_levels[i].value -
3387 							(golden_dpm_table->sclk_table.dpm_levels[i].value *
3388 									clock_percent) / 100;
3389 				} else
3390 					dpm_table->sclk_table.dpm_levels[i].value =
3391 							golden_dpm_table->sclk_table.dpm_levels[i].value;
3392 			}
3393 		}
3394 	}
3395 
3396 	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
3397 		dpm_table->mclk_table.dpm_levels
3398 			[dpm_table->mclk_table.count - 1].value = mclk;
3399 
3400 		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
3401 		    phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
3402 
3403 			PP_ASSERT_WITH_CODE(
3404 					(golden_dpm_table->mclk_table.dpm_levels
3405 						[golden_dpm_table->mclk_table.count-1].value != 0),
3406 					"Divide by 0!",
3407 					return -EINVAL);
3408 			dpm_count = dpm_table->mclk_table.count < 2 ? 0 : dpm_table->mclk_table.count - 2;
3409 			for (i = dpm_count; i > 1; i--) {
3410 				if (golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value < mclk) {
3411 					clock_percent = ((mclk -
3412 					golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value) * 100)
3413 					/ golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value;
3414 
3415 					dpm_table->mclk_table.dpm_levels[i].value =
3416 							golden_dpm_table->mclk_table.dpm_levels[i].value +
3417 							(golden_dpm_table->mclk_table.dpm_levels[i].value *
3418 							clock_percent) / 100;
3419 
3420 				} else if (golden_dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value > mclk) {
3421 					clock_percent = (
3422 					 (golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value - mclk)
3423 					* 100)
3424 					/ golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value;
3425 
3426 					dpm_table->mclk_table.dpm_levels[i].value =
3427 							golden_dpm_table->mclk_table.dpm_levels[i].value -
3428 							(golden_dpm_table->mclk_table.dpm_levels[i].value *
3429 									clock_percent) / 100;
3430 				} else
3431 					dpm_table->mclk_table.dpm_levels[i].value =
3432 							golden_dpm_table->mclk_table.dpm_levels[i].value;
3433 			}
3434 		}
3435 	}
3436 
3437 	if (data->need_update_smu7_dpm_table &
3438 			(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) {
3439 		result = smum_populate_all_graphic_levels(hwmgr);
3440 		PP_ASSERT_WITH_CODE((0 == result),
3441 				"Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
3442 				return result);
3443 	}
3444 
3445 	if (data->need_update_smu7_dpm_table &
3446 			(DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
3447 		/*populate MCLK dpm table to SMU7 */
3448 		result = smum_populate_all_memory_levels(hwmgr);
3449 		PP_ASSERT_WITH_CODE((0 == result),
3450 				"Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
3451 				return result);
3452 	}
3453 
3454 	return result;
3455 }
3456 
3457 static int smu7_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
3458 			  struct smu7_single_dpm_table *dpm_table,
3459 			uint32_t low_limit, uint32_t high_limit)
3460 {
3461 	uint32_t i;
3462 
3463 	for (i = 0; i < dpm_table->count; i++) {
3464 		if ((dpm_table->dpm_levels[i].value < low_limit)
3465 		|| (dpm_table->dpm_levels[i].value > high_limit))
3466 			dpm_table->dpm_levels[i].enabled = false;
3467 		else
3468 			dpm_table->dpm_levels[i].enabled = true;
3469 	}
3470 
3471 	return 0;
3472 }
3473 
3474 static int smu7_trim_dpm_states(struct pp_hwmgr *hwmgr,
3475 		const struct smu7_power_state *smu7_ps)
3476 {
3477 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3478 	uint32_t high_limit_count;
3479 
3480 	PP_ASSERT_WITH_CODE((smu7_ps->performance_level_count >= 1),
3481 			"power state did not have any performance level",
3482 			return -EINVAL);
3483 
3484 	high_limit_count = (1 == smu7_ps->performance_level_count) ? 0 : 1;
3485 
3486 	smu7_trim_single_dpm_states(hwmgr,
3487 			&(data->dpm_table.sclk_table),
3488 			smu7_ps->performance_levels[0].engine_clock,
3489 			smu7_ps->performance_levels[high_limit_count].engine_clock);
3490 
3491 	smu7_trim_single_dpm_states(hwmgr,
3492 			&(data->dpm_table.mclk_table),
3493 			smu7_ps->performance_levels[0].memory_clock,
3494 			smu7_ps->performance_levels[high_limit_count].memory_clock);
3495 
3496 	return 0;
3497 }
3498 
3499 static int smu7_generate_dpm_level_enable_mask(
3500 		struct pp_hwmgr *hwmgr, const void *input)
3501 {
3502 	int result;
3503 	const struct phm_set_power_state_input *states =
3504 			(const struct phm_set_power_state_input *)input;
3505 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3506 	const struct smu7_power_state *smu7_ps =
3507 			cast_const_phw_smu7_power_state(states->pnew_state);
3508 
3509 	result = smu7_trim_dpm_states(hwmgr, smu7_ps);
3510 	if (result)
3511 		return result;
3512 
3513 	data->dpm_level_enable_mask.sclk_dpm_enable_mask =
3514 			phm_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table);
3515 	data->dpm_level_enable_mask.mclk_dpm_enable_mask =
3516 			phm_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table);
3517 	data->dpm_level_enable_mask.pcie_dpm_enable_mask =
3518 			phm_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table);
3519 
3520 	return 0;
3521 }
3522 
3523 static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
3524 {
3525 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3526 
3527 	if (0 == data->need_update_smu7_dpm_table)
3528 		return 0;
3529 
3530 	if ((0 == data->sclk_dpm_key_disabled) &&
3531 		(data->need_update_smu7_dpm_table &
3532 		(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
3533 
3534 		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
3535 				"Trying to Unfreeze SCLK DPM when DPM is disabled",
3536 				);
3537 		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
3538 				PPSMC_MSG_SCLKDPM_UnfreezeLevel),
3539 			"Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
3540 			return -EINVAL);
3541 	}
3542 
3543 	if ((0 == data->mclk_dpm_key_disabled) &&
3544 		(data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
3545 
3546 		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
3547 				"Trying to Unfreeze MCLK DPM when DPM is disabled",
3548 				);
3549 		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
3550 				PPSMC_MSG_MCLKDPM_UnfreezeLevel),
3551 		    "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
3552 		    return -EINVAL);
3553 	}
3554 
3555 	data->need_update_smu7_dpm_table = 0;
3556 
3557 	return 0;
3558 }
3559 
3560 static int smu7_notify_link_speed_change_after_state_change(
3561 		struct pp_hwmgr *hwmgr, const void *input)
3562 {
3563 	const struct phm_set_power_state_input *states =
3564 			(const struct phm_set_power_state_input *)input;
3565 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3566 	const struct smu7_power_state *smu7_ps =
3567 			cast_const_phw_smu7_power_state(states->pnew_state);
3568 	uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_ps);
3569 	uint8_t  request;
3570 
3571 	if (data->pspp_notify_required) {
3572 		if (target_link_speed == PP_PCIEGen3)
3573 			request = PCIE_PERF_REQ_GEN3;
3574 		else if (target_link_speed == PP_PCIEGen2)
3575 			request = PCIE_PERF_REQ_GEN2;
3576 		else
3577 			request = PCIE_PERF_REQ_GEN1;
3578 
3579 		if (request == PCIE_PERF_REQ_GEN1 &&
3580 				smu7_get_current_pcie_speed(hwmgr) > 0)
3581 			return 0;
3582 
3583 		if (acpi_pcie_perf_request(hwmgr->device, request, false)) {
3584 			if (PP_PCIEGen2 == target_link_speed)
3585 				printk("PSPP request to switch to Gen2 from Gen3 Failed!");
3586 			else
3587 				printk("PSPP request to switch to Gen1 from Gen2 Failed!");
3588 		}
3589 	}
3590 
3591 	return 0;
3592 }
3593 
3594 static int smu7_notify_smc_display(struct pp_hwmgr *hwmgr)
3595 {
3596 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3597 
3598 	if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK)
3599 		smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3600 			(PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2);
3601 	return (smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ?  0 : -EINVAL;
3602 }
3603 
3604 static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
3605 {
3606 	int tmp_result, result = 0;
3607 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3608 
3609 	tmp_result = smu7_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
3610 	PP_ASSERT_WITH_CODE((0 == tmp_result),
3611 			"Failed to find DPM states clocks in DPM table!",
3612 			result = tmp_result);
3613 
3614 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3615 			PHM_PlatformCaps_PCIEPerformanceRequest)) {
3616 		tmp_result =
3617 			smu7_request_link_speed_change_before_state_change(hwmgr, input);
3618 		PP_ASSERT_WITH_CODE((0 == tmp_result),
3619 				"Failed to request link speed change before state change!",
3620 				result = tmp_result);
3621 	}
3622 
3623 	tmp_result = smu7_freeze_sclk_mclk_dpm(hwmgr);
3624 	PP_ASSERT_WITH_CODE((0 == tmp_result),
3625 			"Failed to freeze SCLK MCLK DPM!", result = tmp_result);
3626 
3627 	tmp_result = smu7_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
3628 	PP_ASSERT_WITH_CODE((0 == tmp_result),
3629 			"Failed to populate and upload SCLK MCLK DPM levels!",
3630 			result = tmp_result);
3631 
3632 	tmp_result = smu7_generate_dpm_level_enable_mask(hwmgr, input);
3633 	PP_ASSERT_WITH_CODE((0 == tmp_result),
3634 			"Failed to generate DPM level enabled mask!",
3635 			result = tmp_result);
3636 
3637 	tmp_result = smum_update_sclk_threshold(hwmgr);
3638 	PP_ASSERT_WITH_CODE((0 == tmp_result),
3639 			"Failed to update SCLK threshold!",
3640 			result = tmp_result);
3641 
3642 	tmp_result = smu7_notify_smc_display(hwmgr);
3643 	PP_ASSERT_WITH_CODE((0 == tmp_result),
3644 			"Failed to notify smc display settings!",
3645 			result = tmp_result);
3646 
3647 	tmp_result = smu7_unfreeze_sclk_mclk_dpm(hwmgr);
3648 	PP_ASSERT_WITH_CODE((0 == tmp_result),
3649 			"Failed to unfreeze SCLK MCLK DPM!",
3650 			result = tmp_result);
3651 
3652 	tmp_result = smu7_upload_dpm_level_enable_mask(hwmgr);
3653 	PP_ASSERT_WITH_CODE((0 == tmp_result),
3654 			"Failed to upload DPM level enabled mask!",
3655 			result = tmp_result);
3656 
3657 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3658 			PHM_PlatformCaps_PCIEPerformanceRequest)) {
3659 		tmp_result =
3660 			smu7_notify_link_speed_change_after_state_change(hwmgr, input);
3661 		PP_ASSERT_WITH_CODE((0 == tmp_result),
3662 				"Failed to notify link speed change after state change!",
3663 				result = tmp_result);
3664 	}
3665 	data->apply_optimized_settings = false;
3666 	return result;
3667 }
3668 
3669 static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm)
3670 {
3671 	hwmgr->thermal_controller.
3672 	advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;
3673 
3674 	if (phm_is_hw_access_blocked(hwmgr))
3675 		return 0;
3676 
3677 	return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3678 			PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
3679 }
3680 
3681 int smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
3682 {
3683 	PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
3684 
3685 	return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ?  0 : -1;
3686 }
3687 
3688 int smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
3689 {
3690 	uint32_t num_active_displays = 0;
3691 	struct cgs_display_info info = {0};
3692 
3693 	info.mode_info = NULL;
3694 	cgs_get_active_displays_info(hwmgr->device, &info);
3695 
3696 	num_active_displays = info.display_count;
3697 
3698 	if (num_active_displays > 1 && hwmgr->display_config.multi_monitor_in_sync != true)
3699 		smu7_notify_smc_display_change(hwmgr, false);
3700 
3701 	return 0;
3702 }
3703 
3704 /**
3705 * Programs the display gap
3706 *
3707 * @param    hwmgr  the address of the powerplay hardware manager.
3708 * @return   always OK
3709 */
3710 int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
3711 {
3712 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3713 	uint32_t num_active_displays = 0;
3714 	uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
3715 	uint32_t display_gap2;
3716 	uint32_t pre_vbi_time_in_us;
3717 	uint32_t frame_time_in_us;
3718 	uint32_t ref_clock;
3719 	uint32_t refresh_rate = 0;
3720 	struct cgs_display_info info = {0};
3721 	struct cgs_mode_info mode_info = {0};
3722 
3723 	info.mode_info = &mode_info;
3724 	cgs_get_active_displays_info(hwmgr->device, &info);
3725 	num_active_displays = info.display_count;
3726 
3727 	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (num_active_displays > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
3728 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap);
3729 
3730 	ref_clock = mode_info.ref_clock;
3731 	refresh_rate = mode_info.refresh_rate;
3732 
3733 	if (0 == refresh_rate)
3734 		refresh_rate = 60;
3735 
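	/* Derive the frame time from the refresh rate, keep a 200 us margin in
	 * front of the VBI, and store twice the frame time in 100 us units for
	 * the SMC (sent later via PPSMC_MSG_SetVBITimeout).
	 */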
3736 	frame_time_in_us = 1000000 / refresh_rate;
3737 
3738 	pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
3739 
3740 	data->frame_time_x2 = frame_time_in_us * 2 / 100;
3741 
3742 	display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
3743 
3744 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);
3745 
3746 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
3747 			data->soft_regs_start + smum_get_offsetof(hwmgr->smumgr,
3748 							SMU_SoftRegisters,
3749 							PreVBlankGap), 0x64);
3750 
3751 	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
3752 			data->soft_regs_start + smum_get_offsetof(hwmgr->smumgr,
3753 							SMU_SoftRegisters,
3754 							VBlankTimeout),
3755 					(frame_time_in_us - pre_vbi_time_in_us));
3756 
3757 	return 0;
3758 }
3759 
3760 int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
3761 {
3762 	return smu7_program_display_gap(hwmgr);
3763 }
3764 
3765 /**
3766 *  Set maximum target operating fan output RPM
3767 *
3768 * @param    hwmgr:  the address of the powerplay hardware manager.
3769 * @param    us_max_fan_rpm:  max operating fan RPM value.
3770 * @return   The response that came from the SMC.
3771 */
3772 static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_rpm)
3773 {
3774 	hwmgr->thermal_controller.
3775 	advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm;
3776 
3777 	if (phm_is_hw_access_blocked(hwmgr))
3778 		return 0;
3779 
3780 	return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3781 			PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
3782 }
3783 
3784 int smu7_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
3785 					const void *thermal_interrupt_info)
3786 {
3787 	return 0;
3788 }
3789 
3790 bool smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
3791 {
3792 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3793 	bool is_update_required = false;
3794 	struct cgs_display_info info = {0, 0, NULL};
3795 
3796 	cgs_get_active_displays_info(hwmgr->device, &info);
3797 
3798 	if (data->display_timing.num_existing_displays != info.display_count)
3799 		is_update_required = true;
3800 
3801 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
3802 		if (data->display_timing.min_clock_in_sr != hwmgr->display_config.min_core_set_clock_in_sr &&
3803 			(data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK ||
3804 			hwmgr->display_config.min_core_set_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK))
3805 			is_update_required = true;
3806 	}
3807 	return is_update_required;
3808 }
3809 
3810 static inline bool smu7_are_power_levels_equal(const struct smu7_performance_level *pl1,
3811 							   const struct smu7_performance_level *pl2)
3812 {
3813 	return ((pl1->memory_clock == pl2->memory_clock) &&
3814 		  (pl1->engine_clock == pl2->engine_clock) &&
3815 		  (pl1->pcie_gen == pl2->pcie_gen) &&
3816 		  (pl1->pcie_lane == pl2->pcie_lane));
3817 }
3818 
3819 int smu7_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
3820 {
3821 	const struct smu7_power_state *psa;
3822 	const struct smu7_power_state *psb;
3823 	int i;
3824 
3825 	if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
3826 		return -EINVAL;
3827 
3828 	psa = cast_const_phw_smu7_power_state(pstate1);
3829 	psb = cast_const_phw_smu7_power_state(pstate2);
3830 	/* If the two states don't even have the same number of performance levels they cannot be the same state. */
3831 	if (psa->performance_level_count != psb->performance_level_count) {
3832 		*equal = false;
3833 		return 0;
3834 	}
3835 
3836 	for (i = 0; i < psa->performance_level_count; i++) {
3837 		if (!smu7_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
3838 			/* If we have found even one performance level pair that is different the states are different. */
3839 			*equal = false;
3840 			return 0;
3841 		}
3842 	}
3843 
3844 	/* If all performance levels are the same, try to use the UVD clocks to break the tie. */
3845 	*equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
3846 	*equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
3847 	*equal &= (psa->sclk_threshold == psb->sclk_threshold);
3848 
3849 	return 0;
3850 }
3851 
3852 int smu7_upload_mc_firmware(struct pp_hwmgr *hwmgr)
3853 {
3854 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3855 
3856 	uint32_t vbios_version;
3857 	uint32_t tmp;
3858 
3859 	/* Read MC indirect register offset 0x9F bits [3:0] to see
3860 	 * if VBIOS has already loaded a full version of MC ucode
3861 	 * or not.
3862 	 */
3863 
3864 	smu7_get_mc_microcode_version(hwmgr);
3865 	vbios_version = hwmgr->microcode_version_info.MC & 0xf;
3866 
3867 	data->need_long_memory_training = false;
3868 
3869 	cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX,
3870 							ixMC_IO_DEBUG_UP_13);
3871 	tmp = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);
3872 
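	/* Bit 23 of MC_IO_DEBUG_UP_13 selects between the nominal
	 * MEM_LATENCY_HIGH/MEM_LATENCY_LOW pair and a flat 330 fallback.
	 */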
3873 	if (tmp & (1 << 23)) {
3874 		data->mem_latency_high = MEM_LATENCY_HIGH;
3875 		data->mem_latency_low = MEM_LATENCY_LOW;
3876 	} else {
3877 		data->mem_latency_high = 330;
3878 		data->mem_latency_low = 330;
3879 	}
3880 
3881 	return 0;
3882 }
3883 
3884 static int smu7_read_clock_registers(struct pp_hwmgr *hwmgr)
3885 {
3886 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3887 
3888 	data->clock_registers.vCG_SPLL_FUNC_CNTL         =
3889 		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL);
3890 	data->clock_registers.vCG_SPLL_FUNC_CNTL_2       =
3891 		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2);
3892 	data->clock_registers.vCG_SPLL_FUNC_CNTL_3       =
3893 		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_3);
3894 	data->clock_registers.vCG_SPLL_FUNC_CNTL_4       =
3895 		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4);
3896 	data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM   =
3897 		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM);
3898 	data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 =
3899 		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM_2);
3900 	data->clock_registers.vDLL_CNTL                  =
3901 		cgs_read_register(hwmgr->device, mmDLL_CNTL);
3902 	data->clock_registers.vMCLK_PWRMGT_CNTL          =
3903 		cgs_read_register(hwmgr->device, mmMCLK_PWRMGT_CNTL);
3904 	data->clock_registers.vMPLL_AD_FUNC_CNTL         =
3905 		cgs_read_register(hwmgr->device, mmMPLL_AD_FUNC_CNTL);
3906 	data->clock_registers.vMPLL_DQ_FUNC_CNTL         =
3907 		cgs_read_register(hwmgr->device, mmMPLL_DQ_FUNC_CNTL);
3908 	data->clock_registers.vMPLL_FUNC_CNTL            =
3909 		cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL);
3910 	data->clock_registers.vMPLL_FUNC_CNTL_1          =
3911 		cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_1);
3912 	data->clock_registers.vMPLL_FUNC_CNTL_2          =
3913 		cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_2);
3914 	data->clock_registers.vMPLL_SS1                  =
3915 		cgs_read_register(hwmgr->device, mmMPLL_SS1);
3916 	data->clock_registers.vMPLL_SS2                  =
3917 		cgs_read_register(hwmgr->device, mmMPLL_SS2);
3918 	return 0;
3919 
3920 }
3921 
3922 /**
3923  * Find out if memory is GDDR5.
3924  *
3925  * @param    hwmgr  the address of the powerplay hardware manager.
3926  * @return   always 0
3927  */
3928 static int smu7_get_memory_type(struct pp_hwmgr *hwmgr)
3929 {
3930 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3931 	uint32_t temp;
3932 
3933 	temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0);
3934 
3935 	data->is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE ==
3936 			((temp & MC_SEQ_MISC0_GDDR5_MASK) >>
3937 			 MC_SEQ_MISC0_GDDR5_SHIFT));
3938 
3939 	return 0;
3940 }
3941 
3942 /**
3943  * Enables Dynamic Power Management by SMC
3944  *
3945  * @param    hwmgr  the address of the powerplay hardware manager.
3946  * @return   always 0
3947  */
3948 static int smu7_enable_acpi_power_management(struct pp_hwmgr *hwmgr)
3949 {
3950 	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
3951 			GENERAL_PWRMGT, STATIC_PM_EN, 1);
3952 
3953 	return 0;
3954 }
3955 
3956 /**
3957  * Initialize PowerGating States for different engines
3958  *
3959  * @param    hwmgr  the address of the powerplay hardware manager.
3960  * @return   always 0
3961  */
3962 static int smu7_init_power_gate_state(struct pp_hwmgr *hwmgr)
3963 {
3964 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3965 
3966 	data->uvd_power_gated = false;
3967 	data->vce_power_gated = false;
3968 	data->samu_power_gated = false;
3969 
3970 	return 0;
3971 }
3972 
3973 static int smu7_init_sclk_threshold(struct pp_hwmgr *hwmgr)
3974 {
3975 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3976 
3977 	data->low_sclk_interrupt_threshold = 0;
3978 	return 0;
3979 }
3980 
3981 int smu7_setup_asic_task(struct pp_hwmgr *hwmgr)
3982 {
3983 	int tmp_result, result = 0;
3984 
3985 	smu7_upload_mc_firmware(hwmgr);
3986 
3987 	tmp_result = smu7_read_clock_registers(hwmgr);
3988 	PP_ASSERT_WITH_CODE((0 == tmp_result),
3989 			"Failed to read clock registers!", result = tmp_result);
3990 
3991 	tmp_result = smu7_get_memory_type(hwmgr);
3992 	PP_ASSERT_WITH_CODE((0 == tmp_result),
3993 			"Failed to get memory type!", result = tmp_result);
3994 
3995 	tmp_result = smu7_enable_acpi_power_management(hwmgr);
3996 	PP_ASSERT_WITH_CODE((0 == tmp_result),
3997 			"Failed to enable ACPI power management!", result = tmp_result);
3998 
3999 	tmp_result = smu7_init_power_gate_state(hwmgr);
4000 	PP_ASSERT_WITH_CODE((0 == tmp_result),
4001 			"Failed to init power gate state!", result = tmp_result);
4002 
4003 	tmp_result = smu7_get_mc_microcode_version(hwmgr);
4004 	PP_ASSERT_WITH_CODE((0 == tmp_result),
4005 			"Failed to get MC microcode version!", result = tmp_result);
4006 
4007 	tmp_result = smu7_init_sclk_threshold(hwmgr);
4008 	PP_ASSERT_WITH_CODE((0 == tmp_result),
4009 			"Failed to init sclk threshold!", result = tmp_result);
4010 
4011 	return result;
4012 }
4013 
4014 static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
4015 		enum pp_clock_type type, uint32_t mask)
4016 {
4017 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4018 
4019 	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
4020 		return -EINVAL;
4021 
4022 	switch (type) {
4023 	case PP_SCLK:
4024 		if (!data->sclk_dpm_key_disabled)
4025 			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
4026 					PPSMC_MSG_SCLKDPM_SetEnabledMask,
4027 					data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
4028 		break;
4029 	case PP_MCLK:
4030 		if (!data->mclk_dpm_key_disabled)
4031 			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
4032 					PPSMC_MSG_MCLKDPM_SetEnabledMask,
4033 					data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
4034 		break;
4035 	case PP_PCIE:
4036 	{
4037 		uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
4038 		uint32_t level = 0;
4039 
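		/* PCIe DPM can only force a single level, so use the highest
		 * set bit of the requested mask.
		 */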
4040 		while (tmp >>= 1)
4041 			level++;
4042 
4043 		if (!data->pcie_dpm_key_disabled)
4044 			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
4045 					PPSMC_MSG_PCIeDPM_ForceLevel,
4046 					level);
4047 		break;
4048 	}
4049 	default:
4050 		break;
4051 	}
4052 
4053 	return 0;
4054 }
4055 
4056 static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
4057 		enum pp_clock_type type, char *buf)
4058 {
4059 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4060 	struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
4061 	struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
4062 	struct smu7_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
4063 	int i, now, size = 0;
4064 	uint32_t clock, pcie_speed;
4065 
4066 	switch (type) {
4067 	case PP_SCLK:
4068 		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
4069 		clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
4070 
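		/* Find the first DPM level at or above the current clock so it
		 * can be flagged with '*' in the output.
		 */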
4071 		for (i = 0; i < sclk_table->count; i++) {
4072 			if (clock > sclk_table->dpm_levels[i].value)
4073 				continue;
4074 			break;
4075 		}
4076 		now = i;
4077 
4078 		for (i = 0; i < sclk_table->count; i++)
4079 			size += sprintf(buf + size, "%d: %uMhz %s\n",
4080 					i, sclk_table->dpm_levels[i].value / 100,
4081 					(i == now) ? "*" : "");
4082 		break;
4083 	case PP_MCLK:
4084 		smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
4085 		clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
4086 
4087 		for (i = 0; i < mclk_table->count; i++) {
4088 			if (clock > mclk_table->dpm_levels[i].value)
4089 				continue;
4090 			break;
4091 		}
4092 		now = i;
4093 
4094 		for (i = 0; i < mclk_table->count; i++)
4095 			size += sprintf(buf + size, "%d: %uMhz %s\n",
4096 					i, mclk_table->dpm_levels[i].value / 100,
4097 					(i == now) ? "*" : "");
4098 		break;
4099 	case PP_PCIE:
4100 		pcie_speed = smu7_get_current_pcie_speed(hwmgr);
4101 		for (i = 0; i < pcie_table->count; i++) {
4102 			if (pcie_speed != pcie_table->dpm_levels[i].value)
4103 				continue;
4104 			break;
4105 		}
4106 		now = i;
4107 
4108 		for (i = 0; i < pcie_table->count; i++)
4109 			size += sprintf(buf + size, "%d: %s %s\n", i,
4110 					(pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x8" :
4111 					(pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" :
4112 					(pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "",
4113 					(i == now) ? "*" : "");
4114 		break;
4115 	default:
4116 		break;
4117 	}
4118 	return size;
4119 }
4120 
4121 static int smu7_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
4122 {
4123 	if (mode) {
4124 		/* stop auto-manage */
4125 		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4126 				PHM_PlatformCaps_MicrocodeFanControl))
4127 			smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
4128 		smu7_fan_ctrl_set_static_mode(hwmgr, mode);
4129 	} else
4130 		/* restart auto-manage */
4131 		smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr);
4132 
4133 	return 0;
4134 }
4135 
4136 static int smu7_get_fan_control_mode(struct pp_hwmgr *hwmgr)
4137 {
4138 	if (hwmgr->fan_ctrl_is_in_default_mode)
4139 		return hwmgr->fan_ctrl_default_mode;
4140 	else
4141 		return PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
4142 				CG_FDO_CTRL2, FDO_PWM_MODE);
4143 }
4144 
4145 static int smu7_get_sclk_od(struct pp_hwmgr *hwmgr)
4146 {
4147 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4148 	struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
4149 	struct smu7_single_dpm_table *golden_sclk_table =
4150 			&(data->golden_dpm_table.sclk_table);
4151 	int value;
4152 
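	/* Overdrive is reported as the percentage by which the current top
	 * SCLK level exceeds the golden (default) top level.
	 */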
4153 	value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
4154 			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
4155 			100 /
4156 			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
4157 
4158 	return value;
4159 }
4160 
4161 static int smu7_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
4162 {
4163 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4164 	struct smu7_single_dpm_table *golden_sclk_table =
4165 			&(data->golden_dpm_table.sclk_table);
4166 	struct pp_power_state  *ps;
4167 	struct smu7_power_state  *smu7_ps;
4168 
4169 	if (value > 20)
4170 		value = 20;
4171 
4172 	ps = hwmgr->request_ps;
4173 
4174 	if (ps == NULL)
4175 		return -EINVAL;
4176 
4177 	smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
4178 
4179 	smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].engine_clock =
4180 			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
4181 			value / 100 +
4182 			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
4183 
4184 	return 0;
4185 }
4186 
4187 static int smu7_get_mclk_od(struct pp_hwmgr *hwmgr)
4188 {
4189 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4190 	struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
4191 	struct smu7_single_dpm_table *golden_mclk_table =
4192 			&(data->golden_dpm_table.mclk_table);
4193 	int value;
4194 
4195 	value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
4196 			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
4197 			100 /
4198 			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
4199 
4200 	return value;
4201 }
4202 
4203 static int smu7_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
4204 {
4205 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4206 	struct smu7_single_dpm_table *golden_mclk_table =
4207 			&(data->golden_dpm_table.mclk_table);
4208 	struct pp_power_state  *ps;
4209 	struct smu7_power_state  *smu7_ps;
4210 
4211 	if (value > 20)
4212 		value = 20;
4213 
4214 	ps = hwmgr->request_ps;
4215 
4216 	if (ps == NULL)
4217 		return -EINVAL;
4218 
4219 	smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
4220 
4221 	smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].memory_clock =
4222 			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
4223 			value / 100 +
4224 			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
4225 
4226 	return 0;
4227 }
4228 
4229 
4230 static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
4231 {
4232 	struct phm_ppt_v1_information *table_info =
4233 			(struct phm_ppt_v1_information *)hwmgr->pptable;
4234 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table = NULL;
4235 	struct phm_clock_voltage_dependency_table *sclk_table;
4236 	int i;
4237 
4238 	if (hwmgr->pp_table_version == PP_TABLE_V1) {
4239 		if (table_info == NULL || table_info->vdd_dep_on_sclk == NULL)
4240 			return -EINVAL;
4241 		dep_sclk_table = table_info->vdd_dep_on_sclk;
4242 		for (i = 0; i < dep_sclk_table->count; i++) {
4243 			clocks->clock[i] = dep_sclk_table->entries[i].clk;
4244 			clocks->count++;
4245 		}
4246 	} else if (hwmgr->pp_table_version == PP_TABLE_V0) {
4247 		sclk_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
4248 		for (i = 0; i < sclk_table->count; i++) {
4249 			clocks->clock[i] = sclk_table->entries[i].clk;
4250 			clocks->count++;
4251 		}
4252 	}
4253 
4254 	return 0;
4255 }
4256 
4257 static uint32_t smu7_get_mem_latency(struct pp_hwmgr *hwmgr, uint32_t clk)
4258 {
4259 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4260 
4261 	if (clk >= MEM_FREQ_LOW_LATENCY && clk < MEM_FREQ_HIGH_LATENCY)
4262 		return data->mem_latency_high;
4263 	else if (clk >= MEM_FREQ_HIGH_LATENCY)
4264 		return data->mem_latency_low;
4265 	else
4266 		return MEM_LATENCY_ERR;
4267 }
4268 
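/*
 * Fill @clocks with the memory (MCLK) levels from the power-play table;
 * for v1 tables each level is also annotated with its expected latency
 * via smu7_get_mem_latency().
 */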
4269 static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
4270 {
4271 	struct phm_ppt_v1_information *table_info =
4272 			(struct phm_ppt_v1_information *)hwmgr->pptable;
4273 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
4274 	int i;
4275 	struct phm_clock_voltage_dependency_table *mclk_table;
4276 
4277 	if (hwmgr->pp_table_version == PP_TABLE_V1) {
4278 		if (table_info == NULL)
4279 			return -EINVAL;
4280 		dep_mclk_table = table_info->vdd_dep_on_mclk;
4281 		for (i = 0; i < dep_mclk_table->count; i++) {
4282 			clocks->clock[i] = dep_mclk_table->entries[i].clk;
4283 			clocks->latency[i] = smu7_get_mem_latency(hwmgr,
4284 						dep_mclk_table->entries[i].clk);
4285 			clocks->count++;
4286 		}
4287 	} else if (hwmgr->pp_table_version == PP_TABLE_V0) {
4288 		mclk_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
4289 		for (i = 0; i < mclk_table->count; i++) {
4290 			clocks->clock[i] = mclk_table->entries[i].clk;
4291 			clocks->count++;
4292 		}
4293 	}
4294 	return 0;
4295 }
4296 
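/* Dispatch clock queries by type: engine (SCLK) or memory (MCLK) levels. */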
4297 static int smu7_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type,
4298 						struct amd_pp_clocks *clocks)
4299 {
4300 	switch (type) {
4301 	case amd_pp_sys_clock:
4302 		smu7_get_sclks(hwmgr, clocks);
4303 		break;
4304 	case amd_pp_mem_clock:
4305 		smu7_get_mclks(hwmgr, clocks);
4306 		break;
4307 	default:
4308 		return -EINVAL;
4309 	}
4310 
4311 	return 0;
4312 }
4313 
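/*
 * Dispatch table wiring the smu7 implementations into the generic
 * pp_hwmgr_func interface used by the power-play core.
 */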
4314 static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
4315 	.backend_init = &smu7_hwmgr_backend_init,
4316 	.backend_fini = &phm_hwmgr_backend_fini,
4317 	.asic_setup = &smu7_setup_asic_task,
4318 	.dynamic_state_management_enable = &smu7_enable_dpm_tasks,
4319 	.apply_state_adjust_rules = smu7_apply_state_adjust_rules,
4320 	.force_dpm_level = &smu7_force_dpm_level,
4321 	.power_state_set = smu7_set_power_state_tasks,
4322 	.get_power_state_size = smu7_get_power_state_size,
4323 	.get_mclk = smu7_dpm_get_mclk,
4324 	.get_sclk = smu7_dpm_get_sclk,
4325 	.patch_boot_state = smu7_dpm_patch_boot_state,
4326 	.get_pp_table_entry = smu7_get_pp_table_entry,
4327 	.get_num_of_pp_table_entries = smu7_get_number_of_powerplay_table_entries,
4328 	.powerdown_uvd = smu7_powerdown_uvd,
4329 	.powergate_uvd = smu7_powergate_uvd,
4330 	.powergate_vce = smu7_powergate_vce,
4331 	.disable_clock_power_gating = smu7_disable_clock_power_gating,
4332 	.update_clock_gatings = smu7_update_clock_gatings,
4333 	.notify_smc_display_config_after_ps_adjustment = smu7_notify_smc_display_config_after_ps_adjustment,
4334 	.display_config_changed = smu7_display_configuration_changed_task,
4335 	.set_max_fan_pwm_output = smu7_set_max_fan_pwm_output,
4336 	.set_max_fan_rpm_output = smu7_set_max_fan_rpm_output,
4337 	.get_temperature = smu7_thermal_get_temperature,
4338 	.stop_thermal_controller = smu7_thermal_stop_thermal_controller,
4339 	.get_fan_speed_info = smu7_fan_ctrl_get_fan_speed_info,
4340 	.get_fan_speed_percent = smu7_fan_ctrl_get_fan_speed_percent,
4341 	.set_fan_speed_percent = smu7_fan_ctrl_set_fan_speed_percent,
4342 	.reset_fan_speed_to_default = smu7_fan_ctrl_reset_fan_speed_to_default,
4343 	.get_fan_speed_rpm = smu7_fan_ctrl_get_fan_speed_rpm,
4344 	.set_fan_speed_rpm = smu7_fan_ctrl_set_fan_speed_rpm,
4345 	.uninitialize_thermal_controller = smu7_thermal_ctrl_uninitialize_thermal_controller,
4346 	.register_internal_thermal_interrupt = smu7_register_internal_thermal_interrupt,
4347 	.check_smc_update_required_for_display_configuration = smu7_check_smc_update_required_for_display_configuration,
4348 	.check_states_equal = smu7_check_states_equal,
4349 	.set_fan_control_mode = smu7_set_fan_control_mode,
4350 	.get_fan_control_mode = smu7_get_fan_control_mode,
4351 	.force_clock_level = smu7_force_clock_level,
4352 	.print_clock_levels = smu7_print_clock_levels,
4353 	.enable_per_cu_power_gating = smu7_enable_per_cu_power_gating,
4354 	.get_sclk_od = smu7_get_sclk_od,
4355 	.set_sclk_od = smu7_set_sclk_od,
4356 	.get_mclk_od = smu7_get_mclk_od,
4357 	.set_mclk_od = smu7_set_mclk_od,
4358 	.get_clock_by_type = smu7_get_clock_by_type,
4359 	.read_sensor = smu7_read_sensor,
4360 	.dynamic_state_management_disable = smu7_disable_dpm_tasks,
4361 };
4362 
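/*
 * Return the largest deep-sleep divider id i such that (clock >> i) still
 * meets max(clock_insr, SMU7_MINIMUM_ENGINE_CLOCK); e.g. a clock exactly
 * four times that minimum yields divider id 2 (assuming
 * SMU7_MAX_DEEPSLEEP_DIVIDER_ID >= 2).
 */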
4363 uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
4364 		uint32_t clock_insr)
4365 {
4366 	uint8_t i;
4367 	uint32_t temp;
4368 	uint32_t min = max(clock_insr, (uint32_t)SMU7_MINIMUM_ENGINE_CLOCK);
4369 
4370 	PP_ASSERT_WITH_CODE((clock >= min), "Engine clock can't satisfy stutter requirement!", return 0);
4371 	for (i = SMU7_MAX_DEEPSLEEP_DIVIDER_ID;  ; i--) {
4372 		temp = clock >> i;
4373 
4374 		if (temp >= min || i == 0)
4375 			break;
4376 	}
4377 	return i;
4378 }
4379 
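/*
 * Common smu7 hwmgr setup: install the function dispatch table, pick the
 * power-play table parser matching the table version, and set up the
 * thermal controller defaults.
 */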
4380 int smu7_hwmgr_init(struct pp_hwmgr *hwmgr)
4381 {
4382 	int ret = 0;
4383 
4384 	hwmgr->hwmgr_func = &smu7_hwmgr_funcs;
4385 	if (hwmgr->pp_table_version == PP_TABLE_V0)
4386 		hwmgr->pptable_func = &pptable_funcs;
4387 	else if (hwmgr->pp_table_version == PP_TABLE_V1)
4388 		hwmgr->pptable_func = &pptable_v1_0_funcs;
4389 
4390 	pp_smu7_thermal_initialize(hwmgr);
4391 	return ret;
4392 }
4393 
4394