1 /*
2  * Copyright 2011 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Alex Deucher
23  */
24 
25 #include <linux/pci.h>
26 
27 #include "atom.h"
28 #include "cypress_dpm.h"
29 #include "evergreend.h"
30 #include "r600_dpm.h"
31 #include "radeon.h"
32 #include "radeon_asic.h"
33 
34 #define SMC_RAM_END 0x8000
35 
36 #define MC_CG_ARB_FREQ_F0           0x0a
37 #define MC_CG_ARB_FREQ_F1           0x0b
38 #define MC_CG_ARB_FREQ_F2           0x0c
39 #define MC_CG_ARB_FREQ_F3           0x0d
40 
41 #define MC_CG_SEQ_DRAMCONF_S0       0x05
42 #define MC_CG_SEQ_DRAMCONF_S1       0x06
43 #define MC_CG_SEQ_YCLK_SUSPEND      0x04
44 #define MC_CG_SEQ_YCLK_RESUME       0x0a
45 
46 struct rv7xx_ps *rv770_get_ps(struct radeon_ps *rps);
47 struct rv7xx_power_info *rv770_get_pi(struct radeon_device *rdev);
48 struct evergreen_power_info *evergreen_get_pi(struct radeon_device *rdev);
49 
50 static void cypress_enable_bif_dynamic_pcie_gen2(struct radeon_device *rdev,
51 						 bool enable)
52 {
53 	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
54 	u32 tmp, bif;
55 
56 	tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
57 	if (enable) {
58 		if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
59 		    (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
60 			if (!pi->boot_in_gen2) {
61 				bif = RREG32(CG_BIF_REQ_AND_RSP) & ~CG_CLIENT_REQ_MASK;
62 				bif |= CG_CLIENT_REQ(0xd);
63 				WREG32(CG_BIF_REQ_AND_RSP, bif);
64 
65 				tmp &= ~LC_HW_VOLTAGE_IF_CONTROL_MASK;
66 				tmp |= LC_HW_VOLTAGE_IF_CONTROL(1);
67 				tmp |= LC_GEN2_EN_STRAP;
68 
69 				tmp |= LC_CLR_FAILED_SPD_CHANGE_CNT;
70 				WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
71 				udelay(10);
72 				tmp &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
73 				WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
74 			}
75 		}
76 	} else {
77 		if (!pi->boot_in_gen2) {
78 			tmp &= ~LC_HW_VOLTAGE_IF_CONTROL_MASK;
79 			tmp &= ~LC_GEN2_EN_STRAP;
80 		}
81 		if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
82 		    (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2))
83 			WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
84 	}
85 }
86 
87 static void cypress_enable_dynamic_pcie_gen2(struct radeon_device *rdev,
88 					     bool enable)
89 {
90 	cypress_enable_bif_dynamic_pcie_gen2(rdev, enable);
91 
92 	if (enable)
93 		WREG32_P(GENERAL_PWRMGT, ENABLE_GEN2PCIE, ~ENABLE_GEN2PCIE);
94 	else
95 		WREG32_P(GENERAL_PWRMGT, 0, ~ENABLE_GEN2PCIE);
96 }
97 
98 #if 0
99 static int cypress_enter_ulp_state(struct radeon_device *rdev)
100 {
101 	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
102 
103 	if (pi->gfx_clock_gating) {
104 		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);
105 		WREG32_P(SCLK_PWRMGT_CNTL, GFX_CLK_FORCE_ON, ~GFX_CLK_FORCE_ON);
106 		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~GFX_CLK_FORCE_ON);
107 
108 		RREG32(GB_ADDR_CONFIG);
109 	}
110 
111 	WREG32_P(SMC_MSG, HOST_SMC_MSG(PPSMC_MSG_SwitchToMinimumPower),
112 		 ~HOST_SMC_MSG_MASK);
113 
114 	udelay(7000);
115 
116 	return 0;
117 }
118 #endif
119 
120 static void cypress_gfx_clock_gating_enable(struct radeon_device *rdev,
121 					    bool enable)
122 {
123 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
124 
125 	if (enable) {
126 		if (eg_pi->light_sleep) {
127 			WREG32(GRBM_GFX_INDEX, 0xC0000000);
128 
129 			WREG32_CG(CG_CGLS_TILE_0, 0xFFFFFFFF);
130 			WREG32_CG(CG_CGLS_TILE_1, 0xFFFFFFFF);
131 			WREG32_CG(CG_CGLS_TILE_2, 0xFFFFFFFF);
132 			WREG32_CG(CG_CGLS_TILE_3, 0xFFFFFFFF);
133 			WREG32_CG(CG_CGLS_TILE_4, 0xFFFFFFFF);
134 			WREG32_CG(CG_CGLS_TILE_5, 0xFFFFFFFF);
135 			WREG32_CG(CG_CGLS_TILE_6, 0xFFFFFFFF);
136 			WREG32_CG(CG_CGLS_TILE_7, 0xFFFFFFFF);
137 			WREG32_CG(CG_CGLS_TILE_8, 0xFFFFFFFF);
138 			WREG32_CG(CG_CGLS_TILE_9, 0xFFFFFFFF);
139 			WREG32_CG(CG_CGLS_TILE_10, 0xFFFFFFFF);
140 			WREG32_CG(CG_CGLS_TILE_11, 0xFFFFFFFF);
141 
142 			WREG32_P(SCLK_PWRMGT_CNTL, DYN_LIGHT_SLEEP_EN, ~DYN_LIGHT_SLEEP_EN);
143 		}
144 		WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN);
145 	} else {
146 		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);
147 		WREG32_P(SCLK_PWRMGT_CNTL, GFX_CLK_FORCE_ON, ~GFX_CLK_FORCE_ON);
148 		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~GFX_CLK_FORCE_ON);
149 		RREG32(GB_ADDR_CONFIG);
150 
151 		if (eg_pi->light_sleep) {
152 			WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_LIGHT_SLEEP_EN);
153 
154 			WREG32(GRBM_GFX_INDEX, 0xC0000000);
155 
156 			WREG32_CG(CG_CGLS_TILE_0, 0);
157 			WREG32_CG(CG_CGLS_TILE_1, 0);
158 			WREG32_CG(CG_CGLS_TILE_2, 0);
159 			WREG32_CG(CG_CGLS_TILE_3, 0);
160 			WREG32_CG(CG_CGLS_TILE_4, 0);
161 			WREG32_CG(CG_CGLS_TILE_5, 0);
162 			WREG32_CG(CG_CGLS_TILE_6, 0);
163 			WREG32_CG(CG_CGLS_TILE_7, 0);
164 			WREG32_CG(CG_CGLS_TILE_8, 0);
165 			WREG32_CG(CG_CGLS_TILE_9, 0);
166 			WREG32_CG(CG_CGLS_TILE_10, 0);
167 			WREG32_CG(CG_CGLS_TILE_11, 0);
168 		}
169 	}
170 }
171 
172 static void cypress_mg_clock_gating_enable(struct radeon_device *rdev,
173 					   bool enable)
174 {
175 	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
176 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
177 
178 	if (enable) {
179 		u32 cgts_sm_ctrl_reg;
180 
181 		if (rdev->family == CHIP_CEDAR)
182 			cgts_sm_ctrl_reg = CEDAR_MGCGCGTSSMCTRL_DFLT;
183 		else if (rdev->family == CHIP_REDWOOD)
184 			cgts_sm_ctrl_reg = REDWOOD_MGCGCGTSSMCTRL_DFLT;
185 		else
186 			cgts_sm_ctrl_reg = CYPRESS_MGCGCGTSSMCTRL_DFLT;
187 
188 		WREG32(GRBM_GFX_INDEX, 0xC0000000);
189 
190 		WREG32_CG(CG_CGTT_LOCAL_0, CYPRESS_MGCGTTLOCAL0_DFLT);
191 		WREG32_CG(CG_CGTT_LOCAL_1, CYPRESS_MGCGTTLOCAL1_DFLT & 0xFFFFCFFF);
192 		WREG32_CG(CG_CGTT_LOCAL_2, CYPRESS_MGCGTTLOCAL2_DFLT);
193 		WREG32_CG(CG_CGTT_LOCAL_3, CYPRESS_MGCGTTLOCAL3_DFLT);
194 
195 		if (pi->mgcgtssm)
196 			WREG32(CGTS_SM_CTRL_REG, cgts_sm_ctrl_reg);
197 
198 		if (eg_pi->mcls) {
199 			WREG32_P(MC_CITF_MISC_RD_CG, MEM_LS_ENABLE, ~MEM_LS_ENABLE);
200 			WREG32_P(MC_CITF_MISC_WR_CG, MEM_LS_ENABLE, ~MEM_LS_ENABLE);
201 			WREG32_P(MC_CITF_MISC_VM_CG, MEM_LS_ENABLE, ~MEM_LS_ENABLE);
202 			WREG32_P(MC_HUB_MISC_HUB_CG, MEM_LS_ENABLE, ~MEM_LS_ENABLE);
203 			WREG32_P(MC_HUB_MISC_VM_CG, MEM_LS_ENABLE, ~MEM_LS_ENABLE);
204 			WREG32_P(MC_HUB_MISC_SIP_CG, MEM_LS_ENABLE, ~MEM_LS_ENABLE);
205 			WREG32_P(MC_XPB_CLK_GAT, MEM_LS_ENABLE, ~MEM_LS_ENABLE);
206 			WREG32_P(VM_L2_CG, MEM_LS_ENABLE, ~MEM_LS_ENABLE);
207 		}
208 	} else {
209 		WREG32(GRBM_GFX_INDEX, 0xC0000000);
210 
211 		WREG32_CG(CG_CGTT_LOCAL_0, 0xFFFFFFFF);
212 		WREG32_CG(CG_CGTT_LOCAL_1, 0xFFFFFFFF);
213 		WREG32_CG(CG_CGTT_LOCAL_2, 0xFFFFFFFF);
214 		WREG32_CG(CG_CGTT_LOCAL_3, 0xFFFFFFFF);
215 
216 		if (pi->mgcgtssm)
217 			WREG32(CGTS_SM_CTRL_REG, 0x81f44bc0);
218 	}
219 }
220 
221 void cypress_enable_spread_spectrum(struct radeon_device *rdev,
222 				    bool enable)
223 {
224 	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
225 
226 	if (enable) {
227 		if (pi->sclk_ss)
228 			WREG32_P(GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, ~DYN_SPREAD_SPECTRUM_EN);
229 
230 		if (pi->mclk_ss)
231 			WREG32_P(MPLL_CNTL_MODE, SS_SSEN, ~SS_SSEN);
232 	} else {
233 		WREG32_P(CG_SPLL_SPREAD_SPECTRUM, 0, ~SSEN);
234 		WREG32_P(GENERAL_PWRMGT, 0, ~DYN_SPREAD_SPECTRUM_EN);
235 		WREG32_P(MPLL_CNTL_MODE, 0, ~SS_SSEN);
236 		WREG32_P(MPLL_CNTL_MODE, 0, ~SS_DSMODE_EN);
237 	}
238 }
239 
240 void cypress_start_dpm(struct radeon_device *rdev)
241 {
242 	WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
243 }
244 
245 void cypress_enable_sclk_control(struct radeon_device *rdev,
246 				 bool enable)
247 {
248 	if (enable)
249 		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);
250 	else
251 		WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
252 }
253 
254 void cypress_enable_mclk_control(struct radeon_device *rdev,
255 				 bool enable)
256 {
257 	if (enable)
258 		WREG32_P(MCLK_PWRMGT_CNTL, 0, ~MPLL_PWRMGT_OFF);
259 	else
260 		WREG32_P(MCLK_PWRMGT_CNTL, MPLL_PWRMGT_OFF, ~MPLL_PWRMGT_OFF);
261 }
262 
263 int cypress_notify_smc_display_change(struct radeon_device *rdev,
264 				      bool has_display)
265 {
266 	PPSMC_Msg msg = has_display ?
267 		(PPSMC_Msg)PPSMC_MSG_HasDisplay : (PPSMC_Msg)PPSMC_MSG_NoDisplay;
268 
269 	if (rv770_send_msg_to_smc(rdev, msg) != PPSMC_Result_OK)
270 		return -EINVAL;
271 
272 	return 0;
273 }
274 
275 void cypress_program_response_times(struct radeon_device *rdev)
276 {
277 	u32 reference_clock;
278 	u32 mclk_switch_limit;
279 
280 	reference_clock = radeon_get_xclk(rdev);
281 	mclk_switch_limit = (460 * reference_clock) / 100;
282 
283 	rv770_write_smc_soft_register(rdev,
284 				      RV770_SMC_SOFT_REGISTER_mclk_switch_lim,
285 				      mclk_switch_limit);
286 
287 	rv770_write_smc_soft_register(rdev,
288 				      RV770_SMC_SOFT_REGISTER_mvdd_chg_time, 1);
289 
290 	rv770_write_smc_soft_register(rdev,
291 				      RV770_SMC_SOFT_REGISTER_mc_block_delay, 0xAA);
292 
293 	rv770_program_response_times(rdev);
294 
295 	if (ASIC_IS_LOMBOK(rdev))
296 		rv770_write_smc_soft_register(rdev,
297 					      RV770_SMC_SOFT_REGISTER_is_asic_lombok, 1);
298 
299 }
300 
301 static int cypress_pcie_performance_request(struct radeon_device *rdev,
302 					    u8 perf_req, bool advertise)
303 {
304 #if defined(CONFIG_ACPI)
305 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
306 #endif
307 	u32 tmp;
308 
309 	udelay(10);
310 	tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
311 	if ((perf_req == PCIE_PERF_REQ_PECI_GEN1) && (tmp & LC_CURRENT_DATA_RATE))
312 		return 0;
313 
314 #if defined(CONFIG_ACPI)
315 	if ((perf_req == PCIE_PERF_REQ_PECI_GEN1) ||
316 	    (perf_req == PCIE_PERF_REQ_PECI_GEN2)) {
317 		eg_pi->pcie_performance_request_registered = true;
318 		return radeon_acpi_pcie_performance_request(rdev, perf_req, advertise);
319 	} else if ((perf_req == PCIE_PERF_REQ_REMOVE_REGISTRY) &&
320 		   eg_pi->pcie_performance_request_registered) {
321 		eg_pi->pcie_performance_request_registered = false;
322 		return radeon_acpi_pcie_performance_request(rdev, perf_req, advertise);
323 	}
324 #endif
325 
326 	return 0;
327 }
328 
329 void cypress_advertise_gen2_capability(struct radeon_device *rdev)
330 {
331 	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
332 	u32 tmp;
333 
334 #if defined(CONFIG_ACPI)
335 	radeon_acpi_pcie_notify_device_ready(rdev);
336 #endif
337 
338 	tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
339 
340 	if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
341 	    (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2))
342 		pi->pcie_gen2 = true;
343 	else
344 		pi->pcie_gen2 = false;
345 
346 	if (!pi->pcie_gen2)
347 		cypress_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, true);
348 
349 }
350 
351 static enum radeon_pcie_gen cypress_get_maximum_link_speed(struct radeon_ps *radeon_state)
352 {
353 	struct rv7xx_ps *state = rv770_get_ps(radeon_state);
354 
355 	if (state->high.flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2)
356 		return 1;
357 	return 0;
358 }
359 
360 void cypress_notify_link_speed_change_after_state_change(struct radeon_device *rdev,
361 							 struct radeon_ps *radeon_new_state,
362 							 struct radeon_ps *radeon_current_state)
363 {
364 	enum radeon_pcie_gen pcie_link_speed_target =
365 		cypress_get_maximum_link_speed(radeon_new_state);
366 	enum radeon_pcie_gen pcie_link_speed_current =
367 		cypress_get_maximum_link_speed(radeon_current_state);
368 	u8 request;
369 
370 	if (pcie_link_speed_target < pcie_link_speed_current) {
371 		if (pcie_link_speed_target == RADEON_PCIE_GEN1)
372 			request = PCIE_PERF_REQ_PECI_GEN1;
373 		else if (pcie_link_speed_target == RADEON_PCIE_GEN2)
374 			request = PCIE_PERF_REQ_PECI_GEN2;
375 		else
376 			request = PCIE_PERF_REQ_PECI_GEN3;
377 
378 		cypress_pcie_performance_request(rdev, request, false);
379 	}
380 }
381 
382 void cypress_notify_link_speed_change_before_state_change(struct radeon_device *rdev,
383 							  struct radeon_ps *radeon_new_state,
384 							  struct radeon_ps *radeon_current_state)
385 {
386 	enum radeon_pcie_gen pcie_link_speed_target =
387 		cypress_get_maximum_link_speed(radeon_new_state);
388 	enum radeon_pcie_gen pcie_link_speed_current =
389 		cypress_get_maximum_link_speed(radeon_current_state);
390 	u8 request;
391 
392 	if (pcie_link_speed_target > pcie_link_speed_current) {
393 		if (pcie_link_speed_target == RADEON_PCIE_GEN1)
394 			request = PCIE_PERF_REQ_PECI_GEN1;
395 		else if (pcie_link_speed_target == RADEON_PCIE_GEN2)
396 			request = PCIE_PERF_REQ_PECI_GEN2;
397 		else
398 			request = PCIE_PERF_REQ_PECI_GEN3;
399 
400 		cypress_pcie_performance_request(rdev, request, false);
401 	}
402 }
403 
404 static int cypress_populate_voltage_value(struct radeon_device *rdev,
405 					  struct atom_voltage_table *table,
406 					  u16 value, RV770_SMC_VOLTAGE_VALUE *voltage)
407 {
408 	unsigned int i;
409 
410 	for (i = 0; i < table->count; i++) {
411 		if (value <= table->entries[i].value) {
412 			voltage->index = (u8)i;
413 			voltage->value = cpu_to_be16(table->entries[i].value);
414 			break;
415 		}
416 	}
417 
418 	if (i == table->count)
419 		return -EINVAL;
420 
421 	return 0;
422 }
423 
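/*
 * For GDDR5, strobe mode is used when mclk is at or below the strobe-mode
 * threshold; the returned byte packs the mclk frequency ratio together with
 * the SMC_STROBE_ENABLE flag.
 */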
424 u8 cypress_get_strobe_mode_settings(struct radeon_device *rdev, u32 mclk)
425 {
426 	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
427 	u8 result = 0;
428 	bool strobe_mode = false;
429 
430 	if (pi->mem_gddr5) {
431 		if (mclk <= pi->mclk_strobe_mode_threshold)
432 			strobe_mode = true;
433 		result = cypress_get_mclk_frequency_ratio(rdev, mclk, strobe_mode);
434 
435 		if (strobe_mode)
436 			result |= SMC_STROBE_ENABLE;
437 	}
438 
439 	return result;
440 }
441 
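/*
 * Pick an MPLL IBIAS setting from the feedback divider: the VCO frequency
 * (clkf * ref_clk) is bucketed into fixed values, with separate tables for
 * 100 MHz and 27 MHz reference clocks.
 */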
442 u32 cypress_map_clkf_to_ibias(struct radeon_device *rdev, u32 clkf)
443 {
444 	u32 ref_clk = rdev->clock.mpll.reference_freq;
445 	u32 vco = clkf * ref_clk;
446 
447 	/* 100 MHz ref clk */
448 	if (ref_clk == 10000) {
449 		if (vco > 500000)
450 			return 0xC6;
451 		if (vco > 400000)
452 			return 0x9D;
453 		if (vco > 330000)
454 			return 0x6C;
455 		if (vco > 250000)
456 			return 0x2B;
457 		if (vco > 160000)
458 			return 0x5B;
459 		if (vco > 120000)
460 			return 0x0A;
461 		return 0x4B;
462 	}
463 
464 	/* 27 MHz ref clk */
465 	if (vco > 250000)
466 		return 0x8B;
467 	if (vco > 200000)
468 		return 0xCC;
469 	if (vco > 150000)
470 		return 0x9B;
471 	return 0x6B;
472 }
473 
474 static int cypress_populate_mclk_value(struct radeon_device *rdev,
475 				       u32 engine_clock, u32 memory_clock,
476 				       RV7XX_SMC_MCLK_VALUE *mclk,
477 				       bool strobe_mode, bool dll_state_on)
478 {
479 	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
480 
481 	u32 mpll_ad_func_cntl =
482 		pi->clk_regs.rv770.mpll_ad_func_cntl;
483 	u32 mpll_ad_func_cntl_2 =
484 		pi->clk_regs.rv770.mpll_ad_func_cntl_2;
485 	u32 mpll_dq_func_cntl =
486 		pi->clk_regs.rv770.mpll_dq_func_cntl;
487 	u32 mpll_dq_func_cntl_2 =
488 		pi->clk_regs.rv770.mpll_dq_func_cntl_2;
489 	u32 mclk_pwrmgt_cntl =
490 		pi->clk_regs.rv770.mclk_pwrmgt_cntl;
491 	u32 dll_cntl =
492 		pi->clk_regs.rv770.dll_cntl;
493 	u32 mpll_ss1 = pi->clk_regs.rv770.mpll_ss1;
494 	u32 mpll_ss2 = pi->clk_regs.rv770.mpll_ss2;
495 	struct atom_clock_dividers dividers;
496 	u32 ibias;
497 	u32 dll_speed;
498 	int ret;
499 	u32 mc_seq_misc7;
500 
501 	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_MEMORY_PLL_PARAM,
502 					     memory_clock, strobe_mode, &dividers);
503 	if (ret)
504 		return ret;
505 
506 	if (!strobe_mode) {
507 		mc_seq_misc7 = RREG32(MC_SEQ_MISC7);
508 
509 		if (mc_seq_misc7 & 0x8000000)
510 			dividers.post_div = 1;
511 	}
512 
513 	ibias = cypress_map_clkf_to_ibias(rdev, dividers.whole_fb_div);
514 
515 	mpll_ad_func_cntl &= ~(CLKR_MASK |
516 			       YCLK_POST_DIV_MASK |
517 			       CLKF_MASK |
518 			       CLKFRAC_MASK |
519 			       IBIAS_MASK);
520 	mpll_ad_func_cntl |= CLKR(dividers.ref_div);
521 	mpll_ad_func_cntl |= YCLK_POST_DIV(dividers.post_div);
522 	mpll_ad_func_cntl |= CLKF(dividers.whole_fb_div);
523 	mpll_ad_func_cntl |= CLKFRAC(dividers.frac_fb_div);
524 	mpll_ad_func_cntl |= IBIAS(ibias);
525 
526 	if (dividers.vco_mode)
527 		mpll_ad_func_cntl_2 |= VCO_MODE;
528 	else
529 		mpll_ad_func_cntl_2 &= ~VCO_MODE;
530 
531 	if (pi->mem_gddr5) {
532 		mpll_dq_func_cntl &= ~(CLKR_MASK |
533 				       YCLK_POST_DIV_MASK |
534 				       CLKF_MASK |
535 				       CLKFRAC_MASK |
536 				       IBIAS_MASK);
537 		mpll_dq_func_cntl |= CLKR(dividers.ref_div);
538 		mpll_dq_func_cntl |= YCLK_POST_DIV(dividers.post_div);
539 		mpll_dq_func_cntl |= CLKF(dividers.whole_fb_div);
540 		mpll_dq_func_cntl |= CLKFRAC(dividers.frac_fb_div);
541 		mpll_dq_func_cntl |= IBIAS(ibias);
542 
543 		if (strobe_mode)
544 			mpll_dq_func_cntl &= ~PDNB;
545 		else
546 			mpll_dq_func_cntl |= PDNB;
547 
548 		if (dividers.vco_mode)
549 			mpll_dq_func_cntl_2 |= VCO_MODE;
550 		else
551 			mpll_dq_func_cntl_2 &= ~VCO_MODE;
552 	}
553 
554 	if (pi->mclk_ss) {
555 		struct radeon_atom_ss ss;
556 		u32 vco_freq = memory_clock * dividers.post_div;
557 
558 		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
559 						     ASIC_INTERNAL_MEMORY_SS, vco_freq)) {
560 			u32 reference_clock = rdev->clock.mpll.reference_freq;
561 			u32 decoded_ref = rv740_get_decoded_reference_divider(dividers.ref_div);
562 			u32 clk_s, clk_v;
563 
564 			if (!decoded_ref)
565 				return -EINVAL;
566 			clk_s = reference_clock * 5 / (decoded_ref * ss.rate);
567 			clk_v = ss.percentage *
568 				(0x4000 * dividers.whole_fb_div + 0x800 * dividers.frac_fb_div) / (clk_s * 625);
569 
570 			mpll_ss1 &= ~CLKV_MASK;
571 			mpll_ss1 |= CLKV(clk_v);
572 
573 			mpll_ss2 &= ~CLKS_MASK;
574 			mpll_ss2 |= CLKS(clk_s);
575 		}
576 	}
577 
578 	dll_speed = rv740_get_dll_speed(pi->mem_gddr5,
579 					memory_clock);
580 
581 	mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
582 	mclk_pwrmgt_cntl |= DLL_SPEED(dll_speed);
583 	if (dll_state_on)
584 		mclk_pwrmgt_cntl |= (MRDCKA0_PDNB |
585 				     MRDCKA1_PDNB |
586 				     MRDCKB0_PDNB |
587 				     MRDCKB1_PDNB |
588 				     MRDCKC0_PDNB |
589 				     MRDCKC1_PDNB |
590 				     MRDCKD0_PDNB |
591 				     MRDCKD1_PDNB);
592 	else
593 		mclk_pwrmgt_cntl &= ~(MRDCKA0_PDNB |
594 				      MRDCKA1_PDNB |
595 				      MRDCKB0_PDNB |
596 				      MRDCKB1_PDNB |
597 				      MRDCKC0_PDNB |
598 				      MRDCKC1_PDNB |
599 				      MRDCKD0_PDNB |
600 				      MRDCKD1_PDNB);
601 
602 	mclk->mclk770.mclk_value = cpu_to_be32(memory_clock);
603 	mclk->mclk770.vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
604 	mclk->mclk770.vMPLL_AD_FUNC_CNTL_2 = cpu_to_be32(mpll_ad_func_cntl_2);
605 	mclk->mclk770.vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
606 	mclk->mclk770.vMPLL_DQ_FUNC_CNTL_2 = cpu_to_be32(mpll_dq_func_cntl_2);
607 	mclk->mclk770.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
608 	mclk->mclk770.vDLL_CNTL = cpu_to_be32(dll_cntl);
609 	mclk->mclk770.vMPLL_SS = cpu_to_be32(mpll_ss1);
610 	mclk->mclk770.vMPLL_SS2 = cpu_to_be32(mpll_ss2);
611 
612 	return 0;
613 }
614 
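/*
 * Convert a memory clock into an MC parameter index; the non-strobe
 * thresholds differ between BARTS-and-newer and older families.
 */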
615 u8 cypress_get_mclk_frequency_ratio(struct radeon_device *rdev,
616 				    u32 memory_clock, bool strobe_mode)
617 {
618 	u8 mc_para_index;
619 
620 	if (rdev->family >= CHIP_BARTS) {
621 		if (strobe_mode) {
622 			if (memory_clock < 10000)
623 				mc_para_index = 0x00;
624 			else if (memory_clock > 47500)
625 				mc_para_index = 0x0f;
626 			else
627 				mc_para_index = (u8)((memory_clock - 10000) / 2500);
628 		} else {
629 			if (memory_clock < 65000)
630 				mc_para_index = 0x00;
631 			else if (memory_clock > 135000)
632 				mc_para_index = 0x0f;
633 			else
634 				mc_para_index = (u8)((memory_clock - 60000) / 5000);
635 		}
636 	} else {
637 		if (strobe_mode) {
638 			if (memory_clock < 10000)
639 				mc_para_index = 0x00;
640 			else if (memory_clock > 47500)
641 				mc_para_index = 0x0f;
642 			else
643 				mc_para_index = (u8)((memory_clock - 10000) / 2500);
644 		} else {
645 			if (memory_clock < 40000)
646 				mc_para_index = 0x00;
647 			else if (memory_clock > 115000)
648 				mc_para_index = 0x0f;
649 			else
650 				mc_para_index = (u8)((memory_clock - 40000) / 5000);
651 		}
652 	}
653 	return mc_para_index;
654 }
655 
656 static int cypress_populate_mvdd_value(struct radeon_device *rdev,
657 				       u32 mclk,
658 				       RV770_SMC_VOLTAGE_VALUE *voltage)
659 {
660 	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
661 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
662 
663 	if (!pi->mvdd_control) {
664 		voltage->index = eg_pi->mvdd_high_index;
665 		voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
666 		return 0;
667 	}
668 
669 	if (mclk <= pi->mvdd_split_frequency) {
670 		voltage->index = eg_pi->mvdd_low_index;
671 		voltage->value = cpu_to_be16(MVDD_LOW_VALUE);
672 	} else {
673 		voltage->index = eg_pi->mvdd_high_index;
674 		voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
675 	}
676 
677 	return 0;
678 }
679 
680 int cypress_convert_power_level_to_smc(struct radeon_device *rdev,
681 				       struct rv7xx_pl *pl,
682 				       RV770_SMC_HW_PERFORMANCE_LEVEL *level,
683 				       u8 watermark_level)
684 {
685 	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
686 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
687 	int ret;
688 	bool dll_state_on;
689 
690 	level->gen2PCIE = pi->pcie_gen2 ?
691 		((pl->flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) ? 1 : 0) : 0;
692 	level->gen2XSP  = (pl->flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) ? 1 : 0;
693 	level->backbias = (pl->flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE) ? 1 : 0;
694 	level->displayWatermark = watermark_level;
695 
696 	ret = rv740_populate_sclk_value(rdev, pl->sclk, &level->sclk);
697 	if (ret)
698 		return ret;
699 
700 	level->mcFlags = 0;
701 	if (pi->mclk_stutter_mode_threshold &&
702 	    (pl->mclk <= pi->mclk_stutter_mode_threshold) &&
703 	    !eg_pi->uvd_enabled) {
704 		level->mcFlags |= SMC_MC_STUTTER_EN;
705 		if (eg_pi->sclk_deep_sleep)
706 			level->stateFlags |= PPSMC_STATEFLAG_AUTO_PULSE_SKIP;
707 		else
708 			level->stateFlags &= ~PPSMC_STATEFLAG_AUTO_PULSE_SKIP;
709 	}
710 
711 	if (pi->mem_gddr5) {
712 		if (pl->mclk > pi->mclk_edc_enable_threshold)
713 			level->mcFlags |= SMC_MC_EDC_RD_FLAG;
714 
715 		if (pl->mclk > eg_pi->mclk_edc_wr_enable_threshold)
716 			level->mcFlags |= SMC_MC_EDC_WR_FLAG;
717 
718 		level->strobeMode = cypress_get_strobe_mode_settings(rdev, pl->mclk);
719 
720 		if (level->strobeMode & SMC_STROBE_ENABLE) {
721 			if (cypress_get_mclk_frequency_ratio(rdev, pl->mclk, true) >=
722 			    ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
723 				dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
724 			else
725 				dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
726 		} else
727 			dll_state_on = eg_pi->dll_default_on;
728 
729 		ret = cypress_populate_mclk_value(rdev,
730 						  pl->sclk,
731 						  pl->mclk,
732 						  &level->mclk,
733 						  (level->strobeMode & SMC_STROBE_ENABLE) != 0,
734 						  dll_state_on);
735 	} else {
736 		ret = cypress_populate_mclk_value(rdev,
737 						  pl->sclk,
738 						  pl->mclk,
739 						  &level->mclk,
740 						  true,
741 						  true);
742 	}
743 	if (ret)
744 		return ret;
745 
746 	ret = cypress_populate_voltage_value(rdev,
747 					     &eg_pi->vddc_voltage_table,
748 					     pl->vddc,
749 					     &level->vddc);
750 	if (ret)
751 		return ret;
752 
753 	if (eg_pi->vddci_control) {
754 		ret = cypress_populate_voltage_value(rdev,
755 						     &eg_pi->vddci_voltage_table,
756 						     pl->vddci,
757 						     &level->vddci);
758 		if (ret)
759 			return ret;
760 	}
761 
762 	ret = cypress_populate_mvdd_value(rdev, pl->mclk, &level->mvdd);
763 
764 	return ret;
765 }
766 
767 static int cypress_convert_power_state_to_smc(struct radeon_device *rdev,
768 					      struct radeon_ps *radeon_state,
769 					      RV770_SMC_SWSTATE *smc_state)
770 {
771 	struct rv7xx_ps *state = rv770_get_ps(radeon_state);
772 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
773 	int ret;
774 
775 	if (!(radeon_state->caps & ATOM_PPLIB_DISALLOW_ON_DC))
776 		smc_state->flags |= PPSMC_SWSTATE_FLAG_DC;
777 
778 	ret = cypress_convert_power_level_to_smc(rdev,
779 						 &state->low,
780 						 &smc_state->levels[0],
781 						 PPSMC_DISPLAY_WATERMARK_LOW);
782 	if (ret)
783 		return ret;
784 
785 	ret = cypress_convert_power_level_to_smc(rdev,
786 						 &state->medium,
787 						 &smc_state->levels[1],
788 						 PPSMC_DISPLAY_WATERMARK_LOW);
789 	if (ret)
790 		return ret;
791 
792 	ret = cypress_convert_power_level_to_smc(rdev,
793 						 &state->high,
794 						 &smc_state->levels[2],
795 						 PPSMC_DISPLAY_WATERMARK_HIGH);
796 	if (ret)
797 		return ret;
798 
799 	smc_state->levels[0].arbValue = MC_CG_ARB_FREQ_F1;
800 	smc_state->levels[1].arbValue = MC_CG_ARB_FREQ_F2;
801 	smc_state->levels[2].arbValue = MC_CG_ARB_FREQ_F3;
802 
803 	if (eg_pi->dynamic_ac_timing) {
804 		smc_state->levels[0].ACIndex = 2;
805 		smc_state->levels[1].ACIndex = 3;
806 		smc_state->levels[2].ACIndex = 4;
807 	} else {
808 		smc_state->levels[0].ACIndex = 0;
809 		smc_state->levels[1].ACIndex = 0;
810 		smc_state->levels[2].ACIndex = 0;
811 	}
812 
813 	rv770_populate_smc_sp(rdev, radeon_state, smc_state);
814 
815 	return rv770_populate_smc_t(rdev, radeon_state, smc_state);
816 }
817 
818 static void cypress_convert_mc_registers(struct evergreen_mc_reg_entry *entry,
819 					 SMC_Evergreen_MCRegisterSet *data,
820 					 u32 num_entries, u32 valid_flag)
821 {
822 	u32 i, j;
823 
824 	for (i = 0, j = 0; j < num_entries; j++) {
825 		if (valid_flag & (1 << j)) {
826 			data->value[i] = cpu_to_be32(entry->mc_data[j]);
827 			i++;
828 		}
829 	}
830 }
831 
832 static void cypress_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev,
833 						      struct rv7xx_pl *pl,
834 						      SMC_Evergreen_MCRegisterSet *mc_reg_table_data)
835 {
836 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
837 	u32 i = 0;
838 
839 	for (i = 0; i < eg_pi->mc_reg_table.num_entries; i++) {
840 		if (pl->mclk <=
841 		    eg_pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
842 			break;
843 	}
844 
845 	if ((i == eg_pi->mc_reg_table.num_entries) && (i > 0))
846 		--i;
847 
848 	cypress_convert_mc_registers(&eg_pi->mc_reg_table.mc_reg_table_entry[i],
849 				     mc_reg_table_data,
850 				     eg_pi->mc_reg_table.last,
851 				     eg_pi->mc_reg_table.valid_flag);
852 }
853 
854 static void cypress_convert_mc_reg_table_to_smc(struct radeon_device *rdev,
855 						struct radeon_ps *radeon_state,
856 						SMC_Evergreen_MCRegisters *mc_reg_table)
857 {
858 	struct rv7xx_ps *state = rv770_get_ps(radeon_state);
859 
860 	cypress_convert_mc_reg_table_entry_to_smc(rdev,
861 						  &state->low,
862 						  &mc_reg_table->data[2]);
863 	cypress_convert_mc_reg_table_entry_to_smc(rdev,
864 						  &state->medium,
865 						  &mc_reg_table->data[3]);
866 	cypress_convert_mc_reg_table_entry_to_smc(rdev,
867 						  &state->high,
868 						  &mc_reg_table->data[4]);
869 }
870 
871 int cypress_upload_sw_state(struct radeon_device *rdev,
872 			    struct radeon_ps *radeon_new_state)
873 {
874 	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
875 	u16 address = pi->state_table_start +
876 		offsetof(RV770_SMC_STATETABLE, driverState);
877 	RV770_SMC_SWSTATE state = { 0 };
878 	int ret;
879 
880 	ret = cypress_convert_power_state_to_smc(rdev, radeon_new_state, &state);
881 	if (ret)
882 		return ret;
883 
884 	return rv770_copy_bytes_to_smc(rdev, address, (u8 *)&state,
885 				    sizeof(RV770_SMC_SWSTATE),
886 				    pi->sram_end);
887 }
888 
889 int cypress_upload_mc_reg_table(struct radeon_device *rdev,
890 				struct radeon_ps *radeon_new_state)
891 {
892 	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
893 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
894 	SMC_Evergreen_MCRegisters mc_reg_table = { 0 };
895 	u16 address;
896 
897 	cypress_convert_mc_reg_table_to_smc(rdev, radeon_new_state, &mc_reg_table);
898 
899 	address = eg_pi->mc_reg_table_start +
900 		(u16)offsetof(SMC_Evergreen_MCRegisters, data[2]);
901 
902 	return rv770_copy_bytes_to_smc(rdev, address,
903 				       (u8 *)&mc_reg_table.data[2],
904 				       sizeof(SMC_Evergreen_MCRegisterSet) * 3,
905 				       pi->sram_end);
906 }
907 
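/*
 * Derive the MC arbiter burst time from the sclk/mclk ratio (the ratio is
 * doubled for non-GDDR5 memory) and clamp the result to a maximum of 18.
 */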
908 u32 cypress_calculate_burst_time(struct radeon_device *rdev,
909 				 u32 engine_clock, u32 memory_clock)
910 {
911 	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
912 	u32 multiplier = pi->mem_gddr5 ? 1 : 2;
913 	u32 result = (4 * multiplier * engine_clock) / (memory_clock / 2);
914 	u32 burst_time;
915 
916 	if (result <= 4)
917 		burst_time = 0;
918 	else if (result < 8)
919 		burst_time = result - 4;
920 	else {
921 		burst_time = result / 2;
922 		if (burst_time > 18)
923 			burst_time = 18;
924 	}
925 
926 	return burst_time;
927 }
928 
929 void cypress_program_memory_timing_parameters(struct radeon_device *rdev,
930 					      struct radeon_ps *radeon_new_state)
931 {
932 	struct rv7xx_ps *new_state = rv770_get_ps(radeon_new_state);
933 	u32 mc_arb_burst_time = RREG32(MC_ARB_BURST_TIME);
934 
935 	mc_arb_burst_time &= ~(STATE1_MASK | STATE2_MASK | STATE3_MASK);
936 
937 	mc_arb_burst_time |= STATE1(cypress_calculate_burst_time(rdev,
938 								 new_state->low.sclk,
939 								 new_state->low.mclk));
940 	mc_arb_burst_time |= STATE2(cypress_calculate_burst_time(rdev,
941 								 new_state->medium.sclk,
942 								 new_state->medium.mclk));
943 	mc_arb_burst_time |= STATE3(cypress_calculate_burst_time(rdev,
944 								 new_state->high.sclk,
945 								 new_state->high.mclk));
946 
947 	rv730_program_memory_timing_parameters(rdev, radeon_new_state);
948 
949 	WREG32(MC_ARB_BURST_TIME, mc_arb_burst_time);
950 }
951 
952 static void cypress_populate_mc_reg_addresses(struct radeon_device *rdev,
953 					      SMC_Evergreen_MCRegisters *mc_reg_table)
954 {
955 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
956 	u32 i, j;
957 
958 	for (i = 0, j = 0; j < eg_pi->mc_reg_table.last; j++) {
959 		if (eg_pi->mc_reg_table.valid_flag & (1 << j)) {
960 			mc_reg_table->address[i].s0 =
961 				cpu_to_be16(eg_pi->mc_reg_table.mc_reg_address[j].s0);
962 			mc_reg_table->address[i].s1 =
963 				cpu_to_be16(eg_pi->mc_reg_table.mc_reg_address[j].s1);
964 			i++;
965 		}
966 	}
967 
968 	mc_reg_table->last = (u8)i;
969 }
970 
971 static void cypress_set_mc_reg_address_table(struct radeon_device *rdev)
972 {
973 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
974 	u32 i = 0;
975 
976 	eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_RAS_TIMING_LP >> 2;
977 	eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_RAS_TIMING >> 2;
978 	i++;
979 
980 	eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_CAS_TIMING_LP >> 2;
981 	eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_CAS_TIMING >> 2;
982 	i++;
983 
984 	eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_MISC_TIMING_LP >> 2;
985 	eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_MISC_TIMING >> 2;
986 	i++;
987 
988 	eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_MISC_TIMING2_LP >> 2;
989 	eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_MISC_TIMING2 >> 2;
990 	i++;
991 
992 	eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_RD_CTL_D0_LP >> 2;
993 	eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_RD_CTL_D0 >> 2;
994 	i++;
995 
996 	eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_RD_CTL_D1_LP >> 2;
997 	eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_RD_CTL_D1 >> 2;
998 	i++;
999 
1000 	eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_WR_CTL_D0_LP >> 2;
1001 	eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_WR_CTL_D0 >> 2;
1002 	i++;
1003 
1004 	eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_WR_CTL_D1_LP >> 2;
1005 	eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_WR_CTL_D1 >> 2;
1006 	i++;
1007 
1008 	eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
1009 	eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_PMG_CMD_EMRS >> 2;
1010 	i++;
1011 
1012 	eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2;
1013 	eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_PMG_CMD_MRS >> 2;
1014 	i++;
1015 
1016 	eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
1017 	eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_PMG_CMD_MRS1 >> 2;
1018 	i++;
1019 
1020 	eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_MISC1 >> 2;
1021 	eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_MISC1 >> 2;
1022 	i++;
1023 
1024 	eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_RESERVE_M >> 2;
1025 	eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_RESERVE_M >> 2;
1026 	i++;
1027 
1028 	eg_pi->mc_reg_table.mc_reg_address[i].s0 = MC_SEQ_MISC3 >> 2;
1029 	eg_pi->mc_reg_table.mc_reg_address[i].s1 = MC_SEQ_MISC3 >> 2;
1030 	i++;
1031 
1032 	eg_pi->mc_reg_table.last = (u8)i;
1033 }
1034 
1035 static void cypress_retrieve_ac_timing_for_one_entry(struct radeon_device *rdev,
1036 						     struct evergreen_mc_reg_entry *entry)
1037 {
1038 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1039 	u32 i;
1040 
1041 	for (i = 0; i < eg_pi->mc_reg_table.last; i++)
1042 		entry->mc_data[i] =
1043 			RREG32(eg_pi->mc_reg_table.mc_reg_address[i].s1 << 2);
1044 
1045 }
1046 
1047 static void cypress_retrieve_ac_timing_for_all_ranges(struct radeon_device *rdev,
1048 						      struct atom_memory_clock_range_table *range_table)
1049 {
1050 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1051 	u32 i, j;
1052 
1053 	for (i = 0; i < range_table->num_entries; i++) {
1054 		eg_pi->mc_reg_table.mc_reg_table_entry[i].mclk_max =
1055 			range_table->mclk[i];
1056 		radeon_atom_set_ac_timing(rdev, range_table->mclk[i]);
1057 		cypress_retrieve_ac_timing_for_one_entry(rdev,
1058 							 &eg_pi->mc_reg_table.mc_reg_table_entry[i]);
1059 	}
1060 
1061 	eg_pi->mc_reg_table.num_entries = range_table->num_entries;
1062 	eg_pi->mc_reg_table.valid_flag = 0;
1063 
1064 	for (i = 0; i < eg_pi->mc_reg_table.last; i++) {
1065 		for (j = 1; j < range_table->num_entries; j++) {
1066 			if (eg_pi->mc_reg_table.mc_reg_table_entry[j-1].mc_data[i] !=
1067 			    eg_pi->mc_reg_table.mc_reg_table_entry[j].mc_data[i]) {
1068 				eg_pi->mc_reg_table.valid_flag |= (1 << i);
1069 				break;
1070 			}
1071 		}
1072 	}
1073 }
1074 
1075 static int cypress_initialize_mc_reg_table(struct radeon_device *rdev)
1076 {
1077 	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1078 	u8 module_index = rv770_get_memory_module_index(rdev);
1079 	struct atom_memory_clock_range_table range_table = { 0 };
1080 	int ret;
1081 
1082 	ret = radeon_atom_get_mclk_range_table(rdev,
1083 					       pi->mem_gddr5,
1084 					       module_index, &range_table);
1085 	if (ret)
1086 		return ret;
1087 
1088 	cypress_retrieve_ac_timing_for_all_ranges(rdev, &range_table);
1089 
1090 	return 0;
1091 }
1092 
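/*
 * Poll MC_SEQ_CG until each memory channel reports the requested sequencer
 * state; the channel count depends on the chip family.
 */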
1093 static void cypress_wait_for_mc_sequencer(struct radeon_device *rdev, u8 value)
1094 {
1095 	u32 i, j;
1096 	u32 channels = 2;
1097 
1098 	if ((rdev->family == CHIP_CYPRESS) ||
1099 	    (rdev->family == CHIP_HEMLOCK))
1100 		channels = 4;
1101 	else if (rdev->family == CHIP_CEDAR)
1102 		channels = 1;
1103 
1104 	for (i = 0; i < channels; i++) {
1105 		if ((rdev->family == CHIP_CYPRESS) ||
1106 		    (rdev->family == CHIP_HEMLOCK)) {
1107 			WREG32_P(MC_CONFIG_MCD, MC_RD_ENABLE_MCD(i), ~MC_RD_ENABLE_MCD_MASK);
1108 			WREG32_P(MC_CG_CONFIG_MCD, MC_RD_ENABLE_MCD(i), ~MC_RD_ENABLE_MCD_MASK);
1109 		} else {
1110 			WREG32_P(MC_CONFIG, MC_RD_ENABLE(i), ~MC_RD_ENABLE_MASK);
1111 			WREG32_P(MC_CG_CONFIG, MC_RD_ENABLE(i), ~MC_RD_ENABLE_MASK);
1112 		}
1113 		for (j = 0; j < rdev->usec_timeout; j++) {
1114 			if (((RREG32(MC_SEQ_CG) & CG_SEQ_RESP_MASK) >> CG_SEQ_RESP_SHIFT) == value)
1115 				break;
1116 			udelay(1);
1117 		}
1118 	}
1119 }
1120 
1121 static void cypress_force_mc_use_s1(struct radeon_device *rdev,
1122 				    struct radeon_ps *radeon_boot_state)
1123 {
1124 	struct rv7xx_ps *boot_state = rv770_get_ps(radeon_boot_state);
1125 	u32 strobe_mode;
1126 	u32 mc_seq_cg;
1127 	int i;
1128 
1129 	if (RREG32(MC_SEQ_STATUS_M) & PMG_PWRSTATE)
1130 		return;
1131 
1132 	radeon_atom_set_ac_timing(rdev, boot_state->low.mclk);
1133 	radeon_mc_wait_for_idle(rdev);
1134 
1135 	if ((rdev->family == CHIP_CYPRESS) ||
1136 	    (rdev->family == CHIP_HEMLOCK)) {
1137 		WREG32(MC_CONFIG_MCD, 0xf);
1138 		WREG32(MC_CG_CONFIG_MCD, 0xf);
1139 	} else {
1140 		WREG32(MC_CONFIG, 0xf);
1141 		WREG32(MC_CG_CONFIG, 0xf);
1142 	}
1143 
1144 	for (i = 0; i < rdev->num_crtc; i++)
1145 		radeon_wait_for_vblank(rdev, i);
1146 
1147 	WREG32(MC_SEQ_CG, MC_CG_SEQ_YCLK_SUSPEND);
1148 	cypress_wait_for_mc_sequencer(rdev, MC_CG_SEQ_YCLK_SUSPEND);
1149 
1150 	strobe_mode = cypress_get_strobe_mode_settings(rdev,
1151 						       boot_state->low.mclk);
1152 
1153 	mc_seq_cg = CG_SEQ_REQ(MC_CG_SEQ_DRAMCONF_S1);
1154 	mc_seq_cg |= SEQ_CG_RESP(strobe_mode);
1155 	WREG32(MC_SEQ_CG, mc_seq_cg);
1156 
1157 	for (i = 0; i < rdev->usec_timeout; i++) {
1158 		if (RREG32(MC_SEQ_STATUS_M) & PMG_PWRSTATE)
1159 			break;
1160 		udelay(1);
1161 	}
1162 
1163 	mc_seq_cg &= ~CG_SEQ_REQ_MASK;
1164 	mc_seq_cg |= CG_SEQ_REQ(MC_CG_SEQ_YCLK_RESUME);
1165 	WREG32(MC_SEQ_CG, mc_seq_cg);
1166 
1167 	cypress_wait_for_mc_sequencer(rdev, MC_CG_SEQ_YCLK_RESUME);
1168 }
1169 
1170 static void cypress_copy_ac_timing_from_s1_to_s0(struct radeon_device *rdev)
1171 {
1172 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1173 	u32 value;
1174 	u32 i;
1175 
1176 	for (i = 0; i < eg_pi->mc_reg_table.last; i++) {
1177 		value = RREG32(eg_pi->mc_reg_table.mc_reg_address[i].s1 << 2);
1178 		WREG32(eg_pi->mc_reg_table.mc_reg_address[i].s0 << 2, value);
1179 	}
1180 }
1181 
1182 static void cypress_force_mc_use_s0(struct radeon_device *rdev,
1183 				    struct radeon_ps *radeon_boot_state)
1184 {
1185 	struct rv7xx_ps *boot_state = rv770_get_ps(radeon_boot_state);
1186 	u32 strobe_mode;
1187 	u32 mc_seq_cg;
1188 	int i;
1189 
1190 	cypress_copy_ac_timing_from_s1_to_s0(rdev);
1191 	radeon_mc_wait_for_idle(rdev);
1192 
1193 	if ((rdev->family == CHIP_CYPRESS) ||
1194 	    (rdev->family == CHIP_HEMLOCK)) {
1195 		WREG32(MC_CONFIG_MCD, 0xf);
1196 		WREG32(MC_CG_CONFIG_MCD, 0xf);
1197 	} else {
1198 		WREG32(MC_CONFIG, 0xf);
1199 		WREG32(MC_CG_CONFIG, 0xf);
1200 	}
1201 
1202 	for (i = 0; i < rdev->num_crtc; i++)
1203 		radeon_wait_for_vblank(rdev, i);
1204 
1205 	WREG32(MC_SEQ_CG, MC_CG_SEQ_YCLK_SUSPEND);
1206 	cypress_wait_for_mc_sequencer(rdev, MC_CG_SEQ_YCLK_SUSPEND);
1207 
1208 	strobe_mode = cypress_get_strobe_mode_settings(rdev,
1209 						       boot_state->low.mclk);
1210 
1211 	mc_seq_cg = CG_SEQ_REQ(MC_CG_SEQ_DRAMCONF_S0);
1212 	mc_seq_cg |= SEQ_CG_RESP(strobe_mode);
1213 	WREG32(MC_SEQ_CG, mc_seq_cg);
1214 
1215 	for (i = 0; i < rdev->usec_timeout; i++) {
1216 		if (!(RREG32(MC_SEQ_STATUS_M) & PMG_PWRSTATE))
1217 			break;
1218 		udelay(1);
1219 	}
1220 
1221 	mc_seq_cg &= ~CG_SEQ_REQ_MASK;
1222 	mc_seq_cg |= CG_SEQ_REQ(MC_CG_SEQ_YCLK_RESUME);
1223 	WREG32(MC_SEQ_CG, mc_seq_cg);
1224 
1225 	cypress_wait_for_mc_sequencer(rdev, MC_CG_SEQ_YCLK_RESUME);
1226 }
1227 
1228 static int cypress_populate_initial_mvdd_value(struct radeon_device *rdev,
1229 					       RV770_SMC_VOLTAGE_VALUE *voltage)
1230 {
1231 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1232 
1233 	voltage->index = eg_pi->mvdd_high_index;
1234 	voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
1235 
1236 	return 0;
1237 }
1238 
1239 int cypress_populate_smc_initial_state(struct radeon_device *rdev,
1240 				       struct radeon_ps *radeon_initial_state,
1241 				       RV770_SMC_STATETABLE *table)
1242 {
1243 	struct rv7xx_ps *initial_state = rv770_get_ps(radeon_initial_state);
1244 	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1245 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1246 	u32 a_t;
1247 
1248 	table->initialState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL =
1249 		cpu_to_be32(pi->clk_regs.rv770.mpll_ad_func_cntl);
1250 	table->initialState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL_2 =
1251 		cpu_to_be32(pi->clk_regs.rv770.mpll_ad_func_cntl_2);
1252 	table->initialState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL =
1253 		cpu_to_be32(pi->clk_regs.rv770.mpll_dq_func_cntl);
1254 	table->initialState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL_2 =
1255 		cpu_to_be32(pi->clk_regs.rv770.mpll_dq_func_cntl_2);
1256 	table->initialState.levels[0].mclk.mclk770.vMCLK_PWRMGT_CNTL =
1257 		cpu_to_be32(pi->clk_regs.rv770.mclk_pwrmgt_cntl);
1258 	table->initialState.levels[0].mclk.mclk770.vDLL_CNTL =
1259 		cpu_to_be32(pi->clk_regs.rv770.dll_cntl);
1260 
1261 	table->initialState.levels[0].mclk.mclk770.vMPLL_SS =
1262 		cpu_to_be32(pi->clk_regs.rv770.mpll_ss1);
1263 	table->initialState.levels[0].mclk.mclk770.vMPLL_SS2 =
1264 		cpu_to_be32(pi->clk_regs.rv770.mpll_ss2);
1265 
1266 	table->initialState.levels[0].mclk.mclk770.mclk_value =
1267 		cpu_to_be32(initial_state->low.mclk);
1268 
1269 	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
1270 		cpu_to_be32(pi->clk_regs.rv770.cg_spll_func_cntl);
1271 	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
1272 		cpu_to_be32(pi->clk_regs.rv770.cg_spll_func_cntl_2);
1273 	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
1274 		cpu_to_be32(pi->clk_regs.rv770.cg_spll_func_cntl_3);
1275 	table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM =
1276 		cpu_to_be32(pi->clk_regs.rv770.cg_spll_spread_spectrum);
1277 	table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2 =
1278 		cpu_to_be32(pi->clk_regs.rv770.cg_spll_spread_spectrum_2);
1279 
1280 	table->initialState.levels[0].sclk.sclk_value =
1281 		cpu_to_be32(initial_state->low.sclk);
1282 
1283 	table->initialState.levels[0].arbValue = MC_CG_ARB_FREQ_F0;
1284 
1285 	table->initialState.levels[0].ACIndex = 0;
1286 
1287 	cypress_populate_voltage_value(rdev,
1288 				       &eg_pi->vddc_voltage_table,
1289 				       initial_state->low.vddc,
1290 				       &table->initialState.levels[0].vddc);
1291 
1292 	if (eg_pi->vddci_control)
1293 		cypress_populate_voltage_value(rdev,
1294 					       &eg_pi->vddci_voltage_table,
1295 					       initial_state->low.vddci,
1296 					       &table->initialState.levels[0].vddci);
1297 
1298 	cypress_populate_initial_mvdd_value(rdev,
1299 					    &table->initialState.levels[0].mvdd);
1300 
1301 	a_t = CG_R(0xffff) | CG_L(0);
1302 	table->initialState.levels[0].aT = cpu_to_be32(a_t);
1303 
1304 	table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp);
1305 
1306 
1307 	if (pi->boot_in_gen2)
1308 		table->initialState.levels[0].gen2PCIE = 1;
1309 	else
1310 		table->initialState.levels[0].gen2PCIE = 0;
1311 	if (initial_state->low.flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2)
1312 		table->initialState.levels[0].gen2XSP = 1;
1313 	else
1314 		table->initialState.levels[0].gen2XSP = 0;
1315 
1316 	if (pi->mem_gddr5) {
1317 		table->initialState.levels[0].strobeMode =
1318 			cypress_get_strobe_mode_settings(rdev,
1319 							 initial_state->low.mclk);
1320 
1321 		if (initial_state->low.mclk > pi->mclk_edc_enable_threshold)
1322 			table->initialState.levels[0].mcFlags = SMC_MC_EDC_RD_FLAG | SMC_MC_EDC_WR_FLAG;
1323 		else
1324 			table->initialState.levels[0].mcFlags = 0;
1325 	}
1326 
1327 	table->initialState.levels[1] = table->initialState.levels[0];
1328 	table->initialState.levels[2] = table->initialState.levels[0];
1329 
1330 	table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC;
1331 
1332 	return 0;
1333 }
1334 
1335 int cypress_populate_smc_acpi_state(struct radeon_device *rdev,
1336 				    RV770_SMC_STATETABLE *table)
1337 {
1338 	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1339 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1340 	u32 mpll_ad_func_cntl =
1341 		pi->clk_regs.rv770.mpll_ad_func_cntl;
1342 	u32 mpll_ad_func_cntl_2 =
1343 		pi->clk_regs.rv770.mpll_ad_func_cntl_2;
1344 	u32 mpll_dq_func_cntl =
1345 		pi->clk_regs.rv770.mpll_dq_func_cntl;
1346 	u32 mpll_dq_func_cntl_2 =
1347 		pi->clk_regs.rv770.mpll_dq_func_cntl_2;
1348 	u32 spll_func_cntl =
1349 		pi->clk_regs.rv770.cg_spll_func_cntl;
1350 	u32 spll_func_cntl_2 =
1351 		pi->clk_regs.rv770.cg_spll_func_cntl_2;
1352 	u32 spll_func_cntl_3 =
1353 		pi->clk_regs.rv770.cg_spll_func_cntl_3;
1354 	u32 mclk_pwrmgt_cntl =
1355 		pi->clk_regs.rv770.mclk_pwrmgt_cntl;
1356 	u32 dll_cntl =
1357 		pi->clk_regs.rv770.dll_cntl;
1358 
1359 	table->ACPIState = table->initialState;
1360 
1361 	table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC;
1362 
1363 	if (pi->acpi_vddc) {
1364 		cypress_populate_voltage_value(rdev,
1365 					       &eg_pi->vddc_voltage_table,
1366 					       pi->acpi_vddc,
1367 					       &table->ACPIState.levels[0].vddc);
1368 		if (pi->pcie_gen2) {
1369 			if (pi->acpi_pcie_gen2)
1370 				table->ACPIState.levels[0].gen2PCIE = 1;
1371 			else
1372 				table->ACPIState.levels[0].gen2PCIE = 0;
1373 		} else
1374 			table->ACPIState.levels[0].gen2PCIE = 0;
1375 		if (pi->acpi_pcie_gen2)
1376 			table->ACPIState.levels[0].gen2XSP = 1;
1377 		else
1378 			table->ACPIState.levels[0].gen2XSP = 0;
1379 	} else {
1380 		cypress_populate_voltage_value(rdev,
1381 					       &eg_pi->vddc_voltage_table,
1382 					       pi->min_vddc_in_table,
1383 					       &table->ACPIState.levels[0].vddc);
1384 		table->ACPIState.levels[0].gen2PCIE = 0;
1385 	}
1386 
1387 	if (eg_pi->acpi_vddci) {
1388 		if (eg_pi->vddci_control) {
1389 			cypress_populate_voltage_value(rdev,
1390 						       &eg_pi->vddci_voltage_table,
1391 						       eg_pi->acpi_vddci,
1392 						       &table->ACPIState.levels[0].vddci);
1393 		}
1394 	}
1395 
1396 	mpll_ad_func_cntl &= ~PDNB;
1397 
1398 	mpll_ad_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN;
1399 
1400 	if (pi->mem_gddr5)
1401 		mpll_dq_func_cntl &= ~PDNB;
1402 	mpll_dq_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN | BYPASS;
1403 
1404 	mclk_pwrmgt_cntl |= (MRDCKA0_RESET |
1405 			     MRDCKA1_RESET |
1406 			     MRDCKB0_RESET |
1407 			     MRDCKB1_RESET |
1408 			     MRDCKC0_RESET |
1409 			     MRDCKC1_RESET |
1410 			     MRDCKD0_RESET |
1411 			     MRDCKD1_RESET);
1412 
1413 	mclk_pwrmgt_cntl &= ~(MRDCKA0_PDNB |
1414 			      MRDCKA1_PDNB |
1415 			      MRDCKB0_PDNB |
1416 			      MRDCKB1_PDNB |
1417 			      MRDCKC0_PDNB |
1418 			      MRDCKC1_PDNB |
1419 			      MRDCKD0_PDNB |
1420 			      MRDCKD1_PDNB);
1421 
1422 	dll_cntl |= (MRDCKA0_BYPASS |
1423 		     MRDCKA1_BYPASS |
1424 		     MRDCKB0_BYPASS |
1425 		     MRDCKB1_BYPASS |
1426 		     MRDCKC0_BYPASS |
1427 		     MRDCKC1_BYPASS |
1428 		     MRDCKD0_BYPASS |
1429 		     MRDCKD1_BYPASS);
1430 
1431 	/* evergreen only */
1432 	if (rdev->family <= CHIP_HEMLOCK)
1433 		spll_func_cntl |= SPLL_RESET | SPLL_SLEEP | SPLL_BYPASS_EN;
1434 
1435 	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
1436 	spll_func_cntl_2 |= SCLK_MUX_SEL(4);
1437 
1438 	table->ACPIState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL =
1439 		cpu_to_be32(mpll_ad_func_cntl);
1440 	table->ACPIState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL_2 =
1441 		cpu_to_be32(mpll_ad_func_cntl_2);
1442 	table->ACPIState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL =
1443 		cpu_to_be32(mpll_dq_func_cntl);
1444 	table->ACPIState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL_2 =
1445 		cpu_to_be32(mpll_dq_func_cntl_2);
1446 	table->ACPIState.levels[0].mclk.mclk770.vMCLK_PWRMGT_CNTL =
1447 		cpu_to_be32(mclk_pwrmgt_cntl);
1448 	table->ACPIState.levels[0].mclk.mclk770.vDLL_CNTL = cpu_to_be32(dll_cntl);
1449 
1450 	table->ACPIState.levels[0].mclk.mclk770.mclk_value = 0;
1451 
1452 	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
1453 		cpu_to_be32(spll_func_cntl);
1454 	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
1455 		cpu_to_be32(spll_func_cntl_2);
1456 	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
1457 		cpu_to_be32(spll_func_cntl_3);
1458 
1459 	table->ACPIState.levels[0].sclk.sclk_value = 0;
1460 
1461 	cypress_populate_mvdd_value(rdev, 0, &table->ACPIState.levels[0].mvdd);
1462 
1463 	if (eg_pi->dynamic_ac_timing)
1464 		table->ACPIState.levels[0].ACIndex = 1;
1465 
1466 	table->ACPIState.levels[1] = table->ACPIState.levels[0];
1467 	table->ACPIState.levels[2] = table->ACPIState.levels[0];
1468 
1469 	return 0;
1470 }
1471 
1472 static void cypress_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
1473 							  struct atom_voltage_table *voltage_table)
1474 {
1475 	unsigned int i, diff;
1476 
1477 	if (voltage_table->count <= MAX_NO_VREG_STEPS)
1478 		return;
1479 
1480 	diff = voltage_table->count - MAX_NO_VREG_STEPS;
1481 
1482 	for (i = 0; i < MAX_NO_VREG_STEPS; i++)
1483 		voltage_table->entries[i] = voltage_table->entries[i + diff];
1484 
1485 	voltage_table->count = MAX_NO_VREG_STEPS;
1486 }
1487 
1488 int cypress_construct_voltage_tables(struct radeon_device *rdev)
1489 {
1490 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1491 	int ret;
1492 
1493 	ret = radeon_atom_get_voltage_table(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, 0,
1494 					    &eg_pi->vddc_voltage_table);
1495 	if (ret)
1496 		return ret;
1497 
1498 	if (eg_pi->vddc_voltage_table.count > MAX_NO_VREG_STEPS)
1499 		cypress_trim_voltage_table_to_fit_state_table(rdev,
1500 							      &eg_pi->vddc_voltage_table);
1501 
1502 	if (eg_pi->vddci_control) {
1503 		ret = radeon_atom_get_voltage_table(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0,
1504 						    &eg_pi->vddci_voltage_table);
1505 		if (ret)
1506 			return ret;
1507 
1508 		if (eg_pi->vddci_voltage_table.count > MAX_NO_VREG_STEPS)
1509 			cypress_trim_voltage_table_to_fit_state_table(rdev,
1510 								      &eg_pi->vddci_voltage_table);
1511 	}
1512 
1513 	return 0;
1514 }
1515 
1516 static void cypress_populate_smc_voltage_table(struct radeon_device *rdev,
1517 					       struct atom_voltage_table *voltage_table,
1518 					       RV770_SMC_STATETABLE *table)
1519 {
1520 	unsigned int i;
1521 
1522 	for (i = 0; i < voltage_table->count; i++) {
1523 		table->highSMIO[i] = 0;
1524 		table->lowSMIO[i] |= cpu_to_be32(voltage_table->entries[i].smio_low);
1525 	}
1526 }
1527 
1528 int cypress_populate_smc_voltage_tables(struct radeon_device *rdev,
1529 					RV770_SMC_STATETABLE *table)
1530 {
1531 	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1532 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1533 	unsigned char i;
1534 
1535 	if (eg_pi->vddc_voltage_table.count) {
1536 		cypress_populate_smc_voltage_table(rdev,
1537 						   &eg_pi->vddc_voltage_table,
1538 						   table);
1539 
1540 		table->voltageMaskTable.highMask[RV770_SMC_VOLTAGEMASK_VDDC] = 0;
1541 		table->voltageMaskTable.lowMask[RV770_SMC_VOLTAGEMASK_VDDC] =
1542 			cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
1543 
1544 		for (i = 0; i < eg_pi->vddc_voltage_table.count; i++) {
1545 			if (pi->max_vddc_in_table <=
1546 			    eg_pi->vddc_voltage_table.entries[i].value) {
1547 				table->maxVDDCIndexInPPTable = i;
1548 				break;
1549 			}
1550 		}
1551 	}
1552 
1553 	if (eg_pi->vddci_voltage_table.count) {
1554 		cypress_populate_smc_voltage_table(rdev,
1555 						   &eg_pi->vddci_voltage_table,
1556 						   table);
1557 
1558 		table->voltageMaskTable.highMask[RV770_SMC_VOLTAGEMASK_VDDCI] = 0;
1559 		table->voltageMaskTable.lowMask[RV770_SMC_VOLTAGEMASK_VDDCI] =
1560 			cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
1561 	}
1562 
1563 	return 0;
1564 }
1565 
1566 static u32 cypress_get_mclk_split_point(struct atom_memory_info *memory_info)
1567 {
1568 	if ((memory_info->mem_type == MEM_TYPE_GDDR3) ||
1569 	    (memory_info->mem_type == MEM_TYPE_DDR3))
1570 		return 30000;
1571 
1572 	return 0;
1573 }
1574 
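/*
 * Derive the MVDD high/low SMIO indices from the BACKBIAS pad straps
 * and look up the mclk split frequency for the installed memory module.
 * MVDD control is disabled if the backbias pad is not enabled, the
 * memory info cannot be read, or no split point exists.
 */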
1575 int cypress_get_mvdd_configuration(struct radeon_device *rdev)
1576 {
1577 	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1578 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1579 	u8 module_index;
1580 	struct atom_memory_info memory_info;
1581 	u32 tmp = RREG32(GENERAL_PWRMGT);
1582 
1583 	if (!(tmp & BACKBIAS_PAD_EN)) {
1584 		eg_pi->mvdd_high_index = 0;
1585 		eg_pi->mvdd_low_index = 1;
1586 		pi->mvdd_control = false;
1587 		return 0;
1588 	}
1589 
1590 	if (tmp & BACKBIAS_VALUE)
1591 		eg_pi->mvdd_high_index = 1;
1592 	else
1593 		eg_pi->mvdd_high_index = 0;
1594 
1595 	eg_pi->mvdd_low_index =
1596 		(eg_pi->mvdd_high_index == 0) ? 1 : 0;
1597 
1598 	module_index = rv770_get_memory_module_index(rdev);
1599 
1600 	if (radeon_atom_get_memory_info(rdev, module_index, &memory_info)) {
1601 		pi->mvdd_control = false;
1602 		return 0;
1603 	}
1604 
1605 	pi->mvdd_split_frequency =
1606 		cypress_get_mclk_split_point(&memory_info);
1607 
1608 	if (pi->mvdd_split_frequency == 0) {
1609 		pi->mvdd_control = false;
1610 		return 0;
1611 	}
1612 
1613 	return 0;
1614 }
1615 
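/*
 * Build the SMC state table for the boot state: voltage tables, thermal
 * protection type, platform flags, and the initial and ACPI states,
 * then upload it to SMC RAM.
 */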
1616 static int cypress_init_smc_table(struct radeon_device *rdev,
1617 				  struct radeon_ps *radeon_boot_state)
1618 {
1619 	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1620 	RV770_SMC_STATETABLE *table = &pi->smc_statetable;
1621 	int ret;
1622 
1623 	memset(table, 0, sizeof(RV770_SMC_STATETABLE));
1624 
1625 	cypress_populate_smc_voltage_tables(rdev, table);
1626 
1627 	switch (rdev->pm.int_thermal_type) {
1628 	case THERMAL_TYPE_EVERGREEN:
1629 	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
1630 		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
1631 		break;
1632 	case THERMAL_TYPE_NONE:
1633 		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
1634 		break;
1635 	default:
1636 		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
1637 		break;
1638 	}
1639 
1640 	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
1641 		table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
1642 
1643 	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
1644 		table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT;
1645 
1646 	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
1647 		table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
1648 
1649 	if (pi->mem_gddr5)
1650 		table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
1651 
1652 	ret = cypress_populate_smc_initial_state(rdev, radeon_boot_state, table);
1653 	if (ret)
1654 		return ret;
1655 
1656 	ret = cypress_populate_smc_acpi_state(rdev, table);
1657 	if (ret)
1658 		return ret;
1659 
1660 	table->driverState = table->initialState;
1661 
1662 	return rv770_copy_bytes_to_smc(rdev,
1663 				       pi->state_table_start,
1664 				       (u8 *)table, sizeof(RV770_SMC_STATETABLE),
1665 				       pi->sram_end);
1666 }
1667 
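/*
 * Convert the boot state and the driver's MC register table into SMC
 * format and copy the result to SMC RAM.
 */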
1668 int cypress_populate_mc_reg_table(struct radeon_device *rdev,
1669 				  struct radeon_ps *radeon_boot_state)
1670 {
1671 	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1672 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1673 	struct rv7xx_ps *boot_state = rv770_get_ps(radeon_boot_state);
1674 	SMC_Evergreen_MCRegisters mc_reg_table = { 0 };
1675 
1676 	rv770_write_smc_soft_register(rdev,
1677 				      RV770_SMC_SOFT_REGISTER_seq_index, 1);
1678 
1679 	cypress_populate_mc_reg_addresses(rdev, &mc_reg_table);
1680 
1681 	cypress_convert_mc_reg_table_entry_to_smc(rdev,
1682 						  &boot_state->low,
1683 						  &mc_reg_table.data[0]);
1684 
1685 	cypress_convert_mc_registers(&eg_pi->mc_reg_table.mc_reg_table_entry[0],
1686 				     &mc_reg_table.data[1], eg_pi->mc_reg_table.last,
1687 				     eg_pi->mc_reg_table.valid_flag);
1688 
1689 	cypress_convert_mc_reg_table_to_smc(rdev, radeon_boot_state, &mc_reg_table);
1690 
1691 	return rv770_copy_bytes_to_smc(rdev, eg_pi->mc_reg_table_start,
1692 				       (u8 *)&mc_reg_table, sizeof(SMC_Evergreen_MCRegisters),
1693 				       pi->sram_end);
1694 }
1695 
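/*
 * Read the state table, soft register, and MC register table offsets
 * from the SMC firmware header.
 */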
1696 int cypress_get_table_locations(struct radeon_device *rdev)
1697 {
1698 	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1699 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1700 	u32 tmp;
1701 	int ret;
1702 
1703 	ret = rv770_read_smc_sram_dword(rdev,
1704 					EVERGREEN_SMC_FIRMWARE_HEADER_LOCATION +
1705 					EVERGREEN_SMC_FIRMWARE_HEADER_stateTable,
1706 					&tmp, pi->sram_end);
1707 	if (ret)
1708 		return ret;
1709 
1710 	pi->state_table_start = (u16)tmp;
1711 
1712 	ret = rv770_read_smc_sram_dword(rdev,
1713 					EVERGREEN_SMC_FIRMWARE_HEADER_LOCATION +
1714 					EVERGREEN_SMC_FIRMWARE_HEADER_softRegisters,
1715 					&tmp, pi->sram_end);
1716 	if (ret)
1717 		return ret;
1718 
1719 	pi->soft_regs_start = (u16)tmp;
1720 
1721 	ret = rv770_read_smc_sram_dword(rdev,
1722 					EVERGREEN_SMC_FIRMWARE_HEADER_LOCATION +
1723 					EVERGREEN_SMC_FIRMWARE_HEADER_mcRegisterTable,
1724 					&tmp, pi->sram_end);
1725 	if (ret)
1726 		return ret;
1727 
1728 	eg_pi->mc_reg_table_start = (u16)tmp;
1729 
1730 	return 0;
1731 }
1732 
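/*
 * Program the display gap control: ignore the normal display gaps and
 * set the memory-change gap to vblank on DISP1 and ignore on DISP2.
 */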
1733 void cypress_enable_display_gap(struct radeon_device *rdev)
1734 {
1735 	u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);
1736 
1737 	tmp &= ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
1738 	tmp |= (DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
1739 		DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE));
1740 
1741 	tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);
1742 	tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK) |
1743 		DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE));
1744 	WREG32(CG_DISPLAY_GAP_CNTL, tmp);
1745 }
1746 
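/*
 * Pick VBLANK_OR_WM or IGNORE display gaps based on the number of
 * active crtcs, retarget the DISP1 slow clock select at an active crtc
 * if needed, and tell the SMC whether any displays are active.
 */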
1747 static void cypress_program_display_gap(struct radeon_device *rdev)
1748 {
1749 	u32 tmp, pipe;
1750 	int i;
1751 
1752 	tmp = RREG32(CG_DISPLAY_GAP_CNTL) & ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
1753 	if (rdev->pm.dpm.new_active_crtc_count > 0)
1754 		tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
1755 	else
1756 		tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE);
1757 
1758 	if (rdev->pm.dpm.new_active_crtc_count > 1)
1759 		tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
1760 	else
1761 		tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE);
1762 
1763 	WREG32(CG_DISPLAY_GAP_CNTL, tmp);
1764 
1765 	tmp = RREG32(DCCG_DISP_SLOW_SELECT_REG);
1766 	pipe = (tmp & DCCG_DISP1_SLOW_SELECT_MASK) >> DCCG_DISP1_SLOW_SELECT_SHIFT;
1767 
1768 	if ((rdev->pm.dpm.new_active_crtc_count > 0) &&
1769 	    (!(rdev->pm.dpm.new_active_crtcs & (1 << pipe)))) {
1770 		/* find the first active crtc */
1771 		for (i = 0; i < rdev->num_crtc; i++) {
1772 			if (rdev->pm.dpm.new_active_crtcs & (1 << i))
1773 				break;
1774 		}
1775 		if (i == rdev->num_crtc)
1776 			pipe = 0;
1777 		else
1778 			pipe = i;
1779 
1780 		tmp &= ~DCCG_DISP1_SLOW_SELECT_MASK;
1781 		tmp |= DCCG_DISP1_SLOW_SELECT(pipe);
1782 		WREG32(DCCG_DISP_SLOW_SELECT_REG, tmp);
1783 	}
1784 
1785 	cypress_notify_smc_display_change(rdev, rdev->pm.dpm.new_active_crtc_count > 0);
1786 }
1787 
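/*
 * Cache the clock and voltage SMIO registers, determine the memory type
 * and max VDDC, advertise PCIe gen2 capability when ACPI performance
 * requests are supported, and enable ACPI power management.
 */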
1788 void cypress_dpm_setup_asic(struct radeon_device *rdev)
1789 {
1790 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1791 
1792 	rv740_read_clock_registers(rdev);
1793 	rv770_read_voltage_smio_registers(rdev);
1794 	rv770_get_max_vddc(rdev);
1795 	rv770_get_memory_type(rdev);
1796 
1797 	if (eg_pi->pcie_performance_request)
1798 		eg_pi->pcie_performance_request_registered = false;
1799 
1800 	if (eg_pi->pcie_performance_request)
1801 		cypress_advertise_gen2_capability(rdev);
1802 
1803 	rv770_get_pcie_gen2_status(rdev);
1804 
1805 	rv770_enable_acpi_pm(rdev);
1806 }
1807 
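/*
 * Bring up dpm: build the voltage, MVDD and MC register tables, program
 * the power management parameters, upload the firmware and SMC tables,
 * start the SMC and dpm, and enable clock gating and the thermal
 * throttle source.
 */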
1808 int cypress_dpm_enable(struct radeon_device *rdev)
1809 {
1810 	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1811 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1812 	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
1813 	int ret;
1814 
1815 	if (pi->gfx_clock_gating)
1816 		rv770_restore_cgcg(rdev);
1817 
1818 	if (rv770_dpm_enabled(rdev))
1819 		return -EINVAL;
1820 
1821 	if (pi->voltage_control) {
1822 		rv770_enable_voltage_control(rdev, true);
1823 		ret = cypress_construct_voltage_tables(rdev);
1824 		if (ret) {
1825 			DRM_ERROR("cypress_construct_voltage_tables failed\n");
1826 			return ret;
1827 		}
1828 	}
1829 
1830 	if (pi->mvdd_control) {
1831 		ret = cypress_get_mvdd_configuration(rdev);
1832 		if (ret) {
1833 			DRM_ERROR("cypress_get_mvdd_configuration failed\n");
1834 			return ret;
1835 		}
1836 	}
1837 
1838 	if (eg_pi->dynamic_ac_timing) {
1839 		cypress_set_mc_reg_address_table(rdev);
1840 		cypress_force_mc_use_s0(rdev, boot_ps);
1841 		ret = cypress_initialize_mc_reg_table(rdev);
1842 		if (ret)
1843 			eg_pi->dynamic_ac_timing = false;
1844 		cypress_force_mc_use_s1(rdev, boot_ps);
1845 	}
1846 
1847 	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_BACKBIAS)
1848 		rv770_enable_backbias(rdev, true);
1849 
1850 	if (pi->dynamic_ss)
1851 		cypress_enable_spread_spectrum(rdev, true);
1852 
1853 	if (pi->thermal_protection)
1854 		rv770_enable_thermal_protection(rdev, true);
1855 
1856 	rv770_setup_bsp(rdev);
1857 	rv770_program_git(rdev);
1858 	rv770_program_tp(rdev);
1859 	rv770_program_tpp(rdev);
1860 	rv770_program_sstp(rdev);
1861 	rv770_program_engine_speed_parameters(rdev);
1862 	cypress_enable_display_gap(rdev);
1863 	rv770_program_vc(rdev);
1864 
1865 	if (pi->dynamic_pcie_gen2)
1866 		cypress_enable_dynamic_pcie_gen2(rdev, true);
1867 
1868 	ret = rv770_upload_firmware(rdev);
1869 	if (ret) {
1870 		DRM_ERROR("rv770_upload_firmware failed\n");
1871 		return ret;
1872 	}
1873 
1874 	ret = cypress_get_table_locations(rdev);
1875 	if (ret) {
1876 		DRM_ERROR("cypress_get_table_locations failed\n");
1877 		return ret;
1878 	}
1879 	ret = cypress_init_smc_table(rdev, boot_ps);
1880 	if (ret) {
1881 		DRM_ERROR("cypress_init_smc_table failed\n");
1882 		return ret;
1883 	}
1884 	if (eg_pi->dynamic_ac_timing) {
1885 		ret = cypress_populate_mc_reg_table(rdev, boot_ps);
1886 		if (ret) {
1887 			DRM_ERROR("cypress_populate_mc_reg_table failed\n");
1888 			return ret;
1889 		}
1890 	}
1891 
1892 	cypress_program_response_times(rdev);
1893 
1894 	r7xx_start_smc(rdev);
1895 
1896 	ret = cypress_notify_smc_display_change(rdev, false);
1897 	if (ret) {
1898 		DRM_ERROR("cypress_notify_smc_display_change failed\n");
1899 		return ret;
1900 	}
1901 	cypress_enable_sclk_control(rdev, true);
1902 
1903 	if (eg_pi->memory_transition)
1904 		cypress_enable_mclk_control(rdev, true);
1905 
1906 	cypress_start_dpm(rdev);
1907 
1908 	if (pi->gfx_clock_gating)
1909 		cypress_gfx_clock_gating_enable(rdev, true);
1910 
1911 	if (pi->mg_clock_gating)
1912 		cypress_mg_clock_gating_enable(rdev, true);
1913 
1914 	rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
1915 
1916 	return 0;
1917 }
1918 
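/*
 * Tear down dpm: disable thermal protection, dynamic PCIe gen2, the
 * thermal interrupt and clock gating, stop dpm and the SMC, and restore
 * the boot MC and SMIO settings.
 */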
1919 void cypress_dpm_disable(struct radeon_device *rdev)
1920 {
1921 	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1922 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1923 	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
1924 
1925 	if (!rv770_dpm_enabled(rdev))
1926 		return;
1927 
1928 	rv770_clear_vc(rdev);
1929 
1930 	if (pi->thermal_protection)
1931 		rv770_enable_thermal_protection(rdev, false);
1932 
1933 	if (pi->dynamic_pcie_gen2)
1934 		cypress_enable_dynamic_pcie_gen2(rdev, false);
1935 
1936 	if (rdev->irq.installed &&
1937 	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
1938 		rdev->irq.dpm_thermal = false;
1939 		radeon_irq_set(rdev);
1940 	}
1941 
1942 	if (pi->gfx_clock_gating)
1943 		cypress_gfx_clock_gating_enable(rdev, false);
1944 
1945 	if (pi->mg_clock_gating)
1946 		cypress_mg_clock_gating_enable(rdev, false);
1947 
1948 	rv770_stop_dpm(rdev);
1949 	r7xx_stop_smc(rdev);
1950 
1951 	cypress_enable_spread_spectrum(rdev, false);
1952 
1953 	if (eg_pi->dynamic_ac_timing)
1954 		cypress_force_mc_use_s1(rdev, boot_ps);
1955 
1956 	rv770_reset_smio_status(rdev);
1957 }
1958 
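/*
 * Switch to the requested power state: halt the SMC, upload the new SW
 * state (and MC register table), program the memory timing parameters,
 * then resume the SMC and trigger the state change.  PCIe link speed
 * changes are requested around the switch when supported.
 */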
1959 int cypress_dpm_set_power_state(struct radeon_device *rdev)
1960 {
1961 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1962 	struct radeon_ps *new_ps = rdev->pm.dpm.requested_ps;
1963 	struct radeon_ps *old_ps = rdev->pm.dpm.current_ps;
1964 	int ret;
1965 
1966 	ret = rv770_restrict_performance_levels_before_switch(rdev);
1967 	if (ret) {
1968 		DRM_ERROR("rv770_restrict_performance_levels_before_switch failed\n");
1969 		return ret;
1970 	}
1971 	if (eg_pi->pcie_performance_request)
1972 		cypress_notify_link_speed_change_before_state_change(rdev, new_ps, old_ps);
1973 
1974 	rv770_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
1975 	ret = rv770_halt_smc(rdev);
1976 	if (ret) {
1977 		DRM_ERROR("rv770_halt_smc failed\n");
1978 		return ret;
1979 	}
1980 	ret = cypress_upload_sw_state(rdev, new_ps);
1981 	if (ret) {
1982 		DRM_ERROR("cypress_upload_sw_state failed\n");
1983 		return ret;
1984 	}
1985 	if (eg_pi->dynamic_ac_timing) {
1986 		ret = cypress_upload_mc_reg_table(rdev, new_ps);
1987 		if (ret) {
1988 			DRM_ERROR("cypress_upload_mc_reg_table failed\n");
1989 			return ret;
1990 		}
1991 	}
1992 
1993 	cypress_program_memory_timing_parameters(rdev, new_ps);
1994 
1995 	ret = rv770_resume_smc(rdev);
1996 	if (ret) {
1997 		DRM_ERROR("rv770_resume_smc failed\n");
1998 		return ret;
1999 	}
2000 	ret = rv770_set_sw_state(rdev);
2001 	if (ret) {
2002 		DRM_ERROR("rv770_set_sw_state failed\n");
2003 		return ret;
2004 	}
2005 	rv770_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
2006 
2007 	if (eg_pi->pcie_performance_request)
2008 		cypress_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
2009 
2010 	return 0;
2011 }
2012 
2013 #if 0
2014 void cypress_dpm_reset_asic(struct radeon_device *rdev)
2015 {
2016 	rv770_restrict_performance_levels_before_switch(rdev);
2017 	rv770_set_boot_state(rdev);
2018 }
2019 #endif
2020 
2021 void cypress_dpm_display_configuration_changed(struct radeon_device *rdev)
2022 {
2023 	cypress_program_display_gap(rdev);
2024 }
2025 
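/*
 * Allocate the power info, parse the BIOS power tables, and fill in the
 * default dpm parameters and feature flags for Cypress-class parts.
 */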
2026 int cypress_dpm_init(struct radeon_device *rdev)
2027 {
2028 	struct rv7xx_power_info *pi;
2029 	struct evergreen_power_info *eg_pi;
2030 	struct atom_clock_dividers dividers;
2031 	int ret;
2032 
2033 	eg_pi = kzalloc(sizeof(struct evergreen_power_info), GFP_KERNEL);
2034 	if (eg_pi == NULL)
2035 		return -ENOMEM;
2036 	rdev->pm.dpm.priv = eg_pi;
2037 	pi = &eg_pi->rv7xx;
2038 
2039 	rv770_get_max_vddc(rdev);
2040 
2041 	eg_pi->ulv.supported = false;
2042 	pi->acpi_vddc = 0;
2043 	eg_pi->acpi_vddci = 0;
2044 	pi->min_vddc_in_table = 0;
2045 	pi->max_vddc_in_table = 0;
2046 
2047 	ret = r600_get_platform_caps(rdev);
2048 	if (ret)
2049 		return ret;
2050 
2051 	ret = rv7xx_parse_power_table(rdev);
2052 	if (ret)
2053 		return ret;
2054 
2055 	if (rdev->pm.dpm.voltage_response_time == 0)
2056 		rdev->pm.dpm.voltage_response_time = R600_VOLTAGERESPONSETIME_DFLT;
2057 	if (rdev->pm.dpm.backbias_response_time == 0)
2058 		rdev->pm.dpm.backbias_response_time = R600_BACKBIASRESPONSETIME_DFLT;
2059 
2060 	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
2061 					     0, false, &dividers);
2062 	if (ret)
2063 		pi->ref_div = dividers.ref_div + 1;
2064 	else
2065 		pi->ref_div = R600_REFERENCEDIVIDER_DFLT;
2066 
2067 	pi->mclk_strobe_mode_threshold = 40000;
2068 	pi->mclk_edc_enable_threshold = 40000;
2069 	eg_pi->mclk_edc_wr_enable_threshold = 40000;
2070 
2071 	pi->rlp = RV770_RLP_DFLT;
2072 	pi->rmp = RV770_RMP_DFLT;
2073 	pi->lhp = RV770_LHP_DFLT;
2074 	pi->lmp = RV770_LMP_DFLT;
2075 
2076 	pi->voltage_control =
2077 		radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, 0);
2078 
2079 	pi->mvdd_control =
2080 		radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC, 0);
2081 
2082 	eg_pi->vddci_control =
2083 		radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0);
2084 
2085 	rv770_get_engine_memory_ss(rdev);
2086 
2087 	pi->asi = RV770_ASI_DFLT;
2088 	pi->pasi = CYPRESS_HASI_DFLT;
2089 	pi->vrc = CYPRESS_VRC_DFLT;
2090 
2091 	pi->power_gating = false;
2092 
2093 	if ((rdev->family == CHIP_CYPRESS) ||
2094 	    (rdev->family == CHIP_HEMLOCK))
2095 		pi->gfx_clock_gating = false;
2096 	else
2097 		pi->gfx_clock_gating = true;
2098 
2099 	pi->mg_clock_gating = true;
2100 	pi->mgcgtssm = true;
2101 	eg_pi->ls_clock_gating = false;
2102 	eg_pi->sclk_deep_sleep = false;
2103 
2104 	pi->dynamic_pcie_gen2 = true;
2105 
2106 	if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
2107 		pi->thermal_protection = true;
2108 	else
2109 		pi->thermal_protection = false;
2110 
2111 	pi->display_gap = true;
2112 
2113 	if (rdev->flags & RADEON_IS_MOBILITY)
2114 		pi->dcodt = true;
2115 	else
2116 		pi->dcodt = false;
2117 
2118 	pi->ulps = true;
2119 
2120 	eg_pi->dynamic_ac_timing = true;
2121 	eg_pi->abm = true;
2122 	eg_pi->mcls = true;
2123 	eg_pi->light_sleep = true;
2124 	eg_pi->memory_transition = true;
2125 #if defined(CONFIG_ACPI)
2126 	eg_pi->pcie_performance_request =
2127 		radeon_acpi_is_pcie_performance_request_supported(rdev);
2128 #else
2129 	eg_pi->pcie_performance_request = false;
2130 #endif
2131 
2132 	if ((rdev->family == CHIP_CYPRESS) ||
2133 	    (rdev->family == CHIP_HEMLOCK) ||
2134 	    (rdev->family == CHIP_JUNIPER))
2135 		eg_pi->dll_default_on = true;
2136 	else
2137 		eg_pi->dll_default_on = false;
2138 
2139 	eg_pi->sclk_deep_sleep = false;
2140 	pi->mclk_stutter_mode_threshold = 0;
2141 
2142 	pi->sram_end = SMC_RAM_END;
2143 
2144 	return 0;
2145 }
2146 
2147 void cypress_dpm_fini(struct radeon_device *rdev)
2148 {
2149 	int i;
2150 
2151 	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
2152 		kfree(rdev->pm.dpm.ps[i].ps_priv);
2153 	}
2154 	kfree(rdev->pm.dpm.ps);
2155 	kfree(rdev->pm.dpm.priv);
2156 }
2157 
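/*
 * Report whether the current vblank period is too short to switch
 * memory clocks (GDDR5 only; the non-GDDR5 limit is disabled).
 */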
2158 bool cypress_dpm_vblank_too_short(struct radeon_device *rdev)
2159 {
2160 	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
2161 	u32 vblank_time = r600_dpm_get_vblank_time(rdev);
2162 	/* we never hit the non-gddr5 limit so disable it */
2163 	u32 switch_limit = pi->mem_gddr5 ? 450 : 0;
2164 
2165 	if (vblank_time < switch_limit)
2166 		return true;
2167 	else
2168 		return false;
2169 
2170 }
2171