1 /*
2  * Copyright 2011 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Alex Deucher
23  */
24 
25 #include "amdgpu.h"
26 #include "amdgpu_atombios.h"
27 #include "amdgpu_i2c.h"
28 #include "amdgpu_dpm.h"
29 #include "atom.h"
30 #include "amd_pcie.h"
31 #include "amdgpu_display.h"
32 #include "hwmgr.h"
33 #include <linux/power_supply.h>
34 
35 #define WIDTH_4K 3840
36 
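/* Print a human-readable decode of the ATOM power state classification
 * flags (UI class plus any internal class bits) to the kernel log. */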
37 void amdgpu_dpm_print_class_info(u32 class, u32 class2)
38 {
39 	const char *s;
40 
41 	switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
42 	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
43 	default:
44 		s = "none";
45 		break;
46 	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
47 		s = "battery";
48 		break;
49 	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
50 		s = "balanced";
51 		break;
52 	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
53 		s = "performance";
54 		break;
55 	}
56 	printk("\tui class: %s\n", s);
57 	printk("\tinternal class:");
58 	if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
59 	    (class2 == 0))
60 		pr_cont(" none");
61 	else {
62 		if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
63 			pr_cont(" boot");
64 		if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
65 			pr_cont(" thermal");
66 		if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
67 			pr_cont(" limited_pwr");
68 		if (class & ATOM_PPLIB_CLASSIFICATION_REST)
69 			pr_cont(" rest");
70 		if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
71 			pr_cont(" forced");
72 		if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
73 			pr_cont(" 3d_perf");
74 		if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
75 			pr_cont(" ovrdrv");
76 		if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
77 			pr_cont(" uvd");
78 		if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
79 			pr_cont(" 3d_low");
80 		if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
81 			pr_cont(" acpi");
82 		if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
83 			pr_cont(" uvd_hd2");
84 		if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
85 			pr_cont(" uvd_hd");
86 		if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
87 			pr_cont(" uvd_sd");
88 		if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
89 			pr_cont(" limited_pwr2");
90 		if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
91 			pr_cont(" ulv");
92 		if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
93 			pr_cont(" uvd_mvc");
94 	}
95 	pr_cont("\n");
96 }
97 
98 void amdgpu_dpm_print_cap_info(u32 caps)
99 {
100 	printk("\tcaps:");
101 	if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
102 		pr_cont(" single_disp");
103 	if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
104 		pr_cont(" video");
105 	if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
106 		pr_cont(" no_dc");
107 	pr_cont("\n");
108 }
109 
110 void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
111 				struct amdgpu_ps *rps)
112 {
113 	printk("\tstatus:");
114 	if (rps == adev->pm.dpm.current_ps)
115 		pr_cont(" c");
116 	if (rps == adev->pm.dpm.requested_ps)
117 		pr_cont(" r");
118 	if (rps == adev->pm.dpm.boot_ps)
119 		pr_cont(" b");
120 	pr_cont("\n");
121 }
122 
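/* Walk the CRTC list and record which CRTCs are currently enabled in
 * new_active_crtcs (bitmask) and new_active_crtc_count. */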
123 void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev)
124 {
125 	struct drm_device *ddev = adev_to_drm(adev);
126 	struct drm_crtc *crtc;
127 	struct amdgpu_crtc *amdgpu_crtc;
128 
129 	adev->pm.dpm.new_active_crtcs = 0;
130 	adev->pm.dpm.new_active_crtc_count = 0;
131 	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
132 		list_for_each_entry(crtc,
133 				    &ddev->mode_config.crtc_list, head) {
134 			amdgpu_crtc = to_amdgpu_crtc(crtc);
135 			if (amdgpu_crtc->enabled) {
136 				adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
137 				adev->pm.dpm.new_active_crtc_count++;
138 			}
139 		}
140 	}
141 }
142 
143 
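/* Return the vblank time of the first enabled CRTC in microseconds:
 * htotal * (vblank_end - vdisplay + 2 * v_border) pixels divided by the
 * pixel clock in kHz.  0xffffffff is returned when no display is active. */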
144 u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
145 {
146 	struct drm_device *dev = adev_to_drm(adev);
147 	struct drm_crtc *crtc;
148 	struct amdgpu_crtc *amdgpu_crtc;
149 	u32 vblank_in_pixels;
150 	u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
151 
152 	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
153 		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
154 			amdgpu_crtc = to_amdgpu_crtc(crtc);
155 			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
156 				vblank_in_pixels =
157 					amdgpu_crtc->hw_mode.crtc_htotal *
158 					(amdgpu_crtc->hw_mode.crtc_vblank_end -
159 					amdgpu_crtc->hw_mode.crtc_vdisplay +
160 					(amdgpu_crtc->v_border * 2));
161 
162 				vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
163 				break;
164 			}
165 		}
166 	}
167 
168 	return vblank_time_us;
169 }
170 
171 u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
172 {
173 	struct drm_device *dev = adev_to_drm(adev);
174 	struct drm_crtc *crtc;
175 	struct amdgpu_crtc *amdgpu_crtc;
176 	u32 vrefresh = 0;
177 
178 	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
179 		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
180 			amdgpu_crtc = to_amdgpu_crtc(crtc);
181 			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
182 				vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
183 				break;
184 			}
185 		}
186 	}
187 
188 	return vrefresh;
189 }
190 
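/* True for sensor types handled entirely by the GPU's internal thermal
 * controller; combo parts (ADT7473/EMC2103 with internal) and external
 * sensors need special handling and return false. */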
191 bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
192 {
193 	switch (sensor) {
194 	case THERMAL_TYPE_RV6XX:
195 	case THERMAL_TYPE_RV770:
196 	case THERMAL_TYPE_EVERGREEN:
197 	case THERMAL_TYPE_SUMO:
198 	case THERMAL_TYPE_NI:
199 	case THERMAL_TYPE_SI:
200 	case THERMAL_TYPE_CI:
201 	case THERMAL_TYPE_KV:
202 		return true;
203 	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
204 	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
205 		return false; /* need special handling */
206 	case THERMAL_TYPE_NONE:
207 	case THERMAL_TYPE_EXTERNAL:
208 	case THERMAL_TYPE_EXTERNAL_GPIO:
209 	default:
210 		return false;
211 	}
212 }
213 
214 union power_info {
215 	struct _ATOM_POWERPLAY_INFO info;
216 	struct _ATOM_POWERPLAY_INFO_V2 info_2;
217 	struct _ATOM_POWERPLAY_INFO_V3 info_3;
218 	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
219 	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
220 	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
221 	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
222 	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
223 };
224 
225 union fan_info {
226 	struct _ATOM_PPLIB_FANTABLE fan;
227 	struct _ATOM_PPLIB_FANTABLE2 fan2;
228 	struct _ATOM_PPLIB_FANTABLE3 fan3;
229 };
230 
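/* Convert an ATOM clock/voltage dependency table into the driver's native
 * format; each clock is assembled from the 16-bit low word and the 8-bit
 * high byte of the ATOM record. */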
231 static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
232 					      ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
233 {
234 	u32 size = atom_table->ucNumEntries *
235 		sizeof(struct amdgpu_clock_voltage_dependency_entry);
236 	int i;
237 	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;
238 
239 	amdgpu_table->entries = kzalloc(size, GFP_KERNEL);
240 	if (!amdgpu_table->entries)
241 		return -ENOMEM;
242 
243 	entry = &atom_table->entries[0];
244 	for (i = 0; i < atom_table->ucNumEntries; i++) {
245 		amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
246 			(entry->ucClockHigh << 16);
247 		amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
248 		entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
249 			((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
250 	}
251 	amdgpu_table->count = atom_table->ucNumEntries;
252 
253 	return 0;
254 }
255 
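/* Cache the platform capability flags and backbias/voltage response times
 * from the PowerPlayInfo ATOM data table in adev->pm.dpm. */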
256 int amdgpu_get_platform_caps(struct amdgpu_device *adev)
257 {
258 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
259 	union power_info *power_info;
260 	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
261 	u16 data_offset;
262 	u8 frev, crev;
263 
264 	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
265 				   &frev, &crev, &data_offset))
266 		return -EINVAL;
267 	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
268 
269 	adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
270 	adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
271 	adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
272 
273 	return 0;
274 }
275 
276 /* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
277 #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
278 #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
279 #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
280 #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
281 #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
282 #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
283 #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
284 #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26
285 
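/* Parse the optional parts of the PowerPlay table: fan table, clock/voltage
 * dependency and phase shedding tables, CAC data, and the extended header
 * tables (VCE/UVD/SAMU/ACP limits, PPM and PowerTune data). */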
286 int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
287 {
288 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
289 	union power_info *power_info;
290 	union fan_info *fan_info;
291 	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
292 	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
293 	u16 data_offset;
294 	u8 frev, crev;
295 	int ret, i;
296 
297 	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
298 				   &frev, &crev, &data_offset))
299 		return -EINVAL;
300 	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
301 
302 	/* fan table */
303 	if (le16_to_cpu(power_info->pplib.usTableSize) >=
304 	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
305 		if (power_info->pplib3.usFanTableOffset) {
306 			fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
307 						      le16_to_cpu(power_info->pplib3.usFanTableOffset));
308 			adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
309 			adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
310 			adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
311 			adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
312 			adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
313 			adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
314 			adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
315 			if (fan_info->fan.ucFanTableFormat >= 2)
316 				adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
317 			else
318 				adev->pm.dpm.fan.t_max = 10900;
319 			adev->pm.dpm.fan.cycle_delay = 100000;
320 			if (fan_info->fan.ucFanTableFormat >= 3) {
321 				adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
322 				adev->pm.dpm.fan.default_max_fan_pwm =
323 					le16_to_cpu(fan_info->fan3.usFanPWMMax);
324 				adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
325 				adev->pm.dpm.fan.fan_output_sensitivity =
326 					le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
327 			}
328 			adev->pm.dpm.fan.ucode_fan_control = true;
329 		}
330 	}
331 
332 	/* clock dependency tables, shedding tables */
333 	if (le16_to_cpu(power_info->pplib.usTableSize) >=
334 	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
335 		if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
336 			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
337 				(mode_info->atom_context->bios + data_offset +
338 				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
339 			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
340 								 dep_table);
341 			if (ret) {
342 				amdgpu_free_extended_power_table(adev);
343 				return ret;
344 			}
345 		}
346 		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
347 			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
348 				(mode_info->atom_context->bios + data_offset +
349 				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
350 			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
351 								 dep_table);
352 			if (ret) {
353 				amdgpu_free_extended_power_table(adev);
354 				return ret;
355 			}
356 		}
357 		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
358 			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
359 				(mode_info->atom_context->bios + data_offset +
360 				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
361 			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
362 								 dep_table);
363 			if (ret) {
364 				amdgpu_free_extended_power_table(adev);
365 				return ret;
366 			}
367 		}
368 		if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
369 			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
370 				(mode_info->atom_context->bios + data_offset +
371 				 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
372 			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
373 								 dep_table);
374 			if (ret) {
375 				amdgpu_free_extended_power_table(adev);
376 				return ret;
377 			}
378 		}
379 		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
380 			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
381 				(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
382 				(mode_info->atom_context->bios + data_offset +
383 				 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
384 			if (clk_v->ucNumEntries) {
385 				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
386 					le16_to_cpu(clk_v->entries[0].usSclkLow) |
387 					(clk_v->entries[0].ucSclkHigh << 16);
388 				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
389 					le16_to_cpu(clk_v->entries[0].usMclkLow) |
390 					(clk_v->entries[0].ucMclkHigh << 16);
391 				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
392 					le16_to_cpu(clk_v->entries[0].usVddc);
393 				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
394 					le16_to_cpu(clk_v->entries[0].usVddci);
395 			}
396 		}
397 		if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
398 			ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
399 				(ATOM_PPLIB_PhaseSheddingLimits_Table *)
400 				(mode_info->atom_context->bios + data_offset +
401 				 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
402 			ATOM_PPLIB_PhaseSheddingLimits_Record *entry;
403 
404 			adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
405 				kcalloc(psl->ucNumEntries,
406 					sizeof(struct amdgpu_phase_shedding_limits_entry),
407 					GFP_KERNEL);
408 			if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
409 				amdgpu_free_extended_power_table(adev);
410 				return -ENOMEM;
411 			}
412 
413 			entry = &psl->entries[0];
414 			for (i = 0; i < psl->ucNumEntries; i++) {
415 				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
416 					le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
417 				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
418 					le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
419 				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
420 					le16_to_cpu(entry->usVoltage);
421 				entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
422 					((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
423 			}
424 			adev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
425 				psl->ucNumEntries;
426 		}
427 	}
428 
429 	/* cac data */
430 	if (le16_to_cpu(power_info->pplib.usTableSize) >=
431 	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
432 		adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
433 		adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
434 		adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit;
435 		adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
436 		if (adev->pm.dpm.tdp_od_limit)
437 			adev->pm.dpm.power_control = true;
438 		else
439 			adev->pm.dpm.power_control = false;
440 		adev->pm.dpm.tdp_adjustment = 0;
441 		adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
442 		adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
443 		adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
444 		if (power_info->pplib5.usCACLeakageTableOffset) {
445 			ATOM_PPLIB_CAC_Leakage_Table *cac_table =
446 				(ATOM_PPLIB_CAC_Leakage_Table *)
447 				(mode_info->atom_context->bios + data_offset +
448 				 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
449 			ATOM_PPLIB_CAC_Leakage_Record *entry;
450 			u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
451 			adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
452 			if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
453 				amdgpu_free_extended_power_table(adev);
454 				return -ENOMEM;
455 			}
456 			entry = &cac_table->entries[0];
457 			for (i = 0; i < cac_table->ucNumEntries; i++) {
458 				if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
459 					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
460 						le16_to_cpu(entry->usVddc1);
461 					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
462 						le16_to_cpu(entry->usVddc2);
463 					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
464 						le16_to_cpu(entry->usVddc3);
465 				} else {
466 					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
467 						le16_to_cpu(entry->usVddc);
468 					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
469 						le32_to_cpu(entry->ulLeakageValue);
470 				}
471 				entry = (ATOM_PPLIB_CAC_Leakage_Record *)
472 					((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
473 			}
474 			adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
475 		}
476 	}
477 
478 	/* ext tables */
479 	if (le16_to_cpu(power_info->pplib.usTableSize) >=
480 	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
481 		ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
482 			(mode_info->atom_context->bios + data_offset +
483 			 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
484 		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
485 			ext_hdr->usVCETableOffset) {
486 			VCEClockInfoArray *array = (VCEClockInfoArray *)
487 				(mode_info->atom_context->bios + data_offset +
488 				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
489 			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
490 				(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
491 				(mode_info->atom_context->bios + data_offset +
492 				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
493 				 1 + array->ucNumEntries * sizeof(VCEClockInfo));
494 			ATOM_PPLIB_VCE_State_Table *states =
495 				(ATOM_PPLIB_VCE_State_Table *)
496 				(mode_info->atom_context->bios + data_offset +
497 				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
498 				 1 + (array->ucNumEntries * sizeof (VCEClockInfo)) +
499 				 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
500 			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
501 			ATOM_PPLIB_VCE_State_Record *state_entry;
502 			VCEClockInfo *vce_clk;
503 			u32 size = limits->numEntries *
504 				sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
505 			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
506 				kzalloc(size, GFP_KERNEL);
507 			if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
508 				amdgpu_free_extended_power_table(adev);
509 				return -ENOMEM;
510 			}
511 			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
512 				limits->numEntries;
513 			entry = &limits->entries[0];
514 			state_entry = &states->entries[0];
515 			for (i = 0; i < limits->numEntries; i++) {
516 				vce_clk = (VCEClockInfo *)
517 					((u8 *)&array->entries[0] +
518 					 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
519 				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
520 					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
521 				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
522 					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
523 				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
524 					le16_to_cpu(entry->usVoltage);
525 				entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
526 					((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
527 			}
528 			adev->pm.dpm.num_of_vce_states =
529 					states->numEntries > AMD_MAX_VCE_LEVELS ?
530 					AMD_MAX_VCE_LEVELS : states->numEntries;
531 			for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
532 				vce_clk = (VCEClockInfo *)
533 					((u8 *)&array->entries[0] +
534 					 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
535 				adev->pm.dpm.vce_states[i].evclk =
536 					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
537 				adev->pm.dpm.vce_states[i].ecclk =
538 					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
539 				adev->pm.dpm.vce_states[i].clk_idx =
540 					state_entry->ucClockInfoIndex & 0x3f;
541 				adev->pm.dpm.vce_states[i].pstate =
542 					(state_entry->ucClockInfoIndex & 0xc0) >> 6;
543 				state_entry = (ATOM_PPLIB_VCE_State_Record *)
544 					((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
545 			}
546 		}
547 		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
548 			ext_hdr->usUVDTableOffset) {
549 			UVDClockInfoArray *array = (UVDClockInfoArray *)
550 				(mode_info->atom_context->bios + data_offset +
551 				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
552 			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
553 				(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
554 				(mode_info->atom_context->bios + data_offset +
555 				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
556 				 1 + (array->ucNumEntries * sizeof (UVDClockInfo)));
557 			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
558 			u32 size = limits->numEntries *
559 				sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
560 			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
561 				kzalloc(size, GFP_KERNEL);
562 			if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
563 				amdgpu_free_extended_power_table(adev);
564 				return -ENOMEM;
565 			}
566 			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
567 				limits->numEntries;
568 			entry = &limits->entries[0];
569 			for (i = 0; i < limits->numEntries; i++) {
570 				UVDClockInfo *uvd_clk = (UVDClockInfo *)
571 					((u8 *)&array->entries[0] +
572 					 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
573 				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
574 					le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
575 				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
576 					le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
577 				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
578 					le16_to_cpu(entry->usVoltage);
579 				entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
580 					((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
581 			}
582 		}
583 		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
584 			ext_hdr->usSAMUTableOffset) {
585 			ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
586 				(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
587 				(mode_info->atom_context->bios + data_offset +
588 				 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
589 			ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
590 			u32 size = limits->numEntries *
591 				sizeof(struct amdgpu_clock_voltage_dependency_entry);
592 			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
593 				kzalloc(size, GFP_KERNEL);
594 			if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
595 				amdgpu_free_extended_power_table(adev);
596 				return -ENOMEM;
597 			}
598 			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
599 				limits->numEntries;
600 			entry = &limits->entries[0];
601 			for (i = 0; i < limits->numEntries; i++) {
602 				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
603 					le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
604 				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
605 					le16_to_cpu(entry->usVoltage);
606 				entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
607 					((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
608 			}
609 		}
610 		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
611 		    ext_hdr->usPPMTableOffset) {
612 			ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
613 				(mode_info->atom_context->bios + data_offset +
614 				 le16_to_cpu(ext_hdr->usPPMTableOffset));
615 			adev->pm.dpm.dyn_state.ppm_table =
616 				kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
617 			if (!adev->pm.dpm.dyn_state.ppm_table) {
618 				amdgpu_free_extended_power_table(adev);
619 				return -ENOMEM;
620 			}
621 			adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
622 			adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
623 				le16_to_cpu(ppm->usCpuCoreNumber);
624 			adev->pm.dpm.dyn_state.ppm_table->platform_tdp =
625 				le32_to_cpu(ppm->ulPlatformTDP);
626 			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
627 				le32_to_cpu(ppm->ulSmallACPlatformTDP);
628 			adev->pm.dpm.dyn_state.ppm_table->platform_tdc =
629 				le32_to_cpu(ppm->ulPlatformTDC);
630 			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
631 				le32_to_cpu(ppm->ulSmallACPlatformTDC);
632 			adev->pm.dpm.dyn_state.ppm_table->apu_tdp =
633 				le32_to_cpu(ppm->ulApuTDP);
634 			adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
635 				le32_to_cpu(ppm->ulDGpuTDP);
636 			adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
637 				le32_to_cpu(ppm->ulDGpuUlvPower);
638 			adev->pm.dpm.dyn_state.ppm_table->tj_max =
639 				le32_to_cpu(ppm->ulTjmax);
640 		}
641 		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
642 			ext_hdr->usACPTableOffset) {
643 			ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
644 				(ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
645 				(mode_info->atom_context->bios + data_offset +
646 				 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
647 			ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
648 			u32 size = limits->numEntries *
649 				sizeof(struct amdgpu_clock_voltage_dependency_entry);
650 			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
651 				kzalloc(size, GFP_KERNEL);
652 			if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
653 				amdgpu_free_extended_power_table(adev);
654 				return -ENOMEM;
655 			}
656 			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
657 				limits->numEntries;
658 			entry = &limits->entries[0];
659 			for (i = 0; i < limits->numEntries; i++) {
660 				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
661 					le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
662 				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
663 					le16_to_cpu(entry->usVoltage);
664 				entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
665 					((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
666 			}
667 		}
668 		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
669 			ext_hdr->usPowerTuneTableOffset) {
670 			u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
671 					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
672 			ATOM_PowerTune_Table *pt;
673 			adev->pm.dpm.dyn_state.cac_tdp_table =
674 				kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
675 			if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
676 				amdgpu_free_extended_power_table(adev);
677 				return -ENOMEM;
678 			}
679 			if (rev > 0) {
680 				ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
681 					(mode_info->atom_context->bios + data_offset +
682 					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
683 				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
684 					ppt->usMaximumPowerDeliveryLimit;
685 				pt = &ppt->power_tune_table;
686 			} else {
687 				ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
688 					(mode_info->atom_context->bios + data_offset +
689 					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
690 				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
691 				pt = &ppt->power_tune_table;
692 			}
693 			adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
694 			adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
695 				le16_to_cpu(pt->usConfigurableTDP);
696 			adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
697 			adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
698 				le16_to_cpu(pt->usBatteryPowerLimit);
699 			adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
700 				le16_to_cpu(pt->usSmallPowerLimit);
701 			adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
702 				le16_to_cpu(pt->usLowCACLeakage);
703 			adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
704 				le16_to_cpu(pt->usHighCACLeakage);
705 		}
706 		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
707 				ext_hdr->usSclkVddgfxTableOffset) {
708 			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
709 				(mode_info->atom_context->bios + data_offset +
710 				 le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
711 			ret = amdgpu_parse_clk_voltage_dep_table(
712 					&adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
713 					dep_table);
714 			if (ret) {
715 				kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
716 				return ret;
717 			}
718 		}
719 	}
720 
721 	return 0;
722 }
723 
724 void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
725 {
726 	struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;
727 
728 	kfree(dyn_state->vddc_dependency_on_sclk.entries);
729 	kfree(dyn_state->vddci_dependency_on_mclk.entries);
730 	kfree(dyn_state->vddc_dependency_on_mclk.entries);
731 	kfree(dyn_state->mvdd_dependency_on_mclk.entries);
732 	kfree(dyn_state->cac_leakage_table.entries);
733 	kfree(dyn_state->phase_shedding_limits_table.entries);
734 	kfree(dyn_state->ppm_table);
735 	kfree(dyn_state->cac_tdp_table);
736 	kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
737 	kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
738 	kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
739 	kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
740 	kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
741 }
742 
743 static const char *pp_lib_thermal_controller_names[] = {
744 	"NONE",
745 	"lm63",
746 	"adm1032",
747 	"adm1030",
748 	"max6649",
749 	"lm64",
750 	"f75375",
751 	"RV6xx",
752 	"RV770",
753 	"adt7473",
754 	"NONE",
755 	"External GPIO",
756 	"Evergreen",
757 	"emc2103",
758 	"Sumo",
759 	"Northern Islands",
760 	"Southern Islands",
761 	"lm96163",
762 	"Sea Islands",
763 	"Kaveri/Kabini",
764 };
765 
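/* Determine the thermal controller described in the PowerPlay table: set the
 * internal sensor type for on-die controllers, or register an I2C client for
 * a recognized external fan/thermal chip. */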
766 void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
767 {
768 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
769 	ATOM_PPLIB_POWERPLAYTABLE *power_table;
770 	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
771 	ATOM_PPLIB_THERMALCONTROLLER *controller;
772 	struct amdgpu_i2c_bus_rec i2c_bus;
773 	u16 data_offset;
774 	u8 frev, crev;
775 
776 	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
777 				   &frev, &crev, &data_offset))
778 		return;
779 	power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
780 		(mode_info->atom_context->bios + data_offset);
781 	controller = &power_table->sThermalController;
782 
783 	/* add the i2c bus for thermal/fan chip */
784 	if (controller->ucType > 0) {
785 		if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
786 			adev->pm.no_fan = true;
787 		adev->pm.fan_pulses_per_revolution =
788 			controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
789 		if (adev->pm.fan_pulses_per_revolution) {
790 			adev->pm.fan_min_rpm = controller->ucFanMinRPM;
791 			adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
792 		}
793 		if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
794 			DRM_INFO("Internal thermal controller %s fan control\n",
795 				 (controller->ucFanParameters &
796 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
797 			adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
798 		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
799 			DRM_INFO("Internal thermal controller %s fan control\n",
800 				 (controller->ucFanParameters &
801 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
802 			adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
803 		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
804 			DRM_INFO("Internal thermal controller %s fan control\n",
805 				 (controller->ucFanParameters &
806 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
807 			adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
808 		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
809 			DRM_INFO("Internal thermal controller %s fan control\n",
810 				 (controller->ucFanParameters &
811 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
812 			adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
813 		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
814 			DRM_INFO("Internal thermal controller %s fan control\n",
815 				 (controller->ucFanParameters &
816 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
817 			adev->pm.int_thermal_type = THERMAL_TYPE_NI;
818 		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
819 			DRM_INFO("Internal thermal controller %s fan control\n",
820 				 (controller->ucFanParameters &
821 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
822 			adev->pm.int_thermal_type = THERMAL_TYPE_SI;
823 		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
824 			DRM_INFO("Internal thermal controller %s fan control\n",
825 				 (controller->ucFanParameters &
826 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
827 			adev->pm.int_thermal_type = THERMAL_TYPE_CI;
828 		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
829 			DRM_INFO("Internal thermal controller %s fan control\n",
830 				 (controller->ucFanParameters &
831 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
832 			adev->pm.int_thermal_type = THERMAL_TYPE_KV;
833 		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
834 			DRM_INFO("External GPIO thermal controller %s fan control\n",
835 				 (controller->ucFanParameters &
836 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
837 			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
838 		} else if (controller->ucType ==
839 			   ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
840 			DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
841 				 (controller->ucFanParameters &
842 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
843 			adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
844 		} else if (controller->ucType ==
845 			   ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
846 			DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
847 				 (controller->ucFanParameters &
848 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
849 			adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
850 		} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
851 			DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
852 				 pp_lib_thermal_controller_names[controller->ucType],
853 				 controller->ucI2cAddress >> 1,
854 				 (controller->ucFanParameters &
855 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
856 			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
857 			i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
858 			adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
859 			if (adev->pm.i2c_bus) {
860 				struct i2c_board_info info = { };
861 				const char *name = pp_lib_thermal_controller_names[controller->ucType];
862 				info.addr = controller->ucI2cAddress >> 1;
863 				strlcpy(info.type, name, sizeof(info.type));
864 				i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info);
865 			}
866 		} else {
867 			DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
868 				 controller->ucType,
869 				 controller->ucI2cAddress >> 1,
870 				 (controller->ucFanParameters &
871 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
872 		}
873 	}
874 }
875 
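/* Pick the PCIe gen for link speed DPM: an explicit asic_gen limit wins;
 * otherwise use default_gen if the system link (sys_mask) supports it,
 * falling back to a lower generation. */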
876 enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
877 						 u32 sys_mask,
878 						 enum amdgpu_pcie_gen asic_gen,
879 						 enum amdgpu_pcie_gen default_gen)
880 {
881 	switch (asic_gen) {
882 	case AMDGPU_PCIE_GEN1:
883 		return AMDGPU_PCIE_GEN1;
884 	case AMDGPU_PCIE_GEN2:
885 		return AMDGPU_PCIE_GEN2;
886 	case AMDGPU_PCIE_GEN3:
887 		return AMDGPU_PCIE_GEN3;
888 	default:
889 		if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) &&
890 		    (default_gen == AMDGPU_PCIE_GEN3))
891 			return AMDGPU_PCIE_GEN3;
892 		else if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) &&
893 			 (default_gen == AMDGPU_PCIE_GEN2))
894 			return AMDGPU_PCIE_GEN2;
895 		else
896 			return AMDGPU_PCIE_GEN1;
897 	}
898 	return AMDGPU_PCIE_GEN1;
899 }
900 
901 struct amd_vce_state*
902 amdgpu_get_vce_clock_state(void *handle, u32 idx)
903 {
904 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
905 
906 	if (idx < adev->pm.dpm.num_of_vce_states)
907 		return &adev->pm.dpm.vce_states[idx];
908 
909 	return NULL;
910 }
911 
912 int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
913 {
914 	uint32_t clk_freq;
915 	int ret = 0;
916 	if (is_support_sw_smu(adev)) {
917 		ret = smu_get_dpm_freq_range(&adev->smu, SMU_GFXCLK,
918 					     low ? &clk_freq : NULL,
919 					     !low ? &clk_freq : NULL);
920 		if (ret)
921 			return 0;
922 		return clk_freq * 100;
923 
924 	} else {
925 		return (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (low));
926 	}
927 }
928 
929 int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
930 {
931 	uint32_t clk_freq;
932 	int ret = 0;
933 	if (is_support_sw_smu(adev)) {
934 		ret = smu_get_dpm_freq_range(&adev->smu, SMU_UCLK,
935 					     low ? &clk_freq : NULL,
936 					     !low ? &clk_freq : NULL);
937 		if (ret)
938 			return 0;
939 		return clk_freq * 100;
940 
941 	} else {
942 		return (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (low));
943 	}
944 }
945 
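/* Forward a per-IP-block powergating request to the SW SMU or to the
 * powerplay set_powergating_by_smu callback.  UVD/VCE requests also take
 * adev->pm.mutex (see the deadlock note below). */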
946 int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
947 {
948 	int ret = 0;
949 	bool swsmu = is_support_sw_smu(adev);
950 
951 	switch (block_type) {
952 	case AMD_IP_BLOCK_TYPE_UVD:
953 	case AMD_IP_BLOCK_TYPE_VCE:
954 		if (swsmu) {
955 			ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
956 		} else if (adev->powerplay.pp_funcs &&
957 			   adev->powerplay.pp_funcs->set_powergating_by_smu) {
958 			/*
959 			 * TODO: need a better lock mechanism
960 			 *
961 			 * Here adev->pm.mutex lock protection is enforced on
962 			 * UVD and VCE cases only. Since for other cases, there
963 			 * may be already lock protection in amdgpu_pm.c.
964 			 * This is a quick fix for the deadlock issue below.
965 			 *     INFO: task ocltst:2028 blocked for more than 120 seconds.
966 			 *     Tainted: G           OE     5.0.0-37-generic #40~18.04.1-Ubuntu
967 			 *     "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
968 			 *     ocltst          D    0  2028   2026 0x00000000
969 			 *     Call Trace:
970 			 *     __schedule+0x2c0/0x870
971 			 *     schedule+0x2c/0x70
972 			 *     schedule_preempt_disabled+0xe/0x10
973 			 *     __mutex_lock.isra.9+0x26d/0x4e0
974 			 *     __mutex_lock_slowpath+0x13/0x20
975 			 *     ? __mutex_lock_slowpath+0x13/0x20
976 			 *     mutex_lock+0x2f/0x40
977 			 *     amdgpu_dpm_set_powergating_by_smu+0x64/0xe0 [amdgpu]
978 			 *     gfx_v8_0_enable_gfx_static_mg_power_gating+0x3c/0x70 [amdgpu]
979 			 *     gfx_v8_0_set_powergating_state+0x66/0x260 [amdgpu]
980 			 *     amdgpu_device_ip_set_powergating_state+0x62/0xb0 [amdgpu]
981 			 *     pp_dpm_force_performance_level+0xe7/0x100 [amdgpu]
982 			 *     amdgpu_set_dpm_forced_performance_level+0x129/0x330 [amdgpu]
983 			 */
984 			mutex_lock(&adev->pm.mutex);
985 			ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
986 				(adev)->powerplay.pp_handle, block_type, gate));
987 			mutex_unlock(&adev->pm.mutex);
988 		}
989 		break;
990 	case AMD_IP_BLOCK_TYPE_GFX:
991 	case AMD_IP_BLOCK_TYPE_VCN:
992 	case AMD_IP_BLOCK_TYPE_SDMA:
993 		if (swsmu)
994 			ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
995 		else if (adev->powerplay.pp_funcs &&
996 			 adev->powerplay.pp_funcs->set_powergating_by_smu)
997 			ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
998 				(adev)->powerplay.pp_handle, block_type, gate));
999 		break;
1000 	case AMD_IP_BLOCK_TYPE_JPEG:
1001 		if (swsmu)
1002 			ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
1003 		break;
1004 	case AMD_IP_BLOCK_TYPE_GMC:
1005 	case AMD_IP_BLOCK_TYPE_ACP:
1006 		if (adev->powerplay.pp_funcs &&
1007 		    adev->powerplay.pp_funcs->set_powergating_by_smu)
1008 			ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
1009 				(adev)->powerplay.pp_handle, block_type, gate));
1010 		break;
1011 	default:
1012 		break;
1013 	}
1014 
1015 	return ret;
1016 }
1017 
1018 int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
1019 {
1020 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1021 	void *pp_handle = adev->powerplay.pp_handle;
1022 	struct smu_context *smu = &adev->smu;
1023 	int ret = 0;
1024 
1025 	if (is_support_sw_smu(adev)) {
1026 		ret = smu_baco_enter(smu);
1027 	} else {
1028 		if (!pp_funcs || !pp_funcs->set_asic_baco_state)
1029 			return -ENOENT;
1030 
1031 		/* enter BACO state */
1032 		ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
1033 	}
1034 
1035 	return ret;
1036 }
1037 
1038 int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
1039 {
1040 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1041 	void *pp_handle = adev->powerplay.pp_handle;
1042 	struct smu_context *smu = &adev->smu;
1043 	int ret = 0;
1044 
1045 	if (is_support_sw_smu(adev)) {
1046 		ret = smu_baco_exit(smu);
1047 	} else {
1048 		if (!pp_funcs || !pp_funcs->set_asic_baco_state)
1049 			return -ENOENT;
1050 
1051 		/* exit BACO state */
1052 		ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
1053 	}
1054 
1055 	return ret;
1056 }
1057 
1058 int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
1059 			     enum pp_mp1_state mp1_state)
1060 {
1061 	int ret = 0;
1062 
1063 	if (is_support_sw_smu(adev)) {
1064 		ret = smu_set_mp1_state(&adev->smu, mp1_state);
1065 	} else if (adev->powerplay.pp_funcs &&
1066 		   adev->powerplay.pp_funcs->set_mp1_state) {
1067 		ret = adev->powerplay.pp_funcs->set_mp1_state(
1068 				adev->powerplay.pp_handle,
1069 				mp1_state);
1070 	}
1071 
1072 	return ret;
1073 }
1074 
1075 bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
1076 {
1077 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1078 	void *pp_handle = adev->powerplay.pp_handle;
1079 	struct smu_context *smu = &adev->smu;
1080 	bool baco_cap;
1081 
1082 	if (is_support_sw_smu(adev)) {
1083 		return smu_baco_is_support(smu);
1084 	} else {
1085 		if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
1086 			return false;
1087 
1088 		if (pp_funcs->get_asic_baco_capability(pp_handle, &baco_cap))
1089 			return false;
1090 
1091 		return baco_cap ? true : false;
1092 	}
1093 }
1094 
1095 int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
1096 {
1097 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1098 	void *pp_handle = adev->powerplay.pp_handle;
1099 	struct smu_context *smu = &adev->smu;
1100 
1101 	if (is_support_sw_smu(adev)) {
1102 		return smu_mode2_reset(smu);
1103 	} else {
1104 		if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
1105 			return -ENOENT;
1106 
1107 		return pp_funcs->asic_reset_mode_2(pp_handle);
1108 	}
1109 }
1110 
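/* Reset the ASIC by entering and then exiting BACO (Bus Active, Chip Off),
 * using either the SW SMU or the powerplay BACO state callbacks. */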
1111 int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
1112 {
1113 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1114 	void *pp_handle = adev->powerplay.pp_handle;
1115 	struct smu_context *smu = &adev->smu;
1116 	int ret = 0;
1117 
1118 	if (is_support_sw_smu(adev)) {
1119 		ret = smu_baco_enter(smu);
1120 		if (ret)
1121 			return ret;
1122 
1123 		ret = smu_baco_exit(smu);
1124 		if (ret)
1125 			return ret;
1126 	} else {
1127 		if (!pp_funcs
1128 		    || !pp_funcs->set_asic_baco_state)
1129 			return -ENOENT;
1130 
1131 		/* enter BACO state */
1132 		ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
1133 		if (ret)
1134 			return ret;
1135 
1136 		/* exit BACO state */
1137 		ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
1138 		if (ret)
1139 			return ret;
1140 	}
1141 
1142 	return 0;
1143 }
1144 
1145 bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
1146 {
1147 	struct smu_context *smu = &adev->smu;
1148 
1149 	if (is_support_sw_smu(adev))
1150 		return smu_mode1_reset_is_support(smu);
1151 
1152 	return false;
1153 }
1154 
1155 int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
1156 {
1157 	struct smu_context *smu = &adev->smu;
1158 
1159 	if (is_support_sw_smu(adev))
1160 		return smu_mode1_reset(smu);
1161 
1162 	return -EOPNOTSUPP;
1163 }
1164 
1165 int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
1166 				    enum PP_SMC_POWER_PROFILE type,
1167 				    bool en)
1168 {
1169 	int ret = 0;
1170 
1171 	if (is_support_sw_smu(adev))
1172 		ret = smu_switch_power_profile(&adev->smu, type, en);
1173 	else if (adev->powerplay.pp_funcs &&
1174 		 adev->powerplay.pp_funcs->switch_power_profile)
1175 		ret = adev->powerplay.pp_funcs->switch_power_profile(
1176 			adev->powerplay.pp_handle, type, en);
1177 
1178 	return ret;
1179 }
1180 
1181 int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
1182 			       uint32_t pstate)
1183 {
1184 	int ret = 0;
1185 
1186 	if (is_support_sw_smu(adev))
1187 		ret = smu_set_xgmi_pstate(&adev->smu, pstate);
1188 	else if (adev->powerplay.pp_funcs &&
1189 		 adev->powerplay.pp_funcs->set_xgmi_pstate)
1190 		ret = adev->powerplay.pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
1191 								pstate);
1192 
1193 	return ret;
1194 }
1195 
1196 int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
1197 			     uint32_t cstate)
1198 {
1199 	int ret = 0;
1200 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1201 	void *pp_handle = adev->powerplay.pp_handle;
1202 	struct smu_context *smu = &adev->smu;
1203 
1204 	if (is_support_sw_smu(adev))
1205 		ret = smu_set_df_cstate(smu, cstate);
1206 	else if (pp_funcs &&
1207 		 pp_funcs->set_df_cstate)
1208 		ret = pp_funcs->set_df_cstate(pp_handle, cstate);
1209 
1210 	return ret;
1211 }
1212 
1213 int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en)
1214 {
1215 	struct smu_context *smu = &adev->smu;
1216 
1217 	if (is_support_sw_smu(adev))
1218 		return smu_allow_xgmi_power_down(smu, en);
1219 
1220 	return 0;
1221 }
1222 
1223 int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
1224 {
1225 	void *pp_handle = adev->powerplay.pp_handle;
1226 	const struct amd_pm_funcs *pp_funcs =
1227 			adev->powerplay.pp_funcs;
1228 	struct smu_context *smu = &adev->smu;
1229 	int ret = 0;
1230 
1231 	if (is_support_sw_smu(adev))
1232 		ret = smu_enable_mgpu_fan_boost(smu);
1233 	else if (pp_funcs && pp_funcs->enable_mgpu_fan_boost)
1234 		ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
1235 
1236 	return ret;
1237 }
1238 
1239 int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
1240 				      uint32_t msg_id)
1241 {
1242 	void *pp_handle = adev->powerplay.pp_handle;
1243 	const struct amd_pm_funcs *pp_funcs =
1244 			adev->powerplay.pp_funcs;
1245 	int ret = 0;
1246 
1247 	if (pp_funcs && pp_funcs->set_clockgating_by_smu)
1248 		ret = pp_funcs->set_clockgating_by_smu(pp_handle,
1249 						       msg_id);
1250 
1251 	return ret;
1252 }
1253 
1254 int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
1255 				  bool acquire)
1256 {
1257 	void *pp_handle = adev->powerplay.pp_handle;
1258 	const struct amd_pm_funcs *pp_funcs =
1259 			adev->powerplay.pp_funcs;
1260 	int ret = -EOPNOTSUPP;
1261 
1262 	if (pp_funcs && pp_funcs->smu_i2c_bus_access)
1263 		ret = pp_funcs->smu_i2c_bus_access(pp_handle,
1264 						   acquire);
1265 
1266 	return ret;
1267 }
1268 
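/* ACPI AC/DC power source change handler: refresh adev->pm.ac_power and
 * propagate the new state to BAPM and/or the SW SMU. */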
1269 void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
1270 {
1271 	if (adev->pm.dpm_enabled) {
1272 		mutex_lock(&adev->pm.mutex);
1273 		if (power_supply_is_system_supplied() > 0)
1274 			adev->pm.ac_power = true;
1275 		else
1276 			adev->pm.ac_power = false;
1277 		if (adev->powerplay.pp_funcs &&
1278 		    adev->powerplay.pp_funcs->enable_bapm)
1279 			amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
1280 		mutex_unlock(&adev->pm.mutex);
1281 
1282 		if (is_support_sw_smu(adev))
1283 			smu_set_ac_dc(&adev->smu);
1284 	}
1285 }
1286 
1287 int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
1288 			   void *data, uint32_t *size)
1289 {
1290 	int ret = 0;
1291 
1292 	if (!data || !size)
1293 		return -EINVAL;
1294 
1295 	if (is_support_sw_smu(adev))
1296 		ret = smu_read_sensor(&adev->smu, sensor, data, size);
1297 	else {
1298 		if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
1299 			ret = adev->powerplay.pp_funcs->read_sensor((adev)->powerplay.pp_handle,
1300 								    sensor, data, size);
1301 		else
1302 			ret = -EINVAL;
1303 	}
1304 
1305 	return ret;
1306 }
1307 
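/* Thermal interrupt work handler: read the GPU temperature and switch to the
 * internal thermal power state while hot, restoring the user-selected state
 * once the temperature falls below the threshold again. */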
1308 void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
1309 {
1310 	struct amdgpu_device *adev =
1311 		container_of(work, struct amdgpu_device,
1312 			     pm.dpm.thermal.work);
1313 	/* switch to the thermal state */
1314 	enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
1315 	int temp, size = sizeof(temp);
1316 
1317 	if (!adev->pm.dpm_enabled)
1318 		return;
1319 
1320 	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
1321 				    (void *)&temp, &size)) {
1322 		if (temp < adev->pm.dpm.thermal.min_temp)
1323 			/* switch back the user state */
1324 			dpm_state = adev->pm.dpm.user_state;
1325 	} else {
1326 		if (adev->pm.dpm.thermal.high_to_low)
1327 			/* switch back the user state */
1328 			dpm_state = adev->pm.dpm.user_state;
1329 	}
1330 	mutex_lock(&adev->pm.mutex);
1331 	if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
1332 		adev->pm.dpm.thermal_active = true;
1333 	else
1334 		adev->pm.dpm.thermal_active = false;
1335 	adev->pm.dpm.state = dpm_state;
1336 	mutex_unlock(&adev->pm.mutex);
1337 
1338 	amdgpu_pm_compute_clocks(adev);
1339 }
1340 
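/* Select the power state that best matches the requested dpm_state, honoring
 * single-display-only states when at most one display is active and falling
 * back through related state types when no exact match exists. */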
1341 static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
1342 						     enum amd_pm_state_type dpm_state)
1343 {
1344 	int i;
1345 	struct amdgpu_ps *ps;
1346 	u32 ui_class;
1347 	bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
1348 		true : false;
1349 
1350 	/* check if the vblank period is too short to adjust the mclk */
1351 	if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
1352 		if (amdgpu_dpm_vblank_too_short(adev))
1353 			single_display = false;
1354 	}
1355 
1356 	/* certain older asics have a separate 3D performance state,
1357 	 * so try that first if the user selected performance
1358 	 */
1359 	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
1360 		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
1361 	/* balanced states don't exist at the moment */
1362 	if (dpm_state == POWER_STATE_TYPE_BALANCED)
1363 		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
1364 
1365 restart_search:
1366 	/* Pick the best power state based on current conditions */
1367 	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
1368 		ps = &adev->pm.dpm.ps[i];
1369 		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
1370 		switch (dpm_state) {
1371 		/* user states */
1372 		case POWER_STATE_TYPE_BATTERY:
1373 			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
1374 				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
1375 					if (single_display)
1376 						return ps;
1377 				} else
1378 					return ps;
1379 			}
1380 			break;
1381 		case POWER_STATE_TYPE_BALANCED:
1382 			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
1383 				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
1384 					if (single_display)
1385 						return ps;
1386 				} else
1387 					return ps;
1388 			}
1389 			break;
1390 		case POWER_STATE_TYPE_PERFORMANCE:
1391 			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
1392 				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
1393 					if (single_display)
1394 						return ps;
1395 				} else
1396 					return ps;
1397 			}
1398 			break;
1399 		/* internal states */
1400 		case POWER_STATE_TYPE_INTERNAL_UVD:
1401 			if (adev->pm.dpm.uvd_ps)
1402 				return adev->pm.dpm.uvd_ps;
1403 			else
1404 				break;
1405 		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
1406 			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
1407 				return ps;
1408 			break;
1409 		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
1410 			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
1411 				return ps;
1412 			break;
1413 		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
1414 			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
1415 				return ps;
1416 			break;
1417 		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
1418 			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
1419 				return ps;
1420 			break;
1421 		case POWER_STATE_TYPE_INTERNAL_BOOT:
1422 			return adev->pm.dpm.boot_ps;
1423 		case POWER_STATE_TYPE_INTERNAL_THERMAL:
1424 			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
1425 				return ps;
1426 			break;
1427 		case POWER_STATE_TYPE_INTERNAL_ACPI:
1428 			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
1429 				return ps;
1430 			break;
1431 		case POWER_STATE_TYPE_INTERNAL_ULV:
1432 			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
1433 				return ps;
1434 			break;
1435 		case POWER_STATE_TYPE_INTERNAL_3DPERF:
1436 			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
1437 				return ps;
1438 			break;
1439 		default:
1440 			break;
1441 		}
1442 	}
1443 	/* use a fallback state if we didn't match */
1444 	switch (dpm_state) {
1445 	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
1446 		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
1447 		goto restart_search;
1448 	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
1449 	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
1450 	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
1451 		if (adev->pm.dpm.uvd_ps) {
1452 			return adev->pm.dpm.uvd_ps;
1453 		} else {
1454 			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
1455 			goto restart_search;
1456 		}
1457 	case POWER_STATE_TYPE_INTERNAL_THERMAL:
1458 		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
1459 		goto restart_search;
1460 	case POWER_STATE_TYPE_INTERNAL_ACPI:
1461 		dpm_state = POWER_STATE_TYPE_BATTERY;
1462 		goto restart_search;
1463 	case POWER_STATE_TYPE_BATTERY:
1464 	case POWER_STATE_TYPE_BALANCED:
1465 	case POWER_STATE_TYPE_INTERNAL_3DPERF:
1466 		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
1467 		goto restart_search;
1468 	default:
1469 		break;
1470 	}
1471 
1472 	return NULL;
1473 }
1474 
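/*
 * amdgpu_dpm_change_power_state_locked - transition to the requested state
 *
 * Picks the power state that matches the current DPM state and programs it
 * through the pre/set/post power state hooks, skipping the switch when the
 * requested state is equal to the current one.  The forced performance level
 * is then reapplied, dropping to LOW while a thermal event is active.
 * Caller must hold adev->pm.mutex.
 */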
static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
{
	struct amdgpu_ps *ps;
	enum amd_pm_state_type dpm_state;
	int ret;
	bool equal = false;

	/* if dpm init failed */
	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
		/* add other state override checks here */
		if ((!adev->pm.dpm.thermal_active) &&
		    (!adev->pm.dpm.uvd_active))
			adev->pm.dpm.state = adev->pm.dpm.user_state;
	}
	dpm_state = adev->pm.dpm.state;

	ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
	if (ps)
		adev->pm.dpm.requested_ps = ps;
	else
		return;

	if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) {
		printk("switching from power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
		printk("switching to power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
	}

	/* update whether vce is active */
	ps->vce_active = adev->pm.dpm.vce_active;
	if (adev->powerplay.pp_funcs->display_configuration_changed)
		amdgpu_dpm_display_configuration_changed(adev);

	ret = amdgpu_dpm_pre_set_power_state(adev);
	if (ret)
		return;

	if (adev->powerplay.pp_funcs->check_state_equal) {
		if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
			equal = false;
	}

	if (equal)
		return;

	amdgpu_dpm_set_power_state(adev);
	amdgpu_dpm_post_set_power_state(adev);

	adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
	adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;

	if (adev->powerplay.pp_funcs->force_performance_level) {
		if (adev->pm.dpm.thermal_active) {
			enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
			/* force low perf level for thermal */
			amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
			/* save the user's level */
			adev->pm.dpm.forced_level = level;
		} else {
			/* otherwise, user selected level */
			amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
		}
	}
}

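/*
 * amdgpu_pm_compute_clocks - re-evaluate clocks for the current display config
 *
 * Updates display bandwidth, waits for the rings to idle, and then asks the
 * active power management backend (SW SMU, powerplay task dispatch, or the
 * legacy dpm state machine) to adjust clocks for the new display
 * configuration.
 */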
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
{
	int i = 0;

	if (!adev->pm.dpm_enabled)
		return;

	if (adev->mode_info.num_crtc)
		amdgpu_display_bandwidth_update(adev);

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (ring && ring->sched.ready)
			amdgpu_fence_wait_empty(ring);
	}

	if (is_support_sw_smu(adev)) {
		struct smu_dpm_context *smu_dpm = &adev->smu.smu_dpm;
		smu_handle_task(&adev->smu,
				smu_dpm->dpm_level,
				AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
				true);
	} else {
		if (adev->powerplay.pp_funcs->dispatch_tasks) {
			if (!amdgpu_device_has_dc_support(adev)) {
				mutex_lock(&adev->pm.mutex);
				amdgpu_dpm_get_active_displays(adev);
				adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
				adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
				adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
				/* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */
				if (adev->pm.pm_display_cfg.vrefresh > 120)
					adev->pm.pm_display_cfg.min_vblank_time = 0;
				if (adev->powerplay.pp_funcs->display_configuration_change)
					adev->powerplay.pp_funcs->display_configuration_change(
									adev->powerplay.pp_handle,
									&adev->pm.pm_display_cfg);
				mutex_unlock(&adev->pm.mutex);
			}
			amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
		} else {
			mutex_lock(&adev->pm.mutex);
			amdgpu_dpm_get_active_displays(adev);
			amdgpu_dpm_change_power_state_locked(adev);
			mutex_unlock(&adev->pm.mutex);
		}
	}
}

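/*
 * amdgpu_dpm_enable_uvd - power UVD up or down
 *
 * On SI parts this selects the internal UVD power state through the legacy
 * dpm state machine; everywhere else the UVD block is (un)powergated through
 * the SMU.  On Stoney the NB low memory pstate is additionally updated when
 * decoding 4K content.
 */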
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.uvd_active = true;
			adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
		} else {
			adev->pm.dpm.uvd_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_pm_compute_clocks(adev);
	} else {
		ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
		if (ret)
			DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
				  enable ? "enable" : "disable", ret);

		/* enable/disable Low Memory PState for UVD (4k videos) */
		if (adev->asic_type == CHIP_STONEY &&
			adev->uvd.decode_image_width >= WIDTH_4K) {
			struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

			if (hwmgr && hwmgr->hwmgr_func &&
			    hwmgr->hwmgr_func->update_nbdpm_pstate)
				hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr,
								       !enable,
								       true);
		}
	}
}

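/*
 * amdgpu_dpm_enable_vce - power VCE up or down
 *
 * On SI parts this marks VCE as active and recomputes clocks; everywhere
 * else the VCE block is (un)powergated through the SMU.
 */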
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.vce_active = true;
			/* XXX select vce level based on ring/task */
			adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
		} else {
			adev->pm.dpm.vce_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_pm_compute_clocks(adev);
	} else {
		ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
		if (ret)
			DRM_ERROR("Dpm %s vce failed, ret = %d. \n",
				  enable ? "enable" : "disable", ret);
	}
}

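/* amdgpu_pm_print_power_states - dump every power state in the dpm table */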
void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
{
	int i;

	if (adev->powerplay.pp_funcs->print_power_state == NULL)
		return;

	for (i = 0; i < adev->pm.dpm.num_ps; i++)
		amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
}

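/* amdgpu_dpm_enable_jpeg - (un)powergate the JPEG block through the SMU */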
void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
	if (ret)
		DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n",
			  enable ? "enable" : "disable", ret);
}

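/*
 * amdgpu_pm_load_smu_firmware - load the SMU firmware via powerplay
 *
 * Returns 0 on success (or when no load_firmware hook is provided) and a
 * negative error code on failure; when firmware is loaded, its version is
 * reported back through @smu_version.
 */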
int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
{
	int r;

	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) {
		r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
		if (r) {
			pr_err("smu firmware loading failed\n");
			return r;
		}
		*smu_version = adev->pm.fw_version;
	}
	return 0;
}