/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"

void amdgpu_dpm_print_class_info(u32 class, u32 class2)
{
	const char *s;

	switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
	default:
		s = "none";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		s = "battery";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
		s = "balanced";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		s = "performance";
		break;
	}
	printk("\tui class: %s\n", s);
	printk("\tinternal class:");
	if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
	    (class2 == 0))
		pr_cont(" none");
	else {
		if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
			pr_cont(" boot");
		if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
			pr_cont(" thermal");
		if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
			pr_cont(" limited_pwr");
		if (class & ATOM_PPLIB_CLASSIFICATION_REST)
			pr_cont(" rest");
		if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
			pr_cont(" forced");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
			pr_cont(" 3d_perf");
		if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
			pr_cont(" ovrdrv");
		if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
			pr_cont(" uvd");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
			pr_cont(" 3d_low");
		if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
			pr_cont(" acpi");
		if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
			pr_cont(" uvd_hd2");
		if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
			pr_cont(" uvd_hd");
		if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
			pr_cont(" uvd_sd");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
			pr_cont(" limited_pwr2");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
			pr_cont(" ulv");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
			pr_cont(" uvd_mvc");
	}
	pr_cont("\n");
}

void amdgpu_dpm_print_cap_info(u32 caps)
{
	printk("\tcaps:");
	if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
		pr_cont(" single_disp");
	if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
		pr_cont(" video");
	if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
		pr_cont(" no_dc");
	pr_cont("\n");
}

void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
				struct amdgpu_ps *rps)
{
	printk("\tstatus:");
	if (rps == adev->pm.dpm.current_ps)
		pr_cont(" c");
	if (rps == adev->pm.dpm.requested_ps)
		pr_cont(" r");
	if (rps == adev->pm.dpm.boot_ps)
		pr_cont(" b");
	pr_cont("\n");
}

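/*
 * Worked example (illustrative numbers, not from a specific ASIC): for a
 * 1080p mode with crtc_htotal = 2200, 45 blanking lines, no borders and a
 * 148500 kHz pixel clock, vblank_in_pixels = 2200 * 45 = 99000, so the
 * returned vblank time is 99000 * 1000 / 148500 ~= 666 us.  Per-ASIC DPM
 * code typically compares this against the time needed to safely switch
 * memory clocks.
 */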
u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;
	u32 vblank_in_pixels;
	u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */

	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
				vblank_in_pixels =
					amdgpu_crtc->hw_mode.crtc_htotal *
					(amdgpu_crtc->hw_mode.crtc_vblank_end -
					amdgpu_crtc->hw_mode.crtc_vdisplay +
					(amdgpu_crtc->v_border * 2));

				vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
				break;
			}
		}
	}

	return vblank_time_us;
}

u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;
	u32 vrefresh = 0;

	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
				vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
				break;
			}
		}
	}

	return vrefresh;
}

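/*
 * Clarifying note (added): i_c scales the interval i by the ramp
 * coefficient r_c (a percentage), b_c counts how many bits of i_c sit
 * above bit p_b, and the result is split so that i_c ~= *p << (2 * *u).
 * The precise meaning of the SMC "u" and "p" parameters is ASIC-specific.
 */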
void amdgpu_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
			      u32 *p, u32 *u)
{
	u32 b_c = 0;
	u32 i_c;
	u32 tmp;

	i_c = (i * r_c) / 100;
	tmp = i_c >> p_b;

	while (tmp) {
		b_c++;
		tmp >>= 1;
	}

	*u = (b_c + 1) / 2;
	*p = i_c / (1 << (2 * (*u)));
}

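/*
 * Sketch of the intent (an interpretation based on the SI-era callers, not
 * an authoritative spec): given a target interval t, a hysteresis value h
 * and a high/low sclk pair fh/fl, compute an asymmetric split of the
 * activity window into a high threshold *th = t - ah and a low threshold
 * *tl = t + al.  Returns -EINVAL for a zero or inverted frequency pair.
 */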
int amdgpu_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
{
	u32 k, a, ah, al;
	u32 t1;

	if ((fl == 0) || (fh == 0) || (fl > fh))
		return -EINVAL;

	k = (100 * fh) / fl;
	t1 = (t * (k - 100));
	a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
	a = (a + 5) / 10;
	ah = ((a * t) + 5000) / 10000;
	al = a - ah;

	*th = t - ah;
	*tl = t + al;

	return 0;
}

bool amdgpu_is_uvd_state(u32 class, u32 class2)
{
	if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
		return true;
	if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
		return true;
	return false;
}

bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
{
	switch (sensor) {
	case THERMAL_TYPE_RV6XX:
	case THERMAL_TYPE_RV770:
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_SUMO:
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_SI:
	case THERMAL_TYPE_CI:
	case THERMAL_TYPE_KV:
		return true;
	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
		return false; /* need special handling */
	case THERMAL_TYPE_NONE:
	case THERMAL_TYPE_EXTERNAL:
	case THERMAL_TYPE_EXTERNAL_GPIO:
	default:
		return false;
	}
}

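/*
 * Every PowerPlay table revision shares a common header, so a union lets
 * the parser overlay the revision-specific layouts on the same BIOS data
 * offset and select fields after checking usTableSize against the struct
 * sizes.
 */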
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};

union fan_info {
	struct _ATOM_PPLIB_FANTABLE fan;
	struct _ATOM_PPLIB_FANTABLE2 fan2;
	struct _ATOM_PPLIB_FANTABLE3 fan3;
};

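/*
 * ATOM stores clocks as a little-endian 16-bit low word plus an 8-bit high
 * byte.  Illustrative example: a 24-bit value of 80000 (800 MHz in 10 kHz
 * units) splits into usClockLow = 0x3880 and ucClockHigh = 0x01, and is
 * recombined below as le16_to_cpu(usClockLow) | (ucClockHigh << 16).
 */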
static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
					      ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
{
	u32 size = atom_table->ucNumEntries *
		sizeof(struct amdgpu_clock_voltage_dependency_entry);
	int i;
	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;

	amdgpu_table->entries = kzalloc(size, GFP_KERNEL);
	if (!amdgpu_table->entries)
		return -ENOMEM;

	entry = &atom_table->entries[0];
	for (i = 0; i < atom_table->ucNumEntries; i++) {
		amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
			(entry->ucClockHigh << 16);
		amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
		entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
			((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
	}
	amdgpu_table->count = atom_table->ucNumEntries;

	return 0;
}

int amdgpu_get_platform_caps(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
	adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
	adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);

	return 0;
}

/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26

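/*
 * Note (added): each extended-header revision appends one more 16-bit table
 * offset, which is why the V2..V9 sizes above step by two bytes.  The parser
 * below checks usSize against these constants before dereferencing the
 * corresponding offset.
 */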
int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	union power_info *power_info;
	union fan_info *fan_info;
	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	int ret, i;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	/* fan table */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		if (power_info->pplib3.usFanTableOffset) {
			fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
						      le16_to_cpu(power_info->pplib3.usFanTableOffset));
			adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
			adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
			adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
			adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
			adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
			adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
			adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
			if (fan_info->fan.ucFanTableFormat >= 2)
				adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
			else
				adev->pm.dpm.fan.t_max = 10900;
			adev->pm.dpm.fan.cycle_delay = 100000;
			if (fan_info->fan.ucFanTableFormat >= 3) {
				adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
				adev->pm.dpm.fan.default_max_fan_pwm =
					le16_to_cpu(fan_info->fan3.usFanPWMMax);
				adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
				adev->pm.dpm.fan.fan_output_sensitivity =
					le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
			}
			adev->pm.dpm.fan.ucode_fan_control = true;
		}
	}

	/* clock dependency tables, phase shedding tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
		if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
				(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
			if (clk_v->ucNumEntries) {
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
					le16_to_cpu(clk_v->entries[0].usSclkLow) |
					(clk_v->entries[0].ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
					le16_to_cpu(clk_v->entries[0].usMclkLow) |
					(clk_v->entries[0].ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
					le16_to_cpu(clk_v->entries[0].usVddc);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
					le16_to_cpu(clk_v->entries[0].usVddci);
			}
		}
		if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
			ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
				(ATOM_PPLIB_PhaseSheddingLimits_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
			ATOM_PPLIB_PhaseSheddingLimits_Record *entry;

			adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
				kzalloc(psl->ucNumEntries *
					sizeof(struct amdgpu_phase_shedding_limits_entry),
					GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}

			entry = &psl->entries[0];
			for (i = 0; i < psl->ucNumEntries; i++) {
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
					le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
					le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
			}
			adev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
				psl->ucNumEntries;
		}
	}

	/* cac data */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
		adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
		adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
		adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit;
		adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
		if (adev->pm.dpm.tdp_od_limit)
			adev->pm.dpm.power_control = true;
		else
			adev->pm.dpm.power_control = false;
		adev->pm.dpm.tdp_adjustment = 0;
		adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
		adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
		adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
		if (power_info->pplib5.usCACLeakageTableOffset) {
			ATOM_PPLIB_CAC_Leakage_Table *cac_table =
				(ATOM_PPLIB_CAC_Leakage_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
			ATOM_PPLIB_CAC_Leakage_Record *entry;
			u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
			adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			entry = &cac_table->entries[0];
			for (i = 0; i < cac_table->ucNumEntries; i++) {
				if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
						le16_to_cpu(entry->usVddc1);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
						le16_to_cpu(entry->usVddc2);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
						le16_to_cpu(entry->usVddc3);
				} else {
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
						le16_to_cpu(entry->usVddc);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
						le32_to_cpu(entry->ulLeakageValue);
				}
				entry = (ATOM_PPLIB_CAC_Leakage_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
			}
			adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
		}
	}

	/* ext tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
			ext_hdr->usVCETableOffset) {
			VCEClockInfoArray *array = (VCEClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + array->ucNumEntries * sizeof(VCEClockInfo));
			ATOM_PPLIB_VCE_State_Table *states =
				(ATOM_PPLIB_VCE_State_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof (VCEClockInfo)) +
				 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
			ATOM_PPLIB_VCE_State_Record *state_entry;
			VCEClockInfo *vce_clk;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			state_entry = &states->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
			}
			adev->pm.dpm.num_of_vce_states =
					states->numEntries > AMD_MAX_VCE_LEVELS ?
					AMD_MAX_VCE_LEVELS : states->numEntries;
			for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				adev->pm.dpm.vce_states[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				adev->pm.dpm.vce_states[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				adev->pm.dpm.vce_states[i].clk_idx =
					state_entry->ucClockInfoIndex & 0x3f;
				adev->pm.dpm.vce_states[i].pstate =
					(state_entry->ucClockInfoIndex & 0xc0) >> 6;
				state_entry = (ATOM_PPLIB_VCE_State_Record *)
					((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
			ext_hdr->usUVDTableOffset) {
			UVDClockInfoArray *array = (UVDClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof (UVDClockInfo)));
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				UVDClockInfo *uvd_clk = (UVDClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
					le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
					le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
			ext_hdr->usSAMUTableOffset) {
			ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
			ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
		    ext_hdr->usPPMTableOffset) {
			ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usPPMTableOffset));
			adev->pm.dpm.dyn_state.ppm_table =
				kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.ppm_table) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
			adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
				le16_to_cpu(ppm->usCpuCoreNumber);
			adev->pm.dpm.dyn_state.ppm_table->platform_tdp =
				le32_to_cpu(ppm->ulPlatformTDP);
			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
				le32_to_cpu(ppm->ulSmallACPlatformTDP);
			adev->pm.dpm.dyn_state.ppm_table->platform_tdc =
				le32_to_cpu(ppm->ulPlatformTDC);
			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
				le32_to_cpu(ppm->ulSmallACPlatformTDC);
			adev->pm.dpm.dyn_state.ppm_table->apu_tdp =
				le32_to_cpu(ppm->ulApuTDP);
			adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
				le32_to_cpu(ppm->ulDGpuTDP);
			adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
				le32_to_cpu(ppm->ulDGpuUlvPower);
			adev->pm.dpm.dyn_state.ppm_table->tj_max =
				le32_to_cpu(ppm->ulTjmax);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
			ext_hdr->usACPTableOffset) {
			ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
			ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
			ext_hdr->usPowerTuneTableOffset) {
			u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
			ATOM_PowerTune_Table *pt;
			adev->pm.dpm.dyn_state.cac_tdp_table =
				kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			if (rev > 0) {
				ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
					ppt->usMaximumPowerDeliveryLimit;
				pt = &ppt->power_tune_table;
			} else {
				ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
				pt = &ppt->power_tune_table;
			}
			adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
			adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
				le16_to_cpu(pt->usConfigurableTDP);
			adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
			adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
				le16_to_cpu(pt->usBatteryPowerLimit);
			adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
				le16_to_cpu(pt->usSmallPowerLimit);
			adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
				le16_to_cpu(pt->usLowCACLeakage);
			adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
				le16_to_cpu(pt->usHighCACLeakage);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
				ext_hdr->usSclkVddgfxTableOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(
					&adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
					dep_table);
			if (ret) {
				kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
				return ret;
			}
		}
	}

	return 0;
}

void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;

	kfree(dyn_state->vddc_dependency_on_sclk.entries);
	kfree(dyn_state->vddci_dependency_on_mclk.entries);
	kfree(dyn_state->vddc_dependency_on_mclk.entries);
	kfree(dyn_state->mvdd_dependency_on_mclk.entries);
	kfree(dyn_state->cac_leakage_table.entries);
	kfree(dyn_state->phase_shedding_limits_table.entries);
	kfree(dyn_state->ppm_table);
	kfree(dyn_state->cac_tdp_table);
	kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
	kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
	kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
	kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
	kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
}

static const char *pp_lib_thermal_controller_names[] = {
	"NONE",
	"lm63",
	"adm1032",
	"adm1030",
	"max6649",
	"lm64",
	"f75375",
	"RV6xx",
	"RV770",
	"adt7473",
	"NONE",
	"External GPIO",
	"Evergreen",
	"emc2103",
	"Sumo",
	"Northern Islands",
	"Southern Islands",
	"lm96163",
	"Sea Islands",
	"Kaveri/Kabini",
};

void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	ATOM_PPLIB_POWERPLAYTABLE *power_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	ATOM_PPLIB_THERMALCONTROLLER *controller;
	struct amdgpu_i2c_bus_rec i2c_bus;
	u16 data_offset;
	u8 frev, crev;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset))
		return;
	power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
		(mode_info->atom_context->bios + data_offset);
	controller = &power_table->sThermalController;

	/* add the i2c bus for thermal/fan chip */
	if (controller->ucType > 0) {
		if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
			adev->pm.no_fan = true;
		adev->pm.fan_pulses_per_revolution =
			controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
		if (adev->pm.fan_pulses_per_revolution) {
			adev->pm.fan_min_rpm = controller->ucFanMinRPM;
			adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
		}
		if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_NI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_CI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_KV;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
			DRM_INFO("External GPIO thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
			DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
			DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
		} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
			DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
				 pp_lib_thermal_controller_names[controller->ucType],
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
			i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
			adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
			if (adev->pm.i2c_bus) {
				struct i2c_board_info info = { };
				const char *name = pp_lib_thermal_controller_names[controller->ucType];
				info.addr = controller->ucI2cAddress >> 1;
				strlcpy(info.type, name, sizeof(info.type));
				i2c_new_device(&adev->pm.i2c_bus->adapter, &info);
			}
		} else {
			DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
				 controller->ucType,
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
		}
	}
}

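/*
 * Illustrative use (hypothetical caller, values are assumptions): with a
 * sys_mask typically obtained via drm_pcie_get_speed_cap_mask(), an invalid
 * asic_gen and default_gen = AMDGPU_PCIE_GEN3, this returns GEN3 only when
 * the platform reports 8.0 GT/s support and otherwise degrades toward GEN1.
 */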
enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
						 u32 sys_mask,
						 enum amdgpu_pcie_gen asic_gen,
						 enum amdgpu_pcie_gen default_gen)
{
	switch (asic_gen) {
	case AMDGPU_PCIE_GEN1:
		return AMDGPU_PCIE_GEN1;
	case AMDGPU_PCIE_GEN2:
		return AMDGPU_PCIE_GEN2;
	case AMDGPU_PCIE_GEN3:
		return AMDGPU_PCIE_GEN3;
	default:
		if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3))
			return AMDGPU_PCIE_GEN3;
		else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == AMDGPU_PCIE_GEN2))
			return AMDGPU_PCIE_GEN2;
		else
			return AMDGPU_PCIE_GEN1;
	}
	return AMDGPU_PCIE_GEN1;
}

u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev,
				 u16 asic_lanes,
				 u16 default_lanes)
{
	switch (asic_lanes) {
	case 0:
	default:
		return default_lanes;
	case 1:
		return 1;
	case 2:
		return 2;
	case 4:
		return 4;
	case 8:
		return 8;
	case 12:
		return 12;
	case 16:
		return 16;
	}
}

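/*
 * The table below maps a PCIe lane count to its ATOM encoding:
 * 1 -> 1, 2 -> 2, 4 -> 3, 8 -> 4, 12 -> 5, 16 -> 6; unsupported widths
 * (including anything above 16) encode as 0.
 */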
u8 amdgpu_encode_pci_lane_width(u32 lanes)
{
	u8 encoded_lanes[] = { 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6 };

	if (lanes > 16)
		return 0;

	return encoded_lanes[lanes];
}

struct amd_vce_state*
amdgpu_get_vce_clock_state(struct amdgpu_device *adev, unsigned idx)
{
	if (idx < adev->pm.dpm.num_of_vce_states)
		return &adev->pm.dpm.vce_states[idx];

	return NULL;
}