/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"

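/*
 * Decode the power state classification flags (ATOM_PPLIB_CLASSIFICATION_*
 * in class, ATOM_PPLIB_CLASSIFICATION2_* in class2) into human-readable
 * lines in the kernel log.
 */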
void amdgpu_dpm_print_class_info(u32 class, u32 class2)
{
	printk("\tui class: ");
	switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
	default:
		printk("none\n");
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		printk("battery\n");
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
		printk("balanced\n");
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		printk("performance\n");
		break;
	}
	printk("\tinternal class: ");
	if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
	    (class2 == 0))
		printk("none");
	else {
		if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
			printk("boot ");
		if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
			printk("thermal ");
		if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
			printk("limited_pwr ");
		if (class & ATOM_PPLIB_CLASSIFICATION_REST)
			printk("rest ");
		if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
			printk("forced ");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
			printk("3d_perf ");
		if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
			printk("ovrdrv ");
		if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
			printk("uvd ");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
			printk("3d_low ");
		if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
			printk("acpi ");
		if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
			printk("uvd_hd2 ");
		if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
			printk("uvd_hd ");
		if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
			printk("uvd_sd ");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
			printk("limited_pwr2 ");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
			printk("ulv ");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
			printk("uvd_mvc ");
	}
	printk("\n");
}

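/* Decode the power state capability flags into the kernel log. */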
void amdgpu_dpm_print_cap_info(u32 caps)
{
	printk("\tcaps: ");
	if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
		printk("single_disp ");
	if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
		printk("video ");
	if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
		printk("no_dc ");
	printk("\n");
}

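/*
 * Log whether the given power state is the current (c), requested (r)
 * and/or boot (b) state.
 */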
void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
				struct amdgpu_ps *rps)
{
	printk("\tstatus: ");
	if (rps == adev->pm.dpm.current_ps)
		printk("c ");
	if (rps == adev->pm.dpm.requested_ps)
		printk("r ");
	if (rps == adev->pm.dpm.boot_ps)
		printk("b ");
	printk("\n");
}

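/*
 * Return the vblank duration of the first enabled CRTC.  hw_mode.clock is
 * in kHz, so vblank_in_pixels * 1000 / clock yields microseconds.  When no
 * CRTC is active, 0xffffffff is returned to mean "displays off, vblank
 * time is effectively unbounded".
 */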
u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;
	u32 vblank_in_pixels;
	u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */

	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
				vblank_in_pixels =
					amdgpu_crtc->hw_mode.crtc_htotal *
					(amdgpu_crtc->hw_mode.crtc_vblank_end -
					amdgpu_crtc->hw_mode.crtc_vdisplay +
					(amdgpu_crtc->v_border * 2));

				vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
				break;
			}
		}
	}

	return vblank_time_us;
}

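/*
 * Return the vertical refresh rate (in Hz) of the first enabled CRTC,
 * or 0 if no CRTC is active.
 */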
u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;
	u32 vrefresh = 0;

	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
				vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
				break;
			}
		}
	}

	return vrefresh;
}

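/*
 * Factor the scaled interval i_c = i * r_c / 100 into a (p, u) pair such
 * that p << (2 * u) approximates i_c: u is half the bit length of
 * i_c >> p_b, and p is i_c divided by 1 << (2 * u).
 */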
void amdgpu_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
			      u32 *p, u32 *u)
{
	u32 b_c = 0;
	u32 i_c;
	u32 tmp;

	i_c = (i * r_c) / 100;
	tmp = i_c >> p_b;

	while (tmp) {
		b_c++;
		tmp >>= 1;
	}

	*u = (b_c + 1) / 2;
	*p = i_c / (1 << (2 * (*u)));
}

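/*
 * Derive low/high thresholds (*tl, *th) around the target t from the
 * hysteresis h and the low/high frequencies fl/fh of the adjacent levels.
 * Returns -EINVAL if either frequency is zero or fl exceeds fh.
 */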
int amdgpu_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
{
	u32 k, a, ah, al;
	u32 t1;

	if ((fl == 0) || (fh == 0) || (fl > fh))
		return -EINVAL;

	k = (100 * fh) / fl;
	t1 = (t * (k - 100));
	a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
	a = (a + 5) / 10;
	ah = ((a * t) + 5000) / 10000;
	al = a - ah;

	*th = t - ah;
	*tl = t + al;

	return 0;
}

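/*
 * A state counts as a UVD (video decode) state if any of the UVD/HD2/HD/SD
 * classification bits or the MVC bit in class2 is set.
 */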
bool amdgpu_is_uvd_state(u32 class, u32 class2)
{
	if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
		return true;
	if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
		return true;
	return false;
}

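/*
 * True only for sensor types driven by the GPU's internal thermal
 * controller; combo parts (ADT7473/EMC2103 with internal) and purely
 * external sensors return false because they need special handling.
 */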
bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
{
	switch (sensor) {
	case THERMAL_TYPE_RV6XX:
	case THERMAL_TYPE_RV770:
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_SUMO:
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_SI:
	case THERMAL_TYPE_CI:
	case THERMAL_TYPE_KV:
		return true;
	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
		return false; /* need special handling */
	case THERMAL_TYPE_NONE:
	case THERMAL_TYPE_EXTERNAL:
	case THERMAL_TYPE_EXTERNAL_GPIO:
	default:
		return false;
	}
}

union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};

union fan_info {
	struct _ATOM_PPLIB_FANTABLE fan;
	struct _ATOM_PPLIB_FANTABLE2 fan2;
	struct _ATOM_PPLIB_FANTABLE3 fan3;
};

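/*
 * Copy an ATOM clock/voltage dependency table out of the VBIOS into a
 * driver-allocated table.  Each clock is stored in the VBIOS as a 16-bit
 * low word plus an 8-bit high byte and is reassembled as (high << 16) | low.
 */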
static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
					      ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
{
	u32 size = atom_table->ucNumEntries *
		sizeof(struct amdgpu_clock_voltage_dependency_entry);
	int i;
	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;

	amdgpu_table->entries = kzalloc(size, GFP_KERNEL);
	if (!amdgpu_table->entries)
		return -ENOMEM;

	entry = &atom_table->entries[0];
	for (i = 0; i < atom_table->ucNumEntries; i++) {
		amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
			(entry->ucClockHigh << 16);
		amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
		entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
			((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
	}
	amdgpu_table->count = atom_table->ucNumEntries;

	return 0;
}

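/*
 * Cache the platform capability flags and the voltage/backbias response
 * times from the VBIOS PowerPlay table header.
 */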
int amdgpu_get_platform_caps(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
	adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
	adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);

	return 0;
}

/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26

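/*
 * Parse the optional PPLib tables out of the VBIOS: the fan table, the
 * clock/voltage dependency and phase shedding tables, the CAC leakage
 * table, and the extended-header VCE/UVD/SAMU/PPM/ACP/PowerTune/vddgfx
 * tables.  On any allocation failure, everything parsed so far is freed
 * and an error is returned.
 */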
int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	union power_info *power_info;
	union fan_info *fan_info;
	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	int ret, i;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	/* fan table */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		if (power_info->pplib3.usFanTableOffset) {
			fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
						      le16_to_cpu(power_info->pplib3.usFanTableOffset));
			adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
			adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
			adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
			adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
			adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
			adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
			adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
			if (fan_info->fan.ucFanTableFormat >= 2)
				adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
			else
				adev->pm.dpm.fan.t_max = 10900;
			adev->pm.dpm.fan.cycle_delay = 100000;
			if (fan_info->fan.ucFanTableFormat >= 3) {
				adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
				adev->pm.dpm.fan.default_max_fan_pwm =
					le16_to_cpu(fan_info->fan3.usFanPWMMax);
				adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
				adev->pm.dpm.fan.fan_output_sensitivity =
					le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
			}
			adev->pm.dpm.fan.ucode_fan_control = true;
		}
	}

	/* clock dependency tables, phase shedding tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
		if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
				(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
			if (clk_v->ucNumEntries) {
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
					le16_to_cpu(clk_v->entries[0].usSclkLow) |
					(clk_v->entries[0].ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
					le16_to_cpu(clk_v->entries[0].usMclkLow) |
					(clk_v->entries[0].ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
					le16_to_cpu(clk_v->entries[0].usVddc);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
					le16_to_cpu(clk_v->entries[0].usVddci);
			}
		}
		if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
			ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
				(ATOM_PPLIB_PhaseSheddingLimits_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
			ATOM_PPLIB_PhaseSheddingLimits_Record *entry;

			adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
				kzalloc(psl->ucNumEntries *
					sizeof(struct amdgpu_phase_shedding_limits_entry),
					GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}

			entry = &psl->entries[0];
			for (i = 0; i < psl->ucNumEntries; i++) {
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
					le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
					le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
			}
			adev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
				psl->ucNumEntries;
		}
	}

	/* cac data */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
		adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
		adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
		adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit;
		adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
		if (adev->pm.dpm.tdp_od_limit)
			adev->pm.dpm.power_control = true;
		else
			adev->pm.dpm.power_control = false;
		adev->pm.dpm.tdp_adjustment = 0;
		adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
		adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
		adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
		if (power_info->pplib5.usCACLeakageTableOffset) {
			ATOM_PPLIB_CAC_Leakage_Table *cac_table =
				(ATOM_PPLIB_CAC_Leakage_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
			ATOM_PPLIB_CAC_Leakage_Record *entry;
			u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
			adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			entry = &cac_table->entries[0];
			for (i = 0; i < cac_table->ucNumEntries; i++) {
				if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
						le16_to_cpu(entry->usVddc1);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
						le16_to_cpu(entry->usVddc2);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
						le16_to_cpu(entry->usVddc3);
				} else {
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
						le16_to_cpu(entry->usVddc);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
						le32_to_cpu(entry->ulLeakageValue);
				}
				entry = (ATOM_PPLIB_CAC_Leakage_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
			}
			adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
		}
	}

	/* ext tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
			ext_hdr->usVCETableOffset) {
			VCEClockInfoArray *array = (VCEClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + array->ucNumEntries * sizeof(VCEClockInfo));
			ATOM_PPLIB_VCE_State_Table *states =
				(ATOM_PPLIB_VCE_State_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof(VCEClockInfo)) +
				 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
			ATOM_PPLIB_VCE_State_Record *state_entry;
			VCEClockInfo *vce_clk;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			state_entry = &states->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
			}
			for (i = 0; i < states->numEntries; i++) {
				if (i >= AMDGPU_MAX_VCE_LEVELS)
					break;
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				adev->pm.dpm.vce_states[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				adev->pm.dpm.vce_states[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				adev->pm.dpm.vce_states[i].clk_idx =
					state_entry->ucClockInfoIndex & 0x3f;
				adev->pm.dpm.vce_states[i].pstate =
					(state_entry->ucClockInfoIndex & 0xc0) >> 6;
				state_entry = (ATOM_PPLIB_VCE_State_Record *)
					((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
			ext_hdr->usUVDTableOffset) {
			UVDClockInfoArray *array = (UVDClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof(UVDClockInfo)));
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				UVDClockInfo *uvd_clk = (UVDClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
					le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
					le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
			ext_hdr->usSAMUTableOffset) {
			ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
			ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
		    ext_hdr->usPPMTableOffset) {
			ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usPPMTableOffset));
			adev->pm.dpm.dyn_state.ppm_table =
				kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.ppm_table) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
			adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
				le16_to_cpu(ppm->usCpuCoreNumber);
			adev->pm.dpm.dyn_state.ppm_table->platform_tdp =
				le32_to_cpu(ppm->ulPlatformTDP);
			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
				le32_to_cpu(ppm->ulSmallACPlatformTDP);
			adev->pm.dpm.dyn_state.ppm_table->platform_tdc =
				le32_to_cpu(ppm->ulPlatformTDC);
			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
				le32_to_cpu(ppm->ulSmallACPlatformTDC);
			adev->pm.dpm.dyn_state.ppm_table->apu_tdp =
				le32_to_cpu(ppm->ulApuTDP);
			adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
				le32_to_cpu(ppm->ulDGpuTDP);
			adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
				le32_to_cpu(ppm->ulDGpuUlvPower);
			adev->pm.dpm.dyn_state.ppm_table->tj_max =
				le32_to_cpu(ppm->ulTjmax);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
			ext_hdr->usACPTableOffset) {
			ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
			ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
			ext_hdr->usPowerTuneTableOffset) {
			u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
			ATOM_PowerTune_Table *pt;
			adev->pm.dpm.dyn_state.cac_tdp_table =
				kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			if (rev > 0) {
				ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
					ppt->usMaximumPowerDeliveryLimit;
				pt = &ppt->power_tune_table;
			} else {
				ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
				pt = &ppt->power_tune_table;
			}
			adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
			adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
				le16_to_cpu(pt->usConfigurableTDP);
			adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
			adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
				le16_to_cpu(pt->usBatteryPowerLimit);
			adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
				le16_to_cpu(pt->usSmallPowerLimit);
			adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
				le16_to_cpu(pt->usLowCACLeakage);
			adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
				le16_to_cpu(pt->usHighCACLeakage);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
				ext_hdr->usSclkVddgfxTableOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(
					&adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
					dep_table);
			if (ret) {
				kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
				return ret;
			}
		}
	}

	return 0;
}

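/*
 * Free everything allocated by amdgpu_parse_extended_power_table().
 * kfree(NULL) is a no-op, so partially parsed state is handled safely.
 */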
void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;

	kfree(dyn_state->vddc_dependency_on_sclk.entries);
	kfree(dyn_state->vddci_dependency_on_mclk.entries);
	kfree(dyn_state->vddc_dependency_on_mclk.entries);
	kfree(dyn_state->mvdd_dependency_on_mclk.entries);
	kfree(dyn_state->cac_leakage_table.entries);
	kfree(dyn_state->phase_shedding_limits_table.entries);
	kfree(dyn_state->ppm_table);
	kfree(dyn_state->cac_tdp_table);
	kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
	kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
	kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
	kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
	kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
}

static const char *pp_lib_thermal_controller_names[] = {
	"NONE",
	"lm63",
	"adm1032",
	"adm1030",
	"max6649",
	"lm64",
	"f75375",
	"RV6xx",
	"RV770",
	"adt7473",
	"NONE",
	"External GPIO",
	"Evergreen",
	"emc2103",
	"Sumo",
	"Northern Islands",
	"Southern Islands",
	"lm96163",
	"Sea Islands",
	"Kaveri/Kabini",
};

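/*
 * Record the thermal controller described in the PowerPlay table (type,
 * fan availability, tach pulses per revolution, RPM limits) and, for
 * supported external controllers, register the matching i2c device on
 * the indicated bus.
 */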
void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	ATOM_PPLIB_POWERPLAYTABLE *power_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	ATOM_PPLIB_THERMALCONTROLLER *controller;
	struct amdgpu_i2c_bus_rec i2c_bus;
	u16 data_offset;
	u8 frev, crev;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset))
		return;
	power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
		(mode_info->atom_context->bios + data_offset);
	controller = &power_table->sThermalController;

	/* add the i2c bus for thermal/fan chip */
	if (controller->ucType > 0) {
		if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
			adev->pm.no_fan = true;
		adev->pm.fan_pulses_per_revolution =
			controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
		if (adev->pm.fan_pulses_per_revolution) {
			adev->pm.fan_min_rpm = controller->ucFanMinRPM;
			adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
		}
		if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_NI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_CI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_KV;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
			DRM_INFO("External GPIO thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
			DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
			DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
		} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
			DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
				 pp_lib_thermal_controller_names[controller->ucType],
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
			i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
			adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
			if (adev->pm.i2c_bus) {
				struct i2c_board_info info = { };
				const char *name = pp_lib_thermal_controller_names[controller->ucType];
				info.addr = controller->ucI2cAddress >> 1;
				strlcpy(info.type, name, sizeof(info.type));
				i2c_new_device(&adev->pm.i2c_bus->adapter, &info);
			}
		} else {
			DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
				 controller->ucType,
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
		}
	}
}

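/*
 * Clamp the requested PCIe generation to a supported value; when asic_gen
 * is not an explicit generation, pick the highest one permitted by both
 * the system speed mask and the default.
 */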
enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
						 u32 sys_mask,
						 enum amdgpu_pcie_gen asic_gen,
						 enum amdgpu_pcie_gen default_gen)
{
	switch (asic_gen) {
	case AMDGPU_PCIE_GEN1:
		return AMDGPU_PCIE_GEN1;
	case AMDGPU_PCIE_GEN2:
		return AMDGPU_PCIE_GEN2;
	case AMDGPU_PCIE_GEN3:
		return AMDGPU_PCIE_GEN3;
	default:
		if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3))
			return AMDGPU_PCIE_GEN3;
		else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == AMDGPU_PCIE_GEN2))
			return AMDGPU_PCIE_GEN2;
		else
			return AMDGPU_PCIE_GEN1;
	}
	return AMDGPU_PCIE_GEN1;
}

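/* Return asic_lanes if it is a valid PCIe width, otherwise default_lanes. */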
u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev,
				 u16 asic_lanes,
				 u16 default_lanes)
{
	switch (asic_lanes) {
	case 0:
	default:
		return default_lanes;
	case 1:
		return 1;
	case 2:
		return 2;
	case 4:
		return 4;
	case 8:
		return 8;
	case 12:
		return 12;
	case 16:
		return 16;
	}
}

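/*
 * Encode a PCIe lane count into its table index: 1, 2, 4, 8, 12 and 16
 * lanes map to 1, 2, 3, 4, 5 and 6; anything else encodes as 0.
 */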
u8 amdgpu_encode_pci_lane_width(u32 lanes)
{
	u8 encoded_lanes[] = { 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6 };

	if (lanes > 16)
		return 0;

	return encoded_lanes[lanes];
}