1 /*
2 * Copyright 2011 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24
25 #include "amdgpu.h"
26 #include "amdgpu_atombios.h"
27 #include "amdgpu_i2c.h"
28 #include "amdgpu_dpm.h"
29 #include "atom.h"
30 #include "amd_pcie.h"
31 #include "amdgpu_display.h"
32 #include "hwmgr.h"
33 #include <linux/power_supply.h>
34
35 #define WIDTH_4K 3840
36
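/* Print the UI class and internal classification flags of a power state
 * (as parsed from the ATOM PPLib tables) to the kernel log.
 */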
37 void amdgpu_dpm_print_class_info(u32 class, u32 class2)
38 {
39 const char *s;
40
41 switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
42 case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
43 default:
44 s = "none";
45 break;
46 case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
47 s = "battery";
48 break;
49 case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
50 s = "balanced";
51 break;
52 case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
53 s = "performance";
54 break;
55 }
56 printk("\tui class: %s\n", s);
57 printk("\tinternal class:");
58 if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
59 (class2 == 0))
60 pr_cont(" none");
61 else {
62 if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
63 pr_cont(" boot");
64 if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
65 pr_cont(" thermal");
66 if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
67 pr_cont(" limited_pwr");
68 if (class & ATOM_PPLIB_CLASSIFICATION_REST)
69 pr_cont(" rest");
70 if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
71 pr_cont(" forced");
72 if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
73 pr_cont(" 3d_perf");
74 if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
75 pr_cont(" ovrdrv");
76 if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
77 pr_cont(" uvd");
78 if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
79 pr_cont(" 3d_low");
80 if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
81 pr_cont(" acpi");
82 if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
83 pr_cont(" uvd_hd2");
84 if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
85 pr_cont(" uvd_hd");
86 if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
87 pr_cont(" uvd_sd");
88 if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
89 pr_cont(" limited_pwr2");
90 if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
91 pr_cont(" ulv");
92 if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
93 pr_cont(" uvd_mvc");
94 }
95 pr_cont("\n");
96 }
97
98 void amdgpu_dpm_print_cap_info(u32 caps)
99 {
100 printk("\tcaps:");
101 if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
102 pr_cont(" single_disp");
103 if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
104 pr_cont(" video");
105 if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
106 pr_cont(" no_dc");
107 pr_cont("\n");
108 }
109
110 void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
111 struct amdgpu_ps *rps)
112 {
113 printk("\tstatus:");
114 if (rps == adev->pm.dpm.current_ps)
115 pr_cont(" c");
116 if (rps == adev->pm.dpm.requested_ps)
117 pr_cont(" r");
118 if (rps == adev->pm.dpm.boot_ps)
119 pr_cont(" b");
120 pr_cont("\n");
121 }
122
123 void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev)
124 {
125 struct drm_device *ddev = adev_to_drm(adev);
126 struct drm_crtc *crtc;
127 struct amdgpu_crtc *amdgpu_crtc;
128
129 adev->pm.dpm.new_active_crtcs = 0;
130 adev->pm.dpm.new_active_crtc_count = 0;
131 if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
132 list_for_each_entry(crtc,
133 &ddev->mode_config.crtc_list, head) {
134 amdgpu_crtc = to_amdgpu_crtc(crtc);
135 if (amdgpu_crtc->enabled) {
136 adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
137 adev->pm.dpm.new_active_crtc_count++;
138 }
139 }
140 }
141 }
142
143
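/* Return the vertical blanking period, in microseconds, of the first
 * enabled CRTC with a valid mode; 0xffffffff is returned when no
 * displays are active, i.e. the blanking window is treated as unlimited.
 */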
144 u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
145 {
146 struct drm_device *dev = adev_to_drm(adev);
147 struct drm_crtc *crtc;
148 struct amdgpu_crtc *amdgpu_crtc;
149 u32 vblank_in_pixels;
150 u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
151
152 if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
153 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
154 amdgpu_crtc = to_amdgpu_crtc(crtc);
155 if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
156 vblank_in_pixels =
157 amdgpu_crtc->hw_mode.crtc_htotal *
158 (amdgpu_crtc->hw_mode.crtc_vblank_end -
159 amdgpu_crtc->hw_mode.crtc_vdisplay +
160 (amdgpu_crtc->v_border * 2));
161
162 vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
163 break;
164 }
165 }
166 }
167
168 return vblank_time_us;
169 }
170
171 u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
172 {
173 struct drm_device *dev = adev_to_drm(adev);
174 struct drm_crtc *crtc;
175 struct amdgpu_crtc *amdgpu_crtc;
176 u32 vrefresh = 0;
177
178 if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
179 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
180 amdgpu_crtc = to_amdgpu_crtc(crtc);
181 if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
182 vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
183 break;
184 }
185 }
186 }
187
188 return vrefresh;
189 }
190
191 bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
192 {
193 switch (sensor) {
194 case THERMAL_TYPE_RV6XX:
195 case THERMAL_TYPE_RV770:
196 case THERMAL_TYPE_EVERGREEN:
197 case THERMAL_TYPE_SUMO:
198 case THERMAL_TYPE_NI:
199 case THERMAL_TYPE_SI:
200 case THERMAL_TYPE_CI:
201 case THERMAL_TYPE_KV:
202 return true;
203 case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
204 case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
205 return false; /* need special handling */
206 case THERMAL_TYPE_NONE:
207 case THERMAL_TYPE_EXTERNAL:
208 case THERMAL_TYPE_EXTERNAL_GPIO:
209 default:
210 return false;
211 }
212 }
213
214 union power_info {
215 struct _ATOM_POWERPLAY_INFO info;
216 struct _ATOM_POWERPLAY_INFO_V2 info_2;
217 struct _ATOM_POWERPLAY_INFO_V3 info_3;
218 struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
219 struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
220 struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
221 struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
222 struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
223 };
224
225 union fan_info {
226 struct _ATOM_PPLIB_FANTABLE fan;
227 struct _ATOM_PPLIB_FANTABLE2 fan2;
228 struct _ATOM_PPLIB_FANTABLE3 fan3;
229 };
230
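/* Convert an ATOM PPLib clock/voltage dependency table into the driver's
 * amdgpu_clock_voltage_dependency_table representation.  The entries array
 * is allocated here and released by amdgpu_free_extended_power_table().
 */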
231 static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
232 ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
233 {
234 u32 size = atom_table->ucNumEntries *
235 sizeof(struct amdgpu_clock_voltage_dependency_entry);
236 int i;
237 ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;
238
239 amdgpu_table->entries = kzalloc(size, GFP_KERNEL);
240 if (!amdgpu_table->entries)
241 return -ENOMEM;
242
243 entry = &atom_table->entries[0];
244 for (i = 0; i < atom_table->ucNumEntries; i++) {
245 amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
246 (entry->ucClockHigh << 16);
247 amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
248 entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
249 ((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
250 }
251 amdgpu_table->count = atom_table->ucNumEntries;
252
253 return 0;
254 }
255
256 int amdgpu_get_platform_caps(struct amdgpu_device *adev)
257 {
258 struct amdgpu_mode_info *mode_info = &adev->mode_info;
259 union power_info *power_info;
260 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
261 u16 data_offset;
262 u8 frev, crev;
263
264 if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
265 &frev, &crev, &data_offset))
266 return -EINVAL;
267 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
268
269 adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
270 adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
271 adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
272
273 return 0;
274 }
275
276 /* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
277 #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
278 #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
279 #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
280 #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
281 #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
282 #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
283 #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
284 #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26
285
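/* Parse the optional PPLib tables (fan, clock/voltage dependencies, CAC
 * leakage, phase shedding, and the extended-header sub-tables for VCE, UVD,
 * SAMU, PPM, ACP, PowerTune and VDDGFX) into adev->pm.dpm.  Allocations made
 * here are freed with amdgpu_free_extended_power_table().
 */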
286 int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
287 {
288 struct amdgpu_mode_info *mode_info = &adev->mode_info;
289 union power_info *power_info;
290 union fan_info *fan_info;
291 ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
292 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
293 u16 data_offset;
294 u8 frev, crev;
295 int ret, i;
296
297 if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
298 &frev, &crev, &data_offset))
299 return -EINVAL;
300 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
301
302 /* fan table */
303 if (le16_to_cpu(power_info->pplib.usTableSize) >=
304 sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
305 if (power_info->pplib3.usFanTableOffset) {
306 fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
307 le16_to_cpu(power_info->pplib3.usFanTableOffset));
308 adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
309 adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
310 adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
311 adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
312 adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
313 adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
314 adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
315 if (fan_info->fan.ucFanTableFormat >= 2)
316 adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
317 else
318 adev->pm.dpm.fan.t_max = 10900;
319 adev->pm.dpm.fan.cycle_delay = 100000;
320 if (fan_info->fan.ucFanTableFormat >= 3) {
321 adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
322 adev->pm.dpm.fan.default_max_fan_pwm =
323 le16_to_cpu(fan_info->fan3.usFanPWMMax);
324 adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
325 adev->pm.dpm.fan.fan_output_sensitivity =
326 le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
327 }
328 adev->pm.dpm.fan.ucode_fan_control = true;
329 }
330 }
331
332 	/* clock dependency tables, shedding tables */
333 if (le16_to_cpu(power_info->pplib.usTableSize) >=
334 sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
335 if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
336 dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
337 (mode_info->atom_context->bios + data_offset +
338 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
339 ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
340 dep_table);
341 if (ret) {
342 amdgpu_free_extended_power_table(adev);
343 return ret;
344 }
345 }
346 if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
347 dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
348 (mode_info->atom_context->bios + data_offset +
349 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
350 ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
351 dep_table);
352 if (ret) {
353 amdgpu_free_extended_power_table(adev);
354 return ret;
355 }
356 }
357 if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
358 dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
359 (mode_info->atom_context->bios + data_offset +
360 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
361 ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
362 dep_table);
363 if (ret) {
364 amdgpu_free_extended_power_table(adev);
365 return ret;
366 }
367 }
368 if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
369 dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
370 (mode_info->atom_context->bios + data_offset +
371 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
372 ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
373 dep_table);
374 if (ret) {
375 amdgpu_free_extended_power_table(adev);
376 return ret;
377 }
378 }
379 if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
380 ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
381 (ATOM_PPLIB_Clock_Voltage_Limit_Table *)
382 (mode_info->atom_context->bios + data_offset +
383 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
384 if (clk_v->ucNumEntries) {
385 adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
386 le16_to_cpu(clk_v->entries[0].usSclkLow) |
387 (clk_v->entries[0].ucSclkHigh << 16);
388 adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
389 le16_to_cpu(clk_v->entries[0].usMclkLow) |
390 (clk_v->entries[0].ucMclkHigh << 16);
391 adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
392 le16_to_cpu(clk_v->entries[0].usVddc);
393 adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
394 le16_to_cpu(clk_v->entries[0].usVddci);
395 }
396 }
397 if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
398 ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
399 (ATOM_PPLIB_PhaseSheddingLimits_Table *)
400 (mode_info->atom_context->bios + data_offset +
401 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
402 ATOM_PPLIB_PhaseSheddingLimits_Record *entry;
403
404 adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
405 kcalloc(psl->ucNumEntries,
406 sizeof(struct amdgpu_phase_shedding_limits_entry),
407 GFP_KERNEL);
408 if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
409 amdgpu_free_extended_power_table(adev);
410 return -ENOMEM;
411 }
412
413 entry = &psl->entries[0];
414 for (i = 0; i < psl->ucNumEntries; i++) {
415 adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
416 le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
417 adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
418 le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
419 adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
420 le16_to_cpu(entry->usVoltage);
421 entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
422 ((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
423 }
424 adev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
425 psl->ucNumEntries;
426 }
427 }
428
429 /* cac data */
430 if (le16_to_cpu(power_info->pplib.usTableSize) >=
431 sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
432 adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
433 adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
434 adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit;
435 adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
436 if (adev->pm.dpm.tdp_od_limit)
437 adev->pm.dpm.power_control = true;
438 else
439 adev->pm.dpm.power_control = false;
440 adev->pm.dpm.tdp_adjustment = 0;
441 adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
442 adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
443 adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
444 if (power_info->pplib5.usCACLeakageTableOffset) {
445 ATOM_PPLIB_CAC_Leakage_Table *cac_table =
446 (ATOM_PPLIB_CAC_Leakage_Table *)
447 (mode_info->atom_context->bios + data_offset +
448 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
449 ATOM_PPLIB_CAC_Leakage_Record *entry;
450 u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
451 adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
452 if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
453 amdgpu_free_extended_power_table(adev);
454 return -ENOMEM;
455 }
456 entry = &cac_table->entries[0];
457 for (i = 0; i < cac_table->ucNumEntries; i++) {
458 if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
459 adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
460 le16_to_cpu(entry->usVddc1);
461 adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
462 le16_to_cpu(entry->usVddc2);
463 adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
464 le16_to_cpu(entry->usVddc3);
465 } else {
466 adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
467 le16_to_cpu(entry->usVddc);
468 adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
469 le32_to_cpu(entry->ulLeakageValue);
470 }
471 entry = (ATOM_PPLIB_CAC_Leakage_Record *)
472 ((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
473 }
474 adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
475 }
476 }
477
478 /* ext tables */
479 if (le16_to_cpu(power_info->pplib.usTableSize) >=
480 sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
481 ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
482 (mode_info->atom_context->bios + data_offset +
483 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
484 if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
485 ext_hdr->usVCETableOffset) {
486 VCEClockInfoArray *array = (VCEClockInfoArray *)
487 (mode_info->atom_context->bios + data_offset +
488 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
489 ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
490 (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
491 (mode_info->atom_context->bios + data_offset +
492 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
493 1 + array->ucNumEntries * sizeof(VCEClockInfo));
494 ATOM_PPLIB_VCE_State_Table *states =
495 (ATOM_PPLIB_VCE_State_Table *)
496 (mode_info->atom_context->bios + data_offset +
497 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
498 1 + (array->ucNumEntries * sizeof (VCEClockInfo)) +
499 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
500 ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
501 ATOM_PPLIB_VCE_State_Record *state_entry;
502 VCEClockInfo *vce_clk;
503 u32 size = limits->numEntries *
504 sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
505 adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
506 kzalloc(size, GFP_KERNEL);
507 if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
508 amdgpu_free_extended_power_table(adev);
509 return -ENOMEM;
510 }
511 adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
512 limits->numEntries;
513 entry = &limits->entries[0];
514 state_entry = &states->entries[0];
515 for (i = 0; i < limits->numEntries; i++) {
516 vce_clk = (VCEClockInfo *)
517 ((u8 *)&array->entries[0] +
518 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
519 adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
520 le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
521 adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
522 le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
523 adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
524 le16_to_cpu(entry->usVoltage);
525 entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
526 ((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
527 }
528 adev->pm.dpm.num_of_vce_states =
529 states->numEntries > AMD_MAX_VCE_LEVELS ?
530 AMD_MAX_VCE_LEVELS : states->numEntries;
531 for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
532 vce_clk = (VCEClockInfo *)
533 ((u8 *)&array->entries[0] +
534 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
535 adev->pm.dpm.vce_states[i].evclk =
536 le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
537 adev->pm.dpm.vce_states[i].ecclk =
538 le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
539 adev->pm.dpm.vce_states[i].clk_idx =
540 state_entry->ucClockInfoIndex & 0x3f;
541 adev->pm.dpm.vce_states[i].pstate =
542 (state_entry->ucClockInfoIndex & 0xc0) >> 6;
543 state_entry = (ATOM_PPLIB_VCE_State_Record *)
544 ((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
545 }
546 }
547 if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
548 ext_hdr->usUVDTableOffset) {
549 UVDClockInfoArray *array = (UVDClockInfoArray *)
550 (mode_info->atom_context->bios + data_offset +
551 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
552 ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
553 (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
554 (mode_info->atom_context->bios + data_offset +
555 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
556 1 + (array->ucNumEntries * sizeof (UVDClockInfo)));
557 ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
558 u32 size = limits->numEntries *
559 sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
560 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
561 kzalloc(size, GFP_KERNEL);
562 if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
563 amdgpu_free_extended_power_table(adev);
564 return -ENOMEM;
565 }
566 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
567 limits->numEntries;
568 entry = &limits->entries[0];
569 for (i = 0; i < limits->numEntries; i++) {
570 UVDClockInfo *uvd_clk = (UVDClockInfo *)
571 ((u8 *)&array->entries[0] +
572 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
573 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
574 le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
575 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
576 le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
577 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
578 le16_to_cpu(entry->usVoltage);
579 entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
580 ((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
581 }
582 }
583 if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
584 ext_hdr->usSAMUTableOffset) {
585 ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
586 (ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
587 (mode_info->atom_context->bios + data_offset +
588 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
589 ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
590 u32 size = limits->numEntries *
591 sizeof(struct amdgpu_clock_voltage_dependency_entry);
592 adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
593 kzalloc(size, GFP_KERNEL);
594 if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
595 amdgpu_free_extended_power_table(adev);
596 return -ENOMEM;
597 }
598 adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
599 limits->numEntries;
600 entry = &limits->entries[0];
601 for (i = 0; i < limits->numEntries; i++) {
602 adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
603 le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
604 adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
605 le16_to_cpu(entry->usVoltage);
606 entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
607 ((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
608 }
609 }
610 if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
611 ext_hdr->usPPMTableOffset) {
612 ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
613 (mode_info->atom_context->bios + data_offset +
614 le16_to_cpu(ext_hdr->usPPMTableOffset));
615 adev->pm.dpm.dyn_state.ppm_table =
616 kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
617 if (!adev->pm.dpm.dyn_state.ppm_table) {
618 amdgpu_free_extended_power_table(adev);
619 return -ENOMEM;
620 }
621 adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
622 adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
623 le16_to_cpu(ppm->usCpuCoreNumber);
624 adev->pm.dpm.dyn_state.ppm_table->platform_tdp =
625 le32_to_cpu(ppm->ulPlatformTDP);
626 adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
627 le32_to_cpu(ppm->ulSmallACPlatformTDP);
628 adev->pm.dpm.dyn_state.ppm_table->platform_tdc =
629 le32_to_cpu(ppm->ulPlatformTDC);
630 adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
631 le32_to_cpu(ppm->ulSmallACPlatformTDC);
632 adev->pm.dpm.dyn_state.ppm_table->apu_tdp =
633 le32_to_cpu(ppm->ulApuTDP);
634 adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
635 le32_to_cpu(ppm->ulDGpuTDP);
636 adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
637 le32_to_cpu(ppm->ulDGpuUlvPower);
638 adev->pm.dpm.dyn_state.ppm_table->tj_max =
639 le32_to_cpu(ppm->ulTjmax);
640 }
641 if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
642 ext_hdr->usACPTableOffset) {
643 ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
644 (ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
645 (mode_info->atom_context->bios + data_offset +
646 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
647 ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
648 u32 size = limits->numEntries *
649 sizeof(struct amdgpu_clock_voltage_dependency_entry);
650 adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
651 kzalloc(size, GFP_KERNEL);
652 if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
653 amdgpu_free_extended_power_table(adev);
654 return -ENOMEM;
655 }
656 adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
657 limits->numEntries;
658 entry = &limits->entries[0];
659 for (i = 0; i < limits->numEntries; i++) {
660 adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
661 le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
662 adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
663 le16_to_cpu(entry->usVoltage);
664 entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
665 ((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
666 }
667 }
668 if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
669 ext_hdr->usPowerTuneTableOffset) {
670 u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
671 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
672 ATOM_PowerTune_Table *pt;
673 adev->pm.dpm.dyn_state.cac_tdp_table =
674 kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
675 if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
676 amdgpu_free_extended_power_table(adev);
677 return -ENOMEM;
678 }
679 if (rev > 0) {
680 ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
681 (mode_info->atom_context->bios + data_offset +
682 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
683 adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
684 ppt->usMaximumPowerDeliveryLimit;
685 pt = &ppt->power_tune_table;
686 } else {
687 ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
688 (mode_info->atom_context->bios + data_offset +
689 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
690 adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
691 pt = &ppt->power_tune_table;
692 }
693 adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
694 adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
695 le16_to_cpu(pt->usConfigurableTDP);
696 adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
697 adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
698 le16_to_cpu(pt->usBatteryPowerLimit);
699 adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
700 le16_to_cpu(pt->usSmallPowerLimit);
701 adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
702 le16_to_cpu(pt->usLowCACLeakage);
703 adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
704 le16_to_cpu(pt->usHighCACLeakage);
705 }
706 if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
707 ext_hdr->usSclkVddgfxTableOffset) {
708 dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
709 (mode_info->atom_context->bios + data_offset +
710 le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
711 ret = amdgpu_parse_clk_voltage_dep_table(
712 &adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
713 dep_table);
714 if (ret) {
715 kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
716 return ret;
717 }
718 }
719 }
720
721 return 0;
722 }
723
724 void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
725 {
726 struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;
727
728 kfree(dyn_state->vddc_dependency_on_sclk.entries);
729 kfree(dyn_state->vddci_dependency_on_mclk.entries);
730 kfree(dyn_state->vddc_dependency_on_mclk.entries);
731 kfree(dyn_state->mvdd_dependency_on_mclk.entries);
732 kfree(dyn_state->cac_leakage_table.entries);
733 kfree(dyn_state->phase_shedding_limits_table.entries);
734 kfree(dyn_state->ppm_table);
735 kfree(dyn_state->cac_tdp_table);
736 kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
737 kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
738 kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
739 kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
740 kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
741 }
742
743 static const char *pp_lib_thermal_controller_names[] = {
744 "NONE",
745 "lm63",
746 "adm1032",
747 "adm1030",
748 "max6649",
749 "lm64",
750 "f75375",
751 "RV6xx",
752 "RV770",
753 "adt7473",
754 "NONE",
755 "External GPIO",
756 "Evergreen",
757 "emc2103",
758 "Sumo",
759 "Northern Islands",
760 "Southern Islands",
761 "lm96163",
762 "Sea Islands",
763 "Kaveri/Kabini",
764 };
765
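/* Read the thermal controller description from the PowerPlay table and, for
 * external controllers, register the matching i2c fan/thermal chip on the
 * bus indicated by the VBIOS.
 */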
766 void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
767 {
768 struct amdgpu_mode_info *mode_info = &adev->mode_info;
769 ATOM_PPLIB_POWERPLAYTABLE *power_table;
770 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
771 ATOM_PPLIB_THERMALCONTROLLER *controller;
772 struct amdgpu_i2c_bus_rec i2c_bus;
773 u16 data_offset;
774 u8 frev, crev;
775
776 if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
777 &frev, &crev, &data_offset))
778 return;
779 power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
780 (mode_info->atom_context->bios + data_offset);
781 controller = &power_table->sThermalController;
782
783 /* add the i2c bus for thermal/fan chip */
784 if (controller->ucType > 0) {
785 if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
786 adev->pm.no_fan = true;
787 adev->pm.fan_pulses_per_revolution =
788 controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
789 if (adev->pm.fan_pulses_per_revolution) {
790 adev->pm.fan_min_rpm = controller->ucFanMinRPM;
791 adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
792 }
793 if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
794 DRM_INFO("Internal thermal controller %s fan control\n",
795 (controller->ucFanParameters &
796 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
797 adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
798 } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
799 DRM_INFO("Internal thermal controller %s fan control\n",
800 (controller->ucFanParameters &
801 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
802 adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
803 } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
804 DRM_INFO("Internal thermal controller %s fan control\n",
805 (controller->ucFanParameters &
806 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
807 adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
808 } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
809 DRM_INFO("Internal thermal controller %s fan control\n",
810 (controller->ucFanParameters &
811 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
812 adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
813 } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
814 DRM_INFO("Internal thermal controller %s fan control\n",
815 (controller->ucFanParameters &
816 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
817 adev->pm.int_thermal_type = THERMAL_TYPE_NI;
818 } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
819 DRM_INFO("Internal thermal controller %s fan control\n",
820 (controller->ucFanParameters &
821 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
822 adev->pm.int_thermal_type = THERMAL_TYPE_SI;
823 } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
824 DRM_INFO("Internal thermal controller %s fan control\n",
825 (controller->ucFanParameters &
826 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
827 adev->pm.int_thermal_type = THERMAL_TYPE_CI;
828 } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
829 DRM_INFO("Internal thermal controller %s fan control\n",
830 (controller->ucFanParameters &
831 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
832 adev->pm.int_thermal_type = THERMAL_TYPE_KV;
833 } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
834 DRM_INFO("External GPIO thermal controller %s fan control\n",
835 (controller->ucFanParameters &
836 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
837 adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
838 } else if (controller->ucType ==
839 ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
840 DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
841 (controller->ucFanParameters &
842 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
843 adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
844 } else if (controller->ucType ==
845 ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
846 DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
847 (controller->ucFanParameters &
848 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
849 adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
850 } else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
851 DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
852 pp_lib_thermal_controller_names[controller->ucType],
853 controller->ucI2cAddress >> 1,
854 (controller->ucFanParameters &
855 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
856 adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
857 i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
858 adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
859 if (adev->pm.i2c_bus) {
860 struct i2c_board_info info = { };
861 const char *name = pp_lib_thermal_controller_names[controller->ucType];
862 info.addr = controller->ucI2cAddress >> 1;
863 strlcpy(info.type, name, sizeof(info.type));
864 i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info);
865 }
866 } else {
867 DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
868 controller->ucType,
869 controller->ucI2cAddress >> 1,
870 (controller->ucFanParameters &
871 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
872 }
873 }
874 }
875
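/* Pick the PCIe gen to program: honor an explicit asic_gen request,
 * otherwise fall back to the highest gen that both the system mask and the
 * requested default allow, with gen1 as the floor.
 */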
876 enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
877 u32 sys_mask,
878 enum amdgpu_pcie_gen asic_gen,
879 enum amdgpu_pcie_gen default_gen)
880 {
881 switch (asic_gen) {
882 case AMDGPU_PCIE_GEN1:
883 return AMDGPU_PCIE_GEN1;
884 case AMDGPU_PCIE_GEN2:
885 return AMDGPU_PCIE_GEN2;
886 case AMDGPU_PCIE_GEN3:
887 return AMDGPU_PCIE_GEN3;
888 default:
889 if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) &&
890 (default_gen == AMDGPU_PCIE_GEN3))
891 return AMDGPU_PCIE_GEN3;
892 else if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) &&
893 (default_gen == AMDGPU_PCIE_GEN2))
894 return AMDGPU_PCIE_GEN2;
895 else
896 return AMDGPU_PCIE_GEN1;
897 }
898 return AMDGPU_PCIE_GEN1;
899 }
900
901 struct amd_vce_state*
902 amdgpu_get_vce_clock_state(void *handle, u32 idx)
903 {
904 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
905
906 if (idx < adev->pm.dpm.num_of_vce_states)
907 return &adev->pm.dpm.vce_states[idx];
908
909 return NULL;
910 }
911
912 int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
913 {
914 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
915
916 return pp_funcs->get_sclk((adev)->powerplay.pp_handle, (low));
917 }
918
919 int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
920 {
921 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
922
923 return pp_funcs->get_mclk((adev)->powerplay.pp_handle, (low));
924 }
925
926 int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
927 {
928 int ret = 0;
929 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
930 enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;
931
932 if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) {
933 dev_dbg(adev->dev, "IP block%d already in the target %s state!",
934 block_type, gate ? "gate" : "ungate");
935 return 0;
936 }
937
938 switch (block_type) {
939 case AMD_IP_BLOCK_TYPE_UVD:
940 case AMD_IP_BLOCK_TYPE_VCE:
941 if (pp_funcs && pp_funcs->set_powergating_by_smu) {
942 /*
943 * TODO: need a better lock mechanism
944 *
945 * Here adev->pm.mutex lock protection is enforced on
946 * UVD and VCE cases only. Since for other cases, there
947 * may be already lock protection in amdgpu_pm.c.
948 * This is a quick fix for the deadlock issue below.
949 			 * INFO: task ocltst:2028 blocked for more than 120 seconds.
950 			 * Tainted: G OE 5.0.0-37-generic #40~18.04.1-Ubuntu
951 			 * "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
952 			 * ocltst D 0 2028 2026 0x00000000
953 			 * Call Trace:
954 * __schedule+0x2c0/0x870
955 * schedule+0x2c/0x70
956 * schedule_preempt_disabled+0xe/0x10
957 * __mutex_lock.isra.9+0x26d/0x4e0
958 * __mutex_lock_slowpath+0x13/0x20
959 * ? __mutex_lock_slowpath+0x13/0x20
960 * mutex_lock+0x2f/0x40
961 * amdgpu_dpm_set_powergating_by_smu+0x64/0xe0 [amdgpu]
962 * gfx_v8_0_enable_gfx_static_mg_power_gating+0x3c/0x70 [amdgpu]
963 * gfx_v8_0_set_powergating_state+0x66/0x260 [amdgpu]
964 * amdgpu_device_ip_set_powergating_state+0x62/0xb0 [amdgpu]
965 * pp_dpm_force_performance_level+0xe7/0x100 [amdgpu]
966 * amdgpu_set_dpm_forced_performance_level+0x129/0x330 [amdgpu]
967 */
968 mutex_lock(&adev->pm.mutex);
969 ret = (pp_funcs->set_powergating_by_smu(
970 (adev)->powerplay.pp_handle, block_type, gate));
971 mutex_unlock(&adev->pm.mutex);
972 }
973 break;
974 case AMD_IP_BLOCK_TYPE_GFX:
975 case AMD_IP_BLOCK_TYPE_VCN:
976 case AMD_IP_BLOCK_TYPE_SDMA:
977 case AMD_IP_BLOCK_TYPE_JPEG:
978 case AMD_IP_BLOCK_TYPE_GMC:
979 case AMD_IP_BLOCK_TYPE_ACP:
980 if (pp_funcs && pp_funcs->set_powergating_by_smu) {
981 ret = (pp_funcs->set_powergating_by_smu(
982 (adev)->powerplay.pp_handle, block_type, gate));
983 }
984 break;
985 default:
986 break;
987 }
988
989 if (!ret)
990 atomic_set(&adev->pm.pwr_state[block_type], pwr_state);
991
992 return ret;
993 }
994
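/* Ask the SMU/powerplay backend to enter BACO (Bus Active, Chip Off). */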
995 int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
996 {
997 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
998 void *pp_handle = adev->powerplay.pp_handle;
999 int ret = 0;
1000
1001 if (!pp_funcs || !pp_funcs->set_asic_baco_state)
1002 return -ENOENT;
1003
1004 /* enter BACO state */
1005 ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
1006
1007 return ret;
1008 }
1009
1010 int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
1011 {
1012 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1013 void *pp_handle = adev->powerplay.pp_handle;
1014 int ret = 0;
1015
1016 if (!pp_funcs || !pp_funcs->set_asic_baco_state)
1017 return -ENOENT;
1018
1019 /* exit BACO state */
1020 ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
1021
1022 return ret;
1023 }
1024
1025 int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
1026 enum pp_mp1_state mp1_state)
1027 {
1028 int ret = 0;
1029 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1030
1031 if (pp_funcs && pp_funcs->set_mp1_state) {
1032 ret = pp_funcs->set_mp1_state(
1033 adev->powerplay.pp_handle,
1034 mp1_state);
1035 }
1036
1037 return ret;
1038 }
1039
1040 bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
1041 {
1042 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1043 void *pp_handle = adev->powerplay.pp_handle;
1044 bool baco_cap;
1045
1046 if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
1047 return false;
1048 /* Don't use baco for reset in S3.
1049 * This is a workaround for some platforms
1050 * where entering BACO during suspend
1051 * seems to cause reboots or hangs.
1052 * This might be related to the fact that BACO controls
1053 * power to the whole GPU including devices like audio and USB.
1054 * Powering down/up everything may adversely affect these other
1055 * devices. Needs more investigation.
1056 */
1057 if (adev->in_s3)
1058 return false;
1059
1060 if (pp_funcs->get_asic_baco_capability(pp_handle, &baco_cap))
1061 return false;
1062
1063 return baco_cap;
1064 }
1065
1066 int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
1067 {
1068 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1069 void *pp_handle = adev->powerplay.pp_handle;
1070
1071 if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
1072 return -ENOENT;
1073
1074 return pp_funcs->asic_reset_mode_2(pp_handle);
1075 }
1076
1077 int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
1078 {
1079 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1080 void *pp_handle = adev->powerplay.pp_handle;
1081 int ret = 0;
1082
1083 if (!pp_funcs || !pp_funcs->set_asic_baco_state)
1084 return -ENOENT;
1085
1086 /* enter BACO state */
1087 ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
1088 if (ret)
1089 return ret;
1090
1091 /* exit BACO state */
1092 ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
1093 if (ret)
1094 return ret;
1095
1096 return 0;
1097 }
1098
1099 bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
1100 {
1101 struct smu_context *smu = &adev->smu;
1102
1103 if (is_support_sw_smu(adev))
1104 return smu_mode1_reset_is_support(smu);
1105
1106 return false;
1107 }
1108
1109 int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
1110 {
1111 struct smu_context *smu = &adev->smu;
1112
1113 if (is_support_sw_smu(adev))
1114 return smu_mode1_reset(smu);
1115
1116 return -EOPNOTSUPP;
1117 }
1118
1119 int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
1120 enum PP_SMC_POWER_PROFILE type,
1121 bool en)
1122 {
1123 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1124 int ret = 0;
1125
1126 if (amdgpu_sriov_vf(adev))
1127 return 0;
1128
1129 if (pp_funcs && pp_funcs->switch_power_profile)
1130 ret = pp_funcs->switch_power_profile(
1131 adev->powerplay.pp_handle, type, en);
1132
1133 return ret;
1134 }
1135
1136 int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
1137 uint32_t pstate)
1138 {
1139 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1140 int ret = 0;
1141
1142 if (pp_funcs && pp_funcs->set_xgmi_pstate)
1143 ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
1144 pstate);
1145
1146 return ret;
1147 }
1148
1149 int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
1150 uint32_t cstate)
1151 {
1152 int ret = 0;
1153 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1154 void *pp_handle = adev->powerplay.pp_handle;
1155
1156 if (pp_funcs && pp_funcs->set_df_cstate)
1157 ret = pp_funcs->set_df_cstate(pp_handle, cstate);
1158
1159 return ret;
1160 }
1161
1162 int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en)
1163 {
1164 struct smu_context *smu = &adev->smu;
1165
1166 if (is_support_sw_smu(adev))
1167 return smu_allow_xgmi_power_down(smu, en);
1168
1169 return 0;
1170 }
1171
1172 int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
1173 {
1174 void *pp_handle = adev->powerplay.pp_handle;
1175 const struct amd_pm_funcs *pp_funcs =
1176 adev->powerplay.pp_funcs;
1177 int ret = 0;
1178
1179 if (pp_funcs && pp_funcs->enable_mgpu_fan_boost)
1180 ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
1181
1182 return ret;
1183 }
1184
1185 int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
1186 uint32_t msg_id)
1187 {
1188 void *pp_handle = adev->powerplay.pp_handle;
1189 const struct amd_pm_funcs *pp_funcs =
1190 adev->powerplay.pp_funcs;
1191 int ret = 0;
1192
1193 if (pp_funcs && pp_funcs->set_clockgating_by_smu)
1194 ret = pp_funcs->set_clockgating_by_smu(pp_handle,
1195 msg_id);
1196
1197 return ret;
1198 }
1199
1200 int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
1201 bool acquire)
1202 {
1203 void *pp_handle = adev->powerplay.pp_handle;
1204 const struct amd_pm_funcs *pp_funcs =
1205 adev->powerplay.pp_funcs;
1206 int ret = -EOPNOTSUPP;
1207
1208 if (pp_funcs && pp_funcs->smu_i2c_bus_access)
1209 ret = pp_funcs->smu_i2c_bus_access(pp_handle,
1210 acquire);
1211
1212 return ret;
1213 }
1214
1215 void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
1216 {
1217 if (adev->pm.dpm_enabled) {
1218 mutex_lock(&adev->pm.mutex);
1219 if (power_supply_is_system_supplied() > 0)
1220 adev->pm.ac_power = true;
1221 else
1222 adev->pm.ac_power = false;
1223 if (adev->powerplay.pp_funcs &&
1224 adev->powerplay.pp_funcs->enable_bapm)
1225 amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
1226 mutex_unlock(&adev->pm.mutex);
1227
1228 if (is_support_sw_smu(adev))
1229 smu_set_ac_dc(&adev->smu);
1230 }
1231 }
1232
1233 int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
1234 void *data, uint32_t *size)
1235 {
1236 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1237 int ret = 0;
1238
1239 if (!data || !size)
1240 return -EINVAL;
1241
1242 if (pp_funcs && pp_funcs->read_sensor)
1243 ret = pp_funcs->read_sensor((adev)->powerplay.pp_handle,
1244 sensor, data, size);
1245 else
1246 ret = -EINVAL;
1247
1248 return ret;
1249 }
1250
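/* Worker scheduled from the thermal interrupt: switch to the internal
 * thermal state while the GPU is too hot, back to the user state once the
 * temperature drops, then recompute clocks.
 */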
1251 void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
1252 {
1253 struct amdgpu_device *adev =
1254 container_of(work, struct amdgpu_device,
1255 pm.dpm.thermal.work);
1256 /* switch to the thermal state */
1257 enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
1258 int temp, size = sizeof(temp);
1259
1260 if (!adev->pm.dpm_enabled)
1261 return;
1262
1263 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
1264 (void *)&temp, &size)) {
1265 if (temp < adev->pm.dpm.thermal.min_temp)
1266 /* switch back the user state */
1267 dpm_state = adev->pm.dpm.user_state;
1268 } else {
1269 if (adev->pm.dpm.thermal.high_to_low)
1270 /* switch back the user state */
1271 dpm_state = adev->pm.dpm.user_state;
1272 }
1273 mutex_lock(&adev->pm.mutex);
1274 if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
1275 adev->pm.dpm.thermal_active = true;
1276 else
1277 adev->pm.dpm.thermal_active = false;
1278 adev->pm.dpm.state = dpm_state;
1279 mutex_unlock(&adev->pm.mutex);
1280
1281 amdgpu_pm_compute_clocks(adev);
1282 }
1283
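/* Select the power state that best matches the requested dpm_state and the
 * current display configuration, walking the fallback chain (e.g. UVD HD ->
 * performance, thermal -> ACPI -> battery) when no exact match exists.
 */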
1284 static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
1285 enum amd_pm_state_type dpm_state)
1286 {
1287 int i;
1288 struct amdgpu_ps *ps;
1289 u32 ui_class;
1290 bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
1291 true : false;
1292
1293 /* check if the vblank period is too short to adjust the mclk */
1294 if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
1295 if (amdgpu_dpm_vblank_too_short(adev))
1296 single_display = false;
1297 }
1298
1299 	/* certain older asics have a separate 3D performance state,
1300 * so try that first if the user selected performance
1301 */
1302 if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
1303 dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
1304 /* balanced states don't exist at the moment */
1305 if (dpm_state == POWER_STATE_TYPE_BALANCED)
1306 dpm_state = POWER_STATE_TYPE_PERFORMANCE;
1307
1308 restart_search:
1309 /* Pick the best power state based on current conditions */
1310 for (i = 0; i < adev->pm.dpm.num_ps; i++) {
1311 ps = &adev->pm.dpm.ps[i];
1312 ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
1313 switch (dpm_state) {
1314 /* user states */
1315 case POWER_STATE_TYPE_BATTERY:
1316 if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
1317 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
1318 if (single_display)
1319 return ps;
1320 } else
1321 return ps;
1322 }
1323 break;
1324 case POWER_STATE_TYPE_BALANCED:
1325 if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
1326 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
1327 if (single_display)
1328 return ps;
1329 } else
1330 return ps;
1331 }
1332 break;
1333 case POWER_STATE_TYPE_PERFORMANCE:
1334 if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
1335 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
1336 if (single_display)
1337 return ps;
1338 } else
1339 return ps;
1340 }
1341 break;
1342 /* internal states */
1343 case POWER_STATE_TYPE_INTERNAL_UVD:
1344 if (adev->pm.dpm.uvd_ps)
1345 return adev->pm.dpm.uvd_ps;
1346 else
1347 break;
1348 case POWER_STATE_TYPE_INTERNAL_UVD_SD:
1349 if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
1350 return ps;
1351 break;
1352 case POWER_STATE_TYPE_INTERNAL_UVD_HD:
1353 if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
1354 return ps;
1355 break;
1356 case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
1357 if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
1358 return ps;
1359 break;
1360 case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
1361 if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
1362 return ps;
1363 break;
1364 case POWER_STATE_TYPE_INTERNAL_BOOT:
1365 return adev->pm.dpm.boot_ps;
1366 case POWER_STATE_TYPE_INTERNAL_THERMAL:
1367 if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
1368 return ps;
1369 break;
1370 case POWER_STATE_TYPE_INTERNAL_ACPI:
1371 if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
1372 return ps;
1373 break;
1374 case POWER_STATE_TYPE_INTERNAL_ULV:
1375 if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
1376 return ps;
1377 break;
1378 case POWER_STATE_TYPE_INTERNAL_3DPERF:
1379 if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
1380 return ps;
1381 break;
1382 default:
1383 break;
1384 }
1385 }
1386 /* use a fallback state if we didn't match */
1387 switch (dpm_state) {
1388 case POWER_STATE_TYPE_INTERNAL_UVD_SD:
1389 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
1390 goto restart_search;
1391 case POWER_STATE_TYPE_INTERNAL_UVD_HD:
1392 case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
1393 case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
1394 if (adev->pm.dpm.uvd_ps) {
1395 return adev->pm.dpm.uvd_ps;
1396 } else {
1397 dpm_state = POWER_STATE_TYPE_PERFORMANCE;
1398 goto restart_search;
1399 }
1400 case POWER_STATE_TYPE_INTERNAL_THERMAL:
1401 dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
1402 goto restart_search;
1403 case POWER_STATE_TYPE_INTERNAL_ACPI:
1404 dpm_state = POWER_STATE_TYPE_BATTERY;
1405 goto restart_search;
1406 case POWER_STATE_TYPE_BATTERY:
1407 case POWER_STATE_TYPE_BALANCED:
1408 case POWER_STATE_TYPE_INTERNAL_3DPERF:
1409 dpm_state = POWER_STATE_TYPE_PERFORMANCE;
1410 goto restart_search;
1411 default:
1412 break;
1413 }
1414
1415 return NULL;
1416 }
1417
1418 static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
1419 {
1420 struct amdgpu_ps *ps;
1421 enum amd_pm_state_type dpm_state;
1422 int ret;
1423 bool equal = false;
1424
1425 /* if dpm init failed */
1426 if (!adev->pm.dpm_enabled)
1427 return;
1428
1429 if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
1430 /* add other state override checks here */
1431 if ((!adev->pm.dpm.thermal_active) &&
1432 (!adev->pm.dpm.uvd_active))
1433 adev->pm.dpm.state = adev->pm.dpm.user_state;
1434 }
1435 dpm_state = adev->pm.dpm.state;
1436
1437 ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
1438 if (ps)
1439 adev->pm.dpm.requested_ps = ps;
1440 else
1441 return;
1442
1443 if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) {
1444 printk("switching from power state:\n");
1445 amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
1446 printk("switching to power state:\n");
1447 amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
1448 }
1449
1450 /* update whether vce is active */
1451 ps->vce_active = adev->pm.dpm.vce_active;
1452 if (adev->powerplay.pp_funcs->display_configuration_changed)
1453 amdgpu_dpm_display_configuration_changed(adev);
1454
1455 ret = amdgpu_dpm_pre_set_power_state(adev);
1456 if (ret)
1457 return;
1458
1459 if (adev->powerplay.pp_funcs->check_state_equal) {
1460 if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
1461 equal = false;
1462 }
1463
1464 if (equal)
1465 return;
1466
1467 amdgpu_dpm_set_power_state(adev);
1468 amdgpu_dpm_post_set_power_state(adev);
1469
1470 adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
1471 adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
1472
1473 if (adev->powerplay.pp_funcs->force_performance_level) {
1474 if (adev->pm.dpm.thermal_active) {
1475 enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
1476 /* force low perf level for thermal */
1477 amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
1478 /* save the user's level */
1479 adev->pm.dpm.forced_level = level;
1480 } else {
1481 /* otherwise, user selected level */
1482 amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
1483 }
1484 }
1485 }
1486
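/* Re-evaluate the power state after a display or workload change: update
 * display bandwidth, wait for the rings to idle, refresh the active display
 * configuration and either dispatch a display-config-change task to
 * powerplay or run the legacy state selection under adev->pm.mutex.
 */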
1487 void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
1488 {
1489 int i = 0;
1490
1491 if (!adev->pm.dpm_enabled)
1492 return;
1493
1494 if (adev->mode_info.num_crtc)
1495 amdgpu_display_bandwidth_update(adev);
1496
1497 for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
1498 struct amdgpu_ring *ring = adev->rings[i];
1499 if (ring && ring->sched.ready)
1500 amdgpu_fence_wait_empty(ring);
1501 }
1502
1503 if (adev->powerplay.pp_funcs->dispatch_tasks) {
1504 if (!amdgpu_device_has_dc_support(adev)) {
1505 mutex_lock(&adev->pm.mutex);
1506 amdgpu_dpm_get_active_displays(adev);
1507 adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
1508 adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
1509 adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
1510 /* we have issues with mclk switching with
1511 * refresh rates over 120 hz on the non-DC code.
1512 */
1513 if (adev->pm.pm_display_cfg.vrefresh > 120)
1514 adev->pm.pm_display_cfg.min_vblank_time = 0;
1515 if (adev->powerplay.pp_funcs->display_configuration_change)
1516 adev->powerplay.pp_funcs->display_configuration_change(
1517 adev->powerplay.pp_handle,
1518 &adev->pm.pm_display_cfg);
1519 mutex_unlock(&adev->pm.mutex);
1520 }
1521 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
1522 } else {
1523 mutex_lock(&adev->pm.mutex);
1524 amdgpu_dpm_get_active_displays(adev);
1525 amdgpu_dpm_change_power_state_locked(adev);
1526 mutex_unlock(&adev->pm.mutex);
1527 }
1528 }
1529
1530 void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
1531 {
1532 int ret = 0;
1533
1534 if (adev->family == AMDGPU_FAMILY_SI) {
1535 mutex_lock(&adev->pm.mutex);
1536 if (enable) {
1537 adev->pm.dpm.uvd_active = true;
1538 adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
1539 } else {
1540 adev->pm.dpm.uvd_active = false;
1541 }
1542 mutex_unlock(&adev->pm.mutex);
1543
1544 amdgpu_pm_compute_clocks(adev);
1545 } else {
1546 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
1547 if (ret)
1548 DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
1549 enable ? "enable" : "disable", ret);
1550
1551 /* enable/disable Low Memory PState for UVD (4k videos) */
1552 if (adev->asic_type == CHIP_STONEY &&
1553 adev->uvd.decode_image_width >= WIDTH_4K) {
1554 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
1555
1556 if (hwmgr && hwmgr->hwmgr_func &&
1557 hwmgr->hwmgr_func->update_nbdpm_pstate)
1558 hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr,
1559 !enable,
1560 true);
1561 }
1562 }
1563 }
1564
1565 void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
1566 {
1567 int ret = 0;
1568
1569 if (adev->family == AMDGPU_FAMILY_SI) {
1570 mutex_lock(&adev->pm.mutex);
1571 if (enable) {
1572 adev->pm.dpm.vce_active = true;
1573 /* XXX select vce level based on ring/task */
1574 adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
1575 } else {
1576 adev->pm.dpm.vce_active = false;
1577 }
1578 mutex_unlock(&adev->pm.mutex);
1579
1580 amdgpu_pm_compute_clocks(adev);
1581 } else {
1582 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
1583 if (ret)
1584 DRM_ERROR("Dpm %s vce failed, ret = %d. \n",
1585 enable ? "enable" : "disable", ret);
1586 }
1587 }
1588
1589 void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
1590 {
1591 int i;
1592
1593 if (adev->powerplay.pp_funcs->print_power_state == NULL)
1594 return;
1595
1596 for (i = 0; i < adev->pm.dpm.num_ps; i++)
1597 amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
1598
1599 }
1600
1601 void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
1602 {
1603 int ret = 0;
1604
1605 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
1606 if (ret)
1607 DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n",
1608 enable ? "enable" : "disable", ret);
1609 }
1610
1611 int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
1612 {
1613 int r;
1614
1615 if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) {
1616 r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
1617 if (r) {
1618 pr_err("smu firmware loading failed\n");
1619 return r;
1620 }
1621
1622 if (smu_version)
1623 *smu_version = adev->pm.fw_version;
1624 }
1625
1626 return 0;
1627 }
1628