/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "hwmgr.h"
#include "amd_powerplay.h"
#include "vega20_smumgr.h"
#include "hardwaremanager.h"
#include "ppatomfwctrl.h"
#include "atomfirmware.h"
#include "cgs_common.h"
#include "vega20_powertune.h"
#include "vega20_inc.h"
#include "pppcielanes.h"
#include "vega20_hwmgr.h"
#include "vega20_processpptables.h"
#include "vega20_pptable.h"
#include "vega20_thermal.h"
#include "vega20_ppsmc.h"
#include "pp_debug.h"
#include "amd_pcie_helpers.h"
#include "ppinterrupt.h"
#include "pp_overdriver.h"
#include "pp_thermal.h"
#include "soc15_common.h"
#include "vega20_baco.h"
#include "smuio/smuio_9_0_offset.h"
#include "smuio/smuio_9_0_sh_mask.h"
#include "nbio/nbio_7_4_sh_mask.h"

#define smnPCIE_LC_SPEED_CNTL		0x11140290
#define smnPCIE_LC_LINK_WIDTH_CNTL	0x11140288

#define LINK_WIDTH_MAX			6
#define LINK_SPEED_MAX			3
static int link_width[] = {0, 1, 2, 4, 8, 12, 16};
static int link_speed[] = {25, 50, 80, 160};

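/*
 * Populate the driver-default registry settings in the hwmgr backend:
 * averaging alphas, quadratic-equation defaults, the initial
 * disallowed-feature mask (trimmed further by ppfeaturemask) and the
 * OD8/wattman/gfxoff related knobs.
 */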
static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);

	data->gfxclk_average_alpha = PPVEGA20_VEGA20GFXCLKAVERAGEALPHA_DFLT;
	data->socclk_average_alpha = PPVEGA20_VEGA20SOCCLKAVERAGEALPHA_DFLT;
	data->uclk_average_alpha = PPVEGA20_VEGA20UCLKCLKAVERAGEALPHA_DFLT;
	data->gfx_activity_average_alpha = PPVEGA20_VEGA20GFXACTIVITYAVERAGEALPHA_DFLT;
	data->lowest_uclk_reserved_for_ulv = PPVEGA20_VEGA20LOWESTUCLKRESERVEDFORULV_DFLT;

	data->display_voltage_mode = PPVEGA20_VEGA20DISPLAYVOLTAGEMODE_DFLT;
	data->dcef_clk_quad_eqn_a = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->dcef_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->dcef_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->disp_clk_quad_eqn_a = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->disp_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->disp_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->pixel_clk_quad_eqn_a = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->pixel_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->pixel_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->phy_clk_quad_eqn_a = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->phy_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->phy_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;

	/*
	 * Disable the following features for now:
	 *   GFXCLK DS
	 *   SOCCLK DS
	 *   LCLK DS
	 *   DCEFCLK DS
	 *   FCLK DS
	 *   MP1CLK DS
	 *   MP0CLK DS
	 */
	data->registry_data.disallowed_features = 0xE0041C00;
	/* ECC feature should be disabled on old SMUs */
	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion, &hwmgr->smu_version);
	if (hwmgr->smu_version < 0x282100)
		data->registry_data.disallowed_features |= FEATURE_ECC_MASK;

	if (!(hwmgr->feature_mask & PP_PCIE_DPM_MASK))
		data->registry_data.disallowed_features |= FEATURE_DPM_LINK_MASK;

	if (!(hwmgr->feature_mask & PP_SCLK_DPM_MASK))
		data->registry_data.disallowed_features |= FEATURE_DPM_GFXCLK_MASK;

	if (!(hwmgr->feature_mask & PP_SOCCLK_DPM_MASK))
		data->registry_data.disallowed_features |= FEATURE_DPM_SOCCLK_MASK;

	if (!(hwmgr->feature_mask & PP_MCLK_DPM_MASK))
		data->registry_data.disallowed_features |= FEATURE_DPM_UCLK_MASK;

	if (!(hwmgr->feature_mask & PP_DCEFCLK_DPM_MASK))
		data->registry_data.disallowed_features |= FEATURE_DPM_DCEFCLK_MASK;

	if (!(hwmgr->feature_mask & PP_ULV_MASK))
		data->registry_data.disallowed_features |= FEATURE_ULV_MASK;

	if (!(hwmgr->feature_mask & PP_SCLK_DEEP_SLEEP_MASK))
		data->registry_data.disallowed_features |= FEATURE_DS_GFXCLK_MASK;

	data->registry_data.od_state_in_dc_support = 0;
	data->registry_data.thermal_support = 1;
	data->registry_data.skip_baco_hardware = 0;

	data->registry_data.log_avfs_param = 0;
	data->registry_data.sclk_throttle_low_notification = 1;
	data->registry_data.force_dpm_high = 0;
	data->registry_data.stable_pstate_sclk_dpm_percentage = 75;

	data->registry_data.didt_support = 0;
	if (data->registry_data.didt_support) {
		data->registry_data.didt_mode = 6;
		data->registry_data.sq_ramping_support = 1;
		data->registry_data.db_ramping_support = 0;
		data->registry_data.td_ramping_support = 0;
		data->registry_data.tcp_ramping_support = 0;
		data->registry_data.dbr_ramping_support = 0;
		data->registry_data.edc_didt_support = 1;
		data->registry_data.gc_didt_support = 0;
		data->registry_data.psm_didt_support = 0;
	}

	data->registry_data.pcie_lane_override = 0xff;
	data->registry_data.pcie_speed_override = 0xff;
	data->registry_data.pcie_clock_override = 0xffffffff;
	data->registry_data.regulator_hot_gpio_support = 1;
	data->registry_data.ac_dc_switch_gpio_support = 0;
	data->registry_data.quick_transition_support = 0;
	data->registry_data.zrpm_start_temp = 0xffff;
	data->registry_data.zrpm_stop_temp = 0xffff;
	data->registry_data.od8_feature_enable = 1;
	data->registry_data.disable_water_mark = 0;
	data->registry_data.disable_pp_tuning = 0;
	data->registry_data.disable_xlpp_tuning = 0;
	data->registry_data.disable_workload_policy = 0;
	data->registry_data.perf_ui_tuning_profile_turbo = 0x19190F0F;
	data->registry_data.perf_ui_tuning_profile_powerSave = 0x19191919;
	data->registry_data.perf_ui_tuning_profile_xl = 0x00000F0A;
	data->registry_data.force_workload_policy_mask = 0;
	data->registry_data.disable_3d_fs_detection = 0;
	data->registry_data.fps_support = 1;
	data->registry_data.disable_auto_wattman = 1;
	data->registry_data.auto_wattman_debug = 0;
	data->registry_data.auto_wattman_sample_period = 100;
	data->registry_data.fclk_gfxclk_ratio = 0;
	data->registry_data.auto_wattman_threshold = 50;
	data->registry_data.gfxoff_controlled_by_driver = 1;
	data->gfxoff_allowed = false;
	data->counter_gfxoff = 0;
	data->registry_data.pcie_dpm_key_disabled = !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
}

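/*
 * Set or clear the PHM platform capability bits (BACO, UVD/VCE power
 * gating, OD8, DiDt, regulator hot, AC/DC switch, etc.) according to the
 * registry defaults chosen above and the power-gating flags of the ASIC.
 */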
static int vega20_set_features_platform_caps(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	struct amdgpu_device *adev = hwmgr->adev;

	if (data->vddci_control == VEGA20_VOLTAGE_CONTROL_NONE)
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ControlVDDCI);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TablelessHardwareInterface);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_BACO);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EnableSMU7ThermalManagement);

	if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_UVDPowerGating);

	if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_VCEPowerGating);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_UnTabledHardwareInterface);

	if (data->registry_data.od8_feature_enable)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_OD8inACSupport);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ActivityReporting);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_FanSpeedInTableIsRPM);

	if (data->registry_data.od_state_in_dc_support) {
		if (data->registry_data.od8_feature_enable)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_OD8inDCSupport);
	}

	if (data->registry_data.thermal_support &&
	    data->registry_data.fuzzy_fan_control_support &&
	    hwmgr->thermal_controller.advanceFanControlParameters.usTMax)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ODFuzzyFanControlSupport);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DynamicPowerManagement);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SMC);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ThermalPolicyDelay);

	if (data->registry_data.force_dpm_high)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ExclusiveModeAlwaysHigh);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DynamicUVDState);

	if (data->registry_data.sclk_throttle_low_notification)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_SclkThrottleLowNotification);

	/* power tune caps */
	/* assume disabled */
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PowerContainment);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DiDtSupport);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SQRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DBRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TDRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TCPRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DBRRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DiDtEDCEnable);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_GCEDC);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PSM);

	if (data->registry_data.didt_support) {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_DiDtSupport);
		if (data->registry_data.sq_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_SQRamping);
		if (data->registry_data.db_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_DBRamping);
		if (data->registry_data.td_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_TDRamping);
		if (data->registry_data.tcp_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_TCPRamping);
		if (data->registry_data.dbr_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_DBRRamping);
		if (data->registry_data.edc_didt_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_DiDtEDCEnable);
		if (data->registry_data.gc_didt_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_GCEDC);
		if (data->registry_data.psm_didt_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_PSM);
	}

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_RegulatorHot);

	if (data->registry_data.ac_dc_switch_gpio_support) {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_AutomaticDCTransition);
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
	}

	if (data->registry_data.quick_transition_support) {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_AutomaticDCTransition);
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_Falcon_QuickTransition);
	}

	if (data->lowest_uclk_reserved_for_ulv != PPVEGA20_VEGA20LOWESTUCLKRESERVEDFORULV_DFLT) {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_LowestUclkReservedForUlv);
		if (data->lowest_uclk_reserved_for_ulv == 1)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_LowestUclkReservedForUlv);
	}

	if (data->registry_data.custom_fan_support)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_CustomFanControlSupport);

	return 0;
}

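/*
 * Map each driver-side SMU feature entry to its firmware feature bit,
 * mark it allowed/disallowed from the registry mask, and read back the
 * serial number used to build the unique ID of the ASIC.
 */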
static void vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	struct amdgpu_device *adev = hwmgr->adev;
	uint32_t top32, bottom32;
	int i;

	data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id =
			FEATURE_DPM_PREFETCHER_BIT;
	data->smu_features[GNLD_DPM_GFXCLK].smu_feature_id =
			FEATURE_DPM_GFXCLK_BIT;
	data->smu_features[GNLD_DPM_UCLK].smu_feature_id =
			FEATURE_DPM_UCLK_BIT;
	data->smu_features[GNLD_DPM_SOCCLK].smu_feature_id =
			FEATURE_DPM_SOCCLK_BIT;
	data->smu_features[GNLD_DPM_UVD].smu_feature_id =
			FEATURE_DPM_UVD_BIT;
	data->smu_features[GNLD_DPM_VCE].smu_feature_id =
			FEATURE_DPM_VCE_BIT;
	data->smu_features[GNLD_ULV].smu_feature_id =
			FEATURE_ULV_BIT;
	data->smu_features[GNLD_DPM_MP0CLK].smu_feature_id =
			FEATURE_DPM_MP0CLK_BIT;
	data->smu_features[GNLD_DPM_LINK].smu_feature_id =
			FEATURE_DPM_LINK_BIT;
	data->smu_features[GNLD_DPM_DCEFCLK].smu_feature_id =
			FEATURE_DPM_DCEFCLK_BIT;
	data->smu_features[GNLD_DS_GFXCLK].smu_feature_id =
			FEATURE_DS_GFXCLK_BIT;
	data->smu_features[GNLD_DS_SOCCLK].smu_feature_id =
			FEATURE_DS_SOCCLK_BIT;
	data->smu_features[GNLD_DS_LCLK].smu_feature_id =
			FEATURE_DS_LCLK_BIT;
	data->smu_features[GNLD_PPT].smu_feature_id =
			FEATURE_PPT_BIT;
	data->smu_features[GNLD_TDC].smu_feature_id =
			FEATURE_TDC_BIT;
	data->smu_features[GNLD_THERMAL].smu_feature_id =
			FEATURE_THERMAL_BIT;
	data->smu_features[GNLD_GFX_PER_CU_CG].smu_feature_id =
			FEATURE_GFX_PER_CU_CG_BIT;
	data->smu_features[GNLD_RM].smu_feature_id =
			FEATURE_RM_BIT;
	data->smu_features[GNLD_DS_DCEFCLK].smu_feature_id =
			FEATURE_DS_DCEFCLK_BIT;
	data->smu_features[GNLD_ACDC].smu_feature_id =
			FEATURE_ACDC_BIT;
	data->smu_features[GNLD_VR0HOT].smu_feature_id =
			FEATURE_VR0HOT_BIT;
	data->smu_features[GNLD_VR1HOT].smu_feature_id =
			FEATURE_VR1HOT_BIT;
	data->smu_features[GNLD_FW_CTF].smu_feature_id =
			FEATURE_FW_CTF_BIT;
	data->smu_features[GNLD_LED_DISPLAY].smu_feature_id =
			FEATURE_LED_DISPLAY_BIT;
	data->smu_features[GNLD_FAN_CONTROL].smu_feature_id =
			FEATURE_FAN_CONTROL_BIT;
	data->smu_features[GNLD_DIDT].smu_feature_id = FEATURE_GFX_EDC_BIT;
	data->smu_features[GNLD_GFXOFF].smu_feature_id = FEATURE_GFXOFF_BIT;
	data->smu_features[GNLD_CG].smu_feature_id = FEATURE_CG_BIT;
	data->smu_features[GNLD_DPM_FCLK].smu_feature_id = FEATURE_DPM_FCLK_BIT;
	data->smu_features[GNLD_DS_FCLK].smu_feature_id = FEATURE_DS_FCLK_BIT;
	data->smu_features[GNLD_DS_MP1CLK].smu_feature_id = FEATURE_DS_MP1CLK_BIT;
	data->smu_features[GNLD_DS_MP0CLK].smu_feature_id = FEATURE_DS_MP0CLK_BIT;
	data->smu_features[GNLD_XGMI].smu_feature_id = FEATURE_XGMI_BIT;
	data->smu_features[GNLD_ECC].smu_feature_id = FEATURE_ECC_BIT;

	for (i = 0; i < GNLD_FEATURES_MAX; i++) {
		data->smu_features[i].smu_feature_bitmap =
			(uint64_t)(1ULL << data->smu_features[i].smu_feature_id);
		data->smu_features[i].allowed =
			((data->registry_data.disallowed_features >> i) & 1) ?
			false : true;
	}

	/* Get the SN to turn into a Unique ID */
	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);

	adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
}

static int vega20_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
{
	return 0;
}

static int vega20_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
	kfree(hwmgr->backend);
	hwmgr->backend = NULL;

	return 0;
}

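/*
 * Allocate and initialize the vega20 hwmgr backend: default registry
 * data, platform caps, DPM feature defaults and the platform descriptor
 * limits used by the rest of powerplay.
 */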
static int vega20_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data;
	struct amdgpu_device *adev = hwmgr->adev;

	data = kzalloc(sizeof(struct vega20_hwmgr), GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	hwmgr->backend = data;

	hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
	hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;

	vega20_set_default_registry_data(hwmgr);

	data->disable_dpm_mask = 0xff;

	/* need to set voltage control types before EVV patching */
	data->vddc_control = VEGA20_VOLTAGE_CONTROL_NONE;
	data->mvdd_control = VEGA20_VOLTAGE_CONTROL_NONE;
	data->vddci_control = VEGA20_VOLTAGE_CONTROL_NONE;

	data->water_marks_bitmap = 0;
	data->avfs_exist = false;

	vega20_set_features_platform_caps(hwmgr);

	vega20_init_dpm_defaults(hwmgr);

	/* Parse pptable data read from VBIOS */
	vega20_set_private_data_based_on_pptable(hwmgr);

	data->is_tlu_enabled = false;

	hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
			VEGA20_MAX_HARDWARE_POWERLEVELS;
	hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
	hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;

	hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
	/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
	hwmgr->platform_descriptor.clockStep.engineClock = 500;
	hwmgr->platform_descriptor.clockStep.memoryClock = 500;

	data->total_active_cus = adev->gfx.cu_info.number;
	data->is_custom_profile_set = false;

	return 0;
}

static int vega20_init_sclk_threshold(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);

	data->low_sclk_interrupt_threshold = 0;

	return 0;
}

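/*
 * Per-ASIC setup run at hwmgr init/resume time. When coming back through
 * a BACO based reset or runtime PM, the VDDCI flush workaround is applied
 * in addition to the sclk threshold initialization.
 */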
static int vega20_setup_asic_task(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
	int ret = 0;
	bool use_baco = (amdgpu_in_reset(adev) &&
			 (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
			(adev->in_runpm && amdgpu_asic_supports_baco(adev));

	ret = vega20_init_sclk_threshold(hwmgr);
	PP_ASSERT_WITH_CODE(!ret,
			"Failed to init sclk threshold!",
			return ret);

	if (use_baco) {
		ret = vega20_baco_apply_vdci_flush_workaround(hwmgr);
		if (ret)
			pr_err("Failed to apply vega20 baco workaround!\n");
	}

	return ret;
}

/*
 * @fn vega20_init_dpm_state
 * @brief Function to initialize the Soft/Hard Min levels to 0 and the
 *        Soft/Hard Max levels to VG20_CLOCK_MAX_DEFAULT.
 *
 * @param dpm_state - the address of the DPM Table to initialize.
 * @return None.
 */
static void vega20_init_dpm_state(struct vega20_dpm_state *dpm_state)
{
	dpm_state->soft_min_level = 0x0;
	dpm_state->soft_max_level = VG20_CLOCK_MAX_DEFAULT;
	dpm_state->hard_min_level = 0x0;
	dpm_state->hard_max_level = VG20_CLOCK_MAX_DEFAULT;
}

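/*
 * The two helpers below query the SMU for the number of DPM levels of a
 * clock domain and for the frequency of a given level, both through
 * PPSMC_MSG_GetDpmFreqByIndex (index 0xFF returns the level count).
 */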
static int vega20_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
		PPCLK_e clk_id, uint32_t *num_of_levels)
{
	int ret = 0;

	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_GetDpmFreqByIndex,
			(clk_id << 16 | 0xFF),
			num_of_levels);
	PP_ASSERT_WITH_CODE(!ret,
			"[GetNumOfDpmLevel] failed to get dpm levels!",
			return ret);

	return ret;
}

static int vega20_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
		PPCLK_e clk_id, uint32_t index, uint32_t *clk)
{
	int ret = 0;

	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_GetDpmFreqByIndex,
			(clk_id << 16 | index),
			clk);
	PP_ASSERT_WITH_CODE(!ret,
			"[GetDpmFreqByIndex] failed to get dpm freq by index!",
			return ret);

	return ret;
}

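/*
 * Fill one vega20_single_dpm_table with the levels reported by the SMU
 * for the given clock domain.
 */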
static int vega20_setup_single_dpm_table(struct pp_hwmgr *hwmgr,
		struct vega20_single_dpm_table *dpm_table, PPCLK_e clk_id)
{
	int ret = 0;
	uint32_t i, num_of_levels, clk;

	ret = vega20_get_number_of_dpm_level(hwmgr, clk_id, &num_of_levels);
	PP_ASSERT_WITH_CODE(!ret,
			"[SetupSingleDpmTable] failed to get clk levels!",
			return ret);

	dpm_table->count = num_of_levels;

	for (i = 0; i < num_of_levels; i++) {
		ret = vega20_get_dpm_frequency_by_index(hwmgr, clk_id, i, &clk);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupSingleDpmTable] failed to get clk of specific level!",
				return ret);
		dpm_table->dpm_levels[i].value = clk;
		dpm_table->dpm_levels[i].enabled = true;
	}

	return ret;
}

static int vega20_setup_gfxclk_dpm_table(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_single_dpm_table *dpm_table;
	int ret = 0;

	dpm_table = &(data->dpm_table.gfx_table);
	if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_GFXCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get gfxclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.gfx_clock / 100;
	}

	return ret;
}

static int vega20_setup_memclk_dpm_table(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_single_dpm_table *dpm_table;
	int ret = 0;

	dpm_table = &(data->dpm_table.mem_table);
	if (data->smu_features[GNLD_DPM_UCLK].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_UCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get memclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.mem_clock / 100;
	}

	return ret;
}

/*
 * Initialize all DPM state tables for the SMU based on the dependency
 * table. The dynamic state patching function will then trim these state
 * tables to the allowed range based on the power policy or external
 * client requests, such as UVD requests, etc.
 */
static int vega20_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_single_dpm_table *dpm_table;
	int ret = 0;

	memset(&data->dpm_table, 0, sizeof(data->dpm_table));

	/* socclk */
	dpm_table = &(data->dpm_table.soc_table);
	if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_SOCCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get socclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.soc_clock / 100;
	}
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* gfxclk */
	dpm_table = &(data->dpm_table.gfx_table);
	ret = vega20_setup_gfxclk_dpm_table(hwmgr);
	if (ret)
		return ret;
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* memclk */
	dpm_table = &(data->dpm_table.mem_table);
	ret = vega20_setup_memclk_dpm_table(hwmgr);
	if (ret)
		return ret;
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* eclk */
	dpm_table = &(data->dpm_table.eclk_table);
	if (data->smu_features[GNLD_DPM_VCE].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_ECLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get eclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.eclock / 100;
	}
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* vclk */
	dpm_table = &(data->dpm_table.vclk_table);
	if (data->smu_features[GNLD_DPM_UVD].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_VCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get vclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.vclock / 100;
	}
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* dclk */
	dpm_table = &(data->dpm_table.dclk_table);
	if (data->smu_features[GNLD_DPM_UVD].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get dclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.dclock / 100;
	}
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* dcefclk */
	dpm_table = &(data->dpm_table.dcef_table);
	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCEFCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get dcefclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.dcef_clock / 100;
	}
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* pixclk */
	dpm_table = &(data->dpm_table.pixel_table);
	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PIXCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get pixclk dpm levels!",
				return ret);
	} else
		dpm_table->count = 0;
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* dispclk */
	dpm_table = &(data->dpm_table.display_table);
	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DISPCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get dispclk dpm levels!",
				return ret);
	} else
		dpm_table->count = 0;
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* phyclk */
	dpm_table = &(data->dpm_table.phy_table);
	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PHYCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get phyclk dpm levels!",
				return ret);
	} else
		dpm_table->count = 0;
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* fclk */
	dpm_table = &(data->dpm_table.fclk_table);
	if (data->smu_features[GNLD_DPM_FCLK].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_FCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get fclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.fclock / 100;
	}
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* save a copy of the default DPM table */
	memcpy(&(data->golden_dpm_table), &(data->dpm_table),
			sizeof(struct vega20_dpm_table));

	return 0;
}

/**
 * Initialize the SMC table and upload it to the SMC.
 *
 * @param hwmgr  the address of the powerplay hardware manager.
 * @return always 0
 */
static int vega20_init_smc_table(struct pp_hwmgr *hwmgr)
{
	int result;
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct pp_atomfwctrl_bios_boot_up_values boot_up_values;
	struct phm_ppt_v3_information *pptable_information =
		(struct phm_ppt_v3_information *)hwmgr->pptable;

	result = pp_atomfwctrl_get_vbios_bootup_values(hwmgr, &boot_up_values);
	PP_ASSERT_WITH_CODE(!result,
			"[InitSMCTable] Failed to get vbios bootup values!",
			return result);

	data->vbios_boot_state.vddc = boot_up_values.usVddc;
	data->vbios_boot_state.vddci = boot_up_values.usVddci;
	data->vbios_boot_state.mvddc = boot_up_values.usMvddc;
	data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk;
	data->vbios_boot_state.mem_clock = boot_up_values.ulUClk;
	data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
	data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
	data->vbios_boot_state.eclock = boot_up_values.ulEClk;
	data->vbios_boot_state.vclock = boot_up_values.ulVClk;
	data->vbios_boot_state.dclock = boot_up_values.ulDClk;
	data->vbios_boot_state.fclock = boot_up_values.ulFClk;
	data->vbios_boot_state.uc_cooling_id = boot_up_values.ucCoolingID;

	smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetMinDeepSleepDcefclk,
			(uint32_t)(data->vbios_boot_state.dcef_clock / 100),
			NULL);

	memcpy(pp_table, pptable_information->smc_pptable, sizeof(PPTable_t));

	result = smum_smc_table_manager(hwmgr,
					(uint8_t *)pp_table, TABLE_PPTABLE, false);
	PP_ASSERT_WITH_CODE(!result,
			"[InitSMCTable] Failed to upload PPtable!",
			return result);

	return 0;
}

/*
 * Override the PCIe link speed and link width for DPM Level 1. The
 * PPTable entries reflect the ASIC capabilities, not the system
 * capabilities: e.g. a Vega20 board in a PCIe Gen3 system. In that case,
 * when the SMU tries to switch to DPM1, it fails because the system does
 * not support Gen4.
 */
static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg, pcie_gen_arg, pcie_width_arg;
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	int i;
	int ret;

	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
		pcie_gen = 3;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
		pcie_gen = 2;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
		pcie_gen = 1;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
		pcie_gen = 0;

	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
		pcie_width = 6;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
		pcie_width = 5;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
		pcie_width = 4;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
		pcie_width = 3;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
		pcie_width = 2;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
		pcie_width = 1;

	/* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
	 * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
	 * Bit 7:0:   PCIE lane width, 1 to 7 corresponds to x1 to x32
	 */
	for (i = 0; i < NUM_LINK_LEVELS; i++) {
		pcie_gen_arg = (pp_table->PcieGenSpeed[i] > pcie_gen) ? pcie_gen :
			pp_table->PcieGenSpeed[i];
		pcie_width_arg = (pp_table->PcieLaneCount[i] > pcie_width) ? pcie_width :
			pp_table->PcieLaneCount[i];

		if (pcie_gen_arg != pp_table->PcieGenSpeed[i] || pcie_width_arg !=
		    pp_table->PcieLaneCount[i]) {
			smu_pcie_arg = (i << 16) | (pcie_gen_arg << 8) | pcie_width_arg;
			ret = smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
					NULL);
			PP_ASSERT_WITH_CODE(!ret,
				"[OverridePcieParameters] Attempt to override pcie params failed!",
				return ret);
		}

		/* update the pptable */
		pp_table->PcieGenSpeed[i] = pcie_gen_arg;
		pp_table->PcieLaneCount[i] = pcie_width_arg;
	}

	/* override to the highest if it's disabled from ppfeaturemask */
	if (data->registry_data.pcie_dpm_key_disabled) {
		for (i = 0; i < NUM_LINK_LEVELS; i++) {
			smu_pcie_arg = (i << 16) | (pcie_gen << 8) | pcie_width;
			ret = smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
					NULL);
			PP_ASSERT_WITH_CODE(!ret,
				"[OverridePcieParameters] Attempt to override pcie params failed!",
				return ret);

			pp_table->PcieGenSpeed[i] = pcie_gen;
			pp_table->PcieLaneCount[i] = pcie_width;
		}
		ret = vega20_enable_smc_features(hwmgr,
				false,
				data->smu_features[GNLD_DPM_LINK].smu_feature_bitmap);
		PP_ASSERT_WITH_CODE(!ret,
				"Attempt to Disable DPM LINK Failed!",
				return ret);
		data->smu_features[GNLD_DPM_LINK].enabled = false;
		data->smu_features[GNLD_DPM_LINK].supported = false;
	}

	return 0;
}

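/*
 * Split the allowed SMU feature bitmaps into low/high 32-bit masks and
 * hand them to the SMU before features are enabled.
 */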
static int vega20_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	uint32_t allowed_features_low = 0, allowed_features_high = 0;
	int i;
	int ret = 0;

	for (i = 0; i < GNLD_FEATURES_MAX; i++)
		if (data->smu_features[i].allowed)
			data->smu_features[i].smu_feature_id > 31 ?
				(allowed_features_high |=
					((data->smu_features[i].smu_feature_bitmap >> SMU_FEATURES_HIGH_SHIFT)
					 & 0xFFFFFFFF)) :
				(allowed_features_low |=
					((data->smu_features[i].smu_feature_bitmap >> SMU_FEATURES_LOW_SHIFT)
					 & 0xFFFFFFFF));

	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetAllowedFeaturesMaskHigh, allowed_features_high, NULL);
	PP_ASSERT_WITH_CODE(!ret,
			"[SetAllowedFeaturesMask] Attempt to set allowed features mask(high) failed!",
			return ret);

	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetAllowedFeaturesMaskLow, allowed_features_low, NULL);
	PP_ASSERT_WITH_CODE(!ret,
			"[SetAllowedFeaturesMask] Attempt to set allowed features mask (low) failed!",
			return ret);

	return 0;
}

static int vega20_run_btc(struct pp_hwmgr *hwmgr)
{
	return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunBtc, NULL);
}

static int vega20_run_btc_afll(struct pp_hwmgr *hwmgr)
{
	return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAfllBtc, NULL);
}

static int vega20_enable_all_smu_features(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	uint64_t features_enabled;
	int i;
	bool enabled;
	int ret = 0;

	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
			PPSMC_MSG_EnableAllSmuFeatures,
			NULL)) == 0,
			"[EnableAllSMUFeatures] Failed to enable all smu features!",
			return ret);

	ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
	PP_ASSERT_WITH_CODE(!ret,
			"[EnableAllSmuFeatures] Failed to get enabled smc features!",
			return ret);

	for (i = 0; i < GNLD_FEATURES_MAX; i++) {
		enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ?
			true : false;
		data->smu_features[i].enabled = enabled;
		data->smu_features[i].supported = enabled;

#if 0
		if (data->smu_features[i].allowed && !enabled)
			pr_info("[EnableAllSMUFeatures] feature %d is expected enabled!", i);
		else if (!data->smu_features[i].allowed && enabled)
			pr_info("[EnableAllSMUFeatures] feature %d is expected disabled!", i);
#endif
	}

	return 0;
}

static int vega20_notify_smc_display_change(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);

	if (data->smu_features[GNLD_DPM_UCLK].enabled)
		return smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetUclkFastSwitch,
				1,
				NULL);

	return 0;
}

static int vega20_send_clock_ratio(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);

	return smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetFclkGfxClkRatio,
			data->registry_data.fclk_gfxclk_ratio,
			NULL);
}

static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	int i, ret = 0;

	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
			PPSMC_MSG_DisableAllSmuFeatures,
			NULL)) == 0,
			"[DisableAllSMUFeatures] Failed to disable all smu features!",
			return ret);

	for (i = 0; i < GNLD_FEATURES_MAX; i++)
		data->smu_features[i].enabled = 0;

	return 0;
}

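/*
 * Derive the OverDrive8 capability mask from the pptable OD feature
 * flags and their min/max ranges, limited to the SMU features that are
 * actually enabled; OD is turned off entirely when nothing qualifies.
 */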
static int vega20_od8_set_feature_capabilities(
		struct pp_hwmgr *hwmgr)
{
	struct phm_ppt_v3_information *pptable_information =
		(struct phm_ppt_v3_information *)hwmgr->pptable;
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct vega20_od8_settings *od_settings = &(data->od8_settings);

	od_settings->overdrive8_capabilities = 0;

	if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
		if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_GFXCLK_LIMITS] &&
		    pptable_information->od_settings_max[OD8_SETTING_GFXCLK_FMAX] > 0 &&
		    pptable_information->od_settings_min[OD8_SETTING_GFXCLK_FMIN] > 0 &&
		    (pptable_information->od_settings_max[OD8_SETTING_GFXCLK_FMAX] >=
		     pptable_information->od_settings_min[OD8_SETTING_GFXCLK_FMIN]))
			od_settings->overdrive8_capabilities |= OD8_GFXCLK_LIMITS;

		if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_GFXCLK_CURVE] &&
		    (pptable_information->od_settings_min[OD8_SETTING_GFXCLK_VOLTAGE1] >=
		     pp_table->MinVoltageGfx / VOLTAGE_SCALE) &&
		    (pptable_information->od_settings_max[OD8_SETTING_GFXCLK_VOLTAGE3] <=
		     pp_table->MaxVoltageGfx / VOLTAGE_SCALE) &&
		    (pptable_information->od_settings_max[OD8_SETTING_GFXCLK_VOLTAGE3] >=
		     pptable_information->od_settings_min[OD8_SETTING_GFXCLK_VOLTAGE1]))
			od_settings->overdrive8_capabilities |= OD8_GFXCLK_CURVE;
	}

	if (data->smu_features[GNLD_DPM_UCLK].enabled) {
		pptable_information->od_settings_min[OD8_SETTING_UCLK_FMAX] =
			data->dpm_table.mem_table.dpm_levels[data->dpm_table.mem_table.count - 2].value;
		if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_UCLK_MAX] &&
		    pptable_information->od_settings_min[OD8_SETTING_UCLK_FMAX] > 0 &&
		    pptable_information->od_settings_max[OD8_SETTING_UCLK_FMAX] > 0 &&
		    (pptable_information->od_settings_max[OD8_SETTING_UCLK_FMAX] >=
		     pptable_information->od_settings_min[OD8_SETTING_UCLK_FMAX]))
			od_settings->overdrive8_capabilities |= OD8_UCLK_MAX;
	}

	if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_POWER_LIMIT] &&
	    pptable_information->od_settings_max[OD8_SETTING_POWER_PERCENTAGE] > 0 &&
	    pptable_information->od_settings_max[OD8_SETTING_POWER_PERCENTAGE] <= 100 &&
	    pptable_information->od_settings_min[OD8_SETTING_POWER_PERCENTAGE] > 0 &&
	    pptable_information->od_settings_min[OD8_SETTING_POWER_PERCENTAGE] <= 100)
		od_settings->overdrive8_capabilities |= OD8_POWER_LIMIT;

	if (data->smu_features[GNLD_FAN_CONTROL].enabled) {
		if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_FAN_ACOUSTIC_LIMIT] &&
		    pptable_information->od_settings_min[OD8_SETTING_FAN_ACOUSTIC_LIMIT] > 0 &&
		    pptable_information->od_settings_max[OD8_SETTING_FAN_ACOUSTIC_LIMIT] > 0 &&
		    (pptable_information->od_settings_max[OD8_SETTING_FAN_ACOUSTIC_LIMIT] >=
		     pptable_information->od_settings_min[OD8_SETTING_FAN_ACOUSTIC_LIMIT]))
			od_settings->overdrive8_capabilities |= OD8_ACOUSTIC_LIMIT_SCLK;

		if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_FAN_SPEED_MIN] &&
		    (pptable_information->od_settings_min[OD8_SETTING_FAN_MIN_SPEED] >=
		     (pp_table->FanPwmMin * pp_table->FanMaximumRpm / 100)) &&
		    pptable_information->od_settings_max[OD8_SETTING_FAN_MIN_SPEED] > 0 &&
		    (pptable_information->od_settings_max[OD8_SETTING_FAN_MIN_SPEED] >=
		     pptable_information->od_settings_min[OD8_SETTING_FAN_MIN_SPEED]))
			od_settings->overdrive8_capabilities |= OD8_FAN_SPEED_MIN;
	}

	if (data->smu_features[GNLD_THERMAL].enabled) {
		if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_TEMPERATURE_FAN] &&
		    pptable_information->od_settings_max[OD8_SETTING_FAN_TARGET_TEMP] > 0 &&
		    pptable_information->od_settings_min[OD8_SETTING_FAN_TARGET_TEMP] > 0 &&
		    (pptable_information->od_settings_max[OD8_SETTING_FAN_TARGET_TEMP] >=
		     pptable_information->od_settings_min[OD8_SETTING_FAN_TARGET_TEMP]))
			od_settings->overdrive8_capabilities |= OD8_TEMPERATURE_FAN;

		if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_TEMPERATURE_SYSTEM] &&
		    pptable_information->od_settings_max[OD8_SETTING_OPERATING_TEMP_MAX] > 0 &&
		    pptable_information->od_settings_min[OD8_SETTING_OPERATING_TEMP_MAX] > 0 &&
		    (pptable_information->od_settings_max[OD8_SETTING_OPERATING_TEMP_MAX] >=
		     pptable_information->od_settings_min[OD8_SETTING_OPERATING_TEMP_MAX]))
			od_settings->overdrive8_capabilities |= OD8_TEMPERATURE_SYSTEM;
	}

	if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_MEMORY_TIMING_TUNE])
		od_settings->overdrive8_capabilities |= OD8_MEMORY_TIMING_TUNE;

	if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_FAN_ZERO_RPM_CONTROL] &&
	    pp_table->FanZeroRpmEnable)
		od_settings->overdrive8_capabilities |= OD8_FAN_ZERO_RPM_CONTROL;

	if (!od_settings->overdrive8_capabilities)
		hwmgr->od_enabled = false;

	return 0;
}

static int vega20_od8_set_feature_id(
		struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_od8_settings *od_settings = &(data->od8_settings);

	if (od_settings->overdrive8_capabilities & OD8_GFXCLK_LIMITS) {
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].feature_id =
			OD8_GFXCLK_LIMITS;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].feature_id =
			OD8_GFXCLK_LIMITS;
	} else {
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].feature_id =
			0;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].feature_id =
			0;
	}

	if (od_settings->overdrive8_capabilities & OD8_GFXCLK_CURVE) {
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].feature_id =
			OD8_GFXCLK_CURVE;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id =
			OD8_GFXCLK_CURVE;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].feature_id =
			OD8_GFXCLK_CURVE;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id =
			OD8_GFXCLK_CURVE;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].feature_id =
			OD8_GFXCLK_CURVE;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id =
			OD8_GFXCLK_CURVE;
	} else {
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].feature_id =
			0;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id =
			0;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].feature_id =
			0;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id =
			0;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].feature_id =
			0;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id =
			0;
	}

	if (od_settings->overdrive8_capabilities & OD8_UCLK_MAX)
		od_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].feature_id = OD8_UCLK_MAX;
	else
		od_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].feature_id = 0;

	if (od_settings->overdrive8_capabilities & OD8_POWER_LIMIT)
		od_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].feature_id = OD8_POWER_LIMIT;
	else
		od_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].feature_id = 0;

	if (od_settings->overdrive8_capabilities & OD8_ACOUSTIC_LIMIT_SCLK)
		od_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].feature_id =
			OD8_ACOUSTIC_LIMIT_SCLK;
	else
		od_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].feature_id =
			0;

	if (od_settings->overdrive8_capabilities & OD8_FAN_SPEED_MIN)
		od_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].feature_id =
			OD8_FAN_SPEED_MIN;
	else
		od_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].feature_id =
			0;

	if (od_settings->overdrive8_capabilities & OD8_TEMPERATURE_FAN)
		od_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].feature_id =
			OD8_TEMPERATURE_FAN;
	else
		od_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].feature_id =
			0;

	if (od_settings->overdrive8_capabilities & OD8_TEMPERATURE_SYSTEM)
		od_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].feature_id =
			OD8_TEMPERATURE_SYSTEM;
	else
		od_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].feature_id =
			0;

	return 0;
}

static int vega20_od8_get_gfx_clock_base_voltage(
		struct pp_hwmgr *hwmgr,
		uint32_t *voltage,
		uint32_t freq)
{
	int ret = 0;

	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_GetAVFSVoltageByDpm,
			((AVFS_CURVE << 24) | (OD8_HOTCURVE_TEMPERATURE << 16) | freq),
			voltage);
	PP_ASSERT_WITH_CODE(!ret,
			"[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!",
			return ret);

	*voltage = *voltage / VOLTAGE_SCALE;

	return 0;
}

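/*
 * Read the overdrive table from the SMU, record the default value and
 * allowed range of every supported OD8 setting, and write the table back
 * with the derived GFXCLK curve defaults.
 */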
static int vega20_od8_initialize_default_settings(
		struct pp_hwmgr *hwmgr)
{
	struct phm_ppt_v3_information *pptable_information =
		(struct phm_ppt_v3_information *)hwmgr->pptable;
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_od8_settings *od8_settings = &(data->od8_settings);
	OverDriveTable_t *od_table = &(data->smc_state_table.overdrive_table);
	int i, ret = 0;

	/* Set Feature Capabilities */
	vega20_od8_set_feature_capabilities(hwmgr);

	/* Map FeatureID to individual settings */
	vega20_od8_set_feature_id(hwmgr);

	/* Set default values */
	ret = smum_smc_table_manager(hwmgr, (uint8_t *)od_table, TABLE_OVERDRIVE, true);
	PP_ASSERT_WITH_CODE(!ret,
			"Failed to export over drive table!",
			return ret);

	if (od8_settings->overdrive8_capabilities & OD8_GFXCLK_LIMITS) {
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].default_value =
			od_table->GfxclkFmin;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].default_value =
			od_table->GfxclkFmax;
	} else {
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].default_value =
			0;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].default_value =
			0;
	}

	if (od8_settings->overdrive8_capabilities & OD8_GFXCLK_CURVE) {
		od_table->GfxclkFreq1 = od_table->GfxclkFmin;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].default_value =
			od_table->GfxclkFreq1;

		od_table->GfxclkFreq3 = od_table->GfxclkFmax;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].default_value =
			od_table->GfxclkFreq3;

		od_table->GfxclkFreq2 = (od_table->GfxclkFreq1 + od_table->GfxclkFreq3) / 2;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].default_value =
			od_table->GfxclkFreq2;

		PP_ASSERT_WITH_CODE(!vega20_od8_get_gfx_clock_base_voltage(hwmgr,
				&(od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value),
				od_table->GfxclkFreq1),
				"[PhwVega20_OD8_InitializeDefaultSettings] Failed to get Base clock voltage from SMU!",
				od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value = 0);
		od_table->GfxclkVolt1 = od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value
			* VOLTAGE_SCALE;

		PP_ASSERT_WITH_CODE(!vega20_od8_get_gfx_clock_base_voltage(hwmgr,
				&(od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value),
				od_table->GfxclkFreq2),
				"[PhwVega20_OD8_InitializeDefaultSettings] Failed to get Base clock voltage from SMU!",
				od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value = 0);
		od_table->GfxclkVolt2 = od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value
			* VOLTAGE_SCALE;

		PP_ASSERT_WITH_CODE(!vega20_od8_get_gfx_clock_base_voltage(hwmgr,
				&(od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value),
				od_table->GfxclkFreq3),
				"[PhwVega20_OD8_InitializeDefaultSettings] Failed to get Base clock voltage from SMU!",
				od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value = 0);
		od_table->GfxclkVolt3 = od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value
			* VOLTAGE_SCALE;
	} else {
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].default_value =
			0;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value =
			0;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].default_value =
			0;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value =
			0;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].default_value =
			0;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value =
			0;
	}

	if (od8_settings->overdrive8_capabilities & OD8_UCLK_MAX)
		od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].default_value =
			od_table->UclkFmax;
	else
		od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].default_value =
			0;

	if (od8_settings->overdrive8_capabilities & OD8_POWER_LIMIT)
		od8_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].default_value =
			od_table->OverDrivePct;
	else
		od8_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].default_value =
			0;

	if (od8_settings->overdrive8_capabilities & OD8_ACOUSTIC_LIMIT_SCLK)
		od8_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].default_value =
			od_table->FanMaximumRpm;
	else
		od8_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].default_value =
			0;

	if (od8_settings->overdrive8_capabilities & OD8_FAN_SPEED_MIN)
		od8_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].default_value =
			od_table->FanMinimumPwm * data->smc_state_table.pp_table.FanMaximumRpm / 100;
	else
		od8_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].default_value =
			0;

	if (od8_settings->overdrive8_capabilities & OD8_TEMPERATURE_FAN)
		od8_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].default_value =
			od_table->FanTargetTemperature;
	else
		od8_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].default_value =
			0;

	if (od8_settings->overdrive8_capabilities & OD8_TEMPERATURE_SYSTEM)
		od8_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].default_value =
			od_table->MaxOpTemp;
	else
		od8_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].default_value =
			0;

	for (i = 0; i < OD8_SETTING_COUNT; i++) {
		if (od8_settings->od8_settings_array[i].feature_id) {
			od8_settings->od8_settings_array[i].min_value =
				pptable_information->od_settings_min[i];
			od8_settings->od8_settings_array[i].max_value =
				pptable_information->od_settings_max[i];
			od8_settings->od8_settings_array[i].current_value =
				od8_settings->od8_settings_array[i].default_value;
		} else {
			od8_settings->od8_settings_array[i].min_value =
				0;
			od8_settings->od8_settings_array[i].max_value =
				0;
			od8_settings->od8_settings_array[i].current_value =
				0;
		}
	}

	ret = smum_smc_table_manager(hwmgr, (uint8_t *)od_table, TABLE_OVERDRIVE, false);
	PP_ASSERT_WITH_CODE(!ret,
			"Failed to import over drive table!",
			return ret);

	return 0;
}

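/*
 * Update a single OD8 setting: export the current overdrive table from
 * the SMU, patch the requested field (with range checks for GfxclkFmax
 * and UclkFmax) and import the table again.
 */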
vega20_od8_set_settings(struct pp_hwmgr * hwmgr,uint32_t index,uint32_t value)1391 static int vega20_od8_set_settings(
1392 struct pp_hwmgr *hwmgr,
1393 uint32_t index,
1394 uint32_t value)
1395 {
1396 OverDriveTable_t od_table;
1397 int ret = 0;
1398 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
1399 struct vega20_od8_single_setting *od8_settings =
1400 data->od8_settings.od8_settings_array;
1401
1402 ret = smum_smc_table_manager(hwmgr, (uint8_t *)(&od_table), TABLE_OVERDRIVE, true);
1403 PP_ASSERT_WITH_CODE(!ret,
1404 "Failed to export over drive table!",
1405 return ret);
1406
1407 switch(index) {
1408 case OD8_SETTING_GFXCLK_FMIN:
1409 od_table.GfxclkFmin = (uint16_t)value;
1410 break;
1411 case OD8_SETTING_GFXCLK_FMAX:
1412 if (value < od8_settings[OD8_SETTING_GFXCLK_FMAX].min_value ||
1413 value > od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value)
1414 return -EINVAL;
1415
1416 od_table.GfxclkFmax = (uint16_t)value;
1417 break;
1418 case OD8_SETTING_GFXCLK_FREQ1:
1419 od_table.GfxclkFreq1 = (uint16_t)value;
1420 break;
1421 case OD8_SETTING_GFXCLK_VOLTAGE1:
1422 od_table.GfxclkVolt1 = (uint16_t)value;
1423 break;
1424 case OD8_SETTING_GFXCLK_FREQ2:
1425 od_table.GfxclkFreq2 = (uint16_t)value;
1426 break;
1427 case OD8_SETTING_GFXCLK_VOLTAGE2:
1428 od_table.GfxclkVolt2 = (uint16_t)value;
1429 break;
1430 case OD8_SETTING_GFXCLK_FREQ3:
1431 od_table.GfxclkFreq3 = (uint16_t)value;
1432 break;
1433 case OD8_SETTING_GFXCLK_VOLTAGE3:
1434 od_table.GfxclkVolt3 = (uint16_t)value;
1435 break;
1436 case OD8_SETTING_UCLK_FMAX:
1437 if (value < od8_settings[OD8_SETTING_UCLK_FMAX].min_value ||
1438 value > od8_settings[OD8_SETTING_UCLK_FMAX].max_value)
1439 return -EINVAL;
1440 od_table.UclkFmax = (uint16_t)value;
1441 break;
1442 case OD8_SETTING_POWER_PERCENTAGE:
1443 od_table.OverDrivePct = (int16_t)value;
1444 break;
1445 case OD8_SETTING_FAN_ACOUSTIC_LIMIT:
1446 od_table.FanMaximumRpm = (uint16_t)value;
1447 break;
1448 case OD8_SETTING_FAN_MIN_SPEED:
1449 od_table.FanMinimumPwm = (uint16_t)value;
1450 break;
1451 case OD8_SETTING_FAN_TARGET_TEMP:
1452 od_table.FanTargetTemperature = (uint16_t)value;
1453 break;
1454 case OD8_SETTING_OPERATING_TEMP_MAX:
1455 od_table.MaxOpTemp = (uint16_t)value;
1456 break;
1457 }
1458
1459 ret = smum_smc_table_manager(hwmgr, (uint8_t *)(&od_table), TABLE_OVERDRIVE, false);
1460 PP_ASSERT_WITH_CODE(!ret,
1461 "Failed to import over drive table!",
1462 return ret);
1463
1464 return 0;
1465 }
1466
1467 static int vega20_get_sclk_od(
1468 struct pp_hwmgr *hwmgr)
1469 {
1470 struct vega20_hwmgr *data = hwmgr->backend;
1471 struct vega20_single_dpm_table *sclk_table =
1472 &(data->dpm_table.gfx_table);
1473 struct vega20_single_dpm_table *golden_sclk_table =
1474 &(data->golden_dpm_table.gfx_table);
1475 int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
1476 int golden_value = golden_sclk_table->dpm_levels
1477 [golden_sclk_table->count - 1].value;
1478
1479 	/* OD percentage: how far the current max gfxclk exceeds the golden (default) max, rounded up */
1480 value -= golden_value;
1481 value = DIV_ROUND_UP(value * 100, golden_value);
1482
1483 return value;
1484 }
1485
1486 static int vega20_set_sclk_od(
1487 struct pp_hwmgr *hwmgr, uint32_t value)
1488 {
1489 struct vega20_hwmgr *data = hwmgr->backend;
1490 struct vega20_single_dpm_table *golden_sclk_table =
1491 &(data->golden_dpm_table.gfx_table);
1492 uint32_t od_sclk;
1493 int ret = 0;
1494
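/* New gfxclk Fmax = golden (default) max * (1 + value / 100), where value is the requested OD percentage */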
1495 od_sclk = golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value * value;
1496 od_sclk /= 100;
1497 od_sclk += golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
1498
1499 ret = vega20_od8_set_settings(hwmgr, OD8_SETTING_GFXCLK_FMAX, od_sclk);
1500 PP_ASSERT_WITH_CODE(!ret,
1501 "[SetSclkOD] failed to set od gfxclk!",
1502 return ret);
1503
1504 /* retrieve updated gfxclk table */
1505 ret = vega20_setup_gfxclk_dpm_table(hwmgr);
1506 PP_ASSERT_WITH_CODE(!ret,
1507 "[SetSclkOD] failed to refresh gfxclk table!",
1508 return ret);
1509
1510 return 0;
1511 }
1512
1513 static int vega20_get_mclk_od(
1514 struct pp_hwmgr *hwmgr)
1515 {
1516 struct vega20_hwmgr *data = hwmgr->backend;
1517 struct vega20_single_dpm_table *mclk_table =
1518 &(data->dpm_table.mem_table);
1519 struct vega20_single_dpm_table *golden_mclk_table =
1520 &(data->golden_dpm_table.mem_table);
1521 int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
1522 int golden_value = golden_mclk_table->dpm_levels
1523 [golden_mclk_table->count - 1].value;
1524
1525 	/* OD percentage: how far the current max memclk exceeds the golden (default) max, rounded up */
1526 value -= golden_value;
1527 value = DIV_ROUND_UP(value * 100, golden_value);
1528
1529 return value;
1530 }
1531
1532 static int vega20_set_mclk_od(
1533 struct pp_hwmgr *hwmgr, uint32_t value)
1534 {
1535 struct vega20_hwmgr *data = hwmgr->backend;
1536 struct vega20_single_dpm_table *golden_mclk_table =
1537 &(data->golden_dpm_table.mem_table);
1538 uint32_t od_mclk;
1539 int ret = 0;
1540
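/* New memclk Fmax = golden (default) max * (1 + value / 100), where value is the requested OD percentage */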
1541 od_mclk = golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value * value;
1542 od_mclk /= 100;
1543 od_mclk += golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
1544
1545 ret = vega20_od8_set_settings(hwmgr, OD8_SETTING_UCLK_FMAX, od_mclk);
1546 PP_ASSERT_WITH_CODE(!ret,
1547 "[SetMclkOD] failed to set od memclk!",
1548 return ret);
1549
1550 /* retrieve updated memclk table */
1551 ret = vega20_setup_memclk_dpm_table(hwmgr);
1552 PP_ASSERT_WITH_CODE(!ret,
1553 "[SetMclkOD] failed to refresh memclk table!",
1554 return ret);
1555
1556 return 0;
1557 }
1558
1559 static int vega20_populate_umdpstate_clocks(
1560 struct pp_hwmgr *hwmgr)
1561 {
1562 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
1563 struct vega20_single_dpm_table *gfx_table = &(data->dpm_table.gfx_table);
1564 struct vega20_single_dpm_table *mem_table = &(data->dpm_table.mem_table);
1565
1566 hwmgr->pstate_sclk = gfx_table->dpm_levels[0].value;
1567 hwmgr->pstate_mclk = mem_table->dpm_levels[0].value;
1568
1569 if (gfx_table->count > VEGA20_UMD_PSTATE_GFXCLK_LEVEL &&
1570 mem_table->count > VEGA20_UMD_PSTATE_MCLK_LEVEL) {
1571 hwmgr->pstate_sclk = gfx_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value;
1572 hwmgr->pstate_mclk = mem_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value;
1573 }
1574
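/* dpm level values are in MHz; scale by 100 here (10 kHz units, per the usual powerplay convention) */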
1575 hwmgr->pstate_sclk = hwmgr->pstate_sclk * 100;
1576 hwmgr->pstate_mclk = hwmgr->pstate_mclk * 100;
1577
1578 return 0;
1579 }
1580
1581 static int vega20_get_max_sustainable_clock(struct pp_hwmgr *hwmgr,
1582 PP_Clock *clock, PPCLK_e clock_select)
1583 {
1584 int ret = 0;
1585
1586 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
1587 PPSMC_MSG_GetDcModeMaxDpmFreq,
1588 (clock_select << 16),
1589 clock)) == 0,
1590 "[GetMaxSustainableClock] Failed to get max DC clock from SMC!",
1591 return ret);
1592
1593 /* if DC limit is zero, return AC limit */
1594 if (*clock == 0) {
1595 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
1596 PPSMC_MSG_GetMaxDpmFreq,
1597 (clock_select << 16),
1598 clock)) == 0,
1599 "[GetMaxSustainableClock] failed to get max AC clock from SMC!",
1600 return ret);
1601 }
1602
1603 return 0;
1604 }
1605
1606 static int vega20_init_max_sustainable_clocks(struct pp_hwmgr *hwmgr)
1607 {
1608 struct vega20_hwmgr *data =
1609 (struct vega20_hwmgr *)(hwmgr->backend);
1610 struct vega20_max_sustainable_clocks *max_sustainable_clocks =
1611 &(data->max_sustainable_clocks);
1612 int ret = 0;
1613
1614 max_sustainable_clocks->uclock = data->vbios_boot_state.mem_clock / 100;
1615 max_sustainable_clocks->soc_clock = data->vbios_boot_state.soc_clock / 100;
1616 max_sustainable_clocks->dcef_clock = data->vbios_boot_state.dcef_clock / 100;
1617 max_sustainable_clocks->display_clock = 0xFFFFFFFF;
1618 max_sustainable_clocks->phy_clock = 0xFFFFFFFF;
1619 max_sustainable_clocks->pixel_clock = 0xFFFFFFFF;
1620
1621 if (data->smu_features[GNLD_DPM_UCLK].enabled)
1622 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr,
1623 &(max_sustainable_clocks->uclock),
1624 PPCLK_UCLK)) == 0,
1625 "[InitMaxSustainableClocks] failed to get max UCLK from SMC!",
1626 return ret);
1627
1628 if (data->smu_features[GNLD_DPM_SOCCLK].enabled)
1629 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr,
1630 &(max_sustainable_clocks->soc_clock),
1631 PPCLK_SOCCLK)) == 0,
1632 "[InitMaxSustainableClocks] failed to get max SOCCLK from SMC!",
1633 return ret);
1634
1635 if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
1636 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr,
1637 &(max_sustainable_clocks->dcef_clock),
1638 PPCLK_DCEFCLK)) == 0,
1639 "[InitMaxSustainableClocks] failed to get max DCEFCLK from SMC!",
1640 return ret);
1641 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr,
1642 &(max_sustainable_clocks->display_clock),
1643 PPCLK_DISPCLK)) == 0,
1644 "[InitMaxSustainableClocks] failed to get max DISPCLK from SMC!",
1645 return ret);
1646 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr,
1647 &(max_sustainable_clocks->phy_clock),
1648 PPCLK_PHYCLK)) == 0,
1649 "[InitMaxSustainableClocks] failed to get max PHYCLK from SMC!",
1650 return ret);
1651 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr,
1652 &(max_sustainable_clocks->pixel_clock),
1653 PPCLK_PIXCLK)) == 0,
1654 "[InitMaxSustainableClocks] failed to get max PIXCLK from SMC!",
1655 return ret);
1656 }
1657
1658 if (max_sustainable_clocks->soc_clock < max_sustainable_clocks->uclock)
1659 max_sustainable_clocks->uclock = max_sustainable_clocks->soc_clock;
1660
1661 return 0;
1662 }
1663
1664 static int vega20_enable_mgpu_fan_boost(struct pp_hwmgr *hwmgr)
1665 {
1666 int result;
1667
1668 result = smum_send_msg_to_smc(hwmgr,
1669 PPSMC_MSG_SetMGpuFanBoostLimitRpm,
1670 NULL);
1671 PP_ASSERT_WITH_CODE(!result,
1672 "[EnableMgpuFan] Failed to enable mgpu fan boost!",
1673 return result);
1674
1675 return 0;
1676 }
1677
1678 static void vega20_init_powergate_state(struct pp_hwmgr *hwmgr)
1679 {
1680 struct vega20_hwmgr *data =
1681 (struct vega20_hwmgr *)(hwmgr->backend);
1682
1683 data->uvd_power_gated = true;
1684 data->vce_power_gated = true;
1685 }
1686
1687 static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
1688 {
1689 int result = 0;
1690
1691 smum_send_msg_to_smc_with_parameter(hwmgr,
1692 PPSMC_MSG_NumOfDisplays, 0, NULL);
1693
1694 result = vega20_set_allowed_featuresmask(hwmgr);
1695 PP_ASSERT_WITH_CODE(!result,
1696 "[EnableDPMTasks] Failed to set allowed featuresmask!\n",
1697 return result);
1698
1699 result = vega20_init_smc_table(hwmgr);
1700 PP_ASSERT_WITH_CODE(!result,
1701 "[EnableDPMTasks] Failed to initialize SMC table!",
1702 return result);
1703
1704 result = vega20_run_btc(hwmgr);
1705 PP_ASSERT_WITH_CODE(!result,
1706 "[EnableDPMTasks] Failed to run btc!",
1707 return result);
1708
1709 result = vega20_run_btc_afll(hwmgr);
1710 PP_ASSERT_WITH_CODE(!result,
1711 "[EnableDPMTasks] Failed to run btc afll!",
1712 return result);
1713
1714 result = vega20_enable_all_smu_features(hwmgr);
1715 PP_ASSERT_WITH_CODE(!result,
1716 "[EnableDPMTasks] Failed to enable all smu features!",
1717 return result);
1718
1719 result = vega20_override_pcie_parameters(hwmgr);
1720 PP_ASSERT_WITH_CODE(!result,
1721 "[EnableDPMTasks] Failed to override pcie parameters!",
1722 return result);
1723
1724 result = vega20_notify_smc_display_change(hwmgr);
1725 PP_ASSERT_WITH_CODE(!result,
1726 "[EnableDPMTasks] Failed to notify smc display change!",
1727 return result);
1728
1729 result = vega20_send_clock_ratio(hwmgr);
1730 PP_ASSERT_WITH_CODE(!result,
1731 "[EnableDPMTasks] Failed to send clock ratio!",
1732 return result);
1733
1734 /* Initialize UVD/VCE powergating state */
1735 vega20_init_powergate_state(hwmgr);
1736
1737 result = vega20_setup_default_dpm_tables(hwmgr);
1738 PP_ASSERT_WITH_CODE(!result,
1739 "[EnableDPMTasks] Failed to setup default DPM tables!",
1740 return result);
1741
1742 result = vega20_init_max_sustainable_clocks(hwmgr);
1743 PP_ASSERT_WITH_CODE(!result,
1744 "[EnableDPMTasks] Failed to get maximum sustainable clocks!",
1745 return result);
1746
1747 result = vega20_power_control_set_level(hwmgr);
1748 PP_ASSERT_WITH_CODE(!result,
1749 "[EnableDPMTasks] Failed to power control set level!",
1750 return result);
1751
1752 result = vega20_od8_initialize_default_settings(hwmgr);
1753 PP_ASSERT_WITH_CODE(!result,
1754 "[EnableDPMTasks] Failed to initialize odn settings!",
1755 return result);
1756
1757 result = vega20_populate_umdpstate_clocks(hwmgr);
1758 PP_ASSERT_WITH_CODE(!result,
1759 "[EnableDPMTasks] Failed to populate umdpstate clocks!",
1760 return result);
1761
1762 result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetPptLimit,
1763 POWER_SOURCE_AC << 16, &hwmgr->default_power_limit);
1764 PP_ASSERT_WITH_CODE(!result,
1765 "[GetPptLimit] get default PPT limit failed!",
1766 return result);
1767 hwmgr->power_limit =
1768 hwmgr->default_power_limit;
1769
1770 return 0;
1771 }
1772
1773 static uint32_t vega20_find_lowest_dpm_level(
1774 struct vega20_single_dpm_table *table)
1775 {
1776 uint32_t i;
1777
1778 for (i = 0; i < table->count; i++) {
1779 if (table->dpm_levels[i].enabled)
1780 break;
1781 }
1782 if (i >= table->count) {
1783 i = 0;
1784 table->dpm_levels[i].enabled = true;
1785 }
1786
1787 return i;
1788 }
1789
1790 static uint32_t vega20_find_highest_dpm_level(
1791 struct vega20_single_dpm_table *table)
1792 {
1793 int i = 0;
1794
1795 PP_ASSERT_WITH_CODE(table != NULL,
1796 "[FindHighestDPMLevel] DPM Table does not exist!",
1797 return 0);
1798 PP_ASSERT_WITH_CODE(table->count > 0,
1799 "[FindHighestDPMLevel] DPM Table has no entry!",
1800 return 0);
1801 PP_ASSERT_WITH_CODE(table->count <= MAX_REGULAR_DPM_NUMBER,
1802 "[FindHighestDPMLevel] DPM Table has too many entries!",
1803 return MAX_REGULAR_DPM_NUMBER - 1);
1804
1805 for (i = table->count - 1; i >= 0; i--) {
1806 if (table->dpm_levels[i].enabled)
1807 break;
1808 }
1809 if (i < 0) {
1810 i = 0;
1811 table->dpm_levels[i].enabled = true;
1812 }
1813
1814 return i;
1815 }
1816
1817 static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_mask)
1818 {
1819 struct vega20_hwmgr *data =
1820 (struct vega20_hwmgr *)(hwmgr->backend);
1821 uint32_t min_freq;
1822 int ret = 0;
1823
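/*
 * Each SetSoftMinByFreq/SetHardMinByFreq message packs the clock ID into
 * the upper 16 bits of its parameter and the target frequency in MHz into
 * the lower 16 bits.
 */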
1824 if (data->smu_features[GNLD_DPM_GFXCLK].enabled &&
1825 (feature_mask & FEATURE_DPM_GFXCLK_MASK)) {
1826 min_freq = data->dpm_table.gfx_table.dpm_state.soft_min_level;
1827 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1828 hwmgr, PPSMC_MSG_SetSoftMinByFreq,
1829 (PPCLK_GFXCLK << 16) | (min_freq & 0xffff),
1830 NULL)),
1831 "Failed to set soft min gfxclk !",
1832 return ret);
1833 }
1834
1835 if (data->smu_features[GNLD_DPM_UCLK].enabled &&
1836 (feature_mask & FEATURE_DPM_UCLK_MASK)) {
1837 min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level;
1838 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1839 hwmgr, PPSMC_MSG_SetSoftMinByFreq,
1840 (PPCLK_UCLK << 16) | (min_freq & 0xffff),
1841 NULL)),
1842 "Failed to set soft min memclk !",
1843 return ret);
1844 }
1845
1846 if (data->smu_features[GNLD_DPM_UVD].enabled &&
1847 (feature_mask & FEATURE_DPM_UVD_MASK)) {
1848 min_freq = data->dpm_table.vclk_table.dpm_state.soft_min_level;
1849
1850 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1851 hwmgr, PPSMC_MSG_SetSoftMinByFreq,
1852 (PPCLK_VCLK << 16) | (min_freq & 0xffff),
1853 NULL)),
1854 "Failed to set soft min vclk!",
1855 return ret);
1856
1857 min_freq = data->dpm_table.dclk_table.dpm_state.soft_min_level;
1858
1859 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1860 hwmgr, PPSMC_MSG_SetSoftMinByFreq,
1861 (PPCLK_DCLK << 16) | (min_freq & 0xffff),
1862 NULL)),
1863 "Failed to set soft min dclk!",
1864 return ret);
1865 }
1866
1867 if (data->smu_features[GNLD_DPM_VCE].enabled &&
1868 (feature_mask & FEATURE_DPM_VCE_MASK)) {
1869 min_freq = data->dpm_table.eclk_table.dpm_state.soft_min_level;
1870
1871 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1872 hwmgr, PPSMC_MSG_SetSoftMinByFreq,
1873 (PPCLK_ECLK << 16) | (min_freq & 0xffff),
1874 NULL)),
1875 "Failed to set soft min eclk!",
1876 return ret);
1877 }
1878
1879 if (data->smu_features[GNLD_DPM_SOCCLK].enabled &&
1880 (feature_mask & FEATURE_DPM_SOCCLK_MASK)) {
1881 min_freq = data->dpm_table.soc_table.dpm_state.soft_min_level;
1882
1883 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1884 hwmgr, PPSMC_MSG_SetSoftMinByFreq,
1885 (PPCLK_SOCCLK << 16) | (min_freq & 0xffff),
1886 NULL)),
1887 "Failed to set soft min socclk!",
1888 return ret);
1889 }
1890
1891 if (data->smu_features[GNLD_DPM_FCLK].enabled &&
1892 (feature_mask & FEATURE_DPM_FCLK_MASK)) {
1893 min_freq = data->dpm_table.fclk_table.dpm_state.soft_min_level;
1894
1895 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1896 hwmgr, PPSMC_MSG_SetSoftMinByFreq,
1897 (PPCLK_FCLK << 16) | (min_freq & 0xffff),
1898 NULL)),
1899 "Failed to set soft min fclk!",
1900 return ret);
1901 }
1902
1903 if (data->smu_features[GNLD_DPM_DCEFCLK].enabled &&
1904 (feature_mask & FEATURE_DPM_DCEFCLK_MASK)) {
1905 min_freq = data->dpm_table.dcef_table.dpm_state.hard_min_level;
1906
1907 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1908 hwmgr, PPSMC_MSG_SetHardMinByFreq,
1909 (PPCLK_DCEFCLK << 16) | (min_freq & 0xffff),
1910 NULL)),
1911 "Failed to set hard min dcefclk!",
1912 return ret);
1913 }
1914
1915 return ret;
1916 }
1917
1918 static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_mask)
1919 {
1920 struct vega20_hwmgr *data =
1921 (struct vega20_hwmgr *)(hwmgr->backend);
1922 uint32_t max_freq;
1923 int ret = 0;
1924
1925 if (data->smu_features[GNLD_DPM_GFXCLK].enabled &&
1926 (feature_mask & FEATURE_DPM_GFXCLK_MASK)) {
1927 max_freq = data->dpm_table.gfx_table.dpm_state.soft_max_level;
1928
1929 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1930 hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
1931 (PPCLK_GFXCLK << 16) | (max_freq & 0xffff),
1932 NULL)),
1933 "Failed to set soft max gfxclk!",
1934 return ret);
1935 }
1936
1937 if (data->smu_features[GNLD_DPM_UCLK].enabled &&
1938 (feature_mask & FEATURE_DPM_UCLK_MASK)) {
1939 max_freq = data->dpm_table.mem_table.dpm_state.soft_max_level;
1940
1941 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1942 hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
1943 (PPCLK_UCLK << 16) | (max_freq & 0xffff),
1944 NULL)),
1945 "Failed to set soft max memclk!",
1946 return ret);
1947 }
1948
1949 if (data->smu_features[GNLD_DPM_UVD].enabled &&
1950 (feature_mask & FEATURE_DPM_UVD_MASK)) {
1951 max_freq = data->dpm_table.vclk_table.dpm_state.soft_max_level;
1952
1953 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1954 hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
1955 (PPCLK_VCLK << 16) | (max_freq & 0xffff),
1956 NULL)),
1957 "Failed to set soft max vclk!",
1958 return ret);
1959
1960 max_freq = data->dpm_table.dclk_table.dpm_state.soft_max_level;
1961 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1962 hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
1963 (PPCLK_DCLK << 16) | (max_freq & 0xffff),
1964 NULL)),
1965 "Failed to set soft max dclk!",
1966 return ret);
1967 }
1968
1969 if (data->smu_features[GNLD_DPM_VCE].enabled &&
1970 (feature_mask & FEATURE_DPM_VCE_MASK)) {
1971 max_freq = data->dpm_table.eclk_table.dpm_state.soft_max_level;
1972
1973 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1974 hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
1975 (PPCLK_ECLK << 16) | (max_freq & 0xffff),
1976 NULL)),
1977 "Failed to set soft max eclk!",
1978 return ret);
1979 }
1980
1981 if (data->smu_features[GNLD_DPM_SOCCLK].enabled &&
1982 (feature_mask & FEATURE_DPM_SOCCLK_MASK)) {
1983 max_freq = data->dpm_table.soc_table.dpm_state.soft_max_level;
1984
1985 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1986 hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
1987 (PPCLK_SOCCLK << 16) | (max_freq & 0xffff),
1988 NULL)),
1989 "Failed to set soft max socclk!",
1990 return ret);
1991 }
1992
1993 if (data->smu_features[GNLD_DPM_FCLK].enabled &&
1994 (feature_mask & FEATURE_DPM_FCLK_MASK)) {
1995 max_freq = data->dpm_table.fclk_table.dpm_state.soft_max_level;
1996
1997 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1998 hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
1999 (PPCLK_FCLK << 16) | (max_freq & 0xffff),
2000 NULL)),
2001 "Failed to set soft max fclk!",
2002 return ret);
2003 }
2004
2005 return ret;
2006 }
2007
2008 static int vega20_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
2009 {
2010 struct vega20_hwmgr *data =
2011 (struct vega20_hwmgr *)(hwmgr->backend);
2012 int ret = 0;
2013
2014 if (data->smu_features[GNLD_DPM_VCE].supported) {
2015 if (data->smu_features[GNLD_DPM_VCE].enabled == enable) {
2016 if (enable)
2017 PP_DBG_LOG("[EnableDisableVCEDPM] feature VCE DPM already enabled!\n");
2018 else
2019 PP_DBG_LOG("[EnableDisableVCEDPM] feature VCE DPM already disabled!\n");
2020 }
2021
2022 ret = vega20_enable_smc_features(hwmgr,
2023 enable,
2024 data->smu_features[GNLD_DPM_VCE].smu_feature_bitmap);
2025 PP_ASSERT_WITH_CODE(!ret,
2026 "Attempt to Enable/Disable DPM VCE Failed!",
2027 return ret);
2028 data->smu_features[GNLD_DPM_VCE].enabled = enable;
2029 }
2030
2031 return 0;
2032 }
2033
2034 static int vega20_get_clock_ranges(struct pp_hwmgr *hwmgr,
2035 uint32_t *clock,
2036 PPCLK_e clock_select,
2037 bool max)
2038 {
2039 int ret;
2040 *clock = 0;
2041
2042 if (max) {
2043 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
2044 PPSMC_MSG_GetMaxDpmFreq, (clock_select << 16),
2045 clock)) == 0,
2046 "[GetClockRanges] Failed to get max clock from SMC!",
2047 return ret);
2048 } else {
2049 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
2050 PPSMC_MSG_GetMinDpmFreq,
2051 (clock_select << 16),
2052 clock)) == 0,
2053 "[GetClockRanges] Failed to get min clock from SMC!",
2054 return ret);
2055 }
2056
2057 return 0;
2058 }
2059
2060 static uint32_t vega20_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
2061 {
2062 struct vega20_hwmgr *data =
2063 (struct vega20_hwmgr *)(hwmgr->backend);
2064 uint32_t gfx_clk;
2065 int ret = 0;
2066
2067 PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_GFXCLK].enabled,
2068 "[GetSclks]: gfxclk dpm not enabled!\n",
2069 return -EPERM);
2070
2071 if (low) {
2072 ret = vega20_get_clock_ranges(hwmgr, &gfx_clk, PPCLK_GFXCLK, false);
2073 PP_ASSERT_WITH_CODE(!ret,
2074 "[GetSclks]: fail to get min PPCLK_GFXCLK\n",
2075 return ret);
2076 } else {
2077 ret = vega20_get_clock_ranges(hwmgr, &gfx_clk, PPCLK_GFXCLK, true);
2078 PP_ASSERT_WITH_CODE(!ret,
2079 "[GetSclks]: fail to get max PPCLK_GFXCLK\n",
2080 return ret);
2081 }
2082
2083 return (gfx_clk * 100);
2084 }
2085
2086 static uint32_t vega20_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
2087 {
2088 struct vega20_hwmgr *data =
2089 (struct vega20_hwmgr *)(hwmgr->backend);
2090 uint32_t mem_clk;
2091 int ret = 0;
2092
2093 PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_UCLK].enabled,
2094 "[MemMclks]: memclk dpm not enabled!\n",
2095 return -EPERM);
2096
2097 if (low) {
2098 ret = vega20_get_clock_ranges(hwmgr, &mem_clk, PPCLK_UCLK, false);
2099 PP_ASSERT_WITH_CODE(!ret,
2100 "[GetMclks]: fail to get min PPCLK_UCLK\n",
2101 return ret);
2102 } else {
2103 ret = vega20_get_clock_ranges(hwmgr, &mem_clk, PPCLK_UCLK, true);
2104 PP_ASSERT_WITH_CODE(!ret,
2105 "[GetMclks]: fail to get max PPCLK_UCLK\n",
2106 return ret);
2107 }
2108
2109 return (mem_clk * 100);
2110 }
2111
2112 static int vega20_get_metrics_table(struct pp_hwmgr *hwmgr,
2113 SmuMetrics_t *metrics_table,
2114 bool bypass_cache)
2115 {
2116 struct vega20_hwmgr *data =
2117 (struct vega20_hwmgr *)(hwmgr->backend);
2118 int ret = 0;
2119
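/*
 * Serve reads from a cached copy of the SMU metrics table and refresh it
 * at most once per millisecond, unless the caller asks to bypass the cache.
 */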
2120 if (bypass_cache ||
2121 !data->metrics_time ||
2122 time_after(jiffies, data->metrics_time + msecs_to_jiffies(1))) {
2123 ret = smum_smc_table_manager(hwmgr,
2124 (uint8_t *)(&data->metrics_table),
2125 TABLE_SMU_METRICS,
2126 true);
2127 if (ret) {
2128 pr_info("Failed to export SMU metrics table!\n");
2129 return ret;
2130 }
2131 data->metrics_time = jiffies;
2132 }
2133
2134 if (metrics_table)
2135 memcpy(metrics_table, &data->metrics_table, sizeof(SmuMetrics_t));
2136
2137 return ret;
2138 }
2139
2140 static int vega20_get_gpu_power(struct pp_hwmgr *hwmgr,
2141 uint32_t *query)
2142 {
2143 int ret = 0;
2144 SmuMetrics_t metrics_table;
2145
2146 ret = vega20_get_metrics_table(hwmgr, &metrics_table, false);
2147 if (ret)
2148 return ret;
2149
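/* The SMU reports socket power in watts; shift left by 8 to return it in 8.8 fixed-point form */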
2150 	/* SMU firmware 40.46 (version 0x282e00) renamed this field: use AverageSocketPower there, CurrSocketPower otherwise */
2151 if (hwmgr->smu_version == 0x282e00)
2152 *query = metrics_table.AverageSocketPower << 8;
2153 else
2154 *query = metrics_table.CurrSocketPower << 8;
2155
2156 return ret;
2157 }
2158
2159 static int vega20_get_current_clk_freq(struct pp_hwmgr *hwmgr,
2160 PPCLK_e clk_id, uint32_t *clk_freq)
2161 {
2162 int ret = 0;
2163
2164 *clk_freq = 0;
2165
2166 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
2167 PPSMC_MSG_GetDpmClockFreq, (clk_id << 16),
2168 clk_freq)) == 0,
2169 "[GetCurrentClkFreq] Attempt to get Current Frequency Failed!",
2170 return ret);
2171
2172 *clk_freq = *clk_freq * 100;
2173
2174 return 0;
2175 }
2176
2177 static int vega20_get_current_activity_percent(struct pp_hwmgr *hwmgr,
2178 int idx,
2179 uint32_t *activity_percent)
2180 {
2181 int ret = 0;
2182 SmuMetrics_t metrics_table;
2183
2184 ret = vega20_get_metrics_table(hwmgr, &metrics_table, false);
2185 if (ret)
2186 return ret;
2187
2188 switch (idx) {
2189 case AMDGPU_PP_SENSOR_GPU_LOAD:
2190 *activity_percent = metrics_table.AverageGfxActivity;
2191 break;
2192 case AMDGPU_PP_SENSOR_MEM_LOAD:
2193 *activity_percent = metrics_table.AverageUclkActivity;
2194 break;
2195 default:
2196 pr_err("Invalid index for retrieving clock activity\n");
2197 return -EINVAL;
2198 }
2199
2200 return ret;
2201 }
2202
2203 static int vega20_read_sensor(struct pp_hwmgr *hwmgr, int idx,
2204 void *value, int *size)
2205 {
2206 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2207 struct amdgpu_device *adev = hwmgr->adev;
2208 SmuMetrics_t metrics_table;
2209 uint32_t val_vid;
2210 int ret = 0;
2211
2212 switch (idx) {
2213 case AMDGPU_PP_SENSOR_GFX_SCLK:
2214 ret = vega20_get_metrics_table(hwmgr, &metrics_table, false);
2215 if (ret)
2216 return ret;
2217
2218 *((uint32_t *)value) = metrics_table.AverageGfxclkFrequency * 100;
2219 *size = 4;
2220 break;
2221 case AMDGPU_PP_SENSOR_GFX_MCLK:
2222 ret = vega20_get_current_clk_freq(hwmgr,
2223 PPCLK_UCLK,
2224 (uint32_t *)value);
2225 if (!ret)
2226 *size = 4;
2227 break;
2228 case AMDGPU_PP_SENSOR_GPU_LOAD:
2229 case AMDGPU_PP_SENSOR_MEM_LOAD:
2230 ret = vega20_get_current_activity_percent(hwmgr, idx, (uint32_t *)value);
2231 if (!ret)
2232 *size = 4;
2233 break;
2234 case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
2235 *((uint32_t *)value) = vega20_thermal_get_temperature(hwmgr);
2236 *size = 4;
2237 break;
2238 case AMDGPU_PP_SENSOR_EDGE_TEMP:
2239 ret = vega20_get_metrics_table(hwmgr, &metrics_table, false);
2240 if (ret)
2241 return ret;
2242
2243 *((uint32_t *)value) = metrics_table.TemperatureEdge *
2244 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
2245 *size = 4;
2246 break;
2247 case AMDGPU_PP_SENSOR_MEM_TEMP:
2248 ret = vega20_get_metrics_table(hwmgr, &metrics_table, false);
2249 if (ret)
2250 return ret;
2251
2252 *((uint32_t *)value) = metrics_table.TemperatureHBM *
2253 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
2254 *size = 4;
2255 break;
2256 case AMDGPU_PP_SENSOR_UVD_POWER:
2257 *((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
2258 *size = 4;
2259 break;
2260 case AMDGPU_PP_SENSOR_VCE_POWER:
2261 *((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
2262 *size = 4;
2263 break;
2264 case AMDGPU_PP_SENSOR_GPU_POWER:
2265 *size = 16;
2266 ret = vega20_get_gpu_power(hwmgr, (uint32_t *)value);
2267 break;
2268 case AMDGPU_PP_SENSOR_VDDGFX:
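/* Read the VID from SVI0 telemetry plane 0 and convert it to a VDDC voltage */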
2269 val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_TEL_PLANE0) &
2270 SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK) >>
2271 SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT;
2272 *((uint32_t *)value) =
2273 (uint32_t)convert_to_vddc((uint8_t)val_vid);
2274 break;
2275 case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
2276 ret = vega20_get_enabled_smc_features(hwmgr, (uint64_t *)value);
2277 if (!ret)
2278 *size = 8;
2279 break;
2280 default:
2281 ret = -EINVAL;
2282 break;
2283 }
2284 return ret;
2285 }
2286
2287 static int vega20_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
2288 struct pp_display_clock_request *clock_req)
2289 {
2290 int result = 0;
2291 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2292 enum amd_pp_clock_type clk_type = clock_req->clock_type;
2293 uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
2294 PPCLK_e clk_select = 0;
2295 uint32_t clk_request = 0;
2296
2297 if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
2298 switch (clk_type) {
2299 case amd_pp_dcef_clock:
2300 clk_select = PPCLK_DCEFCLK;
2301 break;
2302 case amd_pp_disp_clock:
2303 clk_select = PPCLK_DISPCLK;
2304 break;
2305 case amd_pp_pixel_clock:
2306 clk_select = PPCLK_PIXCLK;
2307 break;
2308 case amd_pp_phy_clock:
2309 clk_select = PPCLK_PHYCLK;
2310 break;
2311 default:
2312 pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!");
2313 result = -EINVAL;
2314 break;
2315 }
2316
2317 if (!result) {
2318 clk_request = (clk_select << 16) | clk_freq;
2319 result = smum_send_msg_to_smc_with_parameter(hwmgr,
2320 PPSMC_MSG_SetHardMinByFreq,
2321 clk_request,
2322 NULL);
2323 }
2324 }
2325
2326 return result;
2327 }
2328
2329 static int vega20_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
2330 PHM_PerformanceLevelDesignation designation, uint32_t index,
2331 PHM_PerformanceLevel *level)
2332 {
2333 return 0;
2334 }
2335
2336 static int vega20_notify_smc_display_config_after_ps_adjustment(
2337 struct pp_hwmgr *hwmgr)
2338 {
2339 struct vega20_hwmgr *data =
2340 (struct vega20_hwmgr *)(hwmgr->backend);
2341 struct vega20_single_dpm_table *dpm_table =
2342 &data->dpm_table.mem_table;
2343 struct PP_Clocks min_clocks = {0};
2344 struct pp_display_clock_request clock_req;
2345 int ret = 0;
2346
2347 min_clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk;
2348 min_clocks.dcefClockInSR = hwmgr->display_config->min_dcef_deep_sleep_set_clk;
2349 min_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
2350
2351 if (data->smu_features[GNLD_DPM_DCEFCLK].supported) {
2352 clock_req.clock_type = amd_pp_dcef_clock;
2353 clock_req.clock_freq_in_khz = min_clocks.dcefClock * 10;
2354 if (!vega20_display_clock_voltage_request(hwmgr, &clock_req)) {
2355 if (data->smu_features[GNLD_DS_DCEFCLK].supported)
2356 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(
2357 hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
2358 min_clocks.dcefClockInSR / 100,
2359 NULL)) == 0,
2360 "Attempt to set divider for DCEFCLK Failed!",
2361 return ret);
2362 } else {
2363 pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
2364 }
2365 }
2366
2367 if (data->smu_features[GNLD_DPM_UCLK].enabled) {
2368 dpm_table->dpm_state.hard_min_level = min_clocks.memoryClock / 100;
2369 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
2370 PPSMC_MSG_SetHardMinByFreq,
2371 				(PPCLK_UCLK << 16) | dpm_table->dpm_state.hard_min_level,
2372 NULL)),
2373 "[SetHardMinFreq] Set hard min uclk failed!",
2374 return ret);
2375 }
2376
2377 return 0;
2378 }
2379
2380 static int vega20_force_dpm_highest(struct pp_hwmgr *hwmgr)
2381 {
2382 struct vega20_hwmgr *data =
2383 (struct vega20_hwmgr *)(hwmgr->backend);
2384 uint32_t soft_level;
2385 int ret = 0;
2386
2387 soft_level = vega20_find_highest_dpm_level(&(data->dpm_table.gfx_table));
2388
2389 data->dpm_table.gfx_table.dpm_state.soft_min_level =
2390 data->dpm_table.gfx_table.dpm_state.soft_max_level =
2391 data->dpm_table.gfx_table.dpm_levels[soft_level].value;
2392
2393 soft_level = vega20_find_highest_dpm_level(&(data->dpm_table.mem_table));
2394
2395 data->dpm_table.mem_table.dpm_state.soft_min_level =
2396 data->dpm_table.mem_table.dpm_state.soft_max_level =
2397 data->dpm_table.mem_table.dpm_levels[soft_level].value;
2398
2399 soft_level = vega20_find_highest_dpm_level(&(data->dpm_table.soc_table));
2400
2401 data->dpm_table.soc_table.dpm_state.soft_min_level =
2402 data->dpm_table.soc_table.dpm_state.soft_max_level =
2403 data->dpm_table.soc_table.dpm_levels[soft_level].value;
2404
2405 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
2406 FEATURE_DPM_UCLK_MASK |
2407 FEATURE_DPM_SOCCLK_MASK);
2408 PP_ASSERT_WITH_CODE(!ret,
2409 "Failed to upload boot level to highest!",
2410 return ret);
2411
2412 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
2413 FEATURE_DPM_UCLK_MASK |
2414 FEATURE_DPM_SOCCLK_MASK);
2415 PP_ASSERT_WITH_CODE(!ret,
2416 "Failed to upload dpm max level to highest!",
2417 return ret);
2418
2419 return 0;
2420 }
2421
2422 static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr)
2423 {
2424 struct vega20_hwmgr *data =
2425 (struct vega20_hwmgr *)(hwmgr->backend);
2426 uint32_t soft_level;
2427 int ret = 0;
2428
2429 soft_level = vega20_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
2430
2431 data->dpm_table.gfx_table.dpm_state.soft_min_level =
2432 data->dpm_table.gfx_table.dpm_state.soft_max_level =
2433 data->dpm_table.gfx_table.dpm_levels[soft_level].value;
2434
2435 soft_level = vega20_find_lowest_dpm_level(&(data->dpm_table.mem_table));
2436
2437 data->dpm_table.mem_table.dpm_state.soft_min_level =
2438 data->dpm_table.mem_table.dpm_state.soft_max_level =
2439 data->dpm_table.mem_table.dpm_levels[soft_level].value;
2440
2441 soft_level = vega20_find_lowest_dpm_level(&(data->dpm_table.soc_table));
2442
2443 data->dpm_table.soc_table.dpm_state.soft_min_level =
2444 data->dpm_table.soc_table.dpm_state.soft_max_level =
2445 data->dpm_table.soc_table.dpm_levels[soft_level].value;
2446
2447 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
2448 FEATURE_DPM_UCLK_MASK |
2449 FEATURE_DPM_SOCCLK_MASK);
2450 PP_ASSERT_WITH_CODE(!ret,
2451 "Failed to upload boot level to highest!",
2452 return ret);
2453
2454 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
2455 FEATURE_DPM_UCLK_MASK |
2456 FEATURE_DPM_SOCCLK_MASK);
2457 PP_ASSERT_WITH_CODE(!ret,
2458 "Failed to upload dpm max level to highest!",
2459 return ret);
2460
2461 return 0;
2462
2463 }
2464
2465 static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
2466 {
2467 struct vega20_hwmgr *data =
2468 (struct vega20_hwmgr *)(hwmgr->backend);
2469 uint32_t soft_min_level, soft_max_level;
2470 int ret = 0;
2471
2472 /* gfxclk soft min/max settings */
2473 soft_min_level =
2474 vega20_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
2475 soft_max_level =
2476 vega20_find_highest_dpm_level(&(data->dpm_table.gfx_table));
2477
2478 data->dpm_table.gfx_table.dpm_state.soft_min_level =
2479 data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
2480 data->dpm_table.gfx_table.dpm_state.soft_max_level =
2481 data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;
2482
2483 /* uclk soft min/max settings */
2484 soft_min_level =
2485 vega20_find_lowest_dpm_level(&(data->dpm_table.mem_table));
2486 soft_max_level =
2487 vega20_find_highest_dpm_level(&(data->dpm_table.mem_table));
2488
2489 data->dpm_table.mem_table.dpm_state.soft_min_level =
2490 data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
2491 data->dpm_table.mem_table.dpm_state.soft_max_level =
2492 data->dpm_table.mem_table.dpm_levels[soft_max_level].value;
2493
2494 /* socclk soft min/max settings */
2495 soft_min_level =
2496 vega20_find_lowest_dpm_level(&(data->dpm_table.soc_table));
2497 soft_max_level =
2498 vega20_find_highest_dpm_level(&(data->dpm_table.soc_table));
2499
2500 data->dpm_table.soc_table.dpm_state.soft_min_level =
2501 data->dpm_table.soc_table.dpm_levels[soft_min_level].value;
2502 data->dpm_table.soc_table.dpm_state.soft_max_level =
2503 data->dpm_table.soc_table.dpm_levels[soft_max_level].value;
2504
2505 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
2506 FEATURE_DPM_UCLK_MASK |
2507 FEATURE_DPM_SOCCLK_MASK);
2508 PP_ASSERT_WITH_CODE(!ret,
2509 "Failed to upload DPM Bootup Levels!",
2510 return ret);
2511
2512 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
2513 FEATURE_DPM_UCLK_MASK |
2514 FEATURE_DPM_SOCCLK_MASK);
2515 PP_ASSERT_WITH_CODE(!ret,
2516 "Failed to upload DPM Max Levels!",
2517 return ret);
2518
2519 return 0;
2520 }
2521
2522 static int vega20_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
2523 uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *soc_mask)
2524 {
2525 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2526 struct vega20_single_dpm_table *gfx_dpm_table = &(data->dpm_table.gfx_table);
2527 struct vega20_single_dpm_table *mem_dpm_table = &(data->dpm_table.mem_table);
2528 struct vega20_single_dpm_table *soc_dpm_table = &(data->dpm_table.soc_table);
2529
2530 *sclk_mask = 0;
2531 *mclk_mask = 0;
2532 *soc_mask = 0;
2533
2534 if (gfx_dpm_table->count > VEGA20_UMD_PSTATE_GFXCLK_LEVEL &&
2535 mem_dpm_table->count > VEGA20_UMD_PSTATE_MCLK_LEVEL &&
2536 soc_dpm_table->count > VEGA20_UMD_PSTATE_SOCCLK_LEVEL) {
2537 *sclk_mask = VEGA20_UMD_PSTATE_GFXCLK_LEVEL;
2538 *mclk_mask = VEGA20_UMD_PSTATE_MCLK_LEVEL;
2539 *soc_mask = VEGA20_UMD_PSTATE_SOCCLK_LEVEL;
2540 }
2541
2542 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
2543 *sclk_mask = 0;
2544 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
2545 *mclk_mask = 0;
2546 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
2547 *sclk_mask = gfx_dpm_table->count - 1;
2548 *mclk_mask = mem_dpm_table->count - 1;
2549 *soc_mask = soc_dpm_table->count - 1;
2550 }
2551
2552 return 0;
2553 }
2554
2555 static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
2556 enum pp_clock_type type, uint32_t mask)
2557 {
2558 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2559 uint32_t soft_min_level, soft_max_level, hard_min_level;
2560 int ret = 0;
2561
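/*
 * The user-supplied mask selects dpm levels: the lowest set bit becomes the
 * soft (or, for DCEFCLK, hard) minimum level and the highest set bit the
 * soft maximum level.
 */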
2562 switch (type) {
2563 case PP_SCLK:
2564 soft_min_level = mask ? (ffs(mask) - 1) : 0;
2565 soft_max_level = mask ? (fls(mask) - 1) : 0;
2566
2567 if (soft_max_level >= data->dpm_table.gfx_table.count) {
2568 pr_err("Clock level specified %d is over max allowed %d\n",
2569 soft_max_level,
2570 data->dpm_table.gfx_table.count - 1);
2571 return -EINVAL;
2572 }
2573
2574 data->dpm_table.gfx_table.dpm_state.soft_min_level =
2575 data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
2576 data->dpm_table.gfx_table.dpm_state.soft_max_level =
2577 data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;
2578
2579 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK);
2580 PP_ASSERT_WITH_CODE(!ret,
2581 "Failed to upload boot level to lowest!",
2582 return ret);
2583
2584 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK);
2585 PP_ASSERT_WITH_CODE(!ret,
2586 "Failed to upload dpm max level to highest!",
2587 return ret);
2588 break;
2589
2590 case PP_MCLK:
2591 soft_min_level = mask ? (ffs(mask) - 1) : 0;
2592 soft_max_level = mask ? (fls(mask) - 1) : 0;
2593
2594 if (soft_max_level >= data->dpm_table.mem_table.count) {
2595 pr_err("Clock level specified %d is over max allowed %d\n",
2596 soft_max_level,
2597 data->dpm_table.mem_table.count - 1);
2598 return -EINVAL;
2599 }
2600
2601 data->dpm_table.mem_table.dpm_state.soft_min_level =
2602 data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
2603 data->dpm_table.mem_table.dpm_state.soft_max_level =
2604 data->dpm_table.mem_table.dpm_levels[soft_max_level].value;
2605
2606 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_UCLK_MASK);
2607 PP_ASSERT_WITH_CODE(!ret,
2608 "Failed to upload boot level to lowest!",
2609 return ret);
2610
2611 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_UCLK_MASK);
2612 PP_ASSERT_WITH_CODE(!ret,
2613 "Failed to upload dpm max level to highest!",
2614 return ret);
2615
2616 break;
2617
2618 case PP_SOCCLK:
2619 soft_min_level = mask ? (ffs(mask) - 1) : 0;
2620 soft_max_level = mask ? (fls(mask) - 1) : 0;
2621
2622 if (soft_max_level >= data->dpm_table.soc_table.count) {
2623 pr_err("Clock level specified %d is over max allowed %d\n",
2624 soft_max_level,
2625 data->dpm_table.soc_table.count - 1);
2626 return -EINVAL;
2627 }
2628
2629 data->dpm_table.soc_table.dpm_state.soft_min_level =
2630 data->dpm_table.soc_table.dpm_levels[soft_min_level].value;
2631 data->dpm_table.soc_table.dpm_state.soft_max_level =
2632 data->dpm_table.soc_table.dpm_levels[soft_max_level].value;
2633
2634 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_SOCCLK_MASK);
2635 PP_ASSERT_WITH_CODE(!ret,
2636 "Failed to upload boot level to lowest!",
2637 return ret);
2638
2639 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_SOCCLK_MASK);
2640 PP_ASSERT_WITH_CODE(!ret,
2641 "Failed to upload dpm max level to highest!",
2642 return ret);
2643
2644 break;
2645
2646 case PP_FCLK:
2647 soft_min_level = mask ? (ffs(mask) - 1) : 0;
2648 soft_max_level = mask ? (fls(mask) - 1) : 0;
2649
2650 if (soft_max_level >= data->dpm_table.fclk_table.count) {
2651 pr_err("Clock level specified %d is over max allowed %d\n",
2652 soft_max_level,
2653 data->dpm_table.fclk_table.count - 1);
2654 return -EINVAL;
2655 }
2656
2657 data->dpm_table.fclk_table.dpm_state.soft_min_level =
2658 data->dpm_table.fclk_table.dpm_levels[soft_min_level].value;
2659 data->dpm_table.fclk_table.dpm_state.soft_max_level =
2660 data->dpm_table.fclk_table.dpm_levels[soft_max_level].value;
2661
2662 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_FCLK_MASK);
2663 PP_ASSERT_WITH_CODE(!ret,
2664 "Failed to upload boot level to lowest!",
2665 return ret);
2666
2667 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_FCLK_MASK);
2668 PP_ASSERT_WITH_CODE(!ret,
2669 "Failed to upload dpm max level to highest!",
2670 return ret);
2671
2672 break;
2673
2674 case PP_DCEFCLK:
2675 hard_min_level = mask ? (ffs(mask) - 1) : 0;
2676
2677 if (hard_min_level >= data->dpm_table.dcef_table.count) {
2678 pr_err("Clock level specified %d is over max allowed %d\n",
2679 hard_min_level,
2680 data->dpm_table.dcef_table.count - 1);
2681 return -EINVAL;
2682 }
2683
2684 data->dpm_table.dcef_table.dpm_state.hard_min_level =
2685 data->dpm_table.dcef_table.dpm_levels[hard_min_level].value;
2686
2687 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_DCEFCLK_MASK);
2688 PP_ASSERT_WITH_CODE(!ret,
2689 "Failed to upload boot level to lowest!",
2690 return ret);
2691
2692 		/* TODO: setting a DCEFCLK max dpm level is not supported */
2693
2694 break;
2695
2696 case PP_PCIE:
2697 soft_min_level = mask ? (ffs(mask) - 1) : 0;
2698 soft_max_level = mask ? (fls(mask) - 1) : 0;
2699 if (soft_min_level >= NUM_LINK_LEVELS ||
2700 soft_max_level >= NUM_LINK_LEVELS)
2701 return -EINVAL;
2702
2703 ret = smum_send_msg_to_smc_with_parameter(hwmgr,
2704 PPSMC_MSG_SetMinLinkDpmByIndex, soft_min_level,
2705 NULL);
2706 PP_ASSERT_WITH_CODE(!ret,
2707 "Failed to set min link dpm level!",
2708 return ret);
2709
2710 break;
2711
2712 default:
2713 break;
2714 }
2715
2716 return 0;
2717 }
2718
2719 static int vega20_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
2720 enum amd_dpm_forced_level level)
2721 {
2722 int ret = 0;
2723 uint32_t sclk_mask, mclk_mask, soc_mask;
2724
2725 switch (level) {
2726 case AMD_DPM_FORCED_LEVEL_HIGH:
2727 ret = vega20_force_dpm_highest(hwmgr);
2728 break;
2729
2730 case AMD_DPM_FORCED_LEVEL_LOW:
2731 ret = vega20_force_dpm_lowest(hwmgr);
2732 break;
2733
2734 case AMD_DPM_FORCED_LEVEL_AUTO:
2735 ret = vega20_unforce_dpm_levels(hwmgr);
2736 break;
2737
2738 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
2739 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
2740 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
2741 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
2742 ret = vega20_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
2743 if (ret)
2744 return ret;
2745 vega20_force_clock_level(hwmgr, PP_SCLK, 1 << sclk_mask);
2746 vega20_force_clock_level(hwmgr, PP_MCLK, 1 << mclk_mask);
2747 vega20_force_clock_level(hwmgr, PP_SOCCLK, 1 << soc_mask);
2748 break;
2749
2750 case AMD_DPM_FORCED_LEVEL_MANUAL:
2751 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
2752 default:
2753 break;
2754 }
2755
2756 return ret;
2757 }
2758
2759 static uint32_t vega20_get_fan_control_mode(struct pp_hwmgr *hwmgr)
2760 {
2761 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2762
2763 if (data->smu_features[GNLD_FAN_CONTROL].enabled == false)
2764 return AMD_FAN_CTRL_MANUAL;
2765 else
2766 return AMD_FAN_CTRL_AUTO;
2767 }
2768
2769 static void vega20_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
2770 {
2771 switch (mode) {
2772 case AMD_FAN_CTRL_NONE:
2773 vega20_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
2774 break;
2775 case AMD_FAN_CTRL_MANUAL:
2776 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
2777 vega20_fan_ctrl_stop_smc_fan_control(hwmgr);
2778 break;
2779 case AMD_FAN_CTRL_AUTO:
2780 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
2781 vega20_fan_ctrl_start_smc_fan_control(hwmgr);
2782 break;
2783 default:
2784 break;
2785 }
2786 }
2787
2788 static int vega20_get_dal_power_level(struct pp_hwmgr *hwmgr,
2789 struct amd_pp_simple_clock_info *info)
2790 {
2791 #if 0
2792 struct phm_ppt_v2_information *table_info =
2793 (struct phm_ppt_v2_information *)hwmgr->pptable;
2794 struct phm_clock_and_voltage_limits *max_limits =
2795 &table_info->max_clock_voltage_on_ac;
2796
2797 info->engine_max_clock = max_limits->sclk;
2798 info->memory_max_clock = max_limits->mclk;
2799 #endif
2800 return 0;
2801 }
2802
2803
2804 static int vega20_get_sclks(struct pp_hwmgr *hwmgr,
2805 struct pp_clock_levels_with_latency *clocks)
2806 {
2807 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2808 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
2809 int i, count;
2810
2811 if (!data->smu_features[GNLD_DPM_GFXCLK].enabled)
2812 return -1;
2813
2814 count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
2815 clocks->num_levels = count;
2816
2817 for (i = 0; i < count; i++) {
2818 clocks->data[i].clocks_in_khz =
2819 dpm_table->dpm_levels[i].value * 1000;
2820 clocks->data[i].latency_in_us = 0;
2821 }
2822
2823 return 0;
2824 }
2825
2826 static uint32_t vega20_get_mem_latency(struct pp_hwmgr *hwmgr,
2827 uint32_t clock)
2828 {
2829 return 25;
2830 }
2831
2832 static int vega20_get_memclocks(struct pp_hwmgr *hwmgr,
2833 struct pp_clock_levels_with_latency *clocks)
2834 {
2835 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2836 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.mem_table);
2837 int i, count;
2838
2839 if (!data->smu_features[GNLD_DPM_UCLK].enabled)
2840 return -1;
2841
2842 count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
2843 clocks->num_levels = data->mclk_latency_table.count = count;
2844
2845 for (i = 0; i < count; i++) {
2846 clocks->data[i].clocks_in_khz =
2847 data->mclk_latency_table.entries[i].frequency =
2848 dpm_table->dpm_levels[i].value * 1000;
2849 clocks->data[i].latency_in_us =
2850 data->mclk_latency_table.entries[i].latency =
2851 vega20_get_mem_latency(hwmgr, dpm_table->dpm_levels[i].value);
2852 }
2853
2854 return 0;
2855 }
2856
2857 static int vega20_get_dcefclocks(struct pp_hwmgr *hwmgr,
2858 struct pp_clock_levels_with_latency *clocks)
2859 {
2860 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2861 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.dcef_table);
2862 int i, count;
2863
2864 if (!data->smu_features[GNLD_DPM_DCEFCLK].enabled)
2865 return -1;
2866
2867 count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
2868 clocks->num_levels = count;
2869
2870 for (i = 0; i < count; i++) {
2871 clocks->data[i].clocks_in_khz =
2872 dpm_table->dpm_levels[i].value * 1000;
2873 clocks->data[i].latency_in_us = 0;
2874 }
2875
2876 return 0;
2877 }
2878
2879 static int vega20_get_socclocks(struct pp_hwmgr *hwmgr,
2880 struct pp_clock_levels_with_latency *clocks)
2881 {
2882 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2883 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.soc_table);
2884 int i, count;
2885
2886 if (!data->smu_features[GNLD_DPM_SOCCLK].enabled)
2887 return -1;
2888
2889 count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
2890 clocks->num_levels = count;
2891
2892 for (i = 0; i < count; i++) {
2893 clocks->data[i].clocks_in_khz =
2894 dpm_table->dpm_levels[i].value * 1000;
2895 clocks->data[i].latency_in_us = 0;
2896 }
2897
2898 return 0;
2899
2900 }
2901
2902 static int vega20_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
2903 enum amd_pp_clock_type type,
2904 struct pp_clock_levels_with_latency *clocks)
2905 {
2906 int ret;
2907
2908 switch (type) {
2909 case amd_pp_sys_clock:
2910 ret = vega20_get_sclks(hwmgr, clocks);
2911 break;
2912 case amd_pp_mem_clock:
2913 ret = vega20_get_memclocks(hwmgr, clocks);
2914 break;
2915 case amd_pp_dcef_clock:
2916 ret = vega20_get_dcefclocks(hwmgr, clocks);
2917 break;
2918 case amd_pp_soc_clock:
2919 ret = vega20_get_socclocks(hwmgr, clocks);
2920 break;
2921 default:
2922 return -EINVAL;
2923 }
2924
2925 return ret;
2926 }
2927
2928 static int vega20_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
2929 enum amd_pp_clock_type type,
2930 struct pp_clock_levels_with_voltage *clocks)
2931 {
2932 clocks->num_levels = 0;
2933
2934 return 0;
2935 }
2936
2937 static int vega20_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
2938 void *clock_ranges)
2939 {
2940 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2941 Watermarks_t *table = &(data->smc_state_table.water_marks_table);
2942 struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges;
2943
2944 if (!data->registry_data.disable_water_mark &&
2945 data->smu_features[GNLD_DPM_DCEFCLK].supported &&
2946 data->smu_features[GNLD_DPM_SOCCLK].supported) {
2947 smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges);
2948 data->water_marks_bitmap |= WaterMarksExist;
2949 data->water_marks_bitmap &= ~WaterMarksLoaded;
2950 }
2951
2952 return 0;
2953 }
2954
2955 static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
2956 enum PP_OD_DPM_TABLE_COMMAND type,
2957 long *input, uint32_t size)
2958 {
2959 struct vega20_hwmgr *data =
2960 (struct vega20_hwmgr *)(hwmgr->backend);
2961 struct vega20_od8_single_setting *od8_settings =
2962 data->od8_settings.od8_settings_array;
2963 OverDriveTable_t *od_table =
2964 &(data->smc_state_table.overdrive_table);
2965 int32_t input_clk, input_vol, i;
2966 uint32_t input_index;
2967 int od8_id;
2968 int ret;
2969
2970 PP_ASSERT_WITH_CODE(input, "NULL user input for clock and voltage",
2971 return -EINVAL);
2972
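/*
 * Input layout: the sclk/mclk tables take (index, clock) pairs, the voltage
 * curve takes (index, clock, voltage) triples; size is the total number of
 * entries in the input array.
 */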
2973 switch (type) {
2974 case PP_OD_EDIT_SCLK_VDDC_TABLE:
2975 if (!(od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id &&
2976 od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id)) {
2977 pr_info("Sclk min/max frequency overdrive not supported\n");
2978 return -EOPNOTSUPP;
2979 }
2980
2981 for (i = 0; i < size; i += 2) {
2982 if (i + 2 > size) {
2983 pr_info("invalid number of input parameters %d\n",
2984 size);
2985 return -EINVAL;
2986 }
2987
2988 input_index = input[i];
2989 input_clk = input[i + 1];
2990
2991 if (input_index != 0 && input_index != 1) {
2992 pr_info("Invalid index %d\n", input_index);
2993 pr_info("Support min/max sclk frequency setting only which index by 0/1\n");
2994 return -EINVAL;
2995 }
2996
2997 if (input_clk < od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value ||
2998 input_clk > od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value) {
2999 pr_info("clock freq %d is not within allowed range [%d - %d]\n",
3000 input_clk,
3001 od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value,
3002 od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value);
3003 return -EINVAL;
3004 }
3005
3006 if ((input_index == 0 && od_table->GfxclkFmin != input_clk) ||
3007 (input_index == 1 && od_table->GfxclkFmax != input_clk))
3008 data->gfxclk_overdrive = true;
3009
3010 if (input_index == 0)
3011 od_table->GfxclkFmin = input_clk;
3012 else
3013 od_table->GfxclkFmax = input_clk;
3014 }
3015
3016 break;
3017
3018 case PP_OD_EDIT_MCLK_VDDC_TABLE:
3019 if (!od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) {
3020 pr_info("Mclk max frequency overdrive not supported\n");
3021 return -EOPNOTSUPP;
3022 }
3023
3024 for (i = 0; i < size; i += 2) {
3025 if (i + 2 > size) {
3026 pr_info("invalid number of input parameters %d\n",
3027 size);
3028 return -EINVAL;
3029 }
3030
3031 input_index = input[i];
3032 input_clk = input[i + 1];
3033
3034 if (input_index != 1) {
3035 pr_info("Invalid index %d\n", input_index);
3036 pr_info("Support max Mclk frequency setting only which index by 1\n");
3037 return -EINVAL;
3038 }
3039
3040 if (input_clk < od8_settings[OD8_SETTING_UCLK_FMAX].min_value ||
3041 input_clk > od8_settings[OD8_SETTING_UCLK_FMAX].max_value) {
3042 pr_info("clock freq %d is not within allowed range [%d - %d]\n",
3043 input_clk,
3044 od8_settings[OD8_SETTING_UCLK_FMAX].min_value,
3045 od8_settings[OD8_SETTING_UCLK_FMAX].max_value);
3046 return -EINVAL;
3047 }
3048
3049 if (input_index == 1 && od_table->UclkFmax != input_clk)
3050 data->memclk_overdrive = true;
3051
3052 od_table->UclkFmax = input_clk;
3053 }
3054
3055 break;
3056
3057 case PP_OD_EDIT_VDDC_CURVE:
3058 if (!(od8_settings[OD8_SETTING_GFXCLK_FREQ1].feature_id &&
3059 od8_settings[OD8_SETTING_GFXCLK_FREQ2].feature_id &&
3060 od8_settings[OD8_SETTING_GFXCLK_FREQ3].feature_id &&
3061 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
3062 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
3063 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id)) {
3064 pr_info("Voltage curve calibrate not supported\n");
3065 return -EOPNOTSUPP;
3066 }
3067
3068 for (i = 0; i < size; i += 3) {
3069 if (i + 3 > size) {
3070 pr_info("invalid number of input parameters %d\n",
3071 size);
3072 return -EINVAL;
3073 }
3074
3075 input_index = input[i];
3076 input_clk = input[i + 1];
3077 input_vol = input[i + 2];
3078
3079 if (input_index > 2) {
3080 pr_info("Setting for point %d is not supported\n",
3081 input_index + 1);
3082 pr_info("Three supported points index by 0, 1, 2\n");
3083 return -EINVAL;
3084 }
3085
3086 od8_id = OD8_SETTING_GFXCLK_FREQ1 + 2 * input_index;
3087 if (input_clk < od8_settings[od8_id].min_value ||
3088 input_clk > od8_settings[od8_id].max_value) {
3089 pr_info("clock freq %d is not within allowed range [%d - %d]\n",
3090 input_clk,
3091 od8_settings[od8_id].min_value,
3092 od8_settings[od8_id].max_value);
3093 return -EINVAL;
3094 }
3095
3096 od8_id = OD8_SETTING_GFXCLK_VOLTAGE1 + 2 * input_index;
3097 if (input_vol < od8_settings[od8_id].min_value ||
3098 input_vol > od8_settings[od8_id].max_value) {
3099 pr_info("clock voltage %d is not within allowed range [%d - %d]\n",
3100 input_vol,
3101 od8_settings[od8_id].min_value,
3102 od8_settings[od8_id].max_value);
3103 return -EINVAL;
3104 }
3105
3106 switch (input_index) {
3107 case 0:
3108 od_table->GfxclkFreq1 = input_clk;
3109 od_table->GfxclkVolt1 = input_vol * VOLTAGE_SCALE;
3110 break;
3111 case 1:
3112 od_table->GfxclkFreq2 = input_clk;
3113 od_table->GfxclkVolt2 = input_vol * VOLTAGE_SCALE;
3114 break;
3115 case 2:
3116 od_table->GfxclkFreq3 = input_clk;
3117 od_table->GfxclkVolt3 = input_vol * VOLTAGE_SCALE;
3118 break;
3119 }
3120 }
3121 break;
3122
3123 case PP_OD_RESTORE_DEFAULT_TABLE:
3124 data->gfxclk_overdrive = false;
3125 data->memclk_overdrive = false;
3126
3127 ret = smum_smc_table_manager(hwmgr,
3128 (uint8_t *)od_table,
3129 TABLE_OVERDRIVE, true);
3130 PP_ASSERT_WITH_CODE(!ret,
3131 "Failed to export overdrive table!",
3132 return ret);
3133 break;
3134
3135 case PP_OD_COMMIT_DPM_TABLE:
3136 ret = smum_smc_table_manager(hwmgr,
3137 (uint8_t *)od_table,
3138 TABLE_OVERDRIVE, false);
3139 PP_ASSERT_WITH_CODE(!ret,
3140 "Failed to import overdrive table!",
3141 return ret);
3142
3143 /* retrieve updated gfxclk table */
3144 if (data->gfxclk_overdrive) {
3145 data->gfxclk_overdrive = false;
3146
3147 ret = vega20_setup_gfxclk_dpm_table(hwmgr);
3148 if (ret)
3149 return ret;
3150 }
3151
3152 /* retrieve updated memclk table */
3153 if (data->memclk_overdrive) {
3154 data->memclk_overdrive = false;
3155
3156 ret = vega20_setup_memclk_dpm_table(hwmgr);
3157 if (ret)
3158 return ret;
3159 }
3160 break;
3161
3162 default:
3163 return -EINVAL;
3164 }
3165
3166 return 0;
3167 }
3168
3169 static int vega20_set_mp1_state(struct pp_hwmgr *hwmgr,
3170 enum pp_mp1_state mp1_state)
3171 {
3172 uint16_t msg;
3173 int ret;
3174
3175 switch (mp1_state) {
3176 case PP_MP1_STATE_SHUTDOWN:
3177 msg = PPSMC_MSG_PrepareMp1ForShutdown;
3178 break;
3179 case PP_MP1_STATE_UNLOAD:
3180 msg = PPSMC_MSG_PrepareMp1ForUnload;
3181 break;
3182 case PP_MP1_STATE_RESET:
3183 msg = PPSMC_MSG_PrepareMp1ForReset;
3184 break;
3185 case PP_MP1_STATE_NONE:
3186 default:
3187 return 0;
3188 }
3189
3190 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg, NULL)) == 0,
3191 "[PrepareMp1] Failed!",
3192 return ret);
3193
3194 return 0;
3195 }
3196
3197 static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
3198 {
3199 static const char *ppfeature_name[] = {
3200 "DPM_PREFETCHER",
3201 "GFXCLK_DPM",
3202 "UCLK_DPM",
3203 "SOCCLK_DPM",
3204 "UVD_DPM",
3205 "VCE_DPM",
3206 "ULV",
3207 "MP0CLK_DPM",
3208 "LINK_DPM",
3209 "DCEFCLK_DPM",
3210 "GFXCLK_DS",
3211 "SOCCLK_DS",
3212 "LCLK_DS",
3213 "PPT",
3214 "TDC",
3215 "THERMAL",
3216 "GFX_PER_CU_CG",
3217 "RM",
3218 "DCEFCLK_DS",
3219 "ACDC",
3220 "VR0HOT",
3221 "VR1HOT",
3222 "FW_CTF",
3223 "LED_DISPLAY",
3224 "FAN_CONTROL",
3225 "GFX_EDC",
3226 "GFXOFF",
3227 "CG",
3228 "FCLK_DPM",
3229 "FCLK_DS",
3230 "MP1CLK_DS",
3231 "MP0CLK_DS",
3232 "XGMI",
3233 "ECC"};
3234 static const char *output_title[] = {
3235 "FEATURES",
3236 "BITMASK",
3237 "ENABLEMENT"};
3238 uint64_t features_enabled;
3239 int i;
3240 int ret = 0;
3241 int size = 0;
3242
3243 ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
3244 PP_ASSERT_WITH_CODE(!ret,
3245 "[EnableAllSmuFeatures] Failed to get enabled smc features!",
3246 return ret);
3247
3248 size += sprintf(buf + size, "Current ppfeatures: 0x%016llx\n", features_enabled);
3249 size += sprintf(buf + size, "%-19s %-22s %s\n",
3250 output_title[0],
3251 output_title[1],
3252 output_title[2]);
3253 for (i = 0; i < GNLD_FEATURES_MAX; i++) {
3254 size += sprintf(buf + size, "%-19s 0x%016llx %6s\n",
3255 ppfeature_name[i],
3256 1ULL << i,
3257 (features_enabled & (1ULL << i)) ? "Y" : "N");
3258 }
3259
3260 return size;
3261 }
3262
3263 static int vega20_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfeature_masks)
3264 {
3265 struct vega20_hwmgr *data =
3266 (struct vega20_hwmgr *)(hwmgr->backend);
3267 uint64_t features_enabled, features_to_enable, features_to_disable;
3268 int i, ret = 0;
3269 bool enabled;
3270
3271 if (new_ppfeature_masks >= (1ULL << GNLD_FEATURES_MAX))
3272 return -EINVAL;
3273
3274 ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
3275 if (ret)
3276 return ret;
3277
3278 features_to_disable =
3279 features_enabled & ~new_ppfeature_masks;
3280 features_to_enable =
3281 ~features_enabled & new_ppfeature_masks;
3282
3283 pr_debug("features_to_disable 0x%llx\n", features_to_disable);
3284 pr_debug("features_to_enable 0x%llx\n", features_to_enable);
3285
3286 if (features_to_disable) {
3287 ret = vega20_enable_smc_features(hwmgr, false, features_to_disable);
3288 if (ret)
3289 return ret;
3290 }
3291
3292 if (features_to_enable) {
3293 ret = vega20_enable_smc_features(hwmgr, true, features_to_enable);
3294 if (ret)
3295 return ret;
3296 }
3297
3298 /* Update the cached feature enablement state */
3299 ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
3300 if (ret)
3301 return ret;
3302
3303 for (i = 0; i < GNLD_FEATURES_MAX; i++) {
3304 enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ?
3305 true : false;
3306 data->smu_features[i].enabled = enabled;
3307 }
3308
3309 return 0;
3310 }
3311
3312 static int vega20_get_current_pcie_link_width_level(struct pp_hwmgr *hwmgr)
3313 {
3314 struct amdgpu_device *adev = hwmgr->adev;
3315
3316 return (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
3317 PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
3318 >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
3319 }
3320
3321 static int vega20_get_current_pcie_link_width(struct pp_hwmgr *hwmgr)
3322 {
3323 uint32_t width_level;
3324
3325 width_level = vega20_get_current_pcie_link_width_level(hwmgr);
3326 if (width_level > LINK_WIDTH_MAX)
3327 width_level = 0;
3328
3329 return link_width[width_level];
3330 }
3331
3332 static int vega20_get_current_pcie_link_speed_level(struct pp_hwmgr *hwmgr)
3333 {
3334 struct amdgpu_device *adev = hwmgr->adev;
3335
3336 return (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
3337 PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
3338 >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
3339 }
3340
3341 static int vega20_get_current_pcie_link_speed(struct pp_hwmgr *hwmgr)
3342 {
3343 uint32_t speed_level;
3344
3345 speed_level = vega20_get_current_pcie_link_speed_level(hwmgr);
3346 if (speed_level > LINK_SPEED_MAX)
3347 speed_level = 0;
3348
3349 return link_speed[speed_level];
3350 }
3351
3352 static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
3353 enum pp_clock_type type, char *buf)
3354 {
3355 struct vega20_hwmgr *data =
3356 (struct vega20_hwmgr *)(hwmgr->backend);
3357 struct vega20_od8_single_setting *od8_settings =
3358 data->od8_settings.od8_settings_array;
3359 OverDriveTable_t *od_table =
3360 &(data->smc_state_table.overdrive_table);
3361 PPTable_t *pptable = &(data->smc_state_table.pp_table);
3362 struct pp_clock_levels_with_latency clocks;
3363 struct vega20_single_dpm_table *fclk_dpm_table =
3364 &(data->dpm_table.fclk_table);
3365 int i, now, size = 0;
3366 int ret = 0;
3367 uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width;
3368
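/* Current clock frequencies ("now") are reported in 10 kHz units,
 * hence the "/ 100" (to MHz) and "* 10" (to kHz) conversions below.
 */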
3369 switch (type) {
3370 case PP_SCLK:
3371 ret = vega20_get_current_clk_freq(hwmgr, PPCLK_GFXCLK, &now);
3372 PP_ASSERT_WITH_CODE(!ret,
3373 "Attempt to get current gfx clk Failed!",
3374 return ret);
3375
3376 if (vega20_get_sclks(hwmgr, &clocks)) {
3377 size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
3378 now / 100);
3379 break;
3380 }
3381
3382 for (i = 0; i < clocks.num_levels; i++)
3383 size += sprintf(buf + size, "%d: %uMhz %s\n",
3384 i, clocks.data[i].clocks_in_khz / 1000,
3385 (clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
3386 break;
3387
3388 case PP_MCLK:
3389 ret = vega20_get_current_clk_freq(hwmgr, PPCLK_UCLK, &now);
3390 PP_ASSERT_WITH_CODE(!ret,
3391 "Attempt to get current mclk freq Failed!",
3392 return ret);
3393
3394 if (vega20_get_memclocks(hwmgr, &clocks)) {
3395 size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
3396 now / 100);
3397 break;
3398 }
3399
3400 for (i = 0; i < clocks.num_levels; i++)
3401 size += sprintf(buf + size, "%d: %uMhz %s\n",
3402 i, clocks.data[i].clocks_in_khz / 1000,
3403 (clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
3404 break;
3405
3406 case PP_SOCCLK:
3407 ret = vega20_get_current_clk_freq(hwmgr, PPCLK_SOCCLK, &now);
3408 PP_ASSERT_WITH_CODE(!ret,
3409 "Attempt to get current socclk freq Failed!",
3410 return ret);
3411
3412 if (vega20_get_socclocks(hwmgr, &clocks)) {
3413 size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
3414 now / 100);
3415 break;
3416 }
3417
3418 for (i = 0; i < clocks.num_levels; i++)
3419 size += sprintf(buf + size, "%d: %uMhz %s\n",
3420 i, clocks.data[i].clocks_in_khz / 1000,
3421 (clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
3422 break;
3423
3424 case PP_FCLK:
3425 ret = vega20_get_current_clk_freq(hwmgr, PPCLK_FCLK, &now);
3426 PP_ASSERT_WITH_CODE(!ret,
3427 "Attempt to get current fclk freq Failed!",
3428 return ret);
3429
3430 for (i = 0; i < fclk_dpm_table->count; i++)
3431 size += sprintf(buf + size, "%d: %uMhz %s\n",
3432 i, fclk_dpm_table->dpm_levels[i].value,
3433 fclk_dpm_table->dpm_levels[i].value == (now / 100) ? "*" : "");
3434 break;
3435
3436 case PP_DCEFCLK:
3437 ret = vega20_get_current_clk_freq(hwmgr, PPCLK_DCEFCLK, &now);
3438 PP_ASSERT_WITH_CODE(!ret,
3439 "Attempt to get current dcefclk freq Failed!",
3440 return ret);
3441
3442 if (vega20_get_dcefclocks(hwmgr, &clocks)) {
3443 size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
3444 now / 100);
3445 break;
3446 }
3447
3448 for (i = 0; i < clocks.num_levels; i++)
3449 size += sprintf(buf + size, "%d: %uMhz %s\n",
3450 i, clocks.data[i].clocks_in_khz / 1000,
3451 (clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
3452 break;
3453
3454 case PP_PCIE:
3455 current_gen_speed =
3456 vega20_get_current_pcie_link_speed_level(hwmgr);
3457 current_lane_width =
3458 vega20_get_current_pcie_link_width_level(hwmgr);
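/* PcieGenSpeed/PcieLaneCount hold encoded values: gen codes 0-3 map to
 * 2.5/5.0/8.0/16.0 GT/s and width codes 1-6 map to x1/x2/x4/x8/x12/x16,
 * matching the current levels read back from the LC registers above.
 */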
3459 for (i = 0; i < NUM_LINK_LEVELS; i++) {
3460 gen_speed = pptable->PcieGenSpeed[i];
3461 lane_width = pptable->PcieLaneCount[i];
3462
3463 size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i,
3464 (gen_speed == 0) ? "2.5GT/s," :
3465 (gen_speed == 1) ? "5.0GT/s," :
3466 (gen_speed == 2) ? "8.0GT/s," :
3467 (gen_speed == 3) ? "16.0GT/s," : "",
3468 (lane_width == 1) ? "x1" :
3469 (lane_width == 2) ? "x2" :
3470 (lane_width == 3) ? "x4" :
3471 (lane_width == 4) ? "x8" :
3472 (lane_width == 5) ? "x12" :
3473 (lane_width == 6) ? "x16" : "",
3474 pptable->LclkFreq[i],
3475 (current_gen_speed == gen_speed) &&
3476 (current_lane_width == lane_width) ?
3477 "*" : "");
3478 }
3479 break;
3480
3481 case OD_SCLK:
3482 if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id &&
3483 od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) {
3484 size = sprintf(buf, "%s:\n", "OD_SCLK");
3485 size += sprintf(buf + size, "0: %10uMhz\n",
3486 od_table->GfxclkFmin);
3487 size += sprintf(buf + size, "1: %10uMhz\n",
3488 od_table->GfxclkFmax);
3489 }
3490 break;
3491
3492 case OD_MCLK:
3493 if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) {
3494 size = sprintf(buf, "%s:\n", "OD_MCLK");
3495 size += sprintf(buf + size, "1: %10uMhz\n",
3496 od_table->UclkFmax);
3497 }
3498
3499 break;
3500
3501 case OD_VDDC_CURVE:
3502 if (od8_settings[OD8_SETTING_GFXCLK_FREQ1].feature_id &&
3503 od8_settings[OD8_SETTING_GFXCLK_FREQ2].feature_id &&
3504 od8_settings[OD8_SETTING_GFXCLK_FREQ3].feature_id &&
3505 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
3506 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
3507 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) {
3508 size = sprintf(buf, "%s:\n", "OD_VDDC_CURVE");
3509 size += sprintf(buf + size, "0: %10uMhz %10dmV\n",
3510 od_table->GfxclkFreq1,
3511 od_table->GfxclkVolt1 / VOLTAGE_SCALE);
3512 size += sprintf(buf + size, "1: %10uMhz %10dmV\n",
3513 od_table->GfxclkFreq2,
3514 od_table->GfxclkVolt2 / VOLTAGE_SCALE);
3515 size += sprintf(buf + size, "2: %10uMhz %10dmV\n",
3516 od_table->GfxclkFreq3,
3517 od_table->GfxclkVolt3 / VOLTAGE_SCALE);
3518 }
3519
3520 break;
3521
3522 case OD_RANGE:
3523 size = sprintf(buf, "%s:\n", "OD_RANGE");
3524
3525 if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id &&
3526 od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) {
3527 size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n",
3528 od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value,
3529 od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value);
3530 }
3531
3532 if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) {
3533 size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n",
3534 od8_settings[OD8_SETTING_UCLK_FMAX].min_value,
3535 od8_settings[OD8_SETTING_UCLK_FMAX].max_value);
3536 }
3537
3538 if (od8_settings[OD8_SETTING_GFXCLK_FREQ1].feature_id &&
3539 od8_settings[OD8_SETTING_GFXCLK_FREQ2].feature_id &&
3540 od8_settings[OD8_SETTING_GFXCLK_FREQ3].feature_id &&
3541 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
3542 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
3543 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) {
3544 size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
3545 od8_settings[OD8_SETTING_GFXCLK_FREQ1].min_value,
3546 od8_settings[OD8_SETTING_GFXCLK_FREQ1].max_value);
3547 size += sprintf(buf + size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n",
3548 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].min_value,
3549 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].max_value);
3550 size += sprintf(buf + size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n",
3551 od8_settings[OD8_SETTING_GFXCLK_FREQ2].min_value,
3552 od8_settings[OD8_SETTING_GFXCLK_FREQ2].max_value);
3553 size += sprintf(buf + size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n",
3554 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].min_value,
3555 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].max_value);
3556 size += sprintf(buf + size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n",
3557 od8_settings[OD8_SETTING_GFXCLK_FREQ3].min_value,
3558 od8_settings[OD8_SETTING_GFXCLK_FREQ3].max_value);
3559 size += sprintf(buf + size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n",
3560 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].min_value,
3561 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].max_value);
3562 }
3563
3564 break;
3565 default:
3566 break;
3567 }
3568 return size;
3569 }
3570
3571 static int vega20_set_uclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr,
3572 struct vega20_single_dpm_table *dpm_table)
3573 {
3574 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
3575 int ret = 0;
3576
3577 if (data->smu_features[GNLD_DPM_UCLK].enabled) {
3578 PP_ASSERT_WITH_CODE(dpm_table->count > 0,
3579 "[SetUclkToHightestDpmLevel] Dpm table has no entry!",
3580 return -EINVAL);
3581 PP_ASSERT_WITH_CODE(dpm_table->count <= NUM_UCLK_DPM_LEVELS,
3582 "[SetUclkToHightestDpmLevel] Dpm table has too many entries!",
3583 return -EINVAL);
3584
3585 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3586 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
3587 PPSMC_MSG_SetHardMinByFreq,
3588 (PPCLK_UCLK << 16) | dpm_table->dpm_state.hard_min_level,
3589 NULL)),
3590 "[SetUclkToHightestDpmLevel] Set hard min uclk failed!",
3591 return ret);
3592 }
3593
3594 return ret;
3595 }
3596
3597 static int vega20_set_fclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr)
3598 {
3599 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
3600 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.fclk_table);
3601 int ret = 0;
3602
3603 if (data->smu_features[GNLD_DPM_FCLK].enabled) {
3604 PP_ASSERT_WITH_CODE(dpm_table->count > 0,
3605 "[SetFclkToHightestDpmLevel] Dpm table has no entry!",
3606 return -EINVAL);
3607 PP_ASSERT_WITH_CODE(dpm_table->count <= NUM_FCLK_DPM_LEVELS,
3608 "[SetFclkToHightestDpmLevel] Dpm table has too many entries!",
3609 return -EINVAL);
3610
3611 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3612 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
3613 PPSMC_MSG_SetSoftMinByFreq,
3614 (PPCLK_FCLK << 16) | dpm_table->dpm_state.soft_min_level,
3615 NULL)),
3616 "[SetFclkToHightestDpmLevel] Set soft min fclk failed!",
3617 return ret);
3618 }
3619
3620 return ret;
3621 }
3622
3623 static int vega20_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
3624 {
3625 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
3626 int ret = 0;
3627
3628 smum_send_msg_to_smc_with_parameter(hwmgr,
3629 PPSMC_MSG_NumOfDisplays, 0, NULL);
3630
3631 ret = vega20_set_uclk_to_highest_dpm_level(hwmgr,
3632 &data->dpm_table.mem_table);
3633 if (ret)
3634 return ret;
3635
3636 return vega20_set_fclk_to_highest_dpm_level(hwmgr);
3637 }
3638
3639 static int vega20_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
3640 {
3641 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
3642 int result = 0;
3643 Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table);
3644
3645 if ((data->water_marks_bitmap & WaterMarksExist) &&
3646 !(data->water_marks_bitmap & WaterMarksLoaded)) {
3647 result = smum_smc_table_manager(hwmgr,
3648 (uint8_t *)wm_table, TABLE_WATERMARKS, false);
3649 PP_ASSERT_WITH_CODE(!result,
3650 "Failed to update WMTABLE!",
3651 return result);
3652 data->water_marks_bitmap |= WaterMarksLoaded;
3653 }
3654
3655 if ((data->water_marks_bitmap & WaterMarksExist) &&
3656 data->smu_features[GNLD_DPM_DCEFCLK].supported &&
3657 data->smu_features[GNLD_DPM_SOCCLK].supported) {
3658 result = smum_send_msg_to_smc_with_parameter(hwmgr,
3659 PPSMC_MSG_NumOfDisplays,
3660 hwmgr->display_config->num_display,
3661 NULL);
3662 }
3663
3664 return result;
3665 }
3666
3667 static int vega20_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
3668 {
3669 struct vega20_hwmgr *data =
3670 (struct vega20_hwmgr *)(hwmgr->backend);
3671 int ret = 0;
3672
3673 if (data->smu_features[GNLD_DPM_UVD].supported) {
3674 if (data->smu_features[GNLD_DPM_UVD].enabled == enable) {
3675 if (enable)
3676 PP_DBG_LOG("[EnableDisableUVDDPM] feature DPM UVD already enabled!\n");
3677 else
3678 PP_DBG_LOG("[EnableDisableUVDDPM] feature DPM UVD already disabled!\n");
3679 }
3680
3681 ret = vega20_enable_smc_features(hwmgr,
3682 enable,
3683 data->smu_features[GNLD_DPM_UVD].smu_feature_bitmap);
3684 PP_ASSERT_WITH_CODE(!ret,
3685 "[EnableDisableUVDDPM] Attempt to Enable/Disable DPM UVD Failed!",
3686 return ret);
3687 data->smu_features[GNLD_DPM_UVD].enabled = enable;
3688 }
3689
3690 return 0;
3691 }
3692
3693 static void vega20_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
3694 {
3695 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
3696
3697 if (data->vce_power_gated == bgate)
3698 return;
3699
3700 data->vce_power_gated = bgate;
3701 if (bgate) {
3702 vega20_enable_disable_vce_dpm(hwmgr, !bgate);
3703 amdgpu_device_ip_set_powergating_state(hwmgr->adev,
3704 AMD_IP_BLOCK_TYPE_VCE,
3705 AMD_PG_STATE_GATE);
3706 } else {
3707 amdgpu_device_ip_set_powergating_state(hwmgr->adev,
3708 AMD_IP_BLOCK_TYPE_VCE,
3709 AMD_PG_STATE_UNGATE);
3710 vega20_enable_disable_vce_dpm(hwmgr, !bgate);
3711 }
3712
3713 }
3714
3715 static void vega20_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
3716 {
3717 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
3718
3719 if (data->uvd_power_gated == bgate)
3720 return;
3721
3722 data->uvd_power_gated = bgate;
3723 vega20_enable_disable_uvd_dpm(hwmgr, !bgate);
3724 }
3725
3726 static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
3727 {
3728 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
3729 struct vega20_single_dpm_table *dpm_table;
3730 bool vblank_too_short = false;
3731 bool disable_mclk_switching;
3732 bool disable_fclk_switching;
3733 uint32_t i, latency;
3734
3735 disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
3736 !hwmgr->display_config->multi_monitor_in_sync) ||
3737 vblank_too_short;
3738 latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
3739
3740 /* gfxclk */
3741 dpm_table = &(data->dpm_table.gfx_table);
3742 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3743 dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
3744 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
3745 dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
3746
3747 if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
3748 if (VEGA20_UMD_PSTATE_GFXCLK_LEVEL < dpm_table->count) {
3749 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value;
3750 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value;
3751 }
3752
3753 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
3754 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3755 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
3756 }
3757
3758 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
3759 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3760 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3761 }
3762 }
3763
3764 /* memclk */
3765 dpm_table = &(data->dpm_table.mem_table);
3766 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3767 dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
3768 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
3769 dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
3770
3771 if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
3772 if (VEGA20_UMD_PSTATE_MCLK_LEVEL < dpm_table->count) {
3773 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value;
3774 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value;
3775 }
3776
3777 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
3778 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3779 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
3780 }
3781
3782 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
3783 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3784 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3785 }
3786 }
3787
3788 /* honour DAL's UCLK Hardmin */
3789 if (dpm_table->dpm_state.hard_min_level < (hwmgr->display_config->min_mem_set_clock / 100))
3790 dpm_table->dpm_state.hard_min_level = hwmgr->display_config->min_mem_set_clock / 100;
3791
3792 /* Hardmin is dependent on displayconfig */
3793 if (disable_mclk_switching) {
3794 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3795 for (i = 0; i < data->mclk_latency_table.count - 1; i++) {
3796 if (data->mclk_latency_table.entries[i].latency <= latency) {
3797 if (dpm_table->dpm_levels[i].value >= (hwmgr->display_config->min_mem_set_clock / 100)) {
3798 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[i].value;
3799 break;
3800 }
3801 }
3802 }
3803 }
3804
3805 if (hwmgr->display_config->nb_pstate_switch_disable)
3806 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3807
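/* Keep fclk at its highest level when mclk switching is effectively
 * disabled (uclk hard min pinned to the top level) or the display
 * already requires the top uclk.
 */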
3808 if ((disable_mclk_switching &&
3809 (dpm_table->dpm_state.hard_min_level == dpm_table->dpm_levels[dpm_table->count - 1].value)) ||
3810 hwmgr->display_config->min_mem_set_clock / 100 >= dpm_table->dpm_levels[dpm_table->count - 1].value)
3811 disable_fclk_switching = true;
3812 else
3813 disable_fclk_switching = false;
3814
3815 /* fclk */
3816 dpm_table = &(data->dpm_table.fclk_table);
3817 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3818 dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
3819 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
3820 dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
3821 if (hwmgr->display_config->nb_pstate_switch_disable || disable_fclk_switching)
3822 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3823
3824 /* vclk */
3825 dpm_table = &(data->dpm_table.vclk_table);
3826 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3827 dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
3828 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
3829 dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
3830
3831 if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
3832 if (VEGA20_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
3833 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value;
3834 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value;
3835 }
3836
3837 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
3838 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3839 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3840 }
3841 }
3842
3843 /* dclk */
3844 dpm_table = &(data->dpm_table.dclk_table);
3845 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3846 dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
3847 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
3848 dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
3849
3850 if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
3851 if (VEGA20_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
3852 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value;
3853 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value;
3854 }
3855
3856 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
3857 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3858 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3859 }
3860 }
3861
3862 /* socclk */
3863 dpm_table = &(data->dpm_table.soc_table);
3864 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3865 dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
3866 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
3867 dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
3868
3869 if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
3870 if (VEGA20_UMD_PSTATE_SOCCLK_LEVEL < dpm_table->count) {
3871 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_SOCCLK_LEVEL].value;
3872 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_SOCCLK_LEVEL].value;
3873 }
3874
3875 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
3876 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3877 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3878 }
3879 }
3880
3881 /* eclk */
3882 dpm_table = &(data->dpm_table.eclk_table);
3883 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3884 dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
3885 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
3886 dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
3887
3888 if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
3889 if (VEGA20_UMD_PSTATE_VCEMCLK_LEVEL < dpm_table->count) {
3890 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_VCEMCLK_LEVEL].value;
3891 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_VCEMCLK_LEVEL].value;
3892 }
3893
3894 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
3895 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3896 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3897 }
3898 }
3899
3900 return 0;
3901 }
3902
3903 static bool
3904 vega20_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
3905 {
3906 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
3907 bool is_update_required = false;
3908
3909 if (data->display_timing.num_existing_displays !=
3910 hwmgr->display_config->num_display)
3911 is_update_required = true;
3912
3913 if (data->registry_data.gfx_clk_deep_sleep_support &&
3914 (data->display_timing.min_clock_in_sr !=
3915 hwmgr->display_config->min_core_set_clock_in_sr))
3916 is_update_required = true;
3917
3918 return is_update_required;
3919 }
3920
3921 static int vega20_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
3922 {
3923 int ret = 0;
3924
3925 ret = vega20_disable_all_smu_features(hwmgr);
3926 PP_ASSERT_WITH_CODE(!ret,
3927 "[DisableDpmTasks] Failed to disable all smu features!",
3928 return ret);
3929
3930 return 0;
3931 }
3932
3933 static int vega20_power_off_asic(struct pp_hwmgr *hwmgr)
3934 {
3935 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
3936 int result;
3937
3938 result = vega20_disable_dpm_tasks(hwmgr);
3939 PP_ASSERT_WITH_CODE((0 == result),
3940 "[PowerOffAsic] Failed to disable DPM!",
3941 );
3942 data->water_marks_bitmap &= ~(WaterMarksLoaded);
3943
3944 return result;
3945 }
3946
3947 static int conv_power_profile_to_pplib_workload(int power_profile)
3948 {
3949 int pplib_workload = 0;
3950
3951 switch (power_profile) {
3952 case PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT:
3953 pplib_workload = WORKLOAD_DEFAULT_BIT;
3954 break;
3955 case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
3956 pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT;
3957 break;
3958 case PP_SMC_POWER_PROFILE_POWERSAVING:
3959 pplib_workload = WORKLOAD_PPLIB_POWER_SAVING_BIT;
3960 break;
3961 case PP_SMC_POWER_PROFILE_VIDEO:
3962 pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT;
3963 break;
3964 case PP_SMC_POWER_PROFILE_VR:
3965 pplib_workload = WORKLOAD_PPLIB_VR_BIT;
3966 break;
3967 case PP_SMC_POWER_PROFILE_COMPUTE:
3968 pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT;
3969 break;
3970 case PP_SMC_POWER_PROFILE_CUSTOM:
3971 pplib_workload = WORKLOAD_PPLIB_CUSTOM_BIT;
3972 break;
3973 }
3974
3975 return pplib_workload;
3976 }
3977
3978 static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
3979 {
3980 DpmActivityMonitorCoeffInt_t activity_monitor;
3981 uint32_t i, size = 0;
3982 uint16_t workload_type = 0;
3983 static const char *profile_name[] = {
3984 "BOOTUP_DEFAULT",
3985 "3D_FULL_SCREEN",
3986 "POWER_SAVING",
3987 "VIDEO",
3988 "VR",
3989 "COMPUTE",
3990 "CUSTOM"};
3991 static const char *title[] = {
3992 "PROFILE_INDEX(NAME)",
3993 "CLOCK_TYPE(NAME)",
3994 "FPS",
3995 "UseRlcBusy",
3996 "MinActiveFreqType",
3997 "MinActiveFreq",
3998 "BoosterFreqType",
3999 "BoosterFreq",
4000 "PD_Data_limit_c",
4001 "PD_Data_error_coeff",
4002 "PD_Data_error_rate_coeff"};
4003 int result = 0;
4004
4005 if (!buf)
4006 return -EINVAL;
4007
4008 size += sprintf(buf + size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
4009 title[0], title[1], title[2], title[3], title[4], title[5],
4010 title[6], title[7], title[8], title[9], title[10]);
4011
4012 for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
4013 /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
4014 workload_type = conv_power_profile_to_pplib_workload(i);
4015 result = vega20_get_activity_monitor_coeff(hwmgr,
4016 (uint8_t *)(&activity_monitor), workload_type);
4017 PP_ASSERT_WITH_CODE(!result,
4018 "[GetPowerProfile] Failed to get activity monitor!",
4019 return result);
4020
4021 size += sprintf(buf + size, "%2d %14s%s:\n",
4022 i, profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ");
4023
4024 size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
4025 " ",
4026 0,
4027 "GFXCLK",
4028 activity_monitor.Gfx_FPS,
4029 activity_monitor.Gfx_UseRlcBusy,
4030 activity_monitor.Gfx_MinActiveFreqType,
4031 activity_monitor.Gfx_MinActiveFreq,
4032 activity_monitor.Gfx_BoosterFreqType,
4033 activity_monitor.Gfx_BoosterFreq,
4034 activity_monitor.Gfx_PD_Data_limit_c,
4035 activity_monitor.Gfx_PD_Data_error_coeff,
4036 activity_monitor.Gfx_PD_Data_error_rate_coeff);
4037
4038 size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
4039 " ",
4040 1,
4041 "SOCCLK",
4042 activity_monitor.Soc_FPS,
4043 activity_monitor.Soc_UseRlcBusy,
4044 activity_monitor.Soc_MinActiveFreqType,
4045 activity_monitor.Soc_MinActiveFreq,
4046 activity_monitor.Soc_BoosterFreqType,
4047 activity_monitor.Soc_BoosterFreq,
4048 activity_monitor.Soc_PD_Data_limit_c,
4049 activity_monitor.Soc_PD_Data_error_coeff,
4050 activity_monitor.Soc_PD_Data_error_rate_coeff);
4051
4052 size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
4053 " ",
4054 2,
4055 "UCLK",
4056 activity_monitor.Mem_FPS,
4057 activity_monitor.Mem_UseRlcBusy,
4058 activity_monitor.Mem_MinActiveFreqType,
4059 activity_monitor.Mem_MinActiveFreq,
4060 activity_monitor.Mem_BoosterFreqType,
4061 activity_monitor.Mem_BoosterFreq,
4062 activity_monitor.Mem_PD_Data_limit_c,
4063 activity_monitor.Mem_PD_Data_error_coeff,
4064 activity_monitor.Mem_PD_Data_error_rate_coeff);
4065
4066 size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
4067 " ",
4068 3,
4069 "FCLK",
4070 activity_monitor.Fclk_FPS,
4071 activity_monitor.Fclk_UseRlcBusy,
4072 activity_monitor.Fclk_MinActiveFreqType,
4073 activity_monitor.Fclk_MinActiveFreq,
4074 activity_monitor.Fclk_BoosterFreqType,
4075 activity_monitor.Fclk_BoosterFreq,
4076 activity_monitor.Fclk_PD_Data_limit_c,
4077 activity_monitor.Fclk_PD_Data_error_coeff,
4078 activity_monitor.Fclk_PD_Data_error_rate_coeff);
4079 }
4080
4081 return size;
4082 }
4083
4084 static int vega20_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
4085 {
4086 DpmActivityMonitorCoeffInt_t activity_monitor;
4087 int workload_type, result = 0;
4088 uint32_t power_profile_mode = input[size];
4089
4090 if (power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
4091 pr_err("Invalid power profile mode %d\n", power_profile_mode);
4092 return -EINVAL;
4093 }
4094
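/* For CUSTOM, input[0] selects the clock type (0 gfxclk, 1 socclk,
 * 2 uclk, 3 fclk) and input[1..9] carry the nine activity-monitor
 * coefficients; input[size] always holds the requested profile mode.
 */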
4095 if (power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
4096 struct vega20_hwmgr *data =
4097 (struct vega20_hwmgr *)(hwmgr->backend);
4098 if (size == 0 && !data->is_custom_profile_set)
4099 return -EINVAL;
4100 if (size < 10 && size != 0)
4101 return -EINVAL;
4102
4103 result = vega20_get_activity_monitor_coeff(hwmgr,
4104 (uint8_t *)(&activity_monitor),
4105 WORKLOAD_PPLIB_CUSTOM_BIT);
4106 PP_ASSERT_WITH_CODE(!result,
4107 "[SetPowerProfile] Failed to get activity monitor!",
4108 return result);
4109
4110 /* If size==0, then we want to apply the already-configured
4111 * CUSTOM profile again. Just apply it, since we checked its
4112 * validity above
4113 */
4114 if (size == 0)
4115 goto out;
4116
4117 switch (input[0]) {
4118 case 0: /* Gfxclk */
4119 activity_monitor.Gfx_FPS = input[1];
4120 activity_monitor.Gfx_UseRlcBusy = input[2];
4121 activity_monitor.Gfx_MinActiveFreqType = input[3];
4122 activity_monitor.Gfx_MinActiveFreq = input[4];
4123 activity_monitor.Gfx_BoosterFreqType = input[5];
4124 activity_monitor.Gfx_BoosterFreq = input[6];
4125 activity_monitor.Gfx_PD_Data_limit_c = input[7];
4126 activity_monitor.Gfx_PD_Data_error_coeff = input[8];
4127 activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9];
4128 break;
4129 case 1: /* Socclk */
4130 activity_monitor.Soc_FPS = input[1];
4131 activity_monitor.Soc_UseRlcBusy = input[2];
4132 activity_monitor.Soc_MinActiveFreqType = input[3];
4133 activity_monitor.Soc_MinActiveFreq = input[4];
4134 activity_monitor.Soc_BoosterFreqType = input[5];
4135 activity_monitor.Soc_BoosterFreq = input[6];
4136 activity_monitor.Soc_PD_Data_limit_c = input[7];
4137 activity_monitor.Soc_PD_Data_error_coeff = input[8];
4138 activity_monitor.Soc_PD_Data_error_rate_coeff = input[9];
4139 break;
4140 case 2: /* Uclk */
4141 activity_monitor.Mem_FPS = input[1];
4142 activity_monitor.Mem_UseRlcBusy = input[2];
4143 activity_monitor.Mem_MinActiveFreqType = input[3];
4144 activity_monitor.Mem_MinActiveFreq = input[4];
4145 activity_monitor.Mem_BoosterFreqType = input[5];
4146 activity_monitor.Mem_BoosterFreq = input[6];
4147 activity_monitor.Mem_PD_Data_limit_c = input[7];
4148 activity_monitor.Mem_PD_Data_error_coeff = input[8];
4149 activity_monitor.Mem_PD_Data_error_rate_coeff = input[9];
4150 break;
4151 case 3: /* Fclk */
4152 activity_monitor.Fclk_FPS = input[1];
4153 activity_monitor.Fclk_UseRlcBusy = input[2];
4154 activity_monitor.Fclk_MinActiveFreqType = input[3];
4155 activity_monitor.Fclk_MinActiveFreq = input[4];
4156 activity_monitor.Fclk_BoosterFreqType = input[5];
4157 activity_monitor.Fclk_BoosterFreq = input[6];
4158 activity_monitor.Fclk_PD_Data_limit_c = input[7];
4159 activity_monitor.Fclk_PD_Data_error_coeff = input[8];
4160 activity_monitor.Fclk_PD_Data_error_rate_coeff = input[9];
4161 break;
4162 }
4163
4164 result = vega20_set_activity_monitor_coeff(hwmgr,
4165 (uint8_t *)(&activity_monitor),
4166 WORKLOAD_PPLIB_CUSTOM_BIT);
4167 data->is_custom_profile_set = true;
4168 PP_ASSERT_WITH_CODE(!result,
4169 "[SetPowerProfile] Failed to set activity monitor!",
4170 return result);
4171 }
4172
4173 out:
4174 /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
4175 workload_type =
4176 conv_power_profile_to_pplib_workload(power_profile_mode);
4177 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
4178 1 << workload_type,
4179 NULL);
4180
4181 hwmgr->power_profile_mode = power_profile_mode;
4182
4183 return 0;
4184 }
4185
4186 static int vega20_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
4187 uint32_t virtual_addr_low,
4188 uint32_t virtual_addr_hi,
4189 uint32_t mc_addr_low,
4190 uint32_t mc_addr_hi,
4191 uint32_t size)
4192 {
4193 smum_send_msg_to_smc_with_parameter(hwmgr,
4194 PPSMC_MSG_SetSystemVirtualDramAddrHigh,
4195 virtual_addr_hi,
4196 NULL);
4197 smum_send_msg_to_smc_with_parameter(hwmgr,
4198 PPSMC_MSG_SetSystemVirtualDramAddrLow,
4199 virtual_addr_low,
4200 NULL);
4201 smum_send_msg_to_smc_with_parameter(hwmgr,
4202 PPSMC_MSG_DramLogSetDramAddrHigh,
4203 mc_addr_hi,
4204 NULL);
4205
4206 smum_send_msg_to_smc_with_parameter(hwmgr,
4207 PPSMC_MSG_DramLogSetDramAddrLow,
4208 mc_addr_low,
4209 NULL);
4210
4211 smum_send_msg_to_smc_with_parameter(hwmgr,
4212 PPSMC_MSG_DramLogSetDramSize,
4213 size,
4214 NULL);
4215 return 0;
4216 }
4217
4218 static int vega20_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
4219 struct PP_TemperatureRange *thermal_data)
4220 {
4221 struct vega20_hwmgr *data =
4222 (struct vega20_hwmgr *)(hwmgr->backend);
4223 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
4224
4225 memcpy(thermal_data, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange));
4226
4227 thermal_data->max = pp_table->TedgeLimit *
4228 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4229 thermal_data->edge_emergency_max = (pp_table->TedgeLimit + CTF_OFFSET_EDGE) *
4230 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4231 thermal_data->hotspot_crit_max = pp_table->ThotspotLimit *
4232 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4233 thermal_data->hotspot_emergency_max = (pp_table->ThotspotLimit + CTF_OFFSET_HOTSPOT) *
4234 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4235 thermal_data->mem_crit_max = pp_table->ThbmLimit *
4236 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4237 thermal_data->mem_emergency_max = (pp_table->ThbmLimit + CTF_OFFSET_HBM) *
4238 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4239
4240 return 0;
4241 }
4242
4243 static int vega20_smu_i2c_bus_access(struct pp_hwmgr *hwmgr, bool acquire)
4244 {
4245 int res;
4246
4247 /* I2C bus access can happen very early, before the SMU firmware is loaded */
4248 if (!vega20_is_smc_ram_running(hwmgr))
4249 return 0;
4250
4251 res = smum_send_msg_to_smc_with_parameter(hwmgr,
4252 (acquire ?
4253 PPSMC_MSG_RequestI2CBus :
4254 PPSMC_MSG_ReleaseI2CBus),
4255 0,
4256 NULL);
4257
4258 PP_ASSERT_WITH_CODE(!res, "[SmuI2CAccessBus] Failed to access bus!", return res);
4259 return res;
4260 }
4261
4262 static int vega20_set_df_cstate(struct pp_hwmgr *hwmgr,
4263 enum pp_df_cstate state)
4264 {
4265 int ret;
4266
4267 /* PPSMC_MSG_DFCstateControl is supported by SMC firmware 40.50 and later */
4268 if (hwmgr->smu_version < 0x283200) {
4269 pr_err("Df cstate control is supported with 40.50 and later SMC fw!\n");
4270 return -EINVAL;
4271 }
4272
4273 ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DFCstateControl, state,
4274 NULL);
4275 if (ret)
4276 pr_err("SetDfCstate failed!\n");
4277
4278 return ret;
4279 }
4280
4281 static int vega20_set_xgmi_pstate(struct pp_hwmgr *hwmgr,
4282 uint32_t pstate)
4283 {
4284 int ret;
4285
4286 ret = smum_send_msg_to_smc_with_parameter(hwmgr,
4287 PPSMC_MSG_SetXgmiMode,
4288 pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3,
4289 NULL);
4290 if (ret)
4291 pr_err("SetXgmiPstate failed!\n");
4292
4293 return ret;
4294 }
4295
4296 static void vega20_init_gpu_metrics_v1_0(struct gpu_metrics_v1_0 *gpu_metrics)
4297 {
4298 memset(gpu_metrics, 0xFF, sizeof(struct gpu_metrics_v1_0));
4299
4300 gpu_metrics->common_header.structure_size =
4301 sizeof(struct gpu_metrics_v1_0);
4302 gpu_metrics->common_header.format_revision = 1;
4303 gpu_metrics->common_header.content_revision = 0;
4304
4305 gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
4306 }
4307
4308 static ssize_t vega20_get_gpu_metrics(struct pp_hwmgr *hwmgr,
4309 void **table)
4310 {
4311 struct vega20_hwmgr *data =
4312 (struct vega20_hwmgr *)(hwmgr->backend);
4313 struct gpu_metrics_v1_0 *gpu_metrics =
4314 &data->gpu_metrics_table;
4315 SmuMetrics_t metrics;
4316 uint32_t fan_speed_rpm;
4317 int ret;
4318
4319 ret = vega20_get_metrics_table(hwmgr, &metrics, true);
4320 if (ret)
4321 return ret;
4322
4323 vega20_init_gpu_metrics_v1_0(gpu_metrics);
4324
4325 gpu_metrics->temperature_edge = metrics.TemperatureEdge;
4326 gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
4327 gpu_metrics->temperature_mem = metrics.TemperatureHBM;
4328 gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx;
4329 gpu_metrics->temperature_vrsoc = metrics.TemperatureVrSoc;
4330 gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem0;
4331
4332 gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
4333 gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;
4334
4335 gpu_metrics->average_socket_power = metrics.AverageSocketPower;
4336
4337 gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency;
4338 gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency;
4339 gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequency;
4340
4341 gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK];
4342 gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK];
4343 gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK];
4344 gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK];
4345 gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];
4346
4347 gpu_metrics->throttle_status = metrics.ThrottlerStatus;
4348
4349 vega20_fan_ctrl_get_fan_speed_rpm(hwmgr, &fan_speed_rpm);
4350 gpu_metrics->current_fan_speed = (uint16_t)fan_speed_rpm;
4351
4352 gpu_metrics->pcie_link_width =
4353 vega20_get_current_pcie_link_width(hwmgr);
4354 gpu_metrics->pcie_link_speed =
4355 vega20_get_current_pcie_link_speed(hwmgr);
4356
4357 *table = (void *)gpu_metrics;
4358
4359 return sizeof(struct gpu_metrics_v1_0);
4360 }
4361
4362 static const struct pp_hwmgr_func vega20_hwmgr_funcs = {
4363 /* init/fini related */
4364 .backend_init = vega20_hwmgr_backend_init,
4365 .backend_fini = vega20_hwmgr_backend_fini,
4366 .asic_setup = vega20_setup_asic_task,
4367 .power_off_asic = vega20_power_off_asic,
4368 .dynamic_state_management_enable = vega20_enable_dpm_tasks,
4369 .dynamic_state_management_disable = vega20_disable_dpm_tasks,
4370 /* power state related */
4371 .apply_clocks_adjust_rules = vega20_apply_clocks_adjust_rules,
4372 .pre_display_config_changed = vega20_pre_display_configuration_changed_task,
4373 .display_config_changed = vega20_display_configuration_changed_task,
4374 .check_smc_update_required_for_display_configuration =
4375 vega20_check_smc_update_required_for_display_configuration,
4376 .notify_smc_display_config_after_ps_adjustment =
4377 vega20_notify_smc_display_config_after_ps_adjustment,
4378 /* export to DAL */
4379 .get_sclk = vega20_dpm_get_sclk,
4380 .get_mclk = vega20_dpm_get_mclk,
4381 .get_dal_power_level = vega20_get_dal_power_level,
4382 .get_clock_by_type_with_latency = vega20_get_clock_by_type_with_latency,
4383 .get_clock_by_type_with_voltage = vega20_get_clock_by_type_with_voltage,
4384 .set_watermarks_for_clocks_ranges = vega20_set_watermarks_for_clocks_ranges,
4385 .display_clock_voltage_request = vega20_display_clock_voltage_request,
4386 .get_performance_level = vega20_get_performance_level,
4387 /* UMD pstate, profile related */
4388 .force_dpm_level = vega20_dpm_force_dpm_level,
4389 .get_power_profile_mode = vega20_get_power_profile_mode,
4390 .set_power_profile_mode = vega20_set_power_profile_mode,
4391 /* od related */
4392 .set_power_limit = vega20_set_power_limit,
4393 .get_sclk_od = vega20_get_sclk_od,
4394 .set_sclk_od = vega20_set_sclk_od,
4395 .get_mclk_od = vega20_get_mclk_od,
4396 .set_mclk_od = vega20_set_mclk_od,
4397 .odn_edit_dpm_table = vega20_odn_edit_dpm_table,
4398 /* for sysfs to retrieve/set gfxclk/memclk */
4399 .force_clock_level = vega20_force_clock_level,
4400 .print_clock_levels = vega20_print_clock_levels,
4401 .read_sensor = vega20_read_sensor,
4402 .get_ppfeature_status = vega20_get_ppfeature_status,
4403 .set_ppfeature_status = vega20_set_ppfeature_status,
4404 /* powergate related */
4405 .powergate_uvd = vega20_power_gate_uvd,
4406 .powergate_vce = vega20_power_gate_vce,
4407 /* thermal related */
4408 .start_thermal_controller = vega20_start_thermal_controller,
4409 .stop_thermal_controller = vega20_thermal_stop_thermal_controller,
4410 .get_thermal_temperature_range = vega20_get_thermal_temperature_range,
4411 .register_irq_handlers = smu9_register_irq_handlers,
4412 .disable_smc_firmware_ctf = vega20_thermal_disable_alert,
4413 /* fan control related */
4414 .get_fan_speed_percent = vega20_fan_ctrl_get_fan_speed_percent,
4415 .set_fan_speed_percent = vega20_fan_ctrl_set_fan_speed_percent,
4416 .get_fan_speed_info = vega20_fan_ctrl_get_fan_speed_info,
4417 .get_fan_speed_rpm = vega20_fan_ctrl_get_fan_speed_rpm,
4418 .set_fan_speed_rpm = vega20_fan_ctrl_set_fan_speed_rpm,
4419 .get_fan_control_mode = vega20_get_fan_control_mode,
4420 .set_fan_control_mode = vega20_set_fan_control_mode,
4421 /* smu memory related */
4422 .notify_cac_buffer_info = vega20_notify_cac_buffer_info,
4423 .enable_mgpu_fan_boost = vega20_enable_mgpu_fan_boost,
4424 /* BACO related */
4425 .get_asic_baco_capability = vega20_baco_get_capability,
4426 .get_asic_baco_state = vega20_baco_get_state,
4427 .set_asic_baco_state = vega20_baco_set_state,
4428 .set_mp1_state = vega20_set_mp1_state,
4429 .smu_i2c_bus_access = vega20_smu_i2c_bus_access,
4430 .set_df_cstate = vega20_set_df_cstate,
4431 .set_xgmi_pstate = vega20_set_xgmi_pstate,
4432 .get_gpu_metrics = vega20_get_gpu_metrics,
4433 };
4434
4435 int vega20_hwmgr_init(struct pp_hwmgr *hwmgr)
4436 {
4437 hwmgr->hwmgr_func = &vega20_hwmgr_funcs;
4438 hwmgr->pptable_func = &vega20_pptable_funcs;
4439
4440 return 0;
4441 }
4442