1 /*
2 * Copyright 2019 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23 #define SWSMU_CODE_LAYER_L1
24
25 #include <linux/firmware.h>
26 #include <linux/pci.h>
27 #include <linux/power_supply.h>
28 #include <linux/reboot.h>
29
30 #include "amdgpu.h"
31 #include "amdgpu_smu.h"
32 #include "smu_internal.h"
33 #include "atom.h"
34 #include "arcturus_ppt.h"
35 #include "navi10_ppt.h"
36 #include "sienna_cichlid_ppt.h"
37 #include "renoir_ppt.h"
38 #include "vangogh_ppt.h"
39 #include "aldebaran_ppt.h"
40 #include "yellow_carp_ppt.h"
41 #include "cyan_skillfish_ppt.h"
42 #include "smu_v13_0_0_ppt.h"
43 #include "smu_v13_0_4_ppt.h"
44 #include "smu_v13_0_5_ppt.h"
45 #include "smu_v13_0_7_ppt.h"
46 #include "amd_pcie.h"
47
48 /*
49 * DO NOT use these for err/warn/info/debug messages.
50 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
51 * They are more MGPU friendly.
52 */
53 #undef pr_err
54 #undef pr_warn
55 #undef pr_info
56 #undef pr_debug
57
58 static const struct amd_pm_funcs swsmu_pm_funcs;
59 static int smu_force_smuclk_levels(struct smu_context *smu,
60 enum smu_clk_type clk_type,
61 uint32_t mask);
62 static int smu_handle_task(struct smu_context *smu,
63 enum amd_dpm_forced_level level,
64 enum amd_pp_task task_id);
65 static int smu_reset(struct smu_context *smu);
66 static int smu_set_fan_speed_pwm(void *handle, u32 speed);
67 static int smu_set_fan_control_mode(void *handle, u32 value);
68 static int smu_set_power_limit(void *handle, uint32_t limit);
69 static int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
70 static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
71 static int smu_set_mp1_state(void *handle, enum pp_mp1_state mp1_state);
72
73 static int smu_sys_get_pp_feature_mask(void *handle,
74 char *buf)
75 {
76 struct smu_context *smu = handle;
77
78 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
79 return -EOPNOTSUPP;
80
81 return smu_get_pp_feature_mask(smu, buf);
82 }
83
84 static int smu_sys_set_pp_feature_mask(void *handle,
85 uint64_t new_mask)
86 {
87 struct smu_context *smu = handle;
88
89 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
90 return -EOPNOTSUPP;
91
92 return smu_set_pp_feature_mask(smu, new_mask);
93 }
94
95 int smu_set_residency_gfxoff(struct smu_context *smu, bool value)
96 {
97 if (!smu->ppt_funcs->set_gfx_off_residency)
98 return -EINVAL;
99
100 return smu_set_gfx_off_residency(smu, value);
101 }
102
103 int smu_get_residency_gfxoff(struct smu_context *smu, u32 *value)
104 {
105 if (!smu->ppt_funcs->get_gfx_off_residency)
106 return -EINVAL;
107
108 return smu_get_gfx_off_residency(smu, value);
109 }
110
111 int smu_get_entrycount_gfxoff(struct smu_context *smu, u64 *value)
112 {
113 if (!smu->ppt_funcs->get_gfx_off_entrycount)
114 return -EINVAL;
115
116 return smu_get_gfx_off_entrycount(smu, value);
117 }
118
119 int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value)
120 {
121 if (!smu->ppt_funcs->get_gfx_off_status)
122 return -EINVAL;
123
124 *value = smu_get_gfx_off_status(smu);
125
126 return 0;
127 }
128
129 int smu_set_soft_freq_range(struct smu_context *smu,
130 enum smu_clk_type clk_type,
131 uint32_t min,
132 uint32_t max)
133 {
134 int ret = 0;
135
136 if (smu->ppt_funcs->set_soft_freq_limited_range)
137 ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
138 clk_type,
139 min,
140 max);
141
142 return ret;
143 }
144
145 int smu_get_dpm_freq_range(struct smu_context *smu,
146 enum smu_clk_type clk_type,
147 uint32_t *min,
148 uint32_t *max)
149 {
150 int ret = -ENOTSUPP;
151
152 if (!min && !max)
153 return -EINVAL;
154
155 if (smu->ppt_funcs->get_dpm_ultimate_freq)
156 ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu,
157 clk_type,
158 min,
159 max);
160
161 return ret;
162 }
163
164 int smu_set_gfx_power_up_by_imu(struct smu_context *smu)
165 {
166 int ret = 0;
167 struct amdgpu_device *adev = smu->adev;
168
169 if (smu->ppt_funcs->set_gfx_power_up_by_imu) {
170 ret = smu->ppt_funcs->set_gfx_power_up_by_imu(smu);
171 if (ret)
172 dev_err(adev->dev, "Failed to enable gfx imu!\n");
173 }
174 return ret;
175 }
176
177 static u32 smu_get_mclk(void *handle, bool low)
178 {
179 struct smu_context *smu = handle;
180 uint32_t clk_freq;
181 int ret = 0;
182
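/* Fetch either the minimum (low) or the maximum UCLK DPM frequency, depending on the caller's request */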
183 ret = smu_get_dpm_freq_range(smu, SMU_UCLK,
184 low ? &clk_freq : NULL,
185 !low ? &clk_freq : NULL);
186 if (ret)
187 return 0;
188 return clk_freq * 100;
189 }
190
191 static u32 smu_get_sclk(void *handle, bool low)
192 {
193 struct smu_context *smu = handle;
194 uint32_t clk_freq;
195 int ret = 0;
196
197 ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK,
198 low ? &clk_freq : NULL,
199 !low ? &clk_freq : NULL);
200 if (ret)
201 return 0;
202 return clk_freq * 100;
203 }
204
205 static int smu_set_gfx_imu_enable(struct smu_context *smu)
206 {
207 struct amdgpu_device *adev = smu->adev;
208
209 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
210 return 0;
211
212 if (amdgpu_in_reset(smu->adev) || adev->in_s0ix)
213 return 0;
214
215 return smu_set_gfx_power_up_by_imu(smu);
216 }
217
218 static int smu_dpm_set_vcn_enable(struct smu_context *smu,
219 bool enable)
220 {
221 struct smu_power_context *smu_power = &smu->smu_power;
222 struct smu_power_gate *power_gate = &smu_power->power_gate;
223 int ret = 0;
224
225 if (!smu->ppt_funcs->dpm_set_vcn_enable)
226 return 0;
227
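/* Nothing to do if VCN already matches the requested power state */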
228 if (atomic_read(&power_gate->vcn_gated) ^ enable)
229 return 0;
230
231 ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable);
232 if (!ret)
233 atomic_set(&power_gate->vcn_gated, !enable);
234
235 return ret;
236 }
237
238 static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
239 bool enable)
240 {
241 struct smu_power_context *smu_power = &smu->smu_power;
242 struct smu_power_gate *power_gate = &smu_power->power_gate;
243 int ret = 0;
244
245 if (!smu->ppt_funcs->dpm_set_jpeg_enable)
246 return 0;
247
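/* Nothing to do if JPEG already matches the requested power state */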
248 if (atomic_read(&power_gate->jpeg_gated) ^ enable)
249 return 0;
250
251 ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable);
252 if (!ret)
253 atomic_set(&power_gate->jpeg_gated, !enable);
254
255 return ret;
256 }
257
258 /**
259 * smu_dpm_set_power_gate - power gate/ungate the specific IP block
260 *
261 * @handle: smu_context pointer
262 * @block_type: the IP block to power gate/ungate
263 * @gate: to power gate if true, ungate otherwise
264 *
265 * This API takes no smu->mutex lock protection because:
266 * 1. It is either called by another IP block (gfx/sdma/vcn/uvd/vce),
267 * where the caller already guarantees race-free access.
268 * 2. Or it is called on a user request through power_dpm_force_performance_level.
269 * In that case, the smu->mutex protection is already enforced by the
270 * parent API smu_force_performance_level in the call path.
271 */
272 static int smu_dpm_set_power_gate(void *handle,
273 uint32_t block_type,
274 bool gate)
275 {
276 struct smu_context *smu = handle;
277 int ret = 0;
278
279 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) {
280 dev_WARN(smu->adev->dev,
281 "SMU uninitialized but power %s requested for %u!\n",
282 gate ? "gate" : "ungate", block_type);
283 return -EOPNOTSUPP;
284 }
285
286 switch (block_type) {
287 /*
288 * Some legacy code in amdgpu_vcn.c and vcn_v2*.c still uses
289 * AMD_IP_BLOCK_TYPE_UVD for VCN, so both types are handled here.
290 */
291 case AMD_IP_BLOCK_TYPE_UVD:
292 case AMD_IP_BLOCK_TYPE_VCN:
293 ret = smu_dpm_set_vcn_enable(smu, !gate);
294 if (ret)
295 dev_err(smu->adev->dev, "Failed to power %s VCN!\n",
296 gate ? "gate" : "ungate");
297 break;
298 case AMD_IP_BLOCK_TYPE_GFX:
299 ret = smu_gfx_off_control(smu, gate);
300 if (ret)
301 dev_err(smu->adev->dev, "Failed to %s gfxoff!\n",
302 gate ? "enable" : "disable");
303 break;
304 case AMD_IP_BLOCK_TYPE_SDMA:
305 ret = smu_powergate_sdma(smu, gate);
306 if (ret)
307 dev_err(smu->adev->dev, "Failed to power %s SDMA!\n",
308 gate ? "gate" : "ungate");
309 break;
310 case AMD_IP_BLOCK_TYPE_JPEG:
311 ret = smu_dpm_set_jpeg_enable(smu, !gate);
312 if (ret)
313 dev_err(smu->adev->dev, "Failed to power %s JPEG!\n",
314 gate ? "gate" : "ungate");
315 break;
316 default:
317 dev_err(smu->adev->dev, "Unsupported block type!\n");
318 return -EINVAL;
319 }
320
321 return ret;
322 }
323
324 /**
325 * smu_set_user_clk_dependencies - set user profile clock dependencies
326 *
327 * @smu: smu_context pointer
328 * @clk: enum smu_clk_type type
329 *
330 * Enable/Disable the clock dependency for the @clk type.
331 */
332 static void smu_set_user_clk_dependencies(struct smu_context *smu, enum smu_clk_type clk)
333 {
334 if (smu->adev->in_suspend)
335 return;
336
337 if (clk == SMU_MCLK) {
338 smu->user_dpm_profile.clk_dependency = 0;
339 smu->user_dpm_profile.clk_dependency = BIT(SMU_FCLK) | BIT(SMU_SOCCLK);
340 } else if (clk == SMU_FCLK) {
341 /* MCLK takes precedence over FCLK */
342 if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
343 return;
344
345 smu->user_dpm_profile.clk_dependency = 0;
346 smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_SOCCLK);
347 } else if (clk == SMU_SOCCLK) {
348 /* MCLK takes precedence over SOCCLK */
349 if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
350 return;
351
352 smu->user_dpm_profile.clk_dependency = 0;
353 smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_FCLK);
354 } else
355 /* Add clk dependencies here, if any */
356 return;
357 }
358
359 /**
360 * smu_restore_dpm_user_profile - reinstate user dpm profile
361 *
362 * @smu: smu_context pointer
363 *
364 * Restore the saved user power configurations, including the power limit,
365 * clock frequencies, fan control mode and fan speed.
366 */
367 static void smu_restore_dpm_user_profile(struct smu_context *smu)
368 {
369 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
370 int ret = 0;
371
372 if (!smu->adev->in_suspend)
373 return;
374
375 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
376 return;
377
378 /* Enable restore flag */
379 smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE;
380
381 /* set the user dpm power limit */
382 if (smu->user_dpm_profile.power_limit) {
383 ret = smu_set_power_limit(smu, smu->user_dpm_profile.power_limit);
384 if (ret)
385 dev_err(smu->adev->dev, "Failed to set power limit value\n");
386 }
387
388 /* set the user dpm clock configurations */
389 if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
390 enum smu_clk_type clk_type;
391
392 for (clk_type = 0; clk_type < SMU_CLK_COUNT; clk_type++) {
393 /*
394 * Iterate over the smu clk types and force the saved user clk
395 * configs; skip a clock if a dependency on it is enabled
396 */
397 if (!(smu->user_dpm_profile.clk_dependency & BIT(clk_type)) &&
398 smu->user_dpm_profile.clk_mask[clk_type]) {
399 ret = smu_force_smuclk_levels(smu, clk_type,
400 smu->user_dpm_profile.clk_mask[clk_type]);
401 if (ret)
402 dev_err(smu->adev->dev,
403 "Failed to set clock type = %d\n", clk_type);
404 }
405 }
406 }
407
408 /* set the user dpm fan configurations */
409 if (smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_MANUAL ||
410 smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_NONE) {
411 ret = smu_set_fan_control_mode(smu, smu->user_dpm_profile.fan_mode);
412 if (ret != -EOPNOTSUPP) {
413 smu->user_dpm_profile.fan_speed_pwm = 0;
414 smu->user_dpm_profile.fan_speed_rpm = 0;
415 smu->user_dpm_profile.fan_mode = AMD_FAN_CTRL_AUTO;
416 dev_err(smu->adev->dev, "Failed to set manual fan control mode\n");
417 }
418
419 if (smu->user_dpm_profile.fan_speed_pwm) {
420 ret = smu_set_fan_speed_pwm(smu, smu->user_dpm_profile.fan_speed_pwm);
421 if (ret != -EOPNOTSUPP)
422 dev_err(smu->adev->dev, "Failed to set manual fan speed in pwm\n");
423 }
424
425 if (smu->user_dpm_profile.fan_speed_rpm) {
426 ret = smu_set_fan_speed_rpm(smu, smu->user_dpm_profile.fan_speed_rpm);
427 if (ret != -EOPNOTSUPP)
428 dev_err(smu->adev->dev, "Failed to set manual fan speed in rpm\n");
429 }
430 }
431
432 /* Restore user customized OD settings */
433 if (smu->user_dpm_profile.user_od) {
434 if (smu->ppt_funcs->restore_user_od_settings) {
435 ret = smu->ppt_funcs->restore_user_od_settings(smu);
436 if (ret)
437 dev_err(smu->adev->dev, "Failed to upload customized OD settings\n");
438 }
439 }
440
441 /* Disable restore flag */
442 smu->user_dpm_profile.flags &= ~SMU_DPM_USER_PROFILE_RESTORE;
443 }
444
445 static int smu_get_power_num_states(void *handle,
446 struct pp_states_info *state_info)
447 {
448 if (!state_info)
449 return -EINVAL;
450
451 /* power states are not supported; report a single default state */
452 memset(state_info, 0, sizeof(struct pp_states_info));
453 state_info->nums = 1;
454 state_info->states[0] = POWER_STATE_TYPE_DEFAULT;
455
456 return 0;
457 }
458
459 bool is_support_sw_smu(struct amdgpu_device *adev)
460 {
461 /* vega20 is 11.0.2, but it's supported via the powerplay code */
462 if (adev->asic_type == CHIP_VEGA20)
463 return false;
464
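/* MP1 11.0.0 and newer are driven by the SW SMU path */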
465 if (adev->ip_versions[MP1_HWIP][0] >= IP_VERSION(11, 0, 0))
466 return true;
467
468 return false;
469 }
470
471 bool is_support_cclk_dpm(struct amdgpu_device *adev)
472 {
473 struct smu_context *smu = adev->powerplay.pp_handle;
474
475 if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT))
476 return false;
477
478 return true;
479 }
480
481
482 static int smu_sys_get_pp_table(void *handle,
483 char **table)
484 {
485 struct smu_context *smu = handle;
486 struct smu_table_context *smu_table = &smu->smu_table;
487
488 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
489 return -EOPNOTSUPP;
490
491 if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
492 return -EINVAL;
493
494 if (smu_table->hardcode_pptable)
495 *table = smu_table->hardcode_pptable;
496 else
497 *table = smu_table->power_play_table;
498
499 return smu_table->power_play_table_size;
500 }
501
502 static int smu_sys_set_pp_table(void *handle,
503 const char *buf,
504 size_t size)
505 {
506 struct smu_context *smu = handle;
507 struct smu_table_context *smu_table = &smu->smu_table;
508 ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
509 int ret = 0;
510
511 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
512 return -EOPNOTSUPP;
513
514 if (header->usStructureSize != size) {
515 dev_err(smu->adev->dev, "pp table size not matched !\n");
516 return -EIO;
517 }
518
519 if (!smu_table->hardcode_pptable) {
520 smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
521 if (!smu_table->hardcode_pptable)
522 return -ENOMEM;
523 }
524
525 memcpy(smu_table->hardcode_pptable, buf, size);
526 smu_table->power_play_table = smu_table->hardcode_pptable;
527 smu_table->power_play_table_size = size;
528
529 /*
530 * A special hw_fini action (for Navi1x, the DPM disablement will be
531 * skipped) may be needed for custom pptable uploading.
532 */
533 smu->uploading_custom_pp_table = true;
534
535 ret = smu_reset(smu);
536 if (ret)
537 dev_info(smu->adev->dev, "smu reset failed, ret = %d\n", ret);
538
539 smu->uploading_custom_pp_table = false;
540
541 return ret;
542 }
543
544 static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
545 {
546 struct smu_feature *feature = &smu->smu_feature;
547 uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
548 int ret = 0;
549
550 /*
551 * With SCPM enabled, setting the allowed feature masks (via
552 * PPSMC_MSG_SetAllowedFeaturesMaskLow/High) is not permitted.
553 * That means there is no way to let PMFW know the settings below.
554 * Thus, we just assume all the features are allowed under
555 * such a scenario.
556 */
557 if (smu->adev->scpm_enabled) {
558 bitmap_fill(feature->allowed, SMU_FEATURE_MAX);
559 return 0;
560 }
561
562 bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
563
564 ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
565 SMU_FEATURE_MAX/32);
566 if (ret)
567 return ret;
568
569 bitmap_or(feature->allowed, feature->allowed,
570 (unsigned long *)allowed_feature_mask,
571 feature->feature_num);
572
573 return ret;
574 }
575
576 static int smu_set_funcs(struct amdgpu_device *adev)
577 {
578 struct smu_context *smu = adev->powerplay.pp_handle;
579
580 if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
581 smu->od_enabled = true;
582
583 switch (adev->ip_versions[MP1_HWIP][0]) {
584 case IP_VERSION(11, 0, 0):
585 case IP_VERSION(11, 0, 5):
586 case IP_VERSION(11, 0, 9):
587 navi10_set_ppt_funcs(smu);
588 break;
589 case IP_VERSION(11, 0, 7):
590 case IP_VERSION(11, 0, 11):
591 case IP_VERSION(11, 0, 12):
592 case IP_VERSION(11, 0, 13):
593 sienna_cichlid_set_ppt_funcs(smu);
594 break;
595 case IP_VERSION(12, 0, 0):
596 case IP_VERSION(12, 0, 1):
597 renoir_set_ppt_funcs(smu);
598 break;
599 case IP_VERSION(11, 5, 0):
600 vangogh_set_ppt_funcs(smu);
601 break;
602 case IP_VERSION(13, 0, 1):
603 case IP_VERSION(13, 0, 3):
604 case IP_VERSION(13, 0, 8):
605 yellow_carp_set_ppt_funcs(smu);
606 break;
607 case IP_VERSION(13, 0, 4):
608 case IP_VERSION(13, 0, 11):
609 smu_v13_0_4_set_ppt_funcs(smu);
610 break;
611 case IP_VERSION(13, 0, 5):
612 smu_v13_0_5_set_ppt_funcs(smu);
613 break;
614 case IP_VERSION(11, 0, 8):
615 cyan_skillfish_set_ppt_funcs(smu);
616 break;
617 case IP_VERSION(11, 0, 2):
618 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
619 arcturus_set_ppt_funcs(smu);
620 /* OD is not supported on Arcturus */
621 smu->od_enabled = false;
622 break;
623 case IP_VERSION(13, 0, 2):
624 aldebaran_set_ppt_funcs(smu);
625 /* Enable pp_od_clk_voltage node */
626 smu->od_enabled = true;
627 break;
628 case IP_VERSION(13, 0, 0):
629 case IP_VERSION(13, 0, 10):
630 smu_v13_0_0_set_ppt_funcs(smu);
631 break;
632 case IP_VERSION(13, 0, 7):
633 smu_v13_0_7_set_ppt_funcs(smu);
634 break;
635 default:
636 return -EINVAL;
637 }
638
639 return 0;
640 }
641
642 static int smu_early_init(void *handle)
643 {
644 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
645 struct smu_context *smu;
646
647 smu = kzalloc(sizeof(struct smu_context), GFP_KERNEL);
648 if (!smu)
649 return -ENOMEM;
650
651 smu->adev = adev;
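/* Honor the amdgpu dpm module parameter: 0 disables SMU-based power management */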
652 smu->pm_enabled = !!amdgpu_dpm;
653 smu->is_apu = false;
654 smu->smu_baco.state = SMU_BACO_STATE_EXIT;
655 smu->smu_baco.platform_support = false;
656 smu->user_dpm_profile.fan_mode = -1;
657
658 mutex_init(&smu->message_lock);
659
660 adev->powerplay.pp_handle = smu;
661 adev->powerplay.pp_funcs = &swsmu_pm_funcs;
662
663 return smu_set_funcs(adev);
664 }
665
666 static int smu_set_default_dpm_table(struct smu_context *smu)
667 {
668 struct smu_power_context *smu_power = &smu->smu_power;
669 struct smu_power_gate *power_gate = &smu_power->power_gate;
670 int vcn_gate, jpeg_gate;
671 int ret = 0;
672
673 if (!smu->ppt_funcs->set_default_dpm_table)
674 return 0;
675
676 vcn_gate = atomic_read(&power_gate->vcn_gated);
677 jpeg_gate = atomic_read(&power_gate->jpeg_gated);
678
679 ret = smu_dpm_set_vcn_enable(smu, true);
680 if (ret)
681 return ret;
682
683 ret = smu_dpm_set_jpeg_enable(smu, true);
684 if (ret)
685 goto err_out;
686
687 ret = smu->ppt_funcs->set_default_dpm_table(smu);
688 if (ret)
689 dev_err(smu->adev->dev,
690 "Failed to setup default dpm clock tables!\n");
691
692 smu_dpm_set_jpeg_enable(smu, !jpeg_gate);
693 err_out:
694 smu_dpm_set_vcn_enable(smu, !vcn_gate);
695 return ret;
696 }
697
698 static int smu_apply_default_config_table_settings(struct smu_context *smu)
699 {
700 struct amdgpu_device *adev = smu->adev;
701 int ret = 0;
702
703 ret = smu_get_default_config_table_settings(smu,
704 &adev->pm.config_table);
705 if (ret)
706 return ret;
707
708 return smu_set_config_table(smu, &adev->pm.config_table);
709 }
710
711 static int smu_late_init(void *handle)
712 {
713 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
714 struct smu_context *smu = adev->powerplay.pp_handle;
715 int ret = 0;
716
717 smu_set_fine_grain_gfx_freq_parameters(smu);
718
719 if (!smu->pm_enabled)
720 return 0;
721
722 ret = smu_post_init(smu);
723 if (ret) {
724 dev_err(adev->dev, "Failed to post smu init!\n");
725 return ret;
726 }
727
728 /*
729 * Explicitly notify PMFW of the power mode the system is in, since
730 * PMFW may have booted the ASIC with a different mode.
731 * For ASICs supporting AC/DC switch via gpio, PMFW will
732 * handle the switch automatically. Driver involvement
733 * is unnecessary.
734 */
735 adev->pm.ac_power = power_supply_is_system_supplied() > 0;
736 smu_set_ac_dc(smu);
737
738 if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 1)) ||
739 (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 3)))
740 return 0;
741
742 if (!amdgpu_sriov_vf(adev) || smu->od_enabled) {
743 ret = smu_set_default_od_settings(smu);
744 if (ret) {
745 dev_err(adev->dev, "Failed to setup default OD settings!\n");
746 return ret;
747 }
748 }
749
750 ret = smu_populate_umd_state_clk(smu);
751 if (ret) {
752 dev_err(adev->dev, "Failed to populate UMD state clocks!\n");
753 return ret;
754 }
755
756 ret = smu_get_asic_power_limits(smu,
757 &smu->current_power_limit,
758 &smu->default_power_limit,
759 &smu->max_power_limit);
760 if (ret) {
761 dev_err(adev->dev, "Failed to get asic power limits!\n");
762 return ret;
763 }
764
765 if (!amdgpu_sriov_vf(adev))
766 smu_get_unique_id(smu);
767
768 smu_get_fan_parameters(smu);
769
770 smu_handle_task(smu,
771 smu->smu_dpm.dpm_level,
772 AMD_PP_TASK_COMPLETE_INIT);
773
774 ret = smu_apply_default_config_table_settings(smu);
775 if (ret && (ret != -EOPNOTSUPP)) {
776 dev_err(adev->dev, "Failed to apply default DriverSmuConfig settings!\n");
777 return ret;
778 }
779
780 smu_restore_dpm_user_profile(smu);
781
782 return 0;
783 }
784
785 static int smu_init_fb_allocations(struct smu_context *smu)
786 {
787 struct amdgpu_device *adev = smu->adev;
788 struct smu_table_context *smu_table = &smu->smu_table;
789 struct smu_table *tables = smu_table->tables;
790 struct smu_table *driver_table = &(smu_table->driver_table);
791 uint32_t max_table_size = 0;
792 int ret, i;
793
794 /* VRAM allocation for tool table */
795 if (tables[SMU_TABLE_PMSTATUSLOG].size) {
796 ret = amdgpu_bo_create_kernel(adev,
797 tables[SMU_TABLE_PMSTATUSLOG].size,
798 tables[SMU_TABLE_PMSTATUSLOG].align,
799 tables[SMU_TABLE_PMSTATUSLOG].domain,
800 &tables[SMU_TABLE_PMSTATUSLOG].bo,
801 &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
802 &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
803 if (ret) {
804 dev_err(adev->dev, "VRAM allocation for tool table failed!\n");
805 return ret;
806 }
807 }
808
809 /* VRAM allocation for driver table */
810 for (i = 0; i < SMU_TABLE_COUNT; i++) {
811 if (tables[i].size == 0)
812 continue;
813
814 if (i == SMU_TABLE_PMSTATUSLOG)
815 continue;
816
817 if (max_table_size < tables[i].size)
818 max_table_size = tables[i].size;
819 }
820
821 driver_table->size = max_table_size;
822 driver_table->align = PAGE_SIZE;
823 driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;
824
825 ret = amdgpu_bo_create_kernel(adev,
826 driver_table->size,
827 driver_table->align,
828 driver_table->domain,
829 &driver_table->bo,
830 &driver_table->mc_address,
831 &driver_table->cpu_addr);
832 if (ret) {
833 dev_err(adev->dev, "VRAM allocation for driver table failed!\n");
834 if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
835 amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
836 &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
837 &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
838 }
839
840 return ret;
841 }
842
843 static int smu_fini_fb_allocations(struct smu_context *smu)
844 {
845 struct smu_table_context *smu_table = &smu->smu_table;
846 struct smu_table *tables = smu_table->tables;
847 struct smu_table *driver_table = &(smu_table->driver_table);
848
849 if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
850 amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
851 &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
852 &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
853
854 amdgpu_bo_free_kernel(&driver_table->bo,
855 &driver_table->mc_address,
856 &driver_table->cpu_addr);
857
858 return 0;
859 }
860
861 /**
862 * smu_alloc_memory_pool - allocate memory pool in the system memory
863 *
864 * @smu: smu_context pointer
865 *
866 * This memory pool will be used by the SMC. The SetSystemVirtualDramAddr and
867 * DramLogSetDramAddr messages notify the SMC of its location.
868 *
869 * Returns 0 on success, error on failure.
870 */
871 static int smu_alloc_memory_pool(struct smu_context *smu)
872 {
873 struct amdgpu_device *adev = smu->adev;
874 struct smu_table_context *smu_table = &smu->smu_table;
875 struct smu_table *memory_pool = &smu_table->memory_pool;
876 uint64_t pool_size = smu->pool_size;
877 int ret = 0;
878
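/* Nothing to do when no SMU memory pool was requested */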
879 if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
880 return ret;
881
882 memory_pool->size = pool_size;
883 memory_pool->align = PAGE_SIZE;
884 memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;
885
886 switch (pool_size) {
887 case SMU_MEMORY_POOL_SIZE_256_MB:
888 case SMU_MEMORY_POOL_SIZE_512_MB:
889 case SMU_MEMORY_POOL_SIZE_1_GB:
890 case SMU_MEMORY_POOL_SIZE_2_GB:
891 ret = amdgpu_bo_create_kernel(adev,
892 memory_pool->size,
893 memory_pool->align,
894 memory_pool->domain,
895 &memory_pool->bo,
896 &memory_pool->mc_address,
897 &memory_pool->cpu_addr);
898 if (ret)
899 dev_err(adev->dev, "VRAM allocation for dramlog failed!\n");
900 break;
901 default:
902 break;
903 }
904
905 return ret;
906 }
907
908 static int smu_free_memory_pool(struct smu_context *smu)
909 {
910 struct smu_table_context *smu_table = &smu->smu_table;
911 struct smu_table *memory_pool = &smu_table->memory_pool;
912
913 if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
914 return 0;
915
916 amdgpu_bo_free_kernel(&memory_pool->bo,
917 &memory_pool->mc_address,
918 &memory_pool->cpu_addr);
919
920 memset(memory_pool, 0, sizeof(struct smu_table));
921
922 return 0;
923 }
924
925 static int smu_alloc_dummy_read_table(struct smu_context *smu)
926 {
927 struct smu_table_context *smu_table = &smu->smu_table;
928 struct smu_table *dummy_read_1_table =
929 &smu_table->dummy_read_1_table;
930 struct amdgpu_device *adev = smu->adev;
931 int ret = 0;
932
933 dummy_read_1_table->size = 0x40000;
934 dummy_read_1_table->align = PAGE_SIZE;
935 dummy_read_1_table->domain = AMDGPU_GEM_DOMAIN_VRAM;
936
937 ret = amdgpu_bo_create_kernel(adev,
938 dummy_read_1_table->size,
939 dummy_read_1_table->align,
940 dummy_read_1_table->domain,
941 &dummy_read_1_table->bo,
942 &dummy_read_1_table->mc_address,
943 &dummy_read_1_table->cpu_addr);
944 if (ret)
945 dev_err(adev->dev, "VRAM allocation for dummy read table failed!\n");
946
947 return ret;
948 }
949
950 static void smu_free_dummy_read_table(struct smu_context *smu)
951 {
952 struct smu_table_context *smu_table = &smu->smu_table;
953 struct smu_table *dummy_read_1_table =
954 &smu_table->dummy_read_1_table;
955
956
957 amdgpu_bo_free_kernel(&dummy_read_1_table->bo,
958 &dummy_read_1_table->mc_address,
959 &dummy_read_1_table->cpu_addr);
960
961 memset(dummy_read_1_table, 0, sizeof(struct smu_table));
962 }
963
964 static int smu_smc_table_sw_init(struct smu_context *smu)
965 {
966 int ret;
967
968 /**
969 * Create the smu_table structure, and init the smc tables such as
970 * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
971 */
972 ret = smu_init_smc_tables(smu);
973 if (ret) {
974 dev_err(smu->adev->dev, "Failed to init smc tables!\n");
975 return ret;
976 }
977
978 /**
979 * Create smu_power_context structure, and allocate smu_dpm_context and
980 * context size to fill the smu_power_context data.
981 */
982 ret = smu_init_power(smu);
983 if (ret) {
984 dev_err(smu->adev->dev, "Failed to init smu_init_power!\n");
985 return ret;
986 }
987
988 /*
989 * allocate vram bos to store smc table contents.
990 */
991 ret = smu_init_fb_allocations(smu);
992 if (ret)
993 return ret;
994
995 ret = smu_alloc_memory_pool(smu);
996 if (ret)
997 return ret;
998
999 ret = smu_alloc_dummy_read_table(smu);
1000 if (ret)
1001 return ret;
1002
1003 ret = smu_i2c_init(smu);
1004 if (ret)
1005 return ret;
1006
1007 return 0;
1008 }
1009
1010 static int smu_smc_table_sw_fini(struct smu_context *smu)
1011 {
1012 int ret;
1013
1014 smu_i2c_fini(smu);
1015
1016 smu_free_dummy_read_table(smu);
1017
1018 ret = smu_free_memory_pool(smu);
1019 if (ret)
1020 return ret;
1021
1022 ret = smu_fini_fb_allocations(smu);
1023 if (ret)
1024 return ret;
1025
1026 ret = smu_fini_power(smu);
1027 if (ret) {
1028 dev_err(smu->adev->dev, "Failed to fini smu power!\n");
1029 return ret;
1030 }
1031
1032 ret = smu_fini_smc_tables(smu);
1033 if (ret) {
1034 dev_err(smu->adev->dev, "Failed to fini smc tables!\n");
1035 return ret;
1036 }
1037
1038 return 0;
1039 }
1040
1041 static void smu_throttling_logging_work_fn(struct work_struct *work)
1042 {
1043 struct smu_context *smu = container_of(work, struct smu_context,
1044 throttling_logging_work);
1045
1046 smu_log_thermal_throttling(smu);
1047 }
1048
1049 static void smu_interrupt_work_fn(struct work_struct *work)
1050 {
1051 struct smu_context *smu = container_of(work, struct smu_context,
1052 interrupt_work);
1053
1054 if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)
1055 smu->ppt_funcs->interrupt_work(smu);
1056 }
1057
1058 static void smu_swctf_delayed_work_handler(struct work_struct *work)
1059 {
1060 struct smu_context *smu =
1061 container_of(work, struct smu_context, swctf_delayed_work.work);
1062 struct smu_temperature_range *range =
1063 &smu->thermal_range;
1064 struct amdgpu_device *adev = smu->adev;
1065 uint32_t hotspot_tmp, size;
1066
1067 /*
1068 * If the hotspot temperature is confirmed to be below the SW CTF setting point
1069 * after the enforced delay, nothing will be done.
1070 * Otherwise, a graceful shutdown will be performed to prevent further damage.
1071 */
1072 if (range->software_shutdown_temp &&
1073 smu->ppt_funcs->read_sensor &&
1074 !smu->ppt_funcs->read_sensor(smu,
1075 AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
1076 &hotspot_tmp,
1077 &size) &&
1078 hotspot_tmp / 1000 < range->software_shutdown_temp)
1079 return;
1080
1081 dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
1082 dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
1083 orderly_poweroff(true);
1084 }
1085
1086 static int smu_sw_init(void *handle)
1087 {
1088 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1089 struct smu_context *smu = adev->powerplay.pp_handle;
1090 int ret;
1091
1092 smu->pool_size = adev->pm.smu_prv_buffer_size;
1093 smu->smu_feature.feature_num = SMU_FEATURE_MAX;
1094 bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
1095 bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
1096
1097 INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn);
1098 INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn);
1099 atomic64_set(&smu->throttle_int_counter, 0);
1100 smu->watermarks_bitmap = 0;
1101 smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
1102 smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
1103
1104 atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
1105 atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
1106
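/* Workload profile priorities; the bootup default profile is the only one active initially */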
1107 smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
1108 smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
1109 smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
1110 smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
1111 smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
1112 smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
1113 smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
1114 smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
1115
1116 smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
1117 smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
1118 smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
1119 smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
1120 smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
1121 smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
1122 smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
1123 smu->display_config = &adev->pm.pm_display_cfg;
1124
1125 smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
1126 smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
1127
1128 ret = smu_init_microcode(smu);
1129 if (ret) {
1130 dev_err(adev->dev, "Failed to load smu firmware!\n");
1131 return ret;
1132 }
1133
1134 INIT_DELAYED_WORK(&smu->swctf_delayed_work,
1135 smu_swctf_delayed_work_handler);
1136
1137 ret = smu_smc_table_sw_init(smu);
1138 if (ret) {
1139 dev_err(adev->dev, "Failed to sw init smc table!\n");
1140 return ret;
1141 }
1142
1143 /* get boot_values from vbios to set revision, gfxclk, etc. */
1144 ret = smu_get_vbios_bootup_values(smu);
1145 if (ret) {
1146 dev_err(adev->dev, "Failed to get VBIOS boot clock values!\n");
1147 return ret;
1148 }
1149
1150 ret = smu_init_pptable_microcode(smu);
1151 if (ret) {
1152 dev_err(adev->dev, "Failed to setup pptable firmware!\n");
1153 return ret;
1154 }
1155
1156 ret = smu_register_irq_handler(smu);
1157 if (ret) {
1158 dev_err(adev->dev, "Failed to register smc irq handler!\n");
1159 return ret;
1160 }
1161
1162 /* If there is no way to query fan control mode, fan control is not supported */
1163 if (!smu->ppt_funcs->get_fan_control_mode)
1164 smu->adev->pm.no_fan = true;
1165
1166 return 0;
1167 }
1168
1169 static int smu_sw_fini(void *handle)
1170 {
1171 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1172 struct smu_context *smu = adev->powerplay.pp_handle;
1173 int ret;
1174
1175 ret = smu_smc_table_sw_fini(smu);
1176 if (ret) {
1177 dev_err(adev->dev, "Failed to sw fini smc table!\n");
1178 return ret;
1179 }
1180
1181 smu_fini_microcode(smu);
1182
1183 return 0;
1184 }
1185
1186 static int smu_get_thermal_temperature_range(struct smu_context *smu)
1187 {
1188 struct amdgpu_device *adev = smu->adev;
1189 struct smu_temperature_range *range =
1190 &smu->thermal_range;
1191 int ret = 0;
1192
1193 if (!smu->ppt_funcs->get_thermal_temperature_range)
1194 return 0;
1195
1196 ret = smu->ppt_funcs->get_thermal_temperature_range(smu, range);
1197 if (ret)
1198 return ret;
1199
1200 adev->pm.dpm.thermal.min_temp = range->min;
1201 adev->pm.dpm.thermal.max_temp = range->max;
1202 adev->pm.dpm.thermal.max_edge_emergency_temp = range->edge_emergency_max;
1203 adev->pm.dpm.thermal.min_hotspot_temp = range->hotspot_min;
1204 adev->pm.dpm.thermal.max_hotspot_crit_temp = range->hotspot_crit_max;
1205 adev->pm.dpm.thermal.max_hotspot_emergency_temp = range->hotspot_emergency_max;
1206 adev->pm.dpm.thermal.min_mem_temp = range->mem_min;
1207 adev->pm.dpm.thermal.max_mem_crit_temp = range->mem_crit_max;
1208 adev->pm.dpm.thermal.max_mem_emergency_temp = range->mem_emergency_max;
1209
1210 return ret;
1211 }
1212
1213 static int smu_smc_hw_setup(struct smu_context *smu)
1214 {
1215 struct smu_feature *feature = &smu->smu_feature;
1216 struct amdgpu_device *adev = smu->adev;
1217 uint8_t pcie_gen = 0, pcie_width = 0;
1218 uint64_t features_supported;
1219 int ret = 0;
1220
1221 switch (adev->ip_versions[MP1_HWIP][0]) {
1222 case IP_VERSION(11, 0, 7):
1223 case IP_VERSION(11, 0, 11):
1224 case IP_VERSION(11, 5, 0):
1225 case IP_VERSION(11, 0, 12):
1226 if (adev->in_suspend && smu_is_dpm_running(smu)) {
1227 dev_info(adev->dev, "dpm has been enabled\n");
1228 ret = smu_system_features_control(smu, true);
1229 if (ret)
1230 dev_err(adev->dev, "Failed system features control!\n");
1231 return ret;
1232 }
1233 break;
1234 default:
1235 break;
1236 }
1237
1238 ret = smu_init_display_count(smu, 0);
1239 if (ret) {
1240 dev_info(adev->dev, "Failed to pre-set display count as 0!\n");
1241 return ret;
1242 }
1243
1244 ret = smu_set_driver_table_location(smu);
1245 if (ret) {
1246 dev_err(adev->dev, "Failed to SetDriverDramAddr!\n");
1247 return ret;
1248 }
1249
1250 /*
1251 * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
1252 */
1253 ret = smu_set_tool_table_location(smu);
1254 if (ret) {
1255 dev_err(adev->dev, "Failed to SetToolsDramAddr!\n");
1256 return ret;
1257 }
1258
1259 /*
1260 * Use the SetSystemVirtualDramAddr and DramLogSetDramAddr messages to
1261 * notify the SMC of the memory pool location.
1262 */
1263 ret = smu_notify_memory_pool_location(smu);
1264 if (ret) {
1265 dev_err(adev->dev, "Failed to SetDramLogDramAddr!\n");
1266 return ret;
1267 }
1268
1269 ret = smu_setup_pptable(smu);
1270 if (ret) {
1271 dev_err(adev->dev, "Failed to setup pptable!\n");
1272 return ret;
1273 }
1274
1275 /* smu_dump_pptable(smu); */
1276
1277 /*
1278 * With SCPM enabled, PSP is responsible for transferring the PPTable
1279 * to the SMU. Driver involvement is neither needed nor permitted.
1280 */
1281 if (!adev->scpm_enabled) {
1282 /*
1283 * Copy pptable bo in the vram to smc with SMU MSGs such as
1284 * SetDriverDramAddr and TransferTableDram2Smu.
1285 */
1286 ret = smu_write_pptable(smu);
1287 if (ret) {
1288 dev_err(adev->dev, "Failed to transfer pptable to SMC!\n");
1289 return ret;
1290 }
1291 }
1292
1293 /* issue Run*Btc msg */
1294 ret = smu_run_btc(smu);
1295 if (ret)
1296 return ret;
1297
1298 /*
1299 * With SCPM enabled, these actions (and the relevant messages) are
1300 * neither needed nor permitted.
1301 */
1302 if (!adev->scpm_enabled) {
1303 ret = smu_feature_set_allowed_mask(smu);
1304 if (ret) {
1305 dev_err(adev->dev, "Failed to set driver allowed features mask!\n");
1306 return ret;
1307 }
1308 }
1309
1310 ret = smu_system_features_control(smu, true);
1311 if (ret) {
1312 dev_err(adev->dev, "Failed to enable requested dpm features!\n");
1313 return ret;
1314 }
1315
1316 ret = smu_feature_get_enabled_mask(smu, &features_supported);
1317 if (ret) {
1318 dev_err(adev->dev, "Failed to retrieve supported dpm features!\n");
1319 return ret;
1320 }
1321 bitmap_copy(feature->supported,
1322 (unsigned long *)&features_supported,
1323 feature->feature_num);
1324
1325 if (!smu_is_dpm_running(smu))
1326 dev_info(adev->dev, "dpm has been disabled\n");
1327
1328 /*
1329 * Set the initialized values (obtained from vbios) in the dpm tables context,
1330 * such as gfxclk, memclk, dcefclk, etc. Also enable the DPM feature for
1331 * each type of clock.
1332 */
1333 ret = smu_set_default_dpm_table(smu);
1334 if (ret) {
1335 dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
1336 return ret;
1337 }
1338
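/* Translate the supported PCIe link speed and width masks into the encoding PMFW expects */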
1339 if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
1340 pcie_gen = 3;
1341 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
1342 pcie_gen = 2;
1343 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
1344 pcie_gen = 1;
1345 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
1346 pcie_gen = 0;
1347
1348 /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
1349 * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
1350 * Bit 7:0: PCIE lane width, 1 to 7 corresponds to x1 to x32
1351 */
1352 if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
1353 pcie_width = 6;
1354 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
1355 pcie_width = 5;
1356 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
1357 pcie_width = 4;
1358 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
1359 pcie_width = 3;
1360 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
1361 pcie_width = 2;
1362 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
1363 pcie_width = 1;
1364 ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);
1365 if (ret) {
1366 dev_err(adev->dev, "Attempt to override pcie params failed!\n");
1367 return ret;
1368 }
1369
1370 ret = smu_get_thermal_temperature_range(smu);
1371 if (ret) {
1372 dev_err(adev->dev, "Failed to get thermal temperature ranges!\n");
1373 return ret;
1374 }
1375
1376 ret = smu_enable_thermal_alert(smu);
1377 if (ret) {
1378 dev_err(adev->dev, "Failed to enable thermal alert!\n");
1379 return ret;
1380 }
1381
1382 ret = smu_notify_display_change(smu);
1383 if (ret) {
1384 dev_err(adev->dev, "Failed to notify display change!\n");
1385 return ret;
1386 }
1387
1388 /*
1389 * Set the min deep sleep dcefclk with the bootup value from vbios via
1390 * SetMinDeepSleepDcefclk MSG.
1391 */
1392 ret = smu_set_min_dcef_deep_sleep(smu,
1393 smu->smu_table.boot_values.dcefclk / 100);
1394
1395 return ret;
1396 }
1397
1398 static int smu_start_smc_engine(struct smu_context *smu)
1399 {
1400 struct amdgpu_device *adev = smu->adev;
1401 int ret = 0;
1402
1403 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1404 if (adev->ip_versions[MP1_HWIP][0] < IP_VERSION(11, 0, 0)) {
1405 if (smu->ppt_funcs->load_microcode) {
1406 ret = smu->ppt_funcs->load_microcode(smu);
1407 if (ret)
1408 return ret;
1409 }
1410 }
1411 }
1412
1413 if (smu->ppt_funcs->check_fw_status) {
1414 ret = smu->ppt_funcs->check_fw_status(smu);
1415 if (ret) {
1416 dev_err(adev->dev, "SMC is not ready\n");
1417 return ret;
1418 }
1419 }
1420
1421 /*
1422 * Send the GetDriverIfVersion message to check whether the returned value
1423 * matches the DRIVER_IF_VERSION in the smc header.
1424 */
1425 ret = smu_check_fw_version(smu);
1426 if (ret)
1427 return ret;
1428
1429 return ret;
1430 }
1431
1432 static int smu_hw_init(void *handle)
1433 {
1434 int ret;
1435 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1436 struct smu_context *smu = adev->powerplay.pp_handle;
1437
1438 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
1439 smu->pm_enabled = false;
1440 return 0;
1441 }
1442
1443 ret = smu_start_smc_engine(smu);
1444 if (ret) {
1445 dev_err(adev->dev, "SMC engine is not correctly up!\n");
1446 return ret;
1447 }
1448
1449 if (smu->is_apu) {
1450 ret = smu_set_gfx_imu_enable(smu);
1451 if (ret)
1452 return ret;
1453 smu_dpm_set_vcn_enable(smu, true);
1454 smu_dpm_set_jpeg_enable(smu, true);
1455 smu_set_gfx_cgpg(smu, true);
1456 }
1457
1458 if (!smu->pm_enabled)
1459 return 0;
1460
1461 ret = smu_get_driver_allowed_feature_mask(smu);
1462 if (ret)
1463 return ret;
1464
1465 ret = smu_smc_hw_setup(smu);
1466 if (ret) {
1467 dev_err(adev->dev, "Failed to setup smc hw!\n");
1468 return ret;
1469 }
1470
1471 /*
1472 * Retrieve the maximum sustainable clocks here, considering that:
1473 * 1. It is not needed on resume (from S3).
1474 * 2. DAL settings come between .hw_init and .late_init of SMU,
1475 * and DAL needs to know the maximum sustainable clocks. Thus
1476 * it cannot be put in .late_init().
1477 */
1478 ret = smu_init_max_sustainable_clocks(smu);
1479 if (ret) {
1480 dev_err(adev->dev, "Failed to init max sustainable clocks!\n");
1481 return ret;
1482 }
1483
1484 adev->pm.dpm_enabled = true;
1485
1486 dev_info(adev->dev, "SMU is initialized successfully!\n");
1487
1488 return 0;
1489 }
1490
1491 static int smu_disable_dpms(struct smu_context *smu)
1492 {
1493 struct amdgpu_device *adev = smu->adev;
1494 int ret = 0;
1495 bool use_baco = !smu->is_apu &&
1496 ((amdgpu_in_reset(adev) &&
1497 (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
1498 ((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev)));
1499
1500 /*
1501 * For SMU 13.0.0 and 13.0.7, PMFW will handle the DPM features (disablement or others)
1502 * properly on suspend/reset/unload. Driver involvement may cause some unexpected issues.
1503 */
1504 switch (adev->ip_versions[MP1_HWIP][0]) {
1505 case IP_VERSION(13, 0, 0):
1506 case IP_VERSION(13, 0, 7):
1507 return 0;
1508 default:
1509 break;
1510 }
1511
1512 /*
1513 * For custom pptable uploading, skip the DPM features
1514 * disable process on Navi1x ASICs.
1515 * - As the gfx related features are under control of
1516 * RLC on those ASICs. RLC reinitialization will be
1517 * needed to reenable them. That will cost much more
1518 * efforts.
1519 *
1520 * - SMU firmware can handle the DPM reenablement
1521 * properly.
1522 */
1523 if (smu->uploading_custom_pp_table) {
1524 switch (adev->ip_versions[MP1_HWIP][0]) {
1525 case IP_VERSION(11, 0, 0):
1526 case IP_VERSION(11, 0, 5):
1527 case IP_VERSION(11, 0, 9):
1528 case IP_VERSION(11, 0, 7):
1529 case IP_VERSION(11, 0, 11):
1530 case IP_VERSION(11, 5, 0):
1531 case IP_VERSION(11, 0, 12):
1532 case IP_VERSION(11, 0, 13):
1533 return 0;
1534 default:
1535 break;
1536 }
1537 }
1538
1539 /*
1540 * For Sienna_Cichlid, PMFW will handle the features disablement properly
1541 * on BACO in. Driver involvement is unnecessary.
1542 */
1543 if (use_baco) {
1544 switch (adev->ip_versions[MP1_HWIP][0]) {
1545 case IP_VERSION(11, 0, 7):
1546 case IP_VERSION(11, 0, 0):
1547 case IP_VERSION(11, 0, 5):
1548 case IP_VERSION(11, 0, 9):
1549 case IP_VERSION(13, 0, 7):
1550 return 0;
1551 default:
1552 break;
1553 }
1554 }
1555
1556 /*
1557 * For SMU 13.0.4/11, PMFW will handle the features disablement properly
1558 * for gpu reset and S0i3 cases. Driver involvement is unnecessary.
1559 */
1560 if (amdgpu_in_reset(adev) || adev->in_s0ix) {
1561 switch (adev->ip_versions[MP1_HWIP][0]) {
1562 case IP_VERSION(13, 0, 4):
1563 case IP_VERSION(13, 0, 11):
1564 return 0;
1565 default:
1566 break;
1567 }
1568 }
1569
1570 /*
1571 * For gpu reset, runpm and hibernation through BACO,
1572 * BACO feature has to be kept enabled.
1573 */
1574 if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) {
1575 ret = smu_disable_all_features_with_exception(smu,
1576 SMU_FEATURE_BACO_BIT);
1577 if (ret)
1578 dev_err(adev->dev, "Failed to disable smu features except BACO.\n");
1579 } else {
1580 /* DisableAllSmuFeatures message is not permitted with SCPM enabled */
1581 if (!adev->scpm_enabled) {
1582 ret = smu_system_features_control(smu, false);
1583 if (ret)
1584 dev_err(adev->dev, "Failed to disable smu features.\n");
1585 }
1586 }
1587
1588 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2) &&
1589 adev->gfx.rlc.funcs->stop)
1590 adev->gfx.rlc.funcs->stop(adev);
1591
1592 return ret;
1593 }
1594
1595 static int smu_smc_hw_cleanup(struct smu_context *smu)
1596 {
1597 struct amdgpu_device *adev = smu->adev;
1598 int ret = 0;
1599
1600 cancel_work_sync(&smu->throttling_logging_work);
1601 cancel_work_sync(&smu->interrupt_work);
1602
1603 ret = smu_disable_thermal_alert(smu);
1604 if (ret) {
1605 dev_err(adev->dev, "Fail to disable thermal alert!\n");
1606 return ret;
1607 }
1608
1609 cancel_delayed_work_sync(&smu->swctf_delayed_work);
1610
1611 ret = smu_disable_dpms(smu);
1612 if (ret) {
1613 dev_err(adev->dev, "Fail to disable dpm features!\n");
1614 return ret;
1615 }
1616
1617 return 0;
1618 }
1619
1620 static int smu_hw_fini(void *handle)
1621 {
1622 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1623 struct smu_context *smu = adev->powerplay.pp_handle;
1624
1625 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1626 return 0;
1627
1628 smu_dpm_set_vcn_enable(smu, false);
1629 smu_dpm_set_jpeg_enable(smu, false);
1630
1631 adev->vcn.cur_state = AMD_PG_STATE_GATE;
1632 adev->jpeg.cur_state = AMD_PG_STATE_GATE;
1633
1634 if (!smu->pm_enabled)
1635 return 0;
1636
1637 adev->pm.dpm_enabled = false;
1638
1639 return smu_smc_hw_cleanup(smu);
1640 }
1641
1642 static void smu_late_fini(void *handle)
1643 {
1644 struct amdgpu_device *adev = handle;
1645 struct smu_context *smu = adev->powerplay.pp_handle;
1646
1647 kfree(smu);
1648 }
1649
1650 static int smu_reset(struct smu_context *smu)
1651 {
1652 struct amdgpu_device *adev = smu->adev;
1653 int ret;
1654
1655 ret = smu_hw_fini(adev);
1656 if (ret)
1657 return ret;
1658
1659 ret = smu_hw_init(adev);
1660 if (ret)
1661 return ret;
1662
1663 ret = smu_late_init(adev);
1664 if (ret)
1665 return ret;
1666
1667 return 0;
1668 }
1669
1670 static int smu_suspend(void *handle)
1671 {
1672 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1673 struct smu_context *smu = adev->powerplay.pp_handle;
1674 int ret;
1675 uint64_t count;
1676
1677 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1678 return 0;
1679
1680 if (!smu->pm_enabled)
1681 return 0;
1682
1683 adev->pm.dpm_enabled = false;
1684
1685 ret = smu_smc_hw_cleanup(smu);
1686 if (ret)
1687 return ret;
1688
1689 smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
1690
1691 smu_set_gfx_cgpg(smu, false);
1692
1693 /*
1694 * PMFW resets the entrycount when the device is suspended, so we save the
1695 * last value here to be restored on resume, keeping it consistent.
1696 */
1697 ret = smu_get_entrycount_gfxoff(smu, &count);
1698 if (!ret)
1699 adev->gfx.gfx_off_entrycount = count;
1700
1701 return 0;
1702 }
1703
1704 static int smu_resume(void *handle)
1705 {
1706 int ret;
1707 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1708 struct smu_context *smu = adev->powerplay.pp_handle;
1709
1710 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1711 return 0;
1712
1713 if (!smu->pm_enabled)
1714 return 0;
1715
1716 dev_info(adev->dev, "SMU is resuming...\n");
1717
1718 ret = smu_start_smc_engine(smu);
1719 if (ret) {
1720 dev_err(adev->dev, "SMC engine is not correctly up!\n");
1721 return ret;
1722 }
1723
1724 ret = smu_smc_hw_setup(smu);
1725 if (ret) {
1726 dev_err(adev->dev, "Failed to setup smc hw!\n");
1727 return ret;
1728 }
1729
1730 ret = smu_set_gfx_imu_enable(smu);
1731 if (ret)
1732 return ret;
1733
1734 smu_set_gfx_cgpg(smu, true);
1735
1736 smu->disable_uclk_switch = 0;
1737
1738 adev->pm.dpm_enabled = true;
1739
1740 dev_info(adev->dev, "SMU is resumed successfully!\n");
1741
1742 return 0;
1743 }
1744
1745 static int smu_display_configuration_change(void *handle,
1746 const struct amd_pp_display_configuration *display_config)
1747 {
1748 struct smu_context *smu = handle;
1749 int index = 0;
1750 int num_of_active_display = 0;
1751
1752 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1753 return -EOPNOTSUPP;
1754
1755 if (!display_config)
1756 return -EINVAL;
1757
1758 smu_set_min_dcef_deep_sleep(smu,
1759 display_config->min_dcef_deep_sleep_set_clk / 100);
1760
1761 for (index = 0; index < display_config->num_path_including_non_display; index++) {
1762 if (display_config->displays[index].controller_id != 0)
1763 num_of_active_display++;
1764 }
1765
1766 return 0;
1767 }
1768
1769 static int smu_set_clockgating_state(void *handle,
1770 enum amd_clockgating_state state)
1771 {
1772 return 0;
1773 }
1774
1775 static int smu_set_powergating_state(void *handle,
1776 enum amd_powergating_state state)
1777 {
1778 return 0;
1779 }
1780
1781 static int smu_enable_umd_pstate(void *handle,
1782 enum amd_dpm_forced_level *level)
1783 {
1784 uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
1785 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
1786 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
1787 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
1788
1789 struct smu_context *smu = (struct smu_context*)(handle);
1790 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1791
1792 if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
1793 return -EINVAL;
1794
1795 if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
1796 /* enter umd pstate, save current level, disable gfx cg*/
1797 if (*level & profile_mode_mask) {
1798 smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
1799 smu_gpo_control(smu, false);
1800 smu_gfx_ulv_control(smu, false);
1801 smu_deep_sleep_control(smu, false);
1802 amdgpu_asic_update_umd_stable_pstate(smu->adev, true);
1803 }
1804 } else {
1805 /* exit umd pstate, restore level, enable gfx cg*/
1806 if (!(*level & profile_mode_mask)) {
1807 if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
1808 *level = smu_dpm_ctx->saved_dpm_level;
1809 amdgpu_asic_update_umd_stable_pstate(smu->adev, false);
1810 smu_deep_sleep_control(smu, true);
1811 smu_gfx_ulv_control(smu, true);
1812 smu_gpo_control(smu, true);
1813 }
1814 }
1815
1816 return 0;
1817 }
1818
1819 static int smu_bump_power_profile_mode(struct smu_context *smu,
1820 long *param,
1821 uint32_t param_size)
1822 {
1823 int ret = 0;
1824
1825 if (smu->ppt_funcs->set_power_profile_mode)
1826 ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);
1827
1828 return ret;
1829 }
1830
1831 static int smu_adjust_power_state_dynamic(struct smu_context *smu,
1832 enum amd_dpm_forced_level level,
1833 bool skip_display_settings)
1834 {
1835 int ret = 0;
1836 int index = 0;
1837 long workload;
1838 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1839
1840 if (!skip_display_settings) {
1841 ret = smu_display_config_changed(smu);
1842 if (ret) {
1843 dev_err(smu->adev->dev, "Failed to change display config!");
1844 return ret;
1845 }
1846 }
1847
1848 ret = smu_apply_clocks_adjust_rules(smu);
1849 if (ret) {
1850 dev_err(smu->adev->dev, "Failed to apply clocks adjust rules!");
1851 return ret;
1852 }
1853
1854 if (!skip_display_settings) {
1855 ret = smu_notify_smc_display_config(smu);
1856 if (ret) {
1857 dev_err(smu->adev->dev, "Failed to notify smc display config!");
1858 return ret;
1859 }
1860 }
1861
1862 if (smu_dpm_ctx->dpm_level != level) {
1863 ret = smu_asic_set_performance_level(smu, level);
1864 if (ret) {
1865 dev_err(smu->adev->dev, "Failed to set performance level!\n");
1866 return ret;
1867 }
1868
1869 /* update the saved copy */
1870 smu_dpm_ctx->dpm_level = level;
1871 }
1872
1873 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
1874 smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
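		/*
		 * Pick the power profile of the highest-priority workload
		 * still set in workload_mask (fls() returns the highest set
		 * bit) and apply it if it differs from the active mode.
		 */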
1875 index = fls(smu->workload_mask);
1876 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1877 workload = smu->workload_setting[index];
1878
1879 if (smu->power_profile_mode != workload)
1880 smu_bump_power_profile_mode(smu, &workload, 0);
1881 }
1882
1883 return ret;
1884 }
1885
1886 static int smu_handle_task(struct smu_context *smu,
1887 enum amd_dpm_forced_level level,
1888 enum amd_pp_task task_id)
1889 {
1890 int ret = 0;
1891
1892 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1893 return -EOPNOTSUPP;
1894
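	/*
	 * Display-config changes need the pre-change notification and the
	 * full display handling; the other tasks only re-evaluate the
	 * power state.
	 */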
1895 switch (task_id) {
1896 case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
1897 ret = smu_pre_display_config_changed(smu);
1898 if (ret)
1899 return ret;
1900 ret = smu_adjust_power_state_dynamic(smu, level, false);
1901 break;
1902 case AMD_PP_TASK_COMPLETE_INIT:
1903 case AMD_PP_TASK_READJUST_POWER_STATE:
1904 ret = smu_adjust_power_state_dynamic(smu, level, true);
1905 break;
1906 default:
1907 break;
1908 }
1909
1910 return ret;
1911 }
1912
1913 static int smu_handle_dpm_task(void *handle,
1914 enum amd_pp_task task_id,
1915 enum amd_pm_state_type *user_state)
1916 {
1917 struct smu_context *smu = handle;
1918 struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
1919
1920 return smu_handle_task(smu, smu_dpm->dpm_level, task_id);
1921
1922 }
1923
1924 static int smu_switch_power_profile(void *handle,
1925 enum PP_SMC_POWER_PROFILE type,
1926 bool en)
1927 {
1928 struct smu_context *smu = handle;
1929 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1930 long workload;
1931 uint32_t index;
1932
1933 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1934 return -EOPNOTSUPP;
1935
1936 if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
1937 return -EINVAL;
1938
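	/*
	 * Each profile type owns one bit in workload_mask, indexed by its
	 * priority. Enabling sets that bit, disabling clears it, and the
	 * profile of the highest-priority bit still set is (re)applied.
	 */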
1939 if (!en) {
1940 smu->workload_mask &= ~(1 << smu->workload_prority[type]);
1941 index = fls(smu->workload_mask);
1942 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1943 workload = smu->workload_setting[index];
1944 } else {
1945 smu->workload_mask |= (1 << smu->workload_prority[type]);
1946 index = fls(smu->workload_mask);
1947 index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1948 workload = smu->workload_setting[index];
1949 }
1950
1951 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
1952 smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
1953 smu_bump_power_profile_mode(smu, &workload, 0);
1954
1955 return 0;
1956 }
1957
1958 static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
1959 {
1960 struct smu_context *smu = handle;
1961 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1962
1963 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1964 return -EOPNOTSUPP;
1965
1966 if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
1967 return -EINVAL;
1968
1969 return smu_dpm_ctx->dpm_level;
1970 }
1971
1972 static int smu_force_performance_level(void *handle,
1973 enum amd_dpm_forced_level level)
1974 {
1975 struct smu_context *smu = handle;
1976 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1977 int ret = 0;
1978
1979 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1980 return -EOPNOTSUPP;
1981
1982 if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
1983 return -EINVAL;
1984
1985 ret = smu_enable_umd_pstate(smu, &level);
1986 if (ret)
1987 return ret;
1988
1989 ret = smu_handle_task(smu, level,
1990 AMD_PP_TASK_READJUST_POWER_STATE);
1991
1992 /* reset user dpm clock state */
1993 if (!ret && smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
1994 memset(smu->user_dpm_profile.clk_mask, 0, sizeof(smu->user_dpm_profile.clk_mask));
1995 smu->user_dpm_profile.clk_dependency = 0;
1996 }
1997
1998 return ret;
1999 }
2000
2001 static int smu_set_display_count(void *handle, uint32_t count)
2002 {
2003 struct smu_context *smu = handle;
2004
2005 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2006 return -EOPNOTSUPP;
2007
2008 return smu_init_display_count(smu, count);
2009 }
2010
2011 static int smu_force_smuclk_levels(struct smu_context *smu,
2012 enum smu_clk_type clk_type,
2013 uint32_t mask)
2014 {
2015 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2016 int ret = 0;
2017
2018 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2019 return -EOPNOTSUPP;
2020
2021 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
2022 dev_dbg(smu->adev->dev, "force clock level is for dpm manual mode only.\n");
2023 return -EINVAL;
2024 }
2025
2026 if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) {
2027 ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
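		/*
		 * Record the user's request (unless we are currently replaying
		 * a saved profile) so it can be restored after reset/resume.
		 */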
2028 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
2029 smu->user_dpm_profile.clk_mask[clk_type] = mask;
2030 smu_set_user_clk_dependencies(smu, clk_type);
2031 }
2032 }
2033
2034 return ret;
2035 }
2036
2037 static int smu_force_ppclk_levels(void *handle,
2038 enum pp_clock_type type,
2039 uint32_t mask)
2040 {
2041 struct smu_context *smu = handle;
2042 enum smu_clk_type clk_type;
2043
2044 switch (type) {
2045 case PP_SCLK:
2046 clk_type = SMU_SCLK; break;
2047 case PP_MCLK:
2048 clk_type = SMU_MCLK; break;
2049 case PP_PCIE:
2050 clk_type = SMU_PCIE; break;
2051 case PP_SOCCLK:
2052 clk_type = SMU_SOCCLK; break;
2053 case PP_FCLK:
2054 clk_type = SMU_FCLK; break;
2055 case PP_DCEFCLK:
2056 clk_type = SMU_DCEFCLK; break;
2057 case PP_VCLK:
2058 clk_type = SMU_VCLK; break;
2059 case PP_DCLK:
2060 clk_type = SMU_DCLK; break;
2061 case OD_SCLK:
2062 clk_type = SMU_OD_SCLK; break;
2063 case OD_MCLK:
2064 clk_type = SMU_OD_MCLK; break;
2065 case OD_VDDC_CURVE:
2066 clk_type = SMU_OD_VDDC_CURVE; break;
2067 case OD_RANGE:
2068 clk_type = SMU_OD_RANGE; break;
2069 default:
2070 return -EINVAL;
2071 }
2072
2073 return smu_force_smuclk_levels(smu, clk_type, mask);
2074 }
2075
2076 /*
2077 * On system suspend or reset, the dpm_enabled flag is
2078 * cleared so that SMU services which are no longer
2079 * supported will be gated.
2080 * However, setting the MP1 state must still be allowed
2081 * even when dpm_enabled has been cleared.
2082 */
2083 static int smu_set_mp1_state(void *handle,
2084 enum pp_mp1_state mp1_state)
2085 {
2086 struct smu_context *smu = handle;
2087 int ret = 0;
2088
2089 if (!smu->pm_enabled)
2090 return -EOPNOTSUPP;
2091
2092 if (smu->ppt_funcs &&
2093 smu->ppt_funcs->set_mp1_state)
2094 ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state);
2095
2096 return ret;
2097 }
2098
2099 static int smu_set_df_cstate(void *handle,
2100 enum pp_df_cstate state)
2101 {
2102 struct smu_context *smu = handle;
2103 int ret = 0;
2104
2105 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2106 return -EOPNOTSUPP;
2107
2108 if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
2109 return 0;
2110
2111 ret = smu->ppt_funcs->set_df_cstate(smu, state);
2112 if (ret)
2113 dev_err(smu->adev->dev, "[SetDfCstate] failed!\n");
2114
2115 return ret;
2116 }
2117
2118 int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
2119 {
2120 int ret = 0;
2121
2122 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2123 return -EOPNOTSUPP;
2124
2125 if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)
2126 return 0;
2127
2128 ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en);
2129 if (ret)
2130 dev_err(smu->adev->dev, "[AllowXgmiPowerDown] failed!\n");
2131
2132 return ret;
2133 }
2134
2135 int smu_write_watermarks_table(struct smu_context *smu)
2136 {
2137 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2138 return -EOPNOTSUPP;
2139
2140 return smu_set_watermarks_table(smu, NULL);
2141 }
2142
2143 static int smu_set_watermarks_for_clock_ranges(void *handle,
2144 struct pp_smu_wm_range_sets *clock_ranges)
2145 {
2146 struct smu_context *smu = handle;
2147
2148 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2149 return -EOPNOTSUPP;
2150
2151 if (smu->disable_watermark)
2152 return 0;
2153
2154 return smu_set_watermarks_table(smu, clock_ranges);
2155 }
2156
2157 int smu_set_ac_dc(struct smu_context *smu)
2158 {
2159 int ret = 0;
2160
2161 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2162 return -EOPNOTSUPP;
2163
2164 /* controlled by firmware */
2165 if (smu->dc_controlled_by_gpio)
2166 return 0;
2167
2168 ret = smu_set_power_source(smu,
2169 smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
2170 SMU_POWER_SOURCE_DC);
2171 if (ret)
2172 dev_err(smu->adev->dev, "Failed to switch to %s mode!\n",
2173 smu->adev->pm.ac_power ? "AC" : "DC");
2174
2175 return ret;
2176 }
2177
2178 const struct amd_ip_funcs smu_ip_funcs = {
2179 .name = "smu",
2180 .early_init = smu_early_init,
2181 .late_init = smu_late_init,
2182 .sw_init = smu_sw_init,
2183 .sw_fini = smu_sw_fini,
2184 .hw_init = smu_hw_init,
2185 .hw_fini = smu_hw_fini,
2186 .late_fini = smu_late_fini,
2187 .suspend = smu_suspend,
2188 .resume = smu_resume,
2189 .is_idle = NULL,
2190 .check_soft_reset = NULL,
2191 .wait_for_idle = NULL,
2192 .soft_reset = NULL,
2193 .set_clockgating_state = smu_set_clockgating_state,
2194 .set_powergating_state = smu_set_powergating_state,
2195 };
2196
2197 const struct amdgpu_ip_block_version smu_v11_0_ip_block =
2198 {
2199 .type = AMD_IP_BLOCK_TYPE_SMC,
2200 .major = 11,
2201 .minor = 0,
2202 .rev = 0,
2203 .funcs = &smu_ip_funcs,
2204 };
2205
2206 const struct amdgpu_ip_block_version smu_v12_0_ip_block =
2207 {
2208 .type = AMD_IP_BLOCK_TYPE_SMC,
2209 .major = 12,
2210 .minor = 0,
2211 .rev = 0,
2212 .funcs = &smu_ip_funcs,
2213 };
2214
2215 const struct amdgpu_ip_block_version smu_v13_0_ip_block =
2216 {
2217 .type = AMD_IP_BLOCK_TYPE_SMC,
2218 .major = 13,
2219 .minor = 0,
2220 .rev = 0,
2221 .funcs = &smu_ip_funcs,
2222 };
2223
2224 static int smu_load_microcode(void *handle)
2225 {
2226 struct smu_context *smu = handle;
2227 struct amdgpu_device *adev = smu->adev;
2228 int ret = 0;
2229
2230 if (!smu->pm_enabled)
2231 return -EOPNOTSUPP;
2232
2233 /* Direct SMU microcode loading is only used when firmware is not loaded by the PSP */
2234 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
2235 return 0;
2236
2237 if (smu->ppt_funcs->load_microcode) {
2238 ret = smu->ppt_funcs->load_microcode(smu);
2239 if (ret) {
2240 dev_err(adev->dev, "Load microcode failed\n");
2241 return ret;
2242 }
2243 }
2244
2245 if (smu->ppt_funcs->check_fw_status) {
2246 ret = smu->ppt_funcs->check_fw_status(smu);
2247 if (ret) {
2248 dev_err(adev->dev, "SMC is not ready\n");
2249 return ret;
2250 }
2251 }
2252
2253 return ret;
2254 }
2255
2256 static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
2257 {
2258 int ret = 0;
2259
2260 if (smu->ppt_funcs->set_gfx_cgpg)
2261 ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);
2262
2263 return ret;
2264 }
2265
2266 static int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
2267 {
2268 struct smu_context *smu = handle;
2269 int ret = 0;
2270
2271 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2272 return -EOPNOTSUPP;
2273
2274 if (!smu->ppt_funcs->set_fan_speed_rpm)
2275 return -EOPNOTSUPP;
2276
2277 if (speed == U32_MAX)
2278 return -EINVAL;
2279
2280 ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
2281 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
2282 smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_RPM;
2283 smu->user_dpm_profile.fan_speed_rpm = speed;
2284
2285 /* Override custom PWM setting as they cannot co-exist */
2286 smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_PWM;
2287 smu->user_dpm_profile.fan_speed_pwm = 0;
2288 }
2289
2290 return ret;
2291 }
2292
2293 /**
2294 * smu_get_power_limit - Request one of the SMU Power Limits
2295 *
2296 * @handle: pointer to smu context
2297 * @limit: requested limit is written back to this variable
2298 * @pp_limit_level: &pp_power_limit_level selecting which limit level to return
2299 * @pp_power_type: &pp_power_type power type the requested limit applies to
2300 * Return: 0 on success, <0 on error
2301 *
2302 */
2303 int smu_get_power_limit(void *handle,
2304 uint32_t *limit,
2305 enum pp_power_limit_level pp_limit_level,
2306 enum pp_power_type pp_power_type)
2307 {
2308 struct smu_context *smu = handle;
2309 struct amdgpu_device *adev = smu->adev;
2310 enum smu_ppt_limit_level limit_level;
2311 uint32_t limit_type;
2312 int ret = 0;
2313
2314 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2315 return -EOPNOTSUPP;
2316
2317 switch (pp_power_type) {
2318 case PP_PWR_TYPE_SUSTAINED:
2319 limit_type = SMU_DEFAULT_PPT_LIMIT;
2320 break;
2321 case PP_PWR_TYPE_FAST:
2322 limit_type = SMU_FAST_PPT_LIMIT;
2323 break;
2324 default:
2325 return -EOPNOTSUPP;
2326 break;
2327 }
2328
2329 switch (pp_limit_level) {
2330 case PP_PWR_LIMIT_CURRENT:
2331 limit_level = SMU_PPT_LIMIT_CURRENT;
2332 break;
2333 case PP_PWR_LIMIT_DEFAULT:
2334 limit_level = SMU_PPT_LIMIT_DEFAULT;
2335 break;
2336 case PP_PWR_LIMIT_MAX:
2337 limit_level = SMU_PPT_LIMIT_MAX;
2338 break;
2339 case PP_PWR_LIMIT_MIN:
2340 default:
2341 return -EOPNOTSUPP;
2342 break;
2343 }
2344
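	/*
	 * Non-default (e.g. fast) PPT limits always come from the ASIC
	 * backend; default limits are served from the cached values, with
	 * the current limit refreshed from the ASIC where needed.
	 */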
2345 if (limit_type != SMU_DEFAULT_PPT_LIMIT) {
2346 if (smu->ppt_funcs->get_ppt_limit)
2347 ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level);
2348 } else {
2349 switch (limit_level) {
2350 case SMU_PPT_LIMIT_CURRENT:
2351 switch (adev->ip_versions[MP1_HWIP][0]) {
2352 case IP_VERSION(13, 0, 2):
2353 case IP_VERSION(11, 0, 7):
2354 case IP_VERSION(11, 0, 11):
2355 case IP_VERSION(11, 0, 12):
2356 case IP_VERSION(11, 0, 13):
2357 ret = smu_get_asic_power_limits(smu,
2358 &smu->current_power_limit,
2359 NULL,
2360 NULL);
2361 break;
2362 default:
2363 break;
2364 }
2365 *limit = smu->current_power_limit;
2366 break;
2367 case SMU_PPT_LIMIT_DEFAULT:
2368 *limit = smu->default_power_limit;
2369 break;
2370 case SMU_PPT_LIMIT_MAX:
2371 *limit = smu->max_power_limit;
2372 break;
2373 default:
2374 break;
2375 }
2376 }
2377
2378 return ret;
2379 }
2380
2381 static int smu_set_power_limit(void *handle, uint32_t limit)
2382 {
2383 struct smu_context *smu = handle;
2384 uint32_t limit_type = limit >> 24;
2385 int ret = 0;
2386
2387 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2388 return -EOPNOTSUPP;
2389
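	/* The top 8 bits of @limit select the PPT limit type; the low 24 bits carry the limit value. */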
2390 limit &= (1<<24)-1;
2391 if (limit_type != SMU_DEFAULT_PPT_LIMIT)
2392 if (smu->ppt_funcs->set_power_limit)
2393 return smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
2394
2395 if (limit > smu->max_power_limit) {
2396 dev_err(smu->adev->dev,
2397 "New power limit (%d) is over the max allowed %d\n",
2398 limit, smu->max_power_limit);
2399 return -EINVAL;
2400 }
2401
2402 if (!limit)
2403 limit = smu->current_power_limit;
2404
2405 if (smu->ppt_funcs->set_power_limit) {
2406 ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
2407 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
2408 smu->user_dpm_profile.power_limit = limit;
2409 }
2410
2411 return ret;
2412 }
2413
2414 static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
2415 {
2416 int ret = 0;
2417
2418 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2419 return -EOPNOTSUPP;
2420
2421 if (smu->ppt_funcs->print_clk_levels)
2422 ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
2423
2424 return ret;
2425 }
2426
2427 static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type)
2428 {
2429 enum smu_clk_type clk_type;
2430
2431 switch (type) {
2432 case PP_SCLK:
2433 clk_type = SMU_SCLK; break;
2434 case PP_MCLK:
2435 clk_type = SMU_MCLK; break;
2436 case PP_PCIE:
2437 clk_type = SMU_PCIE; break;
2438 case PP_SOCCLK:
2439 clk_type = SMU_SOCCLK; break;
2440 case PP_FCLK:
2441 clk_type = SMU_FCLK; break;
2442 case PP_DCEFCLK:
2443 clk_type = SMU_DCEFCLK; break;
2444 case PP_VCLK:
2445 clk_type = SMU_VCLK; break;
2446 case PP_DCLK:
2447 clk_type = SMU_DCLK; break;
2448 case OD_SCLK:
2449 clk_type = SMU_OD_SCLK; break;
2450 case OD_MCLK:
2451 clk_type = SMU_OD_MCLK; break;
2452 case OD_VDDC_CURVE:
2453 clk_type = SMU_OD_VDDC_CURVE; break;
2454 case OD_RANGE:
2455 clk_type = SMU_OD_RANGE; break;
2456 case OD_VDDGFX_OFFSET:
2457 clk_type = SMU_OD_VDDGFX_OFFSET; break;
2458 case OD_CCLK:
2459 clk_type = SMU_OD_CCLK; break;
2460 default:
2461 clk_type = SMU_CLK_COUNT; break;
2462 }
2463
2464 return clk_type;
2465 }
2466
2467 static int smu_print_ppclk_levels(void *handle,
2468 enum pp_clock_type type,
2469 char *buf)
2470 {
2471 struct smu_context *smu = handle;
2472 enum smu_clk_type clk_type;
2473
2474 clk_type = smu_convert_to_smuclk(type);
2475 if (clk_type == SMU_CLK_COUNT)
2476 return -EINVAL;
2477
2478 return smu_print_smuclk_levels(smu, clk_type, buf);
2479 }
2480
2481 static int smu_emit_ppclk_levels(void *handle, enum pp_clock_type type, char *buf, int *offset)
2482 {
2483 struct smu_context *smu = handle;
2484 enum smu_clk_type clk_type;
2485
2486 clk_type = smu_convert_to_smuclk(type);
2487 if (clk_type == SMU_CLK_COUNT)
2488 return -EINVAL;
2489
2490 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2491 return -EOPNOTSUPP;
2492
2493 if (!smu->ppt_funcs->emit_clk_levels)
2494 return -ENOENT;
2495
2496 return smu->ppt_funcs->emit_clk_levels(smu, clk_type, buf, offset);
2497
2498 }
2499
2500 static int smu_od_edit_dpm_table(void *handle,
2501 enum PP_OD_DPM_TABLE_COMMAND type,
2502 long *input, uint32_t size)
2503 {
2504 struct smu_context *smu = handle;
2505 int ret = 0;
2506
2507 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2508 return -EOPNOTSUPP;
2509
2510 if (smu->ppt_funcs->od_edit_dpm_table) {
2511 ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
2512 }
2513
2514 return ret;
2515 }
2516
2517 static int smu_read_sensor(void *handle,
2518 int sensor,
2519 void *data,
2520 int *size_arg)
2521 {
2522 struct smu_context *smu = handle;
2523 struct smu_umd_pstate_table *pstate_table =
2524 &smu->pstate_table;
2525 int ret = 0;
2526 uint32_t *size, size_val;
2527
2528 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2529 return -EOPNOTSUPP;
2530
2531 if (!data || !size_arg)
2532 return -EINVAL;
2533
2534 size_val = *size_arg;
2535 size = &size_val;
2536
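	/*
	 * Let the ASIC-specific backend serve the sensor first; fall through
	 * to the generic handling below only if it cannot.
	 */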
2537 if (smu->ppt_funcs->read_sensor)
2538 if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size))
2539 goto unlock;
2540
2541 switch (sensor) {
2542 case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
2543 *((uint32_t *)data) = pstate_table->gfxclk_pstate.standard * 100;
2544 *size = 4;
2545 break;
2546 case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
2547 *((uint32_t *)data) = pstate_table->uclk_pstate.standard * 100;
2548 *size = 4;
2549 break;
2550 case AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK:
2551 *((uint32_t *)data) = pstate_table->gfxclk_pstate.peak * 100;
2552 *size = 4;
2553 break;
2554 case AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK:
2555 *((uint32_t *)data) = pstate_table->uclk_pstate.peak * 100;
2556 *size = 4;
2557 break;
2558 case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
2559 ret = smu_feature_get_enabled_mask(smu, (uint64_t *)data);
2560 *size = 8;
2561 break;
2562 case AMDGPU_PP_SENSOR_UVD_POWER:
2563 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
2564 *size = 4;
2565 break;
2566 case AMDGPU_PP_SENSOR_VCE_POWER:
2567 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
2568 *size = 4;
2569 break;
2570 case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
2571 *(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 0 : 1;
2572 *size = 4;
2573 break;
2574 case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
2575 *(uint32_t *)data = 0;
2576 *size = 4;
2577 break;
2578 default:
2579 *size = 0;
2580 ret = -EOPNOTSUPP;
2581 break;
2582 }
2583
2584 unlock:
2585 /* copy the u32 size back into the caller's int argument */
2586 *size_arg = size_val;
2587
2588 return ret;
2589 }
2590
2591 static int smu_get_power_profile_mode(void *handle, char *buf)
2592 {
2593 struct smu_context *smu = handle;
2594
2595 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
2596 !smu->ppt_funcs->get_power_profile_mode)
2597 return -EOPNOTSUPP;
2598 if (!buf)
2599 return -EINVAL;
2600
2601 return smu->ppt_funcs->get_power_profile_mode(smu, buf);
2602 }
2603
2604 static int smu_set_power_profile_mode(void *handle,
2605 long *param,
2606 uint32_t param_size)
2607 {
2608 struct smu_context *smu = handle;
2609
2610 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
2611 !smu->ppt_funcs->set_power_profile_mode)
2612 return -EOPNOTSUPP;
2613
2614 return smu_bump_power_profile_mode(smu, param, param_size);
2615 }
2616
2617 static int smu_get_fan_control_mode(void *handle, u32 *fan_mode)
2618 {
2619 struct smu_context *smu = handle;
2620
2621 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2622 return -EOPNOTSUPP;
2623
2624 if (!smu->ppt_funcs->get_fan_control_mode)
2625 return -EOPNOTSUPP;
2626
2627 if (!fan_mode)
2628 return -EINVAL;
2629
2630 *fan_mode = smu->ppt_funcs->get_fan_control_mode(smu);
2631
2632 return 0;
2633 }
2634
2635 static int smu_set_fan_control_mode(void *handle, u32 value)
2636 {
2637 struct smu_context *smu = handle;
2638 int ret = 0;
2639
2640 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2641 return -EOPNOTSUPP;
2642
2643 if (!smu->ppt_funcs->set_fan_control_mode)
2644 return -EOPNOTSUPP;
2645
2646 if (value == U32_MAX)
2647 return -EINVAL;
2648
2649 ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
2650 if (ret)
2651 goto out;
2652
2653 if (!(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
2654 smu->user_dpm_profile.fan_mode = value;
2655
2656 /* reset user dpm fan speed */
2657 if (value != AMD_FAN_CTRL_MANUAL) {
2658 smu->user_dpm_profile.fan_speed_pwm = 0;
2659 smu->user_dpm_profile.fan_speed_rpm = 0;
2660 smu->user_dpm_profile.flags &= ~(SMU_CUSTOM_FAN_SPEED_RPM | SMU_CUSTOM_FAN_SPEED_PWM);
2661 }
2662 }
2663
2664 out:
2665 return ret;
2666 }
2667
2668 static int smu_get_fan_speed_pwm(void *handle, u32 *speed)
2669 {
2670 struct smu_context *smu = handle;
2671 int ret = 0;
2672
2673 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2674 return -EOPNOTSUPP;
2675
2676 if (!smu->ppt_funcs->get_fan_speed_pwm)
2677 return -EOPNOTSUPP;
2678
2679 if (!speed)
2680 return -EINVAL;
2681
2682 ret = smu->ppt_funcs->get_fan_speed_pwm(smu, speed);
2683
2684 return ret;
2685 }
2686
2687 static int smu_set_fan_speed_pwm(void *handle, u32 speed)
2688 {
2689 struct smu_context *smu = handle;
2690 int ret = 0;
2691
2692 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2693 return -EOPNOTSUPP;
2694
2695 if (!smu->ppt_funcs->set_fan_speed_pwm)
2696 return -EOPNOTSUPP;
2697
2698 if (speed == U32_MAX)
2699 return -EINVAL;
2700
2701 ret = smu->ppt_funcs->set_fan_speed_pwm(smu, speed);
2702 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
2703 smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_PWM;
2704 smu->user_dpm_profile.fan_speed_pwm = speed;
2705
2706 /* Override custom RPM setting as they cannot co-exist */
2707 smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_RPM;
2708 smu->user_dpm_profile.fan_speed_rpm = 0;
2709 }
2710
2711 return ret;
2712 }
2713
2714 static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
2715 {
2716 struct smu_context *smu = handle;
2717 int ret = 0;
2718
2719 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2720 return -EOPNOTSUPP;
2721
2722 if (!smu->ppt_funcs->get_fan_speed_rpm)
2723 return -EOPNOTSUPP;
2724
2725 if (!speed)
2726 return -EINVAL;
2727
2728 ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
2729
2730 return ret;
2731 }
2732
2733 static int smu_set_deep_sleep_dcefclk(void *handle, uint32_t clk)
2734 {
2735 struct smu_context *smu = handle;
2736
2737 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2738 return -EOPNOTSUPP;
2739
2740 return smu_set_min_dcef_deep_sleep(smu, clk);
2741 }
2742
2743 static int smu_get_clock_by_type_with_latency(void *handle,
2744 enum amd_pp_clock_type type,
2745 struct pp_clock_levels_with_latency *clocks)
2746 {
2747 struct smu_context *smu = handle;
2748 enum smu_clk_type clk_type;
2749 int ret = 0;
2750
2751 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2752 return -EOPNOTSUPP;
2753
2754 if (smu->ppt_funcs->get_clock_by_type_with_latency) {
2755 switch (type) {
2756 case amd_pp_sys_clock:
2757 clk_type = SMU_GFXCLK;
2758 break;
2759 case amd_pp_mem_clock:
2760 clk_type = SMU_MCLK;
2761 break;
2762 case amd_pp_dcef_clock:
2763 clk_type = SMU_DCEFCLK;
2764 break;
2765 case amd_pp_disp_clock:
2766 clk_type = SMU_DISPCLK;
2767 break;
2768 default:
2769 dev_err(smu->adev->dev, "Invalid clock type!\n");
2770 return -EINVAL;
2771 }
2772
2773 ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
2774 }
2775
2776 return ret;
2777 }
2778
2779 static int smu_display_clock_voltage_request(void *handle,
2780 struct pp_display_clock_request *clock_req)
2781 {
2782 struct smu_context *smu = handle;
2783 int ret = 0;
2784
2785 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2786 return -EOPNOTSUPP;
2787
2788 if (smu->ppt_funcs->display_clock_voltage_request)
2789 ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);
2790
2791 return ret;
2792 }
2793
2794
2795 static int smu_display_disable_memory_clock_switch(void *handle,
2796 bool disable_memory_clock_switch)
2797 {
2798 struct smu_context *smu = handle;
2799 int ret = -EINVAL;
2800
2801 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2802 return -EOPNOTSUPP;
2803
2804 if (smu->ppt_funcs->display_disable_memory_clock_switch)
2805 ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);
2806
2807 return ret;
2808 }
2809
2810 static int smu_set_xgmi_pstate(void *handle,
2811 uint32_t pstate)
2812 {
2813 struct smu_context *smu = handle;
2814 int ret = 0;
2815
2816 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2817 return -EOPNOTSUPP;
2818
2819 if (smu->ppt_funcs->set_xgmi_pstate)
2820 ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
2821
2822 if (ret)
2823 dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n");
2824
2825 return ret;
2826 }
2827
2828 static int smu_get_baco_capability(void *handle, bool *cap)
2829 {
2830 struct smu_context *smu = handle;
2831
2832 *cap = false;
2833
2834 if (!smu->pm_enabled)
2835 return 0;
2836
2837 if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
2838 *cap = smu->ppt_funcs->baco_is_support(smu);
2839
2840 return 0;
2841 }
2842
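/* BACO (Bus Active, Chip Off): state == 1 enters BACO, state == 0 exits it. */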
2843 static int smu_baco_set_state(void *handle, int state)
2844 {
2845 struct smu_context *smu = handle;
2846 int ret = 0;
2847
2848 if (!smu->pm_enabled)
2849 return -EOPNOTSUPP;
2850
2851 if (state == 0) {
2852 if (smu->ppt_funcs->baco_exit)
2853 ret = smu->ppt_funcs->baco_exit(smu);
2854 } else if (state == 1) {
2855 if (smu->ppt_funcs->baco_enter)
2856 ret = smu->ppt_funcs->baco_enter(smu);
2857 } else {
2858 return -EINVAL;
2859 }
2860
2861 if (ret)
2862 dev_err(smu->adev->dev, "Failed to %s BACO state!\n",
2863 (state)?"enter":"exit");
2864
2865 return ret;
2866 }
2867
2868 bool smu_mode1_reset_is_support(struct smu_context *smu)
2869 {
2870 bool ret = false;
2871
2872 if (!smu->pm_enabled)
2873 return false;
2874
2875 if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)
2876 ret = smu->ppt_funcs->mode1_reset_is_support(smu);
2877
2878 return ret;
2879 }
2880
2881 bool smu_mode2_reset_is_support(struct smu_context *smu)
2882 {
2883 bool ret = false;
2884
2885 if (!smu->pm_enabled)
2886 return false;
2887
2888 if (smu->ppt_funcs && smu->ppt_funcs->mode2_reset_is_support)
2889 ret = smu->ppt_funcs->mode2_reset_is_support(smu);
2890
2891 return ret;
2892 }
2893
2894 int smu_mode1_reset(struct smu_context *smu)
2895 {
2896 int ret = 0;
2897
2898 if (!smu->pm_enabled)
2899 return -EOPNOTSUPP;
2900
2901 if (smu->ppt_funcs->mode1_reset)
2902 ret = smu->ppt_funcs->mode1_reset(smu);
2903
2904 return ret;
2905 }
2906
2907 static int smu_mode2_reset(void *handle)
2908 {
2909 struct smu_context *smu = handle;
2910 int ret = 0;
2911
2912 if (!smu->pm_enabled)
2913 return -EOPNOTSUPP;
2914
2915 if (smu->ppt_funcs->mode2_reset)
2916 ret = smu->ppt_funcs->mode2_reset(smu);
2917
2918 if (ret)
2919 dev_err(smu->adev->dev, "Mode2 reset failed!\n");
2920
2921 return ret;
2922 }
2923
2924 static int smu_get_max_sustainable_clocks_by_dc(void *handle,
2925 struct pp_smu_nv_clock_table *max_clocks)
2926 {
2927 struct smu_context *smu = handle;
2928 int ret = 0;
2929
2930 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2931 return -EOPNOTSUPP;
2932
2933 if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
2934 ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);
2935
2936 return ret;
2937 }
2938
2939 static int smu_get_uclk_dpm_states(void *handle,
2940 unsigned int *clock_values_in_khz,
2941 unsigned int *num_states)
2942 {
2943 struct smu_context *smu = handle;
2944 int ret = 0;
2945
2946 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2947 return -EOPNOTSUPP;
2948
2949 if (smu->ppt_funcs->get_uclk_dpm_states)
2950 ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);
2951
2952 return ret;
2953 }
2954
2955 static enum amd_pm_state_type smu_get_current_power_state(void *handle)
2956 {
2957 struct smu_context *smu = handle;
2958 enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
2959
2960 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2961 return -EOPNOTSUPP;
2962
2963 if (smu->ppt_funcs->get_current_power_state)
2964 pm_state = smu->ppt_funcs->get_current_power_state(smu);
2965
2966 return pm_state;
2967 }
2968
2969 static int smu_get_dpm_clock_table(void *handle,
2970 struct dpm_clocks *clock_table)
2971 {
2972 struct smu_context *smu = handle;
2973 int ret = 0;
2974
2975 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2976 return -EOPNOTSUPP;
2977
2978 if (smu->ppt_funcs->get_dpm_clock_table)
2979 ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);
2980
2981 return ret;
2982 }
2983
2984 static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
2985 {
2986 struct smu_context *smu = handle;
2987
2988 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2989 return -EOPNOTSUPP;
2990
2991 if (!smu->ppt_funcs->get_gpu_metrics)
2992 return -EOPNOTSUPP;
2993
2994 return smu->ppt_funcs->get_gpu_metrics(smu, table);
2995 }
2996
2997 static int smu_enable_mgpu_fan_boost(void *handle)
2998 {
2999 struct smu_context *smu = handle;
3000 int ret = 0;
3001
3002 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3003 return -EOPNOTSUPP;
3004
3005 if (smu->ppt_funcs->enable_mgpu_fan_boost)
3006 ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu);
3007
3008 return ret;
3009 }
3010
3011 static int smu_gfx_state_change_set(void *handle,
3012 uint32_t state)
3013 {
3014 struct smu_context *smu = handle;
3015 int ret = 0;
3016
3017 if (smu->ppt_funcs->gfx_state_change_set)
3018 ret = smu->ppt_funcs->gfx_state_change_set(smu, state);
3019
3020 return ret;
3021 }
3022
3023 int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable)
3024 {
3025 int ret = 0;
3026
3027 if (smu->ppt_funcs->smu_handle_passthrough_sbr)
3028 ret = smu->ppt_funcs->smu_handle_passthrough_sbr(smu, enable);
3029
3030 return ret;
3031 }
3032
3033 int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc)
3034 {
3035 int ret = -EOPNOTSUPP;
3036
3037 if (smu->ppt_funcs &&
3038 smu->ppt_funcs->get_ecc_info)
3039 ret = smu->ppt_funcs->get_ecc_info(smu, umc_ecc);
3040
3041 return ret;
3042
3043 }
3044
3045 static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size)
3046 {
3047 struct smu_context *smu = handle;
3048 struct smu_table_context *smu_table = &smu->smu_table;
3049 struct smu_table *memory_pool = &smu_table->memory_pool;
3050
3051 if (!addr || !size)
3052 return -EINVAL;
3053
3054 *addr = NULL;
3055 *size = 0;
3056 if (memory_pool->bo) {
3057 *addr = memory_pool->cpu_addr;
3058 *size = memory_pool->size;
3059 }
3060
3061 return 0;
3062 }
3063
3064 static const struct amd_pm_funcs swsmu_pm_funcs = {
3065 /* export for sysfs */
3066 .set_fan_control_mode = smu_set_fan_control_mode,
3067 .get_fan_control_mode = smu_get_fan_control_mode,
3068 .set_fan_speed_pwm = smu_set_fan_speed_pwm,
3069 .get_fan_speed_pwm = smu_get_fan_speed_pwm,
3070 .force_clock_level = smu_force_ppclk_levels,
3071 .print_clock_levels = smu_print_ppclk_levels,
3072 .emit_clock_levels = smu_emit_ppclk_levels,
3073 .force_performance_level = smu_force_performance_level,
3074 .read_sensor = smu_read_sensor,
3075 .get_performance_level = smu_get_performance_level,
3076 .get_current_power_state = smu_get_current_power_state,
3077 .get_fan_speed_rpm = smu_get_fan_speed_rpm,
3078 .set_fan_speed_rpm = smu_set_fan_speed_rpm,
3079 .get_pp_num_states = smu_get_power_num_states,
3080 .get_pp_table = smu_sys_get_pp_table,
3081 .set_pp_table = smu_sys_set_pp_table,
3082 .switch_power_profile = smu_switch_power_profile,
3083 /* export to amdgpu */
3084 .dispatch_tasks = smu_handle_dpm_task,
3085 .load_firmware = smu_load_microcode,
3086 .set_powergating_by_smu = smu_dpm_set_power_gate,
3087 .set_power_limit = smu_set_power_limit,
3088 .get_power_limit = smu_get_power_limit,
3089 .get_power_profile_mode = smu_get_power_profile_mode,
3090 .set_power_profile_mode = smu_set_power_profile_mode,
3091 .odn_edit_dpm_table = smu_od_edit_dpm_table,
3092 .set_mp1_state = smu_set_mp1_state,
3093 .gfx_state_change_set = smu_gfx_state_change_set,
3094 /* export to DC */
3095 .get_sclk = smu_get_sclk,
3096 .get_mclk = smu_get_mclk,
3097 .display_configuration_change = smu_display_configuration_change,
3098 .get_clock_by_type_with_latency = smu_get_clock_by_type_with_latency,
3099 .display_clock_voltage_request = smu_display_clock_voltage_request,
3100 .enable_mgpu_fan_boost = smu_enable_mgpu_fan_boost,
3101 .set_active_display_count = smu_set_display_count,
3102 .set_min_deep_sleep_dcefclk = smu_set_deep_sleep_dcefclk,
3103 .get_asic_baco_capability = smu_get_baco_capability,
3104 .set_asic_baco_state = smu_baco_set_state,
3105 .get_ppfeature_status = smu_sys_get_pp_feature_mask,
3106 .set_ppfeature_status = smu_sys_set_pp_feature_mask,
3107 .asic_reset_mode_2 = smu_mode2_reset,
3108 .set_df_cstate = smu_set_df_cstate,
3109 .set_xgmi_pstate = smu_set_xgmi_pstate,
3110 .get_gpu_metrics = smu_sys_get_gpu_metrics,
3111 .set_watermarks_for_clock_ranges = smu_set_watermarks_for_clock_ranges,
3112 .display_disable_memory_clock_switch = smu_display_disable_memory_clock_switch,
3113 .get_max_sustainable_clocks_by_dc = smu_get_max_sustainable_clocks_by_dc,
3114 .get_uclk_dpm_states = smu_get_uclk_dpm_states,
3115 .get_dpm_clock_table = smu_get_dpm_clock_table,
3116 .get_smu_prv_buf_details = smu_get_prv_buffer_details,
3117 };
3118
3119 int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
3120 uint64_t event_arg)
3121 {
3122 int ret = -EINVAL;
3123
3124 if (smu->ppt_funcs->wait_for_event)
3125 ret = smu->ppt_funcs->wait_for_event(smu, event, event_arg);
3126
3127 return ret;
3128 }
3129
3130 int smu_stb_collect_info(struct smu_context *smu, void *buf, uint32_t size)
3131 {
3132
3133 if (!smu->ppt_funcs->stb_collect_info || !smu->stb_context.enabled)
3134 return -EOPNOTSUPP;
3135
3136 /* Confirm the buffer allocated is of correct size */
3137 if (size != smu->stb_context.stb_buf_size)
3138 return -EINVAL;
3139
3140 /*
3141 * No need to lock smu mutex as we access STB directly through MMIO
3142 * and not going through SMU messaging route (for now at least).
3143 * For registers access rely on implementation internal locking.
3144 */
3145 return smu->ppt_funcs->stb_collect_info(smu, buf, size);
3146 }
3147
3148 #if defined(CONFIG_DEBUG_FS)
3149
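/*
 * debugfs interface for dumping the SMU STB buffer: the whole buffer is
 * snapshotted into kernel memory on open and streamed out by read.
 */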
3150 static int smu_stb_debugfs_open(struct inode *inode, struct file *filp)
3151 {
3152 struct amdgpu_device *adev = filp->f_inode->i_private;
3153 struct smu_context *smu = adev->powerplay.pp_handle;
3154 unsigned char *buf;
3155 int r;
3156
3157 buf = kvmalloc_array(smu->stb_context.stb_buf_size, sizeof(*buf), GFP_KERNEL);
3158 if (!buf)
3159 return -ENOMEM;
3160
3161 r = smu_stb_collect_info(smu, buf, smu->stb_context.stb_buf_size);
3162 if (r)
3163 goto out;
3164
3165 filp->private_data = buf;
3166
3167 return 0;
3168
3169 out:
3170 kvfree(buf);
3171 return r;
3172 }
3173
3174 static ssize_t smu_stb_debugfs_read(struct file *filp, char __user *buf, size_t size,
3175 loff_t *pos)
3176 {
3177 struct amdgpu_device *adev = filp->f_inode->i_private;
3178 struct smu_context *smu = adev->powerplay.pp_handle;
3179
3180
3181 if (!filp->private_data)
3182 return -EINVAL;
3183
3184 return simple_read_from_buffer(buf,
3185 size,
3186 pos, filp->private_data,
3187 smu->stb_context.stb_buf_size);
3188 }
3189
3190 static int smu_stb_debugfs_release(struct inode *inode, struct file *filp)
3191 {
3192 kvfree(filp->private_data);
3193 filp->private_data = NULL;
3194
3195 return 0;
3196 }
3197
3198 /*
3199 * We have to define not only a read method but also
3200 * open and release methods, because .read returns at most
3201 * PAGE_SIZE of data per call and so may be invoked
3202 * multiple times. We allocate the STB buffer in .open
3203 * and free it in .release.
3204 */
3205 static const struct file_operations smu_stb_debugfs_fops = {
3206 .owner = THIS_MODULE,
3207 .open = smu_stb_debugfs_open,
3208 .read = smu_stb_debugfs_read,
3209 .release = smu_stb_debugfs_release,
3210 .llseek = default_llseek,
3211 };
3212
3213 #endif
3214
3215 void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev)
3216 {
3217 #if defined(CONFIG_DEBUG_FS)
3218
3219 struct smu_context *smu = adev->powerplay.pp_handle;
3220
3221 if (!smu || (!smu->stb_context.stb_buf_size))
3222 return;
3223
3224 debugfs_create_file_size("amdgpu_smu_stb_dump",
3225 S_IRUSR,
3226 adev_to_drm(adev)->primary->debugfs_root,
3227 adev,
3228 &smu_stb_debugfs_fops,
3229 smu->stb_context.stb_buf_size);
3230 #endif
3231 }
3232
3233 int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size)
3234 {
3235 int ret = 0;
3236
3237 if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_pages_num)
3238 ret = smu->ppt_funcs->send_hbm_bad_pages_num(smu, size);
3239
3240 return ret;
3241 }
3242
3243 int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size)
3244 {
3245 int ret = 0;
3246
3247 if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_channel_flag)
3248 ret = smu->ppt_funcs->send_hbm_bad_channel_flag(smu, size);
3249
3250 return ret;
3251 }
3252