/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include "amd_pcie.h"
#include "amdgpu_display.h"
#include "hwmgr.h"
#include <linux/power_supply.h>
#include "amdgpu_smu.h"

#define amdgpu_dpm_enable_bapm(adev, e) \
		((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))

#define amdgpu_dpm_is_legacy_dpm(adev) ((adev)->powerplay.pp_handle == (adev))

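/**
 * amdgpu_dpm_get_sclk - query an engine clock (sclk) from the PM backend
 * @adev: amdgpu_device pointer
 * @low: select the low end (true) or high end (false) of the clock range
 *
 * Return: the selected sclk value, or 0 if the backend does not implement
 * the query.
 */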
int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_sclk)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_sclk((adev)->powerplay.pp_handle,
				 low);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

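/**
 * amdgpu_dpm_get_mclk - query a memory clock (mclk) from the PM backend
 * @adev: amdgpu_device pointer
 * @low: select the low end (true) or high end (false) of the clock range
 *
 * Return: the selected mclk value, or 0 if the backend does not implement
 * the query.
 */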
int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_mclk)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_mclk((adev)->powerplay.pp_handle,
				 low);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

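/**
 * amdgpu_dpm_set_powergating_by_smu - gate or ungate an IP block via the SMU
 * @adev: amdgpu_device pointer
 * @block_type: AMD_IP_BLOCK_TYPE_* id of the block to update
 * @gate: true to power gate the block, false to ungate it
 *
 * Skips the request if the block is already in the desired power state and
 * caches the new state on success.
 *
 * Return: 0 on success or if nothing had to be done, negative error code
 * on failure.
 */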
int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;

	if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) {
		dev_dbg(adev->dev, "IP block %d already in the target %s state!",
			block_type, gate ? "gate" : "ungate");
		return 0;
	}

	mutex_lock(&adev->pm.mutex);

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCE:
	case AMD_IP_BLOCK_TYPE_GFX:
	case AMD_IP_BLOCK_TYPE_VCN:
	case AMD_IP_BLOCK_TYPE_SDMA:
	case AMD_IP_BLOCK_TYPE_JPEG:
	case AMD_IP_BLOCK_TYPE_GMC:
	case AMD_IP_BLOCK_TYPE_ACP:
		if (pp_funcs && pp_funcs->set_powergating_by_smu)
			ret = (pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate));
		break;
	default:
		break;
	}

	if (!ret)
		atomic_set(&adev->pm.pwr_state[block_type], pwr_state);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

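/**
 * amdgpu_dpm_set_gfx_power_up_by_imu - request gfx power up through the IMU
 * @adev: amdgpu_device pointer
 *
 * A 10 ms delay follows the SMU call before control returns to the caller.
 *
 * Return: 0 on success, negative error code on failure.
 */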
int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_gfx_power_up_by_imu(smu);
	mutex_unlock(&adev->pm.mutex);

	msleep(10);

	return ret;
}

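/**
 * amdgpu_dpm_baco_enter - put the ASIC into the BACO state
 * @adev: amdgpu_device pointer
 *
 * Return: 0 on success, -ENOENT if BACO state control is not implemented,
 * or another negative error code on failure.
 */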
int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* enter BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

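/**
 * amdgpu_dpm_baco_exit - bring the ASIC out of the BACO state
 * @adev: amdgpu_device pointer
 *
 * Return: 0 on success, -ENOENT if BACO state control is not implemented,
 * or another negative error code on failure.
 */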
int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* exit BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
			     enum pp_mp1_state mp1_state)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (pp_funcs && pp_funcs->set_mp1_state) {
		mutex_lock(&adev->pm.mutex);

		ret = pp_funcs->set_mp1_state(
				adev->powerplay.pp_handle,
				mp1_state);

		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	bool baco_cap;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
		return false;
	/* Don't use baco for reset in S3.
	 * This is a workaround for some platforms
	 * where entering BACO during suspend
	 * seems to cause reboots or hangs.
	 * This might be related to the fact that BACO controls
	 * power to the whole GPU including devices like audio and USB.
	 * Powering down/up everything may adversely affect these other
	 * devices. Needs more investigation.
	 */
	if (adev->in_s3)
		return false;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->get_asic_baco_capability(pp_handle,
						 &baco_cap);

	mutex_unlock(&adev->pm.mutex);

	return ret ? false : baco_cap;
}

int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->asic_reset_mode_2(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

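/**
 * amdgpu_dpm_baco_reset - reset the ASIC by cycling it through BACO
 * @adev: amdgpu_device pointer
 *
 * Enters and immediately exits the BACO state to trigger an ASIC reset.
 *
 * Return: 0 on success, negative error code on failure.
 */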
int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* enter BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
	if (ret)
		goto out;

	/* exit BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

out:
	mutex_unlock(&adev->pm.mutex);
	return ret;
}

bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	bool support_mode1_reset = false;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		support_mode1_reset = smu_mode1_reset_is_support(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return support_mode1_reset;
}

int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_mode1_reset(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

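/**
 * amdgpu_dpm_switch_power_profile - enable or disable a workload profile
 * @adev: amdgpu_device pointer
 * @type: the PP_SMC_POWER_PROFILE workload profile to toggle
 * @en: true to enable the profile, false to disable it
 *
 * A no-op under SR-IOV or when the backend does not implement profile
 * switching.
 *
 * Return: 0 on success or when nothing was done, negative error code on
 * failure.
 */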
int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
				    enum PP_SMC_POWER_PROFILE type,
				    bool en)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (pp_funcs && pp_funcs->switch_power_profile) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->switch_power_profile(
			adev->powerplay.pp_handle, type, en);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
			       uint32_t pstate)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->set_xgmi_pstate) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
						pstate);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
			     uint32_t cstate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;

	if (pp_funcs && pp_funcs->set_df_cstate) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_df_cstate(pp_handle, cstate);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_allow_xgmi_power_down(smu, en);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
				      uint32_t msg_id)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->set_clockgating_by_smu) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_clockgating_by_smu(pp_handle,
						       msg_id);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
				  bool acquire)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->smu_i2c_bus_access) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->smu_i2c_bus_access(pp_handle,
						   acquire);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			adev->pm.ac_power = true;
		else
			adev->pm.ac_power = false;

		if (adev->powerplay.pp_funcs &&
		    adev->powerplay.pp_funcs->enable_bapm)
			amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);

		if (is_support_sw_smu(adev))
			smu_set_ac_dc(adev->powerplay.pp_handle);

		mutex_unlock(&adev->pm.mutex);
	}
}

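/**
 * amdgpu_dpm_read_sensor - read a power management sensor
 * @adev: amdgpu_device pointer
 * @sensor: which sensor to read (temperature, clocks, power, load, ...)
 * @data: buffer that receives the sensor value
 * @size: on input the size of @data, on output the number of bytes written
 *
 * Return: 0 on success, -EINVAL on invalid arguments or when the backend
 * does not implement the query.
 */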
int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EINVAL;

	if (!data || !size)
		return -EINVAL;

	if (pp_funcs && pp_funcs->read_sensor) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->read_sensor(adev->powerplay.pp_handle,
					    sensor,
					    data,
					    size);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

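/**
 * amdgpu_dpm_compute_clocks - re-evaluate clocks for the current conditions
 * @adev: amdgpu_device pointer
 *
 * Updates display bandwidth requirements, waits for pending fences on all
 * ready rings and then asks the backend to recompute the power state.
 * A no-op when DPM is disabled or the backend does not implement it.
 */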
void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int i;

	if (!adev->pm.dpm_enabled)
		return;

	if (!pp_funcs->pm_compute_clocks)
		return;

	if (adev->mode_info.num_crtc)
		amdgpu_display_bandwidth_update(adev);

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (ring && ring->sched.ready)
			amdgpu_fence_wait_empty(ring);
	}

	mutex_lock(&adev->pm.mutex);
	pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.uvd_active = true;
			adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
		} else {
			adev->pm.dpm.uvd_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_dpm_compute_clocks(adev);
		return;
	}

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
	if (ret)
		DRM_ERROR("DPM %s uvd failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.vce_active = true;
			/* XXX select vce level based on ring/task */
			adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
		} else {
			adev->pm.dpm.vce_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_dpm_compute_clocks(adev);
		return;
	}

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
	if (ret)
		DRM_ERROR("DPM %s vce failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
	if (ret)
		DRM_ERROR("DPM %s jpeg failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

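/**
 * amdgpu_pm_load_smu_firmware - load the SMU firmware
 * @adev: amdgpu_device pointer
 * @smu_version: optional output for the loaded SMU firmware version
 *
 * Return: 0 on success or if the backend has no firmware to load,
 * negative error code on failure.
 */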
int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int r = 0;

	if (!pp_funcs || !pp_funcs->load_firmware)
		return 0;

	mutex_lock(&adev->pm.mutex);
	r = pp_funcs->load_firmware(adev->powerplay.pp_handle);
	if (r) {
		pr_err("smu firmware loading failed\n");
		goto out;
	}

	if (smu_version)
		*smu_version = adev->pm.fw_version;

out:
	mutex_unlock(&adev->pm.mutex);
	return r;
}

int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle,
						 enable);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_hbm_bad_pages_num(smu, size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_hbm_bad_channel_flag(smu, size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  uint32_t *min,
				  uint32_t *max)
{
	int ret = 0;

	if (type != PP_SCLK)
		return -EINVAL;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_dpm_freq_range(adev->powerplay.pp_handle,
				     SMU_SCLK,
				     min,
				     max);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
				   enum pp_clock_type type,
				   uint32_t min,
				   uint32_t max)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (type != PP_SCLK)
		return -EINVAL;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_soft_freq_range(smu,
				      SMU_SCLK,
				      min,
				      max);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = smu_write_watermarks_table(smu);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev,
			      enum smu_event_type event,
			      uint64_t event_arg)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_wait_for_event(smu, event, event_arg);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_residency_gfxoff(struct amdgpu_device *adev, bool value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_residency_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_residency_gfxoff(struct amdgpu_device *adev, u32 *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_residency_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_entrycount_gfxoff(struct amdgpu_device *adev, u64 *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_entrycount_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_status_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return 0;

	return atomic64_read(&smu->throttle_int_counter);
}

/**
 * amdgpu_dpm_gfx_state_change - handle a gfx power state change request
 * @adev: amdgpu_device pointer
 * @state: gfx power state (1 = sGpuChangeState_D0Entry, 2 = sGpuChangeState_D3Entry)
 */
void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
				 enum gfx_change_state state)
{
	mutex_lock(&adev->pm.mutex);
	if (adev->powerplay.pp_funcs &&
	    adev->powerplay.pp_funcs->gfx_state_change_set)
		((adev)->powerplay.pp_funcs->gfx_state_change_set(
			(adev)->powerplay.pp_handle, state));
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
			    void *umc_ecc)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_ecc_info(smu, umc_ecc);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
						     uint32_t idx)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	struct amd_vce_state *vstate = NULL;

	if (!pp_funcs->get_vce_clock_state)
		return NULL;

	mutex_lock(&adev->pm.mutex);
	vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle,
					       idx);
	mutex_unlock(&adev->pm.mutex);

	return vstate;
}

void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
					enum amd_pm_state_type *state)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	mutex_lock(&adev->pm.mutex);

	if (!pp_funcs->get_current_power_state) {
		*state = adev->pm.dpm.user_state;
		goto out;
	}

	*state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle);
	if (*state < POWER_STATE_TYPE_DEFAULT ||
	    *state > POWER_STATE_TYPE_INTERNAL_3DPERF)
		*state = adev->pm.dpm.user_state;

out:
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
				enum amd_pm_state_type state)
{
	mutex_lock(&adev->pm.mutex);
	adev->pm.dpm.user_state = state;
	mutex_unlock(&adev->pm.mutex);

	if (is_support_sw_smu(adev))
		return;

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_ENABLE_USER_STATE,
				     &state) == -EOPNOTSUPP)
		amdgpu_dpm_compute_clocks(adev);
}

enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level level;

	if (!pp_funcs)
		return AMD_DPM_FORCED_LEVEL_AUTO;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->get_performance_level)
		level = pp_funcs->get_performance_level(adev->powerplay.pp_handle);
	else
		level = adev->pm.dpm.forced_level;
	mutex_unlock(&adev->pm.mutex);

	return level;
}

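/**
 * amdgpu_dpm_force_performance_level - force a dpm performance level
 * @adev: amdgpu_device pointer
 * @level: the amd_dpm_forced_level to switch to
 *
 * Handles the gfxoff and gfx clock/power gating transitions required when
 * entering or leaving the UMD pstate profile levels, then hands the
 * request to the backend.
 *
 * Return: 0 on success or when no change was needed, -EINVAL if thermal
 * throttling is active, the requested transition is invalid, or the
 * backend rejects the level.
 */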
int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
				       enum amd_dpm_forced_level level)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level current_level;
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	if (!pp_funcs || !pp_funcs->force_performance_level)
		return 0;

	if (adev->pm.dpm.thermal_active)
		return -EINVAL;

	current_level = amdgpu_dpm_get_performance_level(adev);
	if (current_level == level)
		return 0;

	if (adev->asic_type == CHIP_RAVEN) {
		if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
			if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
			    level == AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, false);
			else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL &&
				 level != AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, true);
		}
	}

	if (!(current_level & profile_mode_mask) &&
	    (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT))
		return -EINVAL;

	if (!(current_level & profile_mode_mask) &&
	    (level & profile_mode_mask)) {
		/* enter UMD Pstate */
		amdgpu_device_ip_set_powergating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_PG_STATE_UNGATE);
		amdgpu_device_ip_set_clockgating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_CG_STATE_UNGATE);
	} else if ((current_level & profile_mode_mask) &&
		   !(level & profile_mode_mask)) {
		/* exit UMD Pstate */
		amdgpu_device_ip_set_clockgating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_CG_STATE_GATE);
		amdgpu_device_ip_set_powergating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_PG_STATE_GATE);
	}

	mutex_lock(&adev->pm.mutex);

	if (pp_funcs->force_performance_level(adev->powerplay.pp_handle,
					      level)) {
		mutex_unlock(&adev->pm.mutex);
		return -EINVAL;
	}

	adev->pm.dpm.forced_level = level;

	mutex_unlock(&adev->pm.mutex);

	return 0;
}

int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
				 struct pp_states_info *states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pp_num_states)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pp_num_states(adev->powerplay.pp_handle,
					  states);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev,
			     enum amd_pp_task task_id,
			     enum amd_pm_state_type *user_state)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->dispatch_tasks)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->dispatch_tasks(adev->powerplay.pp_handle,
				       task_id,
				       user_state);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pp_table)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle,
				     table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev,
				      uint32_t type,
				      long *input,
				      uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fine_grain_clk_vol)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle,
					       type,
					       input,
					       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev,
				  uint32_t type,
				  long *input,
				  uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->odn_edit_dpm_table)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle,
					   type,
					   input,
					   size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->print_clock_levels)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->print_clock_levels(adev->powerplay.pp_handle,
					   type,
					   buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev,
				 enum pp_clock_type type,
				 char *buf,
				 int *offset)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->emit_clock_levels)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->emit_clock_levels(adev->powerplay.pp_handle,
					  type,
					  buf,
					  offset);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev,
				    uint64_t ppfeature_masks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_ppfeature_status)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle,
					     ppfeature_masks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_ppfeature_status)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle,
					     buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
				 enum pp_clock_type type,
				 uint32_t mask)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->force_clock_level)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->force_clock_level(adev->powerplay.pp_handle,
					  type,
					  mask);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_sclk_od)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return 0;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->set_sclk_od)
		pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value);
	mutex_unlock(&adev->pm.mutex);

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_dpm_compute_clocks(adev);
	}

	return 0;
}

int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_mclk_od)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return 0;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->set_mclk_od)
		pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value);
	mutex_unlock(&adev->pm.mutex);

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_dpm_compute_clocks(adev);
	}

	return 0;
}

int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
				      char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_power_profile_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle,
					       buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
				      long *input, uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_power_profile_mode)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle,
					       input,
					       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_gpu_metrics)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle,
					table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t *fan_mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_control_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle,
					     fan_mode);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_speed_pwm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_speed_pwm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_speed_rpm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_speed_rpm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_control_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle,
					     mode);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

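/**
 * amdgpu_dpm_get_power_limit - query a power limit
 * @adev: amdgpu_device pointer
 * @limit: output for the power limit value
 * @pp_limit_level: which limit to query (e.g. current, default, min, max)
 * @power_type: which power type the limit applies to
 *
 * Return: 0 on success, -ENODATA if the backend does not implement the
 * query, or another negative error code on failure.
 */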
int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
			       uint32_t *limit,
			       enum pp_power_limit_level pp_limit_level,
			       enum pp_power_type power_type)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_power_limit)
		return -ENODATA;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_power_limit(adev->powerplay.pp_handle,
					limit,
					pp_limit_level,
					power_type);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
			       uint32_t limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_power_limit)
		return -EINVAL;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle,
					limit);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev)
{
	bool cclk_dpm_supported = false;

	if (!is_support_sw_smu(adev))
		return false;

	mutex_lock(&adev->pm.mutex);
	cclk_dpm_supported = is_support_cclk_dpm(adev);
	mutex_unlock(&adev->pm.mutex);

	return (int)cclk_dpm_supported;
}

int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
						       struct seq_file *m)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->debugfs_print_current_performance_level)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle,
							  m);
	mutex_unlock(&adev->pm.mutex);

	return 0;
}

int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
				       void **addr,
				       size_t *size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_smu_prv_buf_details)
		return -ENOSYS;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle,
						addr,
						size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
{
	if (is_support_sw_smu(adev)) {
		struct smu_context *smu = adev->powerplay.pp_handle;

		return (smu->od_enabled || smu->is_apu);
	} else {
		struct pp_hwmgr *hwmgr;

		/*
		 * dpm on some legacy ASICs doesn't carry an od_enabled member
		 * as its pp_handle is cast directly from adev.
		 */
		if (amdgpu_dpm_is_legacy_dpm(adev))
			return false;

		hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle;

		return hwmgr->od_enabled;
	}
}

int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
			    const char *buf,
			    size_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_pp_table)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_pp_table(adev->powerplay.pp_handle,
				     buf,
				     size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return INT_MAX;

	return smu->cpu_core_num;
}

void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev)
{
	if (!is_support_sw_smu(adev))
		return;

	amdgpu_smu_stb_debug_fs_init(adev);
}

int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev,
					    const struct amd_pp_display_configuration *input)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_configuration_change)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_configuration_change(adev->powerplay.pp_handle,
						     input);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev,
				 enum amd_pp_clock_type type,
				 struct amd_pp_clocks *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type(adev->powerplay.pp_handle,
					  type,
					  clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev,
						struct amd_pp_simple_clock_info *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_display_mode_validation_clocks)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle,
							   clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_latency *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type_with_latency)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle,
						       type,
						       clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_voltage *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type_with_voltage)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle,
						       type,
						       clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev,
						void *clock_ranges)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_watermarks_for_clocks_ranges)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle,
							 clock_ranges);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev,
					     struct pp_display_clock_request *clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_clock_voltage_request)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle,
						      clock);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev,
				  struct amd_pp_clock_info *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_current_clocks)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_current_clocks(adev->powerplay.pp_handle,
					   clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->notify_smu_enable_pwe)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev,
					uint32_t count)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_active_display_count)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_active_display_count(adev->powerplay.pp_handle,
						 count);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_min_deep_sleep_dcefclk)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle,
						   clock);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev,
					     uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_dcefclk_by_freq)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle,
					       clock);
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_fclk_by_freq)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle,
					    clock);
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev,
						   bool disable_memory_clock_switch)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_disable_memory_clock_switch)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle,
							    disable_memory_clock_switch);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev,
						struct pp_smu_nv_clock_table *max_clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_max_sustainable_clocks_by_dc)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle,
							 max_clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
						  unsigned int *clock_values_in_khz,
						  unsigned int *num_states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_uclk_dpm_states)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle,
					    clock_values_in_khz,
					    num_states);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
				   struct dpm_clocks *clock_table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_dpm_clock_table)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle,
					    clock_table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}