• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include "pp_debug.h"
24 #include <linux/types.h>
25 #include <linux/kernel.h>
26 #include <linux/gfp.h>
27 #include <linux/slab.h>
28 #include <linux/firmware.h>
29 #include <linux/reboot.h>
30 #include "amd_shared.h"
31 #include "amd_powerplay.h"
32 #include "power_state.h"
33 #include "amdgpu.h"
34 #include "hwmgr.h"
35 #include "amdgpu_dpm_internal.h"
36 #include "amdgpu_display.h"
37 
38 static const struct amd_pm_funcs pp_dpm_funcs;
39 
amd_powerplay_create(struct amdgpu_device * adev)40 static int amd_powerplay_create(struct amdgpu_device *adev)
41 {
42 	struct pp_hwmgr *hwmgr;
43 
44 	if (adev == NULL)
45 		return -EINVAL;
46 
47 	hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
48 	if (hwmgr == NULL)
49 		return -ENOMEM;
50 
51 	hwmgr->adev = adev;
52 	hwmgr->not_vf = !amdgpu_sriov_vf(adev);
53 	hwmgr->device = amdgpu_cgs_create_device(adev);
54 	if (!hwmgr->device) {
55 		kfree(hwmgr);
56 		return -ENOMEM;
57 	}
58 
59 	mutex_init(&hwmgr->msg_lock);
60 	hwmgr->chip_family = adev->family;
61 	hwmgr->chip_id = adev->asic_type;
62 	hwmgr->feature_mask = adev->pm.pp_feature;
63 	hwmgr->display_config = &adev->pm.pm_display_cfg;
64 	adev->powerplay.pp_handle = hwmgr;
65 	adev->powerplay.pp_funcs = &pp_dpm_funcs;
66 	return 0;
67 }
68 
69 
amd_powerplay_destroy(struct amdgpu_device * adev)70 static void amd_powerplay_destroy(struct amdgpu_device *adev)
71 {
72 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
73 
74 	mutex_destroy(&hwmgr->msg_lock);
75 
76 	kfree(hwmgr->hardcode_pp_table);
77 	hwmgr->hardcode_pp_table = NULL;
78 
79 	kfree(hwmgr);
80 	hwmgr = NULL;
81 }
82 
pp_early_init(void * handle)83 static int pp_early_init(void *handle)
84 {
85 	int ret;
86 	struct amdgpu_device *adev = handle;
87 
88 	ret = amd_powerplay_create(adev);
89 
90 	if (ret != 0)
91 		return ret;
92 
93 	ret = hwmgr_early_init(adev->powerplay.pp_handle);
94 	if (ret)
95 		return -EINVAL;
96 
97 	return 0;
98 }
99 
/*
 * Delayed-work handler armed on a software CTF (critical thermal fault)
 * event: re-checks the GPU temperature after the grace delay and performs
 * an orderly shutdown if it is still at or above the SW CTF threshold.
 */
static void pp_swctf_delayed_work_handler(struct work_struct *work)
{
	struct pp_hwmgr *hwmgr =
		container_of(work, struct pp_hwmgr, swctf_delayed_work.work);
	struct amdgpu_device *adev = hwmgr->adev;
	struct amdgpu_dpm_thermal *range =
				&adev->pm.dpm.thermal;
	uint32_t gpu_temperature, size = sizeof(gpu_temperature);
	int ret;

	/*
	 * If the hotspot/edge temperature is confirmed as below SW CTF setting point
	 * after the delay enforced, nothing will be done.
	 * Otherwise, a graceful shutdown will be performed to prevent further damage.
	 */
	if (range->sw_ctf_threshold &&
	    hwmgr->hwmgr_func->read_sensor) {
		ret = hwmgr->hwmgr_func->read_sensor(hwmgr,
						     AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
						     &gpu_temperature,
						     &size);
		/*
		 * For some legacy ASICs, hotspot temperature retrieving might be not
		 * supported. Check the edge temperature instead then.
		 */
		if (ret == -EOPNOTSUPP)
			ret = hwmgr->hwmgr_func->read_sensor(hwmgr,
							     AMDGPU_PP_SENSOR_EDGE_TEMP,
							     &gpu_temperature,
							     &size);
		/*
		 * The /1000 suggests the sensor reports millidegrees while
		 * sw_ctf_threshold is in degrees C — NOTE(review): confirm units.
		 */
		if (!ret && gpu_temperature / 1000 < range->sw_ctf_threshold)
			return;
	}

	dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
	dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
	orderly_poweroff(true);
}
138 
pp_sw_init(void * handle)139 static int pp_sw_init(void *handle)
140 {
141 	struct amdgpu_device *adev = handle;
142 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
143 	int ret = 0;
144 
145 	ret = hwmgr_sw_init(hwmgr);
146 
147 	pr_debug("powerplay sw init %s\n", ret ? "failed" : "successfully");
148 
149 	if (!ret)
150 		INIT_DELAYED_WORK(&hwmgr->swctf_delayed_work,
151 				  pp_swctf_delayed_work_handler);
152 
153 	return ret;
154 }
155 
pp_sw_fini(void * handle)156 static int pp_sw_fini(void *handle)
157 {
158 	struct amdgpu_device *adev = handle;
159 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
160 
161 	hwmgr_sw_fini(hwmgr);
162 
163 	amdgpu_ucode_release(&adev->pm.fw);
164 
165 	return 0;
166 }
167 
pp_hw_init(void * handle)168 static int pp_hw_init(void *handle)
169 {
170 	int ret = 0;
171 	struct amdgpu_device *adev = handle;
172 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
173 
174 	ret = hwmgr_hw_init(hwmgr);
175 
176 	if (ret)
177 		pr_err("powerplay hw init failed\n");
178 
179 	return ret;
180 }
181 
pp_hw_fini(void * handle)182 static int pp_hw_fini(void *handle)
183 {
184 	struct amdgpu_device *adev = handle;
185 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
186 
187 	cancel_delayed_work_sync(&hwmgr->swctf_delayed_work);
188 
189 	hwmgr_hw_fini(hwmgr);
190 
191 	return 0;
192 }
193 
/*
 * Reserve the SMU private buffer in GTT and report its CPU and GPU
 * addresses to the SMU via the CAC-buffer-info callback.  The buffer is
 * released again if notification fails or the callback is absent.
 */
static void pp_reserve_vram_for_smu(struct amdgpu_device *adev)
{
	/* r stays -EINVAL when the callback is missing, triggering cleanup below. */
	int r = -EINVAL;
	void *cpu_ptr = NULL;
	uint64_t gpu_addr;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	if (amdgpu_bo_create_kernel(adev, adev->pm.smu_prv_buffer_size,
						PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
						&adev->pm.smu_prv_buffer,
						&gpu_addr,
						&cpu_ptr)) {
		DRM_ERROR("amdgpu: failed to create smu prv buffer\n");
		return;
	}

	/* Hand both 32-bit halves of the CPU and GPU addresses to the SMU. */
	if (hwmgr->hwmgr_func->notify_cac_buffer_info)
		r = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr,
					lower_32_bits((unsigned long)cpu_ptr),
					upper_32_bits((unsigned long)cpu_ptr),
					lower_32_bits(gpu_addr),
					upper_32_bits(gpu_addr),
					adev->pm.smu_prv_buffer_size);

	if (r) {
		amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
		adev->pm.smu_prv_buffer = NULL;
		DRM_ERROR("amdgpu: failed to notify SMU buffer address\n");
	}
}
224 
pp_late_init(void * handle)225 static int pp_late_init(void *handle)
226 {
227 	struct amdgpu_device *adev = handle;
228 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
229 
230 	if (hwmgr && hwmgr->pm_en)
231 		hwmgr_handle_task(hwmgr,
232 					AMD_PP_TASK_COMPLETE_INIT, NULL);
233 	if (adev->pm.smu_prv_buffer_size != 0)
234 		pp_reserve_vram_for_smu(adev);
235 
236 	return 0;
237 }
238 
pp_late_fini(void * handle)239 static void pp_late_fini(void *handle)
240 {
241 	struct amdgpu_device *adev = handle;
242 
243 	if (adev->pm.smu_prv_buffer)
244 		amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
245 	amd_powerplay_destroy(adev);
246 }
247 
248 
pp_is_idle(void * handle)249 static bool pp_is_idle(void *handle)
250 {
251 	return false;
252 }
253 
/* Nothing to wait for; always reports success. */
static int pp_wait_for_idle(void *handle)
{
	return 0;
}
258 
/* Soft reset is a no-op for the powerplay block. */
static int pp_sw_reset(void *handle)
{
	return 0;
}
263 
/* Powergating state changes are not handled at this level; accept and ignore. */
static int pp_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}
269 
pp_suspend(void * handle)270 static int pp_suspend(void *handle)
271 {
272 	struct amdgpu_device *adev = handle;
273 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
274 
275 	cancel_delayed_work_sync(&hwmgr->swctf_delayed_work);
276 
277 	return hwmgr_suspend(hwmgr);
278 }
279 
pp_resume(void * handle)280 static int pp_resume(void *handle)
281 {
282 	struct amdgpu_device *adev = handle;
283 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
284 
285 	return hwmgr_resume(hwmgr);
286 }
287 
/* Clockgating state changes are not handled at this level; accept and ignore. */
static int pp_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}
293 
/* amdgpu IP-block lifecycle callbacks for the powerplay (SMC) block. */
static const struct amd_ip_funcs pp_ip_funcs = {
	.name = "powerplay",
	.early_init = pp_early_init,
	.late_init = pp_late_init,
	.sw_init = pp_sw_init,
	.sw_fini = pp_sw_fini,
	.hw_init = pp_hw_init,
	.hw_fini = pp_hw_fini,
	.late_fini = pp_late_fini,
	.suspend = pp_suspend,
	.resume = pp_resume,
	.is_idle = pp_is_idle,
	.wait_for_idle = pp_wait_for_idle,
	.soft_reset = pp_sw_reset,
	.set_clockgating_state = pp_set_clockgating_state,
	.set_powergating_state = pp_set_powergating_state,
	/* No register-state dump support for this block. */
	.dump_ip_state = NULL,
	.print_ip_state = NULL,
};
313 
/* Registers powerplay as the SMC IP block, version 1.0.0. */
const struct amdgpu_ip_block_version pp_smu_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &pp_ip_funcs,
};
322 
323 /* This interface only be supported On Vi,
324  * because only smu7/8 can help to load gfx/sdma fw,
325  * smu need to be enabled before load other ip's fw.
326  * so call start smu to load smu7 fw and other ip's fw
327  */
pp_dpm_load_fw(void * handle)328 static int pp_dpm_load_fw(void *handle)
329 {
330 	struct pp_hwmgr *hwmgr = handle;
331 
332 	if (!hwmgr || !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->start_smu)
333 		return -EINVAL;
334 
335 	if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
336 		pr_err("fw load failed\n");
337 		return -EINVAL;
338 	}
339 
340 	return 0;
341 }
342 
/* Firmware-load completion requires no action in powerplay. */
static int pp_dpm_fw_loading_complete(void *handle)
{
	return 0;
}
347 
/* Forward an SMU clockgating message to the ASIC backend, if implemented. */
static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->update_clock_gatings) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
}
362 
/*
 * Track transitions in and out of the UMD "profile" pstates.  On entry
 * the current dpm level is saved so that PROFILE_EXIT can later restore
 * it; en_umd_pstate mirrors whether a profile level is currently active.
 * May rewrite *level (PROFILE_EXIT -> saved level) for the caller.
 */
static void pp_dpm_en_umd_pstate(struct pp_hwmgr  *hwmgr,
						enum amd_dpm_forced_level *level)
{
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	if (!(hwmgr->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save current level, disable gfx cg*/
		if (*level & profile_mode_mask) {
			hwmgr->saved_dpm_level = hwmgr->dpm_level;
			hwmgr->en_umd_pstate = true;
		}
	} else {
		/* exit umd pstate, restore level, enable gfx cg*/
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = hwmgr->saved_dpm_level;
			hwmgr->en_umd_pstate = false;
		}
	}
}
386 
pp_dpm_force_performance_level(void * handle,enum amd_dpm_forced_level level)387 static int pp_dpm_force_performance_level(void *handle,
388 					enum amd_dpm_forced_level level)
389 {
390 	struct pp_hwmgr *hwmgr = handle;
391 
392 	if (!hwmgr || !hwmgr->pm_en)
393 		return -EINVAL;
394 
395 	if (level == hwmgr->dpm_level)
396 		return 0;
397 
398 	pp_dpm_en_umd_pstate(hwmgr, &level);
399 	hwmgr->request_dpm_level = level;
400 	hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
401 
402 	return 0;
403 }
404 
pp_dpm_get_performance_level(void * handle)405 static enum amd_dpm_forced_level pp_dpm_get_performance_level(
406 								void *handle)
407 {
408 	struct pp_hwmgr *hwmgr = handle;
409 
410 	if (!hwmgr || !hwmgr->pm_en)
411 		return -EINVAL;
412 
413 	return hwmgr->dpm_level;
414 }
415 
pp_dpm_get_sclk(void * handle,bool low)416 static uint32_t pp_dpm_get_sclk(void *handle, bool low)
417 {
418 	struct pp_hwmgr *hwmgr = handle;
419 
420 	if (!hwmgr || !hwmgr->pm_en)
421 		return 0;
422 
423 	if (hwmgr->hwmgr_func->get_sclk == NULL) {
424 		pr_info_ratelimited("%s was not implemented.\n", __func__);
425 		return 0;
426 	}
427 	return hwmgr->hwmgr_func->get_sclk(hwmgr, low);
428 }
429 
pp_dpm_get_mclk(void * handle,bool low)430 static uint32_t pp_dpm_get_mclk(void *handle, bool low)
431 {
432 	struct pp_hwmgr *hwmgr = handle;
433 
434 	if (!hwmgr || !hwmgr->pm_en)
435 		return 0;
436 
437 	if (hwmgr->hwmgr_func->get_mclk == NULL) {
438 		pr_info_ratelimited("%s was not implemented.\n", __func__);
439 		return 0;
440 	}
441 	return hwmgr->hwmgr_func->get_mclk(hwmgr, low);
442 }
443 
/* Gate/ungate the VCE block via the ASIC backend, when supported. */
static void pp_dpm_powergate_vce(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return;

	if (!hwmgr->hwmgr_func->powergate_vce) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return;
	}

	hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
}
457 
/* Gate/ungate the UVD block via the ASIC backend, when supported. */
static void pp_dpm_powergate_uvd(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return;

	if (!hwmgr->hwmgr_func->powergate_uvd) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return;
	}

	hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
}
471 
pp_dpm_dispatch_tasks(void * handle,enum amd_pp_task task_id,enum amd_pm_state_type * user_state)472 static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
473 		enum amd_pm_state_type *user_state)
474 {
475 	struct pp_hwmgr *hwmgr = handle;
476 
477 	if (!hwmgr || !hwmgr->pm_en)
478 		return -EINVAL;
479 
480 	return hwmgr_handle_task(hwmgr, task_id, user_state);
481 }
482 
pp_dpm_get_current_power_state(void * handle)483 static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
484 {
485 	struct pp_hwmgr *hwmgr = handle;
486 	struct pp_power_state *state;
487 	enum amd_pm_state_type pm_type;
488 
489 	if (!hwmgr || !hwmgr->pm_en || !hwmgr->current_ps)
490 		return -EINVAL;
491 
492 	state = hwmgr->current_ps;
493 
494 	switch (state->classification.ui_label) {
495 	case PP_StateUILabel_Battery:
496 		pm_type = POWER_STATE_TYPE_BATTERY;
497 		break;
498 	case PP_StateUILabel_Balanced:
499 		pm_type = POWER_STATE_TYPE_BALANCED;
500 		break;
501 	case PP_StateUILabel_Performance:
502 		pm_type = POWER_STATE_TYPE_PERFORMANCE;
503 		break;
504 	default:
505 		if (state->classification.flags & PP_StateClassificationFlag_Boot)
506 			pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
507 		else
508 			pm_type = POWER_STATE_TYPE_DEFAULT;
509 		break;
510 	}
511 
512 	return pm_type;
513 }
514 
/* Set the fan control mode (manual/auto) via the ASIC backend. */
static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EOPNOTSUPP;

	if (!hwmgr->hwmgr_func->set_fan_control_mode)
		return -EOPNOTSUPP;

	/* U32_MAX is the sentinel for an invalid mode. */
	if (mode == U32_MAX)
		return -EINVAL;

	hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);

	return 0;
}
532 
/* Read the current fan control mode from the ASIC backend. */
static int pp_dpm_get_fan_control_mode(void *handle, uint32_t *fan_mode)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EOPNOTSUPP;

	if (!hwmgr->hwmgr_func->get_fan_control_mode)
		return -EOPNOTSUPP;

	if (!fan_mode)
		return -EINVAL;

	*fan_mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);

	return 0;
}
549 
/* Set fan speed as a PWM duty value via the ASIC backend. */
static int pp_dpm_set_fan_speed_pwm(void *handle, uint32_t speed)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EOPNOTSUPP;

	if (!hwmgr->hwmgr_func->set_fan_speed_pwm)
		return -EOPNOTSUPP;

	/* U32_MAX is the sentinel for an invalid speed. */
	if (speed == U32_MAX)
		return -EINVAL;

	return hwmgr->hwmgr_func->set_fan_speed_pwm(hwmgr, speed);
}
565 
/* Read the current fan speed as a PWM duty value from the ASIC backend. */
static int pp_dpm_get_fan_speed_pwm(void *handle, uint32_t *speed)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EOPNOTSUPP;

	if (!hwmgr->hwmgr_func->get_fan_speed_pwm)
		return -EOPNOTSUPP;

	if (!speed)
		return -EINVAL;

	return hwmgr->hwmgr_func->get_fan_speed_pwm(hwmgr, speed);
}
581 
/* Read the current fan speed in RPM from the ASIC backend. */
static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EOPNOTSUPP;

	if (!hwmgr->hwmgr_func->get_fan_speed_rpm)
		return -EOPNOTSUPP;

	if (!rpm)
		return -EINVAL;

	return hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
}
597 
/* Set a target fan speed in RPM via the ASIC backend. */
static int pp_dpm_set_fan_speed_rpm(void *handle, uint32_t rpm)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EOPNOTSUPP;

	if (!hwmgr->hwmgr_func->set_fan_speed_rpm)
		return -EOPNOTSUPP;

	/* U32_MAX is the sentinel for an invalid RPM value. */
	if (rpm == U32_MAX)
		return -EINVAL;

	return hwmgr->hwmgr_func->set_fan_speed_rpm(hwmgr, rpm);
}
613 
/*
 * Report the number of parsed power states and classify each into the
 * generic POWER_STATE_TYPE_* values.  @data is zeroed up front so a
 * failing early return still leaves it in a defined state.
 */
static int pp_dpm_get_pp_num_states(void *handle,
		struct pp_states_info *data)
{
	struct pp_hwmgr *hwmgr = handle;
	int i;

	memset(data, 0, sizeof(*data));

	if (!hwmgr || !hwmgr->pm_en || !hwmgr->ps)
		return -EINVAL;

	data->nums = hwmgr->num_ps;

	for (i = 0; i < hwmgr->num_ps; i++) {
		/* hwmgr->ps is a packed array with stride ps_size, not pp_power_state[]. */
		struct pp_power_state *state = (struct pp_power_state *)
				((unsigned long)hwmgr->ps + i * hwmgr->ps_size);
		switch (state->classification.ui_label) {
		case PP_StateUILabel_Battery:
			data->states[i] = POWER_STATE_TYPE_BATTERY;
			break;
		case PP_StateUILabel_Balanced:
			data->states[i] = POWER_STATE_TYPE_BALANCED;
			break;
		case PP_StateUILabel_Performance:
			data->states[i] = POWER_STATE_TYPE_PERFORMANCE;
			break;
		default:
			/* Boot states map to INTERNAL_BOOT; anything else to DEFAULT. */
			if (state->classification.flags & PP_StateClassificationFlag_Boot)
				data->states[i] = POWER_STATE_TYPE_INTERNAL_BOOT;
			else
				data->states[i] = POWER_STATE_TYPE_DEFAULT;
		}
	}
	return 0;
}
649 
pp_dpm_get_pp_table(void * handle,char ** table)650 static int pp_dpm_get_pp_table(void *handle, char **table)
651 {
652 	struct pp_hwmgr *hwmgr = handle;
653 
654 	if (!hwmgr || !hwmgr->pm_en || !hwmgr->soft_pp_table)
655 		return -EINVAL;
656 
657 	*table = (char *)hwmgr->soft_pp_table;
658 	return hwmgr->soft_pp_table_size;
659 }
660 
amd_powerplay_reset(void * handle)661 static int amd_powerplay_reset(void *handle)
662 {
663 	struct pp_hwmgr *hwmgr = handle;
664 	int ret;
665 
666 	ret = hwmgr_hw_fini(hwmgr);
667 	if (ret)
668 		return ret;
669 
670 	ret = hwmgr_hw_init(hwmgr);
671 	if (ret)
672 		return ret;
673 
674 	return hwmgr_handle_task(hwmgr, AMD_PP_TASK_COMPLETE_INIT, NULL);
675 }
676 
/*
 * Install a user-supplied pp table override and reset powerplay so it
 * takes effect.  The override lives in hardcode_pp_table, a copy of the
 * soft table, which soft_pp_table is then pointed at.
 */
static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = -ENOMEM;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	/*
	 * hardcode_pp_table is only soft_pp_table_size bytes, so a larger
	 * caller-controlled blob would overflow it; a NULL buf with a
	 * non-zero size would fault in memcpy.  Reject both up front.
	 */
	if ((size && !buf) || size > hwmgr->soft_pp_table_size)
		return -EINVAL;

	if (!hwmgr->hardcode_pp_table) {
		hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
						   hwmgr->soft_pp_table_size,
						   GFP_KERNEL);
		if (!hwmgr->hardcode_pp_table)
			return ret;
	}

	memcpy(hwmgr->hardcode_pp_table, buf, size);

	hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;

	/* Re-initialize hardware so the new table is actually consumed. */
	ret = amd_powerplay_reset(handle);
	if (ret)
		return ret;

	/* AVFS must be recalibrated against the new table where supported. */
	if (hwmgr->hwmgr_func->avfs_control)
		ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false);

	return ret;
}
706 
/* Force a mask of allowed dpm levels for one clock domain (manual mode only). */
static int pp_dpm_force_clock_level(void *handle,
		enum pp_clock_type type, uint32_t mask)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->force_clock_level) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	/* Only honored while the dpm level is forced to manual. */
	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		pr_debug("force clock level is for dpm manual mode only.\n");
		return -EINVAL;
	}

	return hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
}
727 
pp_dpm_emit_clock_levels(void * handle,enum pp_clock_type type,char * buf,int * offset)728 static int pp_dpm_emit_clock_levels(void *handle,
729 				    enum pp_clock_type type,
730 				    char *buf,
731 				    int *offset)
732 {
733 	struct pp_hwmgr *hwmgr = handle;
734 
735 	if (!hwmgr || !hwmgr->pm_en)
736 		return -EOPNOTSUPP;
737 
738 	if (!hwmgr->hwmgr_func->emit_clock_levels)
739 		return -ENOENT;
740 
741 	return hwmgr->hwmgr_func->emit_clock_levels(hwmgr, type, buf, offset);
742 }
743 
pp_dpm_print_clock_levels(void * handle,enum pp_clock_type type,char * buf)744 static int pp_dpm_print_clock_levels(void *handle,
745 		enum pp_clock_type type, char *buf)
746 {
747 	struct pp_hwmgr *hwmgr = handle;
748 
749 	if (!hwmgr || !hwmgr->pm_en)
750 		return -EINVAL;
751 
752 	if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
753 		pr_info_ratelimited("%s was not implemented.\n", __func__);
754 		return 0;
755 	}
756 	return hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
757 }
758 
pp_dpm_get_sclk_od(void * handle)759 static int pp_dpm_get_sclk_od(void *handle)
760 {
761 	struct pp_hwmgr *hwmgr = handle;
762 
763 	if (!hwmgr || !hwmgr->pm_en)
764 		return -EINVAL;
765 
766 	if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
767 		pr_info_ratelimited("%s was not implemented.\n", __func__);
768 		return 0;
769 	}
770 	return hwmgr->hwmgr_func->get_sclk_od(hwmgr);
771 }
772 
/* Set the engine-clock overdrive percentage via the ASIC backend. */
static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->set_sclk_od) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
}
787 
pp_dpm_get_mclk_od(void * handle)788 static int pp_dpm_get_mclk_od(void *handle)
789 {
790 	struct pp_hwmgr *hwmgr = handle;
791 
792 	if (!hwmgr || !hwmgr->pm_en)
793 		return -EINVAL;
794 
795 	if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
796 		pr_info_ratelimited("%s was not implemented.\n", __func__);
797 		return 0;
798 	}
799 	return hwmgr->hwmgr_func->get_mclk_od(hwmgr);
800 }
801 
/* Set the memory-clock overdrive percentage via the ASIC backend. */
static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->set_mclk_od) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
}
815 
/*
 * Generic sensor query.  A handful of software-maintained values are
 * answered directly from the hwmgr; everything else is forwarded to the
 * ASIC backend's read_sensor callback.
 */
static int pp_dpm_read_sensor(void *handle, int idx,
			      void *value, int *size)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !value)
		return -EINVAL;

	switch (idx) {
	/* The *100 suggests pstate clocks are stored in 100x units — TODO confirm. */
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)value) = hwmgr->pstate_sclk * 100;
		return 0;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)value) = hwmgr->pstate_mclk * 100;
		return 0;
	case AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK:
		*((uint32_t *)value) = hwmgr->pstate_sclk_peak * 100;
		return 0;
	case AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK:
		*((uint32_t *)value) = hwmgr->pstate_mclk_peak * 100;
		return 0;
	case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
		*((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMinRPM;
		return 0;
	case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
		*((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
		return 0;
	default:
		/* NOTE(review): assumes every backend provides read_sensor — no NULL check. */
		return hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
	}
}
847 
848 static struct amd_vce_state*
pp_dpm_get_vce_clock_state(void * handle,unsigned idx)849 pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
850 {
851 	struct pp_hwmgr *hwmgr = handle;
852 
853 	if (!hwmgr || !hwmgr->pm_en)
854 		return NULL;
855 
856 	if (idx < hwmgr->num_vce_state_tables)
857 		return &hwmgr->vce_states[idx];
858 	return NULL;
859 }
860 
pp_get_power_profile_mode(void * handle,char * buf)861 static int pp_get_power_profile_mode(void *handle, char *buf)
862 {
863 	struct pp_hwmgr *hwmgr = handle;
864 
865 	if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->get_power_profile_mode)
866 		return -EOPNOTSUPP;
867 	if (!buf)
868 		return -EINVAL;
869 
870 	return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
871 }
872 
/* Apply a power-profile mode (manual dpm level only). */
static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->set_power_profile_mode)
		return -EOPNOTSUPP;

	/* Only honored while the dpm level is forced to manual. */
	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		pr_debug("power profile setting is for manual dpm mode only.\n");
		return -EINVAL;
	}

	return hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
}
887 
/* Forward a fine-grain clock/voltage edit to the ASIC backend, if present. */
static int pp_set_fine_grain_clk_vol(void *handle, uint32_t type, long *input, uint32_t size)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	/* Silently succeed on backends without fine-grain support. */
	if (!hwmgr->hwmgr_func->set_fine_grain_clk_vol)
		return 0;

	return hwmgr->hwmgr_func->set_fine_grain_clk_vol(hwmgr, type, input, size);
}
900 
/* Forward an overdrive (ODN) dpm-table edit command to the ASIC backend. */
static int pp_odn_edit_dpm_table(void *handle, enum PP_OD_DPM_TABLE_COMMAND type,
				 long *input, uint32_t size)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->odn_edit_dpm_table) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->odn_edit_dpm_table(hwmgr, type, input, size);
}
916 
pp_dpm_set_mp1_state(void * handle,enum pp_mp1_state mp1_state)917 static int pp_dpm_set_mp1_state(void *handle, enum pp_mp1_state mp1_state)
918 {
919 	struct pp_hwmgr *hwmgr = handle;
920 
921 	if (!hwmgr)
922 		return -EINVAL;
923 
924 	if (!hwmgr->pm_en)
925 		return 0;
926 
927 	if (hwmgr->hwmgr_func->set_mp1_state)
928 		return hwmgr->hwmgr_func->set_mp1_state(hwmgr, mp1_state);
929 
930 	return 0;
931 }
932 
/*
 * Enable/disable one workload power profile.  Active profiles are tracked
 * as bits in workload_mask (bit position = workload_prority[type]); the
 * highest-priority set bit selects the workload_setting[] entry applied.
 */
static int pp_dpm_switch_power_profile(void *handle,
		enum PP_SMC_POWER_PROFILE type, bool en)
{
	struct pp_hwmgr *hwmgr = handle;
	long workload[1];
	uint32_t index;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	/* CUSTOM and beyond are not switchable through this interface. */
	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
		return -EINVAL;

	if (!en) {
		/* Clear the bit; mask may become 0, hence the extra index > 0 guard. */
		hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
		index = fls(hwmgr->workload_mask);
		index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
		workload[0] = hwmgr->workload_setting[index];
	} else {
		/* A bit was just set, so fls() is at least 1 here. */
		hwmgr->workload_mask |= (1 << hwmgr->workload_prority[type]);
		index = fls(hwmgr->workload_mask);
		index = index <= Workload_Policy_Max ? index - 1 : 0;
		workload[0] = hwmgr->workload_setting[index];
	}

	/* Compute workloads may additionally toggle power features off. */
	if (type == PP_SMC_POWER_PROFILE_COMPUTE &&
		hwmgr->hwmgr_func->disable_power_features_for_compute_performance) {
			if (hwmgr->hwmgr_func->disable_power_features_for_compute_performance(hwmgr, en))
				return -EINVAL;
	}

	/* Manual dpm mode keeps its explicitly-set profile; skip the update. */
	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, workload, 0);

	return 0;
}
974 
/*
 * Set the sustained power limit in the SMU.  A limit of 0 restores the
 * default; with overdrive enabled the ceiling is the default raised by
 * TDPODLimit percent.
 */
static int pp_set_power_limit(void *handle, uint32_t limit)
{
	struct pp_hwmgr *hwmgr = handle;
	uint32_t max_power_limit;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_power_limit == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	/* 0 means "reset to the board default limit". */
	if (limit == 0)
		limit = hwmgr->default_power_limit;

	max_power_limit = hwmgr->default_power_limit;
	if (hwmgr->od_enabled) {
		/* TDPODLimit is a percentage headroom on top of the default. */
		max_power_limit *= (100 + hwmgr->platform_descriptor.TDPODLimit);
		max_power_limit /= 100;
	}

	if (limit > max_power_limit)
		return -EINVAL;

	hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
	hwmgr->power_limit = limit;
	return 0;
}
1004 
/* Report the requested flavor of the sustained power limit. */
static int pp_get_power_limit(void *handle, uint32_t *limit,
			      enum pp_power_limit_level pp_limit_level,
			      enum pp_power_type power_type)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !limit)
		return -EINVAL;

	/* Powerplay only tracks the sustained power limit. */
	if (power_type != PP_PWR_TYPE_SUSTAINED)
		return -EOPNOTSUPP;

	switch (pp_limit_level) {
	case PP_PWR_LIMIT_CURRENT:
		*limit = hwmgr->power_limit;
		break;
	case PP_PWR_LIMIT_DEFAULT:
		*limit = hwmgr->default_power_limit;
		break;
	case PP_PWR_LIMIT_MAX:
		/* With overdrive, the ceiling is raised by TDPODLimit percent. */
		*limit = hwmgr->default_power_limit;
		if (hwmgr->od_enabled) {
			*limit *= (100 + hwmgr->platform_descriptor.TDPODLimit);
			*limit /= 100;
		}
		break;
	case PP_PWR_LIMIT_MIN:
		*limit = 0;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
1042 
pp_display_configuration_change(void * handle,const struct amd_pp_display_configuration * display_config)1043 static int pp_display_configuration_change(void *handle,
1044 	const struct amd_pp_display_configuration *display_config)
1045 {
1046 	struct pp_hwmgr *hwmgr = handle;
1047 
1048 	if (!hwmgr || !hwmgr->pm_en)
1049 		return -EINVAL;
1050 
1051 	phm_store_dal_configuration_data(hwmgr, display_config);
1052 	return 0;
1053 }
1054 
pp_get_display_power_level(void * handle,struct amd_pp_simple_clock_info * output)1055 static int pp_get_display_power_level(void *handle,
1056 		struct amd_pp_simple_clock_info *output)
1057 {
1058 	struct pp_hwmgr *hwmgr = handle;
1059 
1060 	if (!hwmgr || !hwmgr->pm_en || !output)
1061 		return -EINVAL;
1062 
1063 	return phm_get_dal_power_level(hwmgr, output);
1064 }
1065 
pp_get_current_clocks(void * handle,struct amd_pp_clock_info * clocks)1066 static int pp_get_current_clocks(void *handle,
1067 		struct amd_pp_clock_info *clocks)
1068 {
1069 	struct amd_pp_simple_clock_info simple_clocks = { 0 };
1070 	struct pp_clock_info hw_clocks;
1071 	struct pp_hwmgr *hwmgr = handle;
1072 	int ret = 0;
1073 
1074 	if (!hwmgr || !hwmgr->pm_en)
1075 		return -EINVAL;
1076 
1077 	phm_get_dal_power_level(hwmgr, &simple_clocks);
1078 
1079 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1080 					PHM_PlatformCaps_PowerContainment))
1081 		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
1082 					&hw_clocks, PHM_PerformanceLevelDesignation_PowerContainment);
1083 	else
1084 		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
1085 					&hw_clocks, PHM_PerformanceLevelDesignation_Activity);
1086 
1087 	if (ret) {
1088 		pr_debug("Error in phm_get_clock_info \n");
1089 		return -EINVAL;
1090 	}
1091 
1092 	clocks->min_engine_clock = hw_clocks.min_eng_clk;
1093 	clocks->max_engine_clock = hw_clocks.max_eng_clk;
1094 	clocks->min_memory_clock = hw_clocks.min_mem_clk;
1095 	clocks->max_memory_clock = hw_clocks.max_mem_clk;
1096 	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
1097 	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
1098 
1099 	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1100 	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1101 
1102 	if (simple_clocks.level == 0)
1103 		clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
1104 	else
1105 		clocks->max_clocks_state = simple_clocks.level;
1106 
1107 	if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks)) {
1108 		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1109 		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1110 	}
1111 	return 0;
1112 }
1113 
pp_get_clock_by_type(void * handle,enum amd_pp_clock_type type,struct amd_pp_clocks * clocks)1114 static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
1115 {
1116 	struct pp_hwmgr *hwmgr = handle;
1117 
1118 	if (!hwmgr || !hwmgr->pm_en)
1119 		return -EINVAL;
1120 
1121 	if (clocks == NULL)
1122 		return -EINVAL;
1123 
1124 	return phm_get_clock_by_type(hwmgr, type, clocks);
1125 }
1126 
pp_get_clock_by_type_with_latency(void * handle,enum amd_pp_clock_type type,struct pp_clock_levels_with_latency * clocks)1127 static int pp_get_clock_by_type_with_latency(void *handle,
1128 		enum amd_pp_clock_type type,
1129 		struct pp_clock_levels_with_latency *clocks)
1130 {
1131 	struct pp_hwmgr *hwmgr = handle;
1132 
1133 	if (!hwmgr || !hwmgr->pm_en || !clocks)
1134 		return -EINVAL;
1135 
1136 	return phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
1137 }
1138 
pp_get_clock_by_type_with_voltage(void * handle,enum amd_pp_clock_type type,struct pp_clock_levels_with_voltage * clocks)1139 static int pp_get_clock_by_type_with_voltage(void *handle,
1140 		enum amd_pp_clock_type type,
1141 		struct pp_clock_levels_with_voltage *clocks)
1142 {
1143 	struct pp_hwmgr *hwmgr = handle;
1144 
1145 	if (!hwmgr || !hwmgr->pm_en || !clocks)
1146 		return -EINVAL;
1147 
1148 	return phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
1149 }
1150 
pp_set_watermarks_for_clocks_ranges(void * handle,void * clock_ranges)1151 static int pp_set_watermarks_for_clocks_ranges(void *handle,
1152 		void *clock_ranges)
1153 {
1154 	struct pp_hwmgr *hwmgr = handle;
1155 
1156 	if (!hwmgr || !hwmgr->pm_en || !clock_ranges)
1157 		return -EINVAL;
1158 
1159 	return phm_set_watermarks_for_clocks_ranges(hwmgr,
1160 						    clock_ranges);
1161 }
1162 
pp_display_clock_voltage_request(void * handle,struct pp_display_clock_request * clock)1163 static int pp_display_clock_voltage_request(void *handle,
1164 		struct pp_display_clock_request *clock)
1165 {
1166 	struct pp_hwmgr *hwmgr = handle;
1167 
1168 	if (!hwmgr || !hwmgr->pm_en || !clock)
1169 		return -EINVAL;
1170 
1171 	return phm_display_clock_voltage_request(hwmgr, clock);
1172 }
1173 
pp_get_display_mode_validation_clocks(void * handle,struct amd_pp_simple_clock_info * clocks)1174 static int pp_get_display_mode_validation_clocks(void *handle,
1175 		struct amd_pp_simple_clock_info *clocks)
1176 {
1177 	struct pp_hwmgr *hwmgr = handle;
1178 	int ret = 0;
1179 
1180 	if (!hwmgr || !hwmgr->pm_en || !clocks)
1181 		return -EINVAL;
1182 
1183 	clocks->level = PP_DAL_POWERLEVEL_7;
1184 
1185 	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
1186 		ret = phm_get_max_high_clocks(hwmgr, clocks);
1187 
1188 	return ret;
1189 }
1190 
pp_dpm_powergate_mmhub(void * handle)1191 static int pp_dpm_powergate_mmhub(void *handle)
1192 {
1193 	struct pp_hwmgr *hwmgr = handle;
1194 
1195 	if (!hwmgr || !hwmgr->pm_en)
1196 		return -EINVAL;
1197 
1198 	if (hwmgr->hwmgr_func->powergate_mmhub == NULL) {
1199 		pr_info_ratelimited("%s was not implemented.\n", __func__);
1200 		return 0;
1201 	}
1202 
1203 	return hwmgr->hwmgr_func->powergate_mmhub(hwmgr);
1204 }
1205 
/* Gate/ungate the GFX block; silently succeeds when DPM is disabled. */
static int pp_dpm_powergate_gfx(void *handle, bool gate)
{
	struct pp_hwmgr *pp = handle;

	if (pp == NULL || !pp->pm_en)
		return 0;

	if (pp->hwmgr_func->powergate_gfx == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	return pp->hwmgr_func->powergate_gfx(pp, gate);
}
1220 
/* Gate/ungate the ACP block if the backend provides a hook. */
static void pp_dpm_powergate_acp(void *handle, bool gate)
{
	struct pp_hwmgr *pp = handle;

	if (pp == NULL || !pp->pm_en)
		return;

	if (pp->hwmgr_func->powergate_acp == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return;
	}

	pp->hwmgr_func->powergate_acp(pp, gate);
}
1235 
/*
 * Gate/ungate the SDMA block if the backend provides a hook.
 * NOTE(review): unlike pp_dpm_powergate_acp this deliberately does not
 * test pm_en — original behavior preserved.
 */
static void pp_dpm_powergate_sdma(void *handle, bool gate)
{
	struct pp_hwmgr *pp = handle;

	if (pp == NULL)
		return;

	if (pp->hwmgr_func->powergate_sdma == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return;
	}

	pp->hwmgr_func->powergate_sdma(pp, gate);
}
1250 
/* Route a powergating request to the per-IP-block handler. */
static int pp_set_powergating_by_smu(void *handle,
				uint32_t block_type, bool gate)
{
	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCN:
		pp_dpm_powergate_uvd(handle, gate);
		return 0;
	case AMD_IP_BLOCK_TYPE_VCE:
		pp_dpm_powergate_vce(handle, gate);
		return 0;
	case AMD_IP_BLOCK_TYPE_GMC:
		/*
		 * For now, this is only used on PICASSO.
		 * And only "gate" operation is supported.
		 */
		if (gate)
			pp_dpm_powergate_mmhub(handle);
		return 0;
	case AMD_IP_BLOCK_TYPE_GFX:
		/* GFX is the only block whose result is propagated. */
		return pp_dpm_powergate_gfx(handle, gate);
	case AMD_IP_BLOCK_TYPE_ACP:
		pp_dpm_powergate_acp(handle, gate);
		return 0;
	case AMD_IP_BLOCK_TYPE_SDMA:
		pp_dpm_powergate_sdma(handle, gate);
		return 0;
	default:
		return 0;
	}
}
1286 
pp_notify_smu_enable_pwe(void * handle)1287 static int pp_notify_smu_enable_pwe(void *handle)
1288 {
1289 	struct pp_hwmgr *hwmgr = handle;
1290 
1291 	if (!hwmgr || !hwmgr->pm_en)
1292 		return -EINVAL;
1293 
1294 	if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
1295 		pr_info_ratelimited("%s was not implemented.\n", __func__);
1296 		return -EINVAL;
1297 	}
1298 
1299 	hwmgr->hwmgr_func->smus_notify_pwe(hwmgr);
1300 
1301 	return 0;
1302 }
1303 
pp_enable_mgpu_fan_boost(void * handle)1304 static int pp_enable_mgpu_fan_boost(void *handle)
1305 {
1306 	struct pp_hwmgr *hwmgr = handle;
1307 
1308 	if (!hwmgr)
1309 		return -EINVAL;
1310 
1311 	if (!hwmgr->pm_en ||
1312 	     hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL)
1313 		return 0;
1314 
1315 	hwmgr->hwmgr_func->enable_mgpu_fan_boost(hwmgr);
1316 
1317 	return 0;
1318 }
1319 
/* Set the minimum deep-sleep DCEF clock. */
static int pp_set_min_deep_sleep_dcefclk(void *handle, uint32_t clock)
{
	struct pp_hwmgr *pp = handle;

	if (pp == NULL || !pp->pm_en)
		return -EINVAL;

	if (pp->hwmgr_func->set_min_deep_sleep_dcefclk == NULL) {
		pr_debug("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	pp->hwmgr_func->set_min_deep_sleep_dcefclk(pp, clock);

	return 0;
}
1336 
/* Set a hard minimum DCEF clock frequency. */
static int pp_set_hard_min_dcefclk_by_freq(void *handle, uint32_t clock)
{
	struct pp_hwmgr *pp = handle;

	if (pp == NULL || !pp->pm_en)
		return -EINVAL;

	if (pp->hwmgr_func->set_hard_min_dcefclk_by_freq == NULL) {
		pr_debug("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	pp->hwmgr_func->set_hard_min_dcefclk_by_freq(pp, clock);

	return 0;
}
1353 
/* Set a hard minimum fabric clock frequency. */
static int pp_set_hard_min_fclk_by_freq(void *handle, uint32_t clock)
{
	struct pp_hwmgr *pp = handle;

	if (pp == NULL || !pp->pm_en)
		return -EINVAL;

	if (pp->hwmgr_func->set_hard_min_fclk_by_freq == NULL) {
		pr_debug("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	pp->hwmgr_func->set_hard_min_fclk_by_freq(pp, clock);

	return 0;
}
1370 
/* Tell the hwmgr how many displays are currently active. */
static int pp_set_active_display_count(void *handle, uint32_t count)
{
	struct pp_hwmgr *pp = handle;

	if (pp == NULL || !pp->pm_en)
		return -EINVAL;

	return phm_set_active_display_count(pp, count);
}
1380 
pp_get_asic_baco_capability(void * handle)1381 static int pp_get_asic_baco_capability(void *handle)
1382 {
1383 	struct pp_hwmgr *hwmgr = handle;
1384 
1385 	if (!hwmgr)
1386 		return false;
1387 
1388 	if (!(hwmgr->not_vf && amdgpu_dpm) ||
1389 		!hwmgr->hwmgr_func->get_bamaco_support)
1390 		return false;
1391 
1392 	return hwmgr->hwmgr_func->get_bamaco_support(hwmgr);
1393 }
1394 
pp_get_asic_baco_state(void * handle,int * state)1395 static int pp_get_asic_baco_state(void *handle, int *state)
1396 {
1397 	struct pp_hwmgr *hwmgr = handle;
1398 
1399 	if (!hwmgr)
1400 		return -EINVAL;
1401 
1402 	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_state)
1403 		return 0;
1404 
1405 	hwmgr->hwmgr_func->get_asic_baco_state(hwmgr, (enum BACO_STATE *)state);
1406 
1407 	return 0;
1408 }
1409 
pp_set_asic_baco_state(void * handle,int state)1410 static int pp_set_asic_baco_state(void *handle, int state)
1411 {
1412 	struct pp_hwmgr *hwmgr = handle;
1413 
1414 	if (!hwmgr)
1415 		return -EINVAL;
1416 
1417 	if (!(hwmgr->not_vf && amdgpu_dpm) ||
1418 		!hwmgr->hwmgr_func->set_asic_baco_state)
1419 		return 0;
1420 
1421 	hwmgr->hwmgr_func->set_asic_baco_state(hwmgr, (enum BACO_STATE)state);
1422 
1423 	return 0;
1424 }
1425 
pp_get_ppfeature_status(void * handle,char * buf)1426 static int pp_get_ppfeature_status(void *handle, char *buf)
1427 {
1428 	struct pp_hwmgr *hwmgr = handle;
1429 
1430 	if (!hwmgr || !hwmgr->pm_en || !buf)
1431 		return -EINVAL;
1432 
1433 	if (hwmgr->hwmgr_func->get_ppfeature_status == NULL) {
1434 		pr_info_ratelimited("%s was not implemented.\n", __func__);
1435 		return -EINVAL;
1436 	}
1437 
1438 	return hwmgr->hwmgr_func->get_ppfeature_status(hwmgr, buf);
1439 }
1440 
/* Apply a new powerplay feature enable mask. */
static int pp_set_ppfeature_status(void *handle, uint64_t ppfeature_masks)
{
	struct pp_hwmgr *pp = handle;

	if (pp == NULL || !pp->pm_en)
		return -EINVAL;

	if (pp->hwmgr_func->set_ppfeature_status == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	return pp->hwmgr_func->set_ppfeature_status(pp, ppfeature_masks);
}
1455 
pp_asic_reset_mode_2(void * handle)1456 static int pp_asic_reset_mode_2(void *handle)
1457 {
1458 	struct pp_hwmgr *hwmgr = handle;
1459 
1460 	if (!hwmgr || !hwmgr->pm_en)
1461 		return -EINVAL;
1462 
1463 	if (hwmgr->hwmgr_func->asic_reset == NULL) {
1464 		pr_info_ratelimited("%s was not implemented.\n", __func__);
1465 		return -EINVAL;
1466 	}
1467 
1468 	return hwmgr->hwmgr_func->asic_reset(hwmgr, SMU_ASIC_RESET_MODE_2);
1469 }
1470 
/* Acquire or release the SMU-arbitrated I2C bus. */
static int pp_smu_i2c_bus_access(void *handle, bool acquire)
{
	struct pp_hwmgr *pp = handle;

	if (pp == NULL || !pp->pm_en)
		return -EINVAL;

	if (pp->hwmgr_func->smu_i2c_bus_access == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	return pp->hwmgr_func->smu_i2c_bus_access(pp, acquire);
}
1485 
pp_set_df_cstate(void * handle,enum pp_df_cstate state)1486 static int pp_set_df_cstate(void *handle, enum pp_df_cstate state)
1487 {
1488 	struct pp_hwmgr *hwmgr = handle;
1489 
1490 	if (!hwmgr)
1491 		return -EINVAL;
1492 
1493 	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_df_cstate)
1494 		return 0;
1495 
1496 	hwmgr->hwmgr_func->set_df_cstate(hwmgr, state);
1497 
1498 	return 0;
1499 }
1500 
/* Forward an XGMI pstate request; no-op if unsupported. */
static int pp_set_xgmi_pstate(void *handle, uint32_t pstate)
{
	struct pp_hwmgr *pp = handle;

	if (pp == NULL)
		return -EINVAL;

	if (!pp->pm_en || pp->hwmgr_func->set_xgmi_pstate == NULL)
		return 0;

	pp->hwmgr_func->set_xgmi_pstate(pp, pstate);

	return 0;
}
1515 
pp_get_gpu_metrics(void * handle,void ** table)1516 static ssize_t pp_get_gpu_metrics(void *handle, void **table)
1517 {
1518 	struct pp_hwmgr *hwmgr = handle;
1519 
1520 	if (!hwmgr)
1521 		return -EINVAL;
1522 
1523 	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_gpu_metrics)
1524 		return -EOPNOTSUPP;
1525 
1526 	return hwmgr->hwmgr_func->get_gpu_metrics(hwmgr, table);
1527 }
1528 
/* Notify the backend of a GFX state change. */
static int pp_gfx_state_change_set(void *handle, uint32_t state)
{
	struct pp_hwmgr *pp = handle;

	if (pp == NULL || !pp->pm_en)
		return -EINVAL;

	if (pp->hwmgr_func->gfx_state_change == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	pp->hwmgr_func->gfx_state_change(pp, state);

	return 0;
}
1544 
/*
 * pp_get_prv_buffer_details - expose the SMU private buffer mapping.
 *
 * Fix: validate @handle before dereferencing it — the original read
 * hwmgr->adev before any NULL check, unlike every sibling entry point.
 *
 * On success *addr/*size describe the kmapped buffer, or NULL/0 when
 * no buffer was allocated. Returns 0 or a negative errno from
 * amdgpu_bo_kmap.
 */
static int pp_get_prv_buffer_details(void *handle, void **addr, size_t *size)
{
	struct pp_hwmgr *hwmgr = handle;
	struct amdgpu_device *adev;
	int err;

	if (!hwmgr || !addr || !size)
		return -EINVAL;

	adev = hwmgr->adev;
	*addr = NULL;
	*size = 0;
	if (adev->pm.smu_prv_buffer) {
		err = amdgpu_bo_kmap(adev->pm.smu_prv_buffer, addr);
		if (err)
			return err;
		*size = adev->pm.smu_prv_buffer_size;
	}

	return 0;
}
1565 
pp_pm_compute_clocks(void * handle)1566 static void pp_pm_compute_clocks(void *handle)
1567 {
1568 	struct pp_hwmgr *hwmgr = handle;
1569 	struct amdgpu_device *adev = hwmgr->adev;
1570 
1571 	if (!adev->dc_enabled) {
1572 		amdgpu_dpm_get_active_displays(adev);
1573 		adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
1574 		adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
1575 		adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
1576 		/* we have issues with mclk switching with
1577 		 * refresh rates over 120 hz on the non-DC code.
1578 		 */
1579 		if (adev->pm.pm_display_cfg.vrefresh > 120)
1580 			adev->pm.pm_display_cfg.min_vblank_time = 0;
1581 
1582 		pp_display_configuration_change(handle,
1583 						&adev->pm.pm_display_cfg);
1584 	}
1585 
1586 	pp_dpm_dispatch_tasks(handle,
1587 			      AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
1588 			      NULL);
1589 }
1590 
/*
 * Powerplay implementation of the amd_pm_funcs dispatch table.  The
 * amdgpu core and the display code call the hwmgr-based backends
 * exclusively through these hooks; each entry is one of the static
 * pp_* wrappers above.
 */
static const struct amd_pm_funcs pp_dpm_funcs = {
	.load_firmware = pp_dpm_load_fw,
	.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
	.force_performance_level = pp_dpm_force_performance_level,
	.get_performance_level = pp_dpm_get_performance_level,
	.get_current_power_state = pp_dpm_get_current_power_state,
	.dispatch_tasks = pp_dpm_dispatch_tasks,
	.set_fan_control_mode = pp_dpm_set_fan_control_mode,
	.get_fan_control_mode = pp_dpm_get_fan_control_mode,
	.set_fan_speed_pwm = pp_dpm_set_fan_speed_pwm,
	.get_fan_speed_pwm = pp_dpm_get_fan_speed_pwm,
	.get_fan_speed_rpm = pp_dpm_get_fan_speed_rpm,
	.set_fan_speed_rpm = pp_dpm_set_fan_speed_rpm,
	.get_pp_num_states = pp_dpm_get_pp_num_states,
	.get_pp_table = pp_dpm_get_pp_table,
	.set_pp_table = pp_dpm_set_pp_table,
	.force_clock_level = pp_dpm_force_clock_level,
	.emit_clock_levels = pp_dpm_emit_clock_levels,
	.print_clock_levels = pp_dpm_print_clock_levels,
	.get_sclk_od = pp_dpm_get_sclk_od,
	.set_sclk_od = pp_dpm_set_sclk_od,
	.get_mclk_od = pp_dpm_get_mclk_od,
	.set_mclk_od = pp_dpm_set_mclk_od,
	.read_sensor = pp_dpm_read_sensor,
	.get_vce_clock_state = pp_dpm_get_vce_clock_state,
	.switch_power_profile = pp_dpm_switch_power_profile,
	.set_clockgating_by_smu = pp_set_clockgating_by_smu,
	.set_powergating_by_smu = pp_set_powergating_by_smu,
	.get_power_profile_mode = pp_get_power_profile_mode,
	.set_power_profile_mode = pp_set_power_profile_mode,
	.set_fine_grain_clk_vol = pp_set_fine_grain_clk_vol,
	.odn_edit_dpm_table = pp_odn_edit_dpm_table,
	.set_mp1_state = pp_dpm_set_mp1_state,
	.set_power_limit = pp_set_power_limit,
	.get_power_limit = pp_get_power_limit,
/* export to DC */
	.get_sclk = pp_dpm_get_sclk,
	.get_mclk = pp_dpm_get_mclk,
	.display_configuration_change = pp_display_configuration_change,
	.get_display_power_level = pp_get_display_power_level,
	.get_current_clocks = pp_get_current_clocks,
	.get_clock_by_type = pp_get_clock_by_type,
	.get_clock_by_type_with_latency = pp_get_clock_by_type_with_latency,
	.get_clock_by_type_with_voltage = pp_get_clock_by_type_with_voltage,
	.set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges,
	.display_clock_voltage_request = pp_display_clock_voltage_request,
	.get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
	.notify_smu_enable_pwe = pp_notify_smu_enable_pwe,
	.enable_mgpu_fan_boost = pp_enable_mgpu_fan_boost,
	.set_active_display_count = pp_set_active_display_count,
	.set_min_deep_sleep_dcefclk = pp_set_min_deep_sleep_dcefclk,
	.set_hard_min_dcefclk_by_freq = pp_set_hard_min_dcefclk_by_freq,
	.set_hard_min_fclk_by_freq = pp_set_hard_min_fclk_by_freq,
	.get_asic_baco_capability = pp_get_asic_baco_capability,
	.get_asic_baco_state = pp_get_asic_baco_state,
	.set_asic_baco_state = pp_set_asic_baco_state,
	.get_ppfeature_status = pp_get_ppfeature_status,
	.set_ppfeature_status = pp_set_ppfeature_status,
	.asic_reset_mode_2 = pp_asic_reset_mode_2,
	.smu_i2c_bus_access = pp_smu_i2c_bus_access,
	.set_df_cstate = pp_set_df_cstate,
	.set_xgmi_pstate = pp_set_xgmi_pstate,
	.get_gpu_metrics = pp_get_gpu_metrics,
	.gfx_state_change_set = pp_gfx_state_change_set,
	.get_smu_prv_buf_details = pp_get_prv_buffer_details,
	.pm_compute_clocks = pp_pm_compute_clocks,
};
1658