/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#define SWSMU_CODE_LAYER_L1

#include <linux/firmware.h>
#include <linux/pci.h>
#include <linux/power_supply.h>
#include <linux/reboot.h>

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
#include "atom.h"
#include "arcturus_ppt.h"
#include "navi10_ppt.h"
#include "sienna_cichlid_ppt.h"
#include "renoir_ppt.h"
#include "vangogh_ppt.h"
#include "aldebaran_ppt.h"
#include "yellow_carp_ppt.h"
#include "cyan_skillfish_ppt.h"
#include "smu_v13_0_0_ppt.h"
#include "smu_v13_0_4_ppt.h"
#include "smu_v13_0_5_ppt.h"
#include "smu_v13_0_6_ppt.h"
#include "smu_v13_0_7_ppt.h"
#include "amd_pcie.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

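/*
 * Example: the device-aware helpers identify the GPU instance on
 * multi-GPU systems, e.g.
 *
 *	dev_err(smu->adev->dev, "SMC is not ready\n");
 *
 * rather than the anonymous
 *
 *	pr_err("SMC is not ready\n");
 */
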
static const struct amd_pm_funcs swsmu_pm_funcs;
static int smu_force_smuclk_levels(struct smu_context *smu,
				   enum smu_clk_type clk_type,
				   uint32_t mask);
static int smu_handle_task(struct smu_context *smu,
			   enum amd_dpm_forced_level level,
			   enum amd_pp_task task_id);
static int smu_reset(struct smu_context *smu);
static int smu_set_fan_speed_pwm(void *handle, u32 speed);
static int smu_set_fan_control_mode(void *handle, u32 value);
static int smu_set_power_limit(void *handle, uint32_t limit);
static int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
static int smu_set_mp1_state(void *handle, enum pp_mp1_state mp1_state);

static int smu_sys_get_pp_feature_mask(void *handle,
				       char *buf)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	return smu_get_pp_feature_mask(smu, buf);
}

static int smu_sys_set_pp_feature_mask(void *handle,
				       uint64_t new_mask)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	return smu_set_pp_feature_mask(smu, new_mask);
}

int smu_set_residency_gfxoff(struct smu_context *smu, bool value)
{
	if (!smu->ppt_funcs->set_gfx_off_residency)
		return -EINVAL;

	return smu_set_gfx_off_residency(smu, value);
}

int smu_get_residency_gfxoff(struct smu_context *smu, u32 *value)
{
	if (!smu->ppt_funcs->get_gfx_off_residency)
		return -EINVAL;

	return smu_get_gfx_off_residency(smu, value);
}

int smu_get_entrycount_gfxoff(struct smu_context *smu, u64 *value)
{
	if (!smu->ppt_funcs->get_gfx_off_entrycount)
		return -EINVAL;

	return smu_get_gfx_off_entrycount(smu, value);
}

int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value)
{
	if (!smu->ppt_funcs->get_gfx_off_status)
		return -EINVAL;

	*value = smu_get_gfx_off_status(smu);

	return 0;
}

int smu_set_soft_freq_range(struct smu_context *smu,
			    enum smu_clk_type clk_type,
			    uint32_t min,
			    uint32_t max)
{
	int ret = 0;

	if (smu->ppt_funcs->set_soft_freq_limited_range)
		ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
								  clk_type,
								  min,
								  max);

	return ret;
}

int smu_get_dpm_freq_range(struct smu_context *smu,
			   enum smu_clk_type clk_type,
			   uint32_t *min,
			   uint32_t *max)
{
	int ret = -ENOTSUPP;

	if (!min && !max)
		return -EINVAL;

	if (smu->ppt_funcs->get_dpm_ultimate_freq)
		ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu,
							    clk_type,
							    min,
							    max);

	return ret;
}

int smu_set_gfx_power_up_by_imu(struct smu_context *smu)
{
	int ret = 0;
	struct amdgpu_device *adev = smu->adev;

	if (smu->ppt_funcs->set_gfx_power_up_by_imu) {
		ret = smu->ppt_funcs->set_gfx_power_up_by_imu(smu);
		if (ret)
			dev_err(adev->dev, "Failed to enable gfx imu!\n");
	}
	return ret;
}

static u32 smu_get_mclk(void *handle, bool low)
{
	struct smu_context *smu = handle;
	uint32_t clk_freq;
	int ret = 0;

	ret = smu_get_dpm_freq_range(smu, SMU_UCLK,
				     low ? &clk_freq : NULL,
				     !low ? &clk_freq : NULL);
	if (ret)
		return 0;
	return clk_freq * 100;
}

static u32 smu_get_sclk(void *handle, bool low)
{
	struct smu_context *smu = handle;
	uint32_t clk_freq;
	int ret = 0;

	ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK,
				     low ? &clk_freq : NULL,
				     !low ? &clk_freq : NULL);
	if (ret)
		return 0;
	return clk_freq * 100;
}

static int smu_set_gfx_imu_enable(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		return 0;

	if (amdgpu_in_reset(smu->adev) || adev->in_s0ix)
		return 0;

	return smu_set_gfx_power_up_by_imu(smu);
}

static int smu_dpm_set_vcn_enable(struct smu_context *smu,
				  bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!smu->ppt_funcs->dpm_set_vcn_enable)
		return 0;

	if (atomic_read(&power_gate->vcn_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->vcn_gated, !enable);

	return ret;
}

static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
				   bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!smu->ppt_funcs->dpm_set_jpeg_enable)
		return 0;

	if (atomic_read(&power_gate->jpeg_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->jpeg_gated, !enable);

	return ret;
}

/**
 * smu_dpm_set_power_gate - power gate/ungate the specific IP block
 *
 * @handle:        smu_context pointer
 * @block_type: the IP block to power gate/ungate
 * @gate:       to power gate if true, ungate otherwise
 *
 * This API uses no smu->mutex lock protection due to:
 * 1. It is either called by other IP blocks (gfx/sdma/vcn/uvd/vce),
 *    in which case the caller guarantees it is race condition free.
 * 2. Or it is called on a user setting request of power_dpm_force_performance_level.
 *    In that case, the smu->mutex lock protection is already enforced on
 *    the parent API smu_force_performance_level of the call path.
 */
static int smu_dpm_set_power_gate(void *handle,
				  uint32_t block_type,
				  bool gate)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) {
		dev_WARN(smu->adev->dev,
			 "SMU uninitialized but power %s requested for %u!\n",
			 gate ? "gate" : "ungate", block_type);
		return -EOPNOTSUPP;
	}

	switch (block_type) {
	/*
	 * Some legacy code of amdgpu_vcn.c and vcn_v2*.c still uses
	 * AMD_IP_BLOCK_TYPE_UVD for VCN. So, here both of them are kept.
	 */
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCN:
		ret = smu_dpm_set_vcn_enable(smu, !gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s VCN!\n",
				gate ? "gate" : "ungate");
		break;
	case AMD_IP_BLOCK_TYPE_GFX:
		ret = smu_gfx_off_control(smu, gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to %s gfxoff!\n",
				gate ? "enable" : "disable");
		break;
	case AMD_IP_BLOCK_TYPE_SDMA:
		ret = smu_powergate_sdma(smu, gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s SDMA!\n",
				gate ? "gate" : "ungate");
		break;
	case AMD_IP_BLOCK_TYPE_JPEG:
		ret = smu_dpm_set_jpeg_enable(smu, !gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s JPEG!\n",
				gate ? "gate" : "ungate");
		break;
	default:
		dev_err(smu->adev->dev, "Unsupported block type!\n");
		return -EINVAL;
	}

	return ret;
}

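/*
 * Example (illustrative sketch, not driver code): other IP blocks reach
 * the entry point above through the amd_pm_funcs table rather than by
 * calling it directly, e.g. via the amdgpu_dpm wrapper:
 *
 *	amdgpu_dpm_set_powergating_by_smu(adev,
 *					  AMD_IP_BLOCK_TYPE_VCN,
 *					  false);
 *
 * which ends up in smu_dpm_set_power_gate(handle, block_type, gate)
 * through swsmu_pm_funcs.
 */
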
/**
 * smu_set_user_clk_dependencies - set user profile clock dependencies
 *
 * @smu:	smu_context pointer
 * @clk:	enum smu_clk_type type
 *
 * Enable/Disable the clock dependency for the @clk type.
 */
static void smu_set_user_clk_dependencies(struct smu_context *smu, enum smu_clk_type clk)
{
	if (smu->adev->in_suspend)
		return;

	if (clk == SMU_MCLK) {
		smu->user_dpm_profile.clk_dependency = 0;
		smu->user_dpm_profile.clk_dependency = BIT(SMU_FCLK) | BIT(SMU_SOCCLK);
	} else if (clk == SMU_FCLK) {
		/* MCLK takes precedence over FCLK */
		if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
			return;

		smu->user_dpm_profile.clk_dependency = 0;
		smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_SOCCLK);
	} else if (clk == SMU_SOCCLK) {
		/* MCLK takes precedence over SOCCLK */
		if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
			return;

		smu->user_dpm_profile.clk_dependency = 0;
		smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_FCLK);
	} else
		/* Add clk dependencies here, if any */
		return;
}

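/*
 * Worked example: after a user forces SMU_MCLK levels,
 * clk_dependency == BIT(SMU_FCLK) | BIT(SMU_SOCCLK), so the saved FCLK
 * and SOCCLK masks are skipped by smu_restore_dpm_user_profile() below.
 * A subsequent SMU_FCLK or SMU_SOCCLK request is ignored while that
 * MCLK dependency is set, since MCLK takes precedence.
 */
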
/**
 * smu_restore_dpm_user_profile - reinstate user dpm profile
 *
 * @smu:	smu_context pointer
 *
 * Restore the saved user power configurations, including power limit,
 * clock frequencies, fan control mode and fan speed.
 */
static void smu_restore_dpm_user_profile(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (!smu->adev->in_suspend)
		return;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return;

	/* Enable restore flag */
	smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE;

	/* set the user dpm power limit */
	if (smu->user_dpm_profile.power_limit) {
		ret = smu_set_power_limit(smu, smu->user_dpm_profile.power_limit);
		if (ret)
			dev_err(smu->adev->dev, "Failed to set power limit value\n");
	}

	/* set the user dpm clock configurations */
	if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
		enum smu_clk_type clk_type;

		for (clk_type = 0; clk_type < SMU_CLK_COUNT; clk_type++) {
			/*
			 * Iterate over smu clk types and force the saved user clk
			 * configs; skip if the clock dependency is enabled
			 */
			if (!(smu->user_dpm_profile.clk_dependency & BIT(clk_type)) &&
					smu->user_dpm_profile.clk_mask[clk_type]) {
				ret = smu_force_smuclk_levels(smu, clk_type,
						smu->user_dpm_profile.clk_mask[clk_type]);
				if (ret)
					dev_err(smu->adev->dev,
						"Failed to set clock type = %d\n", clk_type);
			}
		}
	}

	/* set the user dpm fan configurations */
	if (smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_MANUAL ||
	    smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_NONE) {
		ret = smu_set_fan_control_mode(smu, smu->user_dpm_profile.fan_mode);
		if (ret != -EOPNOTSUPP) {
			smu->user_dpm_profile.fan_speed_pwm = 0;
			smu->user_dpm_profile.fan_speed_rpm = 0;
			smu->user_dpm_profile.fan_mode = AMD_FAN_CTRL_AUTO;
			dev_err(smu->adev->dev, "Failed to set manual fan control mode\n");
		}

		if (smu->user_dpm_profile.fan_speed_pwm) {
			ret = smu_set_fan_speed_pwm(smu, smu->user_dpm_profile.fan_speed_pwm);
			if (ret != -EOPNOTSUPP)
				dev_err(smu->adev->dev, "Failed to set manual fan speed in pwm\n");
		}

		if (smu->user_dpm_profile.fan_speed_rpm) {
			ret = smu_set_fan_speed_rpm(smu, smu->user_dpm_profile.fan_speed_rpm);
			if (ret != -EOPNOTSUPP)
				dev_err(smu->adev->dev, "Failed to set manual fan speed in rpm\n");
		}
	}

	/* Restore user customized OD settings */
	if (smu->user_dpm_profile.user_od) {
		if (smu->ppt_funcs->restore_user_od_settings) {
			ret = smu->ppt_funcs->restore_user_od_settings(smu);
			if (ret)
				dev_err(smu->adev->dev, "Failed to upload customized OD settings\n");
		}
	}

	/* Disable restore flag */
	smu->user_dpm_profile.flags &= ~SMU_DPM_USER_PROFILE_RESTORE;
}

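/*
 * Example flow (a sketch, assuming the usual hwmon path): a manual fan
 * speed set before suspend, e.g.
 *
 *	echo 128 > /sys/class/hwmon/hwmonX/pwm1
 *
 * is cached in smu->user_dpm_profile.fan_speed_pwm and reapplied here
 * on resume, since smu_late_init() calls smu_restore_dpm_user_profile().
 */
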
static int smu_get_power_num_states(void *handle,
				    struct pp_states_info *state_info)
{
	if (!state_info)
		return -EINVAL;

	/* power states are not supported; report a single default state */
	memset(state_info, 0, sizeof(struct pp_states_info));
	state_info->nums = 1;
	state_info->states[0] = POWER_STATE_TYPE_DEFAULT;

	return 0;
}

bool is_support_sw_smu(struct amdgpu_device *adev)
{
	/* vega20 is 11.0.2, but it's supported via the powerplay code */
	if (adev->asic_type == CHIP_VEGA20)
		return false;

	if (adev->ip_versions[MP1_HWIP][0] >= IP_VERSION(11, 0, 0))
		return true;

	return false;
}

bool is_support_cclk_dpm(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT))
		return false;

	return true;
}

static int smu_sys_get_pp_table(void *handle,
				char **table)
{
	struct smu_context *smu = handle;
	struct smu_table_context *smu_table = &smu->smu_table;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
		return -EINVAL;

	if (smu_table->hardcode_pptable)
		*table = smu_table->hardcode_pptable;
	else
		*table = smu_table->power_play_table;

	return smu_table->power_play_table_size;
}

static int smu_sys_set_pp_table(void *handle,
				const char *buf,
				size_t size)
{
	struct smu_context *smu = handle;
	struct smu_table_context *smu_table = &smu->smu_table;
	ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (header->usStructureSize != size) {
		dev_err(smu->adev->dev, "pp table size not matched!\n");
		return -EIO;
	}

	if (!smu_table->hardcode_pptable) {
		smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
		if (!smu_table->hardcode_pptable)
			return -ENOMEM;
	}

	memcpy(smu_table->hardcode_pptable, buf, size);
	smu_table->power_play_table = smu_table->hardcode_pptable;
	smu_table->power_play_table_size = size;

	/*
	 * A special hw_fini action (for Navi1x, the DPM disablement will be
	 * skipped) may be needed for custom pptable uploading.
	 */
	smu->uploading_custom_pp_table = true;

	ret = smu_reset(smu);
	if (ret)
		dev_info(smu->adev->dev, "smu reset failed, ret = %d\n", ret);

	smu->uploading_custom_pp_table = false;

	return ret;
}

static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
	int ret = 0;

	/*
	 * With SCPM enabled, setting the allowed feature masks (via
	 * PPSMC_MSG_SetAllowedFeaturesMaskLow/High) is not permitted.
	 * That means there is no way to let PMFW know the settings below.
	 * Thus, we just assume all features are allowed under
	 * such a scenario.
	 */
	if (smu->adev->scpm_enabled) {
		bitmap_fill(feature->allowed, SMU_FEATURE_MAX);
		return 0;
	}

	bitmap_zero(feature->allowed, SMU_FEATURE_MAX);

	ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
					     SMU_FEATURE_MAX/32);
	if (ret)
		return ret;

	bitmap_or(feature->allowed, feature->allowed,
		      (unsigned long *)allowed_feature_mask,
		      feature->feature_num);

	return ret;
}

static int smu_set_funcs(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
		smu->od_enabled = true;

	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
		navi10_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
		sienna_cichlid_set_ppt_funcs(smu);
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		renoir_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 5, 0):
		vangogh_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 8):
		yellow_carp_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 4):
	case IP_VERSION(13, 0, 11):
		smu_v13_0_4_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 5):
		smu_v13_0_5_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 0, 8):
		cyan_skillfish_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 0, 2):
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		arcturus_set_ppt_funcs(smu);
		/* OD is not supported on Arcturus */
		smu->od_enabled = false;
		break;
	case IP_VERSION(13, 0, 2):
		aldebaran_set_ppt_funcs(smu);
		/* Enable pp_od_clk_voltage node */
		smu->od_enabled = true;
		break;
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 10):
		smu_v13_0_0_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 6):
		smu_v13_0_6_set_ppt_funcs(smu);
		/* Enable pp_od_clk_voltage node */
		smu->od_enabled = true;
		break;
	case IP_VERSION(13, 0, 7):
		smu_v13_0_7_set_ppt_funcs(smu);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int smu_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu;
	int r;

	smu = kzalloc(sizeof(struct smu_context), GFP_KERNEL);
	if (!smu)
		return -ENOMEM;

	smu->adev = adev;
	smu->pm_enabled = !!amdgpu_dpm;
	smu->is_apu = false;
	smu->smu_baco.state = SMU_BACO_STATE_EXIT;
	smu->smu_baco.platform_support = false;
	smu->user_dpm_profile.fan_mode = -1;

	mutex_init(&smu->message_lock);

	adev->powerplay.pp_handle = smu;
	adev->powerplay.pp_funcs = &swsmu_pm_funcs;

	r = smu_set_funcs(adev);
	if (r)
		return r;
	return smu_init_microcode(smu);
}

static int smu_set_default_dpm_table(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int vcn_gate, jpeg_gate;
	int ret = 0;

	if (!smu->ppt_funcs->set_default_dpm_table)
		return 0;

	vcn_gate = atomic_read(&power_gate->vcn_gated);
	jpeg_gate = atomic_read(&power_gate->jpeg_gated);

	ret = smu_dpm_set_vcn_enable(smu, true);
	if (ret)
		return ret;

	ret = smu_dpm_set_jpeg_enable(smu, true);
	if (ret)
		goto err_out;

	ret = smu->ppt_funcs->set_default_dpm_table(smu);
	if (ret)
		dev_err(smu->adev->dev,
			"Failed to setup default dpm clock tables!\n");

	smu_dpm_set_jpeg_enable(smu, !jpeg_gate);
err_out:
	smu_dpm_set_vcn_enable(smu, !vcn_gate);
	return ret;
}

static int smu_apply_default_config_table_settings(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	ret = smu_get_default_config_table_settings(smu,
						    &adev->pm.config_table);
	if (ret)
		return ret;

	return smu_set_config_table(smu, &adev->pm.config_table);
}

static int smu_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	smu_set_fine_grain_gfx_freq_parameters(smu);

	if (!smu->pm_enabled)
		return 0;

	ret = smu_post_init(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to post smu init!\n");
		return ret;
	}

	/*
	 * Explicitly notify PMFW of the power mode the system is in, since
	 * PMFW may have booted the ASIC with a different mode.
	 * For ASICs supporting the ACDC switch via gpio, PMFW will
	 * handle the switch automatically. Driver involvement
	 * is unnecessary.
	 */
	adev->pm.ac_power = power_supply_is_system_supplied() > 0;
	smu_set_ac_dc(smu);

	if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 1)) ||
	    (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 3)))
		return 0;

	if (!amdgpu_sriov_vf(adev) || smu->od_enabled) {
		ret = smu_set_default_od_settings(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to setup default OD settings!\n");
			return ret;
		}
	}

	ret = smu_populate_umd_state_clk(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to populate UMD state clocks!\n");
		return ret;
	}

	ret = smu_get_asic_power_limits(smu,
					&smu->current_power_limit,
					&smu->default_power_limit,
					&smu->max_power_limit);
	if (ret) {
		dev_err(adev->dev, "Failed to get asic power limits!\n");
		return ret;
	}

	if (!amdgpu_sriov_vf(adev))
		smu_get_unique_id(smu);

	smu_get_fan_parameters(smu);

	smu_handle_task(smu,
			smu->smu_dpm.dpm_level,
			AMD_PP_TASK_COMPLETE_INIT);

	ret = smu_apply_default_config_table_settings(smu);
	if (ret && (ret != -EOPNOTSUPP)) {
		dev_err(adev->dev, "Failed to apply default DriverSmuConfig settings!\n");
		return ret;
	}

	smu_restore_dpm_user_profile(smu);

	return 0;
}

static int smu_init_fb_allocations(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);
	uint32_t max_table_size = 0;
	int ret, i;

	/* VRAM allocation for tool table */
	if (tables[SMU_TABLE_PMSTATUSLOG].size) {
		ret = amdgpu_bo_create_kernel(adev,
					      tables[SMU_TABLE_PMSTATUSLOG].size,
					      tables[SMU_TABLE_PMSTATUSLOG].align,
					      tables[SMU_TABLE_PMSTATUSLOG].domain,
					      &tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
		if (ret) {
			dev_err(adev->dev, "VRAM allocation for tool table failed!\n");
			return ret;
		}
	}

	driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT;
	/* VRAM allocation for driver table */
	for (i = 0; i < SMU_TABLE_COUNT; i++) {
		if (tables[i].size == 0)
			continue;

		/* If one of the tables has VRAM domain restriction, keep it in
		 * VRAM
		 */
		if ((tables[i].domain &
		    (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) ==
			    AMDGPU_GEM_DOMAIN_VRAM)
			driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;

		if (i == SMU_TABLE_PMSTATUSLOG)
			continue;

		if (max_table_size < tables[i].size)
			max_table_size = tables[i].size;
	}

	driver_table->size = max_table_size;
	driver_table->align = PAGE_SIZE;

	ret = amdgpu_bo_create_kernel(adev,
				      driver_table->size,
				      driver_table->align,
				      driver_table->domain,
				      &driver_table->bo,
				      &driver_table->mc_address,
				      &driver_table->cpu_addr);
	if (ret) {
		dev_err(adev->dev, "VRAM allocation for driver table failed!\n");
		if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
			amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
	}

	return ret;
}

static int smu_fini_fb_allocations(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);

	if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
		amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
				      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
				      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);

	amdgpu_bo_free_kernel(&driver_table->bo,
			      &driver_table->mc_address,
			      &driver_table->cpu_addr);

	return 0;
}

/**
 * smu_alloc_memory_pool - allocate memory pool in the system memory
 *
 * @smu: smu_context pointer
 *
 * This memory pool will be used for SMC use. Its location is reported
 * to the firmware, which can be notified of changes through the
 * SetSystemVirtualDramAddr and DramLogSetDramAddr messages.
 *
 * Returns 0 on success, error on failure.
 */
static int smu_alloc_memory_pool(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	uint64_t pool_size = smu->pool_size;
	int ret = 0;

	if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
		return ret;

	memory_pool->size = pool_size;
	memory_pool->align = PAGE_SIZE;
	memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;

	switch (pool_size) {
	case SMU_MEMORY_POOL_SIZE_256_MB:
	case SMU_MEMORY_POOL_SIZE_512_MB:
	case SMU_MEMORY_POOL_SIZE_1_GB:
	case SMU_MEMORY_POOL_SIZE_2_GB:
		ret = amdgpu_bo_create_kernel(adev,
					      memory_pool->size,
					      memory_pool->align,
					      memory_pool->domain,
					      &memory_pool->bo,
					      &memory_pool->mc_address,
					      &memory_pool->cpu_addr);
		if (ret)
			dev_err(adev->dev, "VRAM allocation for dramlog failed!\n");
		break;
	default:
		break;
	}

	return ret;
}

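/*
 * Note: smu->pool_size is taken from adev->pm.smu_prv_buffer_size in
 * smu_sw_init() below, which is typically configured through the amdgpu
 * "smu_memory_pool_size" module parameter; only the pool sizes listed
 * in the switch above are honored.
 */
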
static int smu_free_memory_pool(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;

	if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
		return 0;

	amdgpu_bo_free_kernel(&memory_pool->bo,
			      &memory_pool->mc_address,
			      &memory_pool->cpu_addr);

	memset(memory_pool, 0, sizeof(struct smu_table));

	return 0;
}

static int smu_alloc_dummy_read_table(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *dummy_read_1_table =
			&smu_table->dummy_read_1_table;
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (!dummy_read_1_table->size)
		return 0;

	ret = amdgpu_bo_create_kernel(adev,
				      dummy_read_1_table->size,
				      dummy_read_1_table->align,
				      dummy_read_1_table->domain,
				      &dummy_read_1_table->bo,
				      &dummy_read_1_table->mc_address,
				      &dummy_read_1_table->cpu_addr);
	if (ret)
		dev_err(adev->dev, "VRAM allocation for dummy read table failed!\n");

	return ret;
}

static void smu_free_dummy_read_table(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *dummy_read_1_table =
			&smu_table->dummy_read_1_table;

	amdgpu_bo_free_kernel(&dummy_read_1_table->bo,
			      &dummy_read_1_table->mc_address,
			      &dummy_read_1_table->cpu_addr);

	memset(dummy_read_1_table, 0, sizeof(struct smu_table));
}

static int smu_smc_table_sw_init(struct smu_context *smu)
{
	int ret;

	/**
	 * Create the smu_table structure, and init smc tables such as
	 * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
	 */
	ret = smu_init_smc_tables(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to init smc tables!\n");
		return ret;
	}

	/**
	 * Create the smu_power_context structure, and allocate the
	 * smu_dpm_context and other context data that make up the
	 * smu_power_context.
	 */
	ret = smu_init_power(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to init smu power!\n");
		return ret;
	}

	/*
	 * allocate vram bos to store smc table contents.
	 */
	ret = smu_init_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_alloc_memory_pool(smu);
	if (ret)
		return ret;

	ret = smu_alloc_dummy_read_table(smu);
	if (ret)
		return ret;

	ret = smu_i2c_init(smu);
	if (ret)
		return ret;

	return 0;
}

static int smu_smc_table_sw_fini(struct smu_context *smu)
{
	int ret;

	smu_i2c_fini(smu);

	smu_free_dummy_read_table(smu);

	ret = smu_free_memory_pool(smu);
	if (ret)
		return ret;

	ret = smu_fini_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_fini_power(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to fini smu power!\n");
		return ret;
	}

	ret = smu_fini_smc_tables(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to fini smc tables!\n");
		return ret;
	}

	return 0;
}

static void smu_throttling_logging_work_fn(struct work_struct *work)
{
	struct smu_context *smu = container_of(work, struct smu_context,
					       throttling_logging_work);

	smu_log_thermal_throttling(smu);
}

static void smu_interrupt_work_fn(struct work_struct *work)
{
	struct smu_context *smu = container_of(work, struct smu_context,
					       interrupt_work);

	if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)
		smu->ppt_funcs->interrupt_work(smu);
}

static void smu_swctf_delayed_work_handler(struct work_struct *work)
{
	struct smu_context *smu =
		container_of(work, struct smu_context, swctf_delayed_work.work);
	struct smu_temperature_range *range =
				&smu->thermal_range;
	struct amdgpu_device *adev = smu->adev;
	uint32_t hotspot_tmp, size;

	/*
	 * If the hotspot temperature is confirmed to be below the SW CTF
	 * setting point after the enforced delay, nothing will be done.
	 * Otherwise, a graceful shutdown will be performed to prevent further damage.
	 */
	if (range->software_shutdown_temp &&
	    smu->ppt_funcs->read_sensor &&
	    !smu->ppt_funcs->read_sensor(smu,
					 AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
					 &hotspot_tmp,
					 &size) &&
	    hotspot_tmp / 1000 < range->software_shutdown_temp)
		return;

	dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
	dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
	orderly_poweroff(true);
}

static int smu_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	smu->pool_size = adev->pm.smu_prv_buffer_size;
	smu->smu_feature.feature_num = SMU_FEATURE_MAX;
	bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);

	INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn);
	INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn);
	atomic64_set(&smu->throttle_int_counter, 0);
	smu->watermarks_bitmap = 0;
	smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;

	atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
	atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);

	smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
	smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
	smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
	smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
	smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
	smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;

	smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
	smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
	smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
	smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
	smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
	smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
	smu->display_config = &adev->pm.pm_display_cfg;

	smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;

	INIT_DELAYED_WORK(&smu->swctf_delayed_work,
			  smu_swctf_delayed_work_handler);

	ret = smu_smc_table_sw_init(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to sw init smc table!\n");
		return ret;
	}

	/* get boot_values from vbios to set revision, gfxclk, etc. */
	ret = smu_get_vbios_bootup_values(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to get VBIOS boot clock values!\n");
		return ret;
	}

	ret = smu_init_pptable_microcode(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup pptable firmware!\n");
		return ret;
	}

	ret = smu_register_irq_handler(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to register smc irq handler!\n");
		return ret;
	}

	/* If there is no way to query fan control mode, fan control is not supported */
	if (!smu->ppt_funcs->get_fan_control_mode)
		smu->adev->pm.no_fan = true;

	return 0;
}

static int smu_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	ret = smu_smc_table_sw_fini(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to sw fini smc table!\n");
		return ret;
	}

	smu_fini_microcode(smu);

	return 0;
}

static int smu_get_thermal_temperature_range(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_temperature_range *range =
				&smu->thermal_range;
	int ret = 0;

	if (!smu->ppt_funcs->get_thermal_temperature_range)
		return 0;

	ret = smu->ppt_funcs->get_thermal_temperature_range(smu, range);
	if (ret)
		return ret;

	adev->pm.dpm.thermal.min_temp = range->min;
	adev->pm.dpm.thermal.max_temp = range->max;
	adev->pm.dpm.thermal.max_edge_emergency_temp = range->edge_emergency_max;
	adev->pm.dpm.thermal.min_hotspot_temp = range->hotspot_min;
	adev->pm.dpm.thermal.max_hotspot_crit_temp = range->hotspot_crit_max;
	adev->pm.dpm.thermal.max_hotspot_emergency_temp = range->hotspot_emergency_max;
	adev->pm.dpm.thermal.min_mem_temp = range->mem_min;
	adev->pm.dpm.thermal.max_mem_crit_temp = range->mem_crit_max;
	adev->pm.dpm.thermal.max_mem_emergency_temp = range->mem_emergency_max;

	return ret;
}

static int smu_smc_hw_setup(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	struct amdgpu_device *adev = smu->adev;
	uint8_t pcie_gen = 0, pcie_width = 0;
	uint64_t features_supported;
	int ret = 0;

	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 0, 12):
		if (adev->in_suspend && smu_is_dpm_running(smu)) {
			dev_info(adev->dev, "dpm has been enabled\n");
			ret = smu_system_features_control(smu, true);
			if (ret)
				dev_err(adev->dev, "Failed system features control!\n");
			return ret;
		}
		break;
	default:
		break;
	}

	ret = smu_init_display_count(smu, 0);
	if (ret) {
		dev_info(adev->dev, "Failed to pre-set display count as 0!\n");
		return ret;
	}

	ret = smu_set_driver_table_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetDriverDramAddr!\n");
		return ret;
	}

	/*
	 * Set the PMSTATUSLOG table bo address with the SetToolsDramAddr MSG for tools.
	 */
	ret = smu_set_tool_table_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetToolsDramAddr!\n");
		return ret;
	}

	/*
	 * The SetSystemVirtualDramAddr and DramLogSetDramAddr messages
	 * notify PMFW of the memory pool location.
	 */
	ret = smu_notify_memory_pool_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetDramLogDramAddr!\n");
		return ret;
	}

	/*
	 * It is assumed the pptable used before runpm is the same as
	 * the one used afterwards. Thus, we can reuse the stored
	 * copy and do not need to set up the pptable again.
	 */
	if (!adev->in_runpm) {
		ret = smu_setup_pptable(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to setup pptable!\n");
			return ret;
		}
	}

	/* smu_dump_pptable(smu); */

	/*
	 * With SCPM enabled, PSP is responsible for the PPTable transferring
	 * (to SMU). Driver involvement is neither needed nor permitted.
	 */
	if (!adev->scpm_enabled) {
		/*
		 * Copy pptable bo in the vram to smc with SMU MSGs such as
		 * SetDriverDramAddr and TransferTableDram2Smu.
		 */
		ret = smu_write_pptable(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to transfer pptable to SMC!\n");
			return ret;
		}
	}

	/* issue Run*Btc msg */
	ret = smu_run_btc(smu);
	if (ret)
		return ret;

	/*
	 * With SCPM enabled, these actions (and the relevant messages) are
	 * neither needed nor permitted.
	 */
	if (!adev->scpm_enabled) {
		ret = smu_feature_set_allowed_mask(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to set driver allowed features mask!\n");
			return ret;
		}
	}

	ret = smu_system_features_control(smu, true);
	if (ret) {
		dev_err(adev->dev, "Failed to enable requested dpm features!\n");
		return ret;
	}

	ret = smu_feature_get_enabled_mask(smu, &features_supported);
	if (ret) {
		dev_err(adev->dev, "Failed to retrieve supported dpm features!\n");
		return ret;
	}
	bitmap_copy(feature->supported,
		    (unsigned long *)&features_supported,
		    feature->feature_num);

	if (!smu_is_dpm_running(smu))
		dev_info(adev->dev, "dpm has been disabled\n");

	/*
	 * Set initial values (from vbios) in the dpm tables context, such as
	 * gfxclk, memclk, dcefclk, etc., and enable the DPM feature for each
	 * type of clock.
	 */
	ret = smu_set_default_dpm_table(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
		return ret;
	}

	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
		pcie_gen = 3;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
		pcie_gen = 2;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
		pcie_gen = 1;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
		pcie_gen = 0;

	/* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
	 * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
	 * Bit 7:0:   PCIE lane width, 1 to 7 corresponds to x1 to x32
	 */
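	/*
	 * Worked example (assuming the bit layout above): a GEN4 x16 link
	 * gives pcie_gen = 3 and pcie_width = 6, i.e. an argument of
	 * (3 << 8) | 6 = 0x306.
	 */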
	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
		pcie_width = 6;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
		pcie_width = 5;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
		pcie_width = 4;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
		pcie_width = 3;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
		pcie_width = 2;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
		pcie_width = 1;
	ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);
	if (ret) {
		dev_err(adev->dev, "Attempt to override pcie params failed!\n");
		return ret;
	}

	ret = smu_get_thermal_temperature_range(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to get thermal temperature ranges!\n");
		return ret;
	}

	ret = smu_enable_thermal_alert(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to enable thermal alert!\n");
		return ret;
	}

	ret = smu_notify_display_change(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to notify display change!\n");
		return ret;
	}

	/*
	 * Set min deep sleep dce fclk with bootup value from vbios via
	 * SetMinDeepSleepDcefclk MSG.
	 */
	ret = smu_set_min_dcef_deep_sleep(smu,
					  smu->smu_table.boot_values.dcefclk / 100);

	return ret;
}

static int smu_start_smc_engine(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		if (adev->ip_versions[MP1_HWIP][0] < IP_VERSION(11, 0, 0)) {
			if (smu->ppt_funcs->load_microcode) {
				ret = smu->ppt_funcs->load_microcode(smu);
				if (ret)
					return ret;
			}
		}
	}

	if (smu->ppt_funcs->check_fw_status) {
		ret = smu->ppt_funcs->check_fw_status(smu);
		if (ret) {
			dev_err(adev->dev, "SMC is not ready\n");
			return ret;
		}
	}

	/*
	 * Send msg GetDriverIfVersion to check if the return value is equal
	 * to DRIVER_IF_VERSION of the smc header.
	 */
	ret = smu_check_fw_version(smu);
	if (ret)
		return ret;

	return ret;
}

static int smu_hw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
		smu->pm_enabled = false;
		return 0;
	}

	ret = smu_start_smc_engine(smu);
	if (ret) {
		dev_err(adev->dev, "SMC engine is not correctly up!\n");
		return ret;
	}

	if (smu->is_apu) {
		ret = smu_set_gfx_imu_enable(smu);
		if (ret)
			return ret;
		smu_dpm_set_vcn_enable(smu, true);
		smu_dpm_set_jpeg_enable(smu, true);
		smu_set_gfx_cgpg(smu, true);
	}

	if (!smu->pm_enabled)
		return 0;

	ret = smu_get_driver_allowed_feature_mask(smu);
	if (ret)
		return ret;

	ret = smu_smc_hw_setup(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup smc hw!\n");
		return ret;
	}

	/*
	 * Maximum sustainable clock retrieval is placed here considering:
	 * 1. It is not needed on resume (from S3).
	 * 2. DAL settings come between .hw_init and .late_init of SMU.
	 *    And DAL needs to know the maximum sustainable clocks. Thus
	 *    it cannot be put in .late_init().
	 */
	ret = smu_init_max_sustainable_clocks(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to init max sustainable clocks!\n");
		return ret;
	}

	adev->pm.dpm_enabled = true;

	dev_info(adev->dev, "SMU is initialized successfully!\n");

	return 0;
}

static int smu_disable_dpms(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;
	bool use_baco = !smu->is_apu &&
		((amdgpu_in_reset(adev) &&
		  (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
		 ((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev)));

	/*
	 * For SMU 13.0.0 and 13.0.7, PMFW will handle the DPM features (disablement
	 * or others) properly on suspend/reset/unload. Driver involvement may cause
	 * some unexpected issues.
	 */
	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 7):
	case IP_VERSION(13, 0, 10):
		return 0;
	default:
		break;
	}

	/*
	 * For custom pptable uploading, skip the DPM features
	 * disable process on Navi1x ASICs.
	 *   - As the gfx related features are under control of
	 *     RLC on those ASICs, RLC reinitialization would be
	 *     needed to re-enable them. That would take considerably
	 *     more effort.
	 *
	 *   - SMU firmware can handle the DPM reenablement
	 *     properly.
	 */
	if (smu->uploading_custom_pp_table) {
		switch (adev->ip_versions[MP1_HWIP][0]) {
		case IP_VERSION(11, 0, 0):
		case IP_VERSION(11, 0, 5):
		case IP_VERSION(11, 0, 9):
		case IP_VERSION(11, 0, 7):
		case IP_VERSION(11, 0, 11):
		case IP_VERSION(11, 5, 0):
		case IP_VERSION(11, 0, 12):
		case IP_VERSION(11, 0, 13):
			return 0;
		default:
			break;
		}
	}

	/*
	 * For Sienna_Cichlid, PMFW will handle the features disablement properly
	 * on BACO entry. Driver involvement is unnecessary.
	 */
	if (use_baco) {
		switch (adev->ip_versions[MP1_HWIP][0]) {
		case IP_VERSION(11, 0, 7):
		case IP_VERSION(11, 0, 0):
		case IP_VERSION(11, 0, 5):
		case IP_VERSION(11, 0, 9):
		case IP_VERSION(13, 0, 7):
			return 0;
		default:
			break;
		}
	}

	/*
	 * For SMU 13.0.4/11, PMFW will handle the features disablement properly
	 * for gpu reset and S0i3 cases. Driver involvement is unnecessary.
	 */
	if (amdgpu_in_reset(adev) || adev->in_s0ix) {
		switch (adev->ip_versions[MP1_HWIP][0]) {
		case IP_VERSION(13, 0, 4):
		case IP_VERSION(13, 0, 11):
			return 0;
		default:
			break;
		}
	}

	/*
	 * For gpu reset, runpm and hibernation through BACO,
	 * BACO feature has to be kept enabled.
	 */
	if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) {
		ret = smu_disable_all_features_with_exception(smu,
							      SMU_FEATURE_BACO_BIT);
		if (ret)
			dev_err(adev->dev, "Failed to disable smu features except BACO.\n");
	} else {
		/* DisableAllSmuFeatures message is not permitted with SCPM enabled */
		if (!adev->scpm_enabled) {
			ret = smu_system_features_control(smu, false);
			if (ret)
				dev_err(adev->dev, "Failed to disable smu features.\n");
		}
	}

	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2) &&
	    !amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs->stop)
		adev->gfx.rlc.funcs->stop(adev);

	return ret;
}

static int smu_smc_hw_cleanup(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	cancel_work_sync(&smu->throttling_logging_work);
	cancel_work_sync(&smu->interrupt_work);

	ret = smu_disable_thermal_alert(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to disable thermal alert!\n");
		return ret;
	}

	cancel_delayed_work_sync(&smu->swctf_delayed_work);

	ret = smu_disable_dpms(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to disable dpm features!\n");
		return ret;
	}

	return 0;
}

static int smu_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	smu_dpm_set_vcn_enable(smu, false);
	smu_dpm_set_jpeg_enable(smu, false);

	adev->vcn.cur_state = AMD_PG_STATE_GATE;
	adev->jpeg.cur_state = AMD_PG_STATE_GATE;

	if (!smu->pm_enabled)
		return 0;

	adev->pm.dpm_enabled = false;

	return smu_smc_hw_cleanup(smu);
}

static void smu_late_fini(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct smu_context *smu = adev->powerplay.pp_handle;

	kfree(smu);
}

static int smu_reset(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret;

	ret = smu_hw_fini(adev);
	if (ret)
		return ret;

	ret = smu_hw_init(adev);
	if (ret)
		return ret;

	ret = smu_late_init(adev);
	if (ret)
		return ret;

	return 0;
}

static int smu_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;
	uint64_t count;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (!smu->pm_enabled)
		return 0;

	adev->pm.dpm_enabled = false;

	ret = smu_smc_hw_cleanup(smu);
	if (ret)
		return ret;

	smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);

	smu_set_gfx_cgpg(smu, false);

	/*
	 * PMFW resets the entrycount when the device is suspended, so we save
	 * the last value to be used when we resume to keep it consistent
	 */
	ret = smu_get_entrycount_gfxoff(smu, &count);
	if (!ret)
		adev->gfx.gfx_off_entrycount = count;

	return 0;
}

static int smu_resume(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (!smu->pm_enabled)
		return 0;

	dev_info(adev->dev, "SMU is resuming...\n");

	ret = smu_start_smc_engine(smu);
	if (ret) {
		dev_err(adev->dev, "SMC engine is not correctly up!\n");
		return ret;
	}

	ret = smu_smc_hw_setup(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup smc hw!\n");
		return ret;
	}

	ret = smu_set_gfx_imu_enable(smu);
	if (ret)
		return ret;

	smu_set_gfx_cgpg(smu, true);

	smu->disable_uclk_switch = 0;

	adev->pm.dpm_enabled = true;

	dev_info(adev->dev, "SMU is resumed successfully!\n");

	return 0;
}

static int smu_display_configuration_change(void *handle,
					    const struct amd_pp_display_configuration *display_config)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!display_config)
		return -EINVAL;

	smu_set_min_dcef_deep_sleep(smu,
				    display_config->min_dcef_deep_sleep_set_clk / 100);

	return 0;
}

smu_set_clockgating_state(void * handle,enum amd_clockgating_state state)1781 static int smu_set_clockgating_state(void *handle,
1782 				     enum amd_clockgating_state state)
1783 {
1784 	return 0;
1785 }
1786 
smu_set_powergating_state(void * handle,enum amd_powergating_state state)1787 static int smu_set_powergating_state(void *handle,
1788 				     enum amd_powergating_state state)
1789 {
1790 	return 0;
1791 }
1792 
smu_enable_umd_pstate(void * handle,enum amd_dpm_forced_level * level)1793 static int smu_enable_umd_pstate(void *handle,
1794 		      enum amd_dpm_forced_level *level)
1795 {
1796 	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
1797 					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
1798 					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
1799 					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
1800 
1801 	struct smu_context *smu = (struct smu_context*)(handle);
1802 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1803 
1804 	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
1805 		return -EINVAL;
1806 
1807 	if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
1808 		/* enter umd pstate, save current level, disable gfx cg*/
1809 		if (*level & profile_mode_mask) {
1810 			smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
1811 			smu_gpo_control(smu, false);
1812 			smu_gfx_ulv_control(smu, false);
1813 			smu_deep_sleep_control(smu, false);
1814 			amdgpu_asic_update_umd_stable_pstate(smu->adev, true);
1815 		}
1816 	} else {
1817 		/* exit umd pstate, restore level, enable gfx cg*/
1818 		if (!(*level & profile_mode_mask)) {
1819 			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
1820 				*level = smu_dpm_ctx->saved_dpm_level;
1821 			amdgpu_asic_update_umd_stable_pstate(smu->adev, false);
1822 			smu_deep_sleep_control(smu, true);
1823 			smu_gfx_ulv_control(smu, true);
1824 			smu_gpo_control(smu, true);
1825 		}
1826 	}
1827 
1828 	return 0;
1829 }
1830 
smu_bump_power_profile_mode(struct smu_context * smu,long * param,uint32_t param_size)1831 static int smu_bump_power_profile_mode(struct smu_context *smu,
1832 					   long *param,
1833 					   uint32_t param_size)
1834 {
1835 	int ret = 0;
1836 
1837 	if (smu->ppt_funcs->set_power_profile_mode)
1838 		ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);
1839 
1840 	return ret;
1841 }
1842 
smu_adjust_power_state_dynamic(struct smu_context * smu,enum amd_dpm_forced_level level,bool skip_display_settings)1843 static int smu_adjust_power_state_dynamic(struct smu_context *smu,
1844 				   enum amd_dpm_forced_level level,
1845 				   bool skip_display_settings)
1846 {
1847 	int ret = 0;
1848 	int index = 0;
1849 	long workload[1];
1850 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1851 
1852 	if (!skip_display_settings) {
1853 		ret = smu_display_config_changed(smu);
1854 		if (ret) {
1855 			dev_err(smu->adev->dev, "Failed to change display config!");
1856 			return ret;
1857 		}
1858 	}
1859 
1860 	ret = smu_apply_clocks_adjust_rules(smu);
1861 	if (ret) {
1862 		dev_err(smu->adev->dev, "Failed to apply clocks adjust rules!");
1863 		return ret;
1864 	}
1865 
1866 	if (!skip_display_settings) {
1867 		ret = smu_notify_smc_display_config(smu);
1868 		if (ret) {
1869 			dev_err(smu->adev->dev, "Failed to notify smc display config!");
1870 			return ret;
1871 		}
1872 	}
1873 
1874 	if (smu_dpm_ctx->dpm_level != level) {
1875 		ret = smu_asic_set_performance_level(smu, level);
1876 		if (ret) {
1877 			dev_err(smu->adev->dev, "Failed to set performance level!");
1878 			return ret;
1879 		}
1880 
1881 		/* update the saved copy */
1882 		smu_dpm_ctx->dpm_level = level;
1883 	}
1884 
1885 	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
1886 		smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
1887 		index = fls(smu->workload_mask);
1888 		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1889 		workload[0] = smu->workload_setting[index];
1890 
1891 		if (smu->power_profile_mode != workload[0])
1892 			smu_bump_power_profile_mode(smu, workload, 0);
1893 	}
1894 
1895 	return ret;
1896 }
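
/*
 * Illustrative sketch (not part of the driver): how the fls()-based
 * lookup above picks the active workload. With, say, workload_mask ==
 * 0b0101, fls() returns 3 (the position of the highest set bit,
 * 1-based), so index becomes 2 and workload_setting[2] -- the
 * highest-priority profile still enabled -- is the one applied.
 */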

static int smu_handle_task(struct smu_context *smu,
			   enum amd_dpm_forced_level level,
			   enum amd_pp_task task_id)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	switch (task_id) {
	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
		ret = smu_pre_display_config_changed(smu);
		if (ret)
			return ret;
		ret = smu_adjust_power_state_dynamic(smu, level, false);
		break;
	case AMD_PP_TASK_COMPLETE_INIT:
	case AMD_PP_TASK_READJUST_POWER_STATE:
		ret = smu_adjust_power_state_dynamic(smu, level, true);
		break;
	default:
		break;
	}

	return ret;
}

static int smu_handle_dpm_task(void *handle,
			       enum amd_pp_task task_id,
			       enum amd_pm_state_type *user_state)
{
	struct smu_context *smu = handle;
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

	return smu_handle_task(smu, smu_dpm->dpm_level, task_id);
}

static int smu_switch_power_profile(void *handle,
				    enum PP_SMC_POWER_PROFILE type,
				    bool en)
{
	struct smu_context *smu = handle;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	long workload[1];
	uint32_t index;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (type >= PP_SMC_POWER_PROFILE_CUSTOM)
		return -EINVAL;

	if (!en) {
		smu->workload_mask &= ~(1 << smu->workload_prority[type]);
		index = fls(smu->workload_mask);
		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload[0] = smu->workload_setting[index];
	} else {
		smu->workload_mask |= (1 << smu->workload_prority[type]);
		index = fls(smu->workload_mask);
		index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload[0] = smu->workload_setting[index];
	}

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
	    smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
		smu_bump_power_profile_mode(smu, workload, 0);

	return 0;
}

static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
{
	struct smu_context *smu = handle;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	return smu_dpm_ctx->dpm_level;
}

static int smu_force_performance_level(void *handle,
				       enum amd_dpm_forced_level level)
{
	struct smu_context *smu = handle;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	ret = smu_enable_umd_pstate(smu, &level);
	if (ret)
		return ret;

	ret = smu_handle_task(smu, level,
			      AMD_PP_TASK_READJUST_POWER_STATE);

	/* reset user dpm clock state */
	if (!ret && smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		memset(smu->user_dpm_profile.clk_mask, 0, sizeof(smu->user_dpm_profile.clk_mask));
		smu->user_dpm_profile.clk_dependency = 0;
	}

	return ret;
}

static int smu_set_display_count(void *handle, uint32_t count)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	return smu_init_display_count(smu, count);
}

static int smu_force_smuclk_levels(struct smu_context *smu,
				   enum smu_clk_type clk_type,
				   uint32_t mask)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		dev_dbg(smu->adev->dev, "force clock level is for dpm manual mode only.\n");
		return -EINVAL;
	}

	if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) {
		ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
		if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
			smu->user_dpm_profile.clk_mask[clk_type] = mask;
			smu_set_user_clk_dependencies(smu, clk_type);
		}
	}

	return ret;
}
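
/*
 * Illustrative sketch (not part of the driver): @mask is a bitmask in
 * which bit i selects DPM level i of the given clock. Forcing the two
 * lowest SCLK levels while in manual mode would look roughly like:
 *
 *	uint32_t mask = BIT(0) | BIT(1);
 *	int err = smu_force_smuclk_levels(smu, SMU_SCLK, mask);
 */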

static int smu_force_ppclk_levels(void *handle,
				  enum pp_clock_type type,
				  uint32_t mask)
{
	struct smu_context *smu = handle;
	enum smu_clk_type clk_type;

	switch (type) {
	case PP_SCLK:
		clk_type = SMU_SCLK; break;
	case PP_MCLK:
		clk_type = SMU_MCLK; break;
	case PP_PCIE:
		clk_type = SMU_PCIE; break;
	case PP_SOCCLK:
		clk_type = SMU_SOCCLK; break;
	case PP_FCLK:
		clk_type = SMU_FCLK; break;
	case PP_DCEFCLK:
		clk_type = SMU_DCEFCLK; break;
	case PP_VCLK:
		clk_type = SMU_VCLK; break;
	case PP_VCLK1:
		clk_type = SMU_VCLK1; break;
	case PP_DCLK:
		clk_type = SMU_DCLK; break;
	case PP_DCLK1:
		clk_type = SMU_DCLK1; break;
	case OD_SCLK:
		clk_type = SMU_OD_SCLK; break;
	case OD_MCLK:
		clk_type = SMU_OD_MCLK; break;
	case OD_VDDC_CURVE:
		clk_type = SMU_OD_VDDC_CURVE; break;
	case OD_RANGE:
		clk_type = SMU_OD_RANGE; break;
	default:
		return -EINVAL;
	}

	return smu_force_smuclk_levels(smu, clk_type, mask);
}

/*
 * On system suspend or reset, the dpm_enabled flag is cleared so that
 * unsupported SMU services are gated. Setting the MP1 state, however,
 * must still be allowed even with dpm_enabled cleared.
 */
static int smu_set_mp1_state(void *handle,
			     enum pp_mp1_state mp1_state)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs &&
	    smu->ppt_funcs->set_mp1_state)
		ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state);

	return ret;
}
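
/*
 * Illustrative sketch (not part of the driver): a reset path would
 * typically move MP1 into the unload state before taking the device
 * down, e.g.:
 *
 *	int err = smu_set_mp1_state(smu, PP_MP1_STATE_UNLOAD);
 *	if (err)
 *		dev_warn(smu->adev->dev, "failed to unload MP1\n");
 */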

static int smu_set_df_cstate(void *handle,
			     enum pp_df_cstate state)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
		return 0;

	ret = smu->ppt_funcs->set_df_cstate(smu, state);
	if (ret)
		dev_err(smu->adev->dev, "[SetDfCstate] failed!\n");

	return ret;
}

int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)
		return 0;

	ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en);
	if (ret)
		dev_err(smu->adev->dev, "[AllowXgmiPowerDown] failed!\n");

	return ret;
}

int smu_write_watermarks_table(struct smu_context *smu)
{
	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	return smu_set_watermarks_table(smu, NULL);
}

static int smu_set_watermarks_for_clock_ranges(void *handle,
					       struct pp_smu_wm_range_sets *clock_ranges)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->disable_watermark)
		return 0;

	return smu_set_watermarks_table(smu, clock_ranges);
}

int smu_set_ac_dc(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	/* controlled by firmware */
	if (smu->dc_controlled_by_gpio)
		return 0;

	ret = smu_set_power_source(smu,
				   smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
				   SMU_POWER_SOURCE_DC);
	if (ret)
		dev_err(smu->adev->dev, "Failed to switch to %s mode!\n",
			smu->adev->pm.ac_power ? "AC" : "DC");

	return ret;
}

const struct amd_ip_funcs smu_ip_funcs = {
	.name = "smu",
	.early_init = smu_early_init,
	.late_init = smu_late_init,
	.sw_init = smu_sw_init,
	.sw_fini = smu_sw_fini,
	.hw_init = smu_hw_init,
	.hw_fini = smu_hw_fini,
	.late_fini = smu_late_fini,
	.suspend = smu_suspend,
	.resume = smu_resume,
	.is_idle = NULL,
	.check_soft_reset = NULL,
	.wait_for_idle = NULL,
	.soft_reset = NULL,
	.set_clockgating_state = smu_set_clockgating_state,
	.set_powergating_state = smu_set_powergating_state,
};

const struct amdgpu_ip_block_version smu_v11_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &smu_ip_funcs,
};

const struct amdgpu_ip_block_version smu_v12_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 12,
	.minor = 0,
	.rev = 0,
	.funcs = &smu_ip_funcs,
};

const struct amdgpu_ip_block_version smu_v13_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 13,
	.minor = 0,
	.rev = 0,
	.funcs = &smu_ip_funcs,
};

static int smu_load_microcode(void *handle)
{
	struct smu_context *smu = handle;
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	/* This should be used for non-PSP firmware loading */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
		return 0;

	if (smu->ppt_funcs->load_microcode) {
		ret = smu->ppt_funcs->load_microcode(smu);
		if (ret) {
			dev_err(adev->dev, "Load microcode failed\n");
			return ret;
		}
	}

	if (smu->ppt_funcs->check_fw_status) {
		ret = smu->ppt_funcs->check_fw_status(smu);
		if (ret) {
			dev_err(adev->dev, "SMC is not ready\n");
			return ret;
		}
	}

	return ret;
}

static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
{
	int ret = 0;

	if (smu->ppt_funcs->set_gfx_cgpg)
		ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);

	return ret;
}

static int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->set_fan_speed_rpm)
		return -EOPNOTSUPP;

	if (speed == U32_MAX)
		return -EINVAL;

	ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
	if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
		smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_RPM;
		smu->user_dpm_profile.fan_speed_rpm = speed;

		/* Override custom PWM setting as they cannot co-exist */
		smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_PWM;
		smu->user_dpm_profile.fan_speed_pwm = 0;
	}

	return ret;
}

/**
 * smu_get_power_limit - Request one of the SMU Power Limits
 *
 * @handle: pointer to smu context
 * @limit: requested limit is written back to this variable
 * @pp_limit_level: &pp_power_limit_level selecting which limit level
 *                  (current, default or max) to return
 * @pp_power_type: &pp_power_type selecting the power type (sustained or
 *                 fast)
 * Return: 0 on success, a negative errno on error
 */
int smu_get_power_limit(void *handle,
			uint32_t *limit,
			enum pp_power_limit_level pp_limit_level,
			enum pp_power_type pp_power_type)
{
	struct smu_context *smu = handle;
	struct amdgpu_device *adev = smu->adev;
	enum smu_ppt_limit_level limit_level;
	uint32_t limit_type;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	switch (pp_power_type) {
	case PP_PWR_TYPE_SUSTAINED:
		limit_type = SMU_DEFAULT_PPT_LIMIT;
		break;
	case PP_PWR_TYPE_FAST:
		limit_type = SMU_FAST_PPT_LIMIT;
		break;
	default:
		return -EOPNOTSUPP;
	}

	switch (pp_limit_level) {
	case PP_PWR_LIMIT_CURRENT:
		limit_level = SMU_PPT_LIMIT_CURRENT;
		break;
	case PP_PWR_LIMIT_DEFAULT:
		limit_level = SMU_PPT_LIMIT_DEFAULT;
		break;
	case PP_PWR_LIMIT_MAX:
		limit_level = SMU_PPT_LIMIT_MAX;
		break;
	case PP_PWR_LIMIT_MIN:
	default:
		return -EOPNOTSUPP;
	}

	if (limit_type != SMU_DEFAULT_PPT_LIMIT) {
		if (smu->ppt_funcs->get_ppt_limit)
			ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level);
	} else {
		switch (limit_level) {
		case SMU_PPT_LIMIT_CURRENT:
			switch (adev->ip_versions[MP1_HWIP][0]) {
			case IP_VERSION(13, 0, 2):
			case IP_VERSION(11, 0, 7):
			case IP_VERSION(11, 0, 11):
			case IP_VERSION(11, 0, 12):
			case IP_VERSION(11, 0, 13):
				ret = smu_get_asic_power_limits(smu,
								&smu->current_power_limit,
								NULL,
								NULL);
				break;
			default:
				break;
			}
			*limit = smu->current_power_limit;
			break;
		case SMU_PPT_LIMIT_DEFAULT:
			*limit = smu->default_power_limit;
			break;
		case SMU_PPT_LIMIT_MAX:
			*limit = smu->max_power_limit;
			break;
		default:
			break;
		}
	}

	return ret;
}
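
/*
 * Illustrative sketch (not part of the driver): querying the current
 * sustained power limit through the exported helper:
 *
 *	uint32_t limit;
 *	int err = smu_get_power_limit(smu, &limit,
 *				      PP_PWR_LIMIT_CURRENT,
 *				      PP_PWR_TYPE_SUSTAINED);
 *	if (!err)
 *		dev_info(smu->adev->dev, "current sustained limit: %u\n", limit);
 */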

static int smu_set_power_limit(void *handle, uint32_t limit)
{
	struct smu_context *smu = handle;
	uint32_t limit_type = limit >> 24;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	limit &= (1 << 24) - 1;
	if (limit_type != SMU_DEFAULT_PPT_LIMIT)
		if (smu->ppt_funcs->set_power_limit)
			return smu->ppt_funcs->set_power_limit(smu, limit_type, limit);

	if (limit > smu->max_power_limit) {
		dev_err(smu->adev->dev,
			"New power limit (%d) is over the max allowed %d\n",
			limit, smu->max_power_limit);
		return -EINVAL;
	}

	if (!limit)
		limit = smu->current_power_limit;

	if (smu->ppt_funcs->set_power_limit) {
		ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
		if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
			smu->user_dpm_profile.power_limit = limit;
	}

	return ret;
}
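
/*
 * Illustrative sketch (not part of the driver): the @limit argument
 * packs the limit type into the top byte and the value into the low
 * 24 bits. Requesting a fast PPT limit of 200 would therefore look
 * roughly like:
 *
 *	uint32_t packed = (SMU_FAST_PPT_LIMIT << 24) | 200;
 *	int err = smu_set_power_limit(smu, packed);
 */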

static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->print_clk_levels)
		ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);

	return ret;
}

static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type)
{
	enum smu_clk_type clk_type;

	switch (type) {
	case PP_SCLK:
		clk_type = SMU_SCLK; break;
	case PP_MCLK:
		clk_type = SMU_MCLK; break;
	case PP_PCIE:
		clk_type = SMU_PCIE; break;
	case PP_SOCCLK:
		clk_type = SMU_SOCCLK; break;
	case PP_FCLK:
		clk_type = SMU_FCLK; break;
	case PP_DCEFCLK:
		clk_type = SMU_DCEFCLK; break;
	case PP_VCLK:
		clk_type = SMU_VCLK; break;
	case PP_VCLK1:
		clk_type = SMU_VCLK1; break;
	case PP_DCLK:
		clk_type = SMU_DCLK; break;
	case PP_DCLK1:
		clk_type = SMU_DCLK1; break;
	case OD_SCLK:
		clk_type = SMU_OD_SCLK; break;
	case OD_MCLK:
		clk_type = SMU_OD_MCLK; break;
	case OD_VDDC_CURVE:
		clk_type = SMU_OD_VDDC_CURVE; break;
	case OD_RANGE:
		clk_type = SMU_OD_RANGE; break;
	case OD_VDDGFX_OFFSET:
		clk_type = SMU_OD_VDDGFX_OFFSET; break;
	case OD_CCLK:
		clk_type = SMU_OD_CCLK; break;
	default:
		clk_type = SMU_CLK_COUNT; break;
	}

	return clk_type;
}

static int smu_print_ppclk_levels(void *handle,
				  enum pp_clock_type type,
				  char *buf)
{
	struct smu_context *smu = handle;
	enum smu_clk_type clk_type;

	clk_type = smu_convert_to_smuclk(type);
	if (clk_type == SMU_CLK_COUNT)
		return -EINVAL;

	return smu_print_smuclk_levels(smu, clk_type, buf);
}

static int smu_emit_ppclk_levels(void *handle, enum pp_clock_type type, char *buf, int *offset)
{
	struct smu_context *smu = handle;
	enum smu_clk_type clk_type;

	clk_type = smu_convert_to_smuclk(type);
	if (clk_type == SMU_CLK_COUNT)
		return -EINVAL;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->emit_clk_levels)
		return -ENOENT;

	return smu->ppt_funcs->emit_clk_levels(smu, clk_type, buf, offset);
}

static int smu_od_edit_dpm_table(void *handle,
				 enum PP_OD_DPM_TABLE_COMMAND type,
				 long *input, uint32_t size)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->od_edit_dpm_table)
		ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);

	return ret;
}

static int smu_read_sensor(void *handle,
			   int sensor,
			   void *data,
			   int *size_arg)
{
	struct smu_context *smu = handle;
	struct smu_umd_pstate_table *pstate_table =
				&smu->pstate_table;
	int ret = 0;
	uint32_t *size, size_val;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!data || !size_arg)
		return -EINVAL;

	size_val = *size_arg;
	size = &size_val;

	if (smu->ppt_funcs->read_sensor)
		if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size))
			goto unlock;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)data) = pstate_table->gfxclk_pstate.standard * 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)data) = pstate_table->uclk_pstate.standard * 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK:
		*((uint32_t *)data) = pstate_table->gfxclk_pstate.peak * 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK:
		*((uint32_t *)data) = pstate_table->uclk_pstate.peak * 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
		ret = smu_feature_get_enabled_mask(smu, (uint64_t *)data);
		*size = 8;
		break;
	case AMDGPU_PP_SENSOR_UVD_POWER:
		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCE_POWER:
		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
		*(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 0 : 1;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
		*(uint32_t *)data = 0;
		*size = 4;
		break;
	default:
		*size = 0;
		ret = -EOPNOTSUPP;
		break;
	}

unlock:
	/* copy the (possibly updated) uint32_t size back to the caller's int */
	*size_arg = size_val;

	return ret;
}
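
/*
 * Illustrative sketch (not part of the driver): reading the enabled SMC
 * feature mask through the sensor interface. @size tells the callee how
 * large @data is and is updated to the number of bytes written:
 *
 *	uint64_t mask;
 *	int size = sizeof(mask);
 *	int err = smu_read_sensor(smu, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK,
 *				  &mask, &size);
 */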

static int smu_get_apu_thermal_limit(void *handle, uint32_t *limit)
{
	int ret = -EINVAL;
	struct smu_context *smu = handle;

	if (smu->ppt_funcs && smu->ppt_funcs->get_apu_thermal_limit)
		ret = smu->ppt_funcs->get_apu_thermal_limit(smu, limit);

	return ret;
}

static int smu_set_apu_thermal_limit(void *handle, uint32_t limit)
{
	int ret = -EINVAL;
	struct smu_context *smu = handle;

	if (smu->ppt_funcs && smu->ppt_funcs->set_apu_thermal_limit)
		ret = smu->ppt_funcs->set_apu_thermal_limit(smu, limit);

	return ret;
}

static int smu_get_power_profile_mode(void *handle, char *buf)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
	    !smu->ppt_funcs->get_power_profile_mode)
		return -EOPNOTSUPP;
	if (!buf)
		return -EINVAL;

	return smu->ppt_funcs->get_power_profile_mode(smu, buf);
}

static int smu_set_power_profile_mode(void *handle,
				      long *param,
				      uint32_t param_size)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
	    !smu->ppt_funcs->set_power_profile_mode)
		return -EOPNOTSUPP;

	return smu_bump_power_profile_mode(smu, param, param_size);
}

static int smu_get_fan_control_mode(void *handle, u32 *fan_mode)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->get_fan_control_mode)
		return -EOPNOTSUPP;

	if (!fan_mode)
		return -EINVAL;

	*fan_mode = smu->ppt_funcs->get_fan_control_mode(smu);

	return 0;
}

static int smu_set_fan_control_mode(void *handle, u32 value)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->set_fan_control_mode)
		return -EOPNOTSUPP;

	if (value == U32_MAX)
		return -EINVAL;

	ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
	if (ret)
		goto out;

	if (!(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
		smu->user_dpm_profile.fan_mode = value;

		/* reset user dpm fan speed */
		if (value != AMD_FAN_CTRL_MANUAL) {
			smu->user_dpm_profile.fan_speed_pwm = 0;
			smu->user_dpm_profile.fan_speed_rpm = 0;
			smu->user_dpm_profile.flags &= ~(SMU_CUSTOM_FAN_SPEED_RPM | SMU_CUSTOM_FAN_SPEED_PWM);
		}
	}

out:
	return ret;
}

static int smu_get_fan_speed_pwm(void *handle, u32 *speed)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->get_fan_speed_pwm)
		return -EOPNOTSUPP;

	if (!speed)
		return -EINVAL;

	ret = smu->ppt_funcs->get_fan_speed_pwm(smu, speed);

	return ret;
}

static int smu_set_fan_speed_pwm(void *handle, u32 speed)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->set_fan_speed_pwm)
		return -EOPNOTSUPP;

	if (speed == U32_MAX)
		return -EINVAL;

	ret = smu->ppt_funcs->set_fan_speed_pwm(smu, speed);
	if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
		smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_PWM;
		smu->user_dpm_profile.fan_speed_pwm = speed;

		/* Override custom RPM setting as they cannot co-exist */
		smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_RPM;
		smu->user_dpm_profile.fan_speed_rpm = 0;
	}

	return ret;
}

static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->get_fan_speed_rpm)
		return -EOPNOTSUPP;

	if (!speed)
		return -EINVAL;

	ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);

	return ret;
}

static int smu_set_deep_sleep_dcefclk(void *handle, uint32_t clk)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	return smu_set_min_dcef_deep_sleep(smu, clk);
}

static int smu_get_clock_by_type_with_latency(void *handle,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_latency *clocks)
{
	struct smu_context *smu = handle;
	enum smu_clk_type clk_type;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->get_clock_by_type_with_latency) {
		switch (type) {
		case amd_pp_sys_clock:
			clk_type = SMU_GFXCLK;
			break;
		case amd_pp_mem_clock:
			clk_type = SMU_MCLK;
			break;
		case amd_pp_dcef_clock:
			clk_type = SMU_DCEFCLK;
			break;
		case amd_pp_disp_clock:
			clk_type = SMU_DISPCLK;
			break;
		default:
			dev_err(smu->adev->dev, "Invalid clock type!\n");
			return -EINVAL;
		}

		ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
	}

	return ret;
}

static int smu_display_clock_voltage_request(void *handle,
					     struct pp_display_clock_request *clock_req)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->display_clock_voltage_request)
		ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);

	return ret;
}

static int smu_display_disable_memory_clock_switch(void *handle,
						   bool disable_memory_clock_switch)
{
	struct smu_context *smu = handle;
	int ret = -EINVAL;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->display_disable_memory_clock_switch)
		ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);

	return ret;
}

static int smu_set_xgmi_pstate(void *handle,
			       uint32_t pstate)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->set_xgmi_pstate)
		ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);

	if (ret)
		dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n");

	return ret;
}

static int smu_get_baco_capability(void *handle, bool *cap)
{
	struct smu_context *smu = handle;

	*cap = false;

	if (!smu->pm_enabled)
		return 0;

	if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
		*cap = smu->ppt_funcs->baco_is_support(smu);

	return 0;
}

static int smu_baco_set_state(void *handle, int state)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	if (state == 0) {
		if (smu->ppt_funcs->baco_exit)
			ret = smu->ppt_funcs->baco_exit(smu);
	} else if (state == 1) {
		if (smu->ppt_funcs->baco_enter)
			ret = smu->ppt_funcs->baco_enter(smu);
	} else {
		return -EINVAL;
	}

	if (ret)
		dev_err(smu->adev->dev, "Failed to %s BACO state!\n",
			state ? "enter" : "exit");

	return ret;
}
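
/*
 * Illustrative sketch (not part of the driver): @state uses 1 to enter
 * BACO and 0 to exit, so a caller that has confirmed support would do:
 *
 *	bool cap;
 *	smu_get_baco_capability(smu, &cap);
 *	if (cap)
 *		smu_baco_set_state(smu, 1);	(enter BACO)
 */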

bool smu_mode1_reset_is_support(struct smu_context *smu)
{
	bool ret = false;

	if (!smu->pm_enabled)
		return false;

	if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)
		ret = smu->ppt_funcs->mode1_reset_is_support(smu);

	return ret;
}

bool smu_mode2_reset_is_support(struct smu_context *smu)
{
	bool ret = false;

	if (!smu->pm_enabled)
		return false;

	if (smu->ppt_funcs && smu->ppt_funcs->mode2_reset_is_support)
		ret = smu->ppt_funcs->mode2_reset_is_support(smu);

	return ret;
}

int smu_mode1_reset(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->mode1_reset)
		ret = smu->ppt_funcs->mode1_reset(smu);

	return ret;
}

static int smu_mode2_reset(void *handle)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->mode2_reset)
		ret = smu->ppt_funcs->mode2_reset(smu);

	if (ret)
		dev_err(smu->adev->dev, "Mode2 reset failed!\n");

	return ret;
}

static int smu_enable_gfx_features(void *handle)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->enable_gfx_features)
		ret = smu->ppt_funcs->enable_gfx_features(smu);

	if (ret)
		dev_err(smu->adev->dev, "enable gfx features failed!\n");

	return ret;
}

static int smu_get_max_sustainable_clocks_by_dc(void *handle,
						struct pp_smu_nv_clock_table *max_clocks)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
		ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);

	return ret;
}

static int smu_get_uclk_dpm_states(void *handle,
				   unsigned int *clock_values_in_khz,
				   unsigned int *num_states)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->get_uclk_dpm_states)
		ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);

	return ret;
}

static enum amd_pm_state_type smu_get_current_power_state(void *handle)
{
	struct smu_context *smu = handle;
	enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->get_current_power_state)
		pm_state = smu->ppt_funcs->get_current_power_state(smu);

	return pm_state;
}

static int smu_get_dpm_clock_table(void *handle,
				   struct dpm_clocks *clock_table)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->get_dpm_clock_table)
		ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);

	return ret;
}

static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->get_gpu_metrics)
		return -EOPNOTSUPP;

	return smu->ppt_funcs->get_gpu_metrics(smu, table);
}

static int smu_enable_mgpu_fan_boost(void *handle)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->enable_mgpu_fan_boost)
		ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu);

	return ret;
}

static int smu_gfx_state_change_set(void *handle,
				    uint32_t state)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (smu->ppt_funcs->gfx_state_change_set)
		ret = smu->ppt_funcs->gfx_state_change_set(smu, state);

	return ret;
}

int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable)
{
	int ret = 0;

	if (smu->ppt_funcs->smu_handle_passthrough_sbr)
		ret = smu->ppt_funcs->smu_handle_passthrough_sbr(smu, enable);

	return ret;
}

int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc)
{
	int ret = -EOPNOTSUPP;

	if (smu->ppt_funcs &&
	    smu->ppt_funcs->get_ecc_info)
		ret = smu->ppt_funcs->get_ecc_info(smu, umc_ecc);

	return ret;
}

static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size)
{
	struct smu_context *smu = handle;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;

	if (!addr || !size)
		return -EINVAL;

	*addr = NULL;
	*size = 0;
	if (memory_pool->bo) {
		*addr = memory_pool->cpu_addr;
		*size = memory_pool->size;
	}

	return 0;
}

static const struct amd_pm_funcs swsmu_pm_funcs = {
	/* export for sysfs */
	.set_fan_control_mode    = smu_set_fan_control_mode,
	.get_fan_control_mode    = smu_get_fan_control_mode,
	.set_fan_speed_pwm       = smu_set_fan_speed_pwm,
	.get_fan_speed_pwm       = smu_get_fan_speed_pwm,
	.force_clock_level       = smu_force_ppclk_levels,
	.print_clock_levels      = smu_print_ppclk_levels,
	.emit_clock_levels       = smu_emit_ppclk_levels,
	.force_performance_level = smu_force_performance_level,
	.read_sensor             = smu_read_sensor,
	.get_apu_thermal_limit   = smu_get_apu_thermal_limit,
	.set_apu_thermal_limit   = smu_set_apu_thermal_limit,
	.get_performance_level   = smu_get_performance_level,
	.get_current_power_state = smu_get_current_power_state,
	.get_fan_speed_rpm       = smu_get_fan_speed_rpm,
	.set_fan_speed_rpm       = smu_set_fan_speed_rpm,
	.get_pp_num_states       = smu_get_power_num_states,
	.get_pp_table            = smu_sys_get_pp_table,
	.set_pp_table            = smu_sys_set_pp_table,
	.switch_power_profile    = smu_switch_power_profile,
	/* export to amdgpu */
	.dispatch_tasks          = smu_handle_dpm_task,
	.load_firmware           = smu_load_microcode,
	.set_powergating_by_smu  = smu_dpm_set_power_gate,
	.set_power_limit         = smu_set_power_limit,
	.get_power_limit         = smu_get_power_limit,
	.get_power_profile_mode  = smu_get_power_profile_mode,
	.set_power_profile_mode  = smu_set_power_profile_mode,
	.odn_edit_dpm_table      = smu_od_edit_dpm_table,
	.set_mp1_state           = smu_set_mp1_state,
	.gfx_state_change_set    = smu_gfx_state_change_set,
	/* export to DC */
	.get_sclk                         = smu_get_sclk,
	.get_mclk                         = smu_get_mclk,
	.display_configuration_change     = smu_display_configuration_change,
	.get_clock_by_type_with_latency   = smu_get_clock_by_type_with_latency,
	.display_clock_voltage_request    = smu_display_clock_voltage_request,
	.enable_mgpu_fan_boost            = smu_enable_mgpu_fan_boost,
	.set_active_display_count         = smu_set_display_count,
	.set_min_deep_sleep_dcefclk       = smu_set_deep_sleep_dcefclk,
	.get_asic_baco_capability         = smu_get_baco_capability,
	.set_asic_baco_state              = smu_baco_set_state,
	.get_ppfeature_status             = smu_sys_get_pp_feature_mask,
	.set_ppfeature_status             = smu_sys_set_pp_feature_mask,
	.asic_reset_mode_2                = smu_mode2_reset,
	.asic_reset_enable_gfx_features   = smu_enable_gfx_features,
	.set_df_cstate                    = smu_set_df_cstate,
	.set_xgmi_pstate                  = smu_set_xgmi_pstate,
	.get_gpu_metrics                  = smu_sys_get_gpu_metrics,
	.set_watermarks_for_clock_ranges     = smu_set_watermarks_for_clock_ranges,
	.display_disable_memory_clock_switch = smu_display_disable_memory_clock_switch,
	.get_max_sustainable_clocks_by_dc    = smu_get_max_sustainable_clocks_by_dc,
	.get_uclk_dpm_states              = smu_get_uclk_dpm_states,
	.get_dpm_clock_table              = smu_get_dpm_clock_table,
	.get_smu_prv_buf_details          = smu_get_prv_buffer_details,
};

int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
		       uint64_t event_arg)
{
	int ret = -EINVAL;

	if (smu->ppt_funcs->wait_for_event)
		ret = smu->ppt_funcs->wait_for_event(smu, event, event_arg);

	return ret;
}

int smu_stb_collect_info(struct smu_context *smu, void *buf, uint32_t size)
{
	if (!smu->ppt_funcs->stb_collect_info || !smu->stb_context.enabled)
		return -EOPNOTSUPP;

	/* Confirm the buffer allocated is of correct size */
	if (size != smu->stb_context.stb_buf_size)
		return -EINVAL;

	/*
	 * No need to take the smu mutex, as the STB is accessed directly
	 * through MMIO rather than via the SMU messaging route (for now,
	 * at least). For register access, rely on the implementation's
	 * internal locking.
	 */
	return smu->ppt_funcs->stb_collect_info(smu, buf, size);
}
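
/*
 * Illustrative sketch (not part of the driver): callers must size the
 * buffer to exactly stb_context.stb_buf_size, as the debugfs path below
 * does:
 *
 *	void *buf = kvmalloc(smu->stb_context.stb_buf_size, GFP_KERNEL);
 *	int err = buf ? smu_stb_collect_info(smu, buf,
 *					     smu->stb_context.stb_buf_size)
 *		      : -ENOMEM;
 */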

#if defined(CONFIG_DEBUG_FS)

static int smu_stb_debugfs_open(struct inode *inode, struct file *filp)
{
	struct amdgpu_device *adev = filp->f_inode->i_private;
	struct smu_context *smu = adev->powerplay.pp_handle;
	unsigned char *buf;
	int r;

	buf = kvmalloc_array(smu->stb_context.stb_buf_size, sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	r = smu_stb_collect_info(smu, buf, smu->stb_context.stb_buf_size);
	if (r)
		goto out;

	filp->private_data = buf;

	return 0;

out:
	kvfree(buf);
	return r;
}

static ssize_t smu_stb_debugfs_read(struct file *filp, char __user *buf, size_t size,
				    loff_t *pos)
{
	struct amdgpu_device *adev = filp->f_inode->i_private;
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!filp->private_data)
		return -EINVAL;

	return simple_read_from_buffer(buf,
				       size,
				       pos, filp->private_data,
				       smu->stb_context.stb_buf_size);
}

static int smu_stb_debugfs_release(struct inode *inode, struct file *filp)
{
	kvfree(filp->private_data);
	filp->private_data = NULL;

	return 0;
}

/*
 * We have to define not only the read method but also open and release,
 * because .read returns at most PAGE_SIZE of data per call and is
 * therefore invoked multiple times. We allocate the STB buffer in .open
 * and release it in .release.
 */
static const struct file_operations smu_stb_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = smu_stb_debugfs_open,
	.read = smu_stb_debugfs_read,
	.release = smu_stb_debugfs_release,
	.llseek = default_llseek,
};

#endif

void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!smu || !smu->stb_context.stb_buf_size)
		return;

	debugfs_create_file_size("amdgpu_smu_stb_dump",
				 S_IRUSR,
				 adev_to_drm(adev)->primary->debugfs_root,
				 adev,
				 &smu_stb_debugfs_fops,
				 smu->stb_context.stb_buf_size);
#endif
}
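
/*
 * Illustrative sketch (not part of the driver): once registered, the
 * STB dump can be pulled from userspace through debugfs; the DRI minor
 * number below is an assumption and varies per system:
 *
 *	# cat /sys/kernel/debug/dri/0/amdgpu_smu_stb_dump > stb.bin
 */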

int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size)
{
	int ret = 0;

	if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_pages_num)
		ret = smu->ppt_funcs->send_hbm_bad_pages_num(smu, size);

	return ret;
}

int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size)
{
	int ret = 0;

	if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_channel_flag)
		ret = smu->ppt_funcs->send_hbm_bad_channel_flag(smu, size);

	return ret;
}