/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "smumgr.h"
#include "vega20_inc.h"
#include "soc15_common.h"
#include "vega20_smumgr.h"
#include "vega20_ppsmc.h"
#include "smu11_driver_if.h"
#include "ppatomctrl.h"
#include "pp_debug.h"
#include "smu_ucode_xfer_vi.h"
#include "smu7_smumgr.h"
#include "vega20_hwmgr.h"

#include "smu_v11_0_i2c.h"

/* MP Apertures */
#define MP0_Public			0x03800000
#define MP0_SRAM			0x03900000
#define MP1_Public			0x03b00000
#define MP1_SRAM			0x03c00004

/* address block */
#define smnMP1_FIRMWARE_FLAGS		0x3010024
#define smnMP0_FW_INTF			0x30101c0
#define smnMP1_PUB_CTRL			0x3010b14

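/*
 * Check if the SMC firmware is up and running.
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   true if the MP1 firmware flags report interrupts enabled,
 *           false otherwise.
 */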
bool vega20_is_smc_ram_running(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = hwmgr->adev;
	uint32_t mp1_fw_flags;

	mp1_fw_flags = RREG32_PCIE(MP1_Public |
				   (smnMP1_FIRMWARE_FLAGS & 0xffffffff));

	if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
	    MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
		return true;

	return false;
}

/*
 * Wait for the SMC to respond to the previous message,
 * then read back its response.
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   the response value the SMC wrote to the C2PMSG_90 register.
 */
static uint32_t vega20_wait_for_response(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = hwmgr->adev;
	uint32_t reg;

	reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);

	phm_wait_for_register_unequal(hwmgr, reg,
			0, MP1_C2PMSG_90__CONTENT_MASK);

	return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
}

/*
 * Send a message to the SMC without waiting for its response.
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @param    msg    the message to send.
 * @return   Always return 0.
 */
static int vega20_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
		uint16_t msg)
{
	struct amdgpu_device *adev = hwmgr->adev;

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);

	return 0;
}

/*
 * Send a message to the SMC and wait for its response.
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @param    msg    the message to send.
 * @return   0 on success, -EIO if the SMC response is not PPSMC_Result_OK.
 */
static int vega20_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
	struct amdgpu_device *adev = hwmgr->adev;
	int ret = 0;

	vega20_wait_for_response(hwmgr);

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);

	vega20_send_msg_to_smc_without_waiting(hwmgr, msg);

	ret = vega20_wait_for_response(hwmgr);
	if (ret != PPSMC_Result_OK)
		pr_err("Failed to send message 0x%x, response 0x%x\n", msg, ret);

	return (ret == PPSMC_Result_OK) ? 0 : -EIO;
}

/*
 * Send a message to the SMC with a parameter.
 *
 * @param    hwmgr      the address of the powerplay hardware manager.
 * @param    msg        the message to send.
 * @param    parameter  the parameter to send.
 * @return   0 on success, -EIO if the SMC response is not PPSMC_Result_OK.
 */
static int vega20_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
		uint16_t msg, uint32_t parameter)
{
	struct amdgpu_device *adev = hwmgr->adev;
	int ret = 0;

	vega20_wait_for_response(hwmgr);

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter);

	vega20_send_msg_to_smc_without_waiting(hwmgr, msg);

	ret = vega20_wait_for_response(hwmgr);
	if (ret != PPSMC_Result_OK)
		pr_err("Failed to send message 0x%x, response 0x%x\n", msg, ret);

	return (ret == PPSMC_Result_OK) ? 0 : -EIO;
}

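/*
 * Read back the argument (or return value) of the last SMC message
 * from the C2PMSG_82 message-argument register.
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   the contents of the C2PMSG_82 register.
 */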
static uint32_t vega20_get_argument(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = hwmgr->adev;

	return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
}

/*
 * Copy a table from the SMC into the driver FB.
 *
 * @param   hwmgr     the address of the HW manager.
 * @param   table     the driver-side buffer to copy the table into.
 * @param   table_id  the SMU table ID to copy from.
 * @return  0 on success, a negative error code otherwise.
 */
static int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr,
				      uint8_t *table, int16_t table_id)
{
	struct vega20_smumgr *priv =
			(struct vega20_smumgr *)(hwmgr->smu_backend);
	struct amdgpu_device *adev = hwmgr->adev;
	int ret = 0;

	PP_ASSERT_WITH_CODE(table_id < TABLE_COUNT,
			"Invalid SMU Table ID!", return -EINVAL);
	PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
			"Invalid SMU Table version!", return -EINVAL);
	PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
			"Invalid SMU Table Length!", return -EINVAL);

	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrHigh,
			upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),
			NULL)) == 0,
			"[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrLow,
			lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),
			NULL)) == 0,
			"[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_TransferTableSmu2Dram, table_id, NULL)) == 0,
			"[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
			return ret);

	/* flush hdp cache */
	amdgpu_asic_flush_hdp(adev, NULL);

	memcpy(table, priv->smu_tables.entry[table_id].table,
			priv->smu_tables.entry[table_id].size);

	return 0;
}

/*
 * Copy a table from the driver FB into the SMC.
 *
 * @param   hwmgr     the address of the HW manager.
 * @param   table     the driver-side buffer holding the table to copy.
 * @param   table_id  the SMU table ID to copy to.
 * @return  0 on success, a negative error code otherwise.
 */
static int vega20_copy_table_to_smc(struct pp_hwmgr *hwmgr,
				    uint8_t *table, int16_t table_id)
{
	struct vega20_smumgr *priv =
			(struct vega20_smumgr *)(hwmgr->smu_backend);
	struct amdgpu_device *adev = hwmgr->adev;
	int ret = 0;

	PP_ASSERT_WITH_CODE(table_id < TABLE_COUNT,
			"Invalid SMU Table ID!", return -EINVAL);
	PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
			"Invalid SMU Table version!", return -EINVAL);
	PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
			"Invalid SMU Table Length!", return -EINVAL);

	memcpy(priv->smu_tables.entry[table_id].table, table,
			priv->smu_tables.entry[table_id].size);

	amdgpu_asic_flush_hdp(adev, NULL);

	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrHigh,
			upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),
			NULL)) == 0,
			"[CopyTableToSMC] Attempt to Set Dram Addr High Failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrLow,
			lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),
			NULL)) == 0,
			"[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_TransferTableDram2Smu, table_id, NULL)) == 0,
			"[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!",
			return ret);

	return 0;
}

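/*
 * Upload activity monitor coefficients for a workload type to the SMC.
 * The data is staged in the ActivityMonitor table buffer and transferred
 * with the workload type encoded in the upper 16 bits of the
 * transfer-table parameter.
 *
 * @param    hwmgr          the address of the powerplay hardware manager.
 * @param    table          the coefficient data to upload.
 * @param    workload_type  the workload type the coefficients apply to.
 * @return   0 on success, a negative error code otherwise.
 */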
int vega20_set_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
		uint8_t *table, uint16_t workload_type)
{
	struct vega20_smumgr *priv =
			(struct vega20_smumgr *)(hwmgr->smu_backend);
	struct amdgpu_device *adev = hwmgr->adev;
	int ret = 0;

	memcpy(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table, table,
			priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size);

	amdgpu_asic_flush_hdp(adev, NULL);

	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrHigh,
			upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr),
			NULL)) == 0,
			"[SetActivityMonitor] Attempt to Set Dram Addr High Failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrLow,
			lower_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr),
			NULL)) == 0,
			"[SetActivityMonitor] Attempt to Set Dram Addr Low Failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_TransferTableDram2Smu,
			TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16),
			NULL)) == 0,
			"[SetActivityMonitor] Attempt to Transfer Table To SMU Failed!",
			return ret);

	return 0;
}

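/*
 * Read the activity monitor coefficients for a workload type back from
 * the SMC into the supplied buffer.
 *
 * @param    hwmgr          the address of the powerplay hardware manager.
 * @param    table          the buffer to copy the coefficient data into.
 * @param    workload_type  the workload type the coefficients apply to.
 * @return   0 on success, a negative error code otherwise.
 */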
int vega20_get_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
		uint8_t *table, uint16_t workload_type)
{
	struct vega20_smumgr *priv =
			(struct vega20_smumgr *)(hwmgr->smu_backend);
	struct amdgpu_device *adev = hwmgr->adev;
	int ret = 0;

	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrHigh,
			upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr),
			NULL)) == 0,
			"[GetActivityMonitor] Attempt to Set Dram Addr High Failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrLow,
			lower_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr),
			NULL)) == 0,
			"[GetActivityMonitor] Attempt to Set Dram Addr Low Failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_TransferTableSmu2Dram,
			TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16), NULL)) == 0,
			"[GetActivityMonitor] Attempt to Transfer Table From SMU Failed!",
			return ret);

	/* flush hdp cache */
	amdgpu_asic_flush_hdp(adev, NULL);

	memcpy(table, priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table,
			priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size);

	return 0;
}

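/*
 * Enable or disable a set of SMC features. The 64-bit feature mask is
 * split into low and high 32-bit halves, each sent in its own message.
 *
 * @param    hwmgr         the address of the powerplay hardware manager.
 * @param    enable        true to enable the features, false to disable them.
 * @param    feature_mask  the 64-bit mask of features to toggle.
 * @return   0 on success, a negative error code otherwise.
 */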
int vega20_enable_smc_features(struct pp_hwmgr *hwmgr,
		bool enable, uint64_t feature_mask)
{
	uint32_t smu_features_low, smu_features_high;
	int ret = 0;

	smu_features_low = (uint32_t)((feature_mask & SMU_FEATURES_LOW_MASK) >> SMU_FEATURES_LOW_SHIFT);
	smu_features_high = (uint32_t)((feature_mask & SMU_FEATURES_HIGH_MASK) >> SMU_FEATURES_HIGH_SHIFT);

	if (enable) {
		PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low, NULL)) == 0,
				"[EnableDisableSMCFeatures] Attempt to enable SMU features Low failed!",
				return ret);
		PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high, NULL)) == 0,
				"[EnableDisableSMCFeatures] Attempt to enable SMU features High failed!",
				return ret);
	} else {
		PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low, NULL)) == 0,
				"[EnableDisableSMCFeatures] Attempt to disable SMU features Low failed!",
				return ret);
		PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high, NULL)) == 0,
				"[EnableDisableSMCFeatures] Attempt to disable SMU features High failed!",
				return ret);
	}

	return 0;
}

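/*
 * Query which SMC features are currently enabled. The low and high
 * 32-bit halves are fetched separately and recombined into one
 * 64-bit mask.
 *
 * @param    hwmgr             the address of the powerplay hardware manager.
 * @param    features_enabled  output; the 64-bit enabled-feature mask.
 * @return   0 on success, a negative error code otherwise.
 */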
int vega20_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
		uint64_t *features_enabled)
{
	uint32_t smc_features_low, smc_features_high;
	int ret = 0;

	if (features_enabled == NULL)
		return -EINVAL;

	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
			PPSMC_MSG_GetEnabledSmuFeaturesLow,
			&smc_features_low)) == 0,
			"[GetEnabledSMCFeatures] Attempt to get SMU features Low failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
			PPSMC_MSG_GetEnabledSmuFeaturesHigh,
			&smc_features_high)) == 0,
			"[GetEnabledSMCFeatures] Attempt to get SMU features High failed!",
			return ret);

	*features_enabled = ((((uint64_t)smc_features_low << SMU_FEATURES_LOW_SHIFT) & SMU_FEATURES_LOW_MASK) |
			(((uint64_t)smc_features_high << SMU_FEATURES_HIGH_SHIFT) & SMU_FEATURES_HIGH_MASK));

	return 0;
}

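/*
 * Program the SMC with the DRAM address of the tools (pmstatuslog)
 * buffer, if one was allocated.
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   0 on success, a messaging error code otherwise.
 */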
static int vega20_set_tools_address(struct pp_hwmgr *hwmgr)
{
	struct vega20_smumgr *priv =
			(struct vega20_smumgr *)(hwmgr->smu_backend);
	int ret = 0;

	if (priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr) {
		ret = smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetToolsDramAddrHigh,
				upper_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr),
				NULL);
		if (!ret)
			ret = smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetToolsDramAddrLow,
					lower_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr),
					NULL);
	}

	return ret;
}

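/*
 * Program the SMC with the DRAM address of the driver's pptable buffer.
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   0 on success, a messaging error code otherwise.
 */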
int vega20_set_pptable_driver_address(struct pp_hwmgr *hwmgr)
{
	struct vega20_smumgr *priv =
			(struct vega20_smumgr *)(hwmgr->smu_backend);
	int ret = 0;

	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrHigh,
			upper_32_bits(priv->smu_tables.entry[TABLE_PPTABLE].mc_addr),
			NULL)) == 0,
			"[SetPPtableDriverAddress] Attempt to Set Dram Addr High Failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrLow,
			lower_32_bits(priv->smu_tables.entry[TABLE_PPTABLE].mc_addr),
			NULL)) == 0,
			"[SetPPtableDriverAddress] Attempt to Set Dram Addr Low Failed!",
			return ret);

	return ret;
}

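/*
 * Allocate the SMU backend private data and the VRAM buffers backing
 * each SMU table (pptable, watermarks, pmstatuslog, OverDrive,
 * SmuMetrics, ActivityMonitor), then initialize the SMU I2C controller.
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   0 on success, -EINVAL or -ENOMEM otherwise.
 */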
static int vega20_smu_init(struct pp_hwmgr *hwmgr)
{
	struct vega20_smumgr *priv;
	unsigned long tools_size = 0x19000;
	int ret = 0;
	struct amdgpu_device *adev = hwmgr->adev;

	struct cgs_firmware_info info = {0};

	ret = cgs_get_firmware_info(hwmgr->device,
				smu7_convert_fw_type_to_cgs(UCODE_ID_SMU),
				&info);
	if (ret || !info.kptr)
		return -EINVAL;

	priv = kzalloc(sizeof(struct vega20_smumgr), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	hwmgr->smu_backend = priv;

	/* allocate space for pptable */
	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
			sizeof(PPTable_t),
			PAGE_SIZE,
			AMDGPU_GEM_DOMAIN_VRAM,
			&priv->smu_tables.entry[TABLE_PPTABLE].handle,
			&priv->smu_tables.entry[TABLE_PPTABLE].mc_addr,
			&priv->smu_tables.entry[TABLE_PPTABLE].table);
	if (ret)
		goto free_backend;

	priv->smu_tables.entry[TABLE_PPTABLE].version = 0x01;
	priv->smu_tables.entry[TABLE_PPTABLE].size = sizeof(PPTable_t);

	/* allocate space for watermarks table */
	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
			sizeof(Watermarks_t),
			PAGE_SIZE,
			AMDGPU_GEM_DOMAIN_VRAM,
			&priv->smu_tables.entry[TABLE_WATERMARKS].handle,
			&priv->smu_tables.entry[TABLE_WATERMARKS].mc_addr,
			&priv->smu_tables.entry[TABLE_WATERMARKS].table);
	if (ret)
		goto err0;

	priv->smu_tables.entry[TABLE_WATERMARKS].version = 0x01;
	priv->smu_tables.entry[TABLE_WATERMARKS].size = sizeof(Watermarks_t);

	/* allocate space for pmstatuslog table */
	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
			tools_size,
			PAGE_SIZE,
			AMDGPU_GEM_DOMAIN_VRAM,
			&priv->smu_tables.entry[TABLE_PMSTATUSLOG].handle,
			&priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr,
			&priv->smu_tables.entry[TABLE_PMSTATUSLOG].table);
	if (ret)
		goto err1;

	priv->smu_tables.entry[TABLE_PMSTATUSLOG].version = 0x01;
	priv->smu_tables.entry[TABLE_PMSTATUSLOG].size = tools_size;

	/* allocate space for OverDrive table */
	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
			sizeof(OverDriveTable_t),
			PAGE_SIZE,
			AMDGPU_GEM_DOMAIN_VRAM,
			&priv->smu_tables.entry[TABLE_OVERDRIVE].handle,
			&priv->smu_tables.entry[TABLE_OVERDRIVE].mc_addr,
			&priv->smu_tables.entry[TABLE_OVERDRIVE].table);
	if (ret)
		goto err2;

	priv->smu_tables.entry[TABLE_OVERDRIVE].version = 0x01;
	priv->smu_tables.entry[TABLE_OVERDRIVE].size = sizeof(OverDriveTable_t);

	/* allocate space for SmuMetrics table */
	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
			sizeof(SmuMetrics_t),
			PAGE_SIZE,
			AMDGPU_GEM_DOMAIN_VRAM,
			&priv->smu_tables.entry[TABLE_SMU_METRICS].handle,
			&priv->smu_tables.entry[TABLE_SMU_METRICS].mc_addr,
			&priv->smu_tables.entry[TABLE_SMU_METRICS].table);
	if (ret)
		goto err3;

	priv->smu_tables.entry[TABLE_SMU_METRICS].version = 0x01;
	priv->smu_tables.entry[TABLE_SMU_METRICS].size = sizeof(SmuMetrics_t);

	/* allocate space for ActivityMonitor table */
	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
			sizeof(DpmActivityMonitorCoeffInt_t),
			PAGE_SIZE,
			AMDGPU_GEM_DOMAIN_VRAM,
			&priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].handle,
			&priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr,
			&priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table);
	if (ret)
		goto err4;

	priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].version = 0x01;
	priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size = sizeof(DpmActivityMonitorCoeffInt_t);

	ret = smu_v11_0_i2c_control_init(&adev->pm.smu_i2c);
	if (ret)
		goto err5;

	return 0;

err5:
	amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].handle,
			&priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr,
			&priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table);
err4:
	amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_SMU_METRICS].handle,
			&priv->smu_tables.entry[TABLE_SMU_METRICS].mc_addr,
			&priv->smu_tables.entry[TABLE_SMU_METRICS].table);
err3:
	amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_OVERDRIVE].handle,
			&priv->smu_tables.entry[TABLE_OVERDRIVE].mc_addr,
			&priv->smu_tables.entry[TABLE_OVERDRIVE].table);
err2:
	amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PMSTATUSLOG].handle,
			&priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr,
			&priv->smu_tables.entry[TABLE_PMSTATUSLOG].table);
err1:
	amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_WATERMARKS].handle,
			&priv->smu_tables.entry[TABLE_WATERMARKS].mc_addr,
			&priv->smu_tables.entry[TABLE_WATERMARKS].table);
err0:
	amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PPTABLE].handle,
			&priv->smu_tables.entry[TABLE_PPTABLE].mc_addr,
			&priv->smu_tables.entry[TABLE_PPTABLE].table);
free_backend:
	kfree(hwmgr->smu_backend);
	hwmgr->smu_backend = NULL;	/* avoid leaving a dangling pointer */

	return -EINVAL;
}

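/*
 * Tear down the SMU backend: shut down the SMU I2C controller and free
 * the VRAM buffers backing the SMU tables.
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   Always return 0.
 */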
static int vega20_smu_fini(struct pp_hwmgr *hwmgr)
{
	struct vega20_smumgr *priv =
			(struct vega20_smumgr *)(hwmgr->smu_backend);
	struct amdgpu_device *adev = hwmgr->adev;

	smu_v11_0_i2c_control_fini(&adev->pm.smu_i2c);

	if (priv) {
		amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PPTABLE].handle,
				&priv->smu_tables.entry[TABLE_PPTABLE].mc_addr,
				&priv->smu_tables.entry[TABLE_PPTABLE].table);
		amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_WATERMARKS].handle,
				&priv->smu_tables.entry[TABLE_WATERMARKS].mc_addr,
				&priv->smu_tables.entry[TABLE_WATERMARKS].table);
		amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PMSTATUSLOG].handle,
				&priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr,
				&priv->smu_tables.entry[TABLE_PMSTATUSLOG].table);
		amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_OVERDRIVE].handle,
				&priv->smu_tables.entry[TABLE_OVERDRIVE].mc_addr,
				&priv->smu_tables.entry[TABLE_OVERDRIVE].table);
		amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_SMU_METRICS].handle,
				&priv->smu_tables.entry[TABLE_SMU_METRICS].mc_addr,
				&priv->smu_tables.entry[TABLE_SMU_METRICS].table);
		amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].handle,
				&priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr,
				&priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table);
		kfree(hwmgr->smu_backend);
		hwmgr->smu_backend = NULL;
	}

	return 0;
}

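/*
 * Verify the SMC firmware is running and program the tools DRAM
 * address.
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   0 on success, an error code otherwise.
 */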
static int vega20_start_smu(struct pp_hwmgr *hwmgr)
{
	int ret;

	ret = vega20_is_smc_ram_running(hwmgr);
	PP_ASSERT_WITH_CODE(ret,
			"[Vega20StartSmu] SMC is not running!",
			return -EINVAL);

	ret = vega20_set_tools_address(hwmgr);
	PP_ASSERT_WITH_CODE(!ret,
			"[Vega20StartSmu] Failed to set tools address!",
			return ret);

	return 0;
}

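/*
 * Check whether any DPM feature is currently enabled on the SMC side.
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   true if any feature in SMC_DPM_FEATURES is enabled,
 *           false otherwise.
 */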
static bool vega20_is_dpm_running(struct pp_hwmgr *hwmgr)
{
	uint64_t features_enabled = 0;

	vega20_get_enabled_smc_features(hwmgr, &features_enabled);

	if (features_enabled & SMC_DPM_FEATURES)
		return true;
	else
		return false;
}

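/*
 * Transfer a table between the driver and the SMC in the requested
 * direction.
 *
 * @param    hwmgr     the address of the powerplay hardware manager.
 * @param    table     the driver-side table buffer.
 * @param    table_id  the SMU table ID to transfer.
 * @param    rw        true to read the table from the SMC,
 *                     false to write it to the SMC.
 * @return   0 on success, a negative error code otherwise.
 */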
static int vega20_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table,
				    uint16_t table_id, bool rw)
{
	int ret;

	if (rw)
		ret = vega20_copy_table_from_smc(hwmgr, table, table_id);
	else
		ret = vega20_copy_table_to_smc(hwmgr, table, table_id);

	return ret;
}

const struct pp_smumgr_func vega20_smu_funcs = {
	.name = "vega20_smu",
	.smu_init = &vega20_smu_init,
	.smu_fini = &vega20_smu_fini,
	.start_smu = &vega20_start_smu,
	.request_smu_load_specific_fw = NULL,
	.send_msg_to_smc = &vega20_send_msg_to_smc,
	.send_msg_to_smc_with_parameter = &vega20_send_msg_to_smc_with_parameter,
	.download_pptable_settings = NULL,
	.upload_pptable_settings = NULL,
	.is_dpm_running = vega20_is_dpm_running,
	.get_argument = vega20_get_argument,
	.smc_table_manager = vega20_smc_table_manager,
};