/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "pp_debug.h"
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "smu_v12_0.h"
#include "soc15_common.h"
#include "atom.h"
#include "renoir_ppt.h"

#include "asic_reg/mp/mp_12_0_0_offset.h"
#include "asic_reg/mp/mp_12_0_0_sh_mask.h"

#define smnMP1_FIRMWARE_FLAGS                                0x3010024

#define mmSMUIO_GFX_MISC_CNTL                                0x00c8
#define mmSMUIO_GFX_MISC_CNTL_BASE_IDX                       0
#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK          0x00000006L
#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT        0x1

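/*
 * Write a message index into the MP1 mailbox register (C2PMSG_66) and
 * return immediately, without polling the response register.
 */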
static int smu_v12_0_send_msg_without_waiting(struct smu_context *smu,
					      uint16_t msg)
{
	struct amdgpu_device *adev = smu->adev;

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
	return 0;
}

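/* Read the argument/return payload the SMU left in C2PMSG_82. */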
static int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg)
{
	struct amdgpu_device *adev = smu->adev;

	*arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
	return 0;
}

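/*
 * Poll the response register (C2PMSG_90) until the firmware posts a
 * status, for at most adev->usec_timeout microseconds.  A value of 0x1
 * means success; anything else is treated as an I/O error.
 */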
static int smu_v12_0_wait_for_response(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t cur_value, i;

	for (i = 0; i < adev->usec_timeout; i++) {
		cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
		if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
			break;
		udelay(1);
	}

	/* timing out here means the firmware never posted a response */
	if (i == adev->usec_timeout)
		return -ETIME;

	return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
}

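/*
 * Send a message to the SMU: translate the generic message ID into the
 * ASIC-specific index, wait for any previous response to land, clear
 * the response register, write the message and wait for the ack.
 */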
static int smu_v12_0_send_msg(struct smu_context *smu, uint16_t msg)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0, index = 0;

	index = smu_msg_get_index(smu, msg);
	if (index < 0)
		return index;

	smu_v12_0_wait_for_response(smu);

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);

	smu_v12_0_send_msg_without_waiting(smu, (uint16_t)index);

	ret = smu_v12_0_wait_for_response(smu);

	if (ret)
		pr_err("Failed to send message 0x%x, response 0x%x\n", index,
		       ret);

	return ret;
}

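/*
 * Like smu_v12_0_send_msg(), but place a 32-bit parameter in C2PMSG_82
 * before writing the message, so the firmware can consume it.
 */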
static int
smu_v12_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
			      uint32_t param)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0, index = 0;

	index = smu_msg_get_index(smu, msg);
	if (index < 0)
		return index;

	ret = smu_v12_0_wait_for_response(smu);
	if (ret)
		pr_err("Failed to send message 0x%x, response 0x%x, param 0x%x\n",
		       index, ret, param);

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);

	smu_v12_0_send_msg_without_waiting(smu, (uint16_t)index);

	ret = smu_v12_0_wait_for_response(smu);
	if (ret)
		pr_err("Failed to send message 0x%x, response 0x%x, param 0x%x\n",
		       index, ret, param);

	return ret;
}

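/*
 * Check that MP1 firmware is up by reading its firmware-flags register
 * through the indirect PCIe interface and testing the
 * INTERRUPTS_ENABLED bit.
 */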
static int smu_v12_0_check_fw_status(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t mp1_fw_flags;

	mp1_fw_flags = RREG32_PCIE(MP1_Public |
		(smnMP1_FIRMWARE_FLAGS & 0xffffffff));

	if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
		MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
		return 0;

	return -EIO;
}

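/*
 * Compare the driver/firmware interface version the SMU reports with
 * the one this driver was built against.  A mismatch is only logged,
 * not fatal; see the comment in the function body.
 */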
static int smu_v12_0_check_fw_version(struct smu_context *smu)
{
	uint32_t if_version = 0xff, smu_version = 0xff;
	uint16_t smu_major;
	uint8_t smu_minor, smu_debug;
	int ret = 0;

	ret = smu_get_smc_version(smu, &if_version, &smu_version);
	if (ret)
		return ret;

	smu_major = (smu_version >> 16) & 0xffff;
	smu_minor = (smu_version >> 8) & 0xff;
	smu_debug = (smu_version >> 0) & 0xff;

	/*
	 * 1. An if_version mismatch is not critical, as our firmware is
	 * designed to be backward compatible.
	 * 2. New firmware usually brings some optimizations, but those are
	 * visible only with the paired driver.
	 * Given the above, we just leave the user a warning message instead
	 * of halting driver loading.
	 */
	if (if_version != smu->smc_if_version) {
		pr_info("smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
			"smu fw version = 0x%08x (%d.%d.%d)\n",
			smu->smc_if_version, if_version,
			smu_version, smu_major, smu_minor, smu_debug);
		pr_warn("SMU driver if version mismatch\n");
	}

	return ret;
}

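/* Power the SDMA engine up or down via the SMU (APU only). */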
static int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate)
{
	if (!(smu->adev->flags & AMD_IS_APU))
		return 0;

	if (gate)
		return smu_send_smc_msg(smu, SMU_MSG_PowerDownSdma);
	else
		return smu_send_smc_msg(smu, SMU_MSG_PowerUpSdma);
}

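/* Power the VCN block up or down via the SMU (APU only). */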
static int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate)
{
	if (!(smu->adev->flags & AMD_IS_APU))
		return 0;

	if (gate)
		return smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
	else
		return smu_send_smc_msg(smu, SMU_MSG_PowerUpVcn);
}

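/* Tell the SMU to enable or disable GFX power gating (CGPG), if supported. */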
static int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
{
	if (!(smu->adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
		return 0;

	return smu_v12_0_send_msg_with_param(smu,
		SMU_MSG_SetGfxCGPG, enable ? 1 : 0);
}

/**
 * smu_v12_0_get_gfxoff_status - get gfxoff status
 *
 * @smu: smu_context pointer
 *
 * Read the current GFXOFF state from the SMUIO GFX_MISC_CNTL register.
 *
 * Returns 0=GFXOFF(default).
 * Returns 1=Transition out of GFX State.
 * Returns 2=Not in GFXOFF.
 * Returns 3=Transition into GFXOFF.
 */
static uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu)
{
	uint32_t reg;
	uint32_t gfxOff_Status = 0;
	struct amdgpu_device *adev = smu->adev;

	reg = RREG32_SOC15(SMUIO, 0, mmSMUIO_GFX_MISC_CNTL);
	gfxOff_Status = (reg & SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK)
		>> SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT;

	return gfxOff_Status;
}

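/*
 * Allow or disallow GFXOFF, then poll smu_v12_0_get_gfxoff_status()
 * until the GFX block reaches the expected state (0 = in GFXOFF,
 * 2 = not in GFXOFF) or the polling loop times out.
 */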
static int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable)
{
	int ret = 0, timeout = 500;

	if (enable) {
		ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff);

		/* confirm gfx is back to "off" state, timeout is 5 seconds */
		while (smu_v12_0_get_gfxoff_status(smu) != 0) {
			msleep(10);
			timeout--;
			if (timeout == 0) {
				DRM_ERROR("timed out waiting for gfxoff to be enabled!\n");
				break;
			}
		}
	} else {
		ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff);

		/* confirm gfx is back to "on" state, timeout is 0.5 second */
		while (smu_v12_0_get_gfxoff_status(smu) != 2) {
			msleep(1);
			timeout--;
			if (timeout == 0) {
				DRM_ERROR("timed out waiting for gfxoff to be disabled!\n");
				break;
			}
		}
	}

	return ret;
}

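/* Allocate the driver-side array of SMU table descriptors. */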
static int smu_v12_0_init_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = NULL;

	if (smu_table->tables || smu_table->table_count == 0)
		return -EINVAL;

	tables = kcalloc(SMU_TABLE_COUNT, sizeof(struct smu_table),
			 GFP_KERNEL);
	if (!tables)
		return -ENOMEM;

	smu_table->tables = tables;

	return smu_tables_init(smu, tables);
}

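/* Free the table descriptors and the cached DPM clocks table. */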
static int smu_v12_0_fini_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;

	if (!smu_table->tables || smu_table->table_count == 0)
		return -EINVAL;

	kfree(smu_table->clocks_table);
	kfree(smu_table->tables);

	smu_table->clocks_table = NULL;
	smu_table->tables = NULL;

	return 0;
}

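/* Fetch the DPM clocks table from the SMU into the driver's cache. */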
static int smu_v12_0_populate_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *table = NULL;

	/*
	 * Check the array itself; the address of an array element is
	 * never NULL, so testing "table" after indexing is dead code.
	 */
	if (!smu_table->tables)
		return -EINVAL;

	table = &smu_table->tables[SMU_TABLE_DPMCLOCKS];
	if (!table->cpu_addr)
		return -EINVAL;

	return smu_update_table(smu, SMU_TABLE_DPMCLOCKS, 0,
				smu_table->clocks_table, false);
}

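/*
 * Query the hardware min/max for a clock domain.  GFXCLK/SCLK limits
 * come from dedicated SMU messages; UCLK limits come through
 * smu_get_dpm_uclk_limited().
 */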
static int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
						 uint32_t *min, uint32_t *max)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (max) {
		switch (clk_type) {
		case SMU_GFXCLK:
		case SMU_SCLK:
			ret = smu_send_smc_msg(smu, SMU_MSG_GetMaxGfxclkFrequency);
			if (ret) {
				pr_err("Attempt to get max GFX frequency from SMC failed!\n");
				goto failed;
			}
			ret = smu_read_smc_arg(smu, max);
			if (ret)
				goto failed;
			break;
		case SMU_UCLK:
			ret = smu_get_dpm_uclk_limited(smu, max, true);
			if (ret)
				goto failed;
			break;
		default:
			ret = -EINVAL;
			goto failed;
		}
	}

	if (min) {
		switch (clk_type) {
		case SMU_GFXCLK:
		case SMU_SCLK:
			ret = smu_send_smc_msg(smu, SMU_MSG_GetMinGfxclkFrequency);
			if (ret) {
				pr_err("Attempt to get min GFX frequency from SMC failed!\n");
				goto failed;
			}
			ret = smu_read_smc_arg(smu, min);
			if (ret)
				goto failed;
			break;
		case SMU_UCLK:
			ret = smu_get_dpm_uclk_limited(smu, min, false);
			if (ret)
				goto failed;
			break;
		default:
			ret = -EINVAL;
			goto failed;
		}
	}
failed:
	mutex_unlock(&smu->mutex);
	return ret;
}

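/* Common SMU v12 callbacks, shared by every ASIC handled by this file. */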
static const struct smu_funcs smu_v12_0_funcs = {
	.check_fw_status = smu_v12_0_check_fw_status,
	.check_fw_version = smu_v12_0_check_fw_version,
	.powergate_sdma = smu_v12_0_powergate_sdma,
	.powergate_vcn = smu_v12_0_powergate_vcn,
	.send_smc_msg = smu_v12_0_send_msg,
	.send_smc_msg_with_param = smu_v12_0_send_msg_with_param,
	.read_smc_arg = smu_v12_0_read_arg,
	.set_gfx_cgpg = smu_v12_0_set_gfx_cgpg,
	.gfx_off_control = smu_v12_0_gfx_off_control,
	.init_smc_tables = smu_v12_0_init_smc_tables,
	.fini_smc_tables = smu_v12_0_fini_smc_tables,
	.populate_smc_tables = smu_v12_0_populate_smc_tables,
	.get_dpm_ultimate_freq = smu_v12_0_get_dpm_ultimate_freq,
};

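/* Hook up the common v12 callbacks plus the ASIC-specific ppt funcs. */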
void smu_v12_0_set_smu_funcs(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	smu->funcs = &smu_v12_0_funcs;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		renoir_set_ppt_funcs(smu);
		break;
	default:
		pr_warn("Unknown asic for smu12\n");
	}
}