1 /*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24 #include <linux/delay.h>
25 #include <linux/kernel.h>
26 #include <linux/module.h>
27 #include <linux/slab.h>
28 #include <linux/types.h>
29 #include <drm/amdgpu_drm.h>
30 #include "pp_instance.h"
31 #include "smumgr.h"
32 #include "cgs_common.h"
33
34 MODULE_FIRMWARE("amdgpu/topaz_smc.bin");
35 MODULE_FIRMWARE("amdgpu/topaz_k_smc.bin");
36 MODULE_FIRMWARE("amdgpu/tonga_smc.bin");
37 MODULE_FIRMWARE("amdgpu/tonga_k_smc.bin");
38 MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
39 MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
40 MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
41 MODULE_FIRMWARE("amdgpu/polaris10_k_smc.bin");
42 MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
43 MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin");
44 MODULE_FIRMWARE("amdgpu/polaris11_k_smc.bin");
45 MODULE_FIRMWARE("amdgpu/polaris12_smc.bin");
46 MODULE_FIRMWARE("amdgpu/vega10_smc.bin");
47 MODULE_FIRMWARE("amdgpu/vega10_acg_smc.bin");
48
smum_early_init(struct pp_instance * handle)49 int smum_early_init(struct pp_instance *handle)
50 {
51 struct pp_smumgr *smumgr;
52
53 if (handle == NULL)
54 return -EINVAL;
55
56 smumgr = kzalloc(sizeof(struct pp_smumgr), GFP_KERNEL);
57 if (smumgr == NULL)
58 return -ENOMEM;
59
60 smumgr->device = handle->device;
61 smumgr->chip_family = handle->chip_family;
62 smumgr->chip_id = handle->chip_id;
63 smumgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
64 smumgr->reload_fw = 1;
65 handle->smu_mgr = smumgr;
66
67 switch (smumgr->chip_family) {
68 case AMDGPU_FAMILY_CZ:
69 smumgr->smumgr_funcs = &cz_smu_funcs;
70 break;
71 case AMDGPU_FAMILY_VI:
72 switch (smumgr->chip_id) {
73 case CHIP_TOPAZ:
74 smumgr->smumgr_funcs = &iceland_smu_funcs;
75 break;
76 case CHIP_TONGA:
77 smumgr->smumgr_funcs = &tonga_smu_funcs;
78 break;
79 case CHIP_FIJI:
80 smumgr->smumgr_funcs = &fiji_smu_funcs;
81 break;
82 case CHIP_POLARIS11:
83 case CHIP_POLARIS10:
84 case CHIP_POLARIS12:
85 smumgr->smumgr_funcs = &polaris10_smu_funcs;
86 break;
87 default:
88 return -EINVAL;
89 }
90 break;
91 case AMDGPU_FAMILY_AI:
92 switch (smumgr->chip_id) {
93 case CHIP_VEGA10:
94 smumgr->smumgr_funcs = &vega10_smu_funcs;
95 break;
96 default:
97 return -EINVAL;
98 }
99 break;
100 case AMDGPU_FAMILY_RV:
101 switch (smumgr->chip_id) {
102 case CHIP_RAVEN:
103 smumgr->smumgr_funcs = &rv_smu_funcs;
104 break;
105 default:
106 return -EINVAL;
107 }
108 break;
109 default:
110 kfree(smumgr);
111 return -EINVAL;
112 }
113
114 return 0;
115 }
116
smum_thermal_avfs_enable(struct pp_hwmgr * hwmgr,void * input,void * output,void * storage,int result)117 int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr,
118 void *input, void *output, void *storage, int result)
119 {
120 if (NULL != hwmgr->smumgr->smumgr_funcs->thermal_avfs_enable)
121 return hwmgr->smumgr->smumgr_funcs->thermal_avfs_enable(hwmgr);
122
123 return 0;
124 }
125
smum_thermal_setup_fan_table(struct pp_hwmgr * hwmgr,void * input,void * output,void * storage,int result)126 int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
127 void *input, void *output, void *storage, int result)
128 {
129 if (NULL != hwmgr->smumgr->smumgr_funcs->thermal_setup_fan_table)
130 return hwmgr->smumgr->smumgr_funcs->thermal_setup_fan_table(hwmgr);
131
132 return 0;
133 }
134
smum_update_sclk_threshold(struct pp_hwmgr * hwmgr)135 int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr)
136 {
137
138 if (NULL != hwmgr->smumgr->smumgr_funcs->update_sclk_threshold)
139 return hwmgr->smumgr->smumgr_funcs->update_sclk_threshold(hwmgr);
140
141 return 0;
142 }
143
/*
 * Refresh one SMC table identified by @type via the backend hook;
 * no-op (0) if the backend does not implement it.
 */
int smum_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
{
	if (!hwmgr->smumgr->smumgr_funcs->update_smc_table)
		return 0;

	return hwmgr->smumgr->smumgr_funcs->update_smc_table(hwmgr, type);
}
152
/*
 * Query the backend for the offset of @member within table @type.
 * Returns 0 when the backend has no get_offsetof hook.  Note the hook
 * takes only (type, member) — it does not receive @smumgr.
 */
uint32_t smum_get_offsetof(struct pp_smumgr *smumgr, uint32_t type, uint32_t member)
{
	if (!smumgr->smumgr_funcs->get_offsetof)
		return 0;

	return smumgr->smumgr_funcs->get_offsetof(type, member);
}
160
smum_process_firmware_header(struct pp_hwmgr * hwmgr)161 int smum_process_firmware_header(struct pp_hwmgr *hwmgr)
162 {
163 if (NULL != hwmgr->smumgr->smumgr_funcs->process_firmware_header)
164 return hwmgr->smumgr->smumgr_funcs->process_firmware_header(hwmgr);
165 return 0;
166 }
167
smum_get_argument(struct pp_smumgr * smumgr)168 int smum_get_argument(struct pp_smumgr *smumgr)
169 {
170 if (NULL != smumgr->smumgr_funcs->get_argument)
171 return smumgr->smumgr_funcs->get_argument(smumgr);
172
173 return 0;
174 }
175
/*
 * Translate the generic constant @value into the backend-specific
 * definition; returns 0 when the backend has no hook.  The hook takes
 * only the value — it does not receive @smumgr.
 */
uint32_t smum_get_mac_definition(struct pp_smumgr *smumgr, uint32_t value)
{
	if (!smumgr->smumgr_funcs->get_mac_definition)
		return 0;

	return smumgr->smumgr_funcs->get_mac_definition(value);
}
183
smum_download_powerplay_table(struct pp_smumgr * smumgr,void ** table)184 int smum_download_powerplay_table(struct pp_smumgr *smumgr,
185 void **table)
186 {
187 if (NULL != smumgr->smumgr_funcs->download_pptable_settings)
188 return smumgr->smumgr_funcs->download_pptable_settings(smumgr,
189 table);
190 return 0;
191 }
192
smum_upload_powerplay_table(struct pp_smumgr * smumgr)193 int smum_upload_powerplay_table(struct pp_smumgr *smumgr)
194 {
195 if (NULL != smumgr->smumgr_funcs->upload_pptable_settings)
196 return smumgr->smumgr_funcs->upload_pptable_settings(smumgr);
197
198 return 0;
199 }
200
/*
 * Send message @msg to the SMU.  Unlike the optional hooks above, a
 * missing handler (or NULL manager) is an error, not a no-op.
 */
int smum_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
{
	if (!smumgr || !smumgr->smumgr_funcs->send_msg_to_smc)
		return -EINVAL;

	return smumgr->smumgr_funcs->send_msg_to_smc(smumgr, msg);
}
208
/*
 * Send message @msg with a 32-bit @parameter to the SMU.  A missing
 * handler (or NULL manager) is an error, not a no-op.
 */
int smum_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
					uint16_t msg, uint32_t parameter)
{
	if (!smumgr ||
	    !smumgr->smumgr_funcs->send_msg_to_smc_with_parameter)
		return -EINVAL;

	return smumgr->smumgr_funcs->send_msg_to_smc_with_parameter(
						smumgr, msg, parameter);
}
218
219 /*
220 * Returns once the part of the register indicated by the mask has
221 * reached the given value.
222 */
smum_wait_on_register(struct pp_smumgr * smumgr,uint32_t index,uint32_t value,uint32_t mask)223 int smum_wait_on_register(struct pp_smumgr *smumgr,
224 uint32_t index,
225 uint32_t value, uint32_t mask)
226 {
227 uint32_t i;
228 uint32_t cur_value;
229
230 if (smumgr == NULL || smumgr->device == NULL)
231 return -EINVAL;
232
233 for (i = 0; i < smumgr->usec_timeout; i++) {
234 cur_value = cgs_read_register(smumgr->device, index);
235 if ((cur_value & mask) == (value & mask))
236 break;
237 udelay(1);
238 }
239
240 /* timeout means wrong logic*/
241 if (i == smumgr->usec_timeout)
242 return -1;
243
244 return 0;
245 }
246
/*
 * Poll the register at @index once per microsecond until
 * (reg & mask) != (value & mask), for at most smumgr->usec_timeout
 * iterations.  Returns 0 once the register differs, -EINVAL on bad
 * arguments, and -1 on timeout (which indicates wrong logic).
 */
int smum_wait_for_register_unequal(struct pp_smumgr *smumgr,
				uint32_t index,
				uint32_t value, uint32_t mask)
{
	uint32_t i;
	uint32_t cur_value;

	/*
	 * Also reject a NULL device, matching smum_wait_on_register();
	 * previously only smumgr was checked and a NULL device would be
	 * handed straight to cgs_read_register().
	 */
	if (smumgr == NULL || smumgr->device == NULL)
		return -EINVAL;

	for (i = 0; i < smumgr->usec_timeout; i++) {
		cur_value = cgs_read_register(smumgr->device,
									index);
		if ((cur_value & mask) != (value & mask))
			break;
		udelay(1);
	}

	/* timeout means wrong logic */
	if (i == smumgr->usec_timeout)
		return -1;

	return 0;
}
271
272
273 /*
274 * Returns once the part of the register indicated by the mask
275 * has reached the given value.The indirect space is described by
276 * giving the memory-mapped index of the indirect index register.
277 */
smum_wait_on_indirect_register(struct pp_smumgr * smumgr,uint32_t indirect_port,uint32_t index,uint32_t value,uint32_t mask)278 int smum_wait_on_indirect_register(struct pp_smumgr *smumgr,
279 uint32_t indirect_port,
280 uint32_t index,
281 uint32_t value,
282 uint32_t mask)
283 {
284 if (smumgr == NULL || smumgr->device == NULL)
285 return -EINVAL;
286
287 cgs_write_register(smumgr->device, indirect_port, index);
288 return smum_wait_on_register(smumgr, indirect_port + 1,
289 mask, value);
290 }
291
/*
 * Counterpart to smum_wait_on_indirect_register(): program the index
 * register at @indirect_port, then poll the data register behind it
 * until the masked value differs from @value.  Silently returns on
 * bad arguments or timeout (no status is reported).
 */
void smum_wait_for_indirect_register_unequal(
						struct pp_smumgr *smumgr,
						uint32_t indirect_port,
						uint32_t index,
						uint32_t value,
						uint32_t mask)
{
	if (!smumgr || !smumgr->device)
		return;

	cgs_write_register(smumgr->device, indirect_port, index);
	smum_wait_for_register_unequal(smumgr, indirect_port + 1,
					value, mask);
}
305
/*
 * Allocate @size bytes of GPU memory of the given @type and alignment,
 * map it into the GPU address space (*mc_addr) and into kernel virtual
 * memory (*kptr).  @handle receives the cgs allocation handle; release
 * everything with smu_free_memory().
 *
 * Returns 0 on success, -EINVAL on bad arguments, -ENOMEM when the
 * allocation itself fails, or the cgs error code when a mapping fails
 * (in which case the allocation is rolled back).
 */
int smu_allocate_memory(void *device, uint32_t size,
			 enum cgs_gpu_mem_type type,
			 uint32_t byte_align, uint64_t *mc_addr,
			 void **kptr, void *handle)
{
	cgs_handle_t mem_handle;
	int result;

	if (!device || !handle || !mc_addr || !kptr)
		return -EINVAL;

	result = cgs_alloc_gpu_mem(device, type, size, byte_align,
			0, 0, (cgs_handle_t *)handle);
	if (result)
		return -ENOMEM;

	mem_handle = *(cgs_handle_t *)handle;

	result = cgs_gmap_gpu_mem(device, mem_handle, mc_addr);
	if (result)
		goto err_free;

	result = cgs_kmap_gpu_mem(device, mem_handle, kptr);
	if (result)
		goto err_gunmap;

	return 0;

err_gunmap:
	cgs_gunmap_gpu_mem(device, mem_handle);
err_free:
	cgs_free_gpu_mem(device, mem_handle);
	return result;
}
342
smu_free_memory(void * device,void * handle)343 int smu_free_memory(void *device, void *handle)
344 {
345 cgs_handle_t cgs_handle = (cgs_handle_t)handle;
346
347 if (device == NULL || handle == NULL)
348 return -EINVAL;
349
350 cgs_kunmap_gpu_mem(device, cgs_handle);
351 cgs_gunmap_gpu_mem(device, cgs_handle);
352 cgs_free_gpu_mem(device, cgs_handle);
353
354 return 0;
355 }
356
smum_init_smc_table(struct pp_hwmgr * hwmgr)357 int smum_init_smc_table(struct pp_hwmgr *hwmgr)
358 {
359 if (NULL != hwmgr->smumgr->smumgr_funcs->init_smc_table)
360 return hwmgr->smumgr->smumgr_funcs->init_smc_table(hwmgr);
361
362 return 0;
363 }
364
smum_populate_all_graphic_levels(struct pp_hwmgr * hwmgr)365 int smum_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
366 {
367 if (NULL != hwmgr->smumgr->smumgr_funcs->populate_all_graphic_levels)
368 return hwmgr->smumgr->smumgr_funcs->populate_all_graphic_levels(hwmgr);
369
370 return 0;
371 }
372
smum_populate_all_memory_levels(struct pp_hwmgr * hwmgr)373 int smum_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
374 {
375 if (NULL != hwmgr->smumgr->smumgr_funcs->populate_all_memory_levels)
376 return hwmgr->smumgr->smumgr_funcs->populate_all_memory_levels(hwmgr);
377
378 return 0;
379 }
380
381 /*this interface is needed by island ci/vi */
smum_initialize_mc_reg_table(struct pp_hwmgr * hwmgr)382 int smum_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
383 {
384 if (NULL != hwmgr->smumgr->smumgr_funcs->initialize_mc_reg_table)
385 return hwmgr->smumgr->smumgr_funcs->initialize_mc_reg_table(hwmgr);
386
387 return 0;
388 }
389
smum_is_dpm_running(struct pp_hwmgr * hwmgr)390 bool smum_is_dpm_running(struct pp_hwmgr *hwmgr)
391 {
392 if (NULL != hwmgr->smumgr->smumgr_funcs->is_dpm_running)
393 return hwmgr->smumgr->smumgr_funcs->is_dpm_running(hwmgr);
394
395 return true;
396 }
397
smum_populate_requested_graphic_levels(struct pp_hwmgr * hwmgr,struct amd_pp_profile * request)398 int smum_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
399 struct amd_pp_profile *request)
400 {
401 if (hwmgr->smumgr->smumgr_funcs->populate_requested_graphic_levels)
402 return hwmgr->smumgr->smumgr_funcs->populate_requested_graphic_levels(
403 hwmgr, request);
404
405 return 0;
406 }
407
smum_is_hw_avfs_present(struct pp_smumgr * smumgr)408 bool smum_is_hw_avfs_present(struct pp_smumgr *smumgr)
409 {
410 if (smumgr->smumgr_funcs->is_hw_avfs_present)
411 return smumgr->smumgr_funcs->is_hw_avfs_present(smumgr);
412
413 return false;
414 }
415