1 /*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Author: Huang Rui
23 *
24 */
25
26 #include <linux/firmware.h>
27 #include <drm/drm_drv.h>
28
29 #include "amdgpu.h"
30 #include "amdgpu_psp.h"
31 #include "amdgpu_ucode.h"
32 #include "amdgpu_xgmi.h"
33 #include "soc15_common.h"
34 #include "psp_v3_1.h"
35 #include "psp_v10_0.h"
36 #include "psp_v11_0.h"
37 #include "psp_v11_0_8.h"
38 #include "psp_v12_0.h"
39 #include "psp_v13_0.h"
40 #include "psp_v13_0_4.h"
41 #include "psp_v14_0.h"
42
43 #include "amdgpu_ras.h"
44 #include "amdgpu_securedisplay.h"
45 #include "amdgpu_atomfirmware.h"
46
47 #define AMD_VBIOS_FILE_MAX_SIZE_B (1024*1024*16)
48
49 static int psp_load_smu_fw(struct psp_context *psp);
50 static int psp_rap_terminate(struct psp_context *psp);
51 static int psp_securedisplay_terminate(struct psp_context *psp);
52
53 static int psp_ring_init(struct psp_context *psp,
54 enum psp_ring_type ring_type)
55 {
56 int ret = 0;
57 struct psp_ring *ring;
58 struct amdgpu_device *adev = psp->adev;
59
60 ring = &psp->km_ring;
61
62 ring->ring_type = ring_type;
63
64 /* allocate 4k Page of Local Frame Buffer memory for ring */
65 ring->ring_size = 0x1000;
66 ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
67 AMDGPU_GEM_DOMAIN_VRAM |
68 AMDGPU_GEM_DOMAIN_GTT,
69 &adev->firmware.rbuf,
70 &ring->ring_mem_mc_addr,
71 (void **)&ring->ring_mem);
72 if (ret) {
73 ring->ring_size = 0;
74 return ret;
75 }
76
77 return 0;
78 }
79
80 /*
81 * Due to DF Cstate management being centralized in PMFW, the firmware
82 * loading sequence is updated as below:
83 * - Load KDB
84 * - Load SYS_DRV
85 * - Load tOS
86 * - Load PMFW
87 * - Setup TMR
88 * - Load other non-psp fw
89 * - Load ASD
90 * - Load XGMI/RAS/HDCP/DTM TA if any
91 *
92 * This new sequence is required for
93 * - Arcturus and onwards
94 */
95 static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
96 {
97 struct amdgpu_device *adev = psp->adev;
98
99 if (amdgpu_sriov_vf(adev)) {
100 psp->pmfw_centralized_cstate_management = false;
101 return;
102 }
103
104 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
105 case IP_VERSION(11, 0, 0):
106 case IP_VERSION(11, 0, 4):
107 case IP_VERSION(11, 0, 5):
108 case IP_VERSION(11, 0, 7):
109 case IP_VERSION(11, 0, 9):
110 case IP_VERSION(11, 0, 11):
111 case IP_VERSION(11, 0, 12):
112 case IP_VERSION(11, 0, 13):
113 case IP_VERSION(13, 0, 0):
114 case IP_VERSION(13, 0, 2):
115 case IP_VERSION(13, 0, 7):
116 psp->pmfw_centralized_cstate_management = true;
117 break;
118 default:
119 psp->pmfw_centralized_cstate_management = false;
120 break;
121 }
122 }
123
124 static int psp_init_sriov_microcode(struct psp_context *psp)
125 {
126 struct amdgpu_device *adev = psp->adev;
127 char ucode_prefix[30];
128 int ret = 0;
129
130 amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
131
132 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
133 case IP_VERSION(9, 0, 0):
134 case IP_VERSION(11, 0, 7):
135 case IP_VERSION(11, 0, 9):
136 adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
137 ret = psp_init_cap_microcode(psp, ucode_prefix);
138 break;
139 case IP_VERSION(13, 0, 2):
140 adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
141 ret = psp_init_cap_microcode(psp, ucode_prefix);
142 ret &= psp_init_ta_microcode(psp, ucode_prefix);
143 break;
144 case IP_VERSION(13, 0, 0):
145 adev->virt.autoload_ucode_id = 0;
146 break;
147 case IP_VERSION(13, 0, 6):
148 case IP_VERSION(13, 0, 14):
149 ret = psp_init_cap_microcode(psp, ucode_prefix);
150 ret &= psp_init_ta_microcode(psp, ucode_prefix);
151 break;
152 case IP_VERSION(13, 0, 10):
153 adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA;
154 ret = psp_init_cap_microcode(psp, ucode_prefix);
155 break;
156 default:
157 return -EINVAL;
158 }
159 return ret;
160 }
161
162 static int psp_early_init(void *handle)
163 {
164 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
165 struct psp_context *psp = &adev->psp;
166
167 psp->autoload_supported = true;
168 psp->boot_time_tmr = true;
169
170 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
171 case IP_VERSION(9, 0, 0):
172 psp_v3_1_set_psp_funcs(psp);
173 psp->autoload_supported = false;
174 psp->boot_time_tmr = false;
175 break;
176 case IP_VERSION(10, 0, 0):
177 case IP_VERSION(10, 0, 1):
178 psp_v10_0_set_psp_funcs(psp);
179 psp->autoload_supported = false;
180 psp->boot_time_tmr = false;
181 break;
182 case IP_VERSION(11, 0, 2):
183 case IP_VERSION(11, 0, 4):
184 psp_v11_0_set_psp_funcs(psp);
185 psp->autoload_supported = false;
186 psp->boot_time_tmr = false;
187 break;
188 case IP_VERSION(11, 0, 0):
189 case IP_VERSION(11, 0, 7):
190 adev->psp.sup_pd_fw_up = !amdgpu_sriov_vf(adev);
191 fallthrough;
192 case IP_VERSION(11, 0, 5):
193 case IP_VERSION(11, 0, 9):
194 case IP_VERSION(11, 0, 11):
195 case IP_VERSION(11, 5, 0):
196 case IP_VERSION(11, 0, 12):
197 case IP_VERSION(11, 0, 13):
198 psp_v11_0_set_psp_funcs(psp);
199 psp->boot_time_tmr = false;
200 break;
201 case IP_VERSION(11, 0, 3):
202 case IP_VERSION(12, 0, 1):
203 psp_v12_0_set_psp_funcs(psp);
204 psp->autoload_supported = false;
205 psp->boot_time_tmr = false;
206 break;
207 case IP_VERSION(13, 0, 2):
208 psp->boot_time_tmr = false;
209 fallthrough;
210 case IP_VERSION(13, 0, 6):
211 case IP_VERSION(13, 0, 14):
212 psp_v13_0_set_psp_funcs(psp);
213 psp->autoload_supported = false;
214 break;
215 case IP_VERSION(13, 0, 1):
216 case IP_VERSION(13, 0, 3):
217 case IP_VERSION(13, 0, 5):
218 case IP_VERSION(13, 0, 8):
219 case IP_VERSION(13, 0, 11):
220 case IP_VERSION(14, 0, 0):
221 case IP_VERSION(14, 0, 1):
222 case IP_VERSION(14, 0, 4):
223 psp_v13_0_set_psp_funcs(psp);
224 psp->boot_time_tmr = false;
225 break;
226 case IP_VERSION(11, 0, 8):
227 if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
228 psp_v11_0_8_set_psp_funcs(psp);
229 }
230 psp->autoload_supported = false;
231 psp->boot_time_tmr = false;
232 break;
233 case IP_VERSION(13, 0, 0):
234 case IP_VERSION(13, 0, 7):
235 case IP_VERSION(13, 0, 10):
236 psp_v13_0_set_psp_funcs(psp);
237 adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
238 psp->boot_time_tmr = false;
239 break;
240 case IP_VERSION(13, 0, 4):
241 psp_v13_0_4_set_psp_funcs(psp);
242 psp->boot_time_tmr = false;
243 break;
244 case IP_VERSION(14, 0, 2):
245 case IP_VERSION(14, 0, 3):
246 psp_v14_0_set_psp_funcs(psp);
247 break;
248 default:
249 return -EINVAL;
250 }
251
252 psp->adev = adev;
253
254 adev->psp_timeout = 20000;
255
256 psp_check_pmfw_centralized_cstate_management(psp);
257
258 if (amdgpu_sriov_vf(adev))
259 return psp_init_sriov_microcode(psp);
260 else
261 return psp_init_microcode(psp);
262 }
263
264 void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx)
265 {
266 amdgpu_bo_free_kernel(&mem_ctx->shared_bo, &mem_ctx->shared_mc_addr,
267 &mem_ctx->shared_buf);
268 mem_ctx->shared_bo = NULL;
269 }
270
271 static void psp_free_shared_bufs(struct psp_context *psp)
272 {
273 void *tmr_buf;
274 void **pptr;
275
276 /* free TMR memory buffer */
277 pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
278 amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
279 psp->tmr_bo = NULL;
280
281 /* free xgmi shared memory */
282 psp_ta_free_shared_buf(&psp->xgmi_context.context.mem_context);
283
284 /* free ras shared memory */
285 psp_ta_free_shared_buf(&psp->ras_context.context.mem_context);
286
287 /* free hdcp shared memory */
288 psp_ta_free_shared_buf(&psp->hdcp_context.context.mem_context);
289
290 /* free dtm shared memory */
291 psp_ta_free_shared_buf(&psp->dtm_context.context.mem_context);
292
293 /* free rap shared memory */
294 psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
295
296 /* free securedisplay shared memory */
297 psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
298
299
300 }
301
302 static void psp_memory_training_fini(struct psp_context *psp)
303 {
304 struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
305
306 ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
307 kfree(ctx->sys_cache);
308 ctx->sys_cache = NULL;
309 }
310
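/*
 * Allocate a system-memory copy (sys_cache) large enough to hold the DRAM
 * training data described by the reserved region; skipped with a debug
 * message when the earlier reservation step did not succeed.
 */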
311 static int psp_memory_training_init(struct psp_context *psp)
312 {
313 int ret;
314 struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
315
316 if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) {
317 dev_dbg(psp->adev->dev, "memory training is not supported!\n");
318 return 0;
319 }
320
321 ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL);
322 if (ctx->sys_cache == NULL) {
323 dev_err(psp->adev->dev, "alloc mem_train_ctx.sys_cache failed!\n");
324 ret = -ENOMEM;
325 goto Err_out;
326 }
327
328 dev_dbg(psp->adev->dev,
329 "train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
330 ctx->train_data_size,
331 ctx->p2c_train_data_offset,
332 ctx->c2p_train_data_offset);
333 ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS;
334 return 0;
335
336 Err_out:
337 psp_memory_training_fini(psp);
338 return ret;
339 }
340
341 /*
342 * Helper function to query psp runtime database entry
343 *
344 * @adev: amdgpu_device pointer
345 * @entry_type: the type of psp runtime database entry
346 * @db_entry: runtime database entry pointer
347 *
348 * Return false if runtime database doesn't exist or entry is invalid,
349 * or true if the specific database entry is found and copied to @db_entry
350 */
351 static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,
352 enum psp_runtime_entry_type entry_type,
353 void *db_entry)
354 {
355 uint64_t db_header_pos, db_dir_pos;
356 struct psp_runtime_data_header db_header = {0};
357 struct psp_runtime_data_directory db_dir = {0};
358 bool ret = false;
359 int i;
360
361 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
362 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14))
363 return false;
364
365 db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET;
366 db_dir_pos = db_header_pos + sizeof(struct psp_runtime_data_header);
367
368 /* read runtime db header from vram */
369 amdgpu_device_vram_access(adev, db_header_pos, (uint32_t *)&db_header,
370 sizeof(struct psp_runtime_data_header), false);
371
372 if (db_header.cookie != PSP_RUNTIME_DB_COOKIE_ID) {
373 /* runtime db doesn't exist, exit */
374 dev_dbg(adev->dev, "PSP runtime database doesn't exist\n");
375 return false;
376 }
377
378 /* read runtime database entry from vram */
379 amdgpu_device_vram_access(adev, db_dir_pos, (uint32_t *)&db_dir,
380 sizeof(struct psp_runtime_data_directory), false);
381
382 if (db_dir.entry_count >= PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT) {
383 /* invalid db entry count, exit */
384 dev_warn(adev->dev, "Invalid PSP runtime database entry count\n");
385 return false;
386 }
387
388 /* look up for requested entry type */
389 for (i = 0; i < db_dir.entry_count && !ret; i++) {
390 if (db_dir.entry_list[i].entry_type == entry_type) {
391 switch (entry_type) {
392 case PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG:
393 if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_boot_cfg_entry)) {
394 /* invalid db entry size */
395 dev_warn(adev->dev, "Invalid PSP runtime database boot cfg entry size\n");
396 return false;
397 }
398 /* read runtime database entry */
399 amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
400 (uint32_t *)db_entry, sizeof(struct psp_runtime_boot_cfg_entry), false);
401 ret = true;
402 break;
403 case PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS:
404 if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_scpm_entry)) {
405 /* invalid db entry size */
406 dev_warn(adev->dev, "Invalid PSP runtime database scpm entry size\n");
407 return false;
408 }
409 /* read runtime database entry */
410 amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
411 (uint32_t *)db_entry, sizeof(struct psp_runtime_scpm_entry), false);
412 ret = true;
413 break;
414 default:
415 ret = false;
416 break;
417 }
418 }
419 }
420
421 return ret;
422 }
423
424 static int psp_sw_init(void *handle)
425 {
426 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
427 struct psp_context *psp = &adev->psp;
428 int ret;
429 struct psp_runtime_boot_cfg_entry boot_cfg_entry;
430 struct psp_memory_training_context *mem_training_ctx = &psp->mem_train_ctx;
431 struct psp_runtime_scpm_entry scpm_entry;
432
433 psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
434 if (!psp->cmd) {
435 dev_err(adev->dev, "Failed to allocate memory to command buffer!\n");
436 return -ENOMEM;
437 }
438
439 adev->psp.xgmi_context.supports_extended_data =
440 !adev->gmc.xgmi.connected_to_cpu &&
441 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2);
442
443 memset(&scpm_entry, 0, sizeof(scpm_entry));
444 if ((psp_get_runtime_db_entry(adev,
445 PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS,
446 &scpm_entry)) &&
447 (scpm_entry.scpm_status != SCPM_DISABLE)) {
448 adev->scpm_enabled = true;
449 adev->scpm_status = scpm_entry.scpm_status;
450 } else {
451 adev->scpm_enabled = false;
452 adev->scpm_status = SCPM_DISABLE;
453 }
454
455 /* TODO: stop gpu driver services and print alarm if scpm is enabled with error status */
456
457 memset(&boot_cfg_entry, 0, sizeof(boot_cfg_entry));
458 if (psp_get_runtime_db_entry(adev,
459 PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG,
460 &boot_cfg_entry)) {
461 psp->boot_cfg_bitmask = boot_cfg_entry.boot_cfg_bitmask;
462 if ((psp->boot_cfg_bitmask) &
463 BOOT_CFG_FEATURE_TWO_STAGE_DRAM_TRAINING) {
464 /* If psp runtime database exists, then
465 * only enable two stage memory training
466 * when TWO_STAGE_DRAM_TRAINING bit is set
467 * in runtime database
468 */
469 mem_training_ctx->enable_mem_training = true;
470 }
471
472 } else {
473 /* If psp runtime database doesn't exist or is
474 * invalid, force enable two stage memory training
475 */
476 mem_training_ctx->enable_mem_training = true;
477 }
478
479 if (mem_training_ctx->enable_mem_training) {
480 ret = psp_memory_training_init(psp);
481 if (ret) {
482 dev_err(adev->dev, "Failed to initialize memory training!\n");
483 return ret;
484 }
485
486 ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
487 if (ret) {
488 dev_err(adev->dev, "Failed to process memory training!\n");
489 return ret;
490 }
491 }
492
493 ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
494 (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ?
495 AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
496 &psp->fw_pri_bo,
497 &psp->fw_pri_mc_addr,
498 &psp->fw_pri_buf);
499 if (ret)
500 return ret;
501
502 ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
503 AMDGPU_GEM_DOMAIN_VRAM |
504 AMDGPU_GEM_DOMAIN_GTT,
505 &psp->fence_buf_bo,
506 &psp->fence_buf_mc_addr,
507 &psp->fence_buf);
508 if (ret)
509 goto failed1;
510
511 ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
512 AMDGPU_GEM_DOMAIN_VRAM |
513 AMDGPU_GEM_DOMAIN_GTT,
514 &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
515 (void **)&psp->cmd_buf_mem);
516 if (ret)
517 goto failed2;
518
519 return 0;
520
521 failed2:
522 amdgpu_bo_free_kernel(&psp->fence_buf_bo,
523 &psp->fence_buf_mc_addr, &psp->fence_buf);
524 failed1:
525 amdgpu_bo_free_kernel(&psp->fw_pri_bo,
526 &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
527 return ret;
528 }
529
530 static int psp_sw_fini(void *handle)
531 {
532 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
533 struct psp_context *psp = &adev->psp;
534
535 psp_memory_training_fini(psp);
536
537 amdgpu_ucode_release(&psp->sos_fw);
538 amdgpu_ucode_release(&psp->asd_fw);
539 amdgpu_ucode_release(&psp->ta_fw);
540 amdgpu_ucode_release(&psp->cap_fw);
541 amdgpu_ucode_release(&psp->toc_fw);
542
543 kfree(psp->cmd);
544 psp->cmd = NULL;
545
546 psp_free_shared_bufs(psp);
547
548 if (psp->km_ring.ring_mem)
549 amdgpu_bo_free_kernel(&adev->firmware.rbuf,
550 &psp->km_ring.ring_mem_mc_addr,
551 (void **)&psp->km_ring.ring_mem);
552
553 amdgpu_bo_free_kernel(&psp->fw_pri_bo,
554 &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
555 amdgpu_bo_free_kernel(&psp->fence_buf_bo,
556 &psp->fence_buf_mc_addr, &psp->fence_buf);
557 amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
558 (void **)&psp->cmd_buf_mem);
559
560 return 0;
561 }
562
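/*
 * Poll @reg_index once per microsecond until (val & mask) == reg_val, or,
 * when @check_changed is set, until the value differs from reg_val.
 * Returns 0 on success or -ETIME after adev->usec_timeout iterations.
 */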
563 int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
564 uint32_t reg_val, uint32_t mask, bool check_changed)
565 {
566 uint32_t val;
567 int i;
568 struct amdgpu_device *adev = psp->adev;
569
570 if (psp->adev->no_hw_access)
571 return 0;
572
573 for (i = 0; i < adev->usec_timeout; i++) {
574 val = RREG32(reg_index);
575 if (check_changed) {
576 if (val != reg_val)
577 return 0;
578 } else {
579 if ((val & mask) == reg_val)
580 return 0;
581 }
582 udelay(1);
583 }
584
585 return -ETIME;
586 }
587
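/*
 * Same polling loop as psp_wait_for(), but sleeping in 1 ms steps with a
 * caller-supplied timeout, used while waiting on slow SPI ROM updates.
 */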
588 int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index,
589 uint32_t reg_val, uint32_t mask, uint32_t msec_timeout)
590 {
591 uint32_t val;
592 int i;
593 struct amdgpu_device *adev = psp->adev;
594
595 if (psp->adev->no_hw_access)
596 return 0;
597
598 for (i = 0; i < msec_timeout; i++) {
599 val = RREG32(reg_index);
600 if ((val & mask) == reg_val)
601 return 0;
602 msleep(1);
603 }
604
605 return -ETIME;
606 }
607
608 static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
609 {
610 switch (cmd_id) {
611 case GFX_CMD_ID_LOAD_TA:
612 return "LOAD_TA";
613 case GFX_CMD_ID_UNLOAD_TA:
614 return "UNLOAD_TA";
615 case GFX_CMD_ID_INVOKE_CMD:
616 return "INVOKE_CMD";
617 case GFX_CMD_ID_LOAD_ASD:
618 return "LOAD_ASD";
619 case GFX_CMD_ID_SETUP_TMR:
620 return "SETUP_TMR";
621 case GFX_CMD_ID_LOAD_IP_FW:
622 return "LOAD_IP_FW";
623 case GFX_CMD_ID_DESTROY_TMR:
624 return "DESTROY_TMR";
625 case GFX_CMD_ID_SAVE_RESTORE:
626 return "SAVE_RESTORE_IP_FW";
627 case GFX_CMD_ID_SETUP_VMR:
628 return "SETUP_VMR";
629 case GFX_CMD_ID_DESTROY_VMR:
630 return "DESTROY_VMR";
631 case GFX_CMD_ID_PROG_REG:
632 return "PROG_REG";
633 case GFX_CMD_ID_GET_FW_ATTESTATION:
634 return "GET_FW_ATTESTATION";
635 case GFX_CMD_ID_LOAD_TOC:
636 return "ID_LOAD_TOC";
637 case GFX_CMD_ID_AUTOLOAD_RLC:
638 return "AUTOLOAD_RLC";
639 case GFX_CMD_ID_BOOT_CFG:
640 return "BOOT_CFG";
641 default:
642 return "UNKNOWN CMD";
643 }
644 }
645
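/*
 * Decide whether a non-zero PSP response deserves a warning; a
 * TEE_ERROR_CANCEL status for a REG_LIST load on MP0 v13.0.2 simply means
 * the reg list is already loaded and is not treated as an error.
 */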
646 static bool psp_err_warn(struct psp_context *psp)
647 {
648 struct psp_gfx_cmd_resp *cmd = psp->cmd_buf_mem;
649
650 /* This response indicates reg list is already loaded */
651 if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
652 cmd->cmd_id == GFX_CMD_ID_LOAD_IP_FW &&
653 cmd->cmd.cmd_load_ip_fw.fw_type == GFX_FW_TYPE_REG_LIST &&
654 cmd->resp.status == TEE_ERROR_CANCEL)
655 return false;
656
657 return true;
658 }
659
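/*
 * Copy @cmd into the ring-visible command buffer, submit it with a fresh
 * fence value and busy-wait (invalidating HDP between reads) until the PSP
 * writes that fence back, the timeout expires or a RAS interrupt fires.
 */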
660 static int
661 psp_cmd_submit_buf(struct psp_context *psp,
662 struct amdgpu_firmware_info *ucode,
663 struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
664 {
665 int ret;
666 int index;
667 int timeout = psp->adev->psp_timeout;
668 bool ras_intr = false;
669 bool skip_unsupport = false;
670
671 if (psp->adev->no_hw_access)
672 return 0;
673
674 memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
675
676 memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
677
678 index = atomic_inc_return(&psp->fence_value);
679 ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index);
680 if (ret) {
681 atomic_dec(&psp->fence_value);
682 goto exit;
683 }
684
685 amdgpu_device_invalidate_hdp(psp->adev, NULL);
686 while (*((unsigned int *)psp->fence_buf) != index) {
687 if (--timeout == 0)
688 break;
689 /*
690 * Shouldn't wait for timeout when err_event_athub occurs,
691 * because the gpu reset thread is triggered and the lock resource
692 * should be released for the psp resume sequence.
693 */
694 ras_intr = amdgpu_ras_intr_triggered();
695 if (ras_intr)
696 break;
697 usleep_range(10, 100);
698 amdgpu_device_invalidate_hdp(psp->adev, NULL);
699 }
700
701 /* We allow TEE_ERROR_NOT_SUPPORTED for VMR command and PSP_ERR_UNKNOWN_COMMAND in SRIOV */
702 skip_unsupport = (psp->cmd_buf_mem->resp.status == TEE_ERROR_NOT_SUPPORTED ||
703 psp->cmd_buf_mem->resp.status == PSP_ERR_UNKNOWN_COMMAND) && amdgpu_sriov_vf(psp->adev);
704
705 memcpy(&cmd->resp, &psp->cmd_buf_mem->resp, sizeof(struct psp_gfx_resp));
706
707 /* In some cases, psp response status is not 0 even when there is no
708 * problem while the command is submitted. Some versions of PSP FW
709 * don't write 0 to that field.
710 * So here we only print a warning instead of an error during psp
711 * initialization, to avoid breaking hw_init, and we don't
712 * return -EINVAL.
713 */
714 if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
715 if (ucode)
716 dev_warn(psp->adev->dev,
717 "failed to load ucode %s(0x%X) ",
718 amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id);
719 if (psp_err_warn(psp))
720 dev_warn(
721 psp->adev->dev,
722 "psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
723 psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id),
724 psp->cmd_buf_mem->cmd_id,
725 psp->cmd_buf_mem->resp.status);
726 /* If any firmware (including CAP) load fails under SRIOV, it should
727 * return failure to stop the VF from initializing.
728 * Also return failure in case of timeout
729 */
730 if ((ucode && amdgpu_sriov_vf(psp->adev)) || !timeout) {
731 ret = -EINVAL;
732 goto exit;
733 }
734 }
735
736 if (ucode) {
737 ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
738 ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
739 }
740
741 exit:
742 return ret;
743 }
744
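/*
 * Take psp->mutex and return the zeroed scratch command buffer; callers
 * must pair this with release_psp_cmd_buf() after submission.
 */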
745 static struct psp_gfx_cmd_resp *acquire_psp_cmd_buf(struct psp_context *psp)
746 {
747 struct psp_gfx_cmd_resp *cmd = psp->cmd;
748
749 mutex_lock(&psp->mutex);
750
751 memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
752
753 return cmd;
754 }
755
756 static void release_psp_cmd_buf(struct psp_context *psp)
757 {
758 mutex_unlock(&psp->mutex);
759 }
760
761 static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
762 struct psp_gfx_cmd_resp *cmd,
763 uint64_t tmr_mc, struct amdgpu_bo *tmr_bo)
764 {
765 struct amdgpu_device *adev = psp->adev;
766 uint32_t size = 0;
767 uint64_t tmr_pa = 0;
768
769 if (tmr_bo) {
770 size = amdgpu_bo_size(tmr_bo);
771 tmr_pa = amdgpu_gmc_vram_pa(adev, tmr_bo);
772 }
773
774 if (amdgpu_sriov_vf(psp->adev))
775 cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
776 else
777 cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
778 cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
779 cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
780 cmd->cmd.cmd_setup_tmr.buf_size = size;
781 cmd->cmd.cmd_setup_tmr.bitfield.virt_phy_addr = 1;
782 cmd->cmd.cmd_setup_tmr.system_phy_addr_lo = lower_32_bits(tmr_pa);
783 cmd->cmd.cmd_setup_tmr.system_phy_addr_hi = upper_32_bits(tmr_pa);
784 }
785
786 static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd,
787 uint64_t pri_buf_mc, uint32_t size)
788 {
789 cmd->cmd_id = GFX_CMD_ID_LOAD_TOC;
790 cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc);
791 cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc);
792 cmd->cmd.cmd_load_toc.toc_size = size;
793 }
794
795 /* Issue LOAD TOC cmd to PSP to parse toc and calculate tmr size needed */
796 static int psp_load_toc(struct psp_context *psp,
797 uint32_t *tmr_size)
798 {
799 int ret;
800 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
801
802 /* Copy toc to psp firmware private buffer */
803 psp_copy_fw(psp, psp->toc.start_addr, psp->toc.size_bytes);
804
805 psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc.size_bytes);
806
807 ret = psp_cmd_submit_buf(psp, NULL, cmd,
808 psp->fence_buf_mc_addr);
809 if (!ret)
810 *tmr_size = psp->cmd_buf_mem->resp.tmr_size;
811
812 release_psp_cmd_buf(psp);
813
814 return ret;
815 }
816
817 /* Set up Trusted Memory Region */
818 static int psp_tmr_init(struct psp_context *psp)
819 {
820 int ret = 0;
821 int tmr_size;
822 void *tmr_buf;
823 void **pptr;
824
825 /*
826 * According to HW engineer, they prefer the TMR address to be "naturally
827 * aligned", e.g. the start address is an integer multiple of the TMR size.
828 *
829 * Note: this memory needs to stay reserved until the driver
830 * is unloaded.
831 */
832 tmr_size = PSP_TMR_SIZE(psp->adev);
833
834 /* For ASICs that support RLC autoload, psp will parse the toc
835 * and calculate the total size of TMR needed
836 */
837 if (!amdgpu_sriov_vf(psp->adev) &&
838 psp->toc.start_addr &&
839 psp->toc.size_bytes &&
840 psp->fw_pri_buf) {
841 ret = psp_load_toc(psp, &tmr_size);
842 if (ret) {
843 dev_err(psp->adev->dev, "Failed to load toc\n");
844 return ret;
845 }
846 }
847
848 if (!psp->tmr_bo && !psp->boot_time_tmr) {
849 pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
850 ret = amdgpu_bo_create_kernel(psp->adev, tmr_size,
851 PSP_TMR_ALIGNMENT,
852 AMDGPU_HAS_VRAM(psp->adev) ?
853 AMDGPU_GEM_DOMAIN_VRAM :
854 AMDGPU_GEM_DOMAIN_GTT,
855 &psp->tmr_bo, &psp->tmr_mc_addr,
856 pptr);
857 }
858
859 return ret;
860 }
861
862 static bool psp_skip_tmr(struct psp_context *psp)
863 {
864 switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) {
865 case IP_VERSION(11, 0, 9):
866 case IP_VERSION(11, 0, 7):
867 case IP_VERSION(13, 0, 2):
868 case IP_VERSION(13, 0, 6):
869 case IP_VERSION(13, 0, 10):
870 case IP_VERSION(13, 0, 14):
871 return true;
872 default:
873 return false;
874 }
875 }
876
877 static int psp_tmr_load(struct psp_context *psp)
878 {
879 int ret;
880 struct psp_gfx_cmd_resp *cmd;
881
882 /* For Navi12 and CHIP_SIENNA_CICHLID SRIOV, do not set up TMR.
883 * Already set up by host driver.
884 */
885 if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
886 return 0;
887
888 cmd = acquire_psp_cmd_buf(psp);
889
890 psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo);
891 if (psp->tmr_bo)
892 dev_info(psp->adev->dev, "reserve 0x%lx from 0x%llx for PSP TMR\n",
893 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);
894
895 ret = psp_cmd_submit_buf(psp, NULL, cmd,
896 psp->fence_buf_mc_addr);
897
898 release_psp_cmd_buf(psp);
899
900 return ret;
901 }
902
903 static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp,
904 struct psp_gfx_cmd_resp *cmd)
905 {
906 if (amdgpu_sriov_vf(psp->adev))
907 cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR;
908 else
909 cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR;
910 }
911
912 static int psp_tmr_unload(struct psp_context *psp)
913 {
914 int ret;
915 struct psp_gfx_cmd_resp *cmd;
916
917 /* skip TMR unload for Navi12 and CHIP_SIENNA_CICHLID SRIOV,
918 * as TMR is not loaded at all
919 */
920 if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
921 return 0;
922
923 cmd = acquire_psp_cmd_buf(psp);
924
925 psp_prep_tmr_unload_cmd_buf(psp, cmd);
926 dev_dbg(psp->adev->dev, "free PSP TMR buffer\n");
927
928 ret = psp_cmd_submit_buf(psp, NULL, cmd,
929 psp->fence_buf_mc_addr);
930
931 release_psp_cmd_buf(psp);
932
933 return ret;
934 }
935
936 static int psp_tmr_terminate(struct psp_context *psp)
937 {
938 return psp_tmr_unload(psp);
939 }
940
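/*
 * Ask the PSP for the address of its firmware attestation records database
 * and return it through @output_ptr (not applicable under SRIOV).
 */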
941 int psp_get_fw_attestation_records_addr(struct psp_context *psp,
942 uint64_t *output_ptr)
943 {
944 int ret;
945 struct psp_gfx_cmd_resp *cmd;
946
947 if (!output_ptr)
948 return -EINVAL;
949
950 if (amdgpu_sriov_vf(psp->adev))
951 return 0;
952
953 cmd = acquire_psp_cmd_buf(psp);
954
955 cmd->cmd_id = GFX_CMD_ID_GET_FW_ATTESTATION;
956
957 ret = psp_cmd_submit_buf(psp, NULL, cmd,
958 psp->fence_buf_mc_addr);
959
960 if (!ret) {
961 *output_ptr = ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_lo) +
962 ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_hi << 32);
963 }
964
965 release_psp_cmd_buf(psp);
966
967 return ret;
968 }
969
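/*
 * Read the current PSP boot configuration and report through @boot_cfg
 * whether the GECC feature bit is set (1) or clear (0).
 */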
970 static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg)
971 {
972 struct psp_context *psp = &adev->psp;
973 struct psp_gfx_cmd_resp *cmd;
974 int ret;
975
976 if (amdgpu_sriov_vf(adev))
977 return 0;
978
979 cmd = acquire_psp_cmd_buf(psp);
980
981 cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
982 cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_GET;
983
984 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
985 if (!ret) {
986 *boot_cfg =
987 (cmd->resp.uresp.boot_cfg.boot_cfg & BOOT_CONFIG_GECC) ? 1 : 0;
988 }
989
990 release_psp_cmd_buf(psp);
991
992 return ret;
993 }
994
995 static int psp_boot_config_set(struct amdgpu_device *adev, uint32_t boot_cfg)
996 {
997 int ret;
998 struct psp_context *psp = &adev->psp;
999 struct psp_gfx_cmd_resp *cmd;
1000
1001 if (amdgpu_sriov_vf(adev))
1002 return 0;
1003
1004 cmd = acquire_psp_cmd_buf(psp);
1005
1006 cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
1007 cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_SET;
1008 cmd->cmd.boot_cfg.boot_config = boot_cfg;
1009 cmd->cmd.boot_cfg.boot_config_valid = boot_cfg;
1010
1011 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1012
1013 release_psp_cmd_buf(psp);
1014
1015 return ret;
1016 }
1017
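/*
 * Load the register list image, if one was provided in the PSP firmware,
 * through the generic LOAD_IP_FW command.
 */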
1018 static int psp_rl_load(struct amdgpu_device *adev)
1019 {
1020 int ret;
1021 struct psp_context *psp = &adev->psp;
1022 struct psp_gfx_cmd_resp *cmd;
1023
1024 if (!is_psp_fw_valid(psp->rl))
1025 return 0;
1026
1027 cmd = acquire_psp_cmd_buf(psp);
1028
1029 memset(psp->fw_pri_buf, 0, PSP_1_MEG);
1030 memcpy(psp->fw_pri_buf, psp->rl.start_addr, psp->rl.size_bytes);
1031
1032 cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
1033 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(psp->fw_pri_mc_addr);
1034 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(psp->fw_pri_mc_addr);
1035 cmd->cmd.cmd_load_ip_fw.fw_size = psp->rl.size_bytes;
1036 cmd->cmd.cmd_load_ip_fw.fw_type = GFX_FW_TYPE_REG_LIST;
1037
1038 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1039
1040 release_psp_cmd_buf(psp);
1041
1042 return ret;
1043 }
1044
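/*
 * Request the given number of spatial partitions from the PSP; a no-op
 * under SRIOV.
 */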
1045 int psp_spatial_partition(struct psp_context *psp, int mode)
1046 {
1047 struct psp_gfx_cmd_resp *cmd;
1048 int ret;
1049
1050 if (amdgpu_sriov_vf(psp->adev))
1051 return 0;
1052
1053 cmd = acquire_psp_cmd_buf(psp);
1054
1055 cmd->cmd_id = GFX_CMD_ID_SRIOV_SPATIAL_PART;
1056 cmd->cmd.cmd_spatial_part.mode = mode;
1057
1058 dev_info(psp->adev->dev, "Requesting %d partitions through PSP", mode);
1059 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1060
1061 release_psp_cmd_buf(psp);
1062
1063 return ret;
1064 }
1065
1066 static int psp_asd_initialize(struct psp_context *psp)
1067 {
1068 int ret;
1069
1070 /* If PSP version doesn't match ASD version, asd loading will fail.
1071 * Add a workaround to bypass it for sriov for now.
1072 * TODO: add version check to make it common
1073 */
1074 if (amdgpu_sriov_vf(psp->adev) || !psp->asd_context.bin_desc.size_bytes)
1075 return 0;
1076
1077 /* bypass asd if display hardware is not available */
1078 if (!amdgpu_device_has_display_hardware(psp->adev) &&
1079 amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >= IP_VERSION(13, 0, 10))
1080 return 0;
1081
1082 psp->asd_context.mem_context.shared_mc_addr = 0;
1083 psp->asd_context.mem_context.shared_mem_size = PSP_ASD_SHARED_MEM_SIZE;
1084 psp->asd_context.ta_load_type = GFX_CMD_ID_LOAD_ASD;
1085
1086 ret = psp_ta_load(psp, &psp->asd_context);
1087 if (!ret)
1088 psp->asd_context.initialized = true;
1089
1090 return ret;
1091 }
1092
1093 static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1094 uint32_t session_id)
1095 {
1096 cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
1097 cmd->cmd.cmd_unload_ta.session_id = session_id;
1098 }
1099
1100 int psp_ta_unload(struct psp_context *psp, struct ta_context *context)
1101 {
1102 int ret;
1103 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
1104
1105 psp_prep_ta_unload_cmd_buf(cmd, context->session_id);
1106
1107 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1108
1109 context->resp_status = cmd->resp.status;
1110
1111 release_psp_cmd_buf(psp);
1112
1113 return ret;
1114 }
1115
1116 static int psp_asd_terminate(struct psp_context *psp)
1117 {
1118 int ret;
1119
1120 if (amdgpu_sriov_vf(psp->adev))
1121 return 0;
1122
1123 if (!psp->asd_context.initialized)
1124 return 0;
1125
1126 ret = psp_ta_unload(psp, &psp->asd_context);
1127 if (!ret)
1128 psp->asd_context.initialized = false;
1129
1130 return ret;
1131 }
1132
1133 static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1134 uint32_t id, uint32_t value)
1135 {
1136 cmd->cmd_id = GFX_CMD_ID_PROG_REG;
1137 cmd->cmd.cmd_setup_reg_prog.reg_value = value;
1138 cmd->cmd.cmd_setup_reg_prog.reg_id = id;
1139 }
1140
1141 int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
1142 uint32_t value)
1143 {
1144 struct psp_gfx_cmd_resp *cmd;
1145 int ret = 0;
1146
1147 if (reg >= PSP_REG_LAST)
1148 return -EINVAL;
1149
1150 cmd = acquire_psp_cmd_buf(psp);
1151
1152 psp_prep_reg_prog_cmd_buf(cmd, reg, value);
1153 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1154 if (ret)
1155 dev_err(psp->adev->dev, "PSP failed to program reg id %d\n", reg);
1156
1157 release_psp_cmd_buf(psp);
1158
1159 return ret;
1160 }
1161
1162 static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1163 uint64_t ta_bin_mc,
1164 struct ta_context *context)
1165 {
1166 cmd->cmd_id = context->ta_load_type;
1167 cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ta_bin_mc);
1168 cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ta_bin_mc);
1169 cmd->cmd.cmd_load_ta.app_len = context->bin_desc.size_bytes;
1170
1171 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo =
1172 lower_32_bits(context->mem_context.shared_mc_addr);
1173 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi =
1174 upper_32_bits(context->mem_context.shared_mc_addr);
1175 cmd->cmd.cmd_load_ta.cmd_buf_len = context->mem_context.shared_mem_size;
1176 }
1177
1178 int psp_ta_init_shared_buf(struct psp_context *psp,
1179 struct ta_mem_context *mem_ctx)
1180 {
1181 /*
1182 * Allocate 16k memory aligned to 4k from Frame Buffer (local
1183 * physical) for the ta <-> host shared memory
1184 */
1185 return amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size,
1186 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM |
1187 AMDGPU_GEM_DOMAIN_GTT,
1188 &mem_ctx->shared_bo,
1189 &mem_ctx->shared_mc_addr,
1190 &mem_ctx->shared_buf);
1191 }
1192
1193 static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1194 uint32_t ta_cmd_id,
1195 uint32_t session_id)
1196 {
1197 cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
1198 cmd->cmd.cmd_invoke_cmd.session_id = session_id;
1199 cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
1200 }
1201
1202 int psp_ta_invoke(struct psp_context *psp,
1203 uint32_t ta_cmd_id,
1204 struct ta_context *context)
1205 {
1206 int ret;
1207 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
1208
1209 psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, context->session_id);
1210
1211 ret = psp_cmd_submit_buf(psp, NULL, cmd,
1212 psp->fence_buf_mc_addr);
1213
1214 context->resp_status = cmd->resp.status;
1215
1216 release_psp_cmd_buf(psp);
1217
1218 return ret;
1219 }
1220
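/*
 * Copy the TA binary into the PSP private firmware buffer and issue the
 * load command; on success the session id returned by the PSP is recorded
 * in @context.
 */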
1221 int psp_ta_load(struct psp_context *psp, struct ta_context *context)
1222 {
1223 int ret;
1224 struct psp_gfx_cmd_resp *cmd;
1225
1226 cmd = acquire_psp_cmd_buf(psp);
1227
1228 psp_copy_fw(psp, context->bin_desc.start_addr,
1229 context->bin_desc.size_bytes);
1230
1231 psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context);
1232
1233 ret = psp_cmd_submit_buf(psp, NULL, cmd,
1234 psp->fence_buf_mc_addr);
1235
1236 context->resp_status = cmd->resp.status;
1237
1238 if (!ret)
1239 context->session_id = cmd->resp.session_id;
1240
1241 release_psp_cmd_buf(psp);
1242
1243 return ret;
1244 }
1245
1246 int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1247 {
1248 return psp_ta_invoke(psp, ta_cmd_id, &psp->xgmi_context.context);
1249 }
1250
1251 int psp_xgmi_terminate(struct psp_context *psp)
1252 {
1253 int ret;
1254 struct amdgpu_device *adev = psp->adev;
1255
1256 /* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */
1257 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
1258 (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
1259 adev->gmc.xgmi.connected_to_cpu))
1260 return 0;
1261
1262 if (!psp->xgmi_context.context.initialized)
1263 return 0;
1264
1265 ret = psp_ta_unload(psp, &psp->xgmi_context.context);
1266
1267 psp->xgmi_context.context.initialized = false;
1268
1269 return ret;
1270 }
1271
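/*
 * Load the XGMI TA (unless @load_ta is false and only the session needs to
 * be re-initialized) and send the INITIALIZE command to open an XGMI
 * session, caching the TA capability flags.
 */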
1272 int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta)
1273 {
1274 struct ta_xgmi_shared_memory *xgmi_cmd;
1275 int ret;
1276
1277 if (!psp->ta_fw ||
1278 !psp->xgmi_context.context.bin_desc.size_bytes ||
1279 !psp->xgmi_context.context.bin_desc.start_addr)
1280 return -ENOENT;
1281
1282 if (!load_ta)
1283 goto invoke;
1284
1285 psp->xgmi_context.context.mem_context.shared_mem_size = PSP_XGMI_SHARED_MEM_SIZE;
1286 psp->xgmi_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1287
1288 if (!psp->xgmi_context.context.mem_context.shared_buf) {
1289 ret = psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context);
1290 if (ret)
1291 return ret;
1292 }
1293
1294 /* Load XGMI TA */
1295 ret = psp_ta_load(psp, &psp->xgmi_context.context);
1296 if (!ret)
1297 psp->xgmi_context.context.initialized = true;
1298 else
1299 return ret;
1300
1301 invoke:
1302 /* Initialize XGMI session */
1303 xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.context.mem_context.shared_buf);
1304 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1305 xgmi_cmd->flag_extend_link_record = set_extended_data;
1306 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;
1307
1308 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1309 /* note down the capability flag for XGMI TA */
1310 psp->xgmi_context.xgmi_ta_caps = xgmi_cmd->caps_flag;
1311
1312 return ret;
1313 }
1314
1315 int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
1316 {
1317 struct ta_xgmi_shared_memory *xgmi_cmd;
1318 int ret;
1319
1320 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1321 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1322
1323 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
1324
1325 /* Invoke xgmi ta to get hive id */
1326 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1327 if (ret)
1328 return ret;
1329
1330 *hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
1331
1332 return 0;
1333 }
1334
1335 int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
1336 {
1337 struct ta_xgmi_shared_memory *xgmi_cmd;
1338 int ret;
1339
1340 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1341 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1342
1343 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
1344
1345 /* Invoke xgmi ta to get the node id */
1346 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1347 if (ret)
1348 return ret;
1349
1350 *node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;
1351
1352 return 0;
1353 }
1354
1355 static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp)
1356 {
1357 return (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1358 IP_VERSION(13, 0, 2) &&
1359 psp->xgmi_context.context.bin_desc.fw_version >= 0x2000000b) ||
1360 amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >=
1361 IP_VERSION(13, 0, 6);
1362 }
1363
1364 /*
1365 * Chips that support extended topology information require the driver to
1366 * reflect topology information in the opposite direction. This is
1367 * because the TA has already exceeded its link record limit and if the
1368 * TA holds bi-directional information, the driver would have to do
1369 * multiple fetches instead of just two.
1370 */
1371 static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
1372 struct psp_xgmi_node_info node_info)
1373 {
1374 struct amdgpu_device *mirror_adev;
1375 struct amdgpu_hive_info *hive;
1376 uint64_t src_node_id = psp->adev->gmc.xgmi.node_id;
1377 uint64_t dst_node_id = node_info.node_id;
1378 uint8_t dst_num_hops = node_info.num_hops;
1379 uint8_t dst_num_links = node_info.num_links;
1380
1381 hive = amdgpu_get_xgmi_hive(psp->adev);
1382 if (WARN_ON(!hive))
1383 return;
1384
1385 list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) {
1386 struct psp_xgmi_topology_info *mirror_top_info;
1387 int j;
1388
1389 if (mirror_adev->gmc.xgmi.node_id != dst_node_id)
1390 continue;
1391
1392 mirror_top_info = &mirror_adev->psp.xgmi_context.top_info;
1393 for (j = 0; j < mirror_top_info->num_nodes; j++) {
1394 if (mirror_top_info->nodes[j].node_id != src_node_id)
1395 continue;
1396
1397 mirror_top_info->nodes[j].num_hops = dst_num_hops;
1398 /*
1399 * prevent 0 num_links value re-reflection since reflection
1400 * criteria is based on num_hops (direct or indirect).
1401 *
1402 */
1403 if (dst_num_links)
1404 mirror_top_info->nodes[j].num_links = dst_num_links;
1405
1406 break;
1407 }
1408
1409 break;
1410 }
1411
1412 amdgpu_put_xgmi_hive(hive);
1413 }
1414
1415 int psp_xgmi_get_topology_info(struct psp_context *psp,
1416 int number_devices,
1417 struct psp_xgmi_topology_info *topology,
1418 bool get_extended_data)
1419 {
1420 struct ta_xgmi_shared_memory *xgmi_cmd;
1421 struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
1422 struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
1423 int i;
1424 int ret;
1425
1426 if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
1427 return -EINVAL;
1428
1429 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1430 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1431 xgmi_cmd->flag_extend_link_record = get_extended_data;
1432
1433 /* Fill in the shared memory with topology information as input */
1434 topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
1435 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_TOPOLOGY_INFO;
1436 topology_info_input->num_nodes = number_devices;
1437
1438 for (i = 0; i < topology_info_input->num_nodes; i++) {
1439 topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
1440 topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
1441 topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
1442 topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
1443 }
1444
1445 /* Invoke xgmi ta to get the topology information */
1446 ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_TOPOLOGY_INFO);
1447 if (ret)
1448 return ret;
1449
1450 /* Read the output topology information from the shared memory */
1451 topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
1452 topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
1453 for (i = 0; i < topology->num_nodes; i++) {
1454 /* extended data will either be 0 or equal to non-extended data */
1455 if (topology_info_output->nodes[i].num_hops)
1456 topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
1457
1458 /* non-extended data gets everything here so no need to update */
1459 if (!get_extended_data) {
1460 topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
1461 topology->nodes[i].is_sharing_enabled =
1462 topology_info_output->nodes[i].is_sharing_enabled;
1463 topology->nodes[i].sdma_engine =
1464 topology_info_output->nodes[i].sdma_engine;
1465 }
1466
1467 }
1468
1469 /* Invoke xgmi ta again to get the link information */
1470 if (psp_xgmi_peer_link_info_supported(psp)) {
1471 struct ta_xgmi_cmd_get_peer_link_info *link_info_output;
1472 struct ta_xgmi_cmd_get_extend_peer_link_info *link_extend_info_output;
1473 bool requires_reflection =
1474 (psp->xgmi_context.supports_extended_data &&
1475 get_extended_data) ||
1476 amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1477 IP_VERSION(13, 0, 6) ||
1478 amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1479 IP_VERSION(13, 0, 14);
1480 bool ta_port_num_support = amdgpu_sriov_vf(psp->adev) ? 0 :
1481 psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG;
1482
1483 /* populate the shared output buffer rather than the cmd input buffer
1484 * with node_ids as the input for GET_PEER_LINKS command execution.
1485 * This is required for GET_PEER_LINKS per xgmi ta implementation.
1486 * The same requirement for GET_EXTEND_PEER_LINKS command.
1487 */
1488 if (ta_port_num_support) {
1489 link_extend_info_output = &xgmi_cmd->xgmi_out_message.get_extend_link_info;
1490
1491 for (i = 0; i < topology->num_nodes; i++)
1492 link_extend_info_output->nodes[i].node_id = topology->nodes[i].node_id;
1493
1494 link_extend_info_output->num_nodes = topology->num_nodes;
1495 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_EXTEND_PEER_LINKS;
1496 } else {
1497 link_info_output = &xgmi_cmd->xgmi_out_message.get_link_info;
1498
1499 for (i = 0; i < topology->num_nodes; i++)
1500 link_info_output->nodes[i].node_id = topology->nodes[i].node_id;
1501
1502 link_info_output->num_nodes = topology->num_nodes;
1503 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_PEER_LINKS;
1504 }
1505
1506 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1507 if (ret)
1508 return ret;
1509
1510 for (i = 0; i < topology->num_nodes; i++) {
1511 uint8_t node_num_links = ta_port_num_support ?
1512 link_extend_info_output->nodes[i].num_links : link_info_output->nodes[i].num_links;
1513 /* accumulate num_links on extended data */
1514 if (get_extended_data) {
1515 topology->nodes[i].num_links = topology->nodes[i].num_links + node_num_links;
1516 } else {
1517 topology->nodes[i].num_links = (requires_reflection && topology->nodes[i].num_links) ?
1518 topology->nodes[i].num_links : node_num_links;
1519 }
1520 /* populate the connected port num info if supported and available */
1521 if (ta_port_num_support && topology->nodes[i].num_links) {
1522 memcpy(topology->nodes[i].port_num, link_extend_info_output->nodes[i].port_num,
1523 sizeof(struct xgmi_connected_port_num) * TA_XGMI__MAX_PORT_NUM);
1524 }
1525
1526 /* reflect the topology information for bi-directionality */
1527 if (requires_reflection && topology->nodes[i].num_hops)
1528 psp_xgmi_reflect_topology_info(psp, topology->nodes[i]);
1529 }
1530 }
1531
1532 return 0;
1533 }
1534
1535 int psp_xgmi_set_topology_info(struct psp_context *psp,
1536 int number_devices,
1537 struct psp_xgmi_topology_info *topology)
1538 {
1539 struct ta_xgmi_shared_memory *xgmi_cmd;
1540 struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
1541 int i;
1542
1543 if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
1544 return -EINVAL;
1545
1546 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1547 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1548
1549 topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
1550 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
1551 topology_info_input->num_nodes = number_devices;
1552
1553 for (i = 0; i < topology_info_input->num_nodes; i++) {
1554 topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
1555 topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
1556 topology_info_input->nodes[i].is_sharing_enabled = 1;
1557 topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
1558 }
1559
1560 /* Invoke xgmi ta to set topology information */
1561 return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
1562 }
1563
1564 // ras begin
1565 static void psp_ras_ta_check_status(struct psp_context *psp)
1566 {
1567 struct ta_ras_shared_memory *ras_cmd =
1568 (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1569
1570 switch (ras_cmd->ras_status) {
1571 case TA_RAS_STATUS__ERROR_UNSUPPORTED_IP:
1572 dev_warn(psp->adev->dev,
1573 "RAS WARNING: cmd failed due to unsupported ip\n");
1574 break;
1575 case TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ:
1576 dev_warn(psp->adev->dev,
1577 "RAS WARNING: cmd failed due to unsupported error injection\n");
1578 break;
1579 case TA_RAS_STATUS__SUCCESS:
1580 break;
1581 case TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED:
1582 if (ras_cmd->cmd_id == TA_RAS_COMMAND__TRIGGER_ERROR)
1583 dev_warn(psp->adev->dev,
1584 "RAS WARNING: Inject error to critical region is not allowed\n");
1585 break;
1586 default:
1587 dev_warn(psp->adev->dev,
1588 "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
1589 break;
1590 }
1591 }
1592
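/*
 * Marshal a RAS TA command into the shared buffer under ras_context.mutex,
 * invoke the TA, and copy any command output back to @out.
 */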
1593 static int psp_ras_send_cmd(struct psp_context *psp,
1594 enum ras_command cmd_id, void *in, void *out)
1595 {
1596 struct ta_ras_shared_memory *ras_cmd;
1597 uint32_t cmd = cmd_id;
1598 int ret = 0;
1599
1600 if (!in)
1601 return -EINVAL;
1602
1603 mutex_lock(&psp->ras_context.mutex);
1604 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1605 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1606
1607 switch (cmd) {
1608 case TA_RAS_COMMAND__ENABLE_FEATURES:
1609 case TA_RAS_COMMAND__DISABLE_FEATURES:
1610 memcpy(&ras_cmd->ras_in_message,
1611 in, sizeof(ras_cmd->ras_in_message));
1612 break;
1613 case TA_RAS_COMMAND__TRIGGER_ERROR:
1614 memcpy(&ras_cmd->ras_in_message.trigger_error,
1615 in, sizeof(ras_cmd->ras_in_message.trigger_error));
1616 break;
1617 case TA_RAS_COMMAND__QUERY_ADDRESS:
1618 memcpy(&ras_cmd->ras_in_message.address,
1619 in, sizeof(ras_cmd->ras_in_message.address));
1620 break;
1621 default:
1622 dev_err(psp->adev->dev, "Invalid ras cmd id: %u\n", cmd);
1623 ret = -EINVAL;
1624 goto err_out;
1625 }
1626
1627 ras_cmd->cmd_id = cmd;
1628 ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
1629
1630 switch (cmd) {
1631 case TA_RAS_COMMAND__TRIGGER_ERROR:
1632 if (!ret && out)
1633 memcpy(out, &ras_cmd->ras_status, sizeof(ras_cmd->ras_status));
1634 break;
1635 case TA_RAS_COMMAND__QUERY_ADDRESS:
1636 if (ret || ras_cmd->ras_status || psp->cmd_buf_mem->resp.status)
1637 ret = -EINVAL;
1638 else if (out)
1639 memcpy(out,
1640 &ras_cmd->ras_out_message.address,
1641 sizeof(ras_cmd->ras_out_message.address));
1642 break;
1643 default:
1644 break;
1645 }
1646
1647 err_out:
1648 mutex_unlock(&psp->ras_context.mutex);
1649
1650 return ret;
1651 }
1652
1653 int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1654 {
1655 struct ta_ras_shared_memory *ras_cmd;
1656 int ret;
1657
1658 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1659
1660 /*
1661 * TODO: bypass the loading in sriov for now
1662 */
1663 if (amdgpu_sriov_vf(psp->adev))
1664 return 0;
1665
1666 ret = psp_ta_invoke(psp, ta_cmd_id, &psp->ras_context.context);
1667
1668 if (amdgpu_ras_intr_triggered())
1669 return ret;
1670
1671 if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) {
1672 dev_warn(psp->adev->dev, "RAS: Unsupported Interface\n");
1673 return -EINVAL;
1674 }
1675
1676 if (!ret) {
1677 if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) {
1678 dev_warn(psp->adev->dev, "ECC switch disabled\n");
1679
1680 ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE;
1681 } else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
1682 dev_warn(psp->adev->dev,
1683 "RAS internal register access blocked\n");
1684
1685 psp_ras_ta_check_status(psp);
1686 }
1687
1688 return ret;
1689 }
1690
1691 int psp_ras_enable_features(struct psp_context *psp,
1692 union ta_ras_cmd_input *info, bool enable)
1693 {
1694 enum ras_command cmd_id;
1695 int ret;
1696
1697 if (!psp->ras_context.context.initialized || !info)
1698 return -EINVAL;
1699
1700 cmd_id = enable ?
1701 TA_RAS_COMMAND__ENABLE_FEATURES : TA_RAS_COMMAND__DISABLE_FEATURES;
1702 ret = psp_ras_send_cmd(psp, cmd_id, info, NULL);
1703 if (ret)
1704 return -EINVAL;
1705
1706 return 0;
1707 }
1708
1709 int psp_ras_terminate(struct psp_context *psp)
1710 {
1711 int ret;
1712
1713 /*
1714 * TODO: bypass the terminate in sriov for now
1715 */
1716 if (amdgpu_sriov_vf(psp->adev))
1717 return 0;
1718
1719 if (!psp->ras_context.context.initialized)
1720 return 0;
1721
1722 ret = psp_ta_unload(psp, &psp->ras_context.context);
1723
1724 psp->ras_context.context.initialized = false;
1725
1726 mutex_destroy(&psp->ras_context.mutex);
1727
1728 return ret;
1729 }
1730
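/*
 * Bring up the RAS TA: reconcile the GECC boot-config setting with the
 * current RAS enablement, allocate the shared buffer if needed, then load
 * the TA with init flags derived from the device configuration.
 */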
1731 int psp_ras_initialize(struct psp_context *psp)
1732 {
1733 int ret;
1734 uint32_t boot_cfg = 0xFF;
1735 struct amdgpu_device *adev = psp->adev;
1736 struct ta_ras_shared_memory *ras_cmd;
1737
1738 /*
1739 * TODO: bypass the initialize in sriov for now
1740 */
1741 if (amdgpu_sriov_vf(adev))
1742 return 0;
1743
1744 if (!adev->psp.ras_context.context.bin_desc.size_bytes ||
1745 !adev->psp.ras_context.context.bin_desc.start_addr) {
1746 dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n");
1747 return 0;
1748 }
1749
1750 if (amdgpu_atomfirmware_dynamic_boot_config_supported(adev)) {
1751 /* query GECC enablement status from boot config
1752 * boot_cfg: 1 means GECC is enabled, 0 means GECC is disabled
1753 */
1754 ret = psp_boot_config_get(adev, &boot_cfg);
1755 if (ret)
1756 dev_warn(adev->dev, "PSP get boot config failed\n");
1757
1758 if (!amdgpu_ras_is_supported(psp->adev, AMDGPU_RAS_BLOCK__UMC)) {
1759 if (!boot_cfg) {
1760 dev_info(adev->dev, "GECC is disabled\n");
1761 } else {
1762 /* disable GECC in next boot cycle if ras is
1763 * disabled by module parameter amdgpu_ras_enable
1764 * and/or amdgpu_ras_mask, or the boot_config_get
1765 * call failed
1766 */
1767 ret = psp_boot_config_set(adev, 0);
1768 if (ret)
1769 dev_warn(adev->dev, "PSP set boot config failed\n");
1770 else
1771 dev_warn(adev->dev, "GECC will be disabled in next boot cycle if set amdgpu_ras_enable and/or amdgpu_ras_mask to 0x0\n");
1772 }
1773 } else {
1774 if (boot_cfg == 1) {
1775 dev_info(adev->dev, "GECC is enabled\n");
1776 } else {
1777 /* enable GECC in next boot cycle if it is disabled
1778 * in boot config, or force enable GECC if failed to
1779 * get boot configuration
1780 */
1781 ret = psp_boot_config_set(adev, BOOT_CONFIG_GECC);
1782 if (ret)
1783 dev_warn(adev->dev, "PSP set boot config failed\n");
1784 else
1785 dev_warn(adev->dev, "GECC will be enabled in next boot cycle\n");
1786 }
1787 }
1788 }
1789
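/* Prepare the RAS TA shared buffer (allocating it on first use) before loading the TA */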
1790 psp->ras_context.context.mem_context.shared_mem_size = PSP_RAS_SHARED_MEM_SIZE;
1791 psp->ras_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1792
1793 if (!psp->ras_context.context.mem_context.shared_buf) {
1794 ret = psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context);
1795 if (ret)
1796 return ret;
1797 }
1798
1799 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1800 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1801
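/* Populate the RAS TA init flags from the current device configuration */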
1802 if (amdgpu_ras_is_poison_mode_supported(adev))
1803 ras_cmd->ras_in_message.init_flags.poison_mode_en = 1;
1804 if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
1805 ras_cmd->ras_in_message.init_flags.dgpu_mode = 1;
1806 ras_cmd->ras_in_message.init_flags.xcc_mask =
1807 adev->gfx.xcc_mask;
1808 ras_cmd->ras_in_message.init_flags.channel_dis_num = hweight32(adev->gmc.m_half_use) * 2;
1809
1810 ret = psp_ta_load(psp, &psp->ras_context.context);
1811
1812 if (!ret && !ras_cmd->ras_status) {
1813 psp->ras_context.context.initialized = true;
1814 mutex_init(&psp->ras_context.mutex);
1815 } else {
1816 if (ras_cmd->ras_status)
1817 dev_warn(adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
1818
1819 /* fail to load RAS TA */
1820 psp->ras_context.context.initialized = false;
1821 }
1822
1823 return ret;
1824 }
1825
1826 int psp_ras_trigger_error(struct psp_context *psp,
1827 struct ta_ras_trigger_error_input *info, uint32_t instance_mask)
1828 {
1829 struct amdgpu_device *adev = psp->adev;
1830 int ret;
1831 uint32_t dev_mask;
1832 uint32_t ras_status = 0;
1833
1834 if (!psp->ras_context.context.initialized || !info)
1835 return -EINVAL;
1836
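/* Translate the RAS block's logical instance mask into a device instance mask */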
1837 switch (info->block_id) {
1838 case TA_RAS_BLOCK__GFX:
1839 dev_mask = GET_MASK(GC, instance_mask);
1840 break;
1841 case TA_RAS_BLOCK__SDMA:
1842 dev_mask = GET_MASK(SDMA0, instance_mask);
1843 break;
1844 case TA_RAS_BLOCK__VCN:
1845 case TA_RAS_BLOCK__JPEG:
1846 dev_mask = GET_MASK(VCN, instance_mask);
1847 break;
1848 default:
1849 dev_mask = instance_mask;
1850 break;
1851 }
1852
1853 /* reuse sub_block_index for backward compatibility */
1854 dev_mask <<= AMDGPU_RAS_INST_SHIFT;
1855 dev_mask &= AMDGPU_RAS_INST_MASK;
1856 info->sub_block_index |= dev_mask;
1857
1858 ret = psp_ras_send_cmd(psp,
1859 TA_RAS_COMMAND__TRIGGER_ERROR, info, &ras_status);
1860 if (ret)
1861 return -EINVAL;
1862
1863 /* If err_event_athub occurs, error injection was successful; however,
1864 * the return status from the TA is no longer reliable
1865 */
1866 if (amdgpu_ras_intr_triggered())
1867 return 0;
1868
1869 if (ras_status == TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED)
1870 return -EACCES;
1871 else if (ras_status)
1872 return -EINVAL;
1873
1874 return 0;
1875 }
1876
1877 int psp_ras_query_address(struct psp_context *psp,
1878 struct ta_ras_query_address_input *addr_in,
1879 struct ta_ras_query_address_output *addr_out)
1880 {
1881 int ret;
1882
1883 if (!psp->ras_context.context.initialized ||
1884 !addr_in || !addr_out)
1885 return -EINVAL;
1886
1887 ret = psp_ras_send_cmd(psp,
1888 TA_RAS_COMMAND__QUERY_ADDRESS, addr_in, addr_out);
1889
1890 return ret;
1891 }
1892 // ras end
1893
1894 // HDCP start
1895 static int psp_hdcp_initialize(struct psp_context *psp)
1896 {
1897 int ret;
1898
1899 /*
1900 * TODO: bypass the initialize in sriov for now
1901 */
1902 if (amdgpu_sriov_vf(psp->adev))
1903 return 0;
1904
1905 /* bypass hdcp initialization if dmu is harvested */
1906 if (!amdgpu_device_has_display_hardware(psp->adev))
1907 return 0;
1908
1909 if (!psp->hdcp_context.context.bin_desc.size_bytes ||
1910 !psp->hdcp_context.context.bin_desc.start_addr) {
1911 dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
1912 return 0;
1913 }
1914
1915 psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE;
1916 psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1917
1918 if (!psp->hdcp_context.context.mem_context.shared_buf) {
1919 ret = psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context);
1920 if (ret)
1921 return ret;
1922 }
1923
1924 ret = psp_ta_load(psp, &psp->hdcp_context.context);
1925 if (!ret) {
1926 psp->hdcp_context.context.initialized = true;
1927 mutex_init(&psp->hdcp_context.mutex);
1928 }
1929
1930 return ret;
1931 }
1932
1933 int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1934 {
1935 /*
1936 * TODO: bypass the loading in sriov for now
1937 */
1938 if (amdgpu_sriov_vf(psp->adev))
1939 return 0;
1940
1941 if (!psp->hdcp_context.context.initialized)
1942 return 0;
1943
1944 return psp_ta_invoke(psp, ta_cmd_id, &psp->hdcp_context.context);
1945 }
1946
1947 static int psp_hdcp_terminate(struct psp_context *psp)
1948 {
1949 int ret;
1950
1951 /*
1952 * TODO: bypass the terminate in sriov for now
1953 */
1954 if (amdgpu_sriov_vf(psp->adev))
1955 return 0;
1956
1957 if (!psp->hdcp_context.context.initialized)
1958 return 0;
1959
1960 ret = psp_ta_unload(psp, &psp->hdcp_context.context);
1961
1962 psp->hdcp_context.context.initialized = false;
1963
1964 return ret;
1965 }
1966 // HDCP end
1967
1968 // DTM start
1969 static int psp_dtm_initialize(struct psp_context *psp)
1970 {
1971 int ret;
1972
1973 /*
1974 * TODO: bypass the initialize in sriov for now
1975 */
1976 if (amdgpu_sriov_vf(psp->adev))
1977 return 0;
1978
1979 /* bypass dtm initialization if dmu is harvested */
1980 if (!amdgpu_device_has_display_hardware(psp->adev))
1981 return 0;
1982
1983 if (!psp->dtm_context.context.bin_desc.size_bytes ||
1984 !psp->dtm_context.context.bin_desc.start_addr) {
1985 dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
1986 return 0;
1987 }
1988
1989 psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE;
1990 psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1991
1992 if (!psp->dtm_context.context.mem_context.shared_buf) {
1993 ret = psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context);
1994 if (ret)
1995 return ret;
1996 }
1997
1998 ret = psp_ta_load(psp, &psp->dtm_context.context);
1999 if (!ret) {
2000 psp->dtm_context.context.initialized = true;
2001 mutex_init(&psp->dtm_context.mutex);
2002 }
2003
2004 return ret;
2005 }
2006
2007 int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
2008 {
2009 /*
2010 * TODO: bypass the loading in sriov for now
2011 */
2012 if (amdgpu_sriov_vf(psp->adev))
2013 return 0;
2014
2015 if (!psp->dtm_context.context.initialized)
2016 return 0;
2017
2018 return psp_ta_invoke(psp, ta_cmd_id, &psp->dtm_context.context);
2019 }
2020
2021 static int psp_dtm_terminate(struct psp_context *psp)
2022 {
2023 int ret;
2024
2025 /*
2026 * TODO: bypass the terminate in sriov for now
2027 */
2028 if (amdgpu_sriov_vf(psp->adev))
2029 return 0;
2030
2031 if (!psp->dtm_context.context.initialized)
2032 return 0;
2033
2034 ret = psp_ta_unload(psp, &psp->dtm_context.context);
2035
2036 psp->dtm_context.context.initialized = false;
2037
2038 return ret;
2039 }
2040 // DTM end
2041
2042 // RAP start
2043 static int psp_rap_initialize(struct psp_context *psp)
2044 {
2045 int ret;
2046 enum ta_rap_status status = TA_RAP_STATUS__SUCCESS;
2047
2048 /*
2049 * TODO: bypass the initialize in sriov for now
2050 */
2051 if (amdgpu_sriov_vf(psp->adev))
2052 return 0;
2053
2054 if (!psp->rap_context.context.bin_desc.size_bytes ||
2055 !psp->rap_context.context.bin_desc.start_addr) {
2056 dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n");
2057 return 0;
2058 }
2059
2060 psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE;
2061 psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2062
2063 if (!psp->rap_context.context.mem_context.shared_buf) {
2064 ret = psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context);
2065 if (ret)
2066 return ret;
2067 }
2068
2069 ret = psp_ta_load(psp, &psp->rap_context.context);
2070 if (!ret) {
2071 psp->rap_context.context.initialized = true;
2072 mutex_init(&psp->rap_context.mutex);
2073 } else
2074 return ret;
2075
2076 ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE, &status);
2077 if (ret || status != TA_RAP_STATUS__SUCCESS) {
2078 psp_rap_terminate(psp);
2079 /* free rap shared memory */
2080 psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
2081
2082 dev_warn(psp->adev->dev, "RAP TA initialize fail (%d) status %d.\n",
2083 ret, status);
2084
2085 return ret;
2086 }
2087
2088 return 0;
2089 }
2090
2091 static int psp_rap_terminate(struct psp_context *psp)
2092 {
2093 int ret;
2094
2095 if (!psp->rap_context.context.initialized)
2096 return 0;
2097
2098 ret = psp_ta_unload(psp, &psp->rap_context.context);
2099
2100 psp->rap_context.context.initialized = false;
2101
2102 return ret;
2103 }
2104
2105 int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_status *status)
2106 {
2107 struct ta_rap_shared_memory *rap_cmd;
2108 int ret = 0;
2109
2110 if (!psp->rap_context.context.initialized)
2111 return 0;
2112
2113 if (ta_cmd_id != TA_CMD_RAP__INITIALIZE &&
2114 ta_cmd_id != TA_CMD_RAP__VALIDATE_L0)
2115 return -EINVAL;
2116
2117 mutex_lock(&psp->rap_context.mutex);
2118
2119 rap_cmd = (struct ta_rap_shared_memory *)
2120 psp->rap_context.context.mem_context.shared_buf;
2121 memset(rap_cmd, 0, sizeof(struct ta_rap_shared_memory));
2122
2123 rap_cmd->cmd_id = ta_cmd_id;
2124 rap_cmd->validation_method_id = METHOD_A;
2125
2126 ret = psp_ta_invoke(psp, rap_cmd->cmd_id, &psp->rap_context.context);
2127 if (ret)
2128 goto out_unlock;
2129
2130 if (status)
2131 *status = rap_cmd->rap_status;
2132
2133 out_unlock:
2134 mutex_unlock(&psp->rap_context.mutex);
2135
2136 return ret;
2137 }
2138 // RAP end
2139
2140 /* securedisplay start */
2141 static int psp_securedisplay_initialize(struct psp_context *psp)
2142 {
2143 int ret;
2144 struct ta_securedisplay_cmd *securedisplay_cmd;
2145
2146 /*
2147 * TODO: bypass the initialize in sriov for now
2148 */
2149 if (amdgpu_sriov_vf(psp->adev))
2150 return 0;
2151
2152 /* bypass securedisplay initialization if dmu is harvested */
2153 if (!amdgpu_device_has_display_hardware(psp->adev))
2154 return 0;
2155
2156 if (!psp->securedisplay_context.context.bin_desc.size_bytes ||
2157 !psp->securedisplay_context.context.bin_desc.start_addr) {
2158 dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n");
2159 return 0;
2160 }
2161
2162 psp->securedisplay_context.context.mem_context.shared_mem_size =
2163 PSP_SECUREDISPLAY_SHARED_MEM_SIZE;
2164 psp->securedisplay_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2165
2166 if (!psp->securedisplay_context.context.initialized) {
2167 ret = psp_ta_init_shared_buf(psp,
2168 &psp->securedisplay_context.context.mem_context);
2169 if (ret)
2170 return ret;
2171 }
2172
2173 ret = psp_ta_load(psp, &psp->securedisplay_context.context);
2174 if (!ret) {
2175 psp->securedisplay_context.context.initialized = true;
2176 mutex_init(&psp->securedisplay_context.mutex);
2177 } else
2178 return ret;
2179
2180 mutex_lock(&psp->securedisplay_context.mutex);
2181
2182 psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
2183 TA_SECUREDISPLAY_COMMAND__QUERY_TA);
2184
2185 ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA);
2186
2187 mutex_unlock(&psp->securedisplay_context.mutex);
2188
2189 if (ret) {
2190 psp_securedisplay_terminate(psp);
2191 /* free securedisplay shared memory */
2192 psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
2193 dev_err(psp->adev->dev, "SECUREDISPLAY TA initialize fail.\n");
2194 return -EINVAL;
2195 }
2196
2197 if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) {
2198 psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
2199 dev_err(psp->adev->dev, "SECUREDISPLAY: query securedisplay TA failed. ret 0x%x\n",
2200 securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret);
2201 /* don't try again */
2202 psp->securedisplay_context.context.bin_desc.size_bytes = 0;
2203 }
2204
2205 return 0;
2206 }
2207
2208 static int psp_securedisplay_terminate(struct psp_context *psp)
2209 {
2210 int ret;
2211
2212 /*
2213 * TODO: bypass the terminate in sriov for now
2214 */
2215 if (amdgpu_sriov_vf(psp->adev))
2216 return 0;
2217
2218 if (!psp->securedisplay_context.context.initialized)
2219 return 0;
2220
2221 ret = psp_ta_unload(psp, &psp->securedisplay_context.context);
2222
2223 psp->securedisplay_context.context.initialized = false;
2224
2225 return ret;
2226 }
2227
2228 int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
2229 {
2230 int ret;
2231
2232 if (!psp->securedisplay_context.context.initialized)
2233 return -EINVAL;
2234
2235 if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA &&
2236 ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC)
2237 return -EINVAL;
2238
2239 ret = psp_ta_invoke(psp, ta_cmd_id, &psp->securedisplay_context.context);
2240
2241 return ret;
2242 }
2243 /* SECUREDISPLAY end */
2244
2245 int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev)
2246 {
2247 struct psp_context *psp = &adev->psp;
2248 int ret = 0;
2249
2250 if (!amdgpu_sriov_vf(adev) && psp->funcs && psp->funcs->wait_for_bootloader != NULL)
2251 ret = psp->funcs->wait_for_bootloader(psp);
2252
2253 return ret;
2254 }
2255
2256 bool amdgpu_psp_get_ras_capability(struct psp_context *psp)
2257 {
2258 if (psp->funcs &&
2259 psp->funcs->get_ras_capability) {
2260 return psp->funcs->get_ras_capability(psp);
2261 } else {
2262 return false;
2263 }
2264 }
2265
2266 static int psp_hw_start(struct psp_context *psp)
2267 {
2268 struct amdgpu_device *adev = psp->adev;
2269 int ret;
2270
2271 if (!amdgpu_sriov_vf(adev)) {
2272 if ((is_psp_fw_valid(psp->kdb)) &&
2273 (psp->funcs->bootloader_load_kdb != NULL)) {
2274 ret = psp_bootloader_load_kdb(psp);
2275 if (ret) {
2276 dev_err(adev->dev, "PSP load kdb failed!\n");
2277 return ret;
2278 }
2279 }
2280
2281 if ((is_psp_fw_valid(psp->spl)) &&
2282 (psp->funcs->bootloader_load_spl != NULL)) {
2283 ret = psp_bootloader_load_spl(psp);
2284 if (ret) {
2285 dev_err(adev->dev, "PSP load spl failed!\n");
2286 return ret;
2287 }
2288 }
2289
2290 if ((is_psp_fw_valid(psp->sys)) &&
2291 (psp->funcs->bootloader_load_sysdrv != NULL)) {
2292 ret = psp_bootloader_load_sysdrv(psp);
2293 if (ret) {
2294 dev_err(adev->dev, "PSP load sys drv failed!\n");
2295 return ret;
2296 }
2297 }
2298
2299 if ((is_psp_fw_valid(psp->soc_drv)) &&
2300 (psp->funcs->bootloader_load_soc_drv != NULL)) {
2301 ret = psp_bootloader_load_soc_drv(psp);
2302 if (ret) {
2303 dev_err(adev->dev, "PSP load soc drv failed!\n");
2304 return ret;
2305 }
2306 }
2307
2308 if ((is_psp_fw_valid(psp->intf_drv)) &&
2309 (psp->funcs->bootloader_load_intf_drv != NULL)) {
2310 ret = psp_bootloader_load_intf_drv(psp);
2311 if (ret) {
2312 dev_err(adev->dev, "PSP load intf drv failed!\n");
2313 return ret;
2314 }
2315 }
2316
2317 if ((is_psp_fw_valid(psp->dbg_drv)) &&
2318 (psp->funcs->bootloader_load_dbg_drv != NULL)) {
2319 ret = psp_bootloader_load_dbg_drv(psp);
2320 if (ret) {
2321 dev_err(adev->dev, "PSP load dbg drv failed!\n");
2322 return ret;
2323 }
2324 }
2325
2326 if ((is_psp_fw_valid(psp->ras_drv)) &&
2327 (psp->funcs->bootloader_load_ras_drv != NULL)) {
2328 ret = psp_bootloader_load_ras_drv(psp);
2329 if (ret) {
2330 dev_err(adev->dev, "PSP load ras_drv failed!\n");
2331 return ret;
2332 }
2333 }
2334
2335 if ((is_psp_fw_valid(psp->ipkeymgr_drv)) &&
2336 (psp->funcs->bootloader_load_ipkeymgr_drv != NULL)) {
2337 ret = psp_bootloader_load_ipkeymgr_drv(psp);
2338 if (ret) {
2339 dev_err(adev->dev, "PSP load ipkeymgr_drv failed!\n");
2340 return ret;
2341 }
2342 }
2343
2344 if ((is_psp_fw_valid(psp->sos)) &&
2345 (psp->funcs->bootloader_load_sos != NULL)) {
2346 ret = psp_bootloader_load_sos(psp);
2347 if (ret) {
2348 dev_err(adev->dev, "PSP load sos failed!\n");
2349 return ret;
2350 }
2351 }
2352 }
2353
2354 ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
2355 if (ret) {
2356 dev_err(adev->dev, "PSP create ring failed!\n");
2357 return ret;
2358 }
2359
2360 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
2361 goto skip_pin_bo;
2362
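/* Set up the TMR here unless it is already set up at boot time and fw autoload is not supported */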
2363 if (!psp->boot_time_tmr || psp->autoload_supported) {
2364 ret = psp_tmr_init(psp);
2365 if (ret) {
2366 dev_err(adev->dev, "PSP tmr init failed!\n");
2367 return ret;
2368 }
2369 }
2370
2371 skip_pin_bo:
2372 /*
2373 * For ASICs with DF Cstate management centralized
2374 * to PMFW, TMR setup should be performed after PMFW
2375 * loaded and before other non-psp firmware loaded.
2376 */
2377 if (psp->pmfw_centralized_cstate_management) {
2378 ret = psp_load_smu_fw(psp);
2379 if (ret)
2380 return ret;
2381 }
2382
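/* Load the TMR now unless it was already set up at boot time with autoload supported */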
2383 if (!psp->boot_time_tmr || !psp->autoload_supported) {
2384 ret = psp_tmr_load(psp);
2385 if (ret) {
2386 dev_err(adev->dev, "PSP load tmr failed!\n");
2387 return ret;
2388 }
2389 }
2390
2391 return 0;
2392 }
2393
2394 static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
2395 enum psp_gfx_fw_type *type)
2396 {
2397 switch (ucode->ucode_id) {
2398 case AMDGPU_UCODE_ID_CAP:
2399 *type = GFX_FW_TYPE_CAP;
2400 break;
2401 case AMDGPU_UCODE_ID_SDMA0:
2402 *type = GFX_FW_TYPE_SDMA0;
2403 break;
2404 case AMDGPU_UCODE_ID_SDMA1:
2405 *type = GFX_FW_TYPE_SDMA1;
2406 break;
2407 case AMDGPU_UCODE_ID_SDMA2:
2408 *type = GFX_FW_TYPE_SDMA2;
2409 break;
2410 case AMDGPU_UCODE_ID_SDMA3:
2411 *type = GFX_FW_TYPE_SDMA3;
2412 break;
2413 case AMDGPU_UCODE_ID_SDMA4:
2414 *type = GFX_FW_TYPE_SDMA4;
2415 break;
2416 case AMDGPU_UCODE_ID_SDMA5:
2417 *type = GFX_FW_TYPE_SDMA5;
2418 break;
2419 case AMDGPU_UCODE_ID_SDMA6:
2420 *type = GFX_FW_TYPE_SDMA6;
2421 break;
2422 case AMDGPU_UCODE_ID_SDMA7:
2423 *type = GFX_FW_TYPE_SDMA7;
2424 break;
2425 case AMDGPU_UCODE_ID_CP_MES:
2426 *type = GFX_FW_TYPE_CP_MES;
2427 break;
2428 case AMDGPU_UCODE_ID_CP_MES_DATA:
2429 *type = GFX_FW_TYPE_MES_STACK;
2430 break;
2431 case AMDGPU_UCODE_ID_CP_MES1:
2432 *type = GFX_FW_TYPE_CP_MES_KIQ;
2433 break;
2434 case AMDGPU_UCODE_ID_CP_MES1_DATA:
2435 *type = GFX_FW_TYPE_MES_KIQ_STACK;
2436 break;
2437 case AMDGPU_UCODE_ID_CP_CE:
2438 *type = GFX_FW_TYPE_CP_CE;
2439 break;
2440 case AMDGPU_UCODE_ID_CP_PFP:
2441 *type = GFX_FW_TYPE_CP_PFP;
2442 break;
2443 case AMDGPU_UCODE_ID_CP_ME:
2444 *type = GFX_FW_TYPE_CP_ME;
2445 break;
2446 case AMDGPU_UCODE_ID_CP_MEC1:
2447 *type = GFX_FW_TYPE_CP_MEC;
2448 break;
2449 case AMDGPU_UCODE_ID_CP_MEC1_JT:
2450 *type = GFX_FW_TYPE_CP_MEC_ME1;
2451 break;
2452 case AMDGPU_UCODE_ID_CP_MEC2:
2453 *type = GFX_FW_TYPE_CP_MEC;
2454 break;
2455 case AMDGPU_UCODE_ID_CP_MEC2_JT:
2456 *type = GFX_FW_TYPE_CP_MEC_ME2;
2457 break;
2458 case AMDGPU_UCODE_ID_RLC_P:
2459 *type = GFX_FW_TYPE_RLC_P;
2460 break;
2461 case AMDGPU_UCODE_ID_RLC_V:
2462 *type = GFX_FW_TYPE_RLC_V;
2463 break;
2464 case AMDGPU_UCODE_ID_RLC_G:
2465 *type = GFX_FW_TYPE_RLC_G;
2466 break;
2467 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
2468 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL;
2469 break;
2470 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
2471 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM;
2472 break;
2473 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
2474 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM;
2475 break;
2476 case AMDGPU_UCODE_ID_RLC_IRAM:
2477 *type = GFX_FW_TYPE_RLC_IRAM;
2478 break;
2479 case AMDGPU_UCODE_ID_RLC_DRAM:
2480 *type = GFX_FW_TYPE_RLC_DRAM_BOOT;
2481 break;
2482 case AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS:
2483 *type = GFX_FW_TYPE_GLOBAL_TAP_DELAYS;
2484 break;
2485 case AMDGPU_UCODE_ID_SE0_TAP_DELAYS:
2486 *type = GFX_FW_TYPE_SE0_TAP_DELAYS;
2487 break;
2488 case AMDGPU_UCODE_ID_SE1_TAP_DELAYS:
2489 *type = GFX_FW_TYPE_SE1_TAP_DELAYS;
2490 break;
2491 case AMDGPU_UCODE_ID_SE2_TAP_DELAYS:
2492 *type = GFX_FW_TYPE_SE2_TAP_DELAYS;
2493 break;
2494 case AMDGPU_UCODE_ID_SE3_TAP_DELAYS:
2495 *type = GFX_FW_TYPE_SE3_TAP_DELAYS;
2496 break;
2497 case AMDGPU_UCODE_ID_SMC:
2498 *type = GFX_FW_TYPE_SMU;
2499 break;
2500 case AMDGPU_UCODE_ID_PPTABLE:
2501 *type = GFX_FW_TYPE_PPTABLE;
2502 break;
2503 case AMDGPU_UCODE_ID_UVD:
2504 *type = GFX_FW_TYPE_UVD;
2505 break;
2506 case AMDGPU_UCODE_ID_UVD1:
2507 *type = GFX_FW_TYPE_UVD1;
2508 break;
2509 case AMDGPU_UCODE_ID_VCE:
2510 *type = GFX_FW_TYPE_VCE;
2511 break;
2512 case AMDGPU_UCODE_ID_VCN:
2513 *type = GFX_FW_TYPE_VCN;
2514 break;
2515 case AMDGPU_UCODE_ID_VCN1:
2516 *type = GFX_FW_TYPE_VCN1;
2517 break;
2518 case AMDGPU_UCODE_ID_DMCU_ERAM:
2519 *type = GFX_FW_TYPE_DMCU_ERAM;
2520 break;
2521 case AMDGPU_UCODE_ID_DMCU_INTV:
2522 *type = GFX_FW_TYPE_DMCU_ISR;
2523 break;
2524 case AMDGPU_UCODE_ID_VCN0_RAM:
2525 *type = GFX_FW_TYPE_VCN0_RAM;
2526 break;
2527 case AMDGPU_UCODE_ID_VCN1_RAM:
2528 *type = GFX_FW_TYPE_VCN1_RAM;
2529 break;
2530 case AMDGPU_UCODE_ID_DMCUB:
2531 *type = GFX_FW_TYPE_DMUB;
2532 break;
2533 case AMDGPU_UCODE_ID_SDMA_UCODE_TH0:
2534 case AMDGPU_UCODE_ID_SDMA_RS64:
2535 *type = GFX_FW_TYPE_SDMA_UCODE_TH0;
2536 break;
2537 case AMDGPU_UCODE_ID_SDMA_UCODE_TH1:
2538 *type = GFX_FW_TYPE_SDMA_UCODE_TH1;
2539 break;
2540 case AMDGPU_UCODE_ID_IMU_I:
2541 *type = GFX_FW_TYPE_IMU_I;
2542 break;
2543 case AMDGPU_UCODE_ID_IMU_D:
2544 *type = GFX_FW_TYPE_IMU_D;
2545 break;
2546 case AMDGPU_UCODE_ID_CP_RS64_PFP:
2547 *type = GFX_FW_TYPE_RS64_PFP;
2548 break;
2549 case AMDGPU_UCODE_ID_CP_RS64_ME:
2550 *type = GFX_FW_TYPE_RS64_ME;
2551 break;
2552 case AMDGPU_UCODE_ID_CP_RS64_MEC:
2553 *type = GFX_FW_TYPE_RS64_MEC;
2554 break;
2555 case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
2556 *type = GFX_FW_TYPE_RS64_PFP_P0_STACK;
2557 break;
2558 case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
2559 *type = GFX_FW_TYPE_RS64_PFP_P1_STACK;
2560 break;
2561 case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
2562 *type = GFX_FW_TYPE_RS64_ME_P0_STACK;
2563 break;
2564 case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
2565 *type = GFX_FW_TYPE_RS64_ME_P1_STACK;
2566 break;
2567 case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
2568 *type = GFX_FW_TYPE_RS64_MEC_P0_STACK;
2569 break;
2570 case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
2571 *type = GFX_FW_TYPE_RS64_MEC_P1_STACK;
2572 break;
2573 case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
2574 *type = GFX_FW_TYPE_RS64_MEC_P2_STACK;
2575 break;
2576 case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
2577 *type = GFX_FW_TYPE_RS64_MEC_P3_STACK;
2578 break;
2579 case AMDGPU_UCODE_ID_VPE_CTX:
2580 *type = GFX_FW_TYPE_VPEC_FW1;
2581 break;
2582 case AMDGPU_UCODE_ID_VPE_CTL:
2583 *type = GFX_FW_TYPE_VPEC_FW2;
2584 break;
2585 case AMDGPU_UCODE_ID_VPE:
2586 *type = GFX_FW_TYPE_VPE;
2587 break;
2588 case AMDGPU_UCODE_ID_UMSCH_MM_UCODE:
2589 *type = GFX_FW_TYPE_UMSCH_UCODE;
2590 break;
2591 case AMDGPU_UCODE_ID_UMSCH_MM_DATA:
2592 *type = GFX_FW_TYPE_UMSCH_DATA;
2593 break;
2594 case AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER:
2595 *type = GFX_FW_TYPE_UMSCH_CMD_BUFFER;
2596 break;
2597 case AMDGPU_UCODE_ID_P2S_TABLE:
2598 *type = GFX_FW_TYPE_P2S_TABLE;
2599 break;
2600 case AMDGPU_UCODE_ID_JPEG_RAM:
2601 *type = GFX_FW_TYPE_JPEG_RAM;
2602 break;
2603 case AMDGPU_UCODE_ID_ISP:
2604 *type = GFX_FW_TYPE_ISP;
2605 break;
2606 case AMDGPU_UCODE_ID_MAXIMUM:
2607 default:
2608 return -EINVAL;
2609 }
2610
2611 return 0;
2612 }
2613
2614 static void psp_print_fw_hdr(struct psp_context *psp,
2615 struct amdgpu_firmware_info *ucode)
2616 {
2617 struct amdgpu_device *adev = psp->adev;
2618 struct common_firmware_header *hdr;
2619
2620 switch (ucode->ucode_id) {
2621 case AMDGPU_UCODE_ID_SDMA0:
2622 case AMDGPU_UCODE_ID_SDMA1:
2623 case AMDGPU_UCODE_ID_SDMA2:
2624 case AMDGPU_UCODE_ID_SDMA3:
2625 case AMDGPU_UCODE_ID_SDMA4:
2626 case AMDGPU_UCODE_ID_SDMA5:
2627 case AMDGPU_UCODE_ID_SDMA6:
2628 case AMDGPU_UCODE_ID_SDMA7:
2629 hdr = (struct common_firmware_header *)
2630 adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
2631 amdgpu_ucode_print_sdma_hdr(hdr);
2632 break;
2633 case AMDGPU_UCODE_ID_CP_CE:
2634 hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data;
2635 amdgpu_ucode_print_gfx_hdr(hdr);
2636 break;
2637 case AMDGPU_UCODE_ID_CP_PFP:
2638 hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data;
2639 amdgpu_ucode_print_gfx_hdr(hdr);
2640 break;
2641 case AMDGPU_UCODE_ID_CP_ME:
2642 hdr = (struct common_firmware_header *)adev->gfx.me_fw->data;
2643 amdgpu_ucode_print_gfx_hdr(hdr);
2644 break;
2645 case AMDGPU_UCODE_ID_CP_MEC1:
2646 hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data;
2647 amdgpu_ucode_print_gfx_hdr(hdr);
2648 break;
2649 case AMDGPU_UCODE_ID_RLC_G:
2650 hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data;
2651 amdgpu_ucode_print_rlc_hdr(hdr);
2652 break;
2653 case AMDGPU_UCODE_ID_SMC:
2654 hdr = (struct common_firmware_header *)adev->pm.fw->data;
2655 amdgpu_ucode_print_smc_hdr(hdr);
2656 break;
2657 default:
2658 break;
2659 }
2660 }
2661
2662 static int psp_prep_load_ip_fw_cmd_buf(struct psp_context *psp,
2663 struct amdgpu_firmware_info *ucode,
2664 struct psp_gfx_cmd_resp *cmd)
2665 {
2666 int ret;
2667 uint64_t fw_mem_mc_addr = ucode->mc_addr;
2668
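/* Split the firmware's MC address into 32-bit halves for the LOAD_IP_FW command */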
2669 cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
2670 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
2671 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
2672 cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size;
2673
2674 ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
2675 if (ret)
2676 dev_err(psp->adev->dev, "Unknown firmware type\n");
2677
2678 return ret;
2679 }
2680
2681 int psp_execute_ip_fw_load(struct psp_context *psp,
2682 struct amdgpu_firmware_info *ucode)
2683 {
2684 int ret = 0;
2685 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
2686
2687 ret = psp_prep_load_ip_fw_cmd_buf(psp, ucode, cmd);
2688 if (!ret) {
2689 ret = psp_cmd_submit_buf(psp, ucode, cmd,
2690 psp->fence_buf_mc_addr);
2691 }
2692
2693 release_psp_cmd_buf(psp);
2694
2695 return ret;
2696 }
2697
2698 static int psp_load_p2s_table(struct psp_context *psp)
2699 {
2700 int ret;
2701 struct amdgpu_device *adev = psp->adev;
2702 struct amdgpu_firmware_info *ucode =
2703 &adev->firmware.ucode[AMDGPU_UCODE_ID_P2S_TABLE];
2704
2705 if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
2706 (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)))
2707 return 0;
2708
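/* On MP0 13.0.6/13.0.14, loading the P2S table requires a minimum SOS firmware version */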
2709 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
2710 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) {
2711 uint32_t supp_vers = adev->flags & AMD_IS_APU ? 0x0036013D :
2712 0x0036003C;
2713 if (psp->sos.fw_version < supp_vers)
2714 return 0;
2715 }
2716
2717 if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
2718 return 0;
2719
2720 ret = psp_execute_ip_fw_load(psp, ucode);
2721
2722 return ret;
2723 }
2724
2725 static int psp_load_smu_fw(struct psp_context *psp)
2726 {
2727 int ret;
2728 struct amdgpu_device *adev = psp->adev;
2729 struct amdgpu_firmware_info *ucode =
2730 &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
2731 struct amdgpu_ras *ras = psp->ras_context.ras;
2732
2733 /*
2734 * Skip SMU FW reloading in case of using BACO for runpm only,
2735 * as SMU is always alive.
2736 */
2737 if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
2738 (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)))
2739 return 0;
2740
2741 if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
2742 return 0;
2743
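/* For a GPU reset with RAS enabled on MP0 11.0.2/11.0.4, put MP1 into the UNLOAD state before reloading SMU firmware */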
2744 if ((amdgpu_in_reset(adev) && ras && adev->ras_enabled &&
2745 (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
2746 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 2)))) {
2747 ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
2748 if (ret)
2749 dev_err(adev->dev, "Failed to set MP1 state prepare for reload\n");
2750 }
2751
2752 ret = psp_execute_ip_fw_load(psp, ucode);
2753
2754 if (ret)
2755 dev_err(adev->dev, "PSP load smu failed!\n");
2756
2757 return ret;
2758 }
2759
2760 static bool fw_load_skip_check(struct psp_context *psp,
2761 struct amdgpu_firmware_info *ucode)
2762 {
2763 if (!ucode->fw || !ucode->ucode_size)
2764 return true;
2765
2766 if (ucode->ucode_id == AMDGPU_UCODE_ID_P2S_TABLE)
2767 return true;
2768
2769 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
2770 (psp_smu_reload_quirk(psp) ||
2771 psp->autoload_supported ||
2772 psp->pmfw_centralized_cstate_management))
2773 return true;
2774
2775 if (amdgpu_sriov_vf(psp->adev) &&
2776 amdgpu_virt_fw_load_skip_check(psp->adev, ucode->ucode_id))
2777 return true;
2778
2779 if (psp->autoload_supported &&
2780 (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
2781 ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
2782 /* skip mec JT when autoload is enabled */
2783 return true;
2784
2785 return false;
2786 }
2787
2788 int psp_load_fw_list(struct psp_context *psp,
2789 struct amdgpu_firmware_info **ucode_list, int ucode_count)
2790 {
2791 int ret = 0, i;
2792 struct amdgpu_firmware_info *ucode;
2793
2794 for (i = 0; i < ucode_count; ++i) {
2795 ucode = ucode_list[i];
2796 psp_print_fw_hdr(psp, ucode);
2797 ret = psp_execute_ip_fw_load(psp, ucode);
2798 if (ret)
2799 return ret;
2800 }
2801 return ret;
2802 }
2803
2804 static int psp_load_non_psp_fw(struct psp_context *psp)
2805 {
2806 int i, ret;
2807 struct amdgpu_firmware_info *ucode;
2808 struct amdgpu_device *adev = psp->adev;
2809
2810 if (psp->autoload_supported &&
2811 !psp->pmfw_centralized_cstate_management) {
2812 ret = psp_load_smu_fw(psp);
2813 if (ret)
2814 return ret;
2815 }
2816
2817 /* Load P2S table first if it's available */
2818 psp_load_p2s_table(psp);
2819
2820 for (i = 0; i < adev->firmware.max_ucodes; i++) {
2821 ucode = &adev->firmware.ucode[i];
2822
2823 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
2824 !fw_load_skip_check(psp, ucode)) {
2825 ret = psp_load_smu_fw(psp);
2826 if (ret)
2827 return ret;
2828 continue;
2829 }
2830
2831 if (fw_load_skip_check(psp, ucode))
2832 continue;
2833
2834 if (psp->autoload_supported &&
2835 (amdgpu_ip_version(adev, MP0_HWIP, 0) ==
2836 IP_VERSION(11, 0, 7) ||
2837 amdgpu_ip_version(adev, MP0_HWIP, 0) ==
2838 IP_VERSION(11, 0, 11) ||
2839 amdgpu_ip_version(adev, MP0_HWIP, 0) ==
2840 IP_VERSION(11, 0, 12)) &&
2841 (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 ||
2842 ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 ||
2843 ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3))
2844 /* PSP only receives one SDMA fw for sienna_cichlid,
2845 * as all four SDMA firmwares are the same
2846 */
2847 continue;
2848
2849 psp_print_fw_hdr(psp, ucode);
2850
2851 ret = psp_execute_ip_fw_load(psp, ucode);
2852 if (ret)
2853 return ret;
2854
2855 /* Start rlc autoload after psp received all the gfx firmware */
2856 if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
2857 adev->virt.autoload_ucode_id : AMDGPU_UCODE_ID_RLC_G)) {
2858 ret = psp_rlc_autoload_start(psp);
2859 if (ret) {
2860 dev_err(adev->dev, "Failed to start rlc autoload\n");
2861 return ret;
2862 }
2863 }
2864 }
2865
2866 return 0;
2867 }
2868
2869 static int psp_load_fw(struct amdgpu_device *adev)
2870 {
2871 int ret;
2872 struct psp_context *psp = &adev->psp;
2873
2874 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
2875 /* should not destroy ring, only stop */
2876 psp_ring_stop(psp, PSP_RING_TYPE__KM);
2877 } else {
2878 memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);
2879
2880 ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
2881 if (ret) {
2882 dev_err(adev->dev, "PSP ring init failed!\n");
2883 goto failed;
2884 }
2885 }
2886
2887 ret = psp_hw_start(psp);
2888 if (ret)
2889 goto failed;
2890
2891 ret = psp_load_non_psp_fw(psp);
2892 if (ret)
2893 goto failed1;
2894
2895 ret = psp_asd_initialize(psp);
2896 if (ret) {
2897 dev_err(adev->dev, "PSP load asd failed!\n");
2898 goto failed1;
2899 }
2900
2901 ret = psp_rl_load(adev);
2902 if (ret) {
2903 dev_err(adev->dev, "PSP load RL failed!\n");
2904 goto failed1;
2905 }
2906
2907 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
2908 if (adev->gmc.xgmi.num_physical_nodes > 1) {
2909 ret = psp_xgmi_initialize(psp, false, true);
2910 /* Warn on XGMI session initialization failure
2911 * instead of stopping driver initialization
2912 */
2913 if (ret)
2914 dev_err(psp->adev->dev,
2915 "XGMI: Failed to initialize XGMI session\n");
2916 }
2917 }
2918
2919 if (psp->ta_fw) {
2920 ret = psp_ras_initialize(psp);
2921 if (ret)
2922 dev_err(psp->adev->dev,
2923 "RAS: Failed to initialize RAS\n");
2924
2925 ret = psp_hdcp_initialize(psp);
2926 if (ret)
2927 dev_err(psp->adev->dev,
2928 "HDCP: Failed to initialize HDCP\n");
2929
2930 ret = psp_dtm_initialize(psp);
2931 if (ret)
2932 dev_err(psp->adev->dev,
2933 "DTM: Failed to initialize DTM\n");
2934
2935 ret = psp_rap_initialize(psp);
2936 if (ret)
2937 dev_err(psp->adev->dev,
2938 "RAP: Failed to initialize RAP\n");
2939
2940 ret = psp_securedisplay_initialize(psp);
2941 if (ret)
2942 dev_err(psp->adev->dev,
2943 "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
2944 }
2945
2946 return 0;
2947
2948 failed1:
2949 psp_free_shared_bufs(psp);
2950 failed:
2951 /*
2952 * all cleanup jobs (xgmi terminate, ras terminate,
2953 * ring destroy, cmd/fence/fw buffers destroy,
2954 * psp->cmd destroy) are delayed to psp_hw_fini
2955 */
2956 psp_ring_destroy(psp, PSP_RING_TYPE__KM);
2957 return ret;
2958 }
2959
2960 static int psp_hw_init(void *handle)
2961 {
2962 int ret;
2963 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2964
2965 mutex_lock(&adev->firmware.mutex);
2966 /*
2967 * This sequence is just used on hw_init only once, no need on
2968 * resume.
2969 */
2970 ret = amdgpu_ucode_init_bo(adev);
2971 if (ret)
2972 goto failed;
2973
2974 ret = psp_load_fw(adev);
2975 if (ret) {
2976 dev_err(adev->dev, "PSP firmware loading failed\n");
2977 goto failed;
2978 }
2979
2980 mutex_unlock(&adev->firmware.mutex);
2981 return 0;
2982
2983 failed:
2984 adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
2985 mutex_unlock(&adev->firmware.mutex);
2986 return -EINVAL;
2987 }
2988
2989 static int psp_hw_fini(void *handle)
2990 {
2991 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2992 struct psp_context *psp = &adev->psp;
2993
2994 if (psp->ta_fw) {
2995 psp_ras_terminate(psp);
2996 psp_securedisplay_terminate(psp);
2997 psp_rap_terminate(psp);
2998 psp_dtm_terminate(psp);
2999 psp_hdcp_terminate(psp);
3000
3001 if (adev->gmc.xgmi.num_physical_nodes > 1)
3002 psp_xgmi_terminate(psp);
3003 }
3004
3005 psp_asd_terminate(psp);
3006 psp_tmr_terminate(psp);
3007
3008 psp_ring_destroy(psp, PSP_RING_TYPE__KM);
3009
3010 return 0;
3011 }
3012
3013 static int psp_suspend(void *handle)
3014 {
3015 int ret = 0;
3016 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3017 struct psp_context *psp = &adev->psp;
3018
3019 if (adev->gmc.xgmi.num_physical_nodes > 1 &&
3020 psp->xgmi_context.context.initialized) {
3021 ret = psp_xgmi_terminate(psp);
3022 if (ret) {
3023 dev_err(adev->dev, "Failed to terminate xgmi ta\n");
3024 goto out;
3025 }
3026 }
3027
3028 if (psp->ta_fw) {
3029 ret = psp_ras_terminate(psp);
3030 if (ret) {
3031 dev_err(adev->dev, "Failed to terminate ras ta\n");
3032 goto out;
3033 }
3034 ret = psp_hdcp_terminate(psp);
3035 if (ret) {
3036 dev_err(adev->dev, "Failed to terminate hdcp ta\n");
3037 goto out;
3038 }
3039 ret = psp_dtm_terminate(psp);
3040 if (ret) {
3041 dev_err(adev->dev, "Failed to terminate dtm ta\n");
3042 goto out;
3043 }
3044 ret = psp_rap_terminate(psp);
3045 if (ret) {
3046 dev_err(adev->dev, "Failed to terminate rap ta\n");
3047 goto out;
3048 }
3049 ret = psp_securedisplay_terminate(psp);
3050 if (ret) {
3051 dev_err(adev->dev, "Failed to terminate securedisplay ta\n");
3052 goto out;
3053 }
3054 }
3055
3056 ret = psp_asd_terminate(psp);
3057 if (ret) {
3058 dev_err(adev->dev, "Failed to terminate asd\n");
3059 goto out;
3060 }
3061
3062 ret = psp_tmr_terminate(psp);
3063 if (ret) {
3064 dev_err(adev->dev, "Failed to terminate tmr\n");
3065 goto out;
3066 }
3067
3068 ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
3069 if (ret)
3070 dev_err(adev->dev, "PSP ring stop failed\n");
3071
3072 out:
3073 return ret;
3074 }
3075
3076 static int psp_resume(void *handle)
3077 {
3078 int ret;
3079 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3080 struct psp_context *psp = &adev->psp;
3081
3082 dev_info(adev->dev, "PSP is resuming...\n");
3083
3084 if (psp->mem_train_ctx.enable_mem_training) {
3085 ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
3086 if (ret) {
3087 dev_err(adev->dev, "Failed to process memory training!\n");
3088 return ret;
3089 }
3090 }
3091
3092 mutex_lock(&adev->firmware.mutex);
3093
3094 ret = psp_hw_start(psp);
3095 if (ret)
3096 goto failed;
3097
3098 ret = psp_load_non_psp_fw(psp);
3099 if (ret)
3100 goto failed;
3101
3102 ret = psp_asd_initialize(psp);
3103 if (ret) {
3104 dev_err(adev->dev, "PSP load asd failed!\n");
3105 goto failed;
3106 }
3107
3108 ret = psp_rl_load(adev);
3109 if (ret) {
3110 dev_err(adev->dev, "PSP load RL failed!\n");
3111 goto failed;
3112 }
3113
3114 if (adev->gmc.xgmi.num_physical_nodes > 1) {
3115 ret = psp_xgmi_initialize(psp, false, true);
3116 /* Warn on XGMI session initialization failure
3117 * instead of stopping driver initialization
3118 */
3119 if (ret)
3120 dev_err(psp->adev->dev,
3121 "XGMI: Failed to initialize XGMI session\n");
3122 }
3123
3124 if (psp->ta_fw) {
3125 ret = psp_ras_initialize(psp);
3126 if (ret)
3127 dev_err(psp->adev->dev,
3128 "RAS: Failed to initialize RAS\n");
3129
3130 ret = psp_hdcp_initialize(psp);
3131 if (ret)
3132 dev_err(psp->adev->dev,
3133 "HDCP: Failed to initialize HDCP\n");
3134
3135 ret = psp_dtm_initialize(psp);
3136 if (ret)
3137 dev_err(psp->adev->dev,
3138 "DTM: Failed to initialize DTM\n");
3139
3140 ret = psp_rap_initialize(psp);
3141 if (ret)
3142 dev_err(psp->adev->dev,
3143 "RAP: Failed to initialize RAP\n");
3144
3145 ret = psp_securedisplay_initialize(psp);
3146 if (ret)
3147 dev_err(psp->adev->dev,
3148 "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
3149 }
3150
3151 mutex_unlock(&adev->firmware.mutex);
3152
3153 return 0;
3154
3155 failed:
3156 dev_err(adev->dev, "PSP resume failed\n");
3157 mutex_unlock(&adev->firmware.mutex);
3158 return ret;
3159 }
3160
3161 int psp_gpu_reset(struct amdgpu_device *adev)
3162 {
3163 int ret;
3164
3165 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
3166 return 0;
3167
3168 mutex_lock(&adev->psp.mutex);
3169 ret = psp_mode1_reset(&adev->psp);
3170 mutex_unlock(&adev->psp.mutex);
3171
3172 return ret;
3173 }
3174
3175 int psp_rlc_autoload_start(struct psp_context *psp)
3176 {
3177 int ret;
3178 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
3179
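/* GFX_CMD_ID_AUTOLOAD_RLC asks the PSP to start RLC autoload once it has received all the GFX firmware */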
3180 cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC;
3181
3182 ret = psp_cmd_submit_buf(psp, NULL, cmd,
3183 psp->fence_buf_mc_addr);
3184
3185 release_psp_cmd_buf(psp);
3186
3187 return ret;
3188 }
3189
3190 int psp_ring_cmd_submit(struct psp_context *psp,
3191 uint64_t cmd_buf_mc_addr,
3192 uint64_t fence_mc_addr,
3193 int index)
3194 {
3195 unsigned int psp_write_ptr_reg = 0;
3196 struct psp_gfx_rb_frame *write_frame;
3197 struct psp_ring *ring = &psp->km_ring;
3198 struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
3199 struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
3200 ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
3201 struct amdgpu_device *adev = psp->adev;
3202 uint32_t ring_size_dw = ring->ring_size / 4;
3203 uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
3204
3205 /* KM (GPCOM) prepare write pointer */
3206 psp_write_ptr_reg = psp_ring_get_wptr(psp);
3207
3208 /* Update KM RB frame pointer to new frame */
3209 /* write_frame ptr increments by size of rb_frame in bytes */
3210 /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
3211 if ((psp_write_ptr_reg % ring_size_dw) == 0)
3212 write_frame = ring_buffer_start;
3213 else
3214 write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
3215 /* Check invalid write_frame ptr address */
3216 if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
3217 dev_err(adev->dev,
3218 "ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
3219 ring_buffer_start, ring_buffer_end, write_frame);
3220 dev_err(adev->dev,
3221 "write_frame is pointing to address out of bounds\n");
3222 return -EINVAL;
3223 }
3224
3225 /* Initialize KM RB frame */
3226 memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
3227
3228 /* Update KM RB frame */
3229 write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
3230 write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
3231 write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
3232 write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
3233 write_frame->fence_value = index;
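/* Flush HDP so the PSP sees the updated ring frame before the write pointer is bumped */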
3234 amdgpu_device_flush_hdp(adev, NULL);
3235
3236 /* Update the write Pointer in DWORDs */
3237 psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
3238 psp_ring_set_wptr(psp, psp_write_ptr_reg);
3239 return 0;
3240 }
3241
3242 int psp_init_asd_microcode(struct psp_context *psp, const char *chip_name)
3243 {
3244 struct amdgpu_device *adev = psp->adev;
3245 const struct psp_firmware_header_v1_0 *asd_hdr;
3246 int err = 0;
3247
3248 err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, "amdgpu/%s_asd.bin", chip_name);
3249 if (err)
3250 goto out;
3251
3252 asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
3253 adev->psp.asd_context.bin_desc.fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
3254 adev->psp.asd_context.bin_desc.feature_version = le32_to_cpu(asd_hdr->sos.fw_version);
3255 adev->psp.asd_context.bin_desc.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
3256 adev->psp.asd_context.bin_desc.start_addr = (uint8_t *)asd_hdr +
3257 le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
3258 return 0;
3259 out:
3260 amdgpu_ucode_release(&adev->psp.asd_fw);
3261 return err;
3262 }
3263
3264 int psp_init_toc_microcode(struct psp_context *psp, const char *chip_name)
3265 {
3266 struct amdgpu_device *adev = psp->adev;
3267 const struct psp_firmware_header_v1_0 *toc_hdr;
3268 int err = 0;
3269
3270 err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, "amdgpu/%s_toc.bin", chip_name);
3271 if (err)
3272 goto out;
3273
3274 toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
3275 adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
3276 adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
3277 adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
3278 adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
3279 le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
3280 return 0;
3281 out:
3282 amdgpu_ucode_release(&adev->psp.toc_fw);
3283 return err;
3284 }
3285
3286 static int parse_sos_bin_descriptor(struct psp_context *psp,
3287 const struct psp_fw_bin_desc *desc,
3288 const struct psp_firmware_header_v2_0 *sos_hdr)
3289 {
3290 uint8_t *ucode_start_addr = NULL;
3291
3292 if (!psp || !desc || !sos_hdr)
3293 return -EINVAL;
3294
3295 ucode_start_addr = (uint8_t *)sos_hdr +
3296 le32_to_cpu(desc->offset_bytes) +
3297 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3298
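/* Record the version, size and start address of the matching PSP firmware binary */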
3299 switch (desc->fw_type) {
3300 case PSP_FW_TYPE_PSP_SOS:
3301 psp->sos.fw_version = le32_to_cpu(desc->fw_version);
3302 psp->sos.feature_version = le32_to_cpu(desc->fw_version);
3303 psp->sos.size_bytes = le32_to_cpu(desc->size_bytes);
3304 psp->sos.start_addr = ucode_start_addr;
3305 break;
3306 case PSP_FW_TYPE_PSP_SYS_DRV:
3307 psp->sys.fw_version = le32_to_cpu(desc->fw_version);
3308 psp->sys.feature_version = le32_to_cpu(desc->fw_version);
3309 psp->sys.size_bytes = le32_to_cpu(desc->size_bytes);
3310 psp->sys.start_addr = ucode_start_addr;
3311 break;
3312 case PSP_FW_TYPE_PSP_KDB:
3313 psp->kdb.fw_version = le32_to_cpu(desc->fw_version);
3314 psp->kdb.feature_version = le32_to_cpu(desc->fw_version);
3315 psp->kdb.size_bytes = le32_to_cpu(desc->size_bytes);
3316 psp->kdb.start_addr = ucode_start_addr;
3317 break;
3318 case PSP_FW_TYPE_PSP_TOC:
3319 psp->toc.fw_version = le32_to_cpu(desc->fw_version);
3320 psp->toc.feature_version = le32_to_cpu(desc->fw_version);
3321 psp->toc.size_bytes = le32_to_cpu(desc->size_bytes);
3322 psp->toc.start_addr = ucode_start_addr;
3323 break;
3324 case PSP_FW_TYPE_PSP_SPL:
3325 psp->spl.fw_version = le32_to_cpu(desc->fw_version);
3326 psp->spl.feature_version = le32_to_cpu(desc->fw_version);
3327 psp->spl.size_bytes = le32_to_cpu(desc->size_bytes);
3328 psp->spl.start_addr = ucode_start_addr;
3329 break;
3330 case PSP_FW_TYPE_PSP_RL:
3331 psp->rl.fw_version = le32_to_cpu(desc->fw_version);
3332 psp->rl.feature_version = le32_to_cpu(desc->fw_version);
3333 psp->rl.size_bytes = le32_to_cpu(desc->size_bytes);
3334 psp->rl.start_addr = ucode_start_addr;
3335 break;
3336 case PSP_FW_TYPE_PSP_SOC_DRV:
3337 psp->soc_drv.fw_version = le32_to_cpu(desc->fw_version);
3338 psp->soc_drv.feature_version = le32_to_cpu(desc->fw_version);
3339 psp->soc_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3340 psp->soc_drv.start_addr = ucode_start_addr;
3341 break;
3342 case PSP_FW_TYPE_PSP_INTF_DRV:
3343 psp->intf_drv.fw_version = le32_to_cpu(desc->fw_version);
3344 psp->intf_drv.feature_version = le32_to_cpu(desc->fw_version);
3345 psp->intf_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3346 psp->intf_drv.start_addr = ucode_start_addr;
3347 break;
3348 case PSP_FW_TYPE_PSP_DBG_DRV:
3349 psp->dbg_drv.fw_version = le32_to_cpu(desc->fw_version);
3350 psp->dbg_drv.feature_version = le32_to_cpu(desc->fw_version);
3351 psp->dbg_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3352 psp->dbg_drv.start_addr = ucode_start_addr;
3353 break;
3354 case PSP_FW_TYPE_PSP_RAS_DRV:
3355 psp->ras_drv.fw_version = le32_to_cpu(desc->fw_version);
3356 psp->ras_drv.feature_version = le32_to_cpu(desc->fw_version);
3357 psp->ras_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3358 psp->ras_drv.start_addr = ucode_start_addr;
3359 break;
3360 case PSP_FW_TYPE_PSP_IPKEYMGR_DRV:
3361 psp->ipkeymgr_drv.fw_version = le32_to_cpu(desc->fw_version);
3362 psp->ipkeymgr_drv.feature_version = le32_to_cpu(desc->fw_version);
3363 psp->ipkeymgr_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3364 psp->ipkeymgr_drv.start_addr = ucode_start_addr;
3365 break;
3366 default:
3367 dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type);
3368 break;
3369 }
3370
3371 return 0;
3372 }
3373
3374 static int psp_init_sos_base_fw(struct amdgpu_device *adev)
3375 {
3376 const struct psp_firmware_header_v1_0 *sos_hdr;
3377 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
3378 uint8_t *ucode_array_start_addr;
3379
3380 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
3381 ucode_array_start_addr = (uint8_t *)sos_hdr +
3382 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3383
3384 if (adev->gmc.xgmi.connected_to_cpu ||
3385 (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2))) {
3386 adev->psp.sos.fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
3387 adev->psp.sos.feature_version = le32_to_cpu(sos_hdr->sos.fw_version);
3388
3389 adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr->sos.offset_bytes);
3390 adev->psp.sys.start_addr = ucode_array_start_addr;
3391
3392 adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr->sos.size_bytes);
3393 adev->psp.sos.start_addr = ucode_array_start_addr +
3394 le32_to_cpu(sos_hdr->sos.offset_bytes);
3395 } else {
3396 /* Load alternate PSP SOS FW */
3397 sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
3398
3399 adev->psp.sos.fw_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
3400 adev->psp.sos.feature_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
3401
3402 adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.size_bytes);
3403 adev->psp.sys.start_addr = ucode_array_start_addr +
3404 le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.offset_bytes);
3405
3406 adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes);
3407 adev->psp.sos.start_addr = ucode_array_start_addr +
3408 le32_to_cpu(sos_hdr_v1_3->sos_aux.offset_bytes);
3409 }
3410
3411 if ((adev->psp.sys.size_bytes == 0) || (adev->psp.sos.size_bytes == 0)) {
3412 dev_warn(adev->dev, "PSP SOS FW not available");
3413 return -EINVAL;
3414 }
3415
3416 return 0;
3417 }
3418
3419 int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name)
3420 {
3421 struct amdgpu_device *adev = psp->adev;
3422 const struct psp_firmware_header_v1_0 *sos_hdr;
3423 const struct psp_firmware_header_v1_1 *sos_hdr_v1_1;
3424 const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
3425 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
3426 const struct psp_firmware_header_v2_0 *sos_hdr_v2_0;
3427 const struct psp_firmware_header_v2_1 *sos_hdr_v2_1;
3428 int fw_index, fw_bin_count, start_index = 0;
3429 const struct psp_fw_bin_desc *fw_bin;
3430 uint8_t *ucode_array_start_addr;
3431 int err = 0;
3432
3433 if (amdgpu_is_kicker_fw(adev))
3434 err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, "amdgpu/%s_sos_kicker.bin", chip_name);
3435 else
3436 err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, "amdgpu/%s_sos.bin", chip_name);
3437 if (err)
3438 goto out;
3439
3440 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
3441 ucode_array_start_addr = (uint8_t *)sos_hdr +
3442 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3443 amdgpu_ucode_print_psp_hdr(&sos_hdr->header);
3444
3445 switch (sos_hdr->header.header_version_major) {
3446 case 1:
3447 err = psp_init_sos_base_fw(adev);
3448 if (err)
3449 goto out;
3450
3451 if (sos_hdr->header.header_version_minor == 1) {
3452 sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data;
3453 adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_1->toc.size_bytes);
3454 adev->psp.toc.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3455 le32_to_cpu(sos_hdr_v1_1->toc.offset_bytes);
3456 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_1->kdb.size_bytes);
3457 adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3458 le32_to_cpu(sos_hdr_v1_1->kdb.offset_bytes);
3459 }
3460 if (sos_hdr->header.header_version_minor == 2) {
3461 sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data;
3462 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_2->kdb.size_bytes);
3463 adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3464 le32_to_cpu(sos_hdr_v1_2->kdb.offset_bytes);
3465 }
3466 if (sos_hdr->header.header_version_minor == 3) {
3467 sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
3468 adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.toc.size_bytes);
3469 adev->psp.toc.start_addr = ucode_array_start_addr +
3470 le32_to_cpu(sos_hdr_v1_3->v1_1.toc.offset_bytes);
3471 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.size_bytes);
3472 adev->psp.kdb.start_addr = ucode_array_start_addr +
3473 le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.offset_bytes);
3474 adev->psp.spl.size_bytes = le32_to_cpu(sos_hdr_v1_3->spl.size_bytes);
3475 adev->psp.spl.start_addr = ucode_array_start_addr +
3476 le32_to_cpu(sos_hdr_v1_3->spl.offset_bytes);
3477 adev->psp.rl.size_bytes = le32_to_cpu(sos_hdr_v1_3->rl.size_bytes);
3478 adev->psp.rl.start_addr = ucode_array_start_addr +
3479 le32_to_cpu(sos_hdr_v1_3->rl.offset_bytes);
3480 }
3481 break;
3482 case 2:
3483 sos_hdr_v2_0 = (const struct psp_firmware_header_v2_0 *)adev->psp.sos_fw->data;
3484
3485 fw_bin_count = le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count);
3486
3487 if (fw_bin_count >= UCODE_MAX_PSP_PACKAGING) {
3488 dev_err(adev->dev, "packed SOS count exceeds maximum limit\n");
3489 err = -EINVAL;
3490 goto out;
3491 }
3492
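/* For v2.1 headers, choose between the regular and the auxiliary SOS bin descriptors */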
3493 if (sos_hdr_v2_0->header.header_version_minor == 1) {
3494 sos_hdr_v2_1 = (const struct psp_firmware_header_v2_1 *)adev->psp.sos_fw->data;
3495
3496 fw_bin = sos_hdr_v2_1->psp_fw_bin;
3497
3498 if (psp_is_aux_sos_load_required(psp))
3499 start_index = le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index);
3500 else
3501 fw_bin_count -= le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index);
3502
3503 } else {
3504 fw_bin = sos_hdr_v2_0->psp_fw_bin;
3505 }
3506
3507 for (fw_index = start_index; fw_index < fw_bin_count; fw_index++) {
3508 err = parse_sos_bin_descriptor(psp, fw_bin + fw_index,
3509 sos_hdr_v2_0);
3510 if (err)
3511 goto out;
3512 }
3513 break;
3514 default:
3515 dev_err(adev->dev,
3516 "unsupported psp sos firmware\n");
3517 err = -EINVAL;
3518 goto out;
3519 }
3520
3521 return 0;
3522 out:
3523 amdgpu_ucode_release(&adev->psp.sos_fw);
3524
3525 return err;
3526 }
3527
3528 static int parse_ta_bin_descriptor(struct psp_context *psp,
3529 const struct psp_fw_bin_desc *desc,
3530 const struct ta_firmware_header_v2_0 *ta_hdr)
3531 {
3532 uint8_t *ucode_start_addr = NULL;
3533
3534 if (!psp || !desc || !ta_hdr)
3535 return -EINVAL;
3536
3537 ucode_start_addr = (uint8_t *)ta_hdr +
3538 le32_to_cpu(desc->offset_bytes) +
3539 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3540
3541 switch (desc->fw_type) {
3542 case TA_FW_TYPE_PSP_ASD:
3543 psp->asd_context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3544 psp->asd_context.bin_desc.feature_version = le32_to_cpu(desc->fw_version);
3545 psp->asd_context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3546 psp->asd_context.bin_desc.start_addr = ucode_start_addr;
3547 break;
3548 case TA_FW_TYPE_PSP_XGMI:
3549 psp->xgmi_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3550 psp->xgmi_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3551 psp->xgmi_context.context.bin_desc.start_addr = ucode_start_addr;
3552 break;
3553 case TA_FW_TYPE_PSP_RAS:
3554 psp->ras_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3555 psp->ras_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3556 psp->ras_context.context.bin_desc.start_addr = ucode_start_addr;
3557 break;
3558 case TA_FW_TYPE_PSP_HDCP:
3559 psp->hdcp_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3560 psp->hdcp_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3561 psp->hdcp_context.context.bin_desc.start_addr = ucode_start_addr;
3562 break;
3563 case TA_FW_TYPE_PSP_DTM:
3564 psp->dtm_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3565 psp->dtm_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3566 psp->dtm_context.context.bin_desc.start_addr = ucode_start_addr;
3567 break;
3568 case TA_FW_TYPE_PSP_RAP:
3569 psp->rap_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3570 psp->rap_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3571 psp->rap_context.context.bin_desc.start_addr = ucode_start_addr;
3572 break;
3573 case TA_FW_TYPE_PSP_SECUREDISPLAY:
3574 psp->securedisplay_context.context.bin_desc.fw_version =
3575 le32_to_cpu(desc->fw_version);
3576 psp->securedisplay_context.context.bin_desc.size_bytes =
3577 le32_to_cpu(desc->size_bytes);
3578 psp->securedisplay_context.context.bin_desc.start_addr =
3579 ucode_start_addr;
3580 break;
3581 default:
3582 dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type);
3583 break;
3584 }
3585
3586 return 0;
3587 }
3588
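/*
 * v1.x TA images use fixed per-TA fields in the header. Note that the
 * RAS blob offset is relative to the XGMI start address, while the DTM
 * and secure display offsets are relative to the HDCP start address.
 */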
3589 static int parse_ta_v1_microcode(struct psp_context *psp)
3590 {
3591 const struct ta_firmware_header_v1_0 *ta_hdr;
3592 struct amdgpu_device *adev = psp->adev;
3593
3594 ta_hdr = (const struct ta_firmware_header_v1_0 *) adev->psp.ta_fw->data;
3595
3596 if (le16_to_cpu(ta_hdr->header.header_version_major) != 1)
3597 return -EINVAL;
3598
3599 adev->psp.xgmi_context.context.bin_desc.fw_version =
3600 le32_to_cpu(ta_hdr->xgmi.fw_version);
3601 adev->psp.xgmi_context.context.bin_desc.size_bytes =
3602 le32_to_cpu(ta_hdr->xgmi.size_bytes);
3603 adev->psp.xgmi_context.context.bin_desc.start_addr =
3604 (uint8_t *)ta_hdr +
3605 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3606
3607 adev->psp.ras_context.context.bin_desc.fw_version =
3608 le32_to_cpu(ta_hdr->ras.fw_version);
3609 adev->psp.ras_context.context.bin_desc.size_bytes =
3610 le32_to_cpu(ta_hdr->ras.size_bytes);
3611 adev->psp.ras_context.context.bin_desc.start_addr =
3612 (uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr +
3613 le32_to_cpu(ta_hdr->ras.offset_bytes);
3614
3615 adev->psp.hdcp_context.context.bin_desc.fw_version =
3616 le32_to_cpu(ta_hdr->hdcp.fw_version);
3617 adev->psp.hdcp_context.context.bin_desc.size_bytes =
3618 le32_to_cpu(ta_hdr->hdcp.size_bytes);
3619 adev->psp.hdcp_context.context.bin_desc.start_addr =
3620 (uint8_t *)ta_hdr +
3621 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3622
3623 adev->psp.dtm_context.context.bin_desc.fw_version =
3624 le32_to_cpu(ta_hdr->dtm.fw_version);
3625 adev->psp.dtm_context.context.bin_desc.size_bytes =
3626 le32_to_cpu(ta_hdr->dtm.size_bytes);
3627 adev->psp.dtm_context.context.bin_desc.start_addr =
3628 (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
3629 le32_to_cpu(ta_hdr->dtm.offset_bytes);
3630
3631 adev->psp.securedisplay_context.context.bin_desc.fw_version =
3632 le32_to_cpu(ta_hdr->securedisplay.fw_version);
3633 adev->psp.securedisplay_context.context.bin_desc.size_bytes =
3634 le32_to_cpu(ta_hdr->securedisplay.size_bytes);
3635 adev->psp.securedisplay_context.context.bin_desc.start_addr =
3636 (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
3637 le32_to_cpu(ta_hdr->securedisplay.offset_bytes);
3638
3639 adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
3640
3641 return 0;
3642 }
3643
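/*
 * v2.x TA images carry a descriptor array; hand each entry to
 * parse_ta_bin_descriptor() after sanity-checking the packed TA count.
 */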
3644 static int parse_ta_v2_microcode(struct psp_context *psp)
3645 {
3646 const struct ta_firmware_header_v2_0 *ta_hdr;
3647 struct amdgpu_device *adev = psp->adev;
3648 int err = 0;
3649 int ta_index = 0;
3650
3651 ta_hdr = (const struct ta_firmware_header_v2_0 *)adev->psp.ta_fw->data;
3652
3653 if (le16_to_cpu(ta_hdr->header.header_version_major) != 2)
3654 return -EINVAL;
3655
3656 if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) {
3657 dev_err(adev->dev, "packed TA count exceeds maximum limit\n");
3658 return -EINVAL;
3659 }
3660
3661 for (ta_index = 0; ta_index < le32_to_cpu(ta_hdr->ta_fw_bin_count); ta_index++) {
3662 err = parse_ta_bin_descriptor(psp,
3663 &ta_hdr->ta_fw_bin[ta_index],
3664 ta_hdr);
3665 if (err)
3666 return err;
3667 }
3668
3669 return 0;
3670 }
3671
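/*
 * Request the TA firmware image for the given chip and dispatch to the
 * parser matching the header major version; the image is released again
 * on any parse failure.
 */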
3672 int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name)
3673 {
3674 const struct common_firmware_header *hdr;
3675 struct amdgpu_device *adev = psp->adev;
3676 int err;
3677
3678 if (amdgpu_is_kicker_fw(adev))
3679 err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, "amdgpu/%s_ta_kicker.bin", chip_name);
3680 else
3681 err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, "amdgpu/%s_ta.bin", chip_name);
3682 if (err)
3683 return err;
3684
3685 hdr = (const struct common_firmware_header *)adev->psp.ta_fw->data;
3686 switch (le16_to_cpu(hdr->header_version_major)) {
3687 case 1:
3688 err = parse_ta_v1_microcode(psp);
3689 break;
3690 case 2:
3691 err = parse_ta_v2_microcode(psp);
3692 break;
3693 default:
3694 dev_err(adev->dev, "unsupported TA header version\n");
3695 err = -EINVAL;
3696 }
3697
3698 if (err)
3699 amdgpu_ucode_release(&adev->psp.ta_fw);
3700
3701 return err;
3702 }
3703
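/*
 * CAP microcode is only meaningful under SRIOV; a missing image is not
 * treated as an error, the load is simply skipped.
 */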
3704 int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name)
3705 {
3706 struct amdgpu_device *adev = psp->adev;
3707 const struct psp_firmware_header_v1_0 *cap_hdr_v1_0;
3708 struct amdgpu_firmware_info *info = NULL;
3709 int err = 0;
3710
3711 if (!amdgpu_sriov_vf(adev)) {
3712 dev_err(adev->dev, "cap microcode should only be loaded under SRIOV\n");
3713 return -EINVAL;
3714 }
3715
3716 err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, "amdgpu/%s_cap.bin", chip_name);
3717 if (err) {
3718 if (err == -ENODEV) {
3719 dev_warn(adev->dev, "cap microcode does not exist, skip\n");
3720 err = 0;
3721 } else {
3722 dev_err(adev->dev, "failed to initialize cap microcode\n");
3723 }
3724 goto out;
3725 }
3726
3727 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP];
3728 info->ucode_id = AMDGPU_UCODE_ID_CAP;
3729 info->fw = adev->psp.cap_fw;
3730 cap_hdr_v1_0 = (const struct psp_firmware_header_v1_0 *)
3731 adev->psp.cap_fw->data;
3732 adev->firmware.fw_size += ALIGN(
3733 le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes), PAGE_SIZE);
3734 adev->psp.cap_fw_version = le32_to_cpu(cap_hdr_v1_0->header.ucode_version);
3735 adev->psp.cap_feature_version = le32_to_cpu(cap_hdr_v1_0->sos.fw_version);
3736 adev->psp.cap_ucode_size = le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes);
3737
3738 return 0;
3739
3740 out:
3741 amdgpu_ucode_release(&adev->psp.cap_fw);
3742 return err;
3743 }
3744
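/* Clock- and power-gating are not handled here; the callbacks simply return success. */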
3745 static int psp_set_clockgating_state(void *handle,
3746 enum amd_clockgating_state state)
3747 {
3748 return 0;
3749 }
3750
3751 static int psp_set_powergating_state(void *handle,
3752 enum amd_powergating_state state)
3753 {
3754 return 0;
3755 }
3756
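/* sysfs read handler: report the currently flashed USB-C PD firmware version. */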
3757 static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
3758 struct device_attribute *attr,
3759 char *buf)
3760 {
3761 struct drm_device *ddev = dev_get_drvdata(dev);
3762 struct amdgpu_device *adev = drm_to_adev(ddev);
3763 uint32_t fw_ver;
3764 int ret;
3765
3766 if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
3767 dev_info(adev->dev, "PSP block is not ready yet.\n");
3768 return -EBUSY;
3769 }
3770
3771 mutex_lock(&adev->psp.mutex);
3772 ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver);
3773 mutex_unlock(&adev->psp.mutex);
3774
3775 if (ret) {
3776 dev_err(adev->dev, "Failed to read USBC PD FW, err = %d\n", ret);
3777 return ret;
3778 }
3779
3780 return sysfs_emit(buf, "%x\n", fw_ver);
3781 }
3782
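/*
 * sysfs write handler: the written string names a firmware file under
 * the amdgpu/ firmware directory; the image is staged in a GPU-accessible
 * buffer and handed to the PSP for the USB-C PD update.
 */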
3783 static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
3784 struct device_attribute *attr,
3785 const char *buf,
3786 size_t count)
3787 {
3788 struct drm_device *ddev = dev_get_drvdata(dev);
3789 struct amdgpu_device *adev = drm_to_adev(ddev);
3790 int ret, idx;
3791 const struct firmware *usbc_pd_fw;
3792 struct amdgpu_bo *fw_buf_bo = NULL;
3793 uint64_t fw_pri_mc_addr;
3794 void *fw_pri_cpu_addr;
3795
3796 if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
3797 dev_err(adev->dev, "PSP block is not ready yet.\n");
3798 return -EBUSY;
3799 }
3800
3801 if (!drm_dev_enter(ddev, &idx))
3802 return -ENODEV;
3803
3804 ret = amdgpu_ucode_request(adev, &usbc_pd_fw, "amdgpu/%s", buf);
3805 if (ret)
3806 goto fail;
3807
3808 /* LFB address which is aligned to 1MB boundary per PSP request */
3809 ret = amdgpu_bo_create_kernel(adev, usbc_pd_fw->size, 0x100000,
3810 AMDGPU_GEM_DOMAIN_VRAM |
3811 AMDGPU_GEM_DOMAIN_GTT,
3812 &fw_buf_bo, &fw_pri_mc_addr,
3813 &fw_pri_cpu_addr);
3814 if (ret)
3815 goto rel_buf;
3816
3817 memcpy_toio(fw_pri_cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size);
3818
3819 mutex_lock(&adev->psp.mutex);
3820 ret = psp_load_usbc_pd_fw(&adev->psp, fw_pri_mc_addr);
3821 mutex_unlock(&adev->psp.mutex);
3822
3823 amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);
3824
3825 rel_buf:
3826 amdgpu_ucode_release(&usbc_pd_fw);
3827 fail:
3828 if (ret) {
3829 dev_err(adev->dev, "Failed to load USBC PD FW, err = %d\n", ret);
3830 count = ret;
3831 }
3832
3833 drm_dev_exit(idx);
3834 return count;
3835 }
3836
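/*
 * Copy a firmware binary into the PSP private (fw_pri) buffer; the copy
 * is skipped if the device is gone (drm_dev_enter() fails).
 */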
3837 void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size)
3838 {
3839 int idx;
3840
3841 if (!drm_dev_enter(adev_to_drm(psp->adev), &idx))
3842 return;
3843
3844 memset(psp->fw_pri_buf, 0, PSP_1_MEG);
3845 memcpy(psp->fw_pri_buf, start_addr, bin_size);
3846
3847 drm_dev_exit(idx);
3848 }
3849
3850 /**
3851 * DOC: usbc_pd_fw
3852 * Reading from this file will retrieve the USB-C PD firmware version. Writing to
3853 * this file will trigger the update process.
3854 */
3855 static DEVICE_ATTR(usbc_pd_fw, 0644,
3856 psp_usbc_pd_fw_sysfs_read,
3857 psp_usbc_pd_fw_sysfs_write);
3858
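/* A descriptor is considered valid when it references a non-empty binary. */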
3859 int is_psp_fw_valid(struct psp_bin_desc bin)
3860 {
3861 return bin.size_bytes;
3862 }
3863
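/*
 * Accumulate the written IFWI chunks in a temporary kernel buffer; the
 * actual flash is deferred until the companion read handler runs.
 */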
3864 static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj,
3865 struct bin_attribute *bin_attr,
3866 char *buffer, loff_t pos, size_t count)
3867 {
3868 struct device *dev = kobj_to_dev(kobj);
3869 struct drm_device *ddev = dev_get_drvdata(dev);
3870 struct amdgpu_device *adev = drm_to_adev(ddev);
3871
3872 adev->psp.vbflash_done = false;
3873
3874 /* Safeguard against memory drain */
3875 if (adev->psp.vbflash_image_size > AMD_VBIOS_FILE_MAX_SIZE_B) {
3876 dev_err(adev->dev, "File size cannot exceed %u\n", AMD_VBIOS_FILE_MAX_SIZE_B);
3877 kvfree(adev->psp.vbflash_tmp_buf);
3878 adev->psp.vbflash_tmp_buf = NULL;
3879 adev->psp.vbflash_image_size = 0;
3880 return -ENOMEM;
3881 }
3882
3883 /* TODO Just allocate max for now and optimize to realloc later if needed */
3884 if (!adev->psp.vbflash_tmp_buf) {
3885 adev->psp.vbflash_tmp_buf = kvmalloc(AMD_VBIOS_FILE_MAX_SIZE_B, GFP_KERNEL);
3886 if (!adev->psp.vbflash_tmp_buf)
3887 return -ENOMEM;
3888 }
3889
3890 mutex_lock(&adev->psp.mutex);
3891 memcpy(adev->psp.vbflash_tmp_buf + pos, buffer, count);
3892 adev->psp.vbflash_image_size += count;
3893 mutex_unlock(&adev->psp.mutex);
3894
3895 dev_dbg(adev->dev, "IFWI staged for update\n");
3896
3897 return count;
3898 }
3899
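/*
 * Reading psp_vbflash triggers the flash: the staged image is copied to
 * a VRAM buffer and passed to the PSP SPI-ROM update command, and the
 * staging buffer is released regardless of the outcome.
 */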
3900 static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj,
3901 struct bin_attribute *bin_attr, char *buffer,
3902 loff_t pos, size_t count)
3903 {
3904 struct device *dev = kobj_to_dev(kobj);
3905 struct drm_device *ddev = dev_get_drvdata(dev);
3906 struct amdgpu_device *adev = drm_to_adev(ddev);
3907 struct amdgpu_bo *fw_buf_bo = NULL;
3908 uint64_t fw_pri_mc_addr;
3909 void *fw_pri_cpu_addr;
3910 int ret;
3911
3912 if (adev->psp.vbflash_image_size == 0)
3913 return -EINVAL;
3914
3915 dev_dbg(adev->dev, "PSP IFWI flash process initiated\n");
3916
3917 ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size,
3918 AMDGPU_GPU_PAGE_SIZE,
3919 AMDGPU_GEM_DOMAIN_VRAM,
3920 &fw_buf_bo,
3921 &fw_pri_mc_addr,
3922 &fw_pri_cpu_addr);
3923 if (ret)
3924 goto rel_buf;
3925
3926 memcpy_toio(fw_pri_cpu_addr, adev->psp.vbflash_tmp_buf, adev->psp.vbflash_image_size);
3927
3928 mutex_lock(&adev->psp.mutex);
3929 ret = psp_update_spirom(&adev->psp, fw_pri_mc_addr);
3930 mutex_unlock(&adev->psp.mutex);
3931
3932 amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);
3933
3934 rel_buf:
3935 kvfree(adev->psp.vbflash_tmp_buf);
3936 adev->psp.vbflash_tmp_buf = NULL;
3937 adev->psp.vbflash_image_size = 0;
3938
3939 if (ret) {
3940 dev_err(adev->dev, "Failed to load IFWI, err = %d\n", ret);
3941 return ret;
3942 }
3943
3944 dev_dbg(adev->dev, "PSP IFWI flash process done\n");
3945 return 0;
3946 }
3947
3948 /**
3949 * DOC: psp_vbflash
3950 * Writing to this file will stage an IFWI for update. Reading from this file
3951 * will trigger the update process.
3952 */
3953 static struct bin_attribute psp_vbflash_bin_attr = {
3954 .attr = {.name = "psp_vbflash", .mode = 0660},
3955 .size = 0,
3956 .write = amdgpu_psp_vbflash_write,
3957 .read = amdgpu_psp_vbflash_read,
3958 };
3959
3960 /**
3961 * DOC: psp_vbflash_status
3962 * The status of the flash process.
3963 * 0: IFWI flash not complete.
3964 * 1: IFWI flash complete.
3965 */
3966 static ssize_t amdgpu_psp_vbflash_status(struct device *dev,
3967 struct device_attribute *attr,
3968 char *buf)
3969 {
3970 struct drm_device *ddev = dev_get_drvdata(dev);
3971 struct amdgpu_device *adev = drm_to_adev(ddev);
3972 uint32_t vbflash_status;
3973
3974 vbflash_status = psp_vbflash_status(&adev->psp);
3975 if (!adev->psp.vbflash_done)
3976 vbflash_status = 0;
3977 else if (adev->psp.vbflash_done && !(vbflash_status & 0x80000000))
3978 vbflash_status = 1;
3979
3980 return sysfs_emit(buf, "0x%x\n", vbflash_status);
3981 }
3982 static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL);
3983
3984 static struct bin_attribute *bin_flash_attrs[] = {
3985 &psp_vbflash_bin_attr,
3986 NULL
3987 };
3988
3989 static struct attribute *flash_attrs[] = {
3990 &dev_attr_psp_vbflash_status.attr,
3991 &dev_attr_usbc_pd_fw.attr,
3992 NULL
3993 };
3994
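/*
 * Expose usbc_pd_fw only when PD firmware update is supported, and
 * psp_vbflash_status only when IFWI update is supported.
 */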
3995 static umode_t amdgpu_flash_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
3996 {
3997 struct device *dev = kobj_to_dev(kobj);
3998 struct drm_device *ddev = dev_get_drvdata(dev);
3999 struct amdgpu_device *adev = drm_to_adev(ddev);
4000
4001 if (attr == &dev_attr_usbc_pd_fw.attr)
4002 return adev->psp.sup_pd_fw_up ? 0660 : 0;
4003
4004 return adev->psp.sup_ifwi_up ? 0440 : 0;
4005 }
4006
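/* The psp_vbflash binary attribute is visible only when IFWI update is supported. */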
4007 static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj,
4008 struct bin_attribute *attr,
4009 int idx)
4010 {
4011 struct device *dev = kobj_to_dev(kobj);
4012 struct drm_device *ddev = dev_get_drvdata(dev);
4013 struct amdgpu_device *adev = drm_to_adev(ddev);
4014
4015 return adev->psp.sup_ifwi_up ? 0660 : 0;
4016 }
4017
4018 const struct attribute_group amdgpu_flash_attr_group = {
4019 .attrs = flash_attrs,
4020 .bin_attrs = bin_flash_attrs,
4021 .is_bin_visible = amdgpu_bin_flash_attr_is_visible,
4022 .is_visible = amdgpu_flash_attr_is_visible,
4023 };
4024
4025 const struct amd_ip_funcs psp_ip_funcs = {
4026 .name = "psp",
4027 .early_init = psp_early_init,
4028 .late_init = NULL,
4029 .sw_init = psp_sw_init,
4030 .sw_fini = psp_sw_fini,
4031 .hw_init = psp_hw_init,
4032 .hw_fini = psp_hw_fini,
4033 .suspend = psp_suspend,
4034 .resume = psp_resume,
4035 .is_idle = NULL,
4036 .check_soft_reset = NULL,
4037 .wait_for_idle = NULL,
4038 .soft_reset = NULL,
4039 .set_clockgating_state = psp_set_clockgating_state,
4040 .set_powergating_state = psp_set_powergating_state,
4041 };
4042
4043 const struct amdgpu_ip_block_version psp_v3_1_ip_block = {
4044 .type = AMD_IP_BLOCK_TYPE_PSP,
4045 .major = 3,
4046 .minor = 1,
4047 .rev = 0,
4048 .funcs = &psp_ip_funcs,
4049 };
4050
4051 const struct amdgpu_ip_block_version psp_v10_0_ip_block = {
4052 .type = AMD_IP_BLOCK_TYPE_PSP,
4053 .major = 10,
4054 .minor = 0,
4055 .rev = 0,
4056 .funcs = &psp_ip_funcs,
4057 };
4058
4059 const struct amdgpu_ip_block_version psp_v11_0_ip_block = {
4060 .type = AMD_IP_BLOCK_TYPE_PSP,
4061 .major = 11,
4062 .minor = 0,
4063 .rev = 0,
4064 .funcs = &psp_ip_funcs,
4065 };
4066
4067 const struct amdgpu_ip_block_version psp_v11_0_8_ip_block = {
4068 .type = AMD_IP_BLOCK_TYPE_PSP,
4069 .major = 11,
4070 .minor = 0,
4071 .rev = 8,
4072 .funcs = &psp_ip_funcs,
4073 };
4074
4075 const struct amdgpu_ip_block_version psp_v12_0_ip_block = {
4076 .type = AMD_IP_BLOCK_TYPE_PSP,
4077 .major = 12,
4078 .minor = 0,
4079 .rev = 0,
4080 .funcs = &psp_ip_funcs,
4081 };
4082
4083 const struct amdgpu_ip_block_version psp_v13_0_ip_block = {
4084 .type = AMD_IP_BLOCK_TYPE_PSP,
4085 .major = 13,
4086 .minor = 0,
4087 .rev = 0,
4088 .funcs = &psp_ip_funcs,
4089 };
4090
4091 const struct amdgpu_ip_block_version psp_v13_0_4_ip_block = {
4092 .type = AMD_IP_BLOCK_TYPE_PSP,
4093 .major = 13,
4094 .minor = 0,
4095 .rev = 4,
4096 .funcs = &psp_ip_funcs,
4097 };
4098
4099 const struct amdgpu_ip_block_version psp_v14_0_ip_block = {
4100 .type = AMD_IP_BLOCK_TYPE_PSP,
4101 .major = 14,
4102 .minor = 0,
4103 .rev = 0,
4104 .funcs = &psp_ip_funcs,
4105 };
4106