/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef __AMDGPU_VCN_H__
#define __AMDGPU_VCN_H__

#include "amdgpu_ras.h"

#define AMDGPU_VCN_STACK_SIZE		(128 * 1024)
#define AMDGPU_VCN_CONTEXT_SIZE		(512 * 1024)

#define AMDGPU_VCN_FIRMWARE_OFFSET	256
#define AMDGPU_VCN_MAX_ENC_RINGS	3

#define AMDGPU_MAX_VCN_INSTANCES	4
#define AMDGPU_MAX_VCN_ENC_RINGS	(AMDGPU_VCN_MAX_ENC_RINGS * AMDGPU_MAX_VCN_INSTANCES)

#define AMDGPU_VCN_HARVEST_VCN0		(1 << 0)
#define AMDGPU_VCN_HARVEST_VCN1		(1 << 1)

#define VCN_DEC_KMD_CMD				0x80000000
#define VCN_DEC_CMD_FENCE			0x00000000
#define VCN_DEC_CMD_TRAP			0x00000001
#define VCN_DEC_CMD_WRITE_REG			0x00000004
#define VCN_DEC_CMD_REG_READ_COND_WAIT		0x00000006
#define VCN_DEC_CMD_PACKET_START		0x0000000a
#define VCN_DEC_CMD_PACKET_END			0x0000000b

#define VCN_DEC_SW_CMD_NO_OP		0x00000000
#define VCN_DEC_SW_CMD_END		0x00000001
#define VCN_DEC_SW_CMD_IB		0x00000002
#define VCN_DEC_SW_CMD_FENCE		0x00000003
#define VCN_DEC_SW_CMD_TRAP		0x00000004
#define VCN_DEC_SW_CMD_IB_AUTO		0x00000005
#define VCN_DEC_SW_CMD_SEMAPHORE	0x00000006
#define VCN_DEC_SW_CMD_PREEMPT_FENCE	0x00000009
#define VCN_DEC_SW_CMD_REG_WRITE	0x0000000b
#define VCN_DEC_SW_CMD_REG_WAIT		0x0000000c

#define VCN_ENC_CMD_NO_OP		0x00000000
#define VCN_ENC_CMD_END			0x00000001
#define VCN_ENC_CMD_IB			0x00000002
#define VCN_ENC_CMD_FENCE		0x00000003
#define VCN_ENC_CMD_TRAP		0x00000004
#define VCN_ENC_CMD_REG_WRITE		0x0000000b
#define VCN_ENC_CMD_REG_WAIT		0x0000000c

#define VCN_AON_SOC_ADDRESS_2_0		0x1f800
#define VCN_VID_IP_ADDRESS_2_0		0x0
#define VCN_AON_IP_ADDRESS_2_0		0x30000

#define mmUVD_RBC_XX_IB_REG_CHECK		0x026b
#define mmUVD_RBC_XX_IB_REG_CHECK_BASE_IDX	1
#define mmUVD_REG_XX_MASK			0x026c
#define mmUVD_REG_XX_MASK_BASE_IDX		1

/* 1 second timeout */
#define VCN_IDLE_TIMEOUT	msecs_to_jiffies(1000)

#define RREG32_SOC15_DPG_MODE_1_0(ip, inst_idx, reg, mask, sram_sel)			\
	({	WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_MASK, mask);			\
		WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_CTL,				\
			UVD_DPG_LMA_CTL__MASK_EN_MASK |					\
			((adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg)	\
			<< UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT) |			\
			(sram_sel << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT));		\
		RREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_DATA);				\
	})
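/*
 * Illustrative use of the 1.0 DPG LMA helpers (a sketch only, assuming
 * "adev" and the SOC15 register symbols are in scope; mmUVD_CGC_CTRL is
 * just an example register, and the write counterpart is defined below):
 *
 *	tmp = RREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_CGC_CTRL,
 *					0xFFFFFFFF, 0);
 *	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_CGC_CTRL, tmp,
 *				  0xFFFFFFFF, 0);
 */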
#define WREG32_SOC15_DPG_MODE_1_0(ip, inst_idx, reg, value, mask, sram_sel)		\
	do {										\
		WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_DATA, value);			\
		WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_MASK, mask);			\
		WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_CTL,				\
			UVD_DPG_LMA_CTL__READ_WRITE_MASK |				\
			((adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg)	\
			<< UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT) |			\
			(sram_sel << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT));		\
	} while (0)

#define SOC15_DPG_MODE_OFFSET(ip, inst_idx, reg)					\
	({										\
		uint32_t internal_reg_offset, addr;					\
		bool video_range, video1_range, aon_range, aon1_range;			\
											\
		addr = (adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg);	\
		addr <<= 2;								\
		video_range = ((((0xFFFFF & addr) >= (VCN_VID_SOC_ADDRESS_2_0)) &&	\
				((0xFFFFF & addr) < ((VCN_VID_SOC_ADDRESS_2_0 + 0x2600)))));	\
		video1_range = ((((0xFFFFF & addr) >= (VCN1_VID_SOC_ADDRESS_3_0)) &&	\
				((0xFFFFF & addr) < ((VCN1_VID_SOC_ADDRESS_3_0 + 0x2600)))));	\
		aon_range = ((((0xFFFFF & addr) >= (VCN_AON_SOC_ADDRESS_2_0)) &&	\
				((0xFFFFF & addr) < ((VCN_AON_SOC_ADDRESS_2_0 + 0x600)))));	\
		aon1_range = ((((0xFFFFF & addr) >= (VCN1_AON_SOC_ADDRESS_3_0)) &&	\
				((0xFFFFF & addr) < ((VCN1_AON_SOC_ADDRESS_3_0 + 0x600)))));	\
		if (video_range)							\
			internal_reg_offset = ((0xFFFFF & addr) - (VCN_VID_SOC_ADDRESS_2_0) +	\
				(VCN_VID_IP_ADDRESS_2_0));				\
		else if (aon_range)							\
			internal_reg_offset = ((0xFFFFF & addr) - (VCN_AON_SOC_ADDRESS_2_0) +	\
				(VCN_AON_IP_ADDRESS_2_0));				\
		else if (video1_range)							\
			internal_reg_offset = ((0xFFFFF & addr) - (VCN1_VID_SOC_ADDRESS_3_0) +	\
				(VCN_VID_IP_ADDRESS_2_0));				\
		else if (aon1_range)							\
			internal_reg_offset = ((0xFFFFF & addr) - (VCN1_AON_SOC_ADDRESS_3_0) +	\
				(VCN_AON_IP_ADDRESS_2_0));				\
		else									\
			internal_reg_offset = (0xFFFFF & addr);				\
											\
		internal_reg_offset >>= 2;						\
	})

#define RREG32_SOC15_DPG_MODE(inst_idx, offset, mask_en)				\
	({										\
		WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_CTL,				\
			(0x0 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT |			\
			mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT |			\
			offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT));		\
		RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_DATA);			\
	})

#define WREG32_SOC15_DPG_MODE(inst_idx, offset, value, mask_en, indirect)		\
	do {										\
		if (!indirect) {							\
			WREG32_SOC15(VCN, GET_INST(VCN, inst_idx),			\
				     mmUVD_DPG_LMA_DATA, value);			\
			WREG32_SOC15(							\
				VCN, GET_INST(VCN, inst_idx),				\
				mmUVD_DPG_LMA_CTL,					\
				(0x1 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT |		\
				 mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT |		\
				 offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT));	\
		} else {								\
			*adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = offset;	\
			*adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = value;	\
		}									\
	} while (0)
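/*
 * Illustrative use of the SOC15 DPG helpers (a sketch only; mmUVD_VCPU_CNTL
 * is just an example register, and "indirect" selects queuing the write into
 * the DPG scratch SRAM instead of an immediate MMIO access):
 *
 *	WREG32_SOC15_DPG_MODE(inst_idx,
 *		SOC15_DPG_MODE_OFFSET(VCN, 0, mmUVD_VCPU_CNTL),
 *		tmp, 0, indirect);
 */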
#define SOC24_DPG_MODE_OFFSET(ip, inst_idx, reg)					\
	({										\
		uint32_t internal_reg_offset, addr;					\
		bool video_range, aon_range;						\
											\
		addr = (adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg);	\
		addr <<= 2;								\
		video_range = ((((0xFFFFF & addr) >= (VCN_VID_SOC_ADDRESS)) &&		\
				((0xFFFFF & addr) < ((VCN_VID_SOC_ADDRESS + 0x2600)))));	\
		aon_range = ((((0xFFFFF & addr) >= (VCN_AON_SOC_ADDRESS)) &&		\
				((0xFFFFF & addr) < ((VCN_AON_SOC_ADDRESS + 0x600)))));	\
		if (video_range)							\
			internal_reg_offset = ((0xFFFFF & addr) - (VCN_VID_SOC_ADDRESS) +	\
				(VCN_VID_IP_ADDRESS));					\
		else if (aon_range)							\
			internal_reg_offset = ((0xFFFFF & addr) - (VCN_AON_SOC_ADDRESS) +	\
				(VCN_AON_IP_ADDRESS));					\
		else									\
			internal_reg_offset = (0xFFFFF & addr);				\
											\
		internal_reg_offset >>= 2;						\
	})

#define WREG32_SOC24_DPG_MODE(inst_idx, offset, value, mask_en, indirect)		\
	do {										\
		if (!indirect) {							\
			WREG32_SOC15(VCN, GET_INST(VCN, inst_idx),			\
				     regUVD_DPG_LMA_DATA, value);			\
			WREG32_SOC15(							\
				VCN, GET_INST(VCN, inst_idx),				\
				regUVD_DPG_LMA_CTL,					\
				(0x1 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT |		\
				 mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT |		\
				 offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT));	\
		} else {								\
			*adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = offset;	\
			*adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = value;	\
		}									\
	} while (0)

#define AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE	(1 << 2)
#define AMDGPU_FW_SHARED_FLAG_0_DRM_KEY_INJECT	(1 << 4)
#define AMDGPU_VCN_FW_SHARED_FLAG_0_RB		(1 << 6)
#define AMDGPU_VCN_MULTI_QUEUE_FLAG		(1 << 8)
#define AMDGPU_VCN_SW_RING_FLAG			(1 << 9)
#define AMDGPU_VCN_FW_LOGGING_FLAG		(1 << 10)
/* note: the next two flags share bit 11 */
#define AMDGPU_VCN_SMU_VERSION_INFO_FLAG	(1 << 11)
#define AMDGPU_VCN_SMU_DPM_INTERFACE_FLAG	(1 << 11)
#define AMDGPU_VCN_VF_RB_SETUP_FLAG		(1 << 14)
#define AMDGPU_VCN_VF_RB_DECOUPLE_FLAG		(1 << 15)

#define MAX_NUM_VCN_RB_SETUP 4

#define AMDGPU_VCN_IB_FLAG_DECODE_BUFFER	0x00000001
#define AMDGPU_VCN_CMD_FLAG_MSG_BUFFER		0x00000001

#define VCN_CODEC_DISABLE_MASK_AV1	(1 << 0)
#define VCN_CODEC_DISABLE_MASK_VP9	(1 << 1)
#define VCN_CODEC_DISABLE_MASK_HEVC	(1 << 2)
#define VCN_CODEC_DISABLE_MASK_H264	(1 << 3)

#define AMDGPU_VCN_SMU_DPM_INTERFACE_DGPU	(0)
#define AMDGPU_VCN_SMU_DPM_INTERFACE_APU	(1)

#define AMDGPU_DRM_KEY_INJECT_WORKAROUND_VCNFW_ASD_HANDSHAKING 2

enum fw_queue_mode {
	FW_QUEUE_RING_RESET = 1,
	FW_QUEUE_DPG_HOLD_OFF = 2,
};

enum engine_status_constants {
	UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON = 0x2AAAA0,
	UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON_2_0 = 0xAAAA0,
	UVD_PGFSM_STATUS__UVDM_UVDU_UVDLM_PWR_ON_3_0 = 0x2A2A8AA0,
	UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON = 0x00000002,
	UVD_STATUS__UVD_BUSY = 0x00000004,
	GB_ADDR_CONFIG_DEFAULT = 0x26010011,
	UVD_STATUS__IDLE = 0x2,
	UVD_STATUS__BUSY = 0x5,
	UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF = 0x1,
	UVD_STATUS__RBC_BUSY = 0x1,
	UVD_PGFSM_STATUS_UVDJ_PWR_ON = 0,
};

enum internal_dpg_state {
	VCN_DPG_STATE__UNPAUSE = 0,
	VCN_DPG_STATE__PAUSE,
};

struct dpg_pause_state {
	enum internal_dpg_state fw_based;
	enum internal_dpg_state jpeg;
};

struct amdgpu_vcn_reg {
	unsigned	data0;
	unsigned	data1;
	unsigned	cmd;
	unsigned	nop;
	unsigned	context_id;
	unsigned	ib_vmid;
	unsigned	ib_bar_low;
	unsigned	ib_bar_high;
	unsigned	ib_size;
	unsigned	gp_scratch8;
	unsigned	scratch9;
};

struct amdgpu_vcn_fw_shared {
	void		*cpu_addr;
	uint64_t	gpu_addr;
	uint32_t	mem_size;
	uint32_t	log_offset;
};
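/*
 * Illustrative use of the DPG pause state (a sketch only; inst_idx and the
 * surrounding context are assumed, and pause_dpg_mode is the per-ASIC
 * callback declared in struct amdgpu_vcn below):
 *
 *	struct dpg_pause_state new_state = {
 *		.fw_based = VCN_DPG_STATE__PAUSE,
 *		.jpeg = VCN_DPG_STATE__UNPAUSE,
 *	};
 *
 *	if (adev->vcn.pause_dpg_mode)
 *		adev->vcn.pause_dpg_mode(adev, inst_idx, &new_state);
 */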
struct amdgpu_vcn_inst {
	struct amdgpu_bo	*vcpu_bo;
	void			*cpu_addr;
	uint64_t		gpu_addr;
	void			*saved_bo;
	struct amdgpu_ring	ring_dec;
	struct amdgpu_ring	ring_enc[AMDGPU_VCN_MAX_ENC_RINGS];
	atomic_t		sched_score;
	struct amdgpu_irq_src	irq;
	struct amdgpu_irq_src	ras_poison_irq;
	struct amdgpu_vcn_reg	external;
	struct amdgpu_bo	*dpg_sram_bo;
	struct dpg_pause_state	pause_state;
	void			*dpg_sram_cpu_addr;
	uint64_t		dpg_sram_gpu_addr;
	uint32_t		*dpg_sram_curr_addr;
	atomic_t		dpg_enc_submission_cnt;
	struct amdgpu_vcn_fw_shared fw_shared;
	uint8_t			aid_id;
};

struct amdgpu_vcn_ras {
	struct amdgpu_ras_block_object ras_block;
};

struct amdgpu_vcn {
	unsigned		fw_version;
	struct delayed_work	idle_work;
	const struct firmware	*fw[AMDGPU_MAX_VCN_INSTANCES];	/* VCN firmware */
	unsigned		num_enc_rings;
	enum amd_powergating_state cur_state;
	bool			indirect_sram;

	uint8_t			num_vcn_inst;
	struct amdgpu_vcn_inst	inst[AMDGPU_MAX_VCN_INSTANCES];
	uint8_t			vcn_config[AMDGPU_MAX_VCN_INSTANCES];
	uint32_t		vcn_codec_disable_mask[AMDGPU_MAX_VCN_INSTANCES];
	struct amdgpu_vcn_reg	internal;
	struct mutex		vcn_pg_lock;
	struct mutex		vcn1_jpeg1_workaround;
	atomic_t		total_submission_cnt;

	unsigned		harvest_config;
	int (*pause_dpg_mode)(struct amdgpu_device *adev,
			      int inst_idx, struct dpg_pause_state *new_state);

	struct ras_common_if	*ras_if;
	struct amdgpu_vcn_ras	*ras;

	uint16_t inst_mask;
	uint8_t	num_inst_per_aid;
	bool using_unified_queue;

	/* IP reg dump */
	uint32_t		*ip_dump;
};

struct amdgpu_fw_shared_rb_ptrs_struct {
	/* to work around DPG R/W ptr issues */
	uint32_t  rptr;
	uint32_t  wptr;
};

struct amdgpu_fw_shared_multi_queue {
	uint8_t decode_queue_mode;
	uint8_t encode_generalpurpose_queue_mode;
	uint8_t encode_lowlatency_queue_mode;
	uint8_t encode_realtime_queue_mode;
	uint8_t padding[4];
};

struct amdgpu_fw_shared_sw_ring {
	uint8_t is_enabled;
	uint8_t padding[3];
};

struct amdgpu_fw_shared_unified_queue_struct {
	uint8_t is_enabled;
	uint8_t queue_mode;
	uint8_t queue_status;
	uint8_t padding[5];
};

struct amdgpu_fw_shared_fw_logging {
	uint8_t is_enabled;
	uint32_t addr_lo;
	uint32_t addr_hi;
	uint32_t size;
};

struct amdgpu_fw_shared_smu_interface_info {
	uint8_t smu_interface_type;
	uint8_t padding[3];
};

struct amdgpu_fw_shared {
	uint32_t present_flag_0;
	uint8_t pad[44];
	struct amdgpu_fw_shared_rb_ptrs_struct rb;
	uint8_t pad1[1];
	struct amdgpu_fw_shared_multi_queue multi_queue;
	struct amdgpu_fw_shared_sw_ring sw_ring;
	struct amdgpu_fw_shared_fw_logging fw_log;
	struct amdgpu_fw_shared_smu_interface_info smu_interface_info;
};

struct amdgpu_vcn_rb_setup_info {
	uint32_t  rb_addr_lo;
	uint32_t  rb_addr_hi;
	uint32_t  rb_size;
};

struct amdgpu_fw_shared_rb_setup {
	uint32_t  is_rb_enabled_flags;

	union {
		struct {
			uint32_t  rb_addr_lo;
			uint32_t  rb_addr_hi;
			uint32_t  rb_size;
			uint32_t  rb4_addr_lo;
			uint32_t  rb4_addr_hi;
			uint32_t  rb4_size;
			uint32_t  reserved[6];
		};

		struct {
			struct amdgpu_vcn_rb_setup_info rb_info[MAX_NUM_VCN_RB_SETUP];
		};
	};
};

struct amdgpu_fw_shared_drm_key_wa {
	uint8_t  method;
	uint8_t  reserved[3];
};

struct amdgpu_fw_shared_queue_decouple {
	uint8_t  is_enabled;
	uint8_t  reserved[7];
};
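/*
 * Illustrative use of present_flag_0 (a sketch only; instance index "i" and
 * a mapped fw_shared buffer are assumed). The host advertises an optional
 * feature to firmware by setting the matching bit and filling in the
 * corresponding sub-structure:
 *
 *	struct amdgpu_fw_shared *fw_shared =
 *		adev->vcn.inst[i].fw_shared.cpu_addr;
 *
 *	fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_FW_LOGGING_FLAG);
 *	fw_shared->fw_log.is_enabled = 1;
 */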
struct amdgpu_vcn4_fw_shared {
	uint32_t present_flag_0;
	uint8_t pad[12];
	struct amdgpu_fw_shared_unified_queue_struct sq;
	uint8_t pad1[8];
	struct amdgpu_fw_shared_fw_logging fw_log;
	uint8_t pad2[20];
	struct amdgpu_fw_shared_rb_setup rb_setup;
	struct amdgpu_fw_shared_smu_interface_info smu_dpm_interface;
	struct amdgpu_fw_shared_drm_key_wa drm_key_wa;
	uint8_t pad3[9];
	struct amdgpu_fw_shared_queue_decouple decouple;
};

struct amdgpu_vcn_fwlog {
	uint32_t rptr;
	uint32_t wptr;
	uint32_t buffer_size;
	uint32_t header_size;
	uint8_t wrapped;
};

struct amdgpu_vcn_decode_buffer {
	uint32_t valid_buf_flag;
	uint32_t msg_buffer_address_hi;
	uint32_t msg_buffer_address_lo;
	uint32_t pad[30];
};

struct amdgpu_vcn_rb_metadata {
	uint32_t size;
	uint32_t present_flag_0;

	uint8_t version;
	uint8_t ring_id;
	uint8_t pad[26];
};

struct amdgpu_vcn5_fw_shared {
	uint32_t present_flag_0;
	uint8_t pad[12];
	struct amdgpu_fw_shared_unified_queue_struct sq;
	uint8_t pad1[8];
	struct amdgpu_fw_shared_fw_logging fw_log;
	uint8_t pad2[20];
	struct amdgpu_fw_shared_rb_setup rb_setup;
	struct amdgpu_fw_shared_smu_interface_info smu_dpm_interface;
	struct amdgpu_fw_shared_drm_key_wa drm_key_wa;
	uint8_t pad3[9];
};

#define VCN_BLOCK_ENCODE_DISABLE_MASK	0x80
#define VCN_BLOCK_DECODE_DISABLE_MASK	0x40
#define VCN_BLOCK_QUEUE_DISABLE_MASK	0xC0

enum vcn_ring_type {
	VCN_ENCODE_RING,
	VCN_DECODE_RING,
	VCN_UNIFIED_RING,
};

int amdgpu_vcn_early_init(struct amdgpu_device *adev);
int amdgpu_vcn_sw_init(struct amdgpu_device *adev);
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev);
int amdgpu_vcn_suspend(struct amdgpu_device *adev);
int amdgpu_vcn_resume(struct amdgpu_device *adev);
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring);
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring);

bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev,
				enum vcn_ring_type type, uint32_t vcn_instance);

int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring);
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout);
int amdgpu_vcn_dec_sw_ring_test_ring(struct amdgpu_ring *ring);
int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout);
int amdgpu_vcn_unified_ring_test_ib(struct amdgpu_ring *ring, long timeout);

int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring);
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout);

enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring);

void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev);

void amdgpu_vcn_fwlog_init(struct amdgpu_vcn_inst *vcn);
void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev,
				   uint8_t i, struct amdgpu_vcn_inst *vcn);

int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
				  struct amdgpu_irq_src *source,
				  struct amdgpu_iv_entry *entry);
int amdgpu_vcn_ras_late_init(struct amdgpu_device *adev,
			     struct ras_common_if *ras_block);
int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev);

int amdgpu_vcn_psp_update_sram(struct amdgpu_device *adev, int inst_idx,
			       enum AMDGPU_UCODE_ID ucode_id);

#endif