Lines Matching refs:vcn

82 INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler); in amdgpu_vcn_sw_init()
83 mutex_init(&adev->vcn.vcn_pg_lock); in amdgpu_vcn_sw_init()
84 mutex_init(&adev->vcn.vcn1_jpeg1_workaround); in amdgpu_vcn_sw_init()
85 atomic_set(&adev->vcn.total_submission_cnt, 0); in amdgpu_vcn_sw_init()
86 for (i = 0; i < adev->vcn.num_vcn_inst; i++) in amdgpu_vcn_sw_init()
87 atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0); in amdgpu_vcn_sw_init()
102 adev->vcn.indirect_sram = true; in amdgpu_vcn_sw_init()
112 adev->vcn.indirect_sram = true; in amdgpu_vcn_sw_init()
118 adev->vcn.indirect_sram = true; in amdgpu_vcn_sw_init()
124 adev->vcn.indirect_sram = true; in amdgpu_vcn_sw_init()
130 adev->vcn.indirect_sram = true; in amdgpu_vcn_sw_init()
136 adev->vcn.indirect_sram = true; in amdgpu_vcn_sw_init()
142 adev->vcn.indirect_sram = true; in amdgpu_vcn_sw_init()
148 adev->vcn.indirect_sram = true; in amdgpu_vcn_sw_init()
154 adev->vcn.indirect_sram = true; in amdgpu_vcn_sw_init()
160 adev->vcn.indirect_sram = true; in amdgpu_vcn_sw_init()
166 adev->vcn.indirect_sram = true; in amdgpu_vcn_sw_init()
172 adev->vcn.indirect_sram = true; in amdgpu_vcn_sw_init()
178 r = request_firmware(&adev->vcn.fw, fw_name, adev->dev); in amdgpu_vcn_sw_init()
185 r = amdgpu_ucode_validate(adev->vcn.fw); in amdgpu_vcn_sw_init()
189 release_firmware(adev->vcn.fw); in amdgpu_vcn_sw_init()
190 adev->vcn.fw = NULL; in amdgpu_vcn_sw_init()
194 hdr = (const struct common_firmware_header *)adev->vcn.fw->data; in amdgpu_vcn_sw_init()
195 adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version); in amdgpu_vcn_sw_init()
229 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { in amdgpu_vcn_sw_init()
230 if (adev->vcn.harvest_config & (1 << i)) in amdgpu_vcn_sw_init()
234 AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].vcpu_bo, in amdgpu_vcn_sw_init()
235 &adev->vcn.inst[i].gpu_addr, &adev->vcn.inst[i].cpu_addr); in amdgpu_vcn_sw_init()
241 adev->vcn.inst[i].fw_shared_cpu_addr = adev->vcn.inst[i].cpu_addr + in amdgpu_vcn_sw_init()
243 adev->vcn.inst[i].fw_shared_gpu_addr = adev->vcn.inst[i].gpu_addr + in amdgpu_vcn_sw_init()
246 if (adev->vcn.indirect_sram) { in amdgpu_vcn_sw_init()
248 AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].dpg_sram_bo, in amdgpu_vcn_sw_init()
249 &adev->vcn.inst[i].dpg_sram_gpu_addr, &adev->vcn.inst[i].dpg_sram_cpu_addr); in amdgpu_vcn_sw_init()
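
The hits above all fall in amdgpu_vcn_sw_init(). Read together they trace one flow: set up the shared idle worker, locks and submission counters, opt each supported ASIC into indirect DPG SRAM loading, fetch and validate the VCN microcode, then allocate a VCPU buffer object per non-harvested instance. A minimal sketch of that flow follows, assuming the usual amdgpu headers; the fw_name/bo_size parameters, error handling and control flow are assumptions, only the adev->vcn fields come from the hits.

/* Simplified sketch of the amdgpu_vcn_sw_init() flow traced above. */
static int vcn_sw_init_sketch(struct amdgpu_device *adev,
			      const char *fw_name, unsigned long bo_size)
{
	const struct common_firmware_header *hdr;
	int i, r;

	/* One idle worker, two mutexes and the submission counters are shared. */
	INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
	mutex_init(&adev->vcn.vcn_pg_lock);
	mutex_init(&adev->vcn.vcn1_jpeg1_workaround);
	atomic_set(&adev->vcn.total_submission_cnt, 0);
	for (i = 0; i < adev->vcn.num_vcn_inst; i++)
		atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);

	/*
	 * The twelve identical "indirect_sram = true" hits are the per-ASIC
	 * cases of a switch on the chip type: each supported part opts in to
	 * loading the DPG SRAM indirectly.
	 */
	adev->vcn.indirect_sram = true;

	/* Fetch and validate the VCN microcode, then cache its version. */
	r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
	if (r)
		return r;
	r = amdgpu_ucode_validate(adev->vcn.fw);
	if (r) {
		release_firmware(adev->vcn.fw);
		adev->vcn.fw = NULL;
		return r;
	}
	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);

	/* One VCPU BO per instance that has not been harvested away. */
	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_VRAM,
					    &adev->vcn.inst[i].vcpu_bo,
					    &adev->vcn.inst[i].gpu_addr,
					    &adev->vcn.inst[i].cpu_addr);
		if (r)
			return r;
	}
	return 0;
}
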
264 for (j = 0; j < adev->vcn.num_vcn_inst; ++j) { in amdgpu_vcn_sw_fini()
265 if (adev->vcn.harvest_config & (1 << j)) in amdgpu_vcn_sw_fini()
268 if (adev->vcn.indirect_sram) { in amdgpu_vcn_sw_fini()
269 amdgpu_bo_free_kernel(&adev->vcn.inst[j].dpg_sram_bo, in amdgpu_vcn_sw_fini()
270 &adev->vcn.inst[j].dpg_sram_gpu_addr, in amdgpu_vcn_sw_fini()
271 (void **)&adev->vcn.inst[j].dpg_sram_cpu_addr); in amdgpu_vcn_sw_fini()
273 kvfree(adev->vcn.inst[j].saved_bo); in amdgpu_vcn_sw_fini()
275 amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo, in amdgpu_vcn_sw_fini()
276 &adev->vcn.inst[j].gpu_addr, in amdgpu_vcn_sw_fini()
277 (void **)&adev->vcn.inst[j].cpu_addr); in amdgpu_vcn_sw_fini()
279 amdgpu_ring_fini(&adev->vcn.inst[j].ring_dec); in amdgpu_vcn_sw_fini()
281 for (i = 0; i < adev->vcn.num_enc_rings; ++i) in amdgpu_vcn_sw_fini()
282 amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]); in amdgpu_vcn_sw_fini()
285 release_firmware(adev->vcn.fw); in amdgpu_vcn_sw_fini()
286 mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround); in amdgpu_vcn_sw_fini()
287 mutex_destroy(&adev->vcn.vcn_pg_lock); in amdgpu_vcn_sw_fini()
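
amdgpu_vcn_sw_fini() undoes that setup in reverse order. The hits already contain nearly every statement; reconstructed below with the loop structure and continue statements that the match list omits.

/* Sketch of the matching teardown in amdgpu_vcn_sw_fini(). */
static int vcn_sw_fini_sketch(struct amdgpu_device *adev)
{
	int i, j;

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;

		if (adev->vcn.indirect_sram)
			amdgpu_bo_free_kernel(&adev->vcn.inst[j].dpg_sram_bo,
					      &adev->vcn.inst[j].dpg_sram_gpu_addr,
					      (void **)&adev->vcn.inst[j].dpg_sram_cpu_addr);

		kvfree(adev->vcn.inst[j].saved_bo);

		amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo,
				      &adev->vcn.inst[j].gpu_addr,
				      (void **)&adev->vcn.inst[j].cpu_addr);

		amdgpu_ring_fini(&adev->vcn.inst[j].ring_dec);
		for (i = 0; i < adev->vcn.num_enc_rings; ++i)
			amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
	}

	release_firmware(adev->vcn.fw);
	mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround);
	mutex_destroy(&adev->vcn.vcn_pg_lock);

	return 0;
}
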
321 cancel_delayed_work_sync(&adev->vcn.idle_work); in amdgpu_vcn_suspend()
323 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in amdgpu_vcn_suspend()
324 if (adev->vcn.harvest_config & (1 << i)) in amdgpu_vcn_suspend()
326 if (adev->vcn.inst[i].vcpu_bo == NULL) in amdgpu_vcn_suspend()
329 size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo); in amdgpu_vcn_suspend()
330 ptr = adev->vcn.inst[i].cpu_addr; in amdgpu_vcn_suspend()
332 adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL); in amdgpu_vcn_suspend()
333 if (!adev->vcn.inst[i].saved_bo) in amdgpu_vcn_suspend()
337 memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size); in amdgpu_vcn_suspend()
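
The suspend hits show the idle worker being flushed and each instance's VCPU BO contents being copied out of VRAM into a kvmalloc'd shadow buffer. A sketch, with the error paths assumed:

/* Sketch of amdgpu_vcn_suspend(): shadow the VCPU BO in system memory. */
static int vcn_suspend_sketch(struct amdgpu_device *adev)
{
	unsigned int size;
	void *ptr;
	int i;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->vcn.inst[i].vcpu_bo == NULL)
			continue;

		size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
		ptr = adev->vcn.inst[i].cpu_addr;

		adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL);
		if (!adev->vcn.inst[i].saved_bo)
			return -ENOMEM;

		/* VRAM is mapped as I/O memory, hence memcpy_fromio(). */
		memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
	}
	return 0;
}
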
350 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in amdgpu_vcn_resume()
351 if (adev->vcn.harvest_config & (1 << i)) in amdgpu_vcn_resume()
353 if (adev->vcn.inst[i].vcpu_bo == NULL) in amdgpu_vcn_resume()
356 size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo); in amdgpu_vcn_resume()
357 ptr = adev->vcn.inst[i].cpu_addr; in amdgpu_vcn_resume()
359 if (adev->vcn.inst[i].saved_bo != NULL) { in amdgpu_vcn_resume()
361 memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size); in amdgpu_vcn_resume()
364 kvfree(adev->vcn.inst[i].saved_bo); in amdgpu_vcn_resume()
365 adev->vcn.inst[i].saved_bo = NULL; in amdgpu_vcn_resume()
370 hdr = (const struct common_firmware_header *)adev->vcn.fw->data; in amdgpu_vcn_resume()
374 memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset, in amdgpu_vcn_resume()
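
On resume the shadow copy is written back and freed; when no shadow exists, the last two hits show the microcode being re-uploaded straight from adev->vcn.fw. A sketch; the header fields used for the re-upload offset and size are assumptions:

/* Sketch of amdgpu_vcn_resume(): restore the shadow or re-upload firmware. */
static int vcn_resume_sketch(struct amdgpu_device *adev)
{
	unsigned int size;
	void *ptr;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->vcn.inst[i].vcpu_bo == NULL)
			return -EINVAL;

		size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
		ptr = adev->vcn.inst[i].cpu_addr;

		if (adev->vcn.inst[i].saved_bo != NULL) {
			/* Restore the image saved at suspend time. */
			memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
			kvfree(adev->vcn.inst[i].saved_bo);
			adev->vcn.inst[i].saved_bo = NULL;
		} else {
			/* No shadow: copy the microcode image back in. */
			const struct common_firmware_header *hdr;
			unsigned int offset;

			hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
			offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
			memcpy_toio(adev->vcn.inst[i].cpu_addr,
				    adev->vcn.fw->data + offset,
				    le32_to_cpu(hdr->ucode_size_bytes));
		}
	}
	return 0;
}
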
390 container_of(work, struct amdgpu_device, vcn.idle_work.work); in amdgpu_vcn_idle_work_handler()
395 for (j = 0; j < adev->vcn.num_vcn_inst; ++j) { in amdgpu_vcn_idle_work_handler()
396 if (adev->vcn.harvest_config & (1 << j)) in amdgpu_vcn_idle_work_handler()
399 for (i = 0; i < adev->vcn.num_enc_rings; ++i) { in amdgpu_vcn_idle_work_handler()
400 fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]); in amdgpu_vcn_idle_work_handler()
407 unlikely(atomic_read(&adev->vcn.inst[j].dpg_enc_submission_cnt))) in amdgpu_vcn_idle_work_handler()
412 adev->vcn.pause_dpg_mode(adev, j, &new_state); in amdgpu_vcn_idle_work_handler()
415 fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec); in amdgpu_vcn_idle_work_handler()
419 if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) { in amdgpu_vcn_idle_work_handler()
427 schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT); in amdgpu_vcn_idle_work_handler()
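
amdgpu_vcn_idle_work_handler() sums the fences still outstanding on every decode and encode ring; only when nothing is pending and total_submission_cnt is zero does it gate VCN, otherwise it re-arms itself after VCN_IDLE_TIMEOUT. Sketch below; the DPG pause handling via adev->vcn.pause_dpg_mode is elided and the power-gating call is an assumption:

/* Sketch of the idle worker: gate VCN only when nothing is in flight. */
static void vcn_idle_work_sketch(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vcn.idle_work.work);
	unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
	unsigned int i, j;

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;

		for (i = 0; i < adev->vcn.num_enc_rings; ++i)
			fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);

		/* In DPG mode the per-instance pause state is also updated here. */

		fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec);
		fences += fence[j];
	}

	if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) {
		/* Fully idle: gate the VCN block (call assumed, simplified). */
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
						       AMD_PG_STATE_GATE);
	} else {
		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
	}
}
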
436 atomic_inc(&adev->vcn.total_submission_cnt); in amdgpu_vcn_ring_begin_use()
438 if (!cancel_delayed_work_sync(&adev->vcn.idle_work)) { in amdgpu_vcn_ring_begin_use()
445 mutex_lock(&adev->vcn.vcn_pg_lock); in amdgpu_vcn_ring_begin_use()
453 atomic_inc(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt); in amdgpu_vcn_ring_begin_use()
459 for (i = 0; i < adev->vcn.num_enc_rings; ++i) in amdgpu_vcn_ring_begin_use()
460 fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]); in amdgpu_vcn_ring_begin_use()
462 if (fences || atomic_read(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt)) in amdgpu_vcn_ring_begin_use()
468 adev->vcn.pause_dpg_mode(adev, ring->me, &new_state); in amdgpu_vcn_ring_begin_use()
470 mutex_unlock(&adev->vcn.vcn_pg_lock); in amdgpu_vcn_ring_begin_use()
477 atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt); in amdgpu_vcn_ring_end_use()
479 atomic_dec(&ring->adev->vcn.total_submission_cnt); in amdgpu_vcn_ring_end_use()
481 schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT); in amdgpu_vcn_ring_end_use()
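
amdgpu_vcn_ring_begin_use()/..._end_use() bracket every submission: the counter keeps the idle worker from gating VCN while jobs are in flight, and cancel_delayed_work_sync() returning false means the worker was not pending, i.e. the block may already be gated and has to be ungated first. Sketch; the DPG and per-instance enc-counter handling under vcn_pg_lock is elided and the ungate call is an assumption:

/* Sketch of the begin_use/end_use pairing around each VCN submission. */
static void vcn_ring_begin_use_sketch(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	atomic_inc(&adev->vcn.total_submission_cnt);

	/* Worker not pending: VCN may already be gated, so ungate it. */
	if (!cancel_delayed_work_sync(&adev->vcn.idle_work))
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
						       AMD_PG_STATE_UNGATE);

	mutex_lock(&adev->vcn.vcn_pg_lock);
	/* DPG pause-state and enc submission-count handling elided. */
	mutex_unlock(&adev->vcn.vcn_pg_lock);
}

static void vcn_ring_end_use_sketch(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	atomic_dec(&adev->vcn.total_submission_cnt);
	schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}
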
495 WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD); in amdgpu_vcn_dec_ring_test_ring()
499 amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0)); in amdgpu_vcn_dec_ring_test_ring()
503 tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9); in amdgpu_vcn_dec_ring_test_ring()
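
The decode ring test seeds scratch9 with 0xCAFEDEAD through the external (MMIO) register offset, then asks the ring to rewrite it through the internal offset and polls for the new value. A sketch; the 0xDEADBEEF poll value and timeout handling are assumptions, the two register paths come from the hits:

/* Sketch of amdgpu_vcn_dec_ring_test_ring(): poke scratch9 and poll it. */
static int vcn_dec_ring_test_sketch(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp;
	unsigned int i;
	int r;

	/* Seed the register with a known "dead" value. */
	WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);

	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	/* Ask the VCN block to overwrite scratch9 with a different value. */
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	/* Poll until the hardware has consumed the packet or we time out. */
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	return (i < adev->usec_timeout) ? 0 : -ETIMEDOUT;
}
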
566 ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0); in amdgpu_vcn_dec_send_msg()
568 ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0); in amdgpu_vcn_dec_send_msg()
570 ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0); in amdgpu_vcn_dec_send_msg()
573 ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0); in amdgpu_vcn_dec_send_msg()
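
Finally, amdgpu_vcn_dec_send_msg() builds its indirect buffer from PACKET0 writes to the internal data0/data1/cmd registers and pads the rest with nops. A sketch of just the IB fill; allocation, the message address plumbing and fence handling are omitted, and the IB length and command value are assumptions:

/* Sketch of the decode-message IB layout used by amdgpu_vcn_dec_send_msg(). */
static void vcn_dec_fill_msg_ib_sketch(struct amdgpu_device *adev,
				       struct amdgpu_ib *ib, uint64_t msg_addr)
{
	unsigned int i;

	/* Message BO address goes into DATA0/DATA1, then a command word. */
	ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
	ib->ptr[1] = lower_32_bits(msg_addr);
	ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
	ib->ptr[3] = upper_32_bits(msg_addr);
	ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0);
	ib->ptr[5] = 0;	/* decode message command (value assumed) */

	/* Pad the remainder of the IB with NOP register writes. */
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0);
		ib->ptr[i + 1] = 0;
	}
	ib->length_dw = 16;
}
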