
Lines matching refs:uvd (all hits are in amdgpu_uvd.c)

126 INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);  in amdgpu_uvd_sw_init()
174 r = request_firmware(&adev->uvd.fw, fw_name, adev->dev); in amdgpu_uvd_sw_init()
181 r = amdgpu_ucode_validate(adev->uvd.fw); in amdgpu_uvd_sw_init()
185 release_firmware(adev->uvd.fw); in amdgpu_uvd_sw_init()
186 adev->uvd.fw = NULL; in amdgpu_uvd_sw_init()
191 adev->uvd.max_handles = AMDGPU_DEFAULT_UVD_HANDLES; in amdgpu_uvd_sw_init()
193 hdr = (const struct common_firmware_header *)adev->uvd.fw->data; in amdgpu_uvd_sw_init()
208 adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES; in amdgpu_uvd_sw_init()
210 adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) | in amdgpu_uvd_sw_init()
215 (adev->uvd.fw_version < FW_1_66_16)) in amdgpu_uvd_sw_init()
220 + AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles; in amdgpu_uvd_sw_init()
225 AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.vcpu_bo, in amdgpu_uvd_sw_init()
226 &adev->uvd.gpu_addr, &adev->uvd.cpu_addr); in amdgpu_uvd_sw_init()
232 ring = &adev->uvd.ring; in amdgpu_uvd_sw_init()
234 r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity, in amdgpu_uvd_sw_init()
241 for (i = 0; i < adev->uvd.max_handles; ++i) { in amdgpu_uvd_sw_init()
242 atomic_set(&adev->uvd.handles[i], 0); in amdgpu_uvd_sw_init()
243 adev->uvd.filp[i] = NULL; in amdgpu_uvd_sw_init()
248 adev->uvd.address_64_bit = true; in amdgpu_uvd_sw_init()
252 adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_65_10; in amdgpu_uvd_sw_init()
255 adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_11; in amdgpu_uvd_sw_init()
258 adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_12; in amdgpu_uvd_sw_init()
261 adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_37_15; in amdgpu_uvd_sw_init()
264 adev->uvd.use_ctx_buf = adev->asic_type >= CHIP_POLARIS10; in amdgpu_uvd_sw_init()
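The use_ctx_buf checks above gate the context-buffer feature on adev->uvd.fw_version, which line 210 builds by packing the version fields into one integer so that a plain unsigned compare orders firmware releases. A minimal sketch of that idea follows; the macro name, the third byte, and the exact FW_* values are illustrative assumptions, not the driver's definitions.

/* Illustrative only: pack a UVD firmware version so that newer
 * releases compare greater than older ones (cf. line 210 above). */
#define UVD_FW_VER(major, minor, rev) \
	(((major) << 24) | ((minor) << 16) | ((rev) << 8))

/* A check like the Tonga one at line 252 then reduces to a single
 * unsigned compare, e.g.
 *   use_ctx_buf = fw_version >= UVD_FW_VER(1, 65, 10);            */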
272 kfree(adev->uvd.saved_bo); in amdgpu_uvd_sw_fini()
274 amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity); in amdgpu_uvd_sw_fini()
276 amdgpu_bo_free_kernel(&adev->uvd.vcpu_bo, in amdgpu_uvd_sw_fini()
277 &adev->uvd.gpu_addr, in amdgpu_uvd_sw_fini()
278 (void **)&adev->uvd.cpu_addr); in amdgpu_uvd_sw_fini()
280 amdgpu_ring_fini(&adev->uvd.ring); in amdgpu_uvd_sw_fini()
282 release_firmware(adev->uvd.fw); in amdgpu_uvd_sw_fini()
293 if (adev->uvd.vcpu_bo == NULL) in amdgpu_uvd_suspend()
298 for (i = 0; i < adev->uvd.max_handles; ++i) in amdgpu_uvd_suspend()
299 if (atomic_read(&adev->uvd.handles[i])) in amdgpu_uvd_suspend()
302 if (i == adev->uvd.max_handles) in amdgpu_uvd_suspend()
306 cancel_delayed_work_sync(&adev->uvd.idle_work); in amdgpu_uvd_suspend()
308 size = amdgpu_bo_size(adev->uvd.vcpu_bo); in amdgpu_uvd_suspend()
309 ptr = adev->uvd.cpu_addr; in amdgpu_uvd_suspend()
311 adev->uvd.saved_bo = kmalloc(size, GFP_KERNEL); in amdgpu_uvd_suspend()
312 if (!adev->uvd.saved_bo) in amdgpu_uvd_suspend()
315 memcpy_fromio(adev->uvd.saved_bo, ptr, size); in amdgpu_uvd_suspend()
325 if (adev->uvd.vcpu_bo == NULL) in amdgpu_uvd_resume()
328 size = amdgpu_bo_size(adev->uvd.vcpu_bo); in amdgpu_uvd_resume()
329 ptr = adev->uvd.cpu_addr; in amdgpu_uvd_resume()
331 if (adev->uvd.saved_bo != NULL) { in amdgpu_uvd_resume()
332 memcpy_toio(ptr, adev->uvd.saved_bo, size); in amdgpu_uvd_resume()
333 kfree(adev->uvd.saved_bo); in amdgpu_uvd_resume()
334 adev->uvd.saved_bo = NULL; in amdgpu_uvd_resume()
339 hdr = (const struct common_firmware_header *)adev->uvd.fw->data; in amdgpu_uvd_resume()
342 memcpy_toio(adev->uvd.cpu_addr, adev->uvd.fw->data + offset, in amdgpu_uvd_resume()
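The suspend/resume lines show the usual handling of the VRAM-backed VCPU BO: its contents are copied out through the I/O mapping before power-down, and on resume either the saved image is copied back or the firmware image is re-uploaded from adev->uvd.fw (line 342). A minimal sketch of that save/restore pair, with hypothetical helper names, assuming the amdgpu driver-internal header:

#include "amdgpu.h"	/* struct amdgpu_device, amdgpu_bo_size() */

/* Sketch of the save side (cf. lines 308-315): the BO lives in VRAM,
 * so memcpy_fromio()/memcpy_toio() are used instead of memcpy(). */
static int uvd_save_vcpu_bo(struct amdgpu_device *adev)
{
	unsigned size = amdgpu_bo_size(adev->uvd.vcpu_bo);
	void *ptr = adev->uvd.cpu_addr;

	adev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
	if (!adev->uvd.saved_bo)
		return -ENOMEM;

	memcpy_fromio(adev->uvd.saved_bo, ptr, size);
	return 0;
}

/* Sketch of the restore side (cf. lines 331-334). */
static void uvd_restore_vcpu_bo(struct amdgpu_device *adev)
{
	unsigned size = amdgpu_bo_size(adev->uvd.vcpu_bo);
	void *ptr = adev->uvd.cpu_addr;

	if (adev->uvd.saved_bo) {
		memcpy_toio(ptr, adev->uvd.saved_bo, size);
		kfree(adev->uvd.saved_bo);
		adev->uvd.saved_bo = NULL;
	}
}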
355 struct amdgpu_ring *ring = &adev->uvd.ring; in amdgpu_uvd_free_handles()
358 for (i = 0; i < adev->uvd.max_handles; ++i) { in amdgpu_uvd_free_handles()
359 uint32_t handle = atomic_read(&adev->uvd.handles[i]); in amdgpu_uvd_free_handles()
360 if (handle != 0 && adev->uvd.filp[i] == filp) { in amdgpu_uvd_free_handles()
373 adev->uvd.filp[i] = NULL; in amdgpu_uvd_free_handles()
374 atomic_set(&adev->uvd.handles[i], 0); in amdgpu_uvd_free_handles()
422 if (!ctx->parser->adev->uvd.address_64_bit) { in amdgpu_uvd_cs_pass1()
580 if (!adev->uvd.use_ctx_buf){ in amdgpu_uvd_cs_msg_decode()
676 for (i = 0; i < adev->uvd.max_handles; ++i) { in amdgpu_uvd_cs_msg()
677 if (atomic_read(&adev->uvd.handles[i]) == handle) { in amdgpu_uvd_cs_msg()
682 if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) { in amdgpu_uvd_cs_msg()
683 adev->uvd.filp[i] = ctx->parser->filp; in amdgpu_uvd_cs_msg()
699 for (i = 0; i < adev->uvd.max_handles; ++i) { in amdgpu_uvd_cs_msg()
700 if (atomic_read(&adev->uvd.handles[i]) == handle) { in amdgpu_uvd_cs_msg()
701 if (adev->uvd.filp[i] != ctx->parser->filp) { in amdgpu_uvd_cs_msg()
714 for (i = 0; i < adev->uvd.max_handles; ++i) in amdgpu_uvd_cs_msg()
715 atomic_cmpxchg(&adev->uvd.handles[i], handle, 0); in amdgpu_uvd_cs_msg()
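Lines 676-715 show the lock-free session-handle table: a create message claims the first free slot with atomic_cmpxchg() and records the owning file, a decode message verifies the handle belongs to the caller, and a destroy message releases the slot. A self-contained sketch of that allocation scheme follows; the structure and function names are illustrative, not the driver's API.

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/types.h>

#define MAX_HANDLES 16			/* stands in for adev->uvd.max_handles */

struct uvd_handle_table {
	atomic_t handles[MAX_HANDLES];	/* 0 means the slot is free */
	void *filp[MAX_HANDLES];	/* owner of each session */
};

/* Claim a free slot for a new session handle (cf. lines 676-683). */
static int handle_create(struct uvd_handle_table *t, u32 handle, void *filp)
{
	int i;

	for (i = 0; i < MAX_HANDLES; ++i) {
		if (atomic_read(&t->handles[i]) == handle)
			return -EINVAL;		/* handle already in use */
		if (!atomic_cmpxchg(&t->handles[i], 0, handle)) {
			t->filp[i] = filp;	/* claimed a free slot */
			return 0;
		}
	}
	return -ENOSPC;				/* no free slot left */
}

/* Release the slot holding this handle, if any (cf. lines 714-715). */
static void handle_destroy(struct uvd_handle_table *t, u32 handle)
{
	int i;

	for (i = 0; i < MAX_HANDLES; ++i)
		atomic_cmpxchg(&t->handles[i], handle, 0);
}

amdgpu_uvd_free_handles() (lines 358-374) walks the same table to clean up sessions left behind by a closing file descriptor.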
783 if (!ctx->parser->adev->uvd.address_64_bit) { in amdgpu_uvd_cs_pass2()
791 (start >> 28) != (ctx->parser->adev->uvd.gpu_addr >> 28)) { in amdgpu_uvd_cs_pass2()
932 if (!parser->adev->uvd.address_64_bit) { in amdgpu_uvd_ring_parse_cs()
976 if (!ring->adev->uvd.address_64_bit) { in amdgpu_uvd_send_msg()
1023 r = amdgpu_job_submit(job, ring, &adev->uvd.entity, in amdgpu_uvd_send_msg()
1145 container_of(work, struct amdgpu_device, uvd.idle_work.work); in amdgpu_uvd_idle_work_handler()
1146 unsigned fences = amdgpu_fence_count_emitted(&adev->uvd.ring); in amdgpu_uvd_idle_work_handler()
1163 schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT); in amdgpu_uvd_idle_work_handler()
1170 bool set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work); in amdgpu_uvd_ring_begin_use()
1190 schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT); in amdgpu_uvd_ring_end_use()
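The idle_work lines implement the usual UVD power-management pattern: each submission cancels the pending idle work (a successful cancel means the block is already clocked up), and end_use re-arms the delayed work so the handler can power the block down once no fences remain outstanding. A hedged sketch of that begin/end pairing, with the clock and powergating calls reduced to a placeholder:

#include "amdgpu.h"	/* struct amdgpu_device, amdgpu delayed idle work */

static void uvd_set_clocks(struct amdgpu_device *adev, bool enable);
	/* placeholder for the real clock/powergating calls */

/* Cf. line 1170: if the idle work was still pending, clocks are
 * already up; otherwise bring them back before submitting work. */
static void uvd_ring_begin_use(struct amdgpu_device *adev)
{
	bool set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);

	if (set_clocks)
		uvd_set_clocks(adev, true);
}

/* Cf. line 1190: re-arm the idle handler, which checks
 * amdgpu_fence_count_emitted() (line 1146) and gates the block
 * once nothing is outstanding. */
static void uvd_ring_end_use(struct amdgpu_device *adev)
{
	schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
}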
1246 for (i = 0; i < adev->uvd.max_handles; ++i) { in amdgpu_uvd_used_handles()
1252 if (atomic_read(&adev->uvd.handles[i])) in amdgpu_uvd_used_handles()