/drivers/gpu/drm/scheduler/
sched_main.c
  172  struct drm_sched_fence *s_fence = s_job->s_fence;  in drm_sched_job_done() local
  173  struct drm_gpu_scheduler *sched = s_fence->sched;  in drm_sched_job_done()
  178  trace_drm_sched_process_job(s_fence);  in drm_sched_job_done()
  180  dma_fence_get(&s_fence->finished);  in drm_sched_job_done()
  181  drm_sched_fence_finished(s_fence);  in drm_sched_job_done()
  182  dma_fence_put(&s_fence->finished);  in drm_sched_job_done()
  210  struct drm_sched_fence *s_fence;  in drm_sched_dependency_optimized() local
  216  s_fence = to_drm_sched_fence(fence);  in drm_sched_dependency_optimized()
  217  if (s_fence && s_fence->sched == sched)  in drm_sched_dependency_optimized()
  417  if (s_job->s_fence->parent &&  in drm_sched_stop()
  [all …]
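Taken together, the sched_main.c hits trace the completion path: drm_sched_job_done() pins the finished fence, signals it through drm_sched_fence_finished(), then drops the pin so the fence cannot be freed while its callbacks run. A minimal sketch of that pattern, assuming the one-argument drm_sched_fence_finished() these hits show; example_job_done is a hypothetical name, and the two includes below are assumed by the later sketches as well:

    #include <linux/dma-fence.h>
    #include <drm/gpu_scheduler.h>

    /* Hypothetical done callback mirroring sched_main.c:172-182. */
    static void example_job_done(struct drm_sched_job *s_job)
    {
            struct drm_sched_fence *s_fence = s_job->s_fence;

            /* Hold a temporary reference: signalling runs callbacks
             * that may drop the last reference to the fence. */
            dma_fence_get(&s_fence->finished);
            drm_sched_fence_finished(s_fence);
            dma_fence_put(&s_fence->finished);
    }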
sched_entity.c
  209  drm_sched_fence_finished(job->s_fence);  in drm_sched_entity_kill_jobs_cb()
  210  WARN_ON(job->s_fence->parent);  in drm_sched_entity_kill_jobs_cb()
  229  struct drm_sched_fence *s_fence = job->s_fence;  in drm_sched_entity_kill_jobs() local
  235  drm_sched_fence_scheduled(s_fence);  in drm_sched_entity_kill_jobs()
  236  dma_fence_set_error(&s_fence->finished, -ESRCH);  in drm_sched_entity_kill_jobs()
  373  struct drm_sched_fence *s_fence;  in drm_sched_entity_add_dependency_cb() local
  386  s_fence = to_drm_sched_fence(fence);  in drm_sched_entity_add_dependency_cb()
  387  if (s_fence && s_fence->sched == sched) {  in drm_sched_entity_add_dependency_cb()
  393  fence = dma_fence_get(&s_fence->scheduled);  in drm_sched_entity_add_dependency_cb()
  439  dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);  in drm_sched_entity_pop_job()
  [all …]
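Lines 386-393 above show the scheduler's dependency shortcut: a fence produced by the same drm_gpu_scheduler only needs to reach the scheduled state, because ring ordering guarantees the rest. A hedged sketch of that check; example_optimize_dep is illustrative, not the kernel's function:

    /* Swap a same-scheduler dependency for its lighter scheduled
     * fence, mirroring drm_sched_entity_add_dependency_cb(). */
    static struct dma_fence *
    example_optimize_dep(struct drm_gpu_scheduler *sched,
                         struct dma_fence *fence)
    {
            struct drm_sched_fence *s_fence = to_drm_sched_fence(fence);

            if (s_fence && s_fence->sched == sched)
                    return dma_fence_get(&s_fence->scheduled);

            return fence;  /* foreign fence: wait for full completion */
    }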
gpu_scheduler_trace.h
  50  __entry->fence = &sched_job->s_fence->finished;
  77  __entry->fence = &sched_job->s_fence->finished;
/drivers/gpu/drm/amd/amdgpu/
amdgpu_job.c
  52  amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {  in amdgpu_job_timedout()
  139  f = job->base.s_fence ? &job->base.s_fence->finished : hw_fence;  in amdgpu_job_free_resources()
  185  *f = dma_fence_get(&job->base.s_fence->finished);  in amdgpu_job_submit()
  228  &job->base.s_fence->finished,  in amdgpu_job_dependency()
  247  finished = &job->base.s_fence->finished;  in amdgpu_job_run()
  295  struct drm_sched_fence *s_fence = s_job->s_fence;  in amdgpu_job_stop_all_jobs_on_sched() local
  297  dma_fence_signal(&s_fence->scheduled);  in amdgpu_job_stop_all_jobs_on_sched()
  298  dma_fence_set_error(&s_fence->finished, -EHWPOISON);  in amdgpu_job_stop_all_jobs_on_sched()
  299  dma_fence_signal(&s_fence->finished);  in amdgpu_job_stop_all_jobs_on_sched()
  307  struct drm_sched_fence *s_fence = s_job->s_fence;  in amdgpu_job_stop_all_jobs_on_sched() local
  [all …]
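Lines 297-299 are the force-completion idiom used when a scheduler is torn down: signal scheduled, record an error, then signal finished so every waiter wakes with -EHWPOISON instead of blocking forever. A sketch under the same assumptions as above; example_force_complete is a made-up name:

    /* Complete a job without running it, as in
     * amdgpu_job_stop_all_jobs_on_sched(). The error must be set
     * before the finished fence is signalled. */
    static void example_force_complete(struct drm_sched_job *s_job)
    {
            struct drm_sched_fence *s_fence = s_job->s_fence;

            dma_fence_signal(&s_fence->scheduled);
            dma_fence_set_error(&s_fence->finished, -EHWPOISON);
            dma_fence_signal(&s_fence->finished);
    }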
amdgpu_sync.c
  68  struct drm_sched_fence *s_fence = to_drm_sched_fence(f);  in amdgpu_sync_same_dev() local
  70  if (s_fence) {  in amdgpu_sync_same_dev()
  73  ring = container_of(s_fence->sched, struct amdgpu_ring, sched);  in amdgpu_sync_same_dev()
  89  struct drm_sched_fence *s_fence;  in amdgpu_sync_get_owner() local
  95  s_fence = to_drm_sched_fence(f);  in amdgpu_sync_get_owner()
  96  if (s_fence)  in amdgpu_sync_get_owner()
  97  return s_fence->owner;  in amdgpu_sync_get_owner()
  314  struct drm_sched_fence *s_fence = to_drm_sched_fence(f);  in amdgpu_sync_peek_fence() local
  322  if (ring && s_fence) {  in amdgpu_sync_peek_fence()
  326  if (s_fence->sched == &ring->sched) {  in amdgpu_sync_peek_fence()
  [all …]
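The amdgpu_sync.c hits rely on to_drm_sched_fence() returning NULL for fences that did not come from the GPU scheduler, which lets the sync code branch on fence provenance. A minimal sketch of the owner lookup at lines 89-97; the NULL fallback is an assumption, since amdgpu actually returns its own owner constant there:

    /* Return the submitting owner for scheduler fences, NULL for
     * plain hardware fences (assumed fallback). */
    static void *example_fence_owner(struct dma_fence *f)
    {
            struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

            return s_fence ? s_fence->owner : NULL;
    }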
amdgpu_ctx.c
  677  struct drm_sched_fence *s_fence;  in amdgpu_ctx_fence_time() local
  684  s_fence = to_drm_sched_fence(fence);  in amdgpu_ctx_fence_time()
  685  if (!dma_fence_is_signaled(&s_fence->scheduled)) {  in amdgpu_ctx_fence_time()
  689  t1 = s_fence->scheduled.timestamp;  in amdgpu_ctx_fence_time()
  694  if (dma_fence_is_signaled(&s_fence->finished) &&  in amdgpu_ctx_fence_time()
  695  s_fence->finished.timestamp < now)  in amdgpu_ctx_fence_time()
  696  *total += ktime_sub(s_fence->finished.timestamp, t1);  in amdgpu_ctx_fence_time()
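amdgpu_ctx_fence_time() derives per-context GPU time from the two timestamps a scheduler fence carries: scheduled (the job reached the ring) and finished (the job completed). A hedged reconstruction of the accounting step; example_fence_time and its zero return for jobs that have not finished are assumptions filling in the elided branches:

    #include <linux/ktime.h>

    /* Runtime of one job: finished.timestamp - scheduled.timestamp,
     * per amdgpu_ctx_fence_time() lines 685-696. */
    static ktime_t example_fence_time(struct drm_sched_fence *s_fence)
    {
            ktime_t now = ktime_get();

            if (!dma_fence_is_signaled(&s_fence->scheduled))
                    return 0;  /* not started, nothing to charge */

            if (dma_fence_is_signaled(&s_fence->finished) &&
                ktime_before(s_fence->finished.timestamp, now))
                    return ktime_sub(s_fence->finished.timestamp,
                                     s_fence->scheduled.timestamp);

            return 0;
    }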
amdgpu_trace.h
  36  job->base.s_fence->finished.ops->get_timeline_name(&job->base.s_fence->finished)
  180  __entry->context = job->base.s_fence->finished.context;
  181  __entry->seqno = job->base.s_fence->finished.seqno;
  205  __entry->context = job->base.s_fence->finished.context;
  206  __entry->seqno = job->base.s_fence->finished.seqno;
amdgpu_ib.c
  151  fence_ctx = job->base.s_fence ?  in amdgpu_ib_schedule()
  152  job->base.s_fence->scheduled.context : 0;  in amdgpu_ib_schedule()
amdgpu_cs.c
  979  struct drm_sched_fence *s_fence;  in amdgpu_cs_process_fence_dep() local
  982  s_fence = to_drm_sched_fence(fence);  in amdgpu_cs_process_fence_dep()
  983  fence = dma_fence_get(&s_fence->scheduled);  in amdgpu_cs_process_fence_dep()
  1234  p->fence = dma_fence_get(&job->base.s_fence->finished);  in amdgpu_cs_submit()
amdgpu_device.c
  5039  ret = dma_fence_wait_timeout(s_job->s_fence->parent, false, ring->sched.timeout);  in amdgpu_device_recheck_guilty_jobs()
  5072  dma_fence_get(&s_job->s_fence->finished);  in amdgpu_device_recheck_guilty_jobs()
  5073  dma_fence_signal(&s_job->s_fence->finished);  in amdgpu_device_recheck_guilty_jobs()
  5074  dma_fence_put(&s_job->s_fence->finished);  in amdgpu_device_recheck_guilty_jobs()
  5247  if (job && job->base.s_fence->parent &&  in amdgpu_device_gpu_recover()
  5248  dma_fence_is_signaled(job->base.s_fence->parent)) {  in amdgpu_device_gpu_recover()
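The amdgpu_device.c hits show the recovery flow around the parent (hardware) fence: wait for it with the ring's own timeout, then force-signal the scheduler's finished fence so dependent work drains. A sketch of that sequence; example_recheck is hypothetical, and the error handling between lines 5039 and 5072 is elided:

    /* Wait out the hardware fence, then complete the scheduler fence,
     * as in amdgpu_device_recheck_guilty_jobs(). */
    static void example_recheck(struct drm_sched_job *s_job, long timeout)
    {
            dma_fence_wait_timeout(s_job->s_fence->parent, false, timeout);

            dma_fence_get(&s_job->s_fence->finished);
            dma_fence_signal(&s_job->s_fence->finished);
            dma_fence_put(&s_job->s_fence->finished);
    }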
amdgpu_debugfs.c
  1341  if (dma_fence_is_signaled(&s_job->s_fence->finished)) {  in amdgpu_ib_preempt_mark_partial_job()
/drivers/gpu/drm/lima/
lima_trace.h
  25  __entry->context = task->base.s_fence->finished.context;
  26  __entry->seqno = task->base.s_fence->finished.seqno;
lima_sched.c
  181  struct dma_fence *fence = dma_fence_get(&task->base.s_fence->finished);  in lima_sched_context_queue_task()
  230  if (job->s_fence->finished.error < 0)  in lima_sched_run_job()
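lima_sched_run_job() at line 230 checks finished.error before touching the hardware: the scheduler marks killed jobs with an error (e.g. -ECANCELED, see sched_entity.c:439 above), and run_job must skip those. The same guard recurs in the etnaviv, v3d, and panfrost hits below. A sketch; example_run_job and example_hw_submit are placeholders for a driver's real callbacks:

    /* Placeholder for the driver's actual ring submission. */
    static struct dma_fence *example_hw_submit(struct drm_sched_job *job);

    /* drm_sched run_job callback: refuse jobs already marked dead. */
    static struct dma_fence *example_run_job(struct drm_sched_job *job)
    {
            if (unlikely(job->s_fence->finished.error))
                    return NULL;

            return example_hw_submit(job);
    }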
/drivers/gpu/drm/etnaviv/
etnaviv_sched.c
  77  if (likely(!sched_job->s_fence->finished.error))  in etnaviv_sched_run_job()
  168  submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);  in etnaviv_sched_push_job()
/drivers/gpu/drm/v3d/
v3d_sched.c
  106  if (unlikely(job->base.base.s_fence->finished.error))  in v3d_bin_job_run()
  160  if (unlikely(job->base.base.s_fence->finished.error))  in v3d_render_job_run()
v3d_gem.c
  483  job->done_fence = dma_fence_get(&job->base.s_fence->finished);  in v3d_push_job()
/drivers/gpu/drm/panfrost/
panfrost_job.c
  291  job->render_done_fence = dma_fence_get(&job->base.s_fence->finished);  in panfrost_job_push()
  384  if (unlikely(job->base.s_fence->finished.error))  in panfrost_job_run()
/drivers/gpu/drm/msm/
msm_gem_submit.c
  886  submit->user_fence = dma_fence_get(&submit->base.s_fence->finished);  in msm_ioctl_gem_submit()
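The msm hit closes out a pattern visible in nearly every driver above (amdgpu_cs.c:1234, lima_sched.c:181, etnaviv_sched.c:168, v3d_gem.c:483, panfrost_job.c:291): take a reference on the job's finished fence before pushing the job, because afterwards the scheduler owns the job and may free it once it completes. A hedged sketch; the two-argument drm_sched_entity_push_job() matches the kernel era of these hits, and example_push_job is illustrative:

    /* Export the finished fence as the submit's out-fence. The
     * reference must be taken before push: post-push the job belongs
     * to the scheduler. */
    static struct dma_fence *
    example_push_job(struct drm_sched_job *job,
                     struct drm_sched_entity *entity)
    {
            struct dma_fence *out = dma_fence_get(&job->s_fence->finished);

            drm_sched_entity_push_job(job, entity);
            return out;
    }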