Lines Matching full:job
32 * backend operations to the scheduler, like submitting a job to the hardware run queue,
33 * returning the dependencies of a job, etc.
46 * Note that once a job has been taken from the entity's queue and pushed to the
188 * drm_sched_rq_select_entity_rr - Select an entity which could provide a job to run
232 * drm_sched_rq_select_entity_fifo - Select an entity which provides a job to run
260 * drm_sched_job_done - complete a job
261 * @s_job: pointer to the job which is done
263 * Finish the job's fence and wake up the worker thread.
282 * drm_sched_job_done_cb - the callback for a done job
322 * drm_sched_suspend_timeout - Suspend scheduler job timeout
352 * drm_sched_resume_timeout - Resume scheduler job timeout
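The suspend/resume pair above lets a driver park the job watchdog across a window in which the hardware is legitimately unresponsive. A minimal sketch, assuming a hypothetical my_engine that embeds the scheduler; drm_sched_suspend_timeout() returns the remaining timeout in jiffies, which drm_sched_resume_timeout() then re-arms:

    #include <drm/gpu_scheduler.h>

    static void my_engine_firmware_reload(struct my_engine *e)
    {
            unsigned long remaining;

            /* Park the watchdog and remember how much timeout was left. */
            remaining = drm_sched_suspend_timeout(&e->sched);

            my_engine_load_firmware(e);     /* hypothetical slow operation */

            /* Re-arm the watchdog with exactly the remainder. */
            drm_sched_resume_timeout(&e->sched, remaining);
    }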
386 struct drm_sched_job *job; in drm_sched_job_timedout() local
393 job = list_first_entry_or_null(&sched->pending_list, in drm_sched_job_timedout()
396 if (job) { in drm_sched_job_timedout()
398 * Remove the bad job so it cannot be freed by concurrent in drm_sched_job_timedout()
402 list_del_init(&job->list); in drm_sched_job_timedout()
405 status = job->sched->ops->timedout_job(job); in drm_sched_job_timedout()
408 * The guilty job did complete and hence needs to be manually removed in drm_sched_job_timedout()
412 job->sched->ops->free_job(job); in drm_sched_job_timedout()
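For context, a hedged sketch of the driver-side timedout_job callback that drm_sched_job_timedout() invokes above. struct my_engine and my_engine_recover() are hypothetical (recovery itself is sketched after the drm_sched_stop() lines below), and DRM_GPU_SCHED_STAT_NOMINAL tells the scheduler the ring is usable again:

    static enum drm_gpu_sched_stat
    my_timedout_job(struct drm_sched_job *sched_job)
    {
            struct my_engine *e = container_of(sched_job->sched,
                                               struct my_engine, sched);

            /* Spurious timeout? The HW fence may have signaled meanwhile. */
            if (sched_job->s_fence->parent &&
                dma_fence_is_signaled(sched_job->s_fence->parent))
                    return DRM_GPU_SCHED_STAT_NOMINAL;

            my_engine_recover(e, sched_job);    /* see the sketch below */
            return DRM_GPU_SCHED_STAT_NOMINAL;
    }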
430 * @bad: job which caused the time out
433 * Note: the bad job will not be freed, as it might be used later, and so it's
445 * Reinsert the bad job here - now it's safe, as in drm_sched_stop()
447 * bad job at this point - we parked (waited for) any in-progress in drm_sched_stop()
454 * job extracted. in drm_sched_stop()
459 * Iterate the job list from the newest to the oldest entry and either deactivate in drm_sched_stop()
474 * remove job from pending_list. in drm_sched_stop()
482 * Wait for job's HW fence callback to finish using s_job in drm_sched_stop()
485 * The job is still alive, so the fence refcount is at least 1 in drm_sched_stop()
490 * We must keep the bad job alive for later use during in drm_sched_stop()
492 * that the guilty job must be released. in drm_sched_stop()
565 * recovery after a job timeout.
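Putting drm_sched_stop() and the restart together: a sketch of the classic single-ring recovery sequence, assuming the same hypothetical my_engine. Note that drm_sched_resubmit_jobs() and the bool argument of drm_sched_start() match the scheduler API of this kernel version; both have changed in later kernels:

    static void my_engine_recover(struct my_engine *e,
                                  struct drm_sched_job *bad)
    {
            drm_sched_stop(&e->sched, bad);     /* park, detach HW callbacks */
            drm_sched_increase_karma(bad);      /* count against hang_limit */

            my_engine_hw_reset(e);              /* driver-specific reset */

            drm_sched_resubmit_jobs(&e->sched); /* re-run surviving jobs */
            drm_sched_start(&e->sched, true);   /* full recovery */
    }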
613 * drm_sched_job_init - init a scheduler job
614 * @job: scheduler job to init
616 * @owner: job owner for debugging
622 * successfully, even when @job is aborted before drm_sched_job_arm() is called.
631 int drm_sched_job_init(struct drm_sched_job *job, in drm_sched_job_init() argument
644 memset(job, 0, sizeof(*job)); in drm_sched_job_init()
646 job->entity = entity; in drm_sched_job_init()
647 job->s_fence = drm_sched_fence_alloc(entity, owner); in drm_sched_job_init()
648 if (!job->s_fence) in drm_sched_job_init()
651 INIT_LIST_HEAD(&job->list); in drm_sched_job_init()
653 xa_init_flags(&job->dependencies, XA_FLAGS_ALLOC); in drm_sched_job_init()
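A minimal submit-path sketch around drm_sched_job_init(), assuming a hypothetical driver job struct my_job that embeds struct drm_sched_job as ->base. It shows the documented contract that drm_sched_job_cleanup() must run even when the job is aborted before drm_sched_job_arm():

    static int my_submit(struct my_job *mjob, struct drm_sched_entity *entity,
                         void *owner)
    {
            int ret;

            ret = drm_sched_job_init(&mjob->base, entity, owner);
            if (ret)
                    return ret;

            ret = my_collect_dependencies(mjob);    /* hypothetical */
            if (ret) {
                    /* Aborting before arm: cleanup is safe and required. */
                    drm_sched_job_cleanup(&mjob->base);
                    return ret;
            }

            drm_sched_job_arm(&mjob->base);         /* point of no return */
            drm_sched_entity_push_job(&mjob->base);
            return 0;
    }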
660 * drm_sched_job_arm - arm a scheduler job for execution
661 * @job: scheduler job to arm
663 * This arms a scheduler job for execution. Specifically it initializes the
664 * &drm_sched_job.s_fence of @job, so that it can be attached to struct dma_resv
665 * or other places that need to track the completion of this job.
672 void drm_sched_job_arm(struct drm_sched_job *job) in drm_sched_job_arm() argument
675 struct drm_sched_entity *entity = job->entity; in drm_sched_job_arm()
681 job->sched = sched; in drm_sched_job_arm()
682 job->s_priority = entity->rq - sched->sched_rq; in drm_sched_job_arm()
683 job->id = atomic64_inc_return(&sched->job_id_count); in drm_sched_job_arm()
685 drm_sched_fence_init(job->s_fence, job->entity); in drm_sched_job_arm()
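The point of drm_sched_job_arm() initializing s_fence is that the finished fence can be published before the job itself is. A sketch of the arm/attach/push window, assuming a dma_resv-locked GEM object bo that the job writes:

    drm_sched_job_arm(&mjob->base);
    dma_resv_add_fence(bo->resv, &mjob->base.s_fence->finished,
                       DMA_RESV_USAGE_WRITE);
    drm_sched_entity_push_job(&mjob->base);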
690 * drm_sched_job_add_dependency - adds the fence as a job dependency
691 * @job: scheduler job to add the dependencies to
699 int drm_sched_job_add_dependency(struct drm_sched_job *job, in drm_sched_job_add_dependency() argument
714 xa_for_each(&job->dependencies, index, entry) { in drm_sched_job_add_dependency()
720 xa_store(&job->dependencies, index, fence, GFP_KERNEL); in drm_sched_job_add_dependency()
727 ret = xa_alloc(&job->dependencies, &id, fence, xa_limit_32b, GFP_KERNEL); in drm_sched_job_add_dependency()
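Note in the xarray handling above that drm_sched_job_add_dependency() deduplicates per fence context and takes over the fence reference in every outcome. A sketch of making one job wait for another's scheduler fence; prev_job is hypothetical:

    struct dma_fence *fence;
    int ret;

    fence = dma_fence_get(&prev_job->s_fence->finished);
    /* The job consumes the reference, on failure as well as success. */
    ret = drm_sched_job_add_dependency(job, fence);
    if (ret)
            return ret;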
736 * drm_sched_job_add_syncobj_dependency - adds a syncobj's fence as a job dependency
737 * @job: scheduler job to add the dependencies to
742 * This adds the fence matching the given syncobj to @job.
747 int drm_sched_job_add_syncobj_dependency(struct drm_sched_job *job, in drm_sched_job_add_syncobj_dependency() argument
759 return drm_sched_job_add_dependency(job, fence); in drm_sched_job_add_syncobj_dependency()
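A sketch of the typical IOCTL wiring for this helper, assuming a hypothetical uAPI struct with an input syncobj handle and timeline point; file_priv is the submitting client's struct drm_file:

    ret = drm_sched_job_add_syncobj_dependency(job, file_priv,
                                               args->in_sync_handle,
                                               args->in_sync_point);
    if (ret)
            goto err_cleanup;   /* hypothetical unwind label */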
764 * drm_sched_job_add_resv_dependencies - add all fences from the resv to the job
765 * @job: scheduler job to add the dependencies to
769 * This adds all fences matching the given usage from @resv to @job.
775 int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job, in drm_sched_job_add_resv_dependencies() argument
788 ret = drm_sched_job_add_dependency(job, fence); in drm_sched_job_add_resv_dependencies()
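A sketch for one buffer, assuming a GEM object bo the job will write. The reservation lock must be held across the call, and dma_resv_usage_rw(true) selects every fence a writer has to wait for:

    dma_resv_lock(bo->resv, NULL);
    ret = drm_sched_job_add_resv_dependencies(job, bo->resv,
                                              dma_resv_usage_rw(true));
    dma_resv_unlock(bo->resv);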
799 * drm_sched_job_add_implicit_dependencies - adds implicit dependencies as job
801 * @job: scheduler job to add the dependencies to
803 * @write: whether the job might write the object (so we need to depend on
807 * GEM objects used in the job but before updating the reservations with your
813 int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job, in drm_sched_job_add_implicit_dependencies() argument
817 return drm_sched_job_add_resv_dependencies(job, obj->resv, in drm_sched_job_add_implicit_dependencies()
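And the common loop over a submission's buffer list using the wrapper above; the bos[] array with ->obj and ->write is hypothetical, and all reservation locks are assumed to be held already (e.g. via ww_acquire):

    for (i = 0; i < num_bos; i++) {
            ret = drm_sched_job_add_implicit_dependencies(job, bos[i].obj,
                                                          bos[i].write);
            if (ret)
                    goto err_cleanup;
    }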
823 * drm_sched_job_cleanup - clean up scheduler job resources
824 * @job: scheduler job to clean up
828 * Drivers should call this from their error unwind code if @job is aborted
831 * After that point of no return, @job is committed to be executed by the
835 void drm_sched_job_cleanup(struct drm_sched_job *job) in drm_sched_job_cleanup() argument
840 if (kref_read(&job->s_fence->finished.refcount)) { in drm_sched_job_cleanup()
842 dma_fence_put(&job->s_fence->finished); in drm_sched_job_cleanup()
844 /* aborted job before committing to run it */ in drm_sched_job_cleanup()
845 drm_sched_fence_free(job->s_fence); in drm_sched_job_cleanup()
848 job->s_fence = NULL; in drm_sched_job_cleanup()
850 xa_for_each(&job->dependencies, index, fence) { in drm_sched_job_cleanup()
853 xa_destroy(&job->dependencies); in drm_sched_job_cleanup()
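The non-aborted counterpart to the error-unwind case: a sketch of a driver free_job callback, which the scheduler invokes once the job is finished and which pairs with drm_sched_job_init() via drm_sched_job_cleanup(). struct my_job and my_job_free() are hypothetical:

    static void my_free_job(struct drm_sched_job *sched_job)
    {
            struct my_job *mjob = container_of(sched_job, struct my_job, base);

            drm_sched_job_cleanup(sched_job);   /* drop fences, deps xarray */
            my_job_free(mjob);
    }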
911 * drm_sched_get_cleanup_job - fetch the next finished job to be destroyed
915 * Returns the next finished job from the pending list (if there is one)
921 struct drm_sched_job *job, *next; in drm_sched_get_cleanup_job() local
925 job = list_first_entry_or_null(&sched->pending_list, in drm_sched_get_cleanup_job()
928 if (job && dma_fence_is_signaled(&job->s_fence->finished)) { in drm_sched_get_cleanup_job()
929 /* remove job from pending_list */ in drm_sched_get_cleanup_job()
930 list_del_init(&job->list); in drm_sched_get_cleanup_job()
932 /* cancel this job's TO timer */ in drm_sched_get_cleanup_job()
940 dma_fence_timestamp(&job->s_fence->finished); in drm_sched_get_cleanup_job()
941 /* start TO timer for next job */ in drm_sched_get_cleanup_job()
945 job = NULL; in drm_sched_get_cleanup_job()
950 return job; in drm_sched_get_cleanup_job()
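Paraphrasing how the scheduler's main loop consumes this helper (drm_sched_get_cleanup_job() is internal to sched_main.c, so drivers only ever see the resulting free_job callback):

    struct drm_sched_job *cleanup_job;

    cleanup_job = drm_sched_get_cleanup_job(sched);
    if (cleanup_job)
            sched->ops->free_job(cleanup_job);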
1083 * @hang_limit: number of times to allow a job to hang before dropping it
1177 * @bad: The job guilty of the timeout
1179 * Increment on every hang caused by the 'bad' job. If this exceeds the hang
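Where @hang_limit plugs in: a hedged drm_sched_init() call as the signature looked around this kernel version (it has changed several times since); e, my_sched_ops and dev are hypothetical:

    ret = drm_sched_init(&e->sched, &my_sched_ops,
                         64,                    /* hw_submission: ring depth */
                         2,                     /* hang_limit before dropping */
                         msecs_to_jiffies(500), /* per-job timeout */
                         NULL,                  /* timeout_wq: use system_wq */
                         NULL,                  /* score */
                         "my-ring", dev);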