Lines Matching refs:sched in drivers/gpu/drm/scheduler/sched_main.c (DRM GPU scheduler)
71 static void drm_sched_rq_init(struct drm_gpu_scheduler *sched, in drm_sched_rq_init() argument
77 rq->sched = sched; in drm_sched_rq_init()
94 atomic_inc(rq->sched->score); in drm_sched_rq_add_entity()
113 atomic_dec(rq->sched->score); in drm_sched_rq_remove_entity()
173 struct drm_gpu_scheduler *sched = s_fence->sched; in drm_sched_job_done() local
175 atomic_dec(&sched->hw_rq_count); in drm_sched_job_done()
176 atomic_dec(sched->score); in drm_sched_job_done()
183 wake_up_interruptible(&sched->wake_up_worker); in drm_sched_job_done()
209 struct drm_gpu_scheduler *sched = entity->rq->sched; in drm_sched_dependency_optimized() local
217 if (s_fence && s_fence->sched == sched) in drm_sched_dependency_optimized()
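
drm_sched_dependency_optimized() reports whether a dependency fence was produced by the same scheduler instance the entity runs on, in which case in-order submission already enforces the dependency and no explicit wait is needed. A minimal sketch of how a driver might use it while filtering dependencies (for example when deciding which reservation-object fences need an explicit wait); the helper name is hypothetical and assumes <drm/gpu_scheduler.h> and <linux/dma-fence.h>:

    /* Hypothetical filter: return the fence only if the scheduler really
     * has to wait for it before running jobs from this entity. */
    static struct dma_fence *my_filter_dependency(struct dma_fence *fence,
                                                  struct drm_sched_entity *entity)
    {
            if (drm_sched_dependency_optimized(fence, entity))
                    return NULL;            /* same scheduler: ordering suffices */

            return dma_fence_get(fence);    /* cross-scheduler: explicit wait */
    }
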
231 static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched) in drm_sched_start_timeout() argument
233 if (sched->timeout != MAX_SCHEDULE_TIMEOUT && in drm_sched_start_timeout()
234 !list_empty(&sched->pending_list)) in drm_sched_start_timeout()
235 queue_delayed_work(sched->timeout_wq, &sched->work_tdr, sched->timeout); in drm_sched_start_timeout()
245 void drm_sched_fault(struct drm_gpu_scheduler *sched) in drm_sched_fault() argument
247 mod_delayed_work(sched->timeout_wq, &sched->work_tdr, 0); in drm_sched_fault()
263 unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched) in drm_sched_suspend_timeout() argument
267 sched_timeout = sched->work_tdr.timer.expires; in drm_sched_suspend_timeout()
273 if (mod_delayed_work(sched->timeout_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT) in drm_sched_suspend_timeout()
277 return sched->timeout; in drm_sched_suspend_timeout()
289 void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched, in drm_sched_resume_timeout() argument
292 spin_lock(&sched->job_list_lock); in drm_sched_resume_timeout()
294 if (list_empty(&sched->pending_list)) in drm_sched_resume_timeout()
295 cancel_delayed_work(&sched->work_tdr); in drm_sched_resume_timeout()
297 mod_delayed_work(sched->timeout_wq, &sched->work_tdr, remaining); in drm_sched_resume_timeout()
299 spin_unlock(&sched->job_list_lock); in drm_sched_resume_timeout()
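
drm_sched_suspend_timeout() and drm_sched_resume_timeout() let a driver disarm the timeout handler while it deliberately stalls the ring (for example around a reset or a preemption test) and then restore whatever budget was left. A sketch, assuming a driver-side ring object that embeds the scheduler; error handling omitted:

    unsigned long remaining;

    /* Park the TDR timer before a stall the driver knows about. */
    remaining = drm_sched_suspend_timeout(&ring->sched);

    /* ... operation that legitimately blocks job completion ... */

    /* Re-arm the timer with the time that was still left. */
    drm_sched_resume_timeout(&ring->sched, remaining);
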
305 struct drm_gpu_scheduler *sched = s_job->sched; in drm_sched_job_begin() local
307 spin_lock(&sched->job_list_lock); in drm_sched_job_begin()
308 list_add_tail(&s_job->list, &sched->pending_list); in drm_sched_job_begin()
309 drm_sched_start_timeout(sched); in drm_sched_job_begin()
310 spin_unlock(&sched->job_list_lock); in drm_sched_job_begin()
315 struct drm_gpu_scheduler *sched; in drm_sched_job_timedout() local
319 sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work); in drm_sched_job_timedout()
322 spin_lock(&sched->job_list_lock); in drm_sched_job_timedout()
323 job = list_first_entry_or_null(&sched->pending_list, in drm_sched_job_timedout()
333 spin_unlock(&sched->job_list_lock); in drm_sched_job_timedout()
335 status = job->sched->ops->timedout_job(job); in drm_sched_job_timedout()
341 if (sched->free_guilty) { in drm_sched_job_timedout()
342 job->sched->ops->free_job(job); in drm_sched_job_timedout()
343 sched->free_guilty = false; in drm_sched_job_timedout()
346 spin_unlock(&sched->job_list_lock); in drm_sched_job_timedout()
350 spin_lock(&sched->job_list_lock); in drm_sched_job_timedout()
351 drm_sched_start_timeout(sched); in drm_sched_job_timedout()
352 spin_unlock(&sched->job_list_lock); in drm_sched_job_timedout()
389 void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad) in drm_sched_stop() argument
393 kthread_park(sched->thread); in drm_sched_stop()
402 if (bad && bad->sched == sched) in drm_sched_stop()
407 list_add(&bad->list, &sched->pending_list); in drm_sched_stop()
415 list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list, in drm_sched_stop()
420 atomic_dec(&sched->hw_rq_count); in drm_sched_stop()
426 spin_lock(&sched->job_list_lock); in drm_sched_stop()
428 spin_unlock(&sched->job_list_lock); in drm_sched_stop()
444 sched->ops->free_job(s_job); in drm_sched_stop()
446 sched->free_guilty = true; in drm_sched_stop()
456 cancel_delayed_work(&sched->work_tdr); in drm_sched_stop()
468 void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery) in drm_sched_start() argument
478 list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) { in drm_sched_start()
481 atomic_inc(&sched->hw_rq_count); in drm_sched_start()
499 spin_lock(&sched->job_list_lock); in drm_sched_start()
500 drm_sched_start_timeout(sched); in drm_sched_start()
501 spin_unlock(&sched->job_list_lock); in drm_sched_start()
504 kthread_unpark(sched->thread); in drm_sched_start()
514 void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched) in drm_sched_resubmit_jobs() argument
516 drm_sched_resubmit_jobs_ext(sched, INT_MAX); in drm_sched_resubmit_jobs()
527 void drm_sched_resubmit_jobs_ext(struct drm_gpu_scheduler *sched, int max) in drm_sched_resubmit_jobs_ext() argument
535 list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) { in drm_sched_resubmit_jobs_ext()
541 if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) { in drm_sched_resubmit_jobs_ext()
550 fence = sched->ops->run_job(s_job); in drm_sched_resubmit_jobs_ext()
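
drm_sched_stop(), drm_sched_resubmit_jobs() and drm_sched_start() are the building blocks of the usual timeout-recovery sequence driven from a driver's timedout_job callback. A hedged sketch of that sequence for a single scheduler; the reset step itself is driver specific and only indicated by a comment:

    static enum drm_gpu_sched_stat my_timedout_job(struct drm_sched_job *bad)
    {
            struct drm_gpu_scheduler *sched = bad->sched;

            drm_sched_stop(sched, bad);        /* park the thread, detach fence callbacks */
            drm_sched_increase_karma(bad);     /* charge the offending context */

            /* ... driver-specific engine/GPU reset ... */

            drm_sched_resubmit_jobs(sched);    /* re-run the pending jobs */
            drm_sched_start(sched, true);      /* re-add callbacks, unpark the thread */

            return DRM_GPU_SCHED_STAT_NOMINAL;
    }

Drivers with several rings normally stop every affected scheduler before resetting and restart them all afterwards; the single-scheduler form above is only the minimal shape.
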
581 struct drm_gpu_scheduler *sched; in drm_sched_job_init() local
587 sched = entity->rq->sched; in drm_sched_job_init()
589 job->sched = sched; in drm_sched_job_init()
591 job->s_priority = entity->rq - sched->sched_rq; in drm_sched_job_init()
595 job->id = atomic64_inc_return(&sched->job_id_count); in drm_sched_job_init()
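
On the submission side, drm_sched_job_init() binds a job to the scheduler behind the chosen entity before the driver queues it. A minimal sketch, assuming a driver job that embeds struct drm_sched_job as "base" and a context that owns the entity; the push helper's signature has changed across kernel versions, so treat the final call as illustrative:

    int ret;

    ret = drm_sched_job_init(&my_job->base, &ctx->entity, ctx);
    if (ret)
            return ret;

    /* ... collect dependencies, reserve fence slots ... */

    drm_sched_entity_push_job(&my_job->base, &ctx->entity);
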
622 static bool drm_sched_ready(struct drm_gpu_scheduler *sched) in drm_sched_ready() argument
624 return atomic_read(&sched->hw_rq_count) < in drm_sched_ready()
625 sched->hw_submission_limit; in drm_sched_ready()
634 void drm_sched_wakeup(struct drm_gpu_scheduler *sched) in drm_sched_wakeup() argument
636 if (drm_sched_ready(sched)) in drm_sched_wakeup()
637 wake_up_interruptible(&sched->wake_up_worker); in drm_sched_wakeup()
648 drm_sched_select_entity(struct drm_gpu_scheduler *sched) in drm_sched_select_entity() argument
653 if (!drm_sched_ready(sched)) in drm_sched_select_entity()
658 entity = drm_sched_rq_select_entity(&sched->sched_rq[i]); in drm_sched_select_entity()
675 drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched) in drm_sched_get_cleanup_job() argument
683 if ((sched->timeout != MAX_SCHEDULE_TIMEOUT && in drm_sched_get_cleanup_job()
684 !cancel_delayed_work(&sched->work_tdr)) || in drm_sched_get_cleanup_job()
688 spin_lock(&sched->job_list_lock); in drm_sched_get_cleanup_job()
690 job = list_first_entry_or_null(&sched->pending_list, in drm_sched_get_cleanup_job()
697 next = list_first_entry_or_null(&sched->pending_list, in drm_sched_get_cleanup_job()
705 drm_sched_start_timeout(sched); in drm_sched_get_cleanup_job()
708 spin_unlock(&sched->job_list_lock); in drm_sched_get_cleanup_job()
725 struct drm_gpu_scheduler *sched, *picked_sched = NULL; in drm_sched_pick_best() local
730 sched = sched_list[i]; in drm_sched_pick_best()
732 if (!sched->ready) { in drm_sched_pick_best()
734 sched->name); in drm_sched_pick_best()
738 num_score = atomic_read(sched->score); in drm_sched_pick_best()
741 picked_sched = sched; in drm_sched_pick_best()
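
drm_sched_pick_best() is what lets an entity initialised with several schedulers land on the least-loaded one, using the per-scheduler score that the rq and job paths above increment and decrement. A sketch of handing such a list to drm_sched_entity_init(); the array size and priority are placeholders:

    struct drm_gpu_scheduler *sched_list[NUM_RINGS];
    int ret;

    /* sched_list[] is assumed to be filled with one scheduler per capable ring. */
    ret = drm_sched_entity_init(&ctx->entity, DRM_SCHED_PRIORITY_NORMAL,
                                sched_list, NUM_RINGS, NULL);
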
756 static bool drm_sched_blocked(struct drm_gpu_scheduler *sched) in drm_sched_blocked() argument
775 struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param; in drm_sched_main() local
787 wait_event_interruptible(sched->wake_up_worker, in drm_sched_main()
788 (cleanup_job = drm_sched_get_cleanup_job(sched)) || in drm_sched_main()
789 (!drm_sched_blocked(sched) && in drm_sched_main()
790 (entity = drm_sched_select_entity(sched))) || in drm_sched_main()
794 sched->ops->free_job(cleanup_job); in drm_sched_main()
796 drm_sched_start_timeout(sched); in drm_sched_main()
811 atomic_inc(&sched->hw_rq_count); in drm_sched_main()
815 fence = sched->ops->run_job(sched_job); in drm_sched_main()
836 wake_up(&sched->job_scheduled); in drm_sched_main()
856 int drm_sched_init(struct drm_gpu_scheduler *sched, in drm_sched_init() argument
863 sched->ops = ops; in drm_sched_init()
864 sched->hw_submission_limit = hw_submission; in drm_sched_init()
865 sched->name = name; in drm_sched_init()
866 sched->timeout = timeout; in drm_sched_init()
867 sched->timeout_wq = timeout_wq ? : system_wq; in drm_sched_init()
868 sched->hang_limit = hang_limit; in drm_sched_init()
869 sched->score = score ? score : &sched->_score; in drm_sched_init()
871 drm_sched_rq_init(sched, &sched->sched_rq[i]); in drm_sched_init()
873 init_waitqueue_head(&sched->wake_up_worker); in drm_sched_init()
874 init_waitqueue_head(&sched->job_scheduled); in drm_sched_init()
875 INIT_LIST_HEAD(&sched->pending_list); in drm_sched_init()
876 spin_lock_init(&sched->job_list_lock); in drm_sched_init()
877 atomic_set(&sched->hw_rq_count, 0); in drm_sched_init()
878 INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout); in drm_sched_init()
879 atomic_set(&sched->_score, 0); in drm_sched_init()
880 atomic64_set(&sched->job_id_count, 0); in drm_sched_init()
883 sched->thread = kthread_run(drm_sched_main, sched, sched->name); in drm_sched_init()
884 if (IS_ERR(sched->thread)) { in drm_sched_init()
885 ret = PTR_ERR(sched->thread); in drm_sched_init()
886 sched->thread = NULL; in drm_sched_init()
891 sched->ready = true; in drm_sched_init()
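
Putting the init path together: a driver fills a drm_sched_backend_ops table and calls drm_sched_init() once per ring, then tears the scheduler down with drm_sched_fini(). The sketch below uses the parameters visible in this listing (ops, submission limit, hang limit, timeout, timeout workqueue, optional score pointer, name); the exact argument order and any extra parameters vary between kernel releases, and the callback bodies are placeholders:

    static const struct drm_sched_backend_ops my_sched_ops = {
            .dependency   = my_dependency,          /* optional: next fence the job must wait for */
            .run_job      = my_run_job,             /* hand the job to the HW, return its fence */
            .timedout_job = my_timedout_job,        /* recovery sequence sketched earlier */
            .free_job     = my_free_job,            /* release the driver's job state */
    };

    ret = drm_sched_init(&ring->sched, &my_sched_ops,
                         64,                        /* hw_submission limit */
                         2,                         /* hang_limit */
                         msecs_to_jiffies(500),     /* timeout */
                         NULL,                      /* timeout_wq: falls back to system_wq */
                         NULL,                      /* score: use the scheduler's own counter */
                         ring->name);
    if (ret)
            return ret;

    /* ... at teardown ... */
    drm_sched_fini(&ring->sched);
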
903 void drm_sched_fini(struct drm_gpu_scheduler *sched) in drm_sched_fini() argument
908 if (sched->thread) in drm_sched_fini()
909 kthread_stop(sched->thread); in drm_sched_fini()
912 struct drm_sched_rq *rq = &sched->sched_rq[i]; in drm_sched_fini()
930 wake_up_all(&sched->job_scheduled); in drm_sched_fini()
933 cancel_delayed_work_sync(&sched->work_tdr); in drm_sched_fini()
935 sched->ready = false; in drm_sched_fini()
951 struct drm_gpu_scheduler *sched = bad->sched; in drm_sched_increase_karma_ext() local
965 struct drm_sched_rq *rq = &sched->sched_rq[i]; in drm_sched_increase_karma_ext()