Lines matching full:entity (DRM GPU scheduler, cleaned excerpts)
/* From the file-header overview: the jobs in an entity are always scheduled
 * in the order that they were pushed. */
static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
/**
 * drm_sched_rq_add_entity - add an entity
 * @rq: scheduler run queue
 * @entity: scheduler entity
 *
 * Adds a scheduler entity to the run queue.
 */
static void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
                                    struct drm_sched_entity *entity)
{
        if (!list_empty(&entity->list))
                return;
        spin_lock(&rq->lock);
        list_add_tail(&entity->list, &rq->entities);
        spin_unlock(&rq->lock);
}
/**
 * drm_sched_rq_remove_entity - remove an entity
 * @rq: scheduler run queue
 * @entity: scheduler entity
 *
 * Removes a scheduler entity from the run queue.
 */
static void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
                                       struct drm_sched_entity *entity)
{
        if (list_empty(&entity->list))
                return;
        spin_lock(&rq->lock);
        list_del_init(&entity->list);
        if (rq->current_entity == entity)
                rq->current_entity = NULL;
        spin_unlock(&rq->lock);
}
/**
 * drm_sched_rq_select_entity - Select an entity which could provide a job to run
 * @rq: scheduler run queue to check
 *
 * Try to find a ready entity, returns NULL if none found.
 */
static struct drm_sched_entity *
drm_sched_rq_select_entity(struct drm_sched_rq *rq)
{
        struct drm_sched_entity *entity;

        spin_lock(&rq->lock);

        /* Resume the scan after the entity that was picked last time. */
        entity = rq->current_entity;
        if (entity) {
                list_for_each_entry_continue(entity, &rq->entities, list) {
                        if (drm_sched_entity_is_ready(entity)) {
                                rq->current_entity = entity;
                                spin_unlock(&rq->lock);
                                return entity;
                        }
                }
        }

        /* Wrap around to the head of the list. */
        list_for_each_entry(entity, &rq->entities, list) {
                if (drm_sched_entity_is_ready(entity)) {
                        rq->current_entity = entity;
                        spin_unlock(&rq->lock);
                        return entity;
                }
                /* Came full circle without finding anything ready. */
                if (entity == rq->current_entity)
                        break;
        }

        spin_unlock(&rq->lock);
        return NULL;
}
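To make the round-robin behaviour concrete, here is a small assumed trace
(entity names are hypothetical, not from the source):

        /*
         * Assume rq->entities = [A, B, C] and rq->current_entity = B.
         * The first loop resumes after B and tests C; the second loop
         * wraps to the head and tests A, then stops once it reaches B
         * again.  The first ready entity found becomes the new
         * current_entity, so ready entities are served round-robin.
         */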
/**
 * drm_sched_entity_init - Init a context entity used by the scheduler when
 * submitting to a HW ring.
 *
 * @entity: scheduler entity to init
 * @rq_list: the list of run queues on which jobs from this
 *           entity can be submitted
 * @num_rq_list: number of run queues in rq_list
 * @guilty: atomic_t set to 1 when a job on this queue
 *          is found to be guilty causing a timeout
 *
 * Note: rq_list should have at least one element to schedule the entity.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_sched_entity_init(struct drm_sched_entity *entity,
                          struct drm_sched_rq **rq_list,
                          unsigned int num_rq_list,
                          atomic_t *guilty)
{
        if (!(entity && rq_list && num_rq_list > 0 && rq_list[0]))
                return -EINVAL;

        memset(entity, 0, sizeof(struct drm_sched_entity));
        INIT_LIST_HEAD(&entity->list);
        entity->rq = rq_list[0];
        entity->guilty = guilty;
        entity->last_scheduled = NULL;

        spin_lock_init(&entity->rq_lock);
        spsc_queue_init(&entity->job_queue);

        atomic_set(&entity->fence_seq, 0);
        /* Two fence contexts: one for scheduled, one for finished fences. */
        entity->fence_context = dma_fence_context_alloc(2);

        return 0;
}
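A minimal init sketch, assuming a driver-owned context ctx embedding a
drm_sched_entity and a single scheduler instance sched (both names
hypothetical):

        /* Pick one run queue; normal priority is a typical default. */
        struct drm_sched_rq *rq = &sched->sched_rq[DRM_SCHED_PRIORITY_NORMAL];
        int r;

        r = drm_sched_entity_init(&ctx->entity, &rq, 1, NULL);
        if (r)
                return r;

Passing NULL for @guilty simply opts the entity out of the guilty tracking
described further down.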
/**
 * drm_sched_entity_is_idle - Check if entity is idle
 * @entity: scheduler entity
 *
 * Returns true if the entity does not have any unscheduled jobs.
 */
static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
        rmb();

        if (list_empty(&entity->list) ||
            spsc_queue_peek(&entity->job_queue) == NULL)
                return true;

        return false;
}
/**
 * drm_sched_entity_is_ready - Check if entity is ready
 * @entity: scheduler entity
 *
 * Return true if the entity could provide a job.
 */
static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
{
        if (spsc_queue_peek(&entity->job_queue) == NULL)
                return false;

        /* entity->dependency is cleared from fence callbacks,
         * hence the READ_ONCE(). */
        if (READ_ONCE(entity->dependency))
                return false;

        return true;
}
/**
 * drm_sched_entity_flush - Flush a context entity
 * @entity: scheduler entity
 * @timeout: time to wait, in jiffies, for the job queue to become empty
 *
 * First half of entity teardown: waits until the entity is idle, removes the
 * entity from the run queue and returns an error when the process was killed.
 *
 * Returns the remaining time in jiffies left from the input timeout.
 */
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
{
        struct drm_gpu_scheduler *sched;
        struct task_struct *last_user;
        long ret = timeout;

        sched = entity->rq->sched;
        /* An exiting process cannot queue more jobs: consume what is
         * queued, or discard it on SIGKILL. */
        if (current->flags & PF_EXITING) {
                if (timeout)
                        ret = wait_event_timeout(sched->job_scheduled,
                                                 drm_sched_entity_is_idle(entity),
                                                 timeout);
        } else {
                wait_event_killable(sched->job_scheduled,
                                    drm_sched_entity_is_idle(entity));
        }

        /* For a killed process, disable any further enqueue right now. */
        last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
        if ((!last_user || last_user == current->group_leader) &&
            (current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
                drm_sched_rq_remove_entity(entity->rq, entity);

        return ret;
}
/**
 * drm_sched_entity_fini - Destroy a context entity
 * @entity: scheduler entity
 *
 * Second half of entity teardown: cleans up the remaining jobs of the
 * entity and signals all of them with an error code if the process was
 * killed.
 */
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
        struct drm_gpu_scheduler *sched;

        sched = entity->rq->sched;
        drm_sched_rq_remove_entity(entity->rq, entity);

        /* Queued jobs were not fully consumed; forcefully remove them here. */
        if (spsc_queue_peek(&entity->job_queue)) {
                struct drm_sched_job *job;
                int r;

                /* ... park the scheduler thread momentarily ... */

                /* Drop a pending dependency callback, if any. */
                if (entity->dependency) {
                        dma_fence_remove_callback(entity->dependency,
                                                  &entity->cb);
                        dma_fence_put(entity->dependency);
                        entity->dependency = NULL;
                }

                while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
                        /* ... flag each job's finished fence with an error ... */

                        /*
                         * When the pipeline is hung by an older entity, a new
                         * entity may never get the chance to submit its first
                         * job to the hardware, so entity->last_scheduled can
                         * still be NULL here.
                         */
                        if (!entity->last_scheduled) {
                                /* ... complete the job immediately ... */
                        } else {
                                r = dma_fence_add_callback(entity->last_scheduled,
                                                           &job->finish_cb,
                                                           drm_sched_entity_kill_jobs_cb);
                                /* ... fall back to immediate completion on -ENOENT ... */
                        }
                }
        }

        dma_fence_put(entity->last_scheduled);
        entity->last_scheduled = NULL;
}
/**
 * drm_sched_entity_destroy - Destroy a context entity
 * @entity: scheduler entity
 *
 * Flushes the entity and then destroys it; a convenience wrapper around the
 * two-step teardown above.
 */
void drm_sched_entity_destroy(struct drm_sched_entity *entity)
{
        drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
        drm_sched_entity_fini(entity);
}
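A hedged teardown sketch for the same hypothetical ctx as above:

        /* One-shot teardown: */
        drm_sched_entity_destroy(&ctx->entity);

        /* Equivalent two-step form, e.g. flush when the file is closed
         * and finish later when the context is finally destroyed: */
        drm_sched_entity_flush(&ctx->entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
        /* ... */
        drm_sched_entity_fini(&ctx->entity);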
/* Dependency-fence callback: clear the dependency and kick the scheduler. */
static void drm_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
{
        struct drm_sched_entity *entity =
                container_of(cb, struct drm_sched_entity, cb);
        entity->dependency = NULL;
        dma_fence_put(f);
        drm_sched_wakeup(entity->rq->sched);
}

/* Same, but without the wakeup (used for scheduled fences, see below). */
static void drm_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
{
        struct drm_sched_entity *entity =
                container_of(cb, struct drm_sched_entity, cb);
        entity->dependency = NULL;
        dma_fence_put(f);
}
/**
 * drm_sched_entity_set_rq - Sets the run queue for an entity
 * @entity: scheduler entity
 * @rq: scheduler run queue
 *
 * Sets the run queue for an entity and removes the entity from the previous
 * run queue in which it was present.
 */
void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
                             struct drm_sched_rq *rq)
{
        if (entity->rq == rq)
                return;

        BUG_ON(!rq);

        spin_lock(&entity->rq_lock);
        drm_sched_rq_remove_entity(entity->rq, entity);
        entity->rq = rq;
        drm_sched_rq_add_entity(rq, entity);
        spin_unlock(&entity->rq_lock);
}
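Run queues map to priorities, so this is effectively how an entity's priority
is changed at runtime. A sketch; the priority index used is an assumption
about this kernel's drm_sched_priority enum:

        /* Move the hypothetical ctx->entity to a higher-priority queue. */
        struct drm_sched_rq *rq = &sched->sched_rq[DRM_SCHED_PRIORITY_HIGH_SW];

        drm_sched_entity_set_rq(&ctx->entity, rq);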
/**
 * drm_sched_dependency_optimized - check whether waiting can be skipped
 * @fence: the dependency fence
 * @entity: the entity which depends on the above fence
 *
 * Returns true if the dependency can be optimized, i.e. the scheduler will
 * already order the job behind @fence.
 */
bool drm_sched_dependency_optimized(struct dma_fence *fence,
                                    struct drm_sched_entity *entity)
{
        struct drm_gpu_scheduler *sched = entity->rq->sched;

        /* ... */
        /* A fence from this entity's own context is ordered by definition. */
        if (fence->context == entity->fence_context)
                return true;
        /* ... a fence from the same scheduler also qualifies ... */
}
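A hedged driver-side use: skip an explicit ring synchronization when the
scheduler already orders the job behind the fence (my_ring_emit_pipeline_sync
is a hypothetical helper, not a real API):

        if (!drm_sched_dependency_optimized(fence, &ctx->entity))
                my_ring_emit_pipeline_sync(ring);   /* hypothetical */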
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
        struct drm_gpu_scheduler *sched = entity->rq->sched;
        struct dma_fence *fence = entity->dependency;
        struct drm_sched_fence *s_fence;

        if (fence->context == entity->fence_context ||
            fence->context == entity->fence_context + 1) {
                /*
                 * Fence is a scheduled/finished fence from a job which
                 * belongs to the same entity; we can ignore fences from
                 * ourselves.
                 */
                dma_fence_put(entity->dependency);
                return false;
        }

        s_fence = to_drm_sched_fence(fence);
        if (s_fence && s_fence->sched == sched) {
                /* Same scheduler: only wait for the job to be scheduled. */
                fence = dma_fence_get(&s_fence->scheduled);
                dma_fence_put(entity->dependency);
                entity->dependency = fence;
                if (!dma_fence_add_callback(fence, &entity->cb,
                                            drm_sched_entity_clear_dep))
                        return true;

                /* Ignore it when it is already scheduled. */
                dma_fence_put(fence);
                return false;
        }

        if (!dma_fence_add_callback(entity->dependency, &entity->cb,
                                    drm_sched_entity_wakeup))
                return true;

        dma_fence_put(entity->dependency);
        return false;
}
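One detail worth spelling out, grounded in drm_sched_entity_init() above:
dma_fence_context_alloc(2) reserves two consecutive fence contexts per
entity, which is exactly what the context / context + 1 test relies on:

        /*
         * entity->fence_context     : this entity's "scheduled" fences
         * entity->fence_context + 1 : this entity's "finished" fences
         */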
static struct drm_sched_job *
drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
        struct drm_gpu_scheduler *sched = entity->rq->sched;
        struct drm_sched_job *sched_job = to_drm_sched_job(
                                        spsc_queue_peek(&entity->job_queue));

        if (!sched_job)
                return NULL;

        /* Resolve dependencies one by one; bail out whenever a callback
         * was installed and we have to wait. */
        while ((entity->dependency = sched->ops->dependency(sched_job, entity)))
                if (drm_sched_entity_add_dependency_cb(entity))
                        return NULL;

        /* Skip jobs from an entity that was marked guilty. */
        if (entity->guilty && atomic_read(entity->guilty))
                dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);

        dma_fence_put(entity->last_scheduled);
        entity->last_scheduled = dma_fence_get(&sched_job->s_fence->finished);

        spsc_queue_pop(&entity->job_queue);
        return sched_job;
}
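The shape of the dependency callback can be read off the call above: it
receives the job and the entity and returns the next fence to wait on, or
NULL once none remain. A sketch with hypothetical my_* names:

        static struct dma_fence *
        my_sched_dependency(struct drm_sched_job *sched_job,
                            struct drm_sched_entity *s_entity)
        {
                struct my_job *job = to_my_job(sched_job);   /* hypothetical */

                /* Hand the scheduler one unsignaled fence at a time;
                 * return NULL once every dependency is satisfied. */
                return my_job_next_unsignaled_fence(job);    /* hypothetical */
        }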
/**
 * drm_sched_entity_push_job - Submit a job to the entity's job queue
 * @sched_job: job to submit
 * @entity: scheduler entity
 *
 * Note: to guarantee that the order of insertion into the queue matches the
 * job's fence sequence number, this function should be called together with
 * drm_sched_job_init() under a common lock.
 */
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
                               struct drm_sched_entity *entity)
{
        struct drm_gpu_scheduler *sched = sched_job->sched;
        bool first;

        trace_drm_sched_job(sched_job, entity);

        WRITE_ONCE(entity->last_user, current->group_leader);
        first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);

        /* Only the first job on an idle entity takes the locked path;
         * later pushes just append to the lockless spsc queue. */
        if (first) {
                /* Add the entity to the run queue. */
                spin_lock(&entity->rq_lock);
                if (!entity->rq) {
                        DRM_ERROR("Trying to push to a killed entity\n");
                        spin_unlock(&entity->rq_lock);
                        return;
                }
                drm_sched_rq_add_entity(entity->rq, entity);
                spin_unlock(&entity->rq_lock);
                drm_sched_wakeup(sched);
        }
}
/* Excerpt from drm_sched_hw_job_reset(): after a hang, walk the run queues
 * looking for the entity that owns the bad job and mark it guilty. */
struct drm_sched_entity *entity, *tmp;
/* ... */
list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
        if (bad->s_fence->scheduled.context == entity->fence_context) {
                if (entity->guilty)
                        atomic_set(entity->guilty, 1);
                break;
        }
}
/* ... */
if (&entity->list != &rq->entities)
        break;      /* owning entity found; stop scanning other run queues */
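Cross-reference, tying this back to the code above:

        /*
         * The guilty flag set here is read back in drm_sched_entity_pop_job():
         * once it is non-zero, further jobs from the entity have their
         * finished fence flagged with -ECANCELED instead of running normally.
         */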
/**
 * drm_sched_job_init - init a scheduler job
 * @job: scheduler job to init
 * @entity: scheduler entity to use
 * @owner: job owner for debugging
 */
int drm_sched_job_init(struct drm_sched_job *job,
                       struct drm_sched_entity *entity,
                       void *owner)
{
        struct drm_gpu_scheduler *sched = entity->rq->sched;

        job->entity = entity;
        /* Derive the job's priority from the entity's current run queue. */
        job->s_priority = entity->rq - sched->sched_rq;
        job->s_fence = drm_sched_fence_create(entity, owner);
        if (!job->s_fence)
                return -ENOMEM;
        /* ... */
        return 0;
}
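Putting the two entry points together, a minimal submission sketch, reusing
the hypothetical ctx from the init example and a hypothetical driver job type
my_job that embeds a struct drm_sched_job base:

        int r;

        /* Init and push under one common lock so that fence sequence
         * numbers match queue order (see the push_job kernel-doc above). */
        r = drm_sched_job_init(&job->base, &ctx->entity, owner);
        if (r)
                return r;
        /* ... fill in the hardware-specific payload of the job ... */
        drm_sched_entity_push_job(&job->base, &ctx->entity);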
/**
 * drm_sched_select_entity - Select next entity to process
 * @sched: scheduler instance
 *
 * Returns the entity to process or NULL if none are found.
 */
static struct drm_sched_entity *
drm_sched_select_entity(struct drm_gpu_scheduler *sched)
{
        struct drm_sched_entity *entity;
        int i;

        /* Higher-priority run queues are served first. */
        for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
                entity = drm_sched_rq_select_entity(&sched->sched_rq[i]);
                if (entity)
                        break;
        }

        return entity;
}
/* Excerpt from drm_sched_main(), inside the scheduler kernel thread's
 * while loop: */
struct drm_sched_entity *entity = NULL;

wait_event_interruptible(sched->wake_up_worker,
                         (!drm_sched_blocked(sched) &&
                          (entity = drm_sched_select_entity(sched))) ||
                         kthread_should_stop());

if (!entity)
        continue;

sched_job = drm_sched_entity_pop_job(entity);
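To tie the pieces together, the thread's steady state is, in outline (a
condensed paraphrase of drm_sched_main(), not the verbatim source):

        for (;;) {
                /* 1. sleep until some entity has a ready job      */
                /* 2. entity    = drm_sched_select_entity(sched)   */
                /* 3. sched_job = drm_sched_entity_pop_job(entity) */
                /* 4. run the job and track its finished fence     */
        }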