/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/completion.h>

#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>

#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

/**
 * drm_sched_entity_init - Init a context entity used by the scheduler when
 * submitting jobs to a HW ring.
 *
 * @entity: scheduler entity to init
 * @priority: priority of the entity
 * @sched_list: the list of drm scheds on which jobs from this
 *           entity can be submitted
 * @num_sched_list: number of drm scheds in sched_list
 * @guilty: atomic_t set to 1 when a job on this queue
 *          is found to be guilty causing a timeout
 *
 * Note that the &sched_list must have at least one element to schedule the entity.
 *
 * For changing @priority later on at runtime see
 * drm_sched_entity_set_priority(). For changing the set of schedulers
 * @sched_list at runtime see drm_sched_entity_modify_sched().
 *
 * An entity is cleaned up by calling drm_sched_entity_fini(). See also
 * drm_sched_entity_destroy().
 *
 * Returns 0 on success or a negative error code on failure.
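 *
 * A minimal usage sketch (illustrative only; "ring" and "ctx" are assumed
 * driver-side objects, with "ring" embedding a struct drm_gpu_scheduler):
 *
 *	struct drm_gpu_scheduler *sched_list[] = { &ring->sched };
 *	int r;
 *
 *	r = drm_sched_entity_init(&ctx->entity, DRM_SCHED_PRIORITY_NORMAL,
 *				  sched_list, ARRAY_SIZE(sched_list), NULL);
 *	if (r)
 *		return r;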
 */
int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty)
{
	if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0])))
		return -EINVAL;

	memset(entity, 0, sizeof(struct drm_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = NULL;
	entity->guilty = guilty;
	entity->num_sched_list = num_sched_list;
	entity->priority = priority;
	/*
	 * It's perfectly valid to initialize an entity without having a valid
	 * scheduler attached. It's just not valid to use the scheduler before it
	 * is initialized itself.
	 */
	entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
	RCU_INIT_POINTER(entity->last_scheduled, NULL);
	RB_CLEAR_NODE(&entity->rb_tree_node);

	if (num_sched_list && !sched_list[0]->sched_rq) {
		/* Every entry covered by num_sched_list should be non-NULL,
		 * so warn drivers not to do this and to fix their DRM
		 * calling order.
		 */
		pr_warn("%s: called with uninitialized scheduler\n", __func__);
	} else if (num_sched_list) {
		/* The "priority" of an entity cannot exceed the number of run-queues of a
		 * scheduler. Protect against num_rqs being 0 by converting to signed. Choose
		 * the lowest priority available.
		 */
		if (entity->priority >= sched_list[0]->num_rqs) {
			drm_err(sched_list[0], "entity with out-of-bounds priority:%u num_rqs:%u\n",
				entity->priority, sched_list[0]->num_rqs);
			entity->priority = max_t(s32, (s32) sched_list[0]->num_rqs - 1,
						 (s32) DRM_SCHED_PRIORITY_KERNEL);
		}
		entity->rq = sched_list[0]->sched_rq[entity->priority];
	}

	init_completion(&entity->entity_idle);

	/* We start in an idle state. */
	complete_all(&entity->entity_idle);

	spin_lock_init(&entity->rq_lock);
	spsc_queue_init(&entity->job_queue);

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = dma_fence_context_alloc(2);

	return 0;
}
EXPORT_SYMBOL(drm_sched_entity_init);

/**
 * drm_sched_entity_modify_sched - Modify sched of an entity
 * @entity: scheduler entity to modify
 * @sched_list: the list of new drm scheds which will replace
 *		 existing entity->sched_list
 * @num_sched_list: number of drm scheds in sched_list
 *
 * Note that this must be called under the same common lock for @entity as
 * drm_sched_job_arm() and drm_sched_entity_push_job(), or the driver needs to
 * guarantee through some other means that this is never called while new jobs
 * can be pushed to @entity.
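 *
 * An illustrative sketch (the "ctx" object and its lock are assumptions;
 * the required lock is whatever serializes job submission for @entity):
 *
 *	mutex_lock(&ctx->submit_lock);
 *	drm_sched_entity_modify_sched(&ctx->entity, new_sched_list,
 *				      num_new_scheds);
 *	mutex_unlock(&ctx->submit_lock);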
 */
void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
				    struct drm_gpu_scheduler **sched_list,
				    unsigned int num_sched_list)
{
	WARN_ON(!num_sched_list || !sched_list);

	spin_lock(&entity->rq_lock);
	entity->sched_list = sched_list;
	entity->num_sched_list = num_sched_list;
	spin_unlock(&entity->rq_lock);
}
EXPORT_SYMBOL(drm_sched_entity_modify_sched);

static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
	rmb(); /* for list_empty to work without lock */

	if (list_empty(&entity->list) ||
	    spsc_queue_count(&entity->job_queue) == 0 ||
	    entity->stopped)
		return true;

	return false;
}

/* Return true if entity could provide a job. */
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
{
	if (spsc_queue_peek(&entity->job_queue) == NULL)
		return false;

	if (READ_ONCE(entity->dependency))
		return false;

	return true;
}

/**
 * drm_sched_entity_error - return error of last scheduled job
 * @entity: scheduler entity to check
 *
 * Opportunistically return the error of the last scheduled job. The result
 * can change at any time when new jobs are pushed to the hw.
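 *
 * A minimal usage sketch (the surrounding submission logic is an
 * assumption), e.g. refusing new submissions once the entity saw an error:
 *
 *	int r = drm_sched_entity_error(entity);
 *
 *	if (r)
 *		return r;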
 */
int drm_sched_entity_error(struct drm_sched_entity *entity)
{
	struct dma_fence *fence;
	int r;

	rcu_read_lock();
	fence = rcu_dereference(entity->last_scheduled);
	r = fence ? fence->error : 0;
	rcu_read_unlock();

	return r;
}
EXPORT_SYMBOL(drm_sched_entity_error);

static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk)
{
	struct drm_sched_job *job = container_of(wrk, typeof(*job), work);

	drm_sched_fence_scheduled(job->s_fence, NULL);
	drm_sched_fence_finished(job->s_fence, -ESRCH);
	WARN_ON(job->s_fence->parent);
	job->sched->ops->free_job(job);
}

/* Signal the scheduler finished fence when the entity in question is killed. */
static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
					  struct dma_fence_cb *cb)
{
	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
						 finish_cb);
	unsigned long index;

	dma_fence_put(f);

	/* Wait for all dependencies to avoid data corruption */
	xa_for_each(&job->dependencies, index, f) {
		struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

		if (s_fence && f == &s_fence->scheduled) {
			/* The dependencies array had a reference on the scheduled
			 * fence, and the finished fence refcount might have
			 * dropped to zero. Use dma_fence_get_rcu() so we get
			 * a NULL fence in that case.
			 */
			f = dma_fence_get_rcu(&s_fence->finished);

			/* Now that we have a reference on the finished fence,
			 * we can release the reference the dependencies array
			 * had on the scheduled fence.
			 */
			dma_fence_put(&s_fence->scheduled);
		}

		xa_erase(&job->dependencies, index);
		if (f && !dma_fence_add_callback(f, &job->finish_cb,
						 drm_sched_entity_kill_jobs_cb))
			return;

		dma_fence_put(f);
	}

	INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work);
	schedule_work(&job->work);
}

/* Remove the entity from the scheduler and kill all pending jobs */
static void drm_sched_entity_kill(struct drm_sched_entity *entity)
{
	struct drm_sched_job *job;
	struct dma_fence *prev;

	if (!entity->rq)
		return;

	spin_lock(&entity->rq_lock);
	entity->stopped = true;
	drm_sched_rq_remove_entity(entity->rq, entity);
	spin_unlock(&entity->rq_lock);

	/* Make sure this entity is not used by the scheduler at the moment */
	wait_for_completion(&entity->entity_idle);

	/* The entity is guaranteed to not be used by the scheduler */
	prev = rcu_dereference_check(entity->last_scheduled, true);
	dma_fence_get(prev);
	while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
		struct drm_sched_fence *s_fence = job->s_fence;

		dma_fence_get(&s_fence->finished);
		if (!prev ||
		    dma_fence_add_callback(prev, &job->finish_cb,
					   drm_sched_entity_kill_jobs_cb)) {
			/*
			 * Adding callback above failed.
			 * dma_fence_put() checks for NULL.
			 */
			dma_fence_put(prev);
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
		}

		prev = &s_fence->finished;
	}
	dma_fence_put(prev);
}

/**
 * drm_sched_entity_flush - Flush a context entity
 *
 * @entity: scheduler entity
 * @timeout: time to wait, in jiffies, for the queue to become empty
 *
 * drm_sched_entity_fini() is split into two functions; this first one does
 * the waiting, removes the entity from the runqueue and returns an error
 * when the process was killed.
 *
 * Returns the remaining time in jiffies left from the input timeout
 */
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
{
	struct drm_gpu_scheduler *sched;
	struct task_struct *last_user;
	long ret = timeout;

	if (!entity->rq)
		return 0;

	sched = entity->rq->sched;
	/*
	 * The client will not queue more IBs during this fini; consume existing
	 * queued IBs or discard them on SIGKILL.
	 */
	if (current->flags & PF_EXITING) {
		if (timeout)
			ret = wait_event_timeout(
					sched->job_scheduled,
					drm_sched_entity_is_idle(entity),
					timeout);
	} else {
		wait_event_killable(sched->job_scheduled,
				    drm_sched_entity_is_idle(entity));
	}

	/* For a killed process, disable any further IB enqueue right now */
	last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
	if ((!last_user || last_user == current->group_leader) &&
	    (current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
		drm_sched_entity_kill(entity);

	return ret;
}
EXPORT_SYMBOL(drm_sched_entity_flush);

/**
 * drm_sched_entity_fini - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * Cleans up @entity which has been initialized by drm_sched_entity_init().
 *
 * If jobs are potentially still in flight or getting newly queued,
 * drm_sched_entity_flush() must be called first. This function then goes over
 * the entity and signals all jobs with an error code if the process was killed.
 */
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
	/*
	 * If consumption of existing IBs wasn't completed, forcefully remove
	 * them here. This also makes sure that the scheduler won't touch this
	 * entity any more.
	 */
	drm_sched_entity_kill(entity);

	if (entity->dependency) {
		dma_fence_remove_callback(entity->dependency, &entity->cb);
		dma_fence_put(entity->dependency);
		entity->dependency = NULL;
	}

	dma_fence_put(rcu_dereference_check(entity->last_scheduled, true));
	RCU_INIT_POINTER(entity->last_scheduled, NULL);
}
EXPORT_SYMBOL(drm_sched_entity_fini);

/**
 * drm_sched_entity_destroy - Destroy a context entity
 * @entity: scheduler entity
 *
 * Calls drm_sched_entity_flush() and drm_sched_entity_fini() as a
 * convenience wrapper.
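 *
 * A teardown sketch (illustrative; "ctx" is an assumed driver object).
 * Drivers needing a custom flush timeout call the two halves directly:
 *
 *	drm_sched_entity_flush(&ctx->entity, msecs_to_jiffies(500));
 *	drm_sched_entity_fini(&ctx->entity);
 *
 * everyone else can simply use:
 *
 *	drm_sched_entity_destroy(&ctx->entity);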
 */
void drm_sched_entity_destroy(struct drm_sched_entity *entity)
{
	drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
	drm_sched_entity_fini(entity);
}
EXPORT_SYMBOL(drm_sched_entity_destroy);

/*
 * drm_sched_entity_wakeup - callback to clear the entity's dependency and
 * wake up the scheduler
 */
static void drm_sched_entity_wakeup(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	entity->dependency = NULL;
	dma_fence_put(f);
	drm_sched_wakeup(entity->rq->sched);
}

/**
 * drm_sched_entity_set_priority - Sets priority of the entity
 *
 * @entity: scheduler entity
 * @priority: scheduler priority
 *
 * Update the priority of the runqueues used for the entity.
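 *
 * A minimal sketch (the "ctx" object and the trigger for boosting the
 * priority are assumptions):
 *
 *	drm_sched_entity_set_priority(&ctx->entity, DRM_SCHED_PRIORITY_HIGH);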
 */
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority)
{
	spin_lock(&entity->rq_lock);
	entity->priority = priority;
	spin_unlock(&entity->rq_lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);

/*
 * Add a callback to the current dependency of the entity to wake up the
 * scheduler when the entity becomes available.
 */
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct dma_fence *fence = entity->dependency;
	struct drm_sched_fence *s_fence;

	if (fence->context == entity->fence_context ||
	    fence->context == entity->fence_context + 1) {
		/*
		 * Fence is a scheduled/finished fence from a job
		 * which belongs to the same entity, we can ignore
		 * fences from ourselves
		 */
		dma_fence_put(entity->dependency);
		return false;
	}

	s_fence = to_drm_sched_fence(fence);
	if (!fence->error && s_fence && s_fence->sched == sched &&
	    !test_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &fence->flags)) {

		/*
		 * Fence is from the same scheduler, only need to wait for
		 * it to be scheduled
		 */
		fence = dma_fence_get(&s_fence->scheduled);
		dma_fence_put(entity->dependency);
		entity->dependency = fence;
	}

	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
				    drm_sched_entity_wakeup))
		return true;

	dma_fence_put(entity->dependency);
	return false;
}

static struct dma_fence *
drm_sched_job_dependency(struct drm_sched_job *job,
			 struct drm_sched_entity *entity)
{
	struct dma_fence *f;

	/* We keep the fence around, so we can iterate over all dependencies
	 * in drm_sched_entity_kill_jobs_cb() to ensure all deps are signaled
	 * before killing the job.
	 */
	f = xa_load(&job->dependencies, job->last_dependency);
	if (f) {
		job->last_dependency++;
		return dma_fence_get(f);
	}

	if (job->sched->ops->prepare_job)
		return job->sched->ops->prepare_job(job, entity);

	return NULL;
}

struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
	struct drm_sched_job *sched_job;

	sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
	if (!sched_job)
		return NULL;

	while ((entity->dependency =
			drm_sched_job_dependency(sched_job, entity))) {
		trace_drm_sched_job_wait_dep(sched_job, entity->dependency);

		if (drm_sched_entity_add_dependency_cb(entity))
			return NULL;
	}

	/* skip jobs from an entity that is marked guilty */
	if (entity->guilty && atomic_read(entity->guilty))
		dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);

	dma_fence_put(rcu_dereference_check(entity->last_scheduled, true));
	rcu_assign_pointer(entity->last_scheduled,
			   dma_fence_get(&sched_job->s_fence->finished));

	/*
	 * If the queue is empty we allow drm_sched_entity_select_rq() to
	 * locklessly access ->last_scheduled. This only works if we set the
	 * pointer before we dequeue and if we have a write barrier here.
	 */
	smp_wmb();

	spsc_queue_pop(&entity->job_queue);

	/*
	 * Update the entity's location in the min heap according to
	 * the timestamp of the next job, if any.
	 */
	if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) {
		struct drm_sched_job *next;

		next = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
		if (next)
			drm_sched_rq_update_fifo(entity, next->submit_ts);
	}

	/* Jobs and entities might have different lifecycles. Since we're
	 * removing the job from the entity's queue, set the job's entity
	 * pointer to NULL to prevent any future access of the entity through
	 * this job.
	 */
	sched_job->entity = NULL;

	return sched_job;
}

void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
{
	struct dma_fence *fence;
	struct drm_gpu_scheduler *sched;
	struct drm_sched_rq *rq;

	/* single possible engine and already selected */
	if (!entity->sched_list)
		return;

	/* queue non-empty, stay on the same engine */
	if (spsc_queue_count(&entity->job_queue))
		return;

	/*
	 * Only when the queue is empty are we guaranteed that the scheduler
	 * thread cannot change ->last_scheduled. To enforce ordering we need
	 * a read barrier here. See drm_sched_entity_pop_job() for the other
	 * side.
	 */
	smp_rmb();

	fence = rcu_dereference_check(entity->last_scheduled, true);

	/* stay on the same engine if the previous job hasn't finished */
	if (fence && !dma_fence_is_signaled(fence))
		return;

	spin_lock(&entity->rq_lock);
	sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
	rq = sched ? sched->sched_rq[entity->priority] : NULL;
	if (rq != entity->rq) {
		drm_sched_rq_remove_entity(entity->rq, entity);
		entity->rq = rq;
	}
	spin_unlock(&entity->rq_lock);

	if (entity->num_sched_list == 1)
		entity->sched_list = NULL;
}

/**
 * drm_sched_entity_push_job - Submit a job to the entity's job queue
 * @sched_job: job to submit
 *
 * Note: To guarantee that the order of insertion to queue matches the job's
 * fence sequence number, this function should be called with drm_sched_job_arm()
 * under a common lock for the struct drm_sched_entity that was set up for
 * @sched_job in drm_sched_job_init().
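 *
 * A submission sketch (illustrative; the exact drm_sched_job_init()
 * arguments vary across kernel versions, and the "ctx" lock is an
 * assumed per-entity submission lock):
 *
 *	r = drm_sched_job_init(&job->base, &ctx->entity, owner);
 *	if (r)
 *		return r;
 *
 *	... add dependencies, map buffers, etc. ...
 *
 *	mutex_lock(&ctx->submit_lock);
 *	drm_sched_job_arm(&job->base);
 *	drm_sched_entity_push_job(&job->base);
 *	mutex_unlock(&ctx->submit_lock);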
 */
void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
{
	struct drm_sched_entity *entity = sched_job->entity;
	bool first;
	ktime_t submit_ts;

	trace_drm_sched_job(sched_job, entity);
	atomic_inc(entity->rq->sched->score);
	WRITE_ONCE(entity->last_user, current->group_leader);

	/*
	 * After the sched_job is pushed into the entity queue, it may be
	 * completed and freed up at any time. We can no longer access it.
	 * Make sure to set the submit_ts first, to avoid a race.
	 */
	sched_job->submit_ts = submit_ts = ktime_get();
	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);

	/* first job wakes up scheduler */
	if (first) {
		struct drm_gpu_scheduler *sched;
		struct drm_sched_rq *rq;

		/* Add the entity to the run queue */
		spin_lock(&entity->rq_lock);
		if (entity->stopped) {
			spin_unlock(&entity->rq_lock);

			DRM_ERROR("Trying to push to a killed entity\n");
			return;
		}

		rq = entity->rq;
		sched = rq->sched;

		drm_sched_rq_add_entity(rq, entity);
		spin_unlock(&entity->rq_lock);

		if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
			drm_sched_rq_update_fifo(entity, submit_ts);

		drm_sched_wakeup(sched);
	}
}
EXPORT_SYMBOL(drm_sched_entity_push_job);