/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/completion.h>

#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>

#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

/**
 * drm_sched_entity_init - Init a context entity used by the scheduler when
 * submitting to a HW ring.
 *
 * @entity: scheduler entity to init
 * @priority: priority of the entity
 * @sched_list: the list of drm scheds on which jobs from this
 *           entity can be submitted
 * @num_sched_list: number of drm sched in sched_list
 * @guilty: atomic_t set to 1 when a job on this queue
 *          is found to be guilty causing a timeout
 *
 * Note that the &sched_list must have at least one element to schedule the entity.
 *
 * For changing @priority later on at runtime see
 * drm_sched_entity_set_priority(). For changing the set of schedulers
 * @sched_list at runtime see drm_sched_entity_modify_sched().
 *
 * An entity is cleaned up by calling drm_sched_entity_fini(). See also
 * drm_sched_entity_destroy().
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty)
{
	if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0])))
		return -EINVAL;

	memset(entity, 0, sizeof(struct drm_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = NULL;
	entity->guilty = guilty;
	entity->num_sched_list = num_sched_list;
	entity->priority = priority;
	entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
	RCU_INIT_POINTER(entity->last_scheduled, NULL);
	RB_CLEAR_NODE(&entity->rb_tree_node);

	if (num_sched_list)
		entity->rq = &sched_list[0]->sched_rq[entity->priority];

	init_completion(&entity->entity_idle);

	/* We start in an idle state. */
	complete_all(&entity->entity_idle);

	spin_lock_init(&entity->rq_lock);
	spsc_queue_init(&entity->job_queue);

	atomic_set(&entity->fence_seq, 0);
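	/*
	 * Each entity consumes two dma-fence contexts: one for its
	 * scheduled fences and one for its finished fences (see the
	 * context checks in drm_sched_entity_add_dependency_cb()).
	 */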
	entity->fence_context = dma_fence_context_alloc(2);

	return 0;
}
EXPORT_SYMBOL(drm_sched_entity_init);
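
/*
 * Usage sketch (illustrative only, not part of this file): a driver that
 * feeds a single scheduler would typically initialize an entity roughly
 * like this; my_entity and my_sched are hypothetical driver objects.
 *
 *	struct drm_gpu_scheduler *sched_list[] = { &my_sched };
 *	int ret;
 *
 *	ret = drm_sched_entity_init(&my_entity, DRM_SCHED_PRIORITY_NORMAL,
 *				    sched_list, ARRAY_SIZE(sched_list), NULL);
 *	if (ret)
 *		return ret;
 */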

/**
 * drm_sched_entity_modify_sched - Modify sched of an entity
 * @entity: scheduler entity to modify
 * @sched_list: the list of new drm scheds which will replace
 *		 existing entity->sched_list
 * @num_sched_list: number of drm sched in sched_list
 *
 * Note that this must be called under the same common lock for @entity as
 * drm_sched_job_arm() and drm_sched_entity_push_job(), or the driver needs to
 * guarantee through some other means that this is never called while new jobs
 * can be pushed to @entity.
 */
void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
				    struct drm_gpu_scheduler **sched_list,
				    unsigned int num_sched_list)
{
	WARN_ON(!num_sched_list || !sched_list);

	spin_lock(&entity->rq_lock);
	entity->sched_list = sched_list;
	entity->num_sched_list = num_sched_list;
	spin_unlock(&entity->rq_lock);
}
EXPORT_SYMBOL(drm_sched_entity_modify_sched);
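
/*
 * Usage sketch (illustrative; submit_lock is a hypothetical driver mutex):
 * the switch must be serialized against drm_sched_job_arm() and
 * drm_sched_entity_push_job() as documented above.
 *
 *	mutex_lock(&submit_lock);
 *	drm_sched_entity_modify_sched(&my_entity, new_sched_list,
 *				      num_new_scheds);
 *	mutex_unlock(&submit_lock);
 */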

static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
	rmb(); /* for list_empty to work without lock */

	if (list_empty(&entity->list) ||
	    spsc_queue_count(&entity->job_queue) == 0 ||
	    entity->stopped)
		return true;

	return false;
}

/* Return true if entity could provide a job. */
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
{
	if (spsc_queue_peek(&entity->job_queue) == NULL)
		return false;

	if (READ_ONCE(entity->dependency))
		return false;

	return true;
}

/**
 * drm_sched_entity_error - return error of last scheduled job
 * @entity: scheduler entity to check
 *
 * Opportunistically return the error of the last scheduled job. The result
 * can change at any time when new jobs are pushed to the hw.
 */
int drm_sched_entity_error(struct drm_sched_entity *entity)
{
	struct dma_fence *fence;
	int r;

	rcu_read_lock();
	fence = rcu_dereference(entity->last_scheduled);
	r = fence ? fence->error : 0;
	rcu_read_unlock();

	return r;
}
EXPORT_SYMBOL(drm_sched_entity_error);
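
/*
 * Usage sketch (illustrative; my_entity is a hypothetical driver object):
 * a driver can fail fast on a broken context before queueing more work.
 *
 *	int err = drm_sched_entity_error(&my_entity);
 *	if (err)
 *		return err;
 */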

static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk)
{
	struct drm_sched_job *job = container_of(wrk, typeof(*job), work);

	drm_sched_fence_scheduled(job->s_fence, NULL);
	drm_sched_fence_finished(job->s_fence, -ESRCH);
	WARN_ON(job->s_fence->parent);
	job->sched->ops->free_job(job);
}

/* Signal the scheduler finished fence when the entity in question is killed. */
static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
					  struct dma_fence_cb *cb)
{
	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
						 finish_cb);
	unsigned long index;

	dma_fence_put(f);

	/* Wait for all dependencies to avoid data corruptions */
	xa_for_each(&job->dependencies, index, f) {
		struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

		if (s_fence && f == &s_fence->scheduled) {
			/* The dependencies array had a reference on the scheduled
			 * fence, and the finished fence refcount might have
			 * dropped to zero. Use dma_fence_get_rcu() so we get
			 * a NULL fence in that case.
			 */
			f = dma_fence_get_rcu(&s_fence->finished);

			/* Now that we have a reference on the finished fence,
			 * we can release the reference the dependencies array
			 * had on the scheduled fence.
			 */
			dma_fence_put(&s_fence->scheduled);
		}

		xa_erase(&job->dependencies, index);
		if (f && !dma_fence_add_callback(f, &job->finish_cb,
						 drm_sched_entity_kill_jobs_cb))
			return;

		dma_fence_put(f);
	}

	INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work);
	schedule_work(&job->work);
}

/* Remove the entity from the scheduler and kill all pending jobs */
static void drm_sched_entity_kill(struct drm_sched_entity *entity)
{
	struct drm_sched_job *job;
	struct dma_fence *prev;

	if (!entity->rq)
		return;

	spin_lock(&entity->rq_lock);
	entity->stopped = true;
	drm_sched_rq_remove_entity(entity->rq, entity);
	spin_unlock(&entity->rq_lock);

	/* Make sure this entity is not used by the scheduler at the moment */
	wait_for_completion(&entity->entity_idle);

	/* The entity is guaranteed to not be used by the scheduler */
	prev = rcu_dereference_check(entity->last_scheduled, true);
	dma_fence_get(prev);
	while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
		struct drm_sched_fence *s_fence = job->s_fence;

		dma_fence_get(&s_fence->finished);
		if (!prev ||
		    dma_fence_add_callback(prev, &job->finish_cb,
					   drm_sched_entity_kill_jobs_cb)) {
			/*
			 * Adding callback above failed.
			 * dma_fence_put() checks for NULL.
			 */
			dma_fence_put(prev);
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
		}

		prev = &s_fence->finished;
	}
	dma_fence_put(prev);
}

/**
 * drm_sched_entity_flush - Flush a context entity
 *
 * @entity: scheduler entity
 * @timeout: time to wait for the queue to become empty, in jiffies
 *
 * drm_sched_entity_fini() is split into two functions; this first one does
 * the waiting, removes the entity from the runqueue and returns an error
 * when the process was killed.
 *
 * Returns the remaining time in jiffies left from the input timeout
 */
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
{
	struct drm_gpu_scheduler *sched;
	struct task_struct *last_user;
	long ret = timeout;

	if (!entity->rq)
		return 0;

	sched = entity->rq->sched;
	/*
	 * The client will not queue more IBs during this fini, consume existing
	 * queued IBs or discard them on SIGKILL
	 */
	if (current->flags & PF_EXITING) {
		if (timeout)
			ret = wait_event_timeout(
					sched->job_scheduled,
					drm_sched_entity_is_idle(entity),
					timeout);
	} else {
		wait_event_killable(sched->job_scheduled,
				    drm_sched_entity_is_idle(entity));
	}

	/* For killed process disable any more IBs enqueue right now */
	last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
	if ((!last_user || last_user == current->group_leader) &&
	    (current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
		drm_sched_entity_kill(entity);

	return ret;
}
EXPORT_SYMBOL(drm_sched_entity_flush);
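
/*
 * Usage sketch (illustrative): drivers normally reach this through
 * drm_sched_entity_destroy(), but a manual bounded flush could look like
 * this; the one second timeout is an arbitrary example value.
 *
 *	long remaining = drm_sched_entity_flush(&my_entity,
 *						msecs_to_jiffies(1000));
 *	if (!remaining)
 *		DRM_WARN("entity not idle after flush timeout\n");
 */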

/**
 * drm_sched_entity_fini - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * Cleans up @entity which has been initialized by drm_sched_entity_init().
 *
 * If there are potentially jobs still in flight or getting newly queued
 * drm_sched_entity_flush() must be called first. This function then goes over
 * the entity and signals all jobs with an error code if the process was killed.
 */
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
	/*
	 * If consumption of existing IBs wasn't completed, forcefully remove
	 * them here. Also makes sure that the scheduler won't touch this entity
	 * any more.
	 */
	drm_sched_entity_kill(entity);

	if (entity->dependency) {
		dma_fence_remove_callback(entity->dependency, &entity->cb);
		dma_fence_put(entity->dependency);
		entity->dependency = NULL;
	}

	dma_fence_put(rcu_dereference_check(entity->last_scheduled, true));
	RCU_INIT_POINTER(entity->last_scheduled, NULL);
}
EXPORT_SYMBOL(drm_sched_entity_fini);

/**
 * drm_sched_entity_destroy - Destroy a context entity
 * @entity: scheduler entity
 *
 * Calls drm_sched_entity_flush() and drm_sched_entity_fini() as a
 * convenience wrapper.
 */
void drm_sched_entity_destroy(struct drm_sched_entity *entity)
{
	drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
	drm_sched_entity_fini(entity);
}
EXPORT_SYMBOL(drm_sched_entity_destroy);
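
/*
 * Teardown sketch (illustrative; ctx is a hypothetical driver context):
 * destroy the entity before tearing down the scheduler(s) it was
 * initialized with.
 *
 *	drm_sched_entity_destroy(&ctx->entity);
 */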

/*
 * drm_sched_entity_wakeup - callback to clear the entity's dependency and
 * wake up the scheduler
 */
static void drm_sched_entity_wakeup(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	entity->dependency = NULL;
	dma_fence_put(f);
	drm_sched_wakeup_if_can_queue(entity->rq->sched);
}

/**
 * drm_sched_entity_set_priority - Sets priority of the entity
 *
 * @entity: scheduler entity
 * @priority: scheduler priority
 *
 * Update the priority of runqueues used for the entity.
 */
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority)
{
	spin_lock(&entity->rq_lock);
	entity->priority = priority;
	spin_unlock(&entity->rq_lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);
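
/*
 * Usage sketch (illustrative; ctx is a hypothetical driver context), e.g.
 * from an ioctl that boosts a context's priority:
 *
 *	drm_sched_entity_set_priority(&ctx->entity, DRM_SCHED_PRIORITY_HIGH);
 */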

/*
 * Add a callback to the current dependency of the entity to wake up the
 * scheduler when the entity becomes available.
 */
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct dma_fence *fence = entity->dependency;
	struct drm_sched_fence *s_fence;

	if (fence->context == entity->fence_context ||
	    fence->context == entity->fence_context + 1) {
		/*
		 * Fence is a scheduled/finished fence from a job
		 * which belongs to the same entity, we can ignore
		 * fences from ourselves
		 */
		dma_fence_put(entity->dependency);
		return false;
	}

	s_fence = to_drm_sched_fence(fence);
	if (!fence->error && s_fence && s_fence->sched == sched &&
	    !test_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &fence->flags)) {

		/*
		 * Fence is from the same scheduler, only need to wait for
		 * it to be scheduled
		 */
		fence = dma_fence_get(&s_fence->scheduled);
		dma_fence_put(entity->dependency);
		entity->dependency = fence;
	}

	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
				    drm_sched_entity_wakeup))
		return true;

	dma_fence_put(entity->dependency);
	return false;
}

static struct dma_fence *
drm_sched_job_dependency(struct drm_sched_job *job,
			 struct drm_sched_entity *entity)
{
	struct dma_fence *f;

	/* We keep the fence around, so we can iterate over all dependencies
	 * in drm_sched_entity_kill_jobs_cb() to ensure all deps are signaled
	 * before killing the job.
	 */
	f = xa_load(&job->dependencies, job->last_dependency);
	if (f) {
		job->last_dependency++;
		return dma_fence_get(f);
	}

	if (job->sched->ops->prepare_job)
		return job->sched->ops->prepare_job(job, entity);

	return NULL;
}

struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
	struct drm_sched_job *sched_job;

	sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
	if (!sched_job)
		return NULL;

	while ((entity->dependency =
			drm_sched_job_dependency(sched_job, entity))) {
		trace_drm_sched_job_wait_dep(sched_job, entity->dependency);

		if (drm_sched_entity_add_dependency_cb(entity))
			return NULL;
	}

	/* skip jobs from entity that marked guilty */
	if (entity->guilty && atomic_read(entity->guilty))
		dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);

	dma_fence_put(rcu_dereference_check(entity->last_scheduled, true));
	rcu_assign_pointer(entity->last_scheduled,
			   dma_fence_get(&sched_job->s_fence->finished));

	/*
	 * If the queue is empty we allow drm_sched_entity_select_rq() to
	 * locklessly access ->last_scheduled. This only works if we set the
	 * pointer before we dequeue and if we add a write barrier here.
	 */
	smp_wmb();

	spsc_queue_pop(&entity->job_queue);

	/*
	 * Update the entity's location in the min heap according to
	 * the timestamp of the next job, if any.
	 */
	if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) {
		struct drm_sched_job *next;

		next = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
		if (next)
			drm_sched_rq_update_fifo(entity, next->submit_ts);
	}

	/* Jobs and entities might have different lifecycles. Since we're
	 * removing the job from the entity's queue, set the job's entity
	 * pointer to NULL to prevent any future access of the entity through
	 * this job.
	 */
	sched_job->entity = NULL;

	return sched_job;
}

void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
{
	struct dma_fence *fence;
	struct drm_gpu_scheduler *sched;
	struct drm_sched_rq *rq;

	/* single possible engine and already selected */
	if (!entity->sched_list)
		return;

	/* queue non-empty, stay on the same engine */
	if (spsc_queue_count(&entity->job_queue))
		return;

	/*
	 * Only when the queue is empty are we guaranteed that the scheduler
	 * thread cannot change ->last_scheduled. To enforce ordering we need
	 * a read barrier here. See drm_sched_entity_pop_job() for the other
	 * side.
	 */
	smp_rmb();

	fence = rcu_dereference_check(entity->last_scheduled, true);

	/* stay on the same engine if the previous job hasn't finished */
	if (fence && !dma_fence_is_signaled(fence))
		return;

	spin_lock(&entity->rq_lock);
	sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
	rq = sched ? &sched->sched_rq[entity->priority] : NULL;
	if (rq != entity->rq) {
		drm_sched_rq_remove_entity(entity->rq, entity);
		entity->rq = rq;
	}
	spin_unlock(&entity->rq_lock);

	if (entity->num_sched_list == 1)
		entity->sched_list = NULL;
}

/**
 * drm_sched_entity_push_job - Submit a job to the entity's job queue
 * @sched_job: job to submit
 *
 * Note: To guarantee that the order of insertion to queue matches the job's
 * fence sequence number this function should be called with drm_sched_job_arm()
 * under common lock for the struct drm_sched_entity that was set up for
 * @sched_job in drm_sched_job_init().
 */
void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
{
	struct drm_sched_entity *entity = sched_job->entity;
	bool first;
	ktime_t submit_ts;

	trace_drm_sched_job(sched_job, entity);
	atomic_inc(entity->rq->sched->score);
	WRITE_ONCE(entity->last_user, current->group_leader);

	/*
	 * After the sched_job is pushed into the entity queue, it may be
	 * completed and freed up at any time. We can no longer access it.
	 * Make sure to set the submit_ts first, to avoid a race.
	 */
	sched_job->submit_ts = submit_ts = ktime_get();
	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);

	/* first job wakes up scheduler */
	if (first) {
		/* Add the entity to the run queue */
		spin_lock(&entity->rq_lock);
		if (entity->stopped) {
			spin_unlock(&entity->rq_lock);

			DRM_ERROR("Trying to push to a killed entity\n");
			return;
		}

		drm_sched_rq_add_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);

		if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
			drm_sched_rq_update_fifo(entity, submit_ts);

		drm_sched_wakeup_if_can_queue(entity->rq->sched);
	}
}
EXPORT_SYMBOL(drm_sched_entity_push_job);
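
/*
 * End-to-end submission sketch (illustrative; my_job, ctx and submit_lock
 * are hypothetical, error handling is trimmed). Arming and pushing happen
 * under the same lock, as required by the kernel-doc above:
 *
 *	ret = drm_sched_job_init(&my_job->base, &ctx->entity, owner);
 *	if (ret)
 *		return ret;
 *
 *	mutex_lock(&submit_lock);
 *	drm_sched_job_arm(&my_job->base);
 *	drm_sched_entity_push_job(&my_job->base);
 *	mutex_unlock(&submit_lock);
 */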