/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/completion.h>

#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>

#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

/**
 * drm_sched_entity_init - initialize a context entity used by the scheduler
 * when submitting to a HW ring
 *
 * @entity: scheduler entity to init
 * @priority: priority of the entity
 * @sched_list: the list of drm scheds on which jobs from this
 *           entity can be submitted
 * @num_sched_list: number of drm scheds in sched_list
 * @guilty: atomic_t set to 1 when a job on this queue
 *          is found to be guilty causing a timeout
 *
 * Note: the sched_list should have at least one element to schedule
 *       the entity.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty)
{
	if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0])))
		return -EINVAL;

	memset(entity, 0, sizeof(struct drm_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = NULL;
	entity->guilty = guilty;
	entity->num_sched_list = num_sched_list;
	entity->priority = priority;
	entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
	entity->last_scheduled = NULL;

	if (num_sched_list)
		entity->rq = &sched_list[0]->sched_rq[entity->priority];

	init_completion(&entity->entity_idle);

	spin_lock_init(&entity->rq_lock);
	spsc_queue_init(&entity->job_queue);

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = dma_fence_context_alloc(2);

	return 0;
}
EXPORT_SYMBOL(drm_sched_entity_init);
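
/*
 * Usage sketch (illustrative only, not part of the original file): a driver
 * typically embeds an entity in its per-context state and initializes it
 * against one or more schedulers. "my_ctx" and "my_sched" are hypothetical
 * names:
 *
 *	struct drm_gpu_scheduler *sched_list[] = { &my_sched };
 *	int r;
 *
 *	r = drm_sched_entity_init(&my_ctx->entity, DRM_SCHED_PRIORITY_NORMAL,
 *				  sched_list, ARRAY_SIZE(sched_list), NULL);
 *	if (r)
 *		return r;
 */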

/**
 * drm_sched_entity_modify_sched - Modify the scheds of an entity
 * @entity: scheduler entity to modify
 * @sched_list: the list of new drm scheds which will replace
 *		 existing entity->sched_list
 * @num_sched_list: number of drm scheds in sched_list
 */
void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
				    struct drm_gpu_scheduler **sched_list,
				    unsigned int num_sched_list)
{
	WARN_ON(!num_sched_list || !sched_list);

	entity->sched_list = sched_list;
	entity->num_sched_list = num_sched_list;
}
EXPORT_SYMBOL(drm_sched_entity_modify_sched);
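
/*
 * Usage sketch (illustrative, hypothetical names): switching an entity to a
 * different scheduler set, e.g. when a context's priority class changes. The
 * new list must stay valid for the entity's lifetime, and the caller must
 * make sure the entity is not runnable while its list is swapped:
 *
 *	drm_sched_entity_modify_sched(&my_ctx->entity, new_sched_list,
 *				      num_new_scheds);
 */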

/**
 * drm_sched_entity_is_idle - Check if entity is idle
 *
 * @entity: scheduler entity
 *
 * Returns true if the entity does not have any unscheduled jobs.
 */
static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
	rmb(); /* for list_empty to work without lock */

	if (list_empty(&entity->list) ||
	    spsc_queue_count(&entity->job_queue) == 0 ||
	    entity->stopped)
		return true;

	return false;
}

/**
 * drm_sched_entity_is_ready - Check if entity is ready
 *
 * @entity: scheduler entity
 *
 * Returns true if the entity could provide a job.
 */
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
{
	if (spsc_queue_peek(&entity->job_queue) == NULL)
		return false;

	if (READ_ONCE(entity->dependency))
		return false;

	return true;
}

/**
 * drm_sched_entity_flush - Flush a context entity
 *
 * @entity: scheduler entity
 * @timeout: time to wait, in jiffies, for the queue to become empty
 *
 * This splits drm_sched_entity_fini() into two functions: this first one
 * waits for the queue to drain, removes the entity from the runqueue and
 * returns an error when the process was killed.
 *
 * Returns the remaining time in jiffies left from the input timeout.
 */
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
{
	struct drm_gpu_scheduler *sched;
	struct task_struct *last_user;
	long ret = timeout;

	if (!entity->rq)
		return 0;

	sched = entity->rq->sched;
	/*
	 * The client will not queue more IBs during this fini; consume the
	 * existing queued IBs or discard them on SIGKILL.
	 */
	if (current->flags & PF_EXITING) {
		if (timeout)
			ret = wait_event_timeout(
					sched->job_scheduled,
					drm_sched_entity_is_idle(entity),
					timeout);
	} else {
		wait_event_killable(sched->job_scheduled,
				    drm_sched_entity_is_idle(entity));
	}

	/* For a killed process, disable any further IB enqueueing right now */
	last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
	if ((!last_user || last_user == current->group_leader) &&
	    (current->flags & PF_EXITING) && (current->exit_code == SIGKILL)) {
		spin_lock(&entity->rq_lock);
		entity->stopped = true;
		drm_sched_rq_remove_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);
	}

	return ret;
}
EXPORT_SYMBOL(drm_sched_entity_flush);
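
/*
 * Usage sketch (illustrative): drivers usually call this from their context
 * teardown path before drm_sched_entity_fini(), bounding how long they wait
 * for queued jobs to drain:
 *
 *	drm_sched_entity_flush(&my_ctx->entity, msecs_to_jiffies(1000));
 *	drm_sched_entity_fini(&my_ctx->entity);
 */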

/**
 * drm_sched_entity_kill_jobs_cb - helper for drm_sched_entity_kill_jobs
 *
 * @f: signaled fence
 * @cb: our callback structure
 *
 * Signal the scheduler finished fence when the entity in question is killed.
 */
static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
					  struct dma_fence_cb *cb)
{
	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
						 finish_cb);

	drm_sched_fence_finished(job->s_fence);
	WARN_ON(job->s_fence->parent);
	job->sched->ops->free_job(job);
}

/**
 * drm_sched_entity_kill_jobs - Make sure all remaining jobs are killed
 *
 * @entity: entity which is cleaned up
 *
 * Makes sure that all remaining jobs in an entity are killed before it is
 * destroyed.
 */
static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
{
	struct drm_sched_job *job;
	struct dma_fence *f;
	int r;

	while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
		struct drm_sched_fence *s_fence = job->s_fence;

		/* Wait for all dependencies to avoid data corruptions */
		while ((f = job->sched->ops->dependency(job, entity)))
			dma_fence_wait(f, false);

		drm_sched_fence_scheduled(s_fence);
		dma_fence_set_error(&s_fence->finished, -ESRCH);

		/*
		 * When the pipe is hung by an older entity, a new entity
		 * might not even have had a chance to submit its first job
		 * to the HW, so entity->last_scheduled will remain NULL.
		 */
		if (!entity->last_scheduled) {
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
			continue;
		}

		r = dma_fence_add_callback(entity->last_scheduled,
					   &job->finish_cb,
					   drm_sched_entity_kill_jobs_cb);
		if (r == -ENOENT)
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
		else if (r)
			DRM_ERROR("fence add callback failed (%d)\n", r);
	}
}

/**
 * drm_sched_entity_fini - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * This should be called after drm_sched_entity_flush(). It goes over the
 * entity and signals all jobs with an error code if the process was killed.
 */
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = NULL;

	if (entity->rq) {
		sched = entity->rq->sched;
		drm_sched_rq_remove_entity(entity->rq, entity);
	}

	/* Consumption of existing IBs wasn't completed. Forcefully
	 * remove them here.
	 */
	if (spsc_queue_count(&entity->job_queue)) {
		if (sched) {
			/*
			 * Wait for the thread to go idle to make sure it
			 * isn't processing this entity.
			 */
			wait_for_completion(&entity->entity_idle);
		}
		if (entity->dependency) {
			dma_fence_remove_callback(entity->dependency,
						  &entity->cb);
			dma_fence_put(entity->dependency);
			entity->dependency = NULL;
		}

		drm_sched_entity_kill_jobs(entity);
	}

	dma_fence_put(entity->last_scheduled);
	entity->last_scheduled = NULL;
}
EXPORT_SYMBOL(drm_sched_entity_fini);

/**
 * drm_sched_entity_destroy - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * Calls drm_sched_entity_flush() and drm_sched_entity_fini()
 */
void drm_sched_entity_destroy(struct drm_sched_entity *entity)
{
	drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
	drm_sched_entity_fini(entity);
}
EXPORT_SYMBOL(drm_sched_entity_destroy);
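
/*
 * Usage sketch (illustrative): for callers that do not need a custom flush
 * timeout, this single call replaces the flush/fini pair shown earlier:
 *
 *	drm_sched_entity_destroy(&my_ctx->entity);
 */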

/**
 * drm_sched_entity_clear_dep - callback to clear the entity's dependency
 */
static void drm_sched_entity_clear_dep(struct dma_fence *f,
				       struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	entity->dependency = NULL;
	dma_fence_put(f);
}

/**
 * drm_sched_entity_wakeup - callback to clear the entity's dependency and
 * wake up the scheduler
 */
static void drm_sched_entity_wakeup(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	drm_sched_entity_clear_dep(f, cb);
	drm_sched_wakeup(entity->rq->sched);
}

/**
 * drm_sched_entity_set_priority - Sets the priority of the entity
 *
 * @entity: scheduler entity
 * @priority: scheduler priority
 *
 * Update the priority of runqueues used for the entity.
 */
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority)
{
	spin_lock(&entity->rq_lock);
	entity->priority = priority;
	spin_unlock(&entity->rq_lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);
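
/*
 * Usage sketch (illustrative, hypothetical ioctl path): raising a context's
 * priority at runtime; the new value takes effect the next time the entity
 * selects a runqueue:
 *
 *	drm_sched_entity_set_priority(&my_ctx->entity,
 *				      DRM_SCHED_PRIORITY_HIGH);
 */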

/**
 * drm_sched_entity_add_dependency_cb - add callback for the entity's dependency
 *
 * @entity: entity with dependency
 *
 * Add a callback to the current dependency of the entity to wake up the
 * scheduler when the entity becomes available.
 */
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct dma_fence *fence = entity->dependency;
	struct drm_sched_fence *s_fence;

	if (fence->context == entity->fence_context ||
	    fence->context == entity->fence_context + 1) {
		/*
		 * Fence is a scheduled/finished fence from a job
		 * which belongs to the same entity; we can ignore
		 * fences from ourselves.
		 */
		dma_fence_put(entity->dependency);
		return false;
	}

	s_fence = to_drm_sched_fence(fence);
	if (s_fence && s_fence->sched == sched) {

		/*
		 * Fence is from the same scheduler, only need to wait for
		 * it to be scheduled.
		 */
		fence = dma_fence_get(&s_fence->scheduled);
		dma_fence_put(entity->dependency);
		entity->dependency = fence;
		if (!dma_fence_add_callback(fence, &entity->cb,
					    drm_sched_entity_clear_dep))
			return true;

		/* Ignore it when it is already scheduled */
		dma_fence_put(fence);
		return false;
	}

	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
				    drm_sched_entity_wakeup))
		return true;

	dma_fence_put(entity->dependency);
	return false;
}

/**
 * drm_sched_entity_pop_job - get a ready-to-be-scheduled job from the entity
 *
 * @entity: entity to get the job from
 *
 * Process all dependencies and try to get one job from the entity's queue.
 */
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct drm_sched_job *sched_job;

	sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
	if (!sched_job)
		return NULL;

	while ((entity->dependency =
			sched->ops->dependency(sched_job, entity))) {
		trace_drm_sched_job_wait_dep(sched_job, entity->dependency);

		if (drm_sched_entity_add_dependency_cb(entity))
			return NULL;
	}

	/* Skip jobs from an entity that has been marked guilty */
	if (entity->guilty && atomic_read(entity->guilty))
		dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);

	dma_fence_put(entity->last_scheduled);
	entity->last_scheduled = dma_fence_get(&sched_job->s_fence->finished);

	spsc_queue_pop(&entity->job_queue);
	return sched_job;
}
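
/*
 * Sketch of the driver side (illustrative, hypothetical types and helpers):
 * the ->dependency() hook consumed above hands back pending fences one at a
 * time and NULL once the job is ready to run:
 *
 *	static struct dma_fence *
 *	my_job_dependency(struct drm_sched_job *sched_job,
 *			  struct drm_sched_entity *s_entity)
 *	{
 *		struct my_job *job = to_my_job(sched_job);
 *
 *		if (job->num_deps)
 *			return job->deps[--job->num_deps];
 *		return NULL;
 *	}
 */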

/**
 * drm_sched_entity_select_rq - select a new rq for the entity
 *
 * @entity: scheduler entity
 *
 * Check all prerequisites and select a new rq for the entity for load
 * balancing.
 */
void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
{
	struct dma_fence *fence;
	struct drm_gpu_scheduler *sched;
	struct drm_sched_rq *rq;

	if (spsc_queue_count(&entity->job_queue) || entity->num_sched_list <= 1)
		return;

	fence = READ_ONCE(entity->last_scheduled);
	if (fence && !dma_fence_is_signaled(fence))
		return;

	spin_lock(&entity->rq_lock);
	sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
	rq = sched ? &sched->sched_rq[entity->priority] : NULL;
	if (rq != entity->rq) {
		drm_sched_rq_remove_entity(entity->rq, entity);
		entity->rq = rq;
	}

	spin_unlock(&entity->rq_lock);
}

/**
 * drm_sched_entity_push_job - Submit a job to the entity's job queue
 *
 * @sched_job: job to submit
 * @entity: scheduler entity
 *
 * Note: To guarantee that the order of insertion to queue matches the job's
 * fence sequence number, this function should be called with
 * drm_sched_job_init() under a common lock.
 */
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
			       struct drm_sched_entity *entity)
{
	bool first;

	trace_drm_sched_job(sched_job, entity);
	atomic_inc(&entity->rq->sched->score);
	WRITE_ONCE(entity->last_user, current->group_leader);
	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);

	/* The first job wakes up the scheduler */
	if (first) {
		/* Add the entity to the run queue */
		spin_lock(&entity->rq_lock);
		if (entity->stopped) {
			spin_unlock(&entity->rq_lock);

			DRM_ERROR("Trying to push to a killed entity\n");
			return;
		}
		drm_sched_rq_add_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);
		drm_sched_wakeup(entity->rq->sched);
	}
}
EXPORT_SYMBOL(drm_sched_entity_push_job);
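
/*
 * Usage sketch (illustrative, hypothetical names): the pairing required by
 * the note above, with drm_sched_job_init() and the push done under one
 * common lock so queue order matches fence sequence numbers:
 *
 *	mutex_lock(&my_ctx->submit_lock);
 *	r = drm_sched_job_init(&job->base, &my_ctx->entity, owner);
 *	if (!r)
 *		drm_sched_entity_push_job(&job->base, &my_ctx->entity);
 *	mutex_unlock(&my_ctx->submit_lock);
 */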