/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

#define CREATE_TRACE_POINTS
#include "gpu_sched_trace.h"

static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);

struct kmem_cache *sched_fence_slab;
atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);

/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->current_entity = NULL;
}

static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
				    struct amd_sched_entity *entity)
{
	spin_lock(&rq->lock);
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}

static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
				       struct amd_sched_entity *entity)
{
	spin_lock(&rq->lock);
	list_del_init(&entity->list);
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
	spin_unlock(&rq->lock);
}
/**
 * Select an entity which could provide a job to run
 *
 * @rq The run queue to check.
 *
 * Try to find a ready entity, returns NULL if none found.
 */
static struct amd_sched_entity *
amd_sched_rq_select_entity(struct amd_sched_rq *rq)
{
	struct amd_sched_entity *entity;

	spin_lock(&rq->lock);

	/* Continue round robin after the entity picked last time */
	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (amd_sched_entity_is_ready(entity)) {
				rq->current_entity = entity;
				spin_unlock(&rq->lock);
				return entity;
			}
		}
	}

	/* Wrap around, stopping once we reach the starting point again */
	list_for_each_entry(entity, &rq->entities, list) {

		if (amd_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			spin_unlock(&rq->lock);
			return entity;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}

/**
 * Init a context entity used by the scheduler when submitting jobs to a HW ring.
 *
 * @sched The pointer to the scheduler
 * @entity The pointer to a valid amd_sched_entity
 * @rq The run queue this entity belongs to
 * @jobs The max number of jobs in the job queue
 *
 * Return 0 on success, negative error code on failure.
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity,
			  struct amd_sched_rq *rq,
			  uint32_t jobs)
{
	int r;

	if (!(sched && entity && rq))
		return -EINVAL;

	memset(entity, 0, sizeof(struct amd_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = rq;
	entity->sched = sched;

	spin_lock_init(&entity->queue_lock);
	r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
	if (r)
		return r;

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = fence_context_alloc(1);

	/* Add the entity to the run queue */
	amd_sched_rq_add_entity(rq, entity);

	return 0;
}
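
/*
 * Typical driver-side usage of amd_sched_entity_init(), as a rough sketch
 * only (the run queue and queue depth are driver policy, and "ring" here
 * stands for whatever driver structure embeds the scheduler):
 *
 *	struct amd_sched_entity entity;
 *	int r;
 *
 *	r = amd_sched_entity_init(&ring->sched, &entity,
 *				  &ring->sched.sched_rq, 32);
 *	if (r)
 *		return r;
 */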

/**
 * Query if entity is initialized
 *
 * @sched Pointer to scheduler instance
 * @entity The pointer to a valid scheduler entity
 *
 * Return true if entity is initialized, false otherwise
 */
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
					    struct amd_sched_entity *entity)
{
	return entity->sched == sched &&
		entity->rq != NULL;
}

/**
 * Check if entity is idle
 *
 * @entity The pointer to a valid scheduler entity
 *
 * Return true if the entity doesn't have any unscheduled jobs.
 */
static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
{
	/* Make sure we see the latest job queue update before checking */
	rmb();
	if (kfifo_is_empty(&entity->job_queue))
		return true;

	return false;
}

/**
 * Check if entity is ready
 *
 * @entity The pointer to a valid scheduler entity
 *
 * Return true if entity could provide a job.
 */
static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
{
	if (kfifo_is_empty(&entity->job_queue))
		return false;

	if (ACCESS_ONCE(entity->dependency))
		return false;

	return true;
}

/**
 * Destroy a context entity
 *
 * @sched Pointer to scheduler instance
 * @entity The pointer to a valid scheduler entity
 *
 * Cleanup and free the allocated resources.
 */
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
			   struct amd_sched_entity *entity)
{
	struct amd_sched_rq *rq = entity->rq;

	if (!amd_sched_entity_is_initialized(sched, entity))
		return;

	/*
	 * The client will not queue more IBs during this fini; consume the
	 * existing queued IBs.
	 */
	wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));

	amd_sched_rq_remove_entity(rq, entity);
	kfifo_free(&entity->job_queue);
}
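
/*
 * Note that the wait_event() above is only woken from the scheduler thread,
 * so drivers are expected to tear down all of their entities before calling
 * amd_sched_fini() on the scheduler itself. A sketch of the expected
 * ordering (not enforced by this code):
 *
 *	amd_sched_entity_fini(&ring->sched, &entity);
 *	...
 *	amd_sched_fini(&ring->sched);
 */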

static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_entity *entity =
		container_of(cb, struct amd_sched_entity, cb);
	entity->dependency = NULL;
	fence_put(f);
	amd_sched_wakeup(entity->sched);
}

static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct fence *fence = entity->dependency;
	struct amd_sched_fence *s_fence;

	if (fence->context == entity->fence_context) {
		/* We can ignore fences from ourself */
		fence_put(entity->dependency);
		return false;
	}

	s_fence = to_amd_sched_fence(fence);
	if (s_fence && s_fence->sched == sched) {
		/* Fence is from the same scheduler */
		if (test_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &fence->flags)) {
			/* Ignore it when it is already scheduled */
			fence_put(entity->dependency);
			return false;
		}

		/* Wait for fence to be scheduled */
		entity->cb.func = amd_sched_entity_wakeup;
		list_add_tail(&entity->cb.node, &s_fence->scheduled_cb);
		return true;
	}

	if (!fence_add_callback(entity->dependency, &entity->cb,
				amd_sched_entity_wakeup))
		return true;

	fence_put(entity->dependency);
	return false;
}

static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct amd_sched_job *sched_job;

	if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
		return NULL;

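	/*
	 * Resolve dependencies one by one: if a dependency is still pending,
	 * a wakeup callback is installed and we back off until it signals.
	 */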
	while ((entity->dependency = sched->ops->dependency(sched_job)))
		if (amd_sched_entity_add_dependency_cb(entity))
			return NULL;

	return sched_job;
}

/**
 * Helper to submit a job to the job queue
 *
 * @sched_job The pointer to the job to submit
 *
 * Returns true if we could submit the job.
 */
static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
{
	struct amd_gpu_scheduler *sched = sched_job->sched;
	struct amd_sched_entity *entity = sched_job->s_entity;
	bool added, first = false;

	spin_lock(&entity->queue_lock);
	added = kfifo_in(&entity->job_queue, &sched_job,
			 sizeof(sched_job)) == sizeof(sched_job);

	if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
		first = true;

	spin_unlock(&entity->queue_lock);

	/* first job wakes up scheduler */
	if (first)
		amd_sched_wakeup(sched);

	return added;
}

/**
 * Submit a job to the job queue
 *
 * @sched_job The pointer to the job to submit
 *
 * Blocks until the job could be queued to the entity.
 */
void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
	struct amd_sched_entity *entity = sched_job->s_entity;

	trace_amd_sched_job(sched_job);
	wait_event(entity->sched->job_scheduled,
		   amd_sched_entity_in(sched_job));
}
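
/*
 * A minimal submission sketch from the driver side, assuming the entity was
 * set up with amd_sched_entity_init() and the driver embeds struct
 * amd_sched_job in its own job structure (field names follow the amdgpu
 * pattern but are illustrative, not mandated by this file):
 *
 *	job->base.sched = &ring->sched;
 *	job->base.s_entity = &entity;
 *	job->base.s_fence = amd_sched_fence_create(&entity, owner);
 *	if (!job->base.s_fence)
 *		return -ENOMEM;
 *	amd_sched_entity_push_job(&job->base);
 */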

/**
 * Return true if we can push more jobs to the hw.
 */
static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}

/**
 * Wake up the scheduler when it is ready
 */
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
{
	if (amd_sched_ready(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * Select next entity to process
 */
static struct amd_sched_entity *
amd_sched_select_entity(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_entity *entity;

	if (!amd_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	entity = amd_sched_rq_select_entity(&sched->kernel_rq);
	if (entity == NULL)
		entity = amd_sched_rq_select_entity(&sched->sched_rq);

	return entity;
}

static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_fence *s_fence =
		container_of(cb, struct amd_sched_fence, cb);
	struct amd_gpu_scheduler *sched = s_fence->sched;
	unsigned long flags;

	atomic_dec(&sched->hw_rq_count);
	amd_sched_fence_signal(s_fence);
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
		cancel_delayed_work(&s_fence->dwork);
		spin_lock_irqsave(&sched->fence_list_lock, flags);
		list_del_init(&s_fence->list);
		spin_unlock_irqrestore(&sched->fence_list_lock, flags);
	}
	trace_amd_sched_process_job(s_fence);
	fence_put(&s_fence->base);
	wake_up_interruptible(&sched->wake_up_worker);
}

static void amd_sched_fence_work_func(struct work_struct *work)
{
	struct amd_sched_fence *s_fence =
		container_of(work, struct amd_sched_fence, dwork.work);
	struct amd_gpu_scheduler *sched = s_fence->sched;
	struct amd_sched_fence *entity, *tmp;
	unsigned long flags;

	DRM_ERROR("[%s] scheduler timed out!\n", sched->name);

	/* Clean all pending fences */
	spin_lock_irqsave(&sched->fence_list_lock, flags);
	list_for_each_entry_safe(entity, tmp, &sched->fence_list, list) {
		DRM_ERROR(" fence no %d\n", entity->base.seqno);
		cancel_delayed_work(&entity->dwork);
		list_del_init(&entity->list);
		fence_put(&entity->base);
	}
	spin_unlock_irqrestore(&sched->fence_list_lock, flags);
}

static int amd_sched_main(void *param)
{
	struct sched_param sparam = {.sched_priority = 1};
	struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
	int r, count;

	spin_lock_init(&sched->fence_list_lock);
	INIT_LIST_HEAD(&sched->fence_list);
	sched_setscheduler(current, SCHED_FIFO, &sparam);

	while (!kthread_should_stop()) {
		struct amd_sched_entity *entity;
		struct amd_sched_fence *s_fence;
		struct amd_sched_job *sched_job;
		struct fence *fence;
		unsigned long flags;

		wait_event_interruptible(sched->wake_up_worker,
			(entity = amd_sched_select_entity(sched)) ||
			kthread_should_stop());

		if (!entity)
			continue;

		sched_job = amd_sched_entity_pop_job(entity);
		if (!sched_job)
			continue;

		s_fence = sched_job->s_fence;

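		/* Arm a per-fence timeout watchdog if a finite timeout is set */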
		if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
			INIT_DELAYED_WORK(&s_fence->dwork, amd_sched_fence_work_func);
			schedule_delayed_work(&s_fence->dwork, sched->timeout);
			spin_lock_irqsave(&sched->fence_list_lock, flags);
			list_add_tail(&s_fence->list, &sched->fence_list);
			spin_unlock_irqrestore(&sched->fence_list_lock, flags);
		}

		atomic_inc(&sched->hw_rq_count);
		fence = sched->ops->run_job(sched_job);
		amd_sched_fence_scheduled(s_fence);
		if (fence) {
			r = fence_add_callback(fence, &s_fence->cb,
					       amd_sched_process_job);
			if (r == -ENOENT)
				amd_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n", r);
			fence_put(fence);
		} else {
			DRM_ERROR("Failed to run job!\n");
			amd_sched_process_job(NULL, &s_fence->cb);
		}

		count = kfifo_out(&entity->job_queue, &sched_job,
				  sizeof(sched_job));
		WARN_ON(count != sizeof(sched_job));
		wake_up(&sched->job_scheduled);
	}
	return 0;
}

/**
 * Init a gpu scheduler instance
 *
 * @sched The pointer to the scheduler
 * @ops The backend operations for this scheduler.
 * @hw_submission Max number of jobs submitted to the hw at the same time.
 * @timeout Job timeout in jiffies, or MAX_SCHEDULE_TIMEOUT for none.
 * @name Name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int amd_sched_init(struct amd_gpu_scheduler *sched,
		   struct amd_sched_backend_ops *ops,
		   unsigned hw_submission, long timeout, const char *name)
{
	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	sched->timeout = timeout;
	amd_sched_rq_init(&sched->sched_rq);
	amd_sched_rq_init(&sched->kernel_rq);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	atomic_set(&sched->hw_rq_count, 0);
	if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
		sched_fence_slab = kmem_cache_create(
			"amd_sched_fence", sizeof(struct amd_sched_fence), 0,
			SLAB_HWCACHE_ALIGN, NULL);
		if (!sched_fence_slab)
			return -ENOMEM;
	}

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		DRM_ERROR("Failed to create scheduler for %s.\n", name);
		return PTR_ERR(sched->thread);
	}

	return 0;
}
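
/*
 * A rough initialization sketch from a driver's ring setup, assuming the
 * driver supplies its own amd_sched_backend_ops table (my_sched_ops, the
 * submission depth and the timeout below are illustrative values, not
 * requirements of this API):
 *
 *	r = amd_sched_init(&ring->sched, &my_sched_ops,
 *			   16, msecs_to_jiffies(10000), ring->name);
 *	if (r)
 *		return r;
 */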

/**
 * Destroy a gpu scheduler
 *
 * @sched The pointer to the scheduler
 */
void amd_sched_fini(struct amd_gpu_scheduler *sched)
{
	if (sched->thread)
		kthread_stop(sched->thread);
	if (atomic_dec_and_test(&sched_fence_slab_ref))
		kmem_cache_destroy(sched_fence_slab);
}