/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _DRM_GPU_SCHEDULER_H_
#define _DRM_GPU_SCHEDULER_H_

#include <drm/spsc_queue.h>
#include <linux/dma-fence.h>
#include <linux/completion.h>

#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)

struct drm_gpu_scheduler;
struct drm_sched_rq;

/* These are often used as an (initial) index
 * to an array, and as such should start at 0.
 */
enum drm_sched_priority {
	DRM_SCHED_PRIORITY_MIN,
	DRM_SCHED_PRIORITY_NORMAL,
	DRM_SCHED_PRIORITY_HIGH,
	DRM_SCHED_PRIORITY_KERNEL,

	DRM_SCHED_PRIORITY_COUNT,
	DRM_SCHED_PRIORITY_UNSET = -2
};

/**
 * struct drm_sched_entity - A wrapper around a job queue (typically
 * attached to the DRM file_priv).
 *
 * @list: used to append this struct to the list of entities in the
 *        runqueue.
 * @rq: runqueue on which this entity is currently scheduled.
 * @sched_list: A list of schedulers (drm_gpu_schedulers).
 *              Jobs from this entity can be scheduled on any scheduler
 *              on this list.
 * @num_sched_list: number of drm_gpu_schedulers in the sched_list.
 * @priority: priority of the entity.
 * @rq_lock: lock to modify the runqueue to which this entity belongs.
 * @job_queue: the list of jobs of this entity.
 * @fence_seq: a linearly increasing seqno incremented with each
 *             new &drm_sched_fence which is part of the entity.
 * @fence_context: a unique context for all the fences which belong
 *                 to this entity.
 *                 The &drm_sched_fence.scheduled uses the
 *                 fence_context but &drm_sched_fence.finished uses
 *                 fence_context + 1.
 * @dependency: the dependency fence of the job which is on the top
 *              of the job queue.
 * @cb: callback for the dependency fence above.
 * @guilty: points to the context's guilty flag.
 * @last_scheduled: points to the finished fence of the last scheduled job.
 * @last_user: last group leader pushing a job into the entity.
 * @stopped: marks the entity as removed from the rq and destined for
 *           termination.
 * @entity_idle: signals when the entity is not in use.
 *
 * Entities will emit jobs in order to their corresponding hardware
 * ring, and the scheduler will alternate between entities based on
 * scheduling policy.
 */
struct drm_sched_entity {
	struct list_head		list;
	struct drm_sched_rq		*rq;
	struct drm_gpu_scheduler        **sched_list;
	unsigned int                    num_sched_list;
	enum drm_sched_priority         priority;
	spinlock_t			rq_lock;

	struct spsc_queue		job_queue;

	atomic_t			fence_seq;
	uint64_t			fence_context;

	struct dma_fence		*dependency;
	struct dma_fence_cb		cb;
	atomic_t			*guilty;
	struct dma_fence                *last_scheduled;
	struct task_struct		*last_user;
	bool				stopped;
	struct completion		entity_idle;
};
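
/*
 * Example (an illustrative sketch only, not part of this API): a driver
 * would typically embed a &struct drm_sched_entity in its per-context or
 * per-file state and initialize it with the set of schedulers it may run
 * on, using drm_sched_entity_init() declared later in this header. The
 * my_ctx and my_dev types below are hypothetical:
 *
 *	struct my_ctx {
 *		struct drm_sched_entity entity;
 *	};
 *
 *	static int my_ctx_init(struct my_ctx *ctx, struct my_dev *mdev)
 *	{
 *		struct drm_gpu_scheduler *sched_list[] = { &mdev->sched };
 *
 *		return drm_sched_entity_init(&ctx->entity,
 *					     DRM_SCHED_PRIORITY_NORMAL,
 *					     sched_list,
 *					     ARRAY_SIZE(sched_list),
 *					     NULL);
 *	}
 */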

/**
 * struct drm_sched_rq - queue of entities to be scheduled.
 *
 * @lock: to modify the entities list.
 * @sched: the scheduler to which this rq belongs.
 * @entities: list of the entities to be scheduled.
 * @current_entity: the entity which is to be scheduled.
 *
 * Run queue is a set of entities scheduling command submissions for
 * one specific ring. It implements the scheduling policy that selects
 * the next entity to emit commands from.
 */
struct drm_sched_rq {
	spinlock_t			lock;
	struct drm_gpu_scheduler	*sched;
	struct list_head		entities;
	struct drm_sched_entity		*current_entity;
};

/**
 * struct drm_sched_fence - fences corresponding to the scheduling of a job.
 */
struct drm_sched_fence {
	/**
	 * @scheduled: this fence is what will be signaled by the scheduler
	 * when the job is scheduled.
	 */
	struct dma_fence		scheduled;

	/**
	 * @finished: this fence is what will be signaled by the scheduler
	 * when the job is completed.
	 *
	 * When setting up an out fence for the job, you should use
	 * this, since it's available immediately upon
	 * drm_sched_job_init(), and the fence returned by the driver
	 * from run_job() won't be created until the dependencies have
	 * resolved.
	 */
	struct dma_fence		finished;

	/**
	 * @parent: the fence returned by &drm_sched_backend_ops.run_job
	 * when scheduling the job on hardware. We signal the
	 * &drm_sched_fence.finished fence once parent is signalled.
	 */
	struct dma_fence		*parent;
	/**
	 * @sched: the scheduler instance to which the job having this struct
	 * belongs.
	 */
	struct drm_gpu_scheduler	*sched;
	/**
	 * @lock: the lock used by the scheduled and the finished fences.
	 */
	spinlock_t			lock;
	/**
	 * @owner: job owner for debugging.
	 */
	void				*owner;
};

struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);

/**
 * struct drm_sched_job - A job to be run by an entity.
 *
 * @queue_node: used to append this struct to the queue of jobs in an entity.
 * @list: a job participates in the "pending" and "done" lists via this node.
 * @sched: the scheduler instance on which this job is scheduled.
 * @s_fence: contains the fences for the scheduling of the job.
 * @finish_cb: the callback for the finished fence.
 * @id: a unique id assigned to each job scheduled on the scheduler.
 * @karma: increment on every hang caused by this job. If this exceeds the hang
 *         limit of the scheduler then the job is marked guilty and will not
 *         be scheduled further.
 * @s_priority: the priority of the job.
 * @entity: the entity to which this job belongs.
 * @cb: the callback for the parent fence in s_fence.
 *
 * A job is created by the driver using drm_sched_job_init(), and the
 * driver should call drm_sched_entity_push_job() once it wants the
 * scheduler to schedule the job.
 */
struct drm_sched_job {
	struct spsc_node		queue_node;
	struct list_head		list;
	struct drm_gpu_scheduler	*sched;
	struct drm_sched_fence		*s_fence;
	struct dma_fence_cb		finish_cb;
	uint64_t			id;
	atomic_t			karma;
	enum drm_sched_priority		s_priority;
	struct drm_sched_entity         *entity;
	struct dma_fence_cb		cb;
};
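
/*
 * Example (an illustrative sketch only): the submission flow described
 * above, i.e. initialize the job against an entity, take a reference to
 * the &drm_sched_fence.finished fence for use as an out-fence, then push
 * the job. The my_job wrapper and the out_fence handling are hypothetical:
 *
 *	struct my_job {
 *		struct drm_sched_job base;
 *	};
 *
 *	static int my_submit(struct my_job *job,
 *			     struct drm_sched_entity *entity,
 *			     void *owner, struct dma_fence **out_fence)
 *	{
 *		int ret = drm_sched_job_init(&job->base, entity, owner);
 *
 *		if (ret)
 *			return ret;
 *
 *		*out_fence = dma_fence_get(&job->base.s_fence->finished);
 *		drm_sched_entity_push_job(&job->base, entity);
 *		return 0;
 *	}
 */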

/**
 * drm_sched_invalidate_job - check whether a job has exceeded the hang limit
 * @s_job: the job to check
 * @threshold: the scheduler's hang limit
 *
 * Increments the job's karma and returns true if it now exceeds @threshold,
 * i.e. the job should be considered guilty.
 */
static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
					    int threshold)
{
	return s_job && atomic_inc_return(&s_job->karma) > threshold;
}

/**
 * enum drm_gpu_sched_stat - status returned by the timeout handler
 *
 * @DRM_GPU_SCHED_STAT_NONE: Reserved. Do not use.
 * @DRM_GPU_SCHED_STAT_NOMINAL: All is normal; the driver has started or
 *                              completed recovery.
 * @DRM_GPU_SCHED_STAT_ENODEV: The device is no longer available, e.g. it
 *                             has been unplugged.
 */
enum drm_gpu_sched_stat {
	DRM_GPU_SCHED_STAT_NONE, /* Reserved. Do not use. */
	DRM_GPU_SCHED_STAT_NOMINAL,
	DRM_GPU_SCHED_STAT_ENODEV,
};

/**
 * struct drm_sched_backend_ops
 *
 * Define the backend operations called by the scheduler;
 * these functions should be implemented by the driver.
 */
struct drm_sched_backend_ops {
	/**
	 * @dependency: Called when the scheduler is considering scheduling
	 * this job next, to get another struct dma_fence for this job to
	 * block on.  Once it returns NULL, run_job() may be called.
	 */
	struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
					struct drm_sched_entity *s_entity);

	/**
	 * @run_job: Called to execute the job once all of the dependencies
	 * have been resolved.  This may be called multiple times, if
	 * timedout_job() has happened and drm_sched_job_recovery()
	 * decides to try it again.
	 */
	struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);

	/**
	 * @timedout_job: Called when a job has taken too long to execute,
	 * to trigger GPU recovery.
	 *
	 * This method is called in a workqueue context.
	 *
	 * Drivers typically issue a reset to recover from GPU hangs, and this
	 * procedure usually follows this workflow:
	 *
	 * 1. Stop the scheduler using drm_sched_stop(). This will park the
	 *    scheduler thread and cancel the timeout work, guaranteeing that
	 *    nothing is queued while we reset the hardware queue.
	 * 2. Try to gracefully stop non-faulty jobs (optional).
	 * 3. Issue a GPU reset (driver-specific).
	 * 4. Re-submit jobs using drm_sched_resubmit_jobs().
	 * 5. Restart the scheduler using drm_sched_start(). At that point, new
	 *    jobs can be queued, and the scheduler thread is unblocked.
	 *
	 * A sketch of a handler following this workflow is given after this
	 * struct.
	 *
	 * Note that some GPUs have distinct hardware queues but need to reset
	 * the GPU globally, which requires extra synchronization between the
	 * timeout handlers of the different &drm_gpu_scheduler instances. One
	 * way to achieve this synchronization is to create an ordered
	 * workqueue (using alloc_ordered_workqueue()) at the driver level, and
	 * pass this queue to drm_sched_init(), to guarantee that timeout
	 * handlers are executed sequentially. The above workflow needs to be
	 * slightly adjusted in that case:
	 *
	 * 1. Stop all schedulers impacted by the reset using drm_sched_stop().
	 * 2. Try to gracefully stop non-faulty jobs on all queues impacted by
	 *    the reset (optional).
	 * 3. Issue a GPU reset on all faulty queues (driver-specific).
	 * 4. Re-submit jobs on all schedulers impacted by the reset using
	 *    drm_sched_resubmit_jobs().
	 * 5. Restart all schedulers that were stopped in step #1 using
	 *    drm_sched_start().
	 *
	 * Return DRM_GPU_SCHED_STAT_NOMINAL when all is normal and the
	 * underlying driver has started or completed recovery.
	 *
	 * Return DRM_GPU_SCHED_STAT_ENODEV if the device is no longer
	 * available, i.e. has been unplugged.
	 */
	enum drm_gpu_sched_stat (*timedout_job)(struct drm_sched_job *sched_job);

	/**
	 * @free_job: Called once the job's finished fence has been signaled
	 * and it's time to clean it up.
	 */
	void (*free_job)(struct drm_sched_job *sched_job);
};
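
/*
 * Example (an illustrative sketch only): a driver implements these hooks
 * and passes the ops table to drm_sched_init(). The my_* helpers, in
 * particular the hardware reset, are hypothetical. A single-queue
 * &drm_sched_backend_ops.timedout_job handler following the workflow
 * documented above could look roughly like this (step 2, gracefully
 * stopping non-faulty jobs, is optional and omitted here):
 *
 *	static enum drm_gpu_sched_stat my_timedout_job(struct drm_sched_job *bad)
 *	{
 *		struct drm_gpu_scheduler *sched = bad->sched;
 *
 *		drm_sched_stop(sched, bad);		// 1. park the scheduler
 *		drm_sched_increase_karma(bad);		//    mark the offending job
 *		my_hw_reset(sched);			// 3. driver-specific reset
 *		drm_sched_resubmit_jobs(sched);		// 4. re-queue pending jobs
 *		drm_sched_start(sched, true);		// 5. unpark the scheduler
 *
 *		return DRM_GPU_SCHED_STAT_NOMINAL;
 *	}
 *
 *	static const struct drm_sched_backend_ops my_sched_ops = {
 *		.dependency	= my_job_dependency,
 *		.run_job	= my_job_run,
 *		.timedout_job	= my_timedout_job,
 *		.free_job	= my_job_free,
 *	};
 */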

/**
 * struct drm_gpu_scheduler
 *
 * @ops: backend operations provided by the driver.
 * @hw_submission_limit: the max size of the hardware queue.
 * @timeout: the time after which a job is removed from the scheduler.
 * @name: name of the ring for which this scheduler is being used.
 * @sched_rq: array of run queues, one per priority level.
 * @wake_up_worker: the wait queue on which the scheduler sleeps until a job
 *                  is ready to be scheduled.
 * @job_scheduled: once @drm_sched_entity_do_release is called the scheduler
 *                 waits on this wait queue until all the scheduled jobs are
 *                 finished.
 * @hw_rq_count: the number of jobs currently in the hardware queue.
 * @job_id_count: used to assign a unique id to each job.
 * @timeout_wq: workqueue used to queue @work_tdr.
 * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the
 *            timeout interval is over.
 * @thread: the kthread on which the scheduler runs.
 * @pending_list: the list of jobs which are currently in the job queue.
 * @job_list_lock: lock to protect the pending_list.
 * @hang_limit: once the number of hangs caused by a job crosses this limit,
 *              the job is marked guilty and will no longer be considered
 *              for scheduling.
 * @score: score to help the load balancer pick an idle scheduler.
 * @_score: score used when the driver doesn't provide one.
 * @ready: marks whether the underlying HW is ready to work.
 * @free_guilty: a hint to the timeout handler to free the guilty job.
 *
 * One scheduler is implemented for each hardware ring.
 */
struct drm_gpu_scheduler {
	const struct drm_sched_backend_ops	*ops;
	uint32_t			hw_submission_limit;
	long				timeout;
	const char			*name;
	struct drm_sched_rq		sched_rq[DRM_SCHED_PRIORITY_COUNT];
	wait_queue_head_t		wake_up_worker;
	wait_queue_head_t		job_scheduled;
	atomic_t			hw_rq_count;
	atomic64_t			job_id_count;
	struct workqueue_struct		*timeout_wq;
	struct delayed_work		work_tdr;
	struct task_struct		*thread;
	struct list_head		pending_list;
	spinlock_t			job_list_lock;
	int				hang_limit;
	atomic_t                        *score;
	atomic_t                        _score;
	bool				ready;
	bool				free_guilty;
};

int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   uint32_t hw_submission, unsigned hang_limit,
		   long timeout, struct workqueue_struct *timeout_wq,
		   atomic_t *score, const char *name);

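/*
 * Example (an illustrative sketch only): one scheduler is typically
 * initialized per hardware ring. Passing an ordered workqueue as the
 * timeout_wq serializes the timeout handlers of several schedulers, as
 * described in &drm_sched_backend_ops.timedout_job. The my_dev and
 * my_sched_ops names and the numeric limits are hypothetical:
 *
 *	static int my_sched_init(struct my_dev *mdev)
 *	{
 *		mdev->reset_wq = alloc_ordered_workqueue("my-reset", 0);
 *		if (!mdev->reset_wq)
 *			return -ENOMEM;
 *
 *		return drm_sched_init(&mdev->sched, &my_sched_ops,
 *				      64,			// hw_submission
 *				      2,			// hang_limit
 *				      msecs_to_jiffies(500),	// timeout
 *				      mdev->reset_wq,		// timeout_wq
 *				      NULL,			// score
 *				      "my-ring");
 *	}
 */
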
void drm_sched_fini(struct drm_gpu_scheduler *sched);
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_sched_entity *entity,
		       void *owner);
void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
				   struct drm_gpu_scheduler **sched_list,
				   unsigned int num_sched_list);

void drm_sched_job_cleanup(struct drm_sched_job *job);
void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
void drm_sched_resubmit_jobs_ext(struct drm_gpu_scheduler *sched, int max);
void drm_sched_increase_karma(struct drm_sched_job *bad);
void drm_sched_reset_karma(struct drm_sched_job *bad);
void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type);
bool drm_sched_dependency_optimized(struct dma_fence *fence,
				    struct drm_sched_entity *entity);
void drm_sched_fault(struct drm_gpu_scheduler *sched);
void drm_sched_job_kickout(struct drm_sched_job *s_job);

void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
			     struct drm_sched_entity *entity);
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				struct drm_sched_entity *entity);

int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty);
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
void drm_sched_entity_fini(struct drm_sched_entity *entity);
void drm_sched_entity_destroy(struct drm_sched_entity *entity);
void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
			       struct drm_sched_entity *entity);
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority);
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);

struct drm_sched_fence *drm_sched_fence_create(
	struct drm_sched_entity *s_entity, void *owner);
void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
void drm_sched_fence_finished(struct drm_sched_fence *fence);

unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
			      unsigned long remaining);
struct drm_gpu_scheduler *
drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
		    unsigned int num_sched_list);

#endif