/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 *
 * (C) COPYRIGHT 2020-2021 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

/*
 * Job Scheduler Interface.
 * These interfaces are Internal to KBase.
 */

#ifndef _KBASE_JM_JS_H_
#define _KBASE_JM_JS_H_

#include "mali_kbase_js_ctx_attr.h"

/**
 * kbasep_js_devdata_init - Initialize the Job Scheduler
 * @kbdev: The kbase_device to operate on
 *
 * The struct kbasep_js_device_data sub-structure of kbdev must be zero
 * initialized before passing to the kbasep_js_devdata_init() function. This is
 * to give efficient error path code.
 */
int kbasep_js_devdata_init(struct kbase_device * const kbdev);

/**
 * kbasep_js_devdata_halt - Halt the Job Scheduler.
 * @kbdev: The kbase_device to operate on
 *
 * It is safe to call this on kbdev even if the kbasep_js_device_data
 * sub-structure was never initialized or failed initialization, to give
 * efficient error-path code.
 *
 * For this to work, the struct kbasep_js_device_data sub-structure of kbdev
 * must be zero initialized before passing to the kbasep_js_devdata_init()
 * function. This is to give efficient error path code.
 *
 * It is a programming error to call this whilst there are still kbase_context
 * structures registered with this scheduler.
 */
void kbasep_js_devdata_halt(struct kbase_device *kbdev);

/**
 * kbasep_js_devdata_term - Terminate the Job Scheduler
 * @kbdev: The kbase_device to operate on
 *
 * It is safe to call this on kbdev even if the kbasep_js_device_data
 * sub-structure was never initialized or failed initialization, to give
 * efficient error-path code.
 *
 * For this to work, the struct kbasep_js_device_data sub-structure of kbdev
 * must be zero initialized before passing to the kbasep_js_devdata_init()
 * function. This is to give efficient error path code.
 *
 * It is a programming error to call this whilst there are still kbase_context
 * structures registered with this scheduler.
 */
void kbasep_js_devdata_term(struct kbase_device *kbdev);

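/*
 * Illustrative sketch only (hypothetical device-probe caller, not part of this
 * interface): because the kbasep_js_device_data sub-structure must be zero
 * initialized before kbasep_js_devdata_init() (for example when kbdev was
 * allocated with kzalloc()), a shared teardown path can call the halt/term
 * functions unconditionally, even if initialization failed or never ran.
 * other_init_steps() is a hypothetical later stage of device init.
 *
 *	err = kbasep_js_devdata_init(kbdev);
 *	if (err)
 *		goto out_term;
 *	err = other_init_steps(kbdev);
 *	if (err)
 *		goto out_term;
 *	return 0;
 *
 * out_term:
 *	kbasep_js_devdata_halt(kbdev);
 *	kbasep_js_devdata_term(kbdev);
 *	return err;
 */
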
/**
 * kbasep_js_kctx_init - Initialize the Scheduling Component of a
 *                       struct kbase_context on the Job Scheduler.
 * @kctx:  The kbase_context to operate on
 *
 * This effectively registers a struct kbase_context with a Job Scheduler.
 *
 * It does not register any jobs owned by the struct kbase_context with
 * the scheduler. Those must be separately registered by kbasep_js_add_job().
 *
 * The struct kbase_context must be zero initialized before passing to the
 * kbasep_js_kctx_init() function. This is to give efficient error path code.
 */
int kbasep_js_kctx_init(struct kbase_context *const kctx);

/**
 * kbasep_js_kctx_term - Terminate the Scheduling Component of a
 *                       struct kbase_context on the Job Scheduler
 * @kctx:  The kbase_context to operate on
 *
 * This effectively de-registers a struct kbase_context from its Job Scheduler.
 *
 * It is safe to call this on a struct kbase_context that has never had or
 * failed initialization of its jctx.sched_info member, to give efficient
 * error-path code.
 *
 * For this to work, the struct kbase_context must be zero initialized before
 * passing to the kbasep_js_kctx_init() function.
 *
 * It is a programming error to call this whilst there are still jobs
 * registered with this context.
 */
void kbasep_js_kctx_term(struct kbase_context *kctx);

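/*
 * Illustrative sketch only (hypothetical context-creation caller): as with the
 * device data above, the zero-initialization precondition lets a shared
 * context teardown path call kbasep_js_kctx_term() even if
 * kbasep_js_kctx_init() failed or was never reached.
 *
 *	err = kbasep_js_kctx_init(kctx);
 *	if (err)
 *		goto out_term;
 *	...
 *	return 0;
 *
 * out_term:
 *	kbasep_js_kctx_term(kctx);
 *	return err;
 */
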
/* kbase_jsctx_slot_prio_blocked_set - Set a context as being blocked for a job
 *                                     slot at and below a given priority level
 * @kctx: The kbase_context
 * @js: The job slot
 * @sched_prio: The priority level that the context is blocked at for @js (all
 *              priority levels at and below this level will be blocked)
 *
 * To preserve ordering and dependencies of atoms on soft-stopping (both within
 * and between priority levels), a context must be marked as blocked for that
 * atom's job slot, for all priority levels at or below the atom's priority.
 *
 * This must only be called due to an atom that was pulled from the context,
 * otherwise there will be no way of unblocking the context when the atom is
 * completed/unpulled.
 *
 * Atoms of higher priority might still be able to be pulled from the context
 * on @js. This helps with starting a high priority atom as soon as possible.
 */
static inline void kbase_jsctx_slot_prio_blocked_set(struct kbase_context *kctx,
						     int js, int sched_prio)
{
	struct kbase_jsctx_slot_tracking *slot_tracking =
		&kctx->slot_tracking[js];

	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
	WARN(!slot_tracking->atoms_pulled_pri[sched_prio],
	     "When marking slot %d as blocked for priority %d on a kctx, no atoms were pulled - the slot cannot become unblocked",
	     js, sched_prio);

	slot_tracking->blocked |= ((kbase_js_prio_bitmap_t)1) << sched_prio;
	KBASE_KTRACE_ADD_JM_SLOT_INFO(kctx->kbdev, JS_SLOT_PRIO_BLOCKED, kctx,
				      NULL, 0, js, (unsigned int)sched_prio);
}

/* kbase_jsctx_atoms_pulled - Return number of atoms pulled on a context
 * @kctx: The kbase_context
 *
 * Having atoms pulled indicates the context is not idle.
 *
 * Return: the number of atoms pulled on @kctx
 */
static inline int kbase_jsctx_atoms_pulled(struct kbase_context *kctx)
{
	return atomic_read(&kctx->atoms_pulled_all_slots);
}

/**
 * kbasep_js_add_job - Add a job chain to the Job Scheduler,
 *                     and take necessary actions to
 *                     schedule the context/run the job.
 * @kctx:  The kbase_context to operate on
 * @atom: Atom to add
 *
 * This atomically does the following:
 * * Update the job count information
 * * Add the job to the run pool if necessary (part of init_job)
 *
 * Once this is done, then an appropriate action is taken:
 * * If the ctx is scheduled, it attempts to start the next job (which might be
 * this added job)
 * * Otherwise, and if this is the first job on the context, it enqueues it on
 * the Policy Queue
 *
 * The Policy's Queue can be updated by this in the following ways:
 * * In the above case that this is the first job on the context
 * * If the context is high priority and the context is not scheduled, then it
 * could cause the Policy to schedule out a low-priority context, allowing
 * this context to be scheduled in.
 *
 * If the context is already scheduled on the RunPool, then adding a job to it
 * is guaranteed not to update the Policy Queue. And so, the caller is
 * guaranteed to not need to try scheduling a context from the Run Pool - it
 * can safely assert that the result is false.
 *
 * It is a programming error to have more than U32_MAX jobs in flight at a time.
 *
 * The following locking conditions are made on the caller:
 * * it must not hold kbasep_js_kctx_info::ctx::jsctx_mutex.
 * * it must not hold hwaccess_lock (as this will be obtained internally)
 * * it must not hold kbasep_js_device_data::runpool_mutex (as this will be
 * obtained internally)
 * * it must not hold kbasep_jd_device_data::queue_mutex (again, it's used
 * internally).
 *
 * Return: true indicates that the Policy Queue was updated, and so the
 * caller will need to try scheduling a context onto the Run Pool;
 * false indicates that no updates were made to the Policy Queue,
 * so no further action is required from the caller. false is always returned
 * when the context is currently scheduled.
 */
bool kbasep_js_add_job(struct kbase_context *kctx, struct kbase_jd_atom *atom);

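/*
 * Illustrative sketch only (hypothetical caller): acting on the return value
 * as described above - when the Policy Queue was updated, try to schedule a
 * context onto the Run Pool by submitting to all job slots.
 *
 *	if (kbasep_js_add_job(kctx, katom))
 *		kbase_js_sched_all(kctx->kbdev);
 */
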
/**
 * kbasep_js_remove_job - Remove a job chain from the Job Scheduler,
 *                        except for its 'retained state'.
 * @kbdev: The kbase_device to operate on
 * @kctx:  The kbase_context to operate on
 * @atom: Atom to remove
 *
 * Completely removing a job requires several calls:
 * * kbasep_js_atom_retained_state_copy(), to capture the 'retained state' of
 *   the atom
 * * kbasep_js_remove_job(), to partially remove the atom from the Job Scheduler
 * * kbasep_js_runpool_release_ctx_and_katom_retained_state(), to release the
 *   remaining state held as part of the job having been run.
 *
 * In the common case of atoms completing normally, this set of actions is more
 * efficient for spinlock purposes than having kbasep_js_remove_job() handle all
 * of the actions.
 *
 * In the case of cancelling atoms, it is easier to call
 * kbasep_js_remove_cancelled_job(), which handles all the necessary actions.
 *
 * It is a programming error to call this when:
 * * the atom is not a job belonging to kctx.
 * * the atom has already been removed from the Job Scheduler.
 * * the atom is still in the runpool
 *
 * Do not use this for removing jobs being killed by kbase_jd_cancel() - use
 * kbasep_js_remove_cancelled_job() instead.
 *
 * The following locking conditions are made on the caller:
 * * it must hold kbasep_js_kctx_info::ctx::jsctx_mutex.
 */
void kbasep_js_remove_job(struct kbase_device *kbdev,
		struct kbase_context *kctx, struct kbase_jd_atom *atom);

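/*
 * Illustrative sketch only (hypothetical completion path), following the
 * three-step sequence described above for completely removing a job:
 *
 *	struct kbasep_js_atom_retained_state retained;
 *
 *	kbasep_js_atom_retained_state_copy(&retained, katom);
 *	kbasep_js_remove_job(kbdev, kctx, katom);
 *	...
 *	kbasep_js_runpool_release_ctx_and_katom_retained_state(kbdev, kctx,
 *							       &retained);
 */
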
/**
 * kbasep_js_remove_cancelled_job - Completely remove a job chain from the
 *                                  Job Scheduler, in the case
 *                                  where the job chain was cancelled.
 * @kbdev: The kbase_device to operate on
 * @kctx:  The kbase_context to operate on
 * @katom: Atom to remove
 *
 * This is a variant of kbasep_js_remove_job() that takes care of removing all
 * of the retained state too. This is generally useful for cancelled atoms,
 * which need not be handled in an optimal way.
 *
 * It is a programming error to call this when:
 * * the atom is not a job belonging to kctx.
 * * the atom has already been removed from the Job Scheduler.
 * * the atom is still in the runpool:
 *  * it is not being killed with kbasep_jd_cancel()
 *
 * The following locking conditions are made on the caller:
 * * it must hold kbasep_js_kctx_info::ctx::jsctx_mutex.
 * * it must not hold the hwaccess_lock (as this will be obtained
 *   internally)
 * * it must not hold kbasep_js_device_data::runpool_mutex (as this could be
 * obtained internally)
 *
 * Return: true if ctx attributes have changed and the caller should call
 * kbase_js_sched_all() to try to run more jobs; false otherwise.
 */
bool kbasep_js_remove_cancelled_job(struct kbase_device *kbdev,
		struct kbase_context *kctx,
		struct kbase_jd_atom *katom);

/**
 * kbasep_js_runpool_requeue_or_kill_ctx - Handle the requeuing/killing of a
 *                                         context that was evicted from the
 *                                         policy queue or runpool.
 * @kbdev: The kbase_device to operate on
 * @kctx:  The kbase_context to operate on
 * @has_pm_ref: whether to release the Power Manager active reference
 *
 * This should be used whenever handing off a context that has been evicted
 * from the policy queue or the runpool:
 * * If the context is not dying and has jobs, it gets re-added to the policy
 * queue
 * * Otherwise, it is not added
 *
 * In addition, if the context is dying the jobs are killed asynchronously.
 *
 * In all cases, the Power Manager active reference is released
 * (kbase_pm_context_idle()) whenever the has_pm_ref parameter is true.
 * has_pm_ref must be set to false whenever the context was not previously in
 * the runpool and does not hold a Power Manager active refcount. Note that
 * contexts in a rollback of kbasep_js_try_schedule_head_ctx() might have an
 * active refcount even though they weren't in the runpool.
 *
 * The following locking conditions are made on the caller:
 * * it must hold kbasep_js_kctx_info::ctx::jsctx_mutex.
 * * it must not hold kbasep_jd_device_data::queue_mutex (as this will be
 * obtained internally)
 */
void kbasep_js_runpool_requeue_or_kill_ctx(struct kbase_device *kbdev,
		struct kbase_context *kctx, bool has_pm_ref);

/**
 * kbasep_js_runpool_release_ctx - Release a refcount of a context being busy,
 *                                 allowing it to be scheduled out.
 * @kbdev: The kbase_device to operate on
 * @kctx:  The kbase_context to operate on
 *
 * When the refcount reaches zero, the context might be scheduled out
 * (depending on whether the Scheduling Policy has deemed it so, or if it has
 * run out of jobs).
 *
 * If the context does get scheduled out, then the following actions will be
 * taken as part of descheduling the context:
 * For the context being descheduled:
 * * If the context is in the process of dying (all the jobs are being
 * removed from it), then descheduling also kills off any jobs remaining in the
 * context.
 * * If the context is not dying, and any jobs remain after descheduling the
 * context then it is re-enqueued to the Policy's Queue.
 * * Otherwise, the context is still known to the scheduler, but remains absent
 * from the Policy Queue until a job is next added to it.
 * * In all descheduling cases, the Power Manager active reference (obtained
 * during kbasep_js_try_schedule_head_ctx()) is released
 * (kbase_pm_context_idle()).
 *
 * Whilst the context is being descheduled, this also handles actions that
 * cause more atoms to be run:
 * * Attempt submitting atoms when the Context Attributes on the Runpool have
 * changed. This is because the context being scheduled out could mean that
 * there are more opportunities to run atoms.
 * * Attempt submitting to a slot that was previously blocked due to affinity
 * restrictions. This is usually only necessary when releasing a context
 * happens as part of completing a previous job, but is harmless nonetheless.
 * * Attempt scheduling in a new context (if one is available), and if
 * necessary, running a job from that new context.
 *
 * Unlike retaining a context in the runpool, this function cannot be called
 * from IRQ context.
 *
 * It is a programming error to call this on a kctx that is not currently
 * scheduled, or that already has a zero refcount.
 *
 * The following locking conditions are made on the caller:
 * * it must not hold the hwaccess_lock, because it will be used internally.
 * * it must not hold kbasep_js_kctx_info::ctx::jsctx_mutex.
 * * it must not hold kbasep_js_device_data::runpool_mutex (as this will be
 * obtained internally)
 * * it must not hold the kbase_device::mmu_hw_mutex (as this will be
 * obtained internally)
 * * it must not hold kbasep_jd_device_data::queue_mutex (as this will be
 * obtained internally)
 */
void kbasep_js_runpool_release_ctx(struct kbase_device *kbdev,
		struct kbase_context *kctx);

/**
 * kbasep_js_runpool_release_ctx_and_katom_retained_state - Variant of
 * kbasep_js_runpool_release_ctx() that handles additional
 * actions from completing an atom.
 * @kbdev:                KBase device
 * @kctx:                 KBase context
 * @katom_retained_state: Retained state from the atom
 *
 * This is usually called as part of completing an atom and releasing the
 * refcount on the context held by the atom.
 *
 * Therefore, the extra actions carried out are part of handling actions queued
 * on a completed atom, namely:
 * * Releasing the atom's context attributes
 * * Retrying the submission on a particular slot, because we couldn't submit
 * on that slot from an IRQ handler.
 *
 * The locking conditions of this function are the same as those for
 * kbasep_js_runpool_release_ctx().
 */
void kbasep_js_runpool_release_ctx_and_katom_retained_state(
		struct kbase_device *kbdev,
		struct kbase_context *kctx,
		struct kbasep_js_atom_retained_state *katom_retained_state);

/**
 * kbasep_js_runpool_release_ctx_nolock - Variant of
 * kbasep_js_runpool_release_ctx() without taking locks
 * @kbdev: KBase device
 * @kctx:  KBase context
 *
 * Variant of kbasep_js_runpool_release_ctx() that assumes that
 * kbasep_js_device_data::runpool_mutex and
 * kbasep_js_kctx_info::ctx::jsctx_mutex are held by the caller, and does not
 * attempt to schedule new contexts.
 */
void kbasep_js_runpool_release_ctx_nolock(struct kbase_device *kbdev,
		struct kbase_context *kctx);

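/*
 * Illustrative sketch only (hypothetical caller; assumes the jsctx_mutex
 * before runpool_mutex locking order used elsewhere in the driver):
 *
 *	mutex_lock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
 *	mutex_lock(&kbdev->js_data.runpool_mutex);
 *	kbasep_js_runpool_release_ctx_nolock(kbdev, kctx);
 *	mutex_unlock(&kbdev->js_data.runpool_mutex);
 *	mutex_unlock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
 */
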
/**
 * kbasep_js_schedule_privileged_ctx - Schedule in a privileged context
 * @kbdev: KBase device
 * @kctx:  KBase context
 *
 * This schedules a context in regardless of the context priority.
 * If the runpool is full, a context will be forced out of the runpool and the
 * function will wait for the new context to be scheduled in.
 * The context will be kept scheduled in (and the corresponding address space
 * reserved) until kbasep_js_release_privileged_ctx() is called.
 *
 * The following locking conditions are made on the caller:
 * * it must not hold the hwaccess_lock, because it will be used internally.
 * * it must not hold kbasep_js_device_data::runpool_mutex (as this will be
 * obtained internally)
 * * it must not hold the kbase_device::mmu_hw_mutex (as this will be
 * obtained internally)
 * * it must not hold kbasep_jd_device_data::queue_mutex (again, it's used
 * internally).
 * * it must not hold kbasep_js_kctx_info::ctx::jsctx_mutex, because it will
 * be used internally.
 */
void kbasep_js_schedule_privileged_ctx(struct kbase_device *kbdev,
		struct kbase_context *kctx);

/**
 * kbasep_js_release_privileged_ctx - Release a privileged context,
 * allowing it to be scheduled out.
 * @kbdev: KBase device
 * @kctx:  KBase context
 *
 * See kbasep_js_runpool_release_ctx for potential side effects.
 *
 * The following locking conditions are made on the caller:
 * * it must not hold the hwaccess_lock, because it will be used internally.
 * * it must not hold kbasep_js_kctx_info::ctx::jsctx_mutex.
 * * it must not hold kbasep_js_device_data::runpool_mutex (as this will be
 * obtained internally)
 * * it must not hold the kbase_device::mmu_hw_mutex (as this will be
 * obtained internally)
 */
void kbasep_js_release_privileged_ctx(struct kbase_device *kbdev,
		struct kbase_context *kctx);

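/*
 * Illustrative sketch only (hypothetical caller): bracketing an operation that
 * needs kctx to stay scheduled in, with its address space reserved, between
 * the schedule/release pair documented above.
 *
 *	kbasep_js_schedule_privileged_ctx(kbdev, kctx);
 *	...	do the work that requires the context to remain scheduled
 *	kbasep_js_release_privileged_ctx(kbdev, kctx);
 */
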
/**
 * kbase_js_try_run_jobs - Try to submit the next job on each slot
 * @kbdev: KBase device
 *
 * The following locks may be used:
 * * kbasep_js_device_data::runpool_mutex
 * * hwaccess_lock
 */
void kbase_js_try_run_jobs(struct kbase_device *kbdev);

/**
 * kbasep_js_suspend - Suspend the job scheduler during a Power Management
 *                     Suspend event.
 * @kbdev: KBase device
 *
 * Causes all contexts to be removed from the runpool, and prevents any
 * contexts from (re)entering the runpool.
 *
 * This does not handle suspending the one privileged context: the caller must
 * instead do this by suspending the GPU HW Counter Instrumentation.
 *
 * This will eventually cause all Power Management active references held by
 * contexts on the runpool to be released, without running any more atoms.
 *
 * The caller must then wait for all Power Management active refcounts to
 * become zero before completing the suspend.
 *
 * The emptying mechanism may take some time to complete, since it can wait for
 * jobs to complete naturally instead of forcing them to end quickly. However,
 * this is bounded by the Job Scheduler's Job Timeouts. Hence, this
 * function is guaranteed to complete in a finite time.
 */
void kbasep_js_suspend(struct kbase_device *kbdev);

/**
 * kbasep_js_resume - Resume the Job Scheduler after a Power Management
 *                    Resume event.
 * @kbdev: KBase device
 *
 * This reverses the actions of kbasep_js_suspend():
 * * Schedules contexts back into the runpool
 * * Resumes running atoms on the GPU
 */
void kbasep_js_resume(struct kbase_device *kbdev);

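/*
 * Illustrative sketch only (hypothetical Power Management suspend/resume path;
 * the mechanism for waiting on the PM active refcounts is elided):
 *
 *	kbasep_js_suspend(kbdev);
 *	...	wait for all Power Management active refcounts to reach zero
 *	...	perform the system suspend, and later the resume
 *	kbasep_js_resume(kbdev);
 */
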
/**
 * kbase_js_dep_resolved_submit - Submit an atom to the job scheduler.
 *
 * @kctx:  Context pointer
 * @katom:  Pointer to the atom to submit
 *
 * The atom is enqueued on the context's ringbuffer. The caller must have
 * ensured that all dependencies can be represented in the ringbuffer.
 *
 * The caller must hold jctx->lock.
 *
 * Return: true if the context needs to be enqueued, false otherwise.
 */
bool kbase_js_dep_resolved_submit(struct kbase_context *kctx,
		struct kbase_jd_atom *katom);

/**
 * jsctx_ll_flush_to_rb() - Push atoms from the linked list to the ringbuffer.
 * @kctx:  Context Pointer
 * @prio:  Priority (specifies the queue together with js).
 * @js:    Job slot (specifies the queue together with prio).
 *
 * Pushes as many atoms as possible from the linked list to the ringbuffer.
 * The number of atoms moved is limited by the free space in the ringbuffer
 * and by the number of atoms available in the linked list.
 */
void jsctx_ll_flush_to_rb(struct kbase_context *kctx, int prio, int js);

/**
 * kbase_js_pull - Pull an atom from a context in the job scheduler for
 *                 execution.
 *
 * @kctx:  Context to pull from
 * @js:    Job slot to pull from
 *
 * The atom will not be removed from the ringbuffer at this stage.
 *
 * The HW access lock must be held when calling this function.
 *
 * Return: a pointer to an atom, or NULL if there are no atoms for this
 * slot that can be currently run.
 */
struct kbase_jd_atom *kbase_js_pull(struct kbase_context *kctx, int js);

/**
 * kbase_js_unpull - Return an atom to the job scheduler ringbuffer.
 *
 * @kctx:  Context pointer
 * @katom:  Pointer to the atom to unpull
 *
 * An atom is 'unpulled' if execution is stopped but intended to be returned to
 * later. The most common reason for this is that the atom has been
 * soft-stopped. Another reason is if an end-of-renderpass atom completed
 * but will need to be run again as part of the same renderpass.
 *
 * Note that if multiple atoms are to be 'unpulled', they must be returned in
 * the reverse order to which they were originally pulled. It is a programming
 * error to return atoms in any other order.
 *
 * The HW access lock must be held when calling this function.
 */
void kbase_js_unpull(struct kbase_context *kctx, struct kbase_jd_atom *katom);

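/*
 * Illustrative sketch only (hypothetical soft-stop handling, NULL checks
 * omitted): atoms that were pulled but then stopped must be unpulled in the
 * reverse order to which they were pulled, with the HW access lock held:
 *
 *	lockdep_assert_held(&kbdev->hwaccess_lock);
 *	katom_a = kbase_js_pull(kctx, js);
 *	katom_b = kbase_js_pull(kctx, js);
 *	...
 *	kbase_js_unpull(kctx, katom_b);
 *	kbase_js_unpull(kctx, katom_a);
 */
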
/**
 * kbase_js_complete_atom_wq - Complete an atom from jd_done_worker(),
 *                             removing it from the job
 *                             scheduler ringbuffer.
 * @kctx:  Context pointer
 * @katom: Pointer to the atom to complete
 *
 * If the atom failed then all dependee atoms marked for failure propagation
 * will also fail.
 *
 * Return: true if the context is now idle (no jobs pulled), false otherwise.
 */
bool kbase_js_complete_atom_wq(struct kbase_context *kctx,
		struct kbase_jd_atom *katom);

/**
 * kbase_js_complete_atom - Complete an atom.
 *
 * @katom:         Pointer to the atom to complete
 * @end_timestamp: The time that the atom completed (may be NULL)
 *
 * Most of the work required to complete an atom will be performed by
 * jd_done_worker().
 *
 * The HW access lock must be held when calling this function.
 *
 * Return: an atom that has now been unblocked and can now be run, or NULL
 * if none
 */
struct kbase_jd_atom *kbase_js_complete_atom(struct kbase_jd_atom *katom,
		ktime_t *end_timestamp);

/**
 * kbase_js_atom_blocked_on_x_dep - Decide whether to ignore a cross-slot
 *                                  dependency
 * @katom:	Pointer to an atom in the slot ringbuffer
 *
 * A cross-slot dependency is ignored if necessary to unblock incremental
 * rendering. If the atom at the start of a renderpass used too much memory
 * and was soft-stopped then the atom at the end of a renderpass is submitted
 * to hardware regardless of its dependency on the start-of-renderpass atom.
 * This can happen multiple times for the same pair of atoms.
 *
 * Return: true to block the atom or false to allow it to be submitted to
 * hardware.
 */
bool kbase_js_atom_blocked_on_x_dep(struct kbase_jd_atom *katom);

/**
 * kbase_js_sched - Submit atoms from all available contexts.
 *
 * @kbdev:    Device pointer
 * @js_mask:  Mask of job slots to submit to
 *
 * This will attempt to submit as many jobs as possible to the provided job
 * slots. It will exit when either all job slots are full, or all contexts have
 * been used.
 */
void kbase_js_sched(struct kbase_device *kbdev, int js_mask);

/**
 * kbase_js_zap_context - Attempt to deschedule a context that is being
 *                        destroyed
 * @kctx: Context pointer
 *
 * This will attempt to remove a context from any internal job scheduler queues
 * and perform any other actions to ensure that no further jobs can be
 * submitted from the context.
 *
 * If the context is currently scheduled, then the caller must wait for all
 * pending jobs to complete before taking any further action.
 */
void kbase_js_zap_context(struct kbase_context *kctx);

/**
 * kbase_js_is_atom_valid - Validate an atom
 *
 * @kbdev:  Device pointer
 * @katom:  Atom to validate
 *
 * This will determine whether the atom can be scheduled onto the GPU. Atoms
 * with invalid combinations of core requirements will be rejected.
 *
 * Return: true if the atom is valid, false otherwise.
 */
bool kbase_js_is_atom_valid(struct kbase_device *kbdev,
		struct kbase_jd_atom *katom);

/**
 * kbase_js_set_timeouts - Update all JS timeouts with user specified data
 *
 * @kbdev: Device pointer
 *
 * Timeouts are specified through the 'js_timeouts' sysfs file. If a timeout is
 * set to a positive number then that becomes the new value used; if a timeout
 * is negative then the default is restored.
 */
void kbase_js_set_timeouts(struct kbase_device *kbdev);

/**
 * kbase_js_set_ctx_priority - set the context priority
 *
 * @kctx: Context pointer
 * @new_priority: New priority value for the Context
 *
 * The context priority is set to a new value and it is moved to the
 * pullable/unpullable list as per the new priority.
 */
void kbase_js_set_ctx_priority(struct kbase_context *kctx, int new_priority);

/**
 * kbase_js_update_ctx_priority - update the context priority
 *
 * @kctx: Context pointer
 *
 * The context priority gets updated as per the priority of atoms currently in
 * use for that context, but only if system priority mode for context scheduling
 * is being used.
 */
void kbase_js_update_ctx_priority(struct kbase_context *kctx);

/*
 * Helpers follow
 */

/**
 * kbasep_js_is_submit_allowed - Check that a context is allowed to submit
 *                               jobs on this policy
 * @js_devdata: KBase Job Scheduler Device Data
 * @kctx:       KBase context
 *
 * The purpose of this abstraction is to hide the underlying data size,
 * and wrap up the long repeated line of code.
 *
 * As with any bool, never test the return value against true.
 *
 * The caller must hold hwaccess_lock.
 *
 * Return: true if submission is allowed for @kctx, false otherwise.
 */
static inline bool kbasep_js_is_submit_allowed(
		struct kbasep_js_device_data *js_devdata,
		struct kbase_context *kctx)
{
	u16 test_bit;
	bool is_allowed;

	/* Ensure context really is scheduled in */
	KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
	KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED));

	test_bit = (u16) (1u << kctx->as_nr);

	is_allowed = (bool) (js_devdata->runpool_irq.submit_allowed & test_bit);
	dev_dbg(kctx->kbdev->dev, "JS: submit %s allowed on %pK (as=%d)",
			is_allowed ? "is" : "isn't", (void *)kctx, kctx->as_nr);
	return is_allowed;
}

/**
 * kbasep_js_set_submit_allowed - Allow a context to submit jobs on this policy
 * @js_devdata: KBase Job Scheduler Device Data
 * @kctx:       KBase context
 *
 * The purpose of this abstraction is to hide the underlying data size,
 * and wrap up the long repeated line of code.
 *
 * The caller must hold hwaccess_lock.
 */
static inline void kbasep_js_set_submit_allowed(
		struct kbasep_js_device_data *js_devdata,
		struct kbase_context *kctx)
{
	u16 set_bit;

	/* Ensure context really is scheduled in */
	KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
	KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED));

	set_bit = (u16) (1u << kctx->as_nr);

	dev_dbg(kctx->kbdev->dev, "JS: Setting Submit Allowed on %pK (as=%d)",
			kctx, kctx->as_nr);

	js_devdata->runpool_irq.submit_allowed |= set_bit;
}

/**
 * kbasep_js_clear_submit_allowed - Prevent a context from submitting more
 *                                  jobs on this policy
 * @js_devdata: KBase Job Scheduler Device Data
 * @kctx:       KBase context
 *
 * The purpose of this abstraction is to hide the underlying data size,
 * and wrap up the long repeated line of code.
 *
 * The caller must hold hwaccess_lock.
 */
static inline void kbasep_js_clear_submit_allowed(
		struct kbasep_js_device_data *js_devdata,
		struct kbase_context *kctx)
{
	u16 clear_bit;
	u16 clear_mask;

	/* Ensure context really is scheduled in */
	KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
	KBASE_DEBUG_ASSERT(kbase_ctx_flag(kctx, KCTX_SCHEDULED));

	clear_bit = (u16) (1u << kctx->as_nr);
	clear_mask = ~clear_bit;

	dev_dbg(kctx->kbdev->dev, "JS: Clearing Submit Allowed on %pK (as=%d)",
			kctx, kctx->as_nr);

	js_devdata->runpool_irq.submit_allowed &= clear_mask;
}

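/*
 * Illustrative sketch only (hypothetical caller; context_faulted is a made-up
 * condition, and the sketch assumes hwaccess_lock is taken with
 * spin_lock_irqsave() as in the rest of the driver):
 *
 *	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
 *	if (context_faulted)
 *		kbasep_js_clear_submit_allowed(&kbdev->js_data, kctx);
 *	else if (kbasep_js_is_submit_allowed(&kbdev->js_data, kctx))
 *		...	submit the next job for kctx
 *	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
 */
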
/**
 * kbasep_js_atom_retained_state_init_invalid -
 * Create an initial 'invalid' atom retained state
 * @retained_state: pointer where to create and initialize the state
 *
 * Create an initial 'invalid' atom retained state, that requires no
 * atom-related work to be done on releasing with
 * kbasep_js_runpool_release_ctx_and_katom_retained_state()
 */
static inline void kbasep_js_atom_retained_state_init_invalid(
		struct kbasep_js_atom_retained_state *retained_state)
{
	retained_state->event_code = BASE_JD_EVENT_NOT_STARTED;
	retained_state->core_req =
			KBASEP_JS_ATOM_RETAINED_STATE_CORE_REQ_INVALID;
}

/**
 * kbasep_js_atom_retained_state_copy() - Copy atom state
 * @retained_state: where to copy to
 * @katom:          where to copy from
 *
 * Copy atom state that can be made available after jd_done_nolock() is called
 * on that atom.
 */
static inline void kbasep_js_atom_retained_state_copy(
		struct kbasep_js_atom_retained_state *retained_state,
		const struct kbase_jd_atom *katom)
{
	retained_state->event_code = katom->event_code;
	retained_state->core_req = katom->core_req;
	retained_state->sched_priority = katom->sched_priority;
	retained_state->device_nr = katom->device_nr;
}

/**
 * kbasep_js_has_atom_finished - Determine whether an atom has finished
 *                               (given its retained state),
 *                               and so should be given back to
 *                               userspace/removed from the system.
 *
 * @katom_retained_state:         the retained state of the atom to check
 *
 * Reasons for an atom not finishing include:
 * * Being soft-stopped (and so, the atom should be resubmitted sometime later)
 * * It is an end-of-renderpass atom that was run to consume the output of a
 *   start-of-renderpass atom that was soft-stopped because it used too much
 *   memory. In this case, it will have to be run again later.
 *
 * Return: false if the atom has not finished, true otherwise.
 */
static inline bool kbasep_js_has_atom_finished(
	const struct kbasep_js_atom_retained_state *katom_retained_state)
{
	return (bool) (katom_retained_state->event_code !=
			BASE_JD_EVENT_STOPPED &&
		katom_retained_state->event_code !=
			BASE_JD_EVENT_REMOVED_FROM_NEXT &&
		katom_retained_state->event_code !=
			BASE_JD_EVENT_END_RP_DONE);
}

/**
 * kbasep_js_atom_retained_state_is_valid - Determine whether a struct
 *                                          kbasep_js_atom_retained_state
 *                                          is valid
 * @katom_retained_state:        the atom's retained state to check
 *
 * An invalid struct kbasep_js_atom_retained_state is allowed, and indicates
 * that the code should just ignore it.
 *
 * Return: false if the retained state is invalid, true otherwise.
 */
static inline bool kbasep_js_atom_retained_state_is_valid(
	const struct kbasep_js_atom_retained_state *katom_retained_state)
{
	return (bool) (katom_retained_state->core_req !=
			KBASEP_JS_ATOM_RETAINED_STATE_CORE_REQ_INVALID);
}

/**
 * kbase_js_runpool_inc_context_count - Increment number of running contexts.
 * @kbdev: KBase device
 * @kctx:  KBase context
 *
 * The following locking conditions are made on the caller:
 * * The caller must hold the kbasep_js_kctx_info::ctx::jsctx_mutex.
 * * The caller must hold the kbasep_js_device_data::runpool_mutex
 */
static inline void kbase_js_runpool_inc_context_count(
						struct kbase_device *kbdev,
						struct kbase_context *kctx)
{
	struct kbasep_js_device_data *js_devdata;
	struct kbasep_js_kctx_info *js_kctx_info;

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(kctx != NULL);

	js_devdata = &kbdev->js_data;
	js_kctx_info = &kctx->jctx.sched_info;

	lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex);
	lockdep_assert_held(&js_devdata->runpool_mutex);

	/* Track total contexts */
	KBASE_DEBUG_ASSERT(js_devdata->nr_all_contexts_running < S8_MAX);
	++(js_devdata->nr_all_contexts_running);

	if (!kbase_ctx_flag(kctx, KCTX_SUBMIT_DISABLED)) {
		/* Track contexts that can submit jobs */
		KBASE_DEBUG_ASSERT(js_devdata->nr_user_contexts_running <
									S8_MAX);
		++(js_devdata->nr_user_contexts_running);
	}
}

/**
 * kbase_js_runpool_dec_context_count - Decrement number of running contexts.
 *
 * @kbdev: KBase device
 * @kctx:  KBase context
 *
 * The following locking conditions are made on the caller:
 * * The caller must hold the kbasep_js_kctx_info::ctx::jsctx_mutex.
 * * The caller must hold the kbasep_js_device_data::runpool_mutex
 */
static inline void kbase_js_runpool_dec_context_count(
						struct kbase_device *kbdev,
						struct kbase_context *kctx)
{
	struct kbasep_js_device_data *js_devdata;
	struct kbasep_js_kctx_info *js_kctx_info;

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(kctx != NULL);

	js_devdata = &kbdev->js_data;
	js_kctx_info = &kctx->jctx.sched_info;

	lockdep_assert_held(&js_kctx_info->ctx.jsctx_mutex);
	lockdep_assert_held(&js_devdata->runpool_mutex);

	/* Track total contexts */
	--(js_devdata->nr_all_contexts_running);
	KBASE_DEBUG_ASSERT(js_devdata->nr_all_contexts_running >= 0);

	if (!kbase_ctx_flag(kctx, KCTX_SUBMIT_DISABLED)) {
		/* Track contexts that can submit jobs */
		--(js_devdata->nr_user_contexts_running);
		KBASE_DEBUG_ASSERT(js_devdata->nr_user_contexts_running >= 0);
	}
}

/**
 * kbase_js_sched_all - Submit atoms from all available contexts to all
 *                      job slots.
 *
 * @kbdev:    Device pointer
 *
 * This will attempt to submit as many jobs as possible. It will exit when
 * either all job slots are full, or all contexts have been used.
 */
static inline void kbase_js_sched_all(struct kbase_device *kbdev)
{
	kbase_js_sched(kbdev, (1 << kbdev->gpu_props.num_job_slots) - 1);
}

/* Lookup table mapping base_jd_prio values to relative scheduler priorities */
extern const int
kbasep_js_atom_priority_to_relative[BASE_JD_NR_PRIO_LEVELS];

/* Lookup table mapping relative scheduler priorities back to base_jd_prio */
extern const base_jd_prio
kbasep_js_relative_priority_to_atom[KBASE_JS_ATOM_SCHED_PRIO_COUNT];

/**
 * kbasep_js_atom_prio_to_sched_prio - Convert atom priority (base_jd_prio)
 *                                     to relative ordering
 * @atom_prio: Priority ID to translate.
 *
 * Atom priority values for @ref base_jd_prio cannot be compared directly to
 * find out which are higher or lower.
 *
 * This function will convert base_jd_prio values for successively lower
 * priorities into a monotonically increasing sequence. That is, the lower the
 * base_jd_prio priority, the higher the value produced by this function. This
 * is in accordance with how the rest of the kernel treats priority.
 *
 * The mapping is 1:1 and the size of the valid input range is the same as the
 * size of the valid output range, i.e.
 * KBASE_JS_ATOM_SCHED_PRIO_COUNT == BASE_JD_NR_PRIO_LEVELS
 *
 * Note: this must be kept in sync with the BASE_JD_PRIO_<...> definitions.
 *
 * Return: On success: a value in the inclusive range
 *         0..KBASE_JS_ATOM_SCHED_PRIO_COUNT-1. On failure:
 *         KBASE_JS_ATOM_SCHED_PRIO_INVALID
 */
static inline int kbasep_js_atom_prio_to_sched_prio(base_jd_prio atom_prio)
{
	if (atom_prio >= BASE_JD_NR_PRIO_LEVELS)
		return KBASE_JS_ATOM_SCHED_PRIO_INVALID;

	return kbasep_js_atom_priority_to_relative[atom_prio];
}

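/*
 * Illustrative sketch only (hypothetical caller): converting an atom priority,
 * for example one received from user space, and rejecting out-of-range values:
 *
 *	int sched_prio = kbasep_js_atom_prio_to_sched_prio(atom_prio);
 *
 *	if (sched_prio == KBASE_JS_ATOM_SCHED_PRIO_INVALID)
 *		return -EINVAL;
 *	katom->sched_priority = sched_prio;
 */
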
/**
 * kbasep_js_sched_prio_to_atom_prio - Convert a relative scheduler priority
 *                                     back to an atom priority (base_jd_prio)
 * @sched_prio: Relative scheduler priority to translate; must be in the range
 *              0..KBASE_JS_ATOM_SCHED_PRIO_COUNT-1.
 *
 * This is the inverse of kbasep_js_atom_prio_to_sched_prio().
 *
 * Return: the base_jd_prio value corresponding to @sched_prio.
 */
static inline base_jd_prio kbasep_js_sched_prio_to_atom_prio(int sched_prio)
{
	unsigned int prio_idx;

	KBASE_DEBUG_ASSERT(sched_prio >= 0 &&
			sched_prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT);

	prio_idx = (unsigned int)sched_prio;

	return kbasep_js_relative_priority_to_atom[prio_idx];
}

/**
 * kbase_js_priority_check - Check the priority requested
 *
 * @kbdev:    Device pointer
 * @priority: Requested priority
 *
 * This will determine whether the requested priority can be satisfied.
 *
 * Return: The same or lower priority than requested.
 */
base_jd_prio kbase_js_priority_check(struct kbase_device *kbdev, base_jd_prio priority);

/**
 * kbase_js_atom_runs_before - Determine if atoms for the same slot have an
 *                             ordering relation
 * @kbdev: kbase device
 * @katom_a: the first atom
 * @katom_b: the second atom
 * @order_flags: combination of KBASE_ATOM_ORDERING_FLAG_<...> for the ordering
 *               relation
 *
 * This is for making consistent decisions about the ordering of atoms when we
 * need to do pre-emption on a slot, which includes stopping existing atoms
 * when a new atom is ready to run, and also which other atoms to remove from
 * the slot when the atom in JSn_HEAD is being pre-empted.
 *
 * This only handles @katom_a and @katom_b being for the same job slot, as
 * pre-emption only operates within a slot.
 *
 * Note: there is currently no use-case for this as a sorting comparison
 * function, hence only a boolean is returned instead of an int -1/0/+1. If
 * that is required in future, modifying this function would be better than
 * calling it twice with katom_a and katom_b swapped.
 *
 * Return:
 * true if @katom_a should run before @katom_b, false otherwise.
 * A false return value does not distinguish between "no ordering relation" and
 * "@katom_a should run after @katom_b".
 */
bool kbase_js_atom_runs_before(struct kbase_device *kbdev,
			       const struct kbase_jd_atom *katom_a,
			       const struct kbase_jd_atom *katom_b,
			       const kbase_atom_ordering_flag_t order_flags);

#endif	/* _KBASE_JM_JS_H_ */