1 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2 /*
3 *
4 * (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved.
5 *
6 * This program is free software and is provided to you under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation, and any use by you of this program is subject to the terms
9 * of such GNU license.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, you can access it online at
18 * http://www.gnu.org/licenses/gpl-2.0.html.
19 *
20 */
21
22 #ifndef _KBASE_CSF_SCHEDULER_H_
23 #define _KBASE_CSF_SCHEDULER_H_
24
25 #include "mali_kbase_csf.h"
26 #include "mali_kbase_csf_event.h"
27
28 /**
29 * kbase_csf_scheduler_queue_start() - Enable the running of GPU command queue
30 * on firmware.
31 *
32 * @queue: Pointer to the GPU command queue to be started.
33 *
34 * This function would enable the start of a CSI, within a
35 * CSG, to which the @queue was bound.
36 * If the CSG is already scheduled and resident, the CSI will be started
37 * right away, otherwise once the group is made resident.
38 *
39 * Return: 0 on success, or negative on failure.
40 */
41 int kbase_csf_scheduler_queue_start(struct kbase_queue *queue);
42
43 /**
44 * kbase_csf_scheduler_queue_stop() - Disable the running of GPU command queue
45 * on firmware.
46 *
47 * @queue: Pointer to the GPU command queue to be stopped.
48 *
49 * This function would stop the CSI, within a CSG, to which @queue was bound.
50 *
51 * Return: 0 on success, or negative on failure.
52 */
53 int kbase_csf_scheduler_queue_stop(struct kbase_queue *queue);
54
55 /**
56 * kbase_csf_scheduler_group_protm_enter - Handle the protm enter event for the
57 * GPU command queue group.
58 *
59 * @group: The command queue group.
60 *
61 * This function could request the firmware to enter the protected mode
62 * and allow the execution of protected region instructions for all the
63 * bound queues of the group that have protm pending bit set in their
64 * respective CS_ACK register.
65 */
66 void kbase_csf_scheduler_group_protm_enter(struct kbase_queue_group *group);
67
68 /**
69 * kbase_csf_scheduler_group_get_slot() - Checks if a queue group is
70 * programmed on a firmware CSG slot
71 * and returns the slot number.
72 *
73 * @group: The command queue group.
74 *
75 * Return: The slot number, if the group is programmed on a slot.
76 * Otherwise returns a negative number.
77 *
78 * Note: This function should not be used if the interrupt_lock is held. Use
79 * kbase_csf_scheduler_group_get_slot_locked() instead.
80 */
81 int kbase_csf_scheduler_group_get_slot(struct kbase_queue_group *group);
82
83 /**
84 * kbase_csf_scheduler_group_get_slot_locked() - Checks if a queue group is
85 * programmed on a firmware CSG slot
86 * and returns the slot number.
87 *
88 * @group: The command queue group.
89 *
90 * Return: The slot number, if the group is programmed on a slot.
91 * Otherwise returns a negative number.
92 *
93 * Note: Caller must hold the interrupt_lock.
94 */
95 int kbase_csf_scheduler_group_get_slot_locked(struct kbase_queue_group *group);
96
97 /**
98 * kbase_csf_scheduler_group_events_enabled() - Checks if interrupt events
99 * should be handled for a queue group.
100 *
101 * @kbdev: The device of the group.
102 * @group: The queue group.
103 *
104 * Return: true if interrupt events should be handled.
105 *
106 * Note: Caller must hold the interrupt_lock.
107 */
108 bool kbase_csf_scheduler_group_events_enabled(struct kbase_device *kbdev,
109 struct kbase_queue_group *group);
110
111 /**
 * kbase_csf_scheduler_get_group_on_slot() - Gets the queue group that has been
113 * programmed to a firmware CSG slot.
114 *
115 * @kbdev: The GPU device.
116 * @slot: The slot for which to get the queue group.
117 *
118 * Return: Pointer to the programmed queue group.
119 *
120 * Note: Caller must hold the interrupt_lock.
121 */
122 struct kbase_queue_group *kbase_csf_scheduler_get_group_on_slot(
123 struct kbase_device *kbdev, int slot);
124
125 /**
126 * kbase_csf_scheduler_group_deschedule() - Deschedule a GPU command queue
127 * group from the firmware.
128 *
129 * @group: Pointer to the queue group to be descheduled.
130 *
131 * This function would disable the scheduling of GPU command queue group on
132 * firmware.
133 */
134 void kbase_csf_scheduler_group_deschedule(struct kbase_queue_group *group);
135
136 /**
137 * kbase_csf_scheduler_evict_ctx_slots() - Evict all GPU command queue groups
138 * of a given context that are active
139 * running from the firmware.
140 *
141 * @kbdev: The GPU device.
142 * @kctx: Kbase context for the evict operation.
143 * @evicted_groups: List_head for returning evicted active queue groups.
144 *
145 * This function would disable the scheduling of GPU command queue groups active
146 * on firmware slots from the given Kbase context. The affected groups are
147 * added to the supplied list_head argument.
148 */
149 void kbase_csf_scheduler_evict_ctx_slots(struct kbase_device *kbdev,
150 struct kbase_context *kctx, struct list_head *evicted_groups);
151
152 /**
153 * kbase_csf_scheduler_context_init() - Initialize the context-specific part
154 * for CSF scheduler.
155 *
156 * @kctx: Pointer to kbase context that is being created.
157 *
158 * This function must be called during Kbase context creation.
159 *
160 * Return: 0 on success, or negative on failure.
161 */
162 int kbase_csf_scheduler_context_init(struct kbase_context *kctx);
163
164 /**
165 * kbase_csf_scheduler_init - Initialize the CSF scheduler
166 *
167 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
168 *
169 * The scheduler does the arbitration for the CSG slots
170 * provided by the firmware between the GPU command queue groups created
171 * by the Clients.
172 * This function must be called after loading firmware and parsing its capabilities.
173 *
174 * Return: 0 on success, or negative on failure.
175 */
176 int kbase_csf_scheduler_init(struct kbase_device *kbdev);
177
178 /**
179 * kbase_csf_scheduler_early_init - Early initialization for the CSF scheduler
180 *
181 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
182 *
183 * Initialize necessary resources such as locks, workqueue for CSF scheduler.
184 * This must be called at kbase probe.
185 *
186 * Return: 0 on success, or negative on failure.
187 */
188 int kbase_csf_scheduler_early_init(struct kbase_device *kbdev);
189
190 /**
191 * kbase_csf_scheduler_context_term() - Terminate the context-specific part
192 * for CSF scheduler.
193 *
194 * @kctx: Pointer to kbase context that is being terminated.
195 *
196 * This function must be called during Kbase context termination.
197 */
198 void kbase_csf_scheduler_context_term(struct kbase_context *kctx);
199
200 /**
201 * kbase_csf_scheduler_term - Terminate the CSF scheduler.
202 *
203 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
204 *
205 * This should be called when unload of firmware is done on device
206 * termination.
207 */
208 void kbase_csf_scheduler_term(struct kbase_device *kbdev);
209
210 /**
211 * kbase_csf_scheduler_early_term - Early termination of the CSF scheduler.
212 *
213 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
214 *
 * This should be called only when kbase probe fails or the driver module is
 * being removed (rmmod).
216 */
217 void kbase_csf_scheduler_early_term(struct kbase_device *kbdev);
218
219 /**
220 * kbase_csf_scheduler_reset - Reset the state of all active GPU command
221 * queue groups.
222 *
223 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
224 *
225 * This function will first iterate through all the active/scheduled GPU
226 * command queue groups and suspend them (to avoid losing work for groups
227 * that are not stuck). The groups that could not get suspended would be
228 * descheduled and marked as terminated (which will then lead to unbinding
229 * of all the queues bound to them) and also no more work would be allowed
230 * to execute for them.
231 *
232 * This is similar to the action taken in response to an unexpected OoM event.
233 * No explicit re-initialization is done for CSG & CS interface I/O pages;
234 * instead, that happens implicitly on firmware reload.
235 *
236 * Should be called only after initiating the GPU reset.
237 */
238 void kbase_csf_scheduler_reset(struct kbase_device *kbdev);
239
240 /**
241 * kbase_csf_scheduler_enable_tick_timer - Enable the scheduler tick timer.
242 *
243 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
244 *
245 * This function will restart the scheduler tick so that regular scheduling can
246 * be resumed without any explicit trigger (like kicking of GPU queues).
247 */
248 void kbase_csf_scheduler_enable_tick_timer(struct kbase_device *kbdev);
249
250 /**
251 * kbase_csf_scheduler_group_copy_suspend_buf - Suspend a queue
252 * group and copy suspend buffer.
253 *
254 * @group: Pointer to the queue group to be suspended.
255 * @sus_buf: Pointer to the structure which contains details of the
256 * user buffer and its kernel pinned pages to which we need to copy
257 * the group suspend buffer.
258 *
259 * This function is called to suspend a queue group and copy the suspend_buffer
260 * contents to the input buffer provided.
261 *
262 * Return: 0 on success, or negative on failure.
263 */
264 int kbase_csf_scheduler_group_copy_suspend_buf(struct kbase_queue_group *group,
265 struct kbase_suspend_copy_buffer *sus_buf);
266
267 /**
268 * kbase_csf_scheduler_lock - Acquire the global Scheduler lock.
269 *
270 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
271 *
272 * This function will take the global scheduler lock, in order to serialize
273 * against the Scheduler actions, for access to CS IO pages.
274 */
kbase_csf_scheduler_lock(struct kbase_device * kbdev)275 static inline void kbase_csf_scheduler_lock(struct kbase_device *kbdev)
276 {
277 mutex_lock(&kbdev->csf.scheduler.lock);
278 }
279
280 /**
281 * kbase_csf_scheduler_unlock - Release the global Scheduler lock.
282 *
283 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
284 */
kbase_csf_scheduler_unlock(struct kbase_device * kbdev)285 static inline void kbase_csf_scheduler_unlock(struct kbase_device *kbdev)
286 {
287 mutex_unlock(&kbdev->csf.scheduler.lock);
288 }
289
290 /**
291 * kbase_csf_scheduler_spin_lock - Acquire Scheduler interrupt spinlock.
292 *
293 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
294 * @flags: Pointer to the memory location that would store the previous
295 * interrupt state.
296 *
 * This function will take the Scheduler interrupt spinlock, in order to
 * serialize against interrupt-context updates to the Scheduler state.
299 */
kbase_csf_scheduler_spin_lock(struct kbase_device * kbdev,unsigned long * flags)300 static inline void kbase_csf_scheduler_spin_lock(struct kbase_device *kbdev,
301 unsigned long *flags)
302 {
303 spin_lock_irqsave(&kbdev->csf.scheduler.interrupt_lock, *flags);
304 }
305
306 /**
307 * kbase_csf_scheduler_spin_unlock - Release Scheduler interrupt spinlock.
308 *
309 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
310 * @flags: Previously stored interrupt state when Scheduler interrupt
311 * spinlock was acquired.
312 */
kbase_csf_scheduler_spin_unlock(struct kbase_device * kbdev,unsigned long flags)313 static inline void kbase_csf_scheduler_spin_unlock(struct kbase_device *kbdev,
314 unsigned long flags)
315 {
316 spin_unlock_irqrestore(&kbdev->csf.scheduler.interrupt_lock, flags);
317 }
318
319 /**
320 * kbase_csf_scheduler_spin_lock_assert_held - Assert if the Scheduler
321 * interrupt spinlock is held.
322 *
323 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
324 */
325 static inline void
kbase_csf_scheduler_spin_lock_assert_held(struct kbase_device * kbdev)326 kbase_csf_scheduler_spin_lock_assert_held(struct kbase_device *kbdev)
327 {
328 lockdep_assert_held(&kbdev->csf.scheduler.interrupt_lock);
329 }
330
331 /**
332 * kbase_csf_scheduler_timer_is_enabled() - Check if the scheduler wakes up
333 * automatically for periodic tasks.
334 *
335 * @kbdev: Pointer to the device
336 *
337 * Return: true if the scheduler is configured to wake up periodically
338 */
339 bool kbase_csf_scheduler_timer_is_enabled(struct kbase_device *kbdev);
340
341 /**
342 * kbase_csf_scheduler_timer_set_enabled() - Enable/disable periodic
343 * scheduler tasks.
344 *
345 * @kbdev: Pointer to the device
346 * @enable: Whether to enable periodic scheduler tasks
347 */
348 void kbase_csf_scheduler_timer_set_enabled(struct kbase_device *kbdev,
349 bool enable);
350
351 /**
352 * kbase_csf_scheduler_kick - Perform pending scheduling tasks once.
353 *
354 * Note: This function is only effective if the scheduling timer is disabled.
355 *
356 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
357 */
358 void kbase_csf_scheduler_kick(struct kbase_device *kbdev);
359
360 /**
361 * kbase_csf_scheduler_protected_mode_in_use() - Check if the scheduler is
362 * running with protected mode tasks.
363 *
364 * @kbdev: Pointer to the device
365 *
366 * Return: true if the scheduler is running with protected mode tasks
367 */
kbase_csf_scheduler_protected_mode_in_use(struct kbase_device * kbdev)368 static inline bool kbase_csf_scheduler_protected_mode_in_use(
369 struct kbase_device *kbdev)
370 {
371 return (kbdev->csf.scheduler.active_protm_grp != NULL);
372 }
373
374 /**
375 * kbase_csf_scheduler_pm_active - Perform scheduler power active operation
376 *
377 * Note: This function will increase the scheduler's internal pm_active_count
378 * value, ensuring that both GPU and MCU are powered for access. The MCU may
379 * not have actually become active when this function returns, so need to
380 * call kbase_csf_scheduler_wait_mcu_active() for that.
381 *
382 * This function should not be called with global scheduler lock held.
383 *
384 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
385 */
386 void kbase_csf_scheduler_pm_active(struct kbase_device *kbdev);
387
388 /**
389 * kbase_csf_scheduler_pm_idle - Perform the scheduler power idle operation
390 *
391 * Note: This function will decrease the scheduler's internal pm_active_count
392 * value. On reaching 0, the MCU and GPU could be powered off. This function
393 * should not be called with global scheduler lock held.
394 *
395 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
396 */
397 void kbase_csf_scheduler_pm_idle(struct kbase_device *kbdev);
398
399 /**
400 * kbase_csf_scheduler_wait_mcu_active - Wait for the MCU to actually become active
401 *
402 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
403 *
404 * This function will wait for the MCU to actually become active. It is supposed
405 * to be called after calling kbase_csf_scheduler_pm_active(). It is needed as
406 * kbase_csf_scheduler_pm_active() may not make the MCU active right away.
407 *
408 * Return: 0 if the MCU was successfully activated otherwise an error code.
409 */
410 int kbase_csf_scheduler_wait_mcu_active(struct kbase_device *kbdev);
411
412 /**
413 * kbase_csf_scheduler_pm_resume - Reactivate the scheduler on system resume
414 *
415 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
416 *
417 * This function will make the scheduler resume the scheduling of queue groups
 * and take the power management reference, if there are any runnable groups.
419 */
420 void kbase_csf_scheduler_pm_resume(struct kbase_device *kbdev);
421
422 /**
423 * kbase_csf_scheduler_pm_suspend - Idle the scheduler on system suspend
424 *
425 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
426 *
427 * This function will make the scheduler suspend all the running queue groups
 * and drop its power management reference.
429 *
430 * Return: 0 on success.
431 */
432 int kbase_csf_scheduler_pm_suspend(struct kbase_device *kbdev);
433
434 /**
435 * kbase_csf_scheduler_all_csgs_idle() - Check if the scheduler internal
436 * runtime used slots are all tagged as idle command queue groups.
437 *
438 * @kbdev: Pointer to the device
439 *
440 * Return: true if all the used slots are tagged as idle CSGs.
441 */
kbase_csf_scheduler_all_csgs_idle(struct kbase_device * kbdev)442 static inline bool kbase_csf_scheduler_all_csgs_idle(struct kbase_device *kbdev)
443 {
444 lockdep_assert_held(&kbdev->csf.scheduler.interrupt_lock);
445 return bitmap_equal(kbdev->csf.scheduler.csg_slots_idle_mask,
446 kbdev->csf.scheduler.csg_inuse_bitmap,
447 kbdev->csf.global_iface.group_num);
448 }
449
450 /**
451 * kbase_csf_scheduler_advance_tick_nolock() - Advance the scheduling tick
452 *
453 * @kbdev: Pointer to the device
454 *
455 * This function advances the scheduling tick by enqueing the tick work item for
456 * immediate execution, but only if the tick hrtimer is active. If the timer
457 * is inactive then the tick work item is already in flight.
458 * The caller must hold the interrupt lock.
459 */
460 static inline void
kbase_csf_scheduler_advance_tick_nolock(struct kbase_device * kbdev)461 kbase_csf_scheduler_advance_tick_nolock(struct kbase_device *kbdev)
462 {
463 struct kbase_csf_scheduler *const scheduler = &kbdev->csf.scheduler;
464
465 lockdep_assert_held(&scheduler->interrupt_lock);
466
467 if (scheduler->tick_timer_active) {
468 KBASE_KTRACE_ADD(kbdev, SCHEDULER_ADVANCE_TICK, NULL, 0u);
469 scheduler->tick_timer_active = false;
470 queue_work(scheduler->wq, &scheduler->tick_work);
471 } else {
472 KBASE_KTRACE_ADD(kbdev, SCHEDULER_NOADVANCE_TICK, NULL, 0u);
473 }
474 }
475
476 /**
477 * kbase_csf_scheduler_advance_tick() - Advance the scheduling tick
478 *
479 * @kbdev: Pointer to the device
480 *
481 * This function advances the scheduling tick by enqueing the tick work item for
482 * immediate execution, but only if the tick hrtimer is active. If the timer
483 * is inactive then the tick work item is already in flight.
484 */
kbase_csf_scheduler_advance_tick(struct kbase_device * kbdev)485 static inline void kbase_csf_scheduler_advance_tick(struct kbase_device *kbdev)
486 {
487 struct kbase_csf_scheduler *const scheduler = &kbdev->csf.scheduler;
488 unsigned long flags;
489
490 spin_lock_irqsave(&scheduler->interrupt_lock, flags);
491 kbase_csf_scheduler_advance_tick_nolock(kbdev);
492 spin_unlock_irqrestore(&scheduler->interrupt_lock, flags);
493 }
494
495 /**
496 * kbase_csf_scheduler_invoke_tick() - Invoke the scheduling tick
497 *
498 * @kbdev: Pointer to the device
499 *
500 * This function will queue the scheduling tick work item for immediate
501 * execution if tick timer is not active. This can be called from interrupt
502 * context to resume the scheduling after GPU was put to sleep.
503 */
kbase_csf_scheduler_invoke_tick(struct kbase_device * kbdev)504 static inline void kbase_csf_scheduler_invoke_tick(struct kbase_device *kbdev)
505 {
506 struct kbase_csf_scheduler *const scheduler = &kbdev->csf.scheduler;
507 unsigned long flags;
508
509 spin_lock_irqsave(&scheduler->interrupt_lock, flags);
510 if (!scheduler->tick_timer_active)
511 queue_work(scheduler->wq, &scheduler->tick_work);
512 spin_unlock_irqrestore(&scheduler->interrupt_lock, flags);
513 }
514
515 /**
516 * kbase_csf_scheduler_queue_has_trace() - report whether the queue has been
517 * configured to operate with the
518 * cs_trace feature.
519 *
520 * @queue: Pointer to the queue.
521 *
522 * Return: True if the gpu queue is configured to operate with the cs_trace
523 * feature, otherwise false.
524 */
kbase_csf_scheduler_queue_has_trace(struct kbase_queue * queue)525 static inline bool kbase_csf_scheduler_queue_has_trace(struct kbase_queue *queue)
526 {
527 lockdep_assert_held(&queue->kctx->kbdev->csf.scheduler.lock);
528 /* In the current arrangement, it is possible for the context to enable
529 * the cs_trace after some queues have been registered with cs_trace in
530 * disabled state. So each queue has its own enabled/disabled condition.
531 */
532 return (queue->trace_buffer_size && queue->trace_buffer_base);
533 }
534
535 #ifdef KBASE_PM_RUNTIME
536 /**
537 * kbase_csf_scheduler_reval_idleness_post_sleep() - Check GPU's idleness after
538 * putting MCU to sleep state
539 *
540 * @kbdev: Pointer to the device
541 *
542 * This function re-evaluates the idleness of on-slot queue groups after MCU
543 * was put to the sleep state and invokes the scheduling tick if any of the
544 * on-slot queue group became non-idle.
545 * CSG_OUTPUT_BLOCK.CSG_STATUS_STATE.IDLE bit is checked to determine the
546 * idleness which is updated by MCU firmware on handling of the sleep request.
547 *
548 * This function is needed to detect if more work was flushed in the window
549 * between the GPU idle notification and the enabling of Doorbell mirror
550 * interrupt (from MCU state machine). Once Doorbell mirror interrupt is
551 * enabled, Host can receive the notification on User doorbell rings.
552 */
553 void kbase_csf_scheduler_reval_idleness_post_sleep(struct kbase_device *kbdev);
554
555 /**
556 * kbase_csf_scheduler_handle_runtime_suspend() - Handle runtime suspend by
557 * suspending CSGs.
558 *
559 * @kbdev: Pointer to the device
560 *
561 * This function is called from the runtime suspend callback function for
562 * suspending all the on-slot queue groups. If any of the group is found to
563 * be non-idle after the completion of CSG suspend operation or the CSG
564 * suspend operation times out, then the scheduling tick is invoked and an
565 * error is returned so that the GPU power down can be aborted.
566 *
567 * Return: 0 if all the CSGs were suspended, otherwise an error code.
568 */
569 int kbase_csf_scheduler_handle_runtime_suspend(struct kbase_device *kbdev);
570 #endif
571
572 /**
573 * kbase_csf_scheduler_get_nr_active_csgs() - Get the number of active CSGs
574 *
575 * @kbdev: Pointer to the device
576 *
577 * This function calculates the number of CSG slots that have a queue group
578 * resident on them.
579 *
580 * Note: This function should not be used if the interrupt_lock is held. Use
581 * kbase_csf_scheduler_get_nr_active_csgs_locked() instead.
582 *
583 * Return: number of active CSGs.
584 */
585 u32 kbase_csf_scheduler_get_nr_active_csgs(struct kbase_device *kbdev);
586
587 /**
588 * kbase_csf_scheduler_get_nr_active_csgs_locked() - Get the number of active
589 * CSGs
590 *
591 * @kbdev: Pointer to the device
592 *
593 * This function calculates the number of CSG slots that have a queue group
594 * resident on them.
595 *
596 * Note: This function should be called with interrupt_lock held.
597 *
598 * Return: number of active CSGs.
599 */
600 u32 kbase_csf_scheduler_get_nr_active_csgs_locked(struct kbase_device *kbdev);
601
602 /**
603 * kbase_csf_scheduler_force_wakeup() - Forcefully resume the scheduling of CSGs
604 *
605 * @kbdev: Pointer to the device
606 *
607 * This function is called to forcefully resume the scheduling of CSGs, even
608 * when there wasn't any work submitted for them.
609 * This function is only used for testing purpose.
610 */
611 void kbase_csf_scheduler_force_wakeup(struct kbase_device *kbdev);
612
613 #ifdef KBASE_PM_RUNTIME
614 /**
615 * kbase_csf_scheduler_force_sleep() - Forcefully put the Scheduler to sleeping
616 * state.
617 *
618 * @kbdev: Pointer to the device
619 *
620 * This function is called to forcefully put the Scheduler to sleeping state
621 * and trigger the sleep of MCU. If the CSGs are not idle, then the Scheduler
622 * would get reactivated again immediately.
623 * This function is only used for testing purpose.
624 */
625 void kbase_csf_scheduler_force_sleep(struct kbase_device *kbdev);
626 #endif
627
628 #endif /* _KBASE_CSF_SCHEDULER_H_ */
629