/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 *
 * (C) COPYRIGHT 2010-2021 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

/*
 * Power management API definitions used internally by GPU backend
 */

#ifndef _KBASE_BACKEND_PM_INTERNAL_H_
#define _KBASE_BACKEND_PM_INTERNAL_H_

#include <mali_kbase_hwaccess_pm.h>

#include "backend/gpu/mali_kbase_pm_ca.h"
#include "mali_kbase_pm_policy.h"

/**
 * kbase_pm_dev_idle - The GPU is idle.
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * The OS may choose to turn off idle devices
 */
void kbase_pm_dev_idle(struct kbase_device *kbdev);

/**
 * kbase_pm_dev_activate - The GPU is active.
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * The OS should avoid opportunistically turning off the GPU while it is active
 */
void kbase_pm_dev_activate(struct kbase_device *kbdev);

/**
 * kbase_pm_get_present_cores - Get details of the cores that are present in
 *                              the device.
 *
 * @kbdev: The kbase device structure for the device (must be a valid
 *         pointer)
 * @type:  The type of core (see the enum kbase_pm_core_type enumeration)
 *
 * This function can be called by the active power policy to return a bitmask of
 * the cores (of a specified type) present in the GPU device and also a count of
 * the number of cores.
 *
 * Return: The bit mask of cores present
 */
u64 kbase_pm_get_present_cores(struct kbase_device *kbdev,
			       enum kbase_pm_core_type type);

/**
 * kbase_pm_get_active_cores - Get details of the cores that are currently
 *                             active in the device.
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 * @type:  The type of core (see the enum kbase_pm_core_type enumeration)
 *
 * This function can be called by the active power policy to return a bitmask of
 * the cores (of a specified type) that are actively processing work (i.e.
 * turned on *and* busy).
 *
 * Return: The bit mask of active cores
 */
u64 kbase_pm_get_active_cores(struct kbase_device *kbdev,
			      enum kbase_pm_core_type type);

/**
 * kbase_pm_get_trans_cores - Get details of the cores that are currently
 *                            transitioning between power states.
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 * @type:  The type of core (see the enum kbase_pm_core_type enumeration)
 *
 * This function can be called by the active power policy to return a bitmask of
 * the cores (of a specified type) that are currently transitioning between
 * power states.
 *
 * Return: The bit mask of transitioning cores
 */
u64 kbase_pm_get_trans_cores(struct kbase_device *kbdev,
			     enum kbase_pm_core_type type);

/**
 * kbase_pm_get_ready_cores - Get details of the cores that are currently
 *                            powered and ready for jobs.
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 * @type:  The type of core (see the enum kbase_pm_core_type enumeration)
 *
 * This function can be called by the active power policy to return a bitmask of
 * the cores (of a specified type) that are powered and ready for jobs (they may
 * or may not be currently executing jobs).
 *
 * Return: The bit mask of ready cores
 */
u64 kbase_pm_get_ready_cores(struct kbase_device *kbdev,
			     enum kbase_pm_core_type type);
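
/*
 * Usage sketch (illustrative, not part of the original header): a power
 * policy can combine these queries, e.g. to find shader cores that are
 * present but not yet ready. KBASE_PM_CORE_SHADER is assumed here to be a
 * value of enum kbase_pm_core_type, and hweight64() comes from linux/bitops.h.
 *
 *	u64 present = kbase_pm_get_present_cores(kbdev, KBASE_PM_CORE_SHADER);
 *	u64 ready = kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_SHADER);
 *	u64 powering_up = present & ~ready;
 *	unsigned int nr_ready = hweight64(ready);
 */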

/**
 * kbase_pm_clock_on - Turn the clock for the device on, and enable device
 *                     interrupts.
 *
 * @kbdev:     The kbase device structure for the device (must be a valid
 *             pointer)
 * @is_resume: true if clock on due to resume after suspend, false otherwise
 *
 * This function can be used by a power policy to turn the clock for the GPU on.
 * It should be modified during integration to perform the necessary actions to
 * ensure that the GPU is fully powered and clocked.
 */
void kbase_pm_clock_on(struct kbase_device *kbdev, bool is_resume);

/**
 * kbase_pm_clock_off - Disable device interrupts, and turn the clock for the
 *                      device off.
 *
 * @kbdev: The kbase device structure for the device (must be a valid
 *         pointer)
 *
 * This function can be used by a power policy to turn the clock for the GPU
 * off. It should be modified during integration to perform the necessary
 * actions to turn the clock off (if this is possible in the integration).
 *
 * If runtime PM is enabled and @power_runtime_gpu_idle_callback is used
 * then this function would usually be invoked from the runtime suspend
 * callback function.
 *
 * Return: true if the clock was turned off, or
 *         false if the clock cannot be turned off due to pending page/bus
 *         fault workers. The caller must flush the MMU workqueues and retry.
 */
bool kbase_pm_clock_off(struct kbase_device *kbdev);
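
/*
 * Usage sketch (illustrative, not part of the original header): honouring the
 * retry contract above when kbase_pm_clock_off() returns false. The MMU
 * workqueue flush helper named here is an assumption of this sketch.
 *
 *	while (!kbase_pm_clock_off(kbdev))
 *		kbase_flush_mmu_wqs(kbdev);
 */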

/**
 * kbase_pm_enable_interrupts - Enable interrupts on the device.
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Interrupts are also enabled after a call to kbase_pm_clock_on().
 */
void kbase_pm_enable_interrupts(struct kbase_device *kbdev);

/**
 * kbase_pm_disable_interrupts - Disable interrupts on the device.
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * This prevents delivery of Power Management interrupts to the CPU so that
 * kbase_pm_update_state() will not be called from the IRQ handler
 * until kbase_pm_enable_interrupts() or kbase_pm_clock_on() is called.
 *
 * Interrupts are also disabled after a call to kbase_pm_clock_off().
 */
void kbase_pm_disable_interrupts(struct kbase_device *kbdev);

/**
 * kbase_pm_disable_interrupts_nolock - Version of kbase_pm_disable_interrupts()
 *                                      that does not take the hwaccess_lock
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Caller must hold the hwaccess_lock.
 */
void kbase_pm_disable_interrupts_nolock(struct kbase_device *kbdev);

/**
 * kbase_pm_init_hw - Initialize the hardware.
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 * @flags: Flags specifying the type of PM init
 *
 * This function checks the GPU ID register to ensure that the GPU is supported
 * by the driver and performs a reset on the device so that it is in a known
 * state before the device is used.
 *
 * Return: 0 if the device is supported and successfully reset.
 */
int kbase_pm_init_hw(struct kbase_device *kbdev, unsigned int flags);

/**
 * kbase_pm_reset_done - The GPU has been reset successfully.
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * This function must be called by the GPU interrupt handler when the
 * RESET_COMPLETED bit is set. It signals to the power management initialization
 * code that the GPU has been successfully reset.
 */
void kbase_pm_reset_done(struct kbase_device *kbdev);

#if MALI_USE_CSF
/**
 * kbase_pm_wait_for_desired_state - Wait for the desired power state to be
 *                                   reached
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Wait for the L2 and MCU state machines to reach the states corresponding
 * to the values of 'kbase_pm_is_l2_desired' and 'kbase_pm_is_mcu_desired'.
 *
 * The usual use-case for this is to ensure that all parts of GPU have been
 * powered up after performing a GPU Reset.
 *
 * Unlike kbase_pm_update_state(), the caller must not hold hwaccess_lock,
 * because this function will take that lock itself.
 *
 * NOTE: This may not wait until the correct state is reached if there is a
 * power off in progress and kbase_pm_context_active() was called instead of
 * kbase_csf_scheduler_pm_active().
 *
 * Return: 0 on success, error code on error
 */
int kbase_pm_wait_for_desired_state(struct kbase_device *kbdev);
#else
/**
 * kbase_pm_wait_for_desired_state - Wait for the desired power state to be
 *                                   reached
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Wait for the L2 and shader power state machines to reach the states
 * corresponding to the values of 'l2_desired' and 'shaders_desired'.
 *
 * The usual use-case for this is to ensure cores are 'READY' after performing
 * a GPU Reset.
 *
 * Unlike kbase_pm_update_state(), the caller must not hold hwaccess_lock,
 * because this function will take that lock itself.
 *
 * NOTE: This may not wait until the correct state is reached if there is a
 * power off in progress. To correctly wait for the desired state the caller
 * must ensure that this is not the case by, for example, calling
 * kbase_pm_wait_for_poweroff_work_complete().
 *
 * Return: 0 on success, error code on error
 */
int kbase_pm_wait_for_desired_state(struct kbase_device *kbdev);
#endif
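
/*
 * Usage sketch (illustrative, not part of the original header): per the NOTE
 * in the non-CSF variant above, a caller that must not race with an
 * in-progress power off can wait for the poweroff work first. Error handling
 * here is a sketch only.
 *
 *	kbase_pm_wait_for_poweroff_work_complete(kbdev);
 *	if (kbase_pm_wait_for_desired_state(kbdev))
 *		dev_warn(kbdev->dev, "wait for desired PM state failed");
 */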

/**
 * kbase_pm_wait_for_l2_powered - Wait for the L2 cache to be powered on
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Wait for the L2 to be powered on, and for the L2 and the state machines of
 * its dependent stack components to stabilise.
 *
 * kbdev->pm.active_count must be non-zero when calling this function.
 *
 * Unlike kbase_pm_update_state(), the caller must not hold hwaccess_lock,
 * because this function will take that lock itself.
 *
 * Return: 0 on success, error code on error
 */
int kbase_pm_wait_for_l2_powered(struct kbase_device *kbdev);

/**
 * kbase_pm_update_dynamic_cores_onoff - Update the L2 and shader power state
 *                                       machines after changing shader core
 *                                       availability
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * It can be called in any state, so the L2 and shader core power status must
 * be checked in this function, otherwise the shader/L2 state machines would
 * be broken.
 *
 * Caller must hold hwaccess_lock
 */
void kbase_pm_update_dynamic_cores_onoff(struct kbase_device *kbdev);

/**
 * kbase_pm_update_cores_state_nolock - Variant of kbase_pm_update_cores_state()
 *                                      where the caller must hold
 *                                      kbase_device.hwaccess_lock
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 */
void kbase_pm_update_cores_state_nolock(struct kbase_device *kbdev);

/**
 * kbase_pm_update_state - Update the L2 and shader power state machines
 * @kbdev: Device pointer
 */
void kbase_pm_update_state(struct kbase_device *kbdev);

/**
 * kbase_pm_state_machine_init - Initialize the state machines, primarily the
 *                               shader poweroff timer
 * @kbdev: Device pointer
 */
int kbase_pm_state_machine_init(struct kbase_device *kbdev);

/**
 * kbase_pm_state_machine_term - Clean up the PM state machines' data
 * @kbdev: Device pointer
 */
void kbase_pm_state_machine_term(struct kbase_device *kbdev);

/**
 * kbase_pm_update_cores_state - Update the desired state of shader cores from
 *                               the Power Policy, and begin any power
 *                               transitions.
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * This function will update the desired_xx_state members of
 * struct kbase_pm_device_data by calling into the current Power Policy. It will
 * then begin power transitions to make the hardware achieve the desired shader
 * core state.
 */
void kbase_pm_update_cores_state(struct kbase_device *kbdev);

/**
 * kbasep_pm_metrics_init - Initialize the metrics gathering framework.
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * This must be called before other metric gathering APIs are called.
 *
 * Return: 0 on success, error code on error
 */
int kbasep_pm_metrics_init(struct kbase_device *kbdev);

/**
 * kbasep_pm_metrics_term - Terminate the metrics gathering framework.
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * This must be called when metric gathering is no longer required. It is an
 * error to call any metrics gathering function (other than
 * kbasep_pm_metrics_init()) after calling this function.
 */
void kbasep_pm_metrics_term(struct kbase_device *kbdev);
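
/*
 * Lifecycle sketch (illustrative, not part of the original header): the
 * metrics framework is initialised once during device bring-up and torn down
 * on removal, with no metrics calls made after termination.
 *
 *	err = kbasep_pm_metrics_init(kbdev);
 *	if (err)
 *		return err;
 *	...
 *	kbasep_pm_metrics_term(kbdev);
 */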

/**
 * kbase_pm_report_vsync - Function to be called by the frame buffer driver to
 *                         update the vsync metric.
 * @kbdev:          The kbase device structure for the device (must be a
 *                  valid pointer)
 * @buffer_updated: True if the buffer has been updated on this VSync,
 *                  false otherwise
 *
 * This function should be called by the frame buffer driver to update whether
 * the system is hitting the vsync target or not. buffer_updated should be true
 * if the vsync corresponded with a new frame being displayed, otherwise it
 * should be false. This function does not need to be called every vsync, but
 * only when the value of @buffer_updated differs from a previous call.
 */
void kbase_pm_report_vsync(struct kbase_device *kbdev, int buffer_updated);
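
/*
 * Usage sketch (illustrative, not part of the original header): as noted
 * above, the frame buffer driver only needs to report transitions. The
 * 'updated' flag and its cached previous value are assumptions of this
 * sketch.
 *
 *	if (updated != prev_updated) {
 *		kbase_pm_report_vsync(kbdev, updated);
 *		prev_updated = updated;
 *	}
 */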

/**
 * kbase_pm_get_dvfs_action - Determine whether the DVFS system should change
 *                            the clock speed of the GPU.
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * This function should be called regularly by the DVFS system to check whether
 * the clock speed of the GPU needs updating.
 */
void kbase_pm_get_dvfs_action(struct kbase_device *kbdev);

/**
 * kbase_pm_request_gpu_cycle_counter - Mark that the GPU cycle counter is
 *                                      needed
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * If the caller is the first caller then the GPU cycle counters will be enabled
 * along with the l2 cache.
 *
 * The GPU must be powered when calling this function (i.e.
 * kbase_pm_context_active() must have been called).
 */
void kbase_pm_request_gpu_cycle_counter(struct kbase_device *kbdev);

/**
 * kbase_pm_request_gpu_cycle_counter_l2_is_on - Mark GPU cycle counter is
 *                                               needed (l2 cache already on)
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * This is a version of kbase_pm_request_gpu_cycle_counter() suitable for being
 * called when the l2 cache is known to be on and assured to remain on until
 * the subsequent call of kbase_pm_release_gpu_cycle_counter(), such as when a
 * job is submitted. It does not sleep and can be called from atomic functions.
 *
 * The GPU must be powered when calling this function (i.e.
 * kbase_pm_context_active() must have been called) and the l2 cache must be
 * powered on.
 */
void kbase_pm_request_gpu_cycle_counter_l2_is_on(struct kbase_device *kbdev);

/**
 * kbase_pm_release_gpu_cycle_counter - Mark that the GPU cycle counter is no
 *                                      longer in use
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * If the caller is the last caller then the GPU cycle counters will be
 * disabled. A request must have been made before a call to this.
 *
 * Caller must not hold the hwaccess_lock, as it will be taken in this function.
 * If the caller is already holding this lock then
 * kbase_pm_release_gpu_cycle_counter_nolock() must be used instead.
 */
void kbase_pm_release_gpu_cycle_counter(struct kbase_device *kbdev);

/**
 * kbase_pm_release_gpu_cycle_counter_nolock - Version of kbase_pm_release_gpu_cycle_counter()
 *                                             that does not take hwaccess_lock
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Caller must hold the hwaccess_lock.
 */
void kbase_pm_release_gpu_cycle_counter_nolock(struct kbase_device *kbdev);
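
/*
 * Pairing sketch (illustrative, not part of the original header): every
 * request must be balanced by a release, and the _nolock release variant is
 * used when the caller already holds hwaccess_lock.
 *
 *	kbase_pm_request_gpu_cycle_counter(kbdev);
 *	... sample the cycle counter ...
 *	kbase_pm_release_gpu_cycle_counter(kbdev);
 */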

/**
 * kbase_pm_wait_for_poweroff_work_complete - Wait for the poweroff workqueue to
 *                                            complete
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * This function effectively just waits for the @gpu_poweroff_wait_work work
 * item to complete, if it was enqueued. GPU may not have been powered down
 * before this function returns.
 */
void kbase_pm_wait_for_poweroff_work_complete(struct kbase_device *kbdev);

/**
 * kbase_pm_wait_for_gpu_power_down - Wait for the GPU power down to complete
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * This function waits for the actual gpu power down to complete.
 */
void kbase_pm_wait_for_gpu_power_down(struct kbase_device *kbdev);

/**
 * kbase_pm_runtime_init - Initialize runtime-pm for Mali GPU platform device
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Setup the power management callbacks and initialize/enable the runtime-pm
 * for the Mali GPU platform device, using the callback function. This must be
 * called before the kbase_pm_register_access_enable() function.
 *
 * Return: 0 on success, error code on error
 */
int kbase_pm_runtime_init(struct kbase_device *kbdev);

/**
 * kbase_pm_runtime_term - Disable runtime-pm for Mali GPU platform device
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 */
void kbase_pm_runtime_term(struct kbase_device *kbdev);

/**
 * kbase_pm_register_access_enable - Enable access to GPU registers
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Enables access to the GPU registers before power management has powered up
 * the GPU with kbase_pm_powerup().
 *
 * This results in the power management callbacks provided in the driver
 * configuration being called to turn on power and/or clocks to the GPU. See
 * kbase_pm_callback_conf.
 *
 * This should only be used before power management is powered up with
 * kbase_pm_powerup()
 */
void kbase_pm_register_access_enable(struct kbase_device *kbdev);

/**
 * kbase_pm_register_access_disable - Disable early register access
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Disables access to the GPU registers enabled earlier by a call to
 * kbase_pm_register_access_enable().
 *
 * This results in the power management callbacks provided in the driver
 * configuration being called to turn off power and/or clocks to the GPU. See
 * kbase_pm_callback_conf
 *
 * This should only be used before power management is powered up with
 * kbase_pm_powerup()
 */
void kbase_pm_register_access_disable(struct kbase_device *kbdev);
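
/*
 * Probe-time sketch (illustrative, not part of the original header): early
 * register access brackets reads performed before kbase_pm_powerup(), e.g.
 * sampling the GPU ID. The GPU_ID register offset is assumed here.
 *
 *	kbase_pm_register_access_enable(kbdev);
 *	gpu_id = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_ID));
 *	kbase_pm_register_access_disable(kbdev);
 */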

/* NOTE: kbase_pm_is_suspending is in mali_kbase.h, because it is an inline
 * function
 */

/**
 * kbase_pm_metrics_is_active - Check if the power management metrics
 *                              collection is active.
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Note that this returns whether the power management metrics collection was
 * active at the time of calling; it is possible that the metrics collection
 * has been enabled or disabled since.
 *
 * The caller must handle the consequence that the state may have changed.
 *
 * Return: true if metrics collection was active else false.
 */
bool kbase_pm_metrics_is_active(struct kbase_device *kbdev);

/**
 * kbase_pm_do_poweron - Power on the GPU, and any cores that are requested.
 *
 * @kbdev:     The kbase device structure for the device (must be a valid
 *             pointer)
 * @is_resume: true if power on due to resume after suspend,
 *             false otherwise
 */
void kbase_pm_do_poweron(struct kbase_device *kbdev, bool is_resume);

/**
 * kbase_pm_do_poweroff - Power off the GPU, and any cores that have been
 *                        requested.
 *
 * @kbdev: The kbase device structure for the device (must be a valid
 *         pointer)
 */
void kbase_pm_do_poweroff(struct kbase_device *kbdev);

#if defined(CONFIG_MALI_BIFROST_DEVFREQ) || defined(CONFIG_MALI_BIFROST_DVFS)
void kbase_pm_get_dvfs_metrics(struct kbase_device *kbdev,
			       struct kbasep_pm_metrics *last,
			       struct kbasep_pm_metrics *diff);
#endif /* defined(CONFIG_MALI_BIFROST_DEVFREQ) || defined(CONFIG_MALI_BIFROST_DVFS) */

#ifdef CONFIG_MALI_BIFROST_DVFS

#if MALI_USE_CSF
/**
 * kbase_platform_dvfs_event - Report utilisation to DVFS code for CSF GPU
 *
 * @kbdev:       The kbase device structure for the device (must be a
 *               valid pointer)
 * @utilisation: The current calculated utilisation by the metrics system.
 *
 * Function provided by platform specific code when DVFS is enabled to allow
 * the power management metrics system to report utilisation.
 *
 * Return: 0 on failure and non-zero on success.
 */
int kbase_platform_dvfs_event(struct kbase_device *kbdev, u32 utilisation);
#else
/**
 * kbase_platform_dvfs_event - Report utilisation to DVFS code for JM GPU
 *
 * @kbdev:         The kbase device structure for the device (must be a
 *                 valid pointer)
 * @utilisation:   The current calculated utilisation by the metrics system.
 * @util_gl_share: The current calculated gl share of utilisation.
 * @util_cl_share: The current calculated cl share of utilisation per core
 *                 group.
 *
 * Function provided by platform specific code when DVFS is enabled to allow
 * the power management metrics system to report utilisation.
 *
 * Return: 0 on failure and non-zero on success.
 */
int kbase_platform_dvfs_event(struct kbase_device *kbdev, u32 utilisation,
			      u32 util_gl_share, u32 util_cl_share[2]);
#endif

#endif /* CONFIG_MALI_BIFROST_DVFS */

/**
 * kbase_pm_power_changed - Inform the power management system that the power
 *                          state of cores has changed
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 */
void kbase_pm_power_changed(struct kbase_device *kbdev);

/**
 * kbase_pm_metrics_update - Inform the metrics system that an atom is either
 *                           about to be run or has just completed.
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 * @now:   Pointer to the timestamp of the change, or NULL to use current time
 *
 * Caller must hold hwaccess_lock
 */
void kbase_pm_metrics_update(struct kbase_device *kbdev,
			     ktime_t *now);

/**
 * kbase_pm_cache_snoop_enable - Allow CPU snoops on the GPU
 * @kbdev: Device pointer
 *
 * If the GPU does not have coherency this is a no-op.
 *
 * This function should be called after L2 power up.
 */
void kbase_pm_cache_snoop_enable(struct kbase_device *kbdev);

/**
 * kbase_pm_cache_snoop_disable - Prevent CPU snoops on the GPU
 * @kbdev: Device pointer
 *
 * If the GPU does not have coherency this is a no-op.
 *
 * This function should be called before L2 power off.
 */
void kbase_pm_cache_snoop_disable(struct kbase_device *kbdev);

#ifdef CONFIG_MALI_BIFROST_DEVFREQ
/**
 * kbase_devfreq_set_core_mask - Set devfreq core mask
 * @kbdev:     Device pointer
 * @core_mask: New core mask
 *
 * This function is used by devfreq to change the available core mask as
 * required by Dynamic Core Scaling.
 */
void kbase_devfreq_set_core_mask(struct kbase_device *kbdev, u64 core_mask);
#endif

/**
 * kbase_pm_reset_start_locked - Signal that GPU reset has started
 * @kbdev: Device pointer
 *
 * Normal power management operation will be suspended until the reset has
 * completed.
 *
 * Caller must hold hwaccess_lock.
 */
void kbase_pm_reset_start_locked(struct kbase_device *kbdev);

/**
 * kbase_pm_reset_complete - Signal that GPU reset has completed
 * @kbdev: Device pointer
 *
 * Normal power management operation will be resumed. The power manager will
 * re-evaluate what cores are needed and power on or off as required.
 */
void kbase_pm_reset_complete(struct kbase_device *kbdev);

#if !MALI_USE_CSF
/**
 * kbase_pm_protected_override_enable - Enable the protected mode override
 * @kbdev: Device pointer
 *
 * When the protected mode override is enabled, all shader cores are requested
 * to power down, and the L2 power state can be controlled by
 * kbase_pm_protected_l2_override().
 *
 * Caller must hold hwaccess_lock.
 */
void kbase_pm_protected_override_enable(struct kbase_device *kbdev);

/**
 * kbase_pm_protected_override_disable - Disable the protected mode override
 * @kbdev: Device pointer
 *
 * Caller must hold hwaccess_lock.
 */
void kbase_pm_protected_override_disable(struct kbase_device *kbdev);

/**
 * kbase_pm_protected_l2_override - Control the protected mode L2 override
 * @kbdev:    Device pointer
 * @override: true to enable the override, false to disable
 *
 * When the driver is transitioning in or out of protected mode, the L2 cache is
 * forced to power off. This can be overridden to force the L2 cache to power
 * on. This is required to change coherency settings on some GPUs.
 */
void kbase_pm_protected_l2_override(struct kbase_device *kbdev, bool override);

/**
 * kbase_pm_protected_entry_override_enable - Enable the protected mode entry
 *                                            override
 * @kbdev: Device pointer
 *
 * Initiate a GPU reset and enable the protected mode entry override flag if
 * the l2_always_on WA is enabled and the platform is fully coherent. If a GPU
 * reset is already ongoing then the protected mode entry override flag will
 * not be enabled and the function will have to be called again.
 *
 * When the protected mode entry override flag is enabled to power down L2 via
 * GPU reset, the GPU reset handling behavior gets changed. For example the
 * call to kbase_backend_reset() is skipped, HW counters are not re-enabled
 * and L2 isn't powered up again post reset.
 * This is needed only as a workaround for a HW issue where explicit power down
 * of L2 causes a glitch. For entering protected mode on fully coherent
 * platforms L2 needs to be powered down to switch to IO coherency mode, so to
 * avoid the glitch GPU reset is used to power down L2. Hence, this function
 * does nothing on systems where the glitch issue isn't present.
 *
 * Caller must hold hwaccess_lock. Should only be called during the transition
 * to enter protected mode.
 *
 * Return: -EAGAIN if a GPU reset was required for the glitch workaround but
 *         was already ongoing, otherwise 0.
 */
int kbase_pm_protected_entry_override_enable(struct kbase_device *kbdev);

/**
 * kbase_pm_protected_entry_override_disable - Disable the protected mode entry
 *                                             override
 * @kbdev: Device pointer
 *
 * This shall be called once L2 has powered down and the switch to IO coherency
 * mode has been made. As with kbase_pm_protected_entry_override_enable(),
 * this function does nothing on systems where the glitch issue isn't present.
 *
 * Caller must hold hwaccess_lock. Should only be called during the transition
 * to enter protected mode.
 */
void kbase_pm_protected_entry_override_disable(struct kbase_device *kbdev);
#endif

/* If true, the driver should explicitly control corestack power management,
 * instead of relying on the Power Domain Controller.
 */
extern bool corestack_driver_control;

/**
 * kbase_pm_is_l2_desired - Check whether the L2 cache is desired
 *
 * @kbdev: Device pointer
 *
 * This shall be called to check whether the L2 cache needs to be powered on.
 *
 * Return: true if the L2 cache needs to power on
 */
bool kbase_pm_is_l2_desired(struct kbase_device *kbdev);

#if MALI_USE_CSF
/**
 * kbase_pm_is_mcu_desired - Check whether MCU is desired
 *
 * @kbdev: Device pointer
 *
 * This shall be called to check whether MCU needs to be enabled.
 *
 * Return: true if MCU needs to be enabled.
 */
bool kbase_pm_is_mcu_desired(struct kbase_device *kbdev);

/**
 * kbase_pm_is_mcu_inactive - Check if the MCU is inactive (i.e. either
 *                            it is disabled or it is in sleep)
 *
 * @kbdev: kbase device
 * @state: state of the MCU state machine.
 *
 * This function must be called with hwaccess_lock held.
 * L2 cache can be turned off if this function returns true.
 *
 * Return: true if MCU is inactive
 */
bool kbase_pm_is_mcu_inactive(struct kbase_device *kbdev,
			      enum kbase_mcu_state state);

/**
 * kbase_pm_idle_groups_sched_suspendable - Check whether the scheduler can be
 *                                          suspended to low power state when
 *                                          all the CSGs are idle
 *
 * @kbdev: Device pointer
 *
 * Return: true if allowed to enter the suspended state.
 */
static inline
bool kbase_pm_idle_groups_sched_suspendable(struct kbase_device *kbdev)
{
	lockdep_assert_held(&kbdev->hwaccess_lock);

	return !(kbdev->pm.backend.csf_pm_sched_flags &
		 CSF_DYNAMIC_PM_SCHED_IGNORE_IDLE);
}

/**
 * kbase_pm_no_runnables_sched_suspendable - Check whether the scheduler can be
 *                                           suspended to low power state when
 *                                           there are no runnable CSGs.
 *
 * @kbdev: Device pointer
 *
 * Return: true if allowed to enter the suspended state.
 */
static inline
bool kbase_pm_no_runnables_sched_suspendable(struct kbase_device *kbdev)
{
	lockdep_assert_held(&kbdev->hwaccess_lock);

	return !(kbdev->pm.backend.csf_pm_sched_flags &
		 CSF_DYNAMIC_PM_SCHED_NO_SUSPEND);
}

/**
 * kbase_pm_no_mcu_core_pwroff - Check whether the PM is required to keep the
 *                               MCU core powered in accordance to the active
 *                               power management policy
 *
 * @kbdev: Device pointer
 *
 * Return: true if the MCU is to remain powered.
 */
static inline bool kbase_pm_no_mcu_core_pwroff(struct kbase_device *kbdev)
{
	lockdep_assert_held(&kbdev->hwaccess_lock);

	return kbdev->pm.backend.csf_pm_sched_flags &
		CSF_DYNAMIC_PM_CORE_KEEP_ON;
}
#endif

/**
 * kbase_pm_lock - Lock all necessary mutexes to perform PM actions
 *
 * @kbdev: Device pointer
 *
 * This function locks the correct mutexes independent of GPU architecture.
 */
static inline void kbase_pm_lock(struct kbase_device *kbdev)
{
#if !MALI_USE_CSF
	mutex_lock(&kbdev->js_data.runpool_mutex);
#endif /* !MALI_USE_CSF */
	mutex_lock(&kbdev->pm.lock);
}

/**
 * kbase_pm_unlock - Unlock mutexes locked by kbase_pm_lock
 *
 * @kbdev: Device pointer
 */
static inline void kbase_pm_unlock(struct kbase_device *kbdev)
{
	mutex_unlock(&kbdev->pm.lock);
#if !MALI_USE_CSF
	mutex_unlock(&kbdev->js_data.runpool_mutex);
#endif /* !MALI_USE_CSF */
}
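
/*
 * Usage sketch (illustrative, not part of the original header): PM actions
 * are bracketed by these helpers so that the correct mutexes are taken on
 * both JM and CSF GPUs.
 *
 *	kbase_pm_lock(kbdev);
 *	... perform PM actions ...
 *	kbase_pm_unlock(kbdev);
 */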

#if MALI_USE_CSF && defined(KBASE_PM_RUNTIME)
/**
 * kbase_pm_gpu_sleep_allowed - Check if the GPU is allowed to be put in sleep
 *
 * @kbdev: Device pointer
 *
 * This function is called on GPU idle notification and if it returns false
 * then GPU power down will be triggered by suspending the CSGs and halting
 * the MCU.
 *
 * Return: true if the GPU is allowed to be in the sleep state.
 */
static inline bool kbase_pm_gpu_sleep_allowed(struct kbase_device *kbdev)
{
	/* If the autosuspend_delay has been set to 0 then it doesn't make
	 * sense to first put GPU to sleep state and then power it down;
	 * instead it would be better to power it down right away.
	 * The same applies when autosuspend_delay is set to a negative
	 * value, which implies that runtime pm is effectively disabled by
	 * the kernel.
	 * A high positive value of autosuspend_delay can be used to keep the
	 * GPU in sleep state for a long time.
	 */
	if (unlikely(!kbdev->dev->power.autosuspend_delay ||
		     (kbdev->dev->power.autosuspend_delay < 0)))
		return false;

	return kbdev->pm.backend.gpu_sleep_supported;
}

/**
 * kbase_pm_enable_db_mirror_interrupt - Enable the doorbell mirror interrupt
 *                                       to detect User doorbell rings.
 *
 * @kbdev: Device pointer
 *
 * This function is called just before sending the sleep request to MCU
 * firmware so that User doorbell rings can be detected whilst the GPU remains
 * in the sleep state.
 */
static inline void kbase_pm_enable_db_mirror_interrupt(struct kbase_device *kbdev)
{
	lockdep_assert_held(&kbdev->hwaccess_lock);

	if (!kbdev->pm.backend.db_mirror_interrupt_enabled) {
		u32 irq_mask = kbase_reg_read(kbdev,
				GPU_CONTROL_REG(GPU_IRQ_MASK));

		WARN_ON(irq_mask & DOORBELL_MIRROR);

		kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK),
				irq_mask | DOORBELL_MIRROR);
		kbdev->pm.backend.db_mirror_interrupt_enabled = true;
	}
}

/**
 * kbase_pm_disable_db_mirror_interrupt - Disable the doorbell mirror interrupt.
 *
 * @kbdev: Device pointer
 *
 * This function is called when the doorbell mirror interrupt is received or
 * when the MCU needs to be reactivated by enabling the doorbell notification.
 */
static inline void kbase_pm_disable_db_mirror_interrupt(struct kbase_device *kbdev)
{
	lockdep_assert_held(&kbdev->hwaccess_lock);

	if (kbdev->pm.backend.db_mirror_interrupt_enabled) {
		u32 irq_mask = kbase_reg_read(kbdev,
				GPU_CONTROL_REG(GPU_IRQ_MASK));

		kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK),
				irq_mask & ~DOORBELL_MIRROR);
		kbdev->pm.backend.db_mirror_interrupt_enabled = false;
	}
}
#endif

#endif /* _KBASE_BACKEND_PM_INTERNAL_H_ */