// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2010-2021 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

/*
 * GPU backend implementation of base kernel power management APIs
 */

#include <mali_kbase.h>
#include <gpu/mali_kbase_gpu_regmap.h>
#include <mali_kbase_config_defaults.h>

#include <mali_kbase_pm.h>
#if !MALI_USE_CSF
#include <mali_kbase_hwaccess_jm.h>
#include <backend/gpu/mali_kbase_js_internal.h>
#include <backend/gpu/mali_kbase_jm_internal.h>
#else
#include <linux/pm_runtime.h>
#include <mali_kbase_reset_gpu.h>
#endif /* !MALI_USE_CSF */
#include <mali_kbase_hwcnt_context.h>
#include <backend/gpu/mali_kbase_pm_internal.h>
#include <backend/gpu/mali_kbase_devfreq.h>
#include <mali_kbase_dummy_job_wa.h>
#include <backend/gpu/mali_kbase_irq_internal.h>

static void kbase_pm_gpu_poweroff_wait_wq(struct work_struct *data);
static void kbase_pm_hwcnt_disable_worker(struct work_struct *data);
static void kbase_pm_gpu_clock_control_worker(struct work_struct *data);

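/* Copy the platform-specific power management callbacks (from
 * POWER_MANAGEMENT_CALLBACKS) into the PM backend, then run the runtime PM
 * init callback if one was provided.
 */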
int kbase_pm_runtime_init(struct kbase_device *kbdev)
{
	struct kbase_pm_callback_conf *callbacks;

	callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS;
	if (callbacks) {
		kbdev->pm.backend.callback_power_on =
					callbacks->power_on_callback;
		kbdev->pm.backend.callback_power_off =
					callbacks->power_off_callback;
		kbdev->pm.backend.callback_power_suspend =
					callbacks->power_suspend_callback;
		kbdev->pm.backend.callback_power_resume =
					callbacks->power_resume_callback;
		kbdev->pm.callback_power_runtime_init =
					callbacks->power_runtime_init_callback;
		kbdev->pm.callback_power_runtime_term =
					callbacks->power_runtime_term_callback;
		kbdev->pm.backend.callback_power_runtime_on =
					callbacks->power_runtime_on_callback;
		kbdev->pm.backend.callback_power_runtime_off =
					callbacks->power_runtime_off_callback;
		kbdev->pm.backend.callback_power_runtime_idle =
					callbacks->power_runtime_idle_callback;
		kbdev->pm.backend.callback_soft_reset =
					callbacks->soft_reset_callback;
		kbdev->pm.backend.callback_power_runtime_gpu_idle =
					callbacks->power_runtime_gpu_idle_callback;
		kbdev->pm.backend.callback_power_runtime_gpu_active =
					callbacks->power_runtime_gpu_active_callback;

		if (callbacks->power_runtime_init_callback)
			return callbacks->power_runtime_init_callback(kbdev);
		else
			return 0;
	}

	kbdev->pm.backend.callback_power_on = NULL;
	kbdev->pm.backend.callback_power_off = NULL;
	kbdev->pm.backend.callback_power_suspend = NULL;
	kbdev->pm.backend.callback_power_resume = NULL;
	kbdev->pm.callback_power_runtime_init = NULL;
	kbdev->pm.callback_power_runtime_term = NULL;
	kbdev->pm.backend.callback_power_runtime_on = NULL;
	kbdev->pm.backend.callback_power_runtime_off = NULL;
	kbdev->pm.backend.callback_power_runtime_idle = NULL;
	kbdev->pm.backend.callback_soft_reset = NULL;
	kbdev->pm.backend.callback_power_runtime_gpu_idle = NULL;
	kbdev->pm.backend.callback_power_runtime_gpu_active = NULL;

	return 0;
}

void kbase_pm_runtime_term(struct kbase_device *kbdev)
{
	if (kbdev->pm.callback_power_runtime_term) {
		kbdev->pm.callback_power_runtime_term(kbdev);
	}
}

void kbase_pm_register_access_enable(struct kbase_device *kbdev)
{
	struct kbase_pm_callback_conf *callbacks;

	callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS;

	if (callbacks)
		callbacks->power_on_callback(kbdev);

#ifdef CONFIG_MALI_ARBITER_SUPPORT
	if (WARN_ON(kbase_pm_is_gpu_lost(kbdev)))
		dev_err(kbdev->dev, "Attempting to power on while GPU lost\n");
#endif

	kbdev->pm.backend.gpu_powered = true;
}

void kbase_pm_register_access_disable(struct kbase_device *kbdev)
{
	struct kbase_pm_callback_conf *callbacks;

	callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS;

	kbdev->pm.backend.gpu_powered = false;

	if (callbacks)
		callbacks->power_off_callback(kbdev);
}

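/* One-time initialisation of the PM backend: creates the power-off wait
 * workqueue, resets the backend state, sets up core availability, the power
 * policy and the PM state machine, and configures the GPU2017-1336
 * workarounds.
 */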
int kbase_hwaccess_pm_init(struct kbase_device *kbdev)
{
	int ret = 0;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	mutex_init(&kbdev->pm.lock);

	kbdev->pm.backend.gpu_poweroff_wait_wq = alloc_workqueue("kbase_pm_poweroff_wait",
			WQ_HIGHPRI | WQ_UNBOUND, 1);
	if (!kbdev->pm.backend.gpu_poweroff_wait_wq)
		return -ENOMEM;

	INIT_WORK(&kbdev->pm.backend.gpu_poweroff_wait_work,
			kbase_pm_gpu_poweroff_wait_wq);

	kbdev->pm.backend.ca_cores_enabled = ~0ull;
	kbdev->pm.backend.gpu_powered = false;
	kbdev->pm.backend.gpu_ready = false;
	kbdev->pm.suspending = false;
#ifdef CONFIG_MALI_ARBITER_SUPPORT
	kbase_pm_set_gpu_lost(kbdev, false);
#endif
#ifdef CONFIG_MALI_BIFROST_DEBUG
	kbdev->pm.backend.driver_ready_for_irqs = false;
#endif /* CONFIG_MALI_BIFROST_DEBUG */
	init_waitqueue_head(&kbdev->pm.backend.gpu_in_desired_state_wait);

#if !MALI_USE_CSF
	/* Initialise the metrics subsystem */
	ret = kbasep_pm_metrics_init(kbdev);
	if (ret)
		return ret;
#else
	mutex_init(&kbdev->pm.backend.policy_change_lock);
	kbdev->pm.backend.policy_change_clamp_state_to_off = false;
	/* Due to dependency on kbase_ipa_control, the metrics subsystem can't
	 * be initialized here.
	 */
	CSTD_UNUSED(ret);
#endif

	init_waitqueue_head(&kbdev->pm.backend.reset_done_wait);
	kbdev->pm.backend.reset_done = false;

	init_waitqueue_head(&kbdev->pm.zero_active_count_wait);
	init_waitqueue_head(&kbdev->pm.resume_wait);
	kbdev->pm.active_count = 0;

	spin_lock_init(&kbdev->pm.backend.gpu_cycle_counter_requests_lock);

	init_waitqueue_head(&kbdev->pm.backend.poweroff_wait);

	if (kbase_pm_ca_init(kbdev) != 0)
		goto workq_fail;

	kbase_pm_policy_init(kbdev);

	if (kbase_pm_state_machine_init(kbdev) != 0)
		goto pm_state_machine_fail;

	kbdev->pm.backend.hwcnt_desired = false;
	kbdev->pm.backend.hwcnt_disabled = true;
	INIT_WORK(&kbdev->pm.backend.hwcnt_disable_work,
		kbase_pm_hwcnt_disable_worker);
	kbase_hwcnt_context_disable(kbdev->hwcnt_gpu_ctx);

	if (IS_ENABLED(CONFIG_MALI_HW_ERRATA_1485982_NOT_AFFECTED)) {
		kbdev->pm.backend.l2_always_on = false;
		kbdev->pm.backend.gpu_clock_slow_down_wa = false;

		return 0;
	}

	/* WA1: L2 always_on for GPUs affected by GPU2017-1336 */
	if (!IS_ENABLED(CONFIG_MALI_HW_ERRATA_1485982_USE_CLOCK_ALTERNATIVE)) {
		kbdev->pm.backend.gpu_clock_slow_down_wa = false;
		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_GPU2017_1336))
			kbdev->pm.backend.l2_always_on = true;
		else
			kbdev->pm.backend.l2_always_on = false;

		return 0;
	}

	/* WA3: Clock slow down for GPUs affected by GPU2017-1336 */
	kbdev->pm.backend.l2_always_on = false;
	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_GPU2017_1336)) {
		kbdev->pm.backend.gpu_clock_slow_down_wa = true;
		kbdev->pm.backend.gpu_clock_suspend_freq = 0;
		kbdev->pm.backend.gpu_clock_slow_down_desired = true;
		kbdev->pm.backend.gpu_clock_slowed_down = false;
		INIT_WORK(&kbdev->pm.backend.gpu_clock_control_work,
			kbase_pm_gpu_clock_control_worker);
	} else
		kbdev->pm.backend.gpu_clock_slow_down_wa = false;

	return 0;

pm_state_machine_fail:
	kbase_pm_policy_term(kbdev);
	kbase_pm_ca_term(kbdev);
workq_fail:
#if !MALI_USE_CSF
	kbasep_pm_metrics_term(kbdev);
#endif
	return -EINVAL;
}

void kbase_pm_do_poweron(struct kbase_device *kbdev, bool is_resume)
{
	lockdep_assert_held(&kbdev->pm.lock);

	/* Turn clocks and interrupts on - no-op if we haven't done a previous
	 * kbase_pm_clock_off()
	 */
	kbase_pm_clock_on(kbdev, is_resume);

	if (!is_resume) {
		unsigned long flags;

		/* Force update of L2 state - if we have abandoned a power off
		 * then this may be required to power the L2 back on.
		 */
		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
		kbase_pm_update_state(kbdev);
		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
	}

	/* Update core status as required by the policy */
	kbase_pm_update_cores_state(kbdev);

	/* NOTE: We don't wait to reach the desired state, since running atoms
	 * will wait for that state to be reached anyway
	 */
}

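/* Complete a pending power off once the shader/L2 (and MCU) state machines
 * have reached their off states. Either hands the GPU over to runtime PM via
 * the gpu_idle callback or turns the clock off directly, flushing any pending
 * MMU fault work first.
 */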
static void pm_handle_power_off(struct kbase_device *kbdev)
{
	struct kbase_pm_backend_data *backend = &kbdev->pm.backend;
#if MALI_USE_CSF
	enum kbase_mcu_state mcu_state;
#endif
	unsigned long flags;

	lockdep_assert_held(&kbdev->pm.lock);

	if (backend->poweron_required)
		return;

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
#if MALI_USE_CSF && defined(KBASE_PM_RUNTIME)
	if (kbdev->pm.backend.gpu_wakeup_override) {
		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
		return;
	}
#endif
	WARN_ON(backend->shaders_state !=
			KBASE_SHADERS_OFF_CORESTACK_OFF ||
		backend->l2_state != KBASE_L2_OFF);
#if MALI_USE_CSF
	mcu_state = backend->mcu_state;
	WARN_ON(!kbase_pm_is_mcu_inactive(kbdev, mcu_state));
#endif
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

#if MALI_USE_CSF && defined(KBASE_PM_RUNTIME)
	if (backend->callback_power_runtime_gpu_idle) {
		WARN_ON(backend->gpu_idled);
		backend->callback_power_runtime_gpu_idle(kbdev);
		backend->gpu_idled = true;
		return;
	}
#endif

	/* Disable interrupts and turn the clock off */
	if (!kbase_pm_clock_off(kbdev)) {
		/*
		 * Page/bus faults are pending, must drop locks to
		 * process. Interrupts are disabled so no more faults
		 * should be generated at this point.
		 */
		kbase_pm_unlock(kbdev);
		kbase_flush_mmu_wqs(kbdev);
		kbase_pm_lock(kbdev);

#ifdef CONFIG_MALI_ARBITER_SUPPORT
		/* poweron_required may have changed while pm lock
		 * was released.
		 */
		if (kbase_pm_is_gpu_lost(kbdev))
			backend->poweron_required = false;
#endif

		/* Turn off the clock now that the faults have been handled. We
		 * dropped locks so poweron_required may have changed -
		 * power back on if this is the case (effectively only
		 * re-enabling of the interrupts would be done in this
		 * case, as the clocks to GPU were not withdrawn yet).
		 */
		if (backend->poweron_required)
			kbase_pm_clock_on(kbdev, false);
		else
			WARN_ON(!kbase_pm_clock_off(kbdev));
	}
}

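/* Work item queued when the L2 reaches the off state. Performs the actual
 * power off via pm_handle_power_off(), re-powers the GPU if a power-on
 * request arrived in the meantime, and wakes any waiters on poweroff_wait.
 */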
static void kbase_pm_gpu_poweroff_wait_wq(struct work_struct *data)
{
	struct kbase_device *kbdev = container_of(data, struct kbase_device,
			pm.backend.gpu_poweroff_wait_work);
	struct kbase_pm_device_data *pm = &kbdev->pm;
	struct kbase_pm_backend_data *backend = &pm->backend;
	unsigned long flags;

	KBASE_KTRACE_ADD(kbdev, PM_POWEROFF_WAIT_WQ, NULL, 0);

#if !MALI_USE_CSF
	/* Wait for power transitions to complete. We do this with no locks held
	 * so that we don't deadlock with any pending workqueues.
	 */
	kbase_pm_wait_for_desired_state(kbdev);
#endif

	kbase_pm_lock(kbdev);

#ifdef CONFIG_MALI_ARBITER_SUPPORT
	if (kbase_pm_is_gpu_lost(kbdev))
		backend->poweron_required = false;
#endif

	pm_handle_power_off(kbdev);

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	backend->poweroff_wait_in_progress = false;
	if (backend->poweron_required) {
		backend->poweron_required = false;
		kbdev->pm.backend.l2_desired = true;
#if MALI_USE_CSF
		kbdev->pm.backend.mcu_desired = true;
#endif
		kbase_pm_update_state(kbdev);
		kbase_pm_update_cores_state_nolock(kbdev);
#if !MALI_USE_CSF
		kbase_backend_slot_update(kbdev);
#endif /* !MALI_USE_CSF */
	}
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	kbase_pm_unlock(kbdev);

	wake_up(&kbdev->pm.backend.poweroff_wait);
}

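/* Part of the GPU2017-1336 clock slow-down workaround: lower the GPU clock to
 * the configured suspend frequency before the L2 is powered down, using either
 * the devfreq or the DVFS path depending on the kernel configuration.
 */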
static void kbase_pm_l2_clock_slow(struct kbase_device *kbdev)
{
#if defined(CONFIG_MALI_BIFROST_DVFS)
	struct clk *clk = kbdev->clocks[0];
#endif

	if (!kbdev->pm.backend.gpu_clock_slow_down_wa)
		return;

	/* No suspend clock is specified */
	if (WARN_ON_ONCE(!kbdev->pm.backend.gpu_clock_suspend_freq))
		return;

#if defined(CONFIG_MALI_BIFROST_DEVFREQ)

	/* Suspend devfreq */
	devfreq_suspend_device(kbdev->devfreq);

	/* Keep the current freq to restore it upon resume */
	kbdev->previous_frequency = kbdev->current_nominal_freq;

	/* Slow down GPU clock to the suspend clock */
	kbase_devfreq_force_freq(kbdev,
			kbdev->pm.backend.gpu_clock_suspend_freq);

#elif defined(CONFIG_MALI_BIFROST_DVFS) /* CONFIG_MALI_BIFROST_DEVFREQ */

	if (WARN_ON_ONCE(!clk))
		return;

	/* Stop the metrics gathering framework */
	if (kbase_pm_metrics_is_active(kbdev))
		kbase_pm_metrics_stop(kbdev);

	/* Keep the current freq to restore it upon resume */
	kbdev->previous_frequency = clk_get_rate(clk);

	/* Slow down GPU clock to the suspend clock */
	if (WARN_ON_ONCE(clk_set_rate(clk,
				kbdev->pm.backend.gpu_clock_suspend_freq)))
		dev_err(kbdev->dev, "Failed to set suspend freq\n");

#endif /* CONFIG_MALI_BIFROST_DVFS */
}

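/* Counterpart of kbase_pm_l2_clock_slow(): restore the GPU clock to the
 * frequency saved before the slow-down and resume devfreq or the metrics
 * gathering, as appropriate.
 */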
static void kbase_pm_l2_clock_normalize(struct kbase_device *kbdev)
{
#if defined(CONFIG_MALI_BIFROST_DVFS)
	struct clk *clk = kbdev->clocks[0];
#endif

	if (!kbdev->pm.backend.gpu_clock_slow_down_wa)
		return;

#if defined(CONFIG_MALI_BIFROST_DEVFREQ)

	/* Restore GPU clock to the previous one */
	kbase_devfreq_force_freq(kbdev, kbdev->previous_frequency);

	/* Resume devfreq */
	devfreq_resume_device(kbdev->devfreq);

#elif defined(CONFIG_MALI_BIFROST_DVFS) /* CONFIG_MALI_BIFROST_DEVFREQ */

	if (WARN_ON_ONCE(!clk))
		return;

	/* Restore GPU clock */
	if (WARN_ON_ONCE(clk_set_rate(clk, kbdev->previous_frequency)))
		dev_err(kbdev->dev, "Failed to restore freq (%lu)\n",
			kbdev->previous_frequency);

	/* Restart the metrics gathering framework */
	kbase_pm_metrics_start(kbdev);

#endif /* CONFIG_MALI_BIFROST_DVFS */
}

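/* Work item used by the L2 state machine to slow down or restore the GPU
 * clock outside of atomic context; the state machine is then kicked again
 * to move to its next state.
 */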
static void kbase_pm_gpu_clock_control_worker(struct work_struct *data)
{
	struct kbase_device *kbdev = container_of(data, struct kbase_device,
			pm.backend.gpu_clock_control_work);
	struct kbase_pm_device_data *pm = &kbdev->pm;
	struct kbase_pm_backend_data *backend = &pm->backend;
	unsigned long flags;
	bool slow_down = false, normalize = false;

	/* Determine if GPU clock control is required */
	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	if (!backend->gpu_clock_slowed_down &&
			backend->gpu_clock_slow_down_desired) {
		slow_down = true;
		backend->gpu_clock_slowed_down = true;
	} else if (backend->gpu_clock_slowed_down &&
			!backend->gpu_clock_slow_down_desired) {
		normalize = true;
		backend->gpu_clock_slowed_down = false;
	}
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	/* Control the GPU clock as requested by the L2 state machine.
	 * The GPU clock needs to be lowered for safe L2 power down
	 * and restored to its previous speed at L2 power up.
	 */
	if (slow_down)
		kbase_pm_l2_clock_slow(kbdev);
	else if (normalize)
		kbase_pm_l2_clock_normalize(kbdev);

	/* Tell the L2 state machine to transition to the next state */
	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	kbase_pm_update_state(kbdev);
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
}

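/* Work item that disables hardware counters when the PM state machine has
 * requested it. If the desired state changed while the disable was in
 * progress, the disable is undone by re-enabling the counters.
 */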
static void kbase_pm_hwcnt_disable_worker(struct work_struct *data)
{
	struct kbase_device *kbdev = container_of(data, struct kbase_device,
			pm.backend.hwcnt_disable_work);
	struct kbase_pm_device_data *pm = &kbdev->pm;
	struct kbase_pm_backend_data *backend = &pm->backend;
	unsigned long flags;
	bool do_disable;

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	do_disable = !backend->hwcnt_desired && !backend->hwcnt_disabled;
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	if (!do_disable)
		return;

	kbase_hwcnt_context_disable(kbdev->hwcnt_gpu_ctx);

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	do_disable = !backend->hwcnt_desired && !backend->hwcnt_disabled;

	if (do_disable) {
		/* PM state did not change while we were doing the disable,
		 * so commit the work we just performed and continue the state
		 * machine.
		 */
		backend->hwcnt_disabled = true;
		kbase_pm_update_state(kbdev);
#if !MALI_USE_CSF
		kbase_backend_slot_update(kbdev);
#endif /* !MALI_USE_CSF */
	} else {
		/* PM state was updated while we were doing the disable,
		 * so we need to undo the disable we just performed.
		 */
#if MALI_USE_CSF
		unsigned long lock_flags;

		kbase_csf_scheduler_spin_lock(kbdev, &lock_flags);
#endif
		kbase_hwcnt_context_enable(kbdev->hwcnt_gpu_ctx);
#if MALI_USE_CSF
		kbase_csf_scheduler_spin_unlock(kbdev, lock_flags);
#endif
	}

	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
}

#if MALI_USE_CSF && defined(KBASE_PM_RUNTIME)
/**
 * kbase_pm_do_poweroff_sync - Do the synchronous power down of GPU
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * This function is called at system suspend or device unload to power down
 * the GPU synchronously. This is needed because the GPU power down would
 * normally happen from the runtime suspend callback (if the gpu_active and
 * gpu_idle callbacks are used), and runtime suspend is disabled while a
 * system suspend is in progress.
 * The function first waits for the @gpu_poweroff_wait_work to complete, which
 * could have been enqueued after the last PM reference was released.
 *
 * Return: 0 on success, negative value otherwise.
 */
static int kbase_pm_do_poweroff_sync(struct kbase_device *kbdev)
{
	struct kbase_pm_backend_data *backend = &kbdev->pm.backend;
	unsigned long flags;
	int ret = 0;

	WARN_ON(kbdev->pm.active_count);

	kbase_pm_wait_for_poweroff_work_complete(kbdev);

	kbase_pm_lock(kbdev);
	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	WARN_ON(backend->poweroff_wait_in_progress);
	WARN_ON(backend->gpu_sleep_mode_active);
	if (backend->gpu_powered) {
		backend->mcu_desired = false;
		backend->l2_desired = false;
		kbase_pm_update_state(kbdev);
		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

		ret = kbase_pm_wait_for_desired_state(kbdev);
		if (ret) {
			dev_warn(
				kbdev->dev,
				"Wait for pm state change failed on synchronous power off");
			ret = -EBUSY;
			goto out;
		}

		/* Due to the power policy, the GPU could have been kept active
		 * throughout, so the idle callback needs to be invoked before
		 * the power down.
		 */
		if (backend->callback_power_runtime_gpu_idle &&
		    !backend->gpu_idled) {
			backend->callback_power_runtime_gpu_idle(kbdev);
			backend->gpu_idled = true;
		}

		if (!kbase_pm_clock_off(kbdev)) {
			dev_warn(
				kbdev->dev,
				"Failed to turn off GPU clocks on synchronous power off, MMU faults pending");
			ret = -EBUSY;
		}
	} else {
		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
	}

out:
	kbase_pm_unlock(kbdev);
	return ret;
}
#endif

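/* Initiate an asynchronous GPU power off by clearing the desired core, L2
 * (and MCU) states; the actual power off completes from the poweroff wait
 * work once the L2 has been turned off.
 */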
void kbase_pm_do_poweroff(struct kbase_device *kbdev)
{
	unsigned long flags;

	lockdep_assert_held(&kbdev->pm.lock);

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);

	if (!kbdev->pm.backend.gpu_powered)
		goto unlock_hwaccess;

	if (kbdev->pm.backend.poweroff_wait_in_progress)
		goto unlock_hwaccess;

#if MALI_USE_CSF
	kbdev->pm.backend.mcu_desired = false;
#else
	/* Force all cores off */
	kbdev->pm.backend.shaders_desired = false;
#endif
	kbdev->pm.backend.l2_desired = false;

	kbdev->pm.backend.poweroff_wait_in_progress = true;
	kbdev->pm.backend.invoke_poweroff_wait_wq_when_l2_off = true;

	/* l2_desired being false should cause the state machine to
	 * start powering off the L2. When it actually is powered off,
	 * the interrupt handler will call kbase_pm_l2_update_state()
	 * again, which will trigger the kbase_pm_gpu_poweroff_wait_wq.
	 * Callers of this function will need to wait on poweroff_wait.
	 */
	kbase_pm_update_state(kbdev);

unlock_hwaccess:
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
}

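/* Note: despite its name, this helper returns true once the pending power-off
 * work has finished, i.e. when a power off is no longer in progress. It is
 * the wait condition for kbase_pm_wait_for_poweroff_work_complete().
 */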
static bool is_poweroff_in_progress(struct kbase_device *kbdev)
{
	bool ret;
	unsigned long flags;

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	ret = (kbdev->pm.backend.poweroff_wait_in_progress == false);
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	return ret;
}

void kbase_pm_wait_for_poweroff_work_complete(struct kbase_device *kbdev)
{
	wait_event_killable(kbdev->pm.backend.poweroff_wait,
			is_poweroff_in_progress(kbdev));
}
KBASE_EXPORT_TEST_API(kbase_pm_wait_for_poweroff_work_complete);

static bool is_gpu_powered_down(struct kbase_device *kbdev)
{
	bool ret;
	unsigned long flags;

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	ret = !kbdev->pm.backend.gpu_powered;
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	return ret;
}

void kbase_pm_wait_for_gpu_power_down(struct kbase_device *kbdev)
{
	wait_event_killable(kbdev->pm.backend.poweroff_wait,
			is_gpu_powered_down(kbdev));
}
KBASE_EXPORT_TEST_API(kbase_pm_wait_for_gpu_power_down);

int kbase_hwaccess_pm_powerup(struct kbase_device *kbdev,
		unsigned int flags)
{
	unsigned long irq_flags;
	int ret;

	KBASE_DEBUG_ASSERT(kbdev != NULL);

	kbase_pm_lock(kbdev);

	/* A suspend won't happen during startup/insmod */
	KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev));

	/* Power up the GPU, don't enable IRQs as we are not ready to receive
	 * them
	 */
	ret = kbase_pm_init_hw(kbdev, flags);
	if (ret) {
		kbase_pm_unlock(kbdev);
		return ret;
	}
#if MALI_USE_CSF
	kbdev->pm.debug_core_mask =
		kbdev->gpu_props.props.raw_props.shader_present;
	spin_lock_irqsave(&kbdev->hwaccess_lock, irq_flags);
	/* Set the initial value for 'shaders_avail'. It is later modified only
	 * from the MCU state machine, when the shader core allocation enable
	 * mask request has completed. Its value therefore indicates the mask
	 * of cores that are currently being used by the FW for the allocation
	 * of endpoints requested by CSGs.
	 */
	kbdev->pm.backend.shaders_avail = kbase_pm_ca_get_core_mask(kbdev);
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags);
#else
	kbdev->pm.debug_core_mask_all = kbdev->pm.debug_core_mask[0] =
			kbdev->pm.debug_core_mask[1] =
			kbdev->pm.debug_core_mask[2] =
			kbdev->gpu_props.props.raw_props.shader_present;
#endif

	/* Pretend the GPU is active to prevent a power policy turning the GPU
	 * cores off
	 */
	kbdev->pm.active_count = 1;
#if MALI_USE_CSF && KBASE_PM_RUNTIME
	if (kbdev->pm.backend.callback_power_runtime_gpu_active) {
		/* Take the RPM reference count to match with the internal
		 * PM reference count
		 */
		kbdev->pm.backend.callback_power_runtime_gpu_active(kbdev);
		WARN_ON(kbdev->pm.backend.gpu_idled);
	}
#endif

	spin_lock_irqsave(&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
								irq_flags);
	/* Ensure cycle counter is off */
	kbdev->pm.backend.gpu_cycle_counter_requests = 0;
	spin_unlock_irqrestore(
			&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
								irq_flags);

	/* We are ready to receive IRQs now that the power policy is set up, so
	 * enable them.
	 */
#ifdef CONFIG_MALI_BIFROST_DEBUG
	kbdev->pm.backend.driver_ready_for_irqs = true;
#endif
	kbase_pm_enable_interrupts(kbdev);

	WARN_ON(!kbdev->pm.backend.gpu_powered);
	/* GPU has been powered up (by kbase_pm_init_hw) and interrupts have
	 * been enabled, so GPU is ready for use and PM state machine can be
	 * exercised from this point onwards.
	 */
	kbdev->pm.backend.gpu_ready = true;

	/* Turn on the GPU and any cores needed by the policy */
#if MALI_USE_CSF
	/* Turn on the L2 caches, needed for firmware boot */
	spin_lock_irqsave(&kbdev->hwaccess_lock, irq_flags);
	kbdev->pm.backend.l2_desired = true;
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags);
#endif
	kbase_pm_do_poweron(kbdev, false);
	kbase_pm_unlock(kbdev);

	return 0;
}

void kbase_hwaccess_pm_halt(struct kbase_device *kbdev)
{
	KBASE_DEBUG_ASSERT(kbdev != NULL);

#if MALI_USE_CSF && defined(KBASE_PM_RUNTIME)
	WARN_ON(kbase_pm_do_poweroff_sync(kbdev));
#else
	mutex_lock(&kbdev->pm.lock);
	kbase_pm_do_poweroff(kbdev);
	mutex_unlock(&kbdev->pm.lock);

	kbase_pm_wait_for_poweroff_work_complete(kbdev);
#endif
}

KBASE_EXPORT_TEST_API(kbase_hwaccess_pm_halt);

void kbase_hwaccess_pm_term(struct kbase_device *kbdev)
{
	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(kbdev->pm.active_count == 0);
	KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_cycle_counter_requests == 0);

	cancel_work_sync(&kbdev->pm.backend.hwcnt_disable_work);

	if (kbdev->pm.backend.hwcnt_disabled) {
		unsigned long flags;
#if MALI_USE_CSF
		kbase_csf_scheduler_spin_lock(kbdev, &flags);
		kbase_hwcnt_context_enable(kbdev->hwcnt_gpu_ctx);
		kbase_csf_scheduler_spin_unlock(kbdev, flags);
#else
		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
		kbase_hwcnt_context_enable(kbdev->hwcnt_gpu_ctx);
		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
#endif
	}

	/* Free any resources the policy allocated */
	kbase_pm_state_machine_term(kbdev);
	kbase_pm_policy_term(kbdev);
	kbase_pm_ca_term(kbdev);

#if !MALI_USE_CSF
	/* Shut down the metrics subsystem */
	kbasep_pm_metrics_term(kbdev);
#else
	if (WARN_ON(mutex_is_locked(&kbdev->pm.backend.policy_change_lock))) {
		mutex_lock(&kbdev->pm.backend.policy_change_lock);
		mutex_unlock(&kbdev->pm.backend.policy_change_lock);
	}
	mutex_destroy(&kbdev->pm.backend.policy_change_lock);
#endif

	destroy_workqueue(kbdev->pm.backend.gpu_poweroff_wait_wq);
}

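/* Called when a power state change has occurred (e.g. from the IRQ handler)
 * to advance the PM state machine and, on JM GPUs, update the job slots.
 */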
void kbase_pm_power_changed(struct kbase_device *kbdev)
{
	unsigned long flags;

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	kbase_pm_update_state(kbdev);

#if !MALI_USE_CSF
	kbase_backend_slot_update(kbdev);
#endif /* !MALI_USE_CSF */

	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
}

#if MALI_USE_CSF
void kbase_pm_set_debug_core_mask(struct kbase_device *kbdev, u64 new_core_mask)
{
	lockdep_assert_held(&kbdev->hwaccess_lock);
	lockdep_assert_held(&kbdev->pm.lock);

	kbdev->pm.debug_core_mask = new_core_mask;
	kbase_pm_update_dynamic_cores_onoff(kbdev);
}
KBASE_EXPORT_TEST_API(kbase_pm_set_debug_core_mask);
#else
void kbase_pm_set_debug_core_mask(struct kbase_device *kbdev,
		u64 new_core_mask_js0, u64 new_core_mask_js1,
		u64 new_core_mask_js2)
{
	lockdep_assert_held(&kbdev->hwaccess_lock);
	lockdep_assert_held(&kbdev->pm.lock);

	if (kbase_dummy_job_wa_enabled(kbdev)) {
		dev_warn(kbdev->dev, "Change of core mask not supported for slot 0 as dummy job WA is enabled");
		new_core_mask_js0 = kbdev->pm.debug_core_mask[0];
	}

	kbdev->pm.debug_core_mask[0] = new_core_mask_js0;
	kbdev->pm.debug_core_mask[1] = new_core_mask_js1;
	kbdev->pm.debug_core_mask[2] = new_core_mask_js2;
	kbdev->pm.debug_core_mask_all = new_core_mask_js0 | new_core_mask_js1 |
			new_core_mask_js2;

	kbase_pm_update_dynamic_cores_onoff(kbdev);
}
#endif /* MALI_USE_CSF */

void kbase_hwaccess_pm_gpu_active(struct kbase_device *kbdev)
{
	kbase_pm_update_active(kbdev);
}

void kbase_hwaccess_pm_gpu_idle(struct kbase_device *kbdev)
{
	kbase_pm_update_active(kbdev);
}

int kbase_hwaccess_pm_suspend(struct kbase_device *kbdev)
{
	int ret = 0;

#if MALI_USE_CSF && defined(KBASE_PM_RUNTIME)
	ret = kbase_pm_do_poweroff_sync(kbdev);
	if (ret)
		return ret;
#else
	/* Force power off the GPU and all cores (regardless of policy), only
	 * after the PM active count reaches zero (otherwise, we risk turning it
	 * off prematurely)
	 */
	kbase_pm_lock(kbdev);

	kbase_pm_do_poweroff(kbdev);

#if !MALI_USE_CSF
	kbase_backend_timer_suspend(kbdev);
#endif /* !MALI_USE_CSF */

	kbase_pm_unlock(kbdev);

	kbase_pm_wait_for_poweroff_work_complete(kbdev);
#endif

	WARN_ON(kbdev->pm.backend.gpu_powered);
	WARN_ON(atomic_read(&kbdev->faults_pending));

	if (kbdev->pm.backend.callback_power_suspend)
		kbdev->pm.backend.callback_power_suspend(kbdev);

	return ret;
}

void kbase_hwaccess_pm_resume(struct kbase_device *kbdev)
{
	kbase_pm_lock(kbdev);

	kbdev->pm.suspending = false;
#ifdef CONFIG_MALI_ARBITER_SUPPORT
	if (kbase_pm_is_gpu_lost(kbdev)) {
		dev_dbg(kbdev->dev, "%s: GPU lost in progress\n", __func__);
		kbase_pm_unlock(kbdev);
		return;
	}
#endif
	kbase_pm_do_poweron(kbdev, true);

#if !MALI_USE_CSF
	kbase_backend_timer_resume(kbdev);
#endif /* !MALI_USE_CSF */

	wake_up_all(&kbdev->pm.resume_wait);
	kbase_pm_unlock(kbdev);
}

#ifdef CONFIG_MALI_ARBITER_SUPPORT
void kbase_pm_handle_gpu_lost(struct kbase_device *kbdev)
{
	unsigned long flags;
	ktime_t end_timestamp = ktime_get();
	struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;

	if (!kbdev->arb.arb_if)
		return;

	mutex_lock(&kbdev->pm.lock);
	mutex_lock(&arb_vm_state->vm_state_lock);
	if (kbdev->pm.backend.gpu_powered &&
			!kbase_pm_is_gpu_lost(kbdev)) {
		kbase_pm_set_gpu_lost(kbdev, true);

		/* GPU is no longer mapped to VM. So no interrupts will
		 * be received and Mali registers have been replaced by
		 * dummy RAM
		 */
		WARN(!kbase_is_gpu_removed(kbdev),
			"GPU is still available after GPU lost event\n");

		/* Full GPU reset will have been done by hypervisor, so
		 * cancel
		 */
		atomic_set(&kbdev->hwaccess.backend.reset_gpu,
				KBASE_RESET_GPU_NOT_PENDING);
		hrtimer_cancel(&kbdev->hwaccess.backend.reset_timer);
		kbase_synchronize_irqs(kbdev);

		/* Clear all jobs running on the GPU */
		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
		kbdev->protected_mode = false;
		kbase_backend_reset(kbdev, &end_timestamp);
		kbase_pm_metrics_update(kbdev, NULL);
		kbase_pm_update_state(kbdev);
		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

		/* Cancel any pending HWC dumps */
		spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
		if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_DUMPING ||
				kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_FAULT) {
			kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_FAULT;
			kbdev->hwcnt.backend.triggered = 1;
			wake_up(&kbdev->hwcnt.backend.wait);
		}
		spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
	}
	mutex_unlock(&arb_vm_state->vm_state_lock);
	mutex_unlock(&kbdev->pm.lock);
}

#endif /* CONFIG_MALI_ARBITER_SUPPORT */

#if MALI_USE_CSF && defined(KBASE_PM_RUNTIME)
int kbase_pm_force_mcu_wakeup_after_sleep(struct kbase_device *kbdev)
{
	unsigned long flags;

	lockdep_assert_held(&kbdev->pm.lock);

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	/* Set the override flag to force the power up of L2 cache */
	kbdev->pm.backend.gpu_wakeup_override = true;
	kbase_pm_update_state(kbdev);
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	return kbase_pm_wait_for_desired_state(kbdev);
}

static int pm_handle_mcu_sleep_on_runtime_suspend(struct kbase_device *kbdev)
{
	unsigned long flags;
	int ret;

	lockdep_assert_held(&kbdev->csf.scheduler.lock);
	lockdep_assert_held(&kbdev->pm.lock);

	/* If there are no active CSGs on slots, powering up the L2 could be
	 * skipped and the GPU suspended directly.
	 * ToDo: firmware has to be reloaded after wake-up as no halt command
	 * has been sent when GPU was put to sleep mode.
	 */
	if (!kbase_csf_scheduler_get_nr_active_csgs(kbdev))
		dev_info(
			kbdev->dev,
			"No active CSGs. Can skip the power up of L2 and go for suspension directly");

	ret = kbase_pm_force_mcu_wakeup_after_sleep(kbdev);
	if (ret) {
		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
		dev_warn(
			kbdev->dev,
			"Waiting for MCU to wake up failed on runtime suspend");
		kbdev->pm.backend.gpu_wakeup_override = false;
		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
		return ret;
	}

	/* Check if a Doorbell mirror interrupt occurred meanwhile */
	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	if (kbdev->pm.backend.gpu_sleep_mode_active &&
	    kbdev->pm.backend.exit_gpu_sleep_mode) {
		dev_dbg(kbdev->dev, "DB mirror interrupt occurred during runtime suspend after L2 power up");
		kbdev->pm.backend.gpu_wakeup_override = false;
		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
		return -EBUSY;
	}
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
	/* Need to release the kbdev->pm.lock to avoid lock ordering issue
	 * with kctx->reg.lock, which is taken if the sync wait condition is
	 * evaluated after the CSG suspend operation.
	 */
	kbase_pm_unlock(kbdev);
	ret = kbase_csf_scheduler_handle_runtime_suspend(kbdev);
	kbase_pm_lock(kbdev);

	/* Power down L2 cache */
	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	kbdev->pm.backend.gpu_wakeup_override = false;
	kbase_pm_update_state(kbdev);
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	/* After re-acquiring the kbdev->pm.lock, check if the device
	 * became active (or active then idle) meanwhile.
	 */
	if (kbdev->pm.active_count ||
	    kbdev->pm.backend.poweroff_wait_in_progress) {
		dev_dbg(kbdev->dev,
			"Device became active on runtime suspend after suspending Scheduler");
		ret = -EBUSY;
	}

	if (ret)
		return ret;

	ret = kbase_pm_wait_for_desired_state(kbdev);
	if (ret)
		dev_warn(kbdev->dev, "Wait for power down failed on runtime suspend");

	return ret;
}

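/* Handle the GPU power down on runtime suspend. Aborts with -EBUSY if the GPU
 * became active again in the meantime, wakes the MCU out of sleep to suspend
 * the CSGs if needed, and finally turns the GPU clock off.
 */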
int kbase_pm_handle_runtime_suspend(struct kbase_device *kbdev)
{
	enum kbase_mcu_state mcu_state;
	bool exit_early = false;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	/* This check is needed for the case where Kbase had invoked the
	 * @power_off_callback directly.
	 */
	if (!kbdev->pm.backend.gpu_powered) {
		dev_dbg(kbdev->dev, "GPU already powered down on runtime suspend");
		exit_early = true;
	}
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	if (exit_early)
		goto out;

	ret = kbase_reset_gpu_try_prevent(kbdev);
	if (ret == -ENOMEM) {
		dev_dbg(kbdev->dev, "Quit runtime suspend as GPU is in bad state");
		/* Finish the runtime suspend, no point in trying again as GPU is
		 * in irrecoverable bad state.
		 */
		goto out;
	} else if (ret) {
		dev_dbg(kbdev->dev, "Quit runtime suspend for failing to prevent gpu reset");
		ret = -EBUSY;
		goto out;
	}

	kbase_csf_scheduler_lock(kbdev);
	kbase_pm_lock(kbdev);

	/*
	 * This is to handle the case where the GPU device becomes active and
	 * idle very quickly whilst the runtime suspend callback is executing.
	 * This is useful for the following scenario:
	 * - GPU goes idle and pm_callback_runtime_gpu_idle() is called.
	 * - Auto-suspend timer expires and kbase_device_runtime_suspend()
	 *   is called.
	 * - GPU becomes active and pm_callback_runtime_gpu_active() calls
	 *   pm_runtime_get().
	 * - Shortly after that GPU becomes idle again.
	 * - kbase_pm_handle_runtime_suspend() gets called.
	 * - pm_callback_runtime_gpu_idle() is called.
	 *
	 * We do not want to power down the GPU immediately after it goes idle.
	 * So if we notice that GPU had become active when the runtime suspend
	 * had already kicked in, we abort the runtime suspend.
	 * By aborting the runtime suspend, we defer the power down of GPU.
	 *
	 * This check also helps prevent warnings regarding L2 and MCU states
	 * inside the pm_handle_power_off() function. The warning stems from
	 * the fact that pm.lock is released before invoking Scheduler function
	 * to suspend the CSGs.
	 */
	if (kbdev->pm.active_count ||
	    kbdev->pm.backend.poweroff_wait_in_progress) {
		dev_dbg(kbdev->dev, "Device became active on runtime suspend");
		ret = -EBUSY;
		goto unlock;
	}

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	if (kbdev->pm.backend.gpu_sleep_mode_active &&
	    kbdev->pm.backend.exit_gpu_sleep_mode) {
		dev_dbg(kbdev->dev, "DB mirror interrupt occurred during runtime suspend before L2 power up");
		ret = -EBUSY;
		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
		goto unlock;
	}

	mcu_state = kbdev->pm.backend.mcu_state;
	WARN_ON(!kbase_pm_is_mcu_inactive(kbdev, mcu_state));
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	if (mcu_state == KBASE_MCU_IN_SLEEP) {
		ret = pm_handle_mcu_sleep_on_runtime_suspend(kbdev);
		if (ret)
			goto unlock;
	}

	/* Disable interrupts and turn off the GPU clocks */
	if (!kbase_pm_clock_off(kbdev)) {
		dev_warn(kbdev->dev, "Failed to turn off GPU clocks on runtime suspend, MMU faults pending");

		WARN_ON(!kbdev->poweroff_pending);
		/* Previous call to kbase_pm_clock_off() would have disabled
		 * the interrupts and also synchronized with the interrupt
		 * handlers, so more fault work items can't be enqueued.
		 *
		 * Can't wait for the completion of MMU fault work items as
		 * there is a possibility of a deadlock since the fault work
		 * items would do the group termination which requires the
		 * Scheduler lock.
		 */
		ret = -EBUSY;
		goto unlock;
	}

	wake_up(&kbdev->pm.backend.poweroff_wait);
	WARN_ON(kbdev->pm.backend.gpu_powered);
	dev_dbg(kbdev->dev, "GPU power down complete");

unlock:
	kbase_pm_unlock(kbdev);
	kbase_csf_scheduler_unlock(kbdev);
	kbase_reset_gpu_allow(kbdev);
out:
	if (ret) {
		ret = -EBUSY;
		pm_runtime_mark_last_busy(kbdev->dev);
	}

	return ret;
}
#endif