1 // SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
2 /*
3 *
4 * (C) COPYRIGHT 2010-2021 ARM Limited. All rights reserved.
5 *
6 * This program is free software and is provided to you under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation, and any use by you of this program is subject to the terms
9 * of such GNU license.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, you can access it online at
18 * http://www.gnu.org/licenses/gpl-2.0.html.
19 *
20 */
21
22 /*
23 * Base kernel Power Management hardware control
24 */
25
26 #include <mali_kbase.h>
27 #include <mali_kbase_config_defaults.h>
28 #include <gpu/mali_kbase_gpu_regmap.h>
29 #include <tl/mali_kbase_tracepoints.h>
30 #include <mali_kbase_pm.h>
32 #include <mali_kbase_smc.h>
33
34 #if MALI_USE_CSF
35 #include <csf/ipa_control/mali_kbase_csf_ipa_control.h>
36 #else
37 #include <mali_kbase_hwaccess_jm.h>
38 #endif /* !MALI_USE_CSF */
39
40 #include <mali_kbase_reset_gpu.h>
41 #include <mali_kbase_ctx_sched.h>
42 #include <mali_kbase_hwcnt_context.h>
43 #include <mali_kbase_pbha.h>
44 #include <backend/gpu/mali_kbase_cache_policy_backend.h>
45 #include <device/mali_kbase_device.h>
46 #include <backend/gpu/mali_kbase_irq_internal.h>
47 #include <backend/gpu/mali_kbase_pm_internal.h>
48 #include <backend/gpu/mali_kbase_l2_mmu_config.h>
49 #include <mali_kbase_dummy_job_wa.h>
50 #ifdef CONFIG_MALI_ARBITER_SUPPORT
51 #include <arbiter/mali_kbase_arbiter_pm.h>
52 #endif /* CONFIG_MALI_ARBITER_SUPPORT */
56
57 #if MALI_USE_CSF
58 #include <linux/delay.h>
59 #endif
60
61 #include <linux/of.h>
62
63 #ifdef CONFIG_MALI_CORESTACK
64 bool corestack_driver_control = true;
65 #else
66 bool corestack_driver_control; /* Default value of 0/false */
67 #endif
68 module_param(corestack_driver_control, bool, 0444);
69 MODULE_PARM_DESC(corestack_driver_control,
70 "Let the driver power on/off the GPU core stack independently "
71 "without involving the Power Domain Controller. This should "
72 "only be enabled on platforms for which integration of the PDC "
73 "to the Mali GPU is known to be problematic.");
74 KBASE_EXPORT_TEST_API(corestack_driver_control);
75
76 /**
77 * enum kbasep_pm_action - Actions that can be performed on a core.
78 *
79 * @ACTION_PRESENT: The cores that are present
80 * @ACTION_READY: The cores that are ready
81 * @ACTION_PWRON: Power on the cores specified
82 * @ACTION_PWROFF: Power off the cores specified
83 * @ACTION_PWRTRANS: The cores that are transitioning
84 * @ACTION_PWRACTIVE: The cores that are active
85 *
 * This enumeration is private to the file. Its values are set to allow the
 * core_type_to_reg() function, which decodes this enumeration, to be simpler
 * and more efficient.
89 */
90 enum kbasep_pm_action {
91 ACTION_PRESENT = 0,
92 ACTION_READY = (SHADER_READY_LO - SHADER_PRESENT_LO),
93 ACTION_PWRON = (SHADER_PWRON_LO - SHADER_PRESENT_LO),
94 ACTION_PWROFF = (SHADER_PWROFF_LO - SHADER_PRESENT_LO),
95 ACTION_PWRTRANS = (SHADER_PWRTRANS_LO - SHADER_PRESENT_LO),
96 ACTION_PWRACTIVE = (SHADER_PWRACTIVE_LO - SHADER_PRESENT_LO)
97 };
98
99 static u64 kbase_pm_get_state(
100 struct kbase_device *kbdev,
101 enum kbase_pm_core_type core_type,
102 enum kbasep_pm_action action);
103
104 #if MALI_USE_CSF
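/**
 * kbase_pm_is_mcu_desired - Check whether the MCU is desired to be powered on
 *
 * @kbdev: The kbase device structure for the device
 *
 * Caller must hold the hwaccess_lock.
 *
 * Return: true if the MCU needs to be kept powered on, false otherwise.
 */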
bool kbase_pm_is_mcu_desired(struct kbase_device *kbdev)
106 {
107 lockdep_assert_held(&kbdev->hwaccess_lock);
108
109 if (unlikely(!kbdev->csf.firmware_inited))
110 return false;
111
112 if (kbdev->csf.scheduler.pm_active_count &&
113 kbdev->pm.backend.mcu_desired)
114 return true;
115
116 #ifdef KBASE_PM_RUNTIME
117 if (kbdev->pm.backend.gpu_wakeup_override)
118 return true;
119 #endif
120
        /* The MCU is supposed to be ON only when scheduler.pm_active_count is
         * non-zero. But for the always_on policy the MCU needs to be kept on,
         * unless a policy change transition requires it to be off.
         */
125
126 return (kbdev->pm.backend.mcu_desired &&
127 kbase_pm_no_mcu_core_pwroff(kbdev) &&
128 !kbdev->pm.backend.policy_change_clamp_state_to_off);
129 }
130 #endif
131
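/**
 * kbase_pm_is_l2_desired - Check whether the L2 cache is desired to be
 *                          powered on
 *
 * @kbdev: The kbase device structure for the device
 *
 * Return: true if the L2 cache needs to be powered on, false otherwise.
 */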
bool kbase_pm_is_l2_desired(struct kbase_device *kbdev)
133 {
134 #if !MALI_USE_CSF
135 if (kbdev->pm.backend.protected_entry_transition_override)
136 return false;
137
138 if (kbdev->pm.backend.protected_transition_override &&
139 kbdev->pm.backend.protected_l2_override)
140 return true;
141
142 if (kbdev->pm.backend.protected_transition_override &&
143 !kbdev->pm.backend.shaders_desired)
144 return false;
145 #else
146 if (unlikely(kbdev->pm.backend.policy_change_clamp_state_to_off))
147 return false;
148
149 /* Power up the L2 cache only when MCU is desired */
150 if (likely(kbdev->csf.firmware_inited))
151 return kbase_pm_is_mcu_desired(kbdev);
152 #endif
153
154 return kbdev->pm.backend.l2_desired;
155 }
156
157 #if !MALI_USE_CSF
void kbase_pm_protected_override_enable(struct kbase_device *kbdev)
159 {
160 lockdep_assert_held(&kbdev->hwaccess_lock);
161
162 kbdev->pm.backend.protected_transition_override = true;
163 }
void kbase_pm_protected_override_disable(struct kbase_device *kbdev)
165 {
166 lockdep_assert_held(&kbdev->hwaccess_lock);
167
168 kbdev->pm.backend.protected_transition_override = false;
169 }
170
int kbase_pm_protected_entry_override_enable(struct kbase_device *kbdev)
172 {
173 lockdep_assert_held(&kbdev->hwaccess_lock);
174
175 WARN_ON(!kbdev->protected_mode_transition);
176
177 if (kbdev->pm.backend.l2_always_on &&
178 (kbdev->system_coherency == COHERENCY_ACE)) {
179 WARN_ON(kbdev->pm.backend.protected_entry_transition_override);
180
181 /*
182 * If there is already a GPU reset pending then wait for it to
183 * complete before initiating a special reset for protected
184 * mode entry.
185 */
186 if (kbase_reset_gpu_silent(kbdev))
187 return -EAGAIN;
188
189 kbdev->pm.backend.protected_entry_transition_override = true;
190 }
191
192 return 0;
193 }
194
void kbase_pm_protected_entry_override_disable(struct kbase_device *kbdev)
196 {
197 lockdep_assert_held(&kbdev->hwaccess_lock);
198
199 WARN_ON(!kbdev->protected_mode_transition);
200
201 if (kbdev->pm.backend.l2_always_on &&
202 (kbdev->system_coherency == COHERENCY_ACE)) {
203 WARN_ON(!kbdev->pm.backend.protected_entry_transition_override);
204
205 kbdev->pm.backend.protected_entry_transition_override = false;
206 }
207 }
208
void kbase_pm_protected_l2_override(struct kbase_device *kbdev, bool override)
210 {
211 lockdep_assert_held(&kbdev->hwaccess_lock);
212
213 if (override) {
214 kbdev->pm.backend.protected_l2_override++;
215 WARN_ON(kbdev->pm.backend.protected_l2_override <= 0);
216 } else {
217 kbdev->pm.backend.protected_l2_override--;
218 WARN_ON(kbdev->pm.backend.protected_l2_override < 0);
219 }
220
221 kbase_pm_update_state(kbdev);
222 }
223 #endif
224
225 /**
226 * core_type_to_reg - Decode a core type and action to a register.
227 *
228 * @core_type: The type of core
229 * @action: The type of action
230 *
231 * Given a core type (defined by kbase_pm_core_type) and an action (defined
232 * by kbasep_pm_action) this function will return the register offset that
233 * will perform the action on the core type. The register returned is the _LO
234 * register and an offset must be applied to use the _HI register.
235 *
236 * Return: The register offset of the _LO register that performs an action of
237 * type @action on a core of type @core_type.
238 */
static u32 core_type_to_reg(enum kbase_pm_core_type core_type,
                            enum kbasep_pm_action action)
241 {
242 if (corestack_driver_control) {
243 if (core_type == KBASE_PM_CORE_STACK) {
244 switch (action) {
245 case ACTION_PRESENT:
246 return STACK_PRESENT_LO;
247 case ACTION_READY:
248 return STACK_READY_LO;
249 case ACTION_PWRON:
250 return STACK_PWRON_LO;
251 case ACTION_PWROFF:
252 return STACK_PWROFF_LO;
253 case ACTION_PWRTRANS:
254 return STACK_PWRTRANS_LO;
255 default:
256 WARN(1, "Invalid action for core type\n");
257 }
258 }
259 }
260
261 return (u32)core_type + (u32)action;
262 }
263
264 #if IS_ENABLED(CONFIG_ARM64)
static void mali_cci_flush_l2(struct kbase_device *kbdev)
266 {
267 const u32 mask = CLEAN_CACHES_COMPLETED | RESET_COMPLETED;
268 u32 loops = KBASE_CLEAN_CACHE_MAX_LOOPS;
269 u32 raw;
270
        /*
         * Note that we don't take the cache flush mutex here, since we
         * expect to be the last user of the L2: all other L2 users must
         * have dropped their references to initiate the L2 power down,
         * and L2 power down is the only valid place for this to be
         * called from.
         */
278
279 kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
280 GPU_COMMAND_CACHE_CLN_INV_L2);
281
282 raw = kbase_reg_read(kbdev,
283 GPU_CONTROL_REG(GPU_IRQ_RAWSTAT));
284
285 /* Wait for cache flush to complete before continuing, exit on
286 * gpu resets or loop expiry.
287 */
288 while (((raw & mask) == 0) && --loops) {
289 raw = kbase_reg_read(kbdev,
290 GPU_CONTROL_REG(GPU_IRQ_RAWSTAT));
291 }
292 }
293 #endif
294
295 /**
296 * kbase_pm_invoke - Invokes an action on a core set
297 *
298 * @kbdev: The kbase device structure of the device
299 * @core_type: The type of core that the action should be performed on
300 * @cores: A bit mask of cores to perform the action on (low 32 bits)
301 * @action: The action to perform on the cores
302 *
303 * This function performs the action given by @action on a set of cores of a
304 * type given by @core_type. It is a static function used by
305 * kbase_pm_transition_core_type()
306 */
static void kbase_pm_invoke(struct kbase_device *kbdev,
                            enum kbase_pm_core_type core_type,
                            u64 cores,
                            enum kbasep_pm_action action)
311 {
312 u32 reg;
313 u32 lo = cores & 0xFFFFFFFF;
314 u32 hi = (cores >> 32) & 0xFFFFFFFF;
315
316 lockdep_assert_held(&kbdev->hwaccess_lock);
317
318 reg = core_type_to_reg(core_type, action);
319
320 KBASE_DEBUG_ASSERT(reg);
321
322 if (cores) {
323 u64 state = kbase_pm_get_state(kbdev, core_type, ACTION_READY);
324
325 if (action == ACTION_PWRON)
326 state |= cores;
327 else if (action == ACTION_PWROFF)
328 state &= ~cores;
329 KBASE_TLSTREAM_AUX_PM_STATE(kbdev, core_type, state);
330 }
331
332 /* Tracing */
333 if (cores) {
334 if (action == ACTION_PWRON)
335 switch (core_type) {
336 case KBASE_PM_CORE_SHADER:
337 KBASE_KTRACE_ADD(kbdev, PM_PWRON, NULL, cores);
338 break;
339 case KBASE_PM_CORE_TILER:
340 KBASE_KTRACE_ADD(kbdev, PM_PWRON_TILER, NULL, cores);
341 break;
342 case KBASE_PM_CORE_L2:
343 KBASE_KTRACE_ADD(kbdev, PM_PWRON_L2, NULL, cores);
344 break;
345 default:
346 break;
347 }
348 else if (action == ACTION_PWROFF)
349 switch (core_type) {
350 case KBASE_PM_CORE_SHADER:
351 KBASE_KTRACE_ADD(kbdev, PM_PWROFF, NULL, cores);
352 break;
353 case KBASE_PM_CORE_TILER:
354 KBASE_KTRACE_ADD(kbdev, PM_PWROFF_TILER, NULL, cores);
355 break;
356 case KBASE_PM_CORE_L2:
357 KBASE_KTRACE_ADD(kbdev, PM_PWROFF_L2, NULL, cores);
358 /* disable snoops before L2 is turned off */
359 kbase_pm_cache_snoop_disable(kbdev);
360 break;
361 default:
362 break;
363 }
364 }
365
366 if (kbase_dummy_job_wa_enabled(kbdev) &&
367 action == ACTION_PWRON &&
368 core_type == KBASE_PM_CORE_SHADER &&
369 !(kbdev->dummy_job_wa.flags &
370 KBASE_DUMMY_JOB_WA_FLAG_LOGICAL_SHADER_POWER)) {
371 kbase_dummy_job_wa_execute(kbdev, cores);
372 } else {
373 if (lo != 0)
374 kbase_reg_write(kbdev, GPU_CONTROL_REG(reg), lo);
375 if (hi != 0)
376 kbase_reg_write(kbdev, GPU_CONTROL_REG(reg + 4), hi);
377 }
378 }
379
/**
 * kbase_pm_get_state - Get information about a core set
 *
 * @kbdev: The kbase device structure of the device
 * @core_type: The type of core that should be queried
 * @action: The property of the cores to query
 *
 * This function gets information (chosen by @action) about a set of cores of
 * a type given by @core_type. It is a static function used by
 * kbase_pm_get_active_cores(), kbase_pm_get_trans_cores() and
 * kbase_pm_get_ready_cores().
 *
 * Return: A bit mask specifying the state of the cores
 */
static u64 kbase_pm_get_state(struct kbase_device *kbdev,
                              enum kbase_pm_core_type core_type,
                              enum kbasep_pm_action action)
397 {
398 u32 reg;
399 u32 lo, hi;
400
401 reg = core_type_to_reg(core_type, action);
402
403 KBASE_DEBUG_ASSERT(reg);
404
405 lo = kbase_reg_read(kbdev, GPU_CONTROL_REG(reg));
406 hi = kbase_reg_read(kbdev, GPU_CONTROL_REG(reg + 4));
407
408 return (((u64) hi) << 32) | ((u64) lo);
409 }
410
411 /**
412 * kbase_pm_get_present_cores - Get the cores that are present
413 *
414 * @kbdev: Kbase device
415 * @type: The type of cores to query
416 *
417 * Return: Bitmask of the cores that are present
418 */
u64 kbase_pm_get_present_cores(struct kbase_device *kbdev,
                               enum kbase_pm_core_type type)
421 {
422 KBASE_DEBUG_ASSERT(kbdev != NULL);
423
424 switch (type) {
425 case KBASE_PM_CORE_L2:
426 return kbdev->gpu_props.curr_config.l2_present;
427 case KBASE_PM_CORE_SHADER:
428 return kbdev->gpu_props.curr_config.shader_present;
429 case KBASE_PM_CORE_TILER:
430 return kbdev->gpu_props.props.raw_props.tiler_present;
431 case KBASE_PM_CORE_STACK:
432 return kbdev->gpu_props.props.raw_props.stack_present;
433 default:
434 break;
435 }
436 KBASE_DEBUG_ASSERT(0);
437
438 return 0;
439 }
440
441 KBASE_EXPORT_TEST_API(kbase_pm_get_present_cores);
442
443 /**
444 * kbase_pm_get_active_cores - Get the cores that are "active"
445 * (busy processing work)
446 *
447 * @kbdev: Kbase device
448 * @type: The type of cores to query
449 *
450 * Return: Bitmask of cores that are active
451 */
u64 kbase_pm_get_active_cores(struct kbase_device *kbdev,
                              enum kbase_pm_core_type type)
454 {
455 return kbase_pm_get_state(kbdev, type, ACTION_PWRACTIVE);
456 }
457
458 KBASE_EXPORT_TEST_API(kbase_pm_get_active_cores);
459
460 /**
461 * kbase_pm_get_trans_cores - Get the cores that are transitioning between
462 * power states
463 *
464 * @kbdev: Kbase device
465 * @type: The type of cores to query
466 *
467 * Return: Bitmask of cores that are transitioning
468 */
u64 kbase_pm_get_trans_cores(struct kbase_device *kbdev,
                             enum kbase_pm_core_type type)
471 {
472 return kbase_pm_get_state(kbdev, type, ACTION_PWRTRANS);
473 }
474
475 KBASE_EXPORT_TEST_API(kbase_pm_get_trans_cores);
476
477 /**
478 * kbase_pm_get_ready_cores - Get the cores that are powered on
479 *
480 * @kbdev: Kbase device
481 * @type: The type of cores to query
482 *
483 * Return: Bitmask of cores that are ready (powered on)
484 */
u64 kbase_pm_get_ready_cores(struct kbase_device *kbdev,
                             enum kbase_pm_core_type type)
487 {
488 u64 result;
489
490 result = kbase_pm_get_state(kbdev, type, ACTION_READY);
491
492 switch (type) {
493 case KBASE_PM_CORE_SHADER:
494 KBASE_KTRACE_ADD(kbdev, PM_CORES_POWERED, NULL, result);
495 break;
496 case KBASE_PM_CORE_TILER:
497 KBASE_KTRACE_ADD(kbdev, PM_CORES_POWERED_TILER, NULL, result);
498 break;
499 case KBASE_PM_CORE_L2:
500 KBASE_KTRACE_ADD(kbdev, PM_CORES_POWERED_L2, NULL, result);
501 break;
502 default:
503 break;
504 }
505
506 return result;
507 }
508
509 KBASE_EXPORT_TEST_API(kbase_pm_get_ready_cores);
510
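/**
 * kbase_pm_trigger_hwcnt_disable - Start disabling the GPU hardware counters
 *
 * @kbdev: The kbase device structure of the device
 *
 * Disables hardware counters atomically if possible, otherwise queues a
 * worker to do it. Caller must hold the hwaccess_lock.
 */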
static void kbase_pm_trigger_hwcnt_disable(struct kbase_device *kbdev)
512 {
513 struct kbase_pm_backend_data *backend = &kbdev->pm.backend;
514
515 lockdep_assert_held(&kbdev->hwaccess_lock);
516
517 /* See if we can get away with disabling hwcnt
518 * atomically, otherwise kick off a worker.
519 */
520 if (kbase_hwcnt_context_disable_atomic(kbdev->hwcnt_gpu_ctx)) {
521 backend->hwcnt_disabled = true;
522
523 } else {
524 kbase_hwcnt_context_queue_work(kbdev->hwcnt_gpu_ctx,
525 &backend->hwcnt_disable_work);
526 }
527 }
528
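/**
 * kbase_pm_l2_config_override - Program L2_CONFIG before the L2 is powered on
 *
 * @kbdev: The kbase device structure of the device
 *
 * Applies any explicitly requested L2 size/hash overrides. Does nothing if
 * the GPU lacks the L2_CONFIG feature or if no override was requested.
 */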
static void kbase_pm_l2_config_override(struct kbase_device *kbdev)
530 {
531 u32 val;
532
533 /*
534 * Skip if it is not supported
535 */
536 if (!kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_L2_CONFIG))
537 return;
538
539 /*
540 * Skip if size and hash are not given explicitly,
541 * which means default values are used.
542 */
543 if ((kbdev->l2_size_override == 0) && (kbdev->l2_hash_override == 0) &&
544 (!kbdev->l2_hash_values_override))
545 return;
546
547 val = kbase_reg_read(kbdev, GPU_CONTROL_REG(L2_CONFIG));
548
549 if (kbdev->l2_size_override) {
550 val &= ~L2_CONFIG_SIZE_MASK;
551 val |= (kbdev->l2_size_override << L2_CONFIG_SIZE_SHIFT);
552 }
553
554 if (kbdev->l2_hash_override) {
555 WARN_ON(kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_ASN_HASH));
556 val &= ~L2_CONFIG_HASH_MASK;
557 val |= (kbdev->l2_hash_override << L2_CONFIG_HASH_SHIFT);
558 } else if (kbdev->l2_hash_values_override) {
559 int i;
560
561 WARN_ON(!kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_ASN_HASH));
562 val &= ~L2_CONFIG_ASN_HASH_ENABLE_MASK;
563 val |= (0x1 << L2_CONFIG_ASN_HASH_ENABLE_SHIFT);
564
565 for (i = 0; i < ASN_HASH_COUNT; i++) {
566 dev_dbg(kbdev->dev, "Program 0x%x to ASN_HASH[%d]\n",
567 kbdev->l2_hash_values[i], i);
568 kbase_reg_write(kbdev, GPU_CONTROL_REG(ASN_HASH(i)),
569 kbdev->l2_hash_values[i]);
570 }
571 }
572
573 dev_dbg(kbdev->dev, "Program 0x%x to L2_CONFIG\n", val);
574 kbase_reg_write(kbdev, GPU_CONTROL_REG(L2_CONFIG), val);
575 }
576
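/**
 * kbase_pm_control_gpu_clock - Queue the work item that adjusts the GPU clock
 *
 * @kbdev: The kbase device structure of the device
 *
 * Used by the L2 state machine to slow down or restore the GPU clock when the
 * GPU clock slow-down workaround is enabled.
 */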
static void kbase_pm_control_gpu_clock(struct kbase_device *kbdev)
578 {
579 struct kbase_pm_backend_data *const backend = &kbdev->pm.backend;
580
581 lockdep_assert_held(&kbdev->hwaccess_lock);
582
583 queue_work(system_wq, &backend->gpu_clock_control_work);
584 }
585
586 #if MALI_USE_CSF
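/**
 * kbase_mcu_state_to_string - Convert an MCU state to a human readable string
 *
 * @state: The MCU state
 *
 * Return: Name of the state, or "Bad MCU state" if @state is out of range.
 */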
static const char *kbase_mcu_state_to_string(enum kbase_mcu_state state)
588 {
589 const char *const strings[] = {
590 #define KBASEP_MCU_STATE(n) #n,
591 #include "mali_kbase_pm_mcu_states.h"
592 #undef KBASEP_MCU_STATE
593 };
594 if (WARN_ON((size_t)state >= ARRAY_SIZE(strings)))
595 return "Bad MCU state";
596 else
597 return strings[state];
598 }
599
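/**
 * kbase_pm_handle_mcu_core_attr_update - Request a core attribute update from
 *                                        the firmware if needed
 *
 * @kbdev: The kbase device structure of the device
 *
 * Return: true if a core mask and/or core power-off timer update was
 *         requested from the firmware, false otherwise.
 */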
static inline bool kbase_pm_handle_mcu_core_attr_update(struct kbase_device *kbdev)
601 {
602 struct kbase_pm_backend_data *backend = &kbdev->pm.backend;
603 bool timer_update;
604 bool core_mask_update;
605
606 lockdep_assert_held(&kbdev->hwaccess_lock);
607
608 WARN_ON(backend->mcu_state != KBASE_MCU_ON);
609
        /* This function is only for the case where the MCU is managing the
         * cores; if the firmware is operating with host control of core
         * power, do nothing here.
         */
613 if (unlikely(kbdev->csf.firmware_hctl_core_pwr))
614 return false;
615
616 core_mask_update =
617 backend->shaders_avail != backend->shaders_desired_mask;
618
619 timer_update = kbdev->csf.mcu_core_pwroff_dur_count !=
620 kbdev->csf.mcu_core_pwroff_reg_shadow;
621
622 if (core_mask_update || timer_update)
623 kbase_csf_firmware_update_core_attr(kbdev, timer_update,
624 core_mask_update, backend->shaders_desired_mask);
625
626 return (core_mask_update || timer_update);
627 }
628
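/**
 * kbase_pm_is_mcu_inactive - Check whether an MCU state counts as inactive
 *
 * @kbdev: The kbase device structure of the device
 * @state: The MCU state to check
 *
 * Return: true if @state is KBASE_MCU_OFF or KBASE_MCU_IN_SLEEP.
 */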
bool kbase_pm_is_mcu_inactive(struct kbase_device *kbdev,
                              enum kbase_mcu_state state)
631 {
632 lockdep_assert_held(&kbdev->hwaccess_lock);
633
634 return ((state == KBASE_MCU_OFF) || (state == KBASE_MCU_IN_SLEEP));
635 }
636
637 #ifdef KBASE_PM_RUNTIME
/**
 * kbase_pm_enable_mcu_db_notification - Enable the Doorbell notification on
 *                                       MCU side
 *
 * @kbdev: Pointer to the device.
 *
 * This function is called to re-enable the Doorbell notification on MCU side
 * when the MCU needs to become active again.
 */
static void kbase_pm_enable_mcu_db_notification(struct kbase_device *kbdev)
648 {
649 u32 val = kbase_reg_read(kbdev, GPU_CONTROL_REG(MCU_CONTROL));
650
651 lockdep_assert_held(&kbdev->hwaccess_lock);
652
653 val &= ~MCU_CNTRL_DOORBELL_DISABLE_MASK;
654 kbase_reg_write(kbdev, GPU_CONTROL_REG(MCU_CONTROL), val);
655 }
656 #endif
657
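/**
 * kbase_pm_mcu_update_state - Run the MCU state machine
 *
 * @kbdev: The kbase device structure of the device
 *
 * Steps the MCU state machine until it reaches a stable state. Caller must
 * hold the hwaccess_lock.
 *
 * Return: Always 0.
 */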
static int kbase_pm_mcu_update_state(struct kbase_device *kbdev)
659 {
660 struct kbase_pm_backend_data *backend = &kbdev->pm.backend;
661 enum kbase_mcu_state prev_state;
662
663 lockdep_assert_held(&kbdev->hwaccess_lock);
664
665 /*
666 * Initial load of firmware should have been done to
667 * exercise the MCU state machine.
668 */
669 if (unlikely(!kbdev->csf.firmware_inited)) {
670 WARN_ON(backend->mcu_state != KBASE_MCU_OFF);
671 return 0;
672 }
673
674 do {
675 u64 shaders_trans = kbase_pm_get_trans_cores(kbdev, KBASE_PM_CORE_SHADER);
676 u64 shaders_ready = kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_SHADER);
677
678 /* mask off ready from trans in case transitions finished
679 * between the register reads
680 */
681 shaders_trans &= ~shaders_ready;
682
683 prev_state = backend->mcu_state;
684
685 switch (backend->mcu_state) {
686 case KBASE_MCU_OFF:
687 if (kbase_pm_is_mcu_desired(kbdev) &&
688 !backend->policy_change_clamp_state_to_off &&
689 backend->l2_state == KBASE_L2_ON) {
690 kbase_csf_firmware_trigger_reload(kbdev);
691 backend->mcu_state = KBASE_MCU_PEND_ON_RELOAD;
692 }
693 break;
694
695 case KBASE_MCU_PEND_ON_RELOAD:
696 if (kbdev->csf.firmware_reloaded) {
697 backend->shaders_desired_mask =
698 kbase_pm_ca_get_core_mask(kbdev);
699 kbase_csf_firmware_global_reinit(kbdev,
700 backend->shaders_desired_mask);
701 backend->mcu_state =
702 KBASE_MCU_ON_GLB_REINIT_PEND;
703 }
704 break;
705
706 case KBASE_MCU_ON_GLB_REINIT_PEND:
707 if (kbase_csf_firmware_global_reinit_complete(kbdev)) {
708 backend->shaders_avail =
709 backend->shaders_desired_mask;
710 backend->pm_shaders_core_mask = 0;
711 if (kbdev->csf.firmware_hctl_core_pwr) {
712 kbase_pm_invoke(kbdev, KBASE_PM_CORE_SHADER,
713 backend->shaders_avail, ACTION_PWRON);
714 backend->mcu_state =
715 KBASE_MCU_HCTL_SHADERS_PEND_ON;
716 } else
717 backend->mcu_state = KBASE_MCU_ON_HWCNT_ENABLE;
718 }
719 break;
720
721 case KBASE_MCU_HCTL_SHADERS_PEND_ON:
722 if (!shaders_trans &&
723 shaders_ready == backend->shaders_avail) {
724 /* Cores now stable, notify MCU the stable mask */
725 kbase_csf_firmware_update_core_attr(kbdev,
726 false, true, shaders_ready);
727
728 backend->pm_shaders_core_mask = shaders_ready;
729 backend->mcu_state =
730 KBASE_MCU_HCTL_CORES_NOTIFY_PEND;
731 }
732 break;
733
734 case KBASE_MCU_HCTL_CORES_NOTIFY_PEND:
735 /* Wait for the acknowledgement */
736 if (kbase_csf_firmware_core_attr_updated(kbdev))
737 backend->mcu_state = KBASE_MCU_ON_HWCNT_ENABLE;
738 break;
739
740 case KBASE_MCU_ON_HWCNT_ENABLE:
741 backend->hwcnt_desired = true;
742 if (backend->hwcnt_disabled) {
743 unsigned long flags;
744
745 kbase_csf_scheduler_spin_lock(kbdev, &flags);
746 kbase_hwcnt_context_enable(
747 kbdev->hwcnt_gpu_ctx);
748 kbase_csf_scheduler_spin_unlock(kbdev, flags);
749 backend->hwcnt_disabled = false;
750 }
751 backend->mcu_state = KBASE_MCU_ON;
752 break;
753
754 case KBASE_MCU_ON:
755 backend->shaders_desired_mask = kbase_pm_ca_get_core_mask(kbdev);
756
757 if (!kbase_pm_is_mcu_desired(kbdev))
758 backend->mcu_state = KBASE_MCU_ON_HWCNT_DISABLE;
759 else if (kbdev->csf.firmware_hctl_core_pwr) {
760 /* Host control scale up/down cores as needed */
761 if (backend->shaders_desired_mask != shaders_ready) {
762 backend->hwcnt_desired = false;
763 if (!backend->hwcnt_disabled)
764 kbase_pm_trigger_hwcnt_disable(kbdev);
765 backend->mcu_state =
766 KBASE_MCU_HCTL_MCU_ON_RECHECK;
767 }
768 } else if (kbase_pm_handle_mcu_core_attr_update(kbdev)) {
769 backend->mcu_state = KBASE_MCU_ON_CORE_ATTR_UPDATE_PEND;
770 }
771 break;
772
773 case KBASE_MCU_HCTL_MCU_ON_RECHECK:
774 backend->shaders_desired_mask = kbase_pm_ca_get_core_mask(kbdev);
775
776 if (!backend->hwcnt_disabled) {
777 /* Wait for being disabled */
778 ;
779 } else if (!kbase_pm_is_mcu_desired(kbdev)) {
780 /* Converging to MCU powering down flow */
781 backend->mcu_state = KBASE_MCU_ON_HWCNT_DISABLE;
782 } else if (backend->shaders_desired_mask & ~shaders_ready) {
783 /* set cores ready but not available to
784 * meet SHADERS_PEND_ON check pass
785 */
786 backend->shaders_avail =
787 (backend->shaders_desired_mask | shaders_ready);
788
789 kbase_pm_invoke(kbdev, KBASE_PM_CORE_SHADER,
790 backend->shaders_avail & ~shaders_ready,
791 ACTION_PWRON);
792 backend->mcu_state =
793 KBASE_MCU_HCTL_SHADERS_PEND_ON;
794
795 } else if (~backend->shaders_desired_mask & shaders_ready) {
796 kbase_csf_firmware_update_core_attr(kbdev, false, true,
797 backend->shaders_desired_mask);
798 backend->mcu_state = KBASE_MCU_HCTL_CORES_DOWN_SCALE_NOTIFY_PEND;
799 } else {
800 backend->mcu_state =
801 KBASE_MCU_HCTL_SHADERS_PEND_ON;
802 }
803 break;
804
805 case KBASE_MCU_HCTL_CORES_DOWN_SCALE_NOTIFY_PEND:
806 if (kbase_csf_firmware_core_attr_updated(kbdev)) {
807 /* wait in queue until cores idle */
808 queue_work(backend->core_idle_wq, &backend->core_idle_work);
809 backend->mcu_state = KBASE_MCU_HCTL_CORE_INACTIVE_PEND;
810 }
811 break;
812
813 case KBASE_MCU_HCTL_CORE_INACTIVE_PEND:
814 {
815 u64 active_cores = kbase_pm_get_active_cores(
816 kbdev,
817 KBASE_PM_CORE_SHADER);
818 u64 cores_to_disable = shaders_ready &
819 ~backend->shaders_desired_mask;
820
821 if (!(cores_to_disable & active_cores)) {
822 kbase_pm_invoke(kbdev, KBASE_PM_CORE_SHADER,
823 cores_to_disable,
824 ACTION_PWROFF);
825 backend->shaders_avail = backend->shaders_desired_mask;
826 backend->mcu_state = KBASE_MCU_HCTL_SHADERS_CORE_OFF_PEND;
827 }
828 }
829 break;
830
831 case KBASE_MCU_HCTL_SHADERS_CORE_OFF_PEND:
832 if (!shaders_trans && shaders_ready == backend->shaders_avail) {
833 /* Cores now stable */
834 backend->pm_shaders_core_mask = shaders_ready;
835 backend->mcu_state = KBASE_MCU_ON_HWCNT_ENABLE;
836 }
837 break;
838
839 case KBASE_MCU_ON_CORE_ATTR_UPDATE_PEND:
840 if (kbase_csf_firmware_core_attr_updated(kbdev)) {
841 backend->shaders_avail = backend->shaders_desired_mask;
842 backend->mcu_state = KBASE_MCU_ON;
843 }
844 break;
845
846 case KBASE_MCU_ON_HWCNT_DISABLE:
847 if (kbase_pm_is_mcu_desired(kbdev)) {
848 backend->mcu_state = KBASE_MCU_ON_HWCNT_ENABLE;
849 break;
850 }
851
852 backend->hwcnt_desired = false;
853 if (!backend->hwcnt_disabled)
854 kbase_pm_trigger_hwcnt_disable(kbdev);
855
856
857 if (backend->hwcnt_disabled) {
858 #ifdef KBASE_PM_RUNTIME
859 if (backend->gpu_sleep_mode_active)
860 backend->mcu_state = KBASE_MCU_ON_SLEEP_INITIATE;
861 else
862 #endif
863 backend->mcu_state = KBASE_MCU_ON_HALT;
864 }
865 break;
866
867 case KBASE_MCU_ON_HALT:
868 if (!kbase_pm_is_mcu_desired(kbdev)) {
869 kbase_csf_firmware_trigger_mcu_halt(kbdev);
870 backend->mcu_state = KBASE_MCU_ON_PEND_HALT;
871 } else
872 backend->mcu_state = KBASE_MCU_ON_HWCNT_ENABLE;
873 break;
874
875 case KBASE_MCU_ON_PEND_HALT:
876 if (kbase_csf_firmware_mcu_halted(kbdev)) {
877 KBASE_KTRACE_ADD(kbdev, MCU_HALTED, NULL,
878 kbase_csf_ktrace_gpu_cycle_cnt(kbdev));
879 if (kbdev->csf.firmware_hctl_core_pwr)
880 backend->mcu_state =
881 KBASE_MCU_HCTL_SHADERS_READY_OFF;
882 else
883 backend->mcu_state = KBASE_MCU_POWER_DOWN;
884 }
885 break;
886
887 case KBASE_MCU_HCTL_SHADERS_READY_OFF:
888 kbase_pm_invoke(kbdev, KBASE_PM_CORE_SHADER,
889 shaders_ready, ACTION_PWROFF);
890 backend->mcu_state =
891 KBASE_MCU_HCTL_SHADERS_PEND_OFF;
892 break;
893
894 case KBASE_MCU_HCTL_SHADERS_PEND_OFF:
895 if (!shaders_trans && !shaders_ready) {
896 backend->pm_shaders_core_mask = 0;
897 backend->mcu_state = KBASE_MCU_POWER_DOWN;
898 }
899 break;
900
901 case KBASE_MCU_POWER_DOWN:
902 kbase_csf_firmware_disable_mcu(kbdev);
903 backend->mcu_state = KBASE_MCU_PEND_OFF;
904 break;
905
906 case KBASE_MCU_PEND_OFF:
907 /* wait synchronously for the MCU to get disabled */
908 kbase_csf_firmware_disable_mcu_wait(kbdev);
909 backend->mcu_state = KBASE_MCU_OFF;
910 break;
911 #ifdef KBASE_PM_RUNTIME
912 case KBASE_MCU_ON_SLEEP_INITIATE:
913 if (!kbase_pm_is_mcu_desired(kbdev)) {
914 kbase_csf_firmware_trigger_mcu_sleep(kbdev);
915 backend->mcu_state = KBASE_MCU_ON_PEND_SLEEP;
916 } else
917 backend->mcu_state = KBASE_MCU_ON_HWCNT_ENABLE;
918 break;
919
920 case KBASE_MCU_ON_PEND_SLEEP:
921 if (kbase_csf_firmware_is_mcu_in_sleep(kbdev)) {
922 KBASE_KTRACE_ADD(kbdev, MCU_IN_SLEEP, NULL,
923 kbase_csf_ktrace_gpu_cycle_cnt(kbdev));
924 backend->mcu_state = KBASE_MCU_IN_SLEEP;
925 kbase_pm_enable_db_mirror_interrupt(kbdev);
926 kbase_csf_scheduler_reval_idleness_post_sleep(kbdev);
927 }
928 break;
929
930 case KBASE_MCU_IN_SLEEP:
931 if (kbase_pm_is_mcu_desired(kbdev) &&
932 backend->l2_state == KBASE_L2_ON) {
933 KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_REQUEST_WAKEUP(
934 kbdev, kbase_backend_get_cycle_cnt(kbdev));
935 kbase_pm_enable_mcu_db_notification(kbdev);
936 kbase_pm_disable_db_mirror_interrupt(kbdev);
937 backend->mcu_state = KBASE_MCU_ON_HWCNT_ENABLE;
938 }
939 break;
940 #endif
941 case KBASE_MCU_RESET_WAIT:
942 /* Reset complete */
943 if (!backend->in_reset)
944 backend->mcu_state = KBASE_MCU_OFF;
945 break;
946
947 default:
948 WARN(1, "Invalid state in mcu_state: %d",
949 backend->mcu_state);
950 }
951
952 if (backend->mcu_state != prev_state)
953 dev_dbg(kbdev->dev, "MCU state transition: %s to %s\n",
954 kbase_mcu_state_to_string(prev_state),
955 kbase_mcu_state_to_string(backend->mcu_state));
956
957 } while (backend->mcu_state != prev_state);
958
959 return 0;
960 }
961
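/**
 * core_idle_worker - Wait for the shader cores that are to be powered off to
 *                    become idle
 *
 * @work: The work item embedded in the PM backend
 *
 * Polls the active core mask until the cores that are about to be powered off
 * are idle, then re-runs the PM state machine. Gives up if the GPU is powered
 * down or the MCU leaves the KBASE_MCU_HCTL_CORE_INACTIVE_PEND state.
 */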
static void core_idle_worker(struct work_struct *work)
963 {
964 struct kbase_device *kbdev =
965 container_of(work, struct kbase_device, pm.backend.core_idle_work);
966 struct kbase_pm_backend_data *backend = &kbdev->pm.backend;
967 unsigned long flags;
968
969 spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
970 while (backend->gpu_powered && (backend->mcu_state == KBASE_MCU_HCTL_CORE_INACTIVE_PEND)) {
971 const unsigned int core_inactive_wait_ms = 1;
972 u64 active_cores = kbase_pm_get_active_cores(kbdev, KBASE_PM_CORE_SHADER);
973 u64 shaders_ready = kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_SHADER);
974 u64 cores_to_disable = shaders_ready & ~backend->shaders_desired_mask;
975
976 if (!(cores_to_disable & active_cores)) {
977 kbase_pm_update_state(kbdev);
978 break;
979 }
980
981 spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
982 msleep(core_inactive_wait_ms);
983 spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
984 }
985
986 spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
987 }
988 #endif
989
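/**
 * kbase_l2_core_state_to_string - Convert an L2 state to a human readable
 *                                 string
 *
 * @state: The L2 cache state
 *
 * Return: Name of the state, or "Bad level 2 cache state" if @state is out of
 *         range.
 */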
static const char *kbase_l2_core_state_to_string(enum kbase_l2_core_state state)
991 {
992 const char *const strings[] = {
993 #define KBASEP_L2_STATE(n) #n,
994 #include "mali_kbase_pm_l2_states.h"
995 #undef KBASEP_L2_STATE
996 };
997 if (WARN_ON((size_t)state >= ARRAY_SIZE(strings)))
998 return "Bad level 2 cache state";
999 else
1000 return strings[state];
1001 }
1002
1003 #if !MALI_USE_CSF
1004 /* On powering on the L2, the tracked kctx becomes stale and can be cleared.
1005 * This enables the backend to spare the START_FLUSH.INV_SHADER_OTHER
1006 * operation on the first submitted katom after the L2 powering on.
1007 */
static void kbase_pm_l2_clear_backend_slot_submit_kctx(struct kbase_device *kbdev)
1009 {
1010 int js;
1011
1012 lockdep_assert_held(&kbdev->hwaccess_lock);
1013
1014 /* Clear the slots' last katom submission kctx */
1015 for (js = 0; js < kbdev->gpu_props.num_job_slots; js++)
1016 kbdev->hwaccess.backend.slot_rb[js].last_kctx_tagged = SLOT_RB_NULL_TAG_VAL;
1017 }
1018 #endif
1019
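/**
 * kbase_pm_l2_update_state - Run the L2 cache state machine
 *
 * @kbdev: The kbase device structure of the device
 *
 * Steps the L2 state machine until it reaches a stable state. Caller must
 * hold the hwaccess_lock.
 *
 * Return: Always 0.
 */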
static int kbase_pm_l2_update_state(struct kbase_device *kbdev)
1021 {
1022 struct kbase_pm_backend_data *backend = &kbdev->pm.backend;
1023 u64 l2_present = kbdev->gpu_props.curr_config.l2_present;
1024 #if !MALI_USE_CSF
1025 u64 tiler_present = kbdev->gpu_props.props.raw_props.tiler_present;
1026 #endif
1027 enum kbase_l2_core_state prev_state;
1028
1029 lockdep_assert_held(&kbdev->hwaccess_lock);
1030
1031 do {
1032 /* Get current state */
1033 u64 l2_trans = kbase_pm_get_trans_cores(kbdev,
1034 KBASE_PM_CORE_L2);
1035 u64 l2_ready = kbase_pm_get_ready_cores(kbdev,
1036 KBASE_PM_CORE_L2);
1037
1038 #if !MALI_USE_CSF
1039 u64 tiler_trans = kbase_pm_get_trans_cores(kbdev,
1040 KBASE_PM_CORE_TILER);
1041 u64 tiler_ready = kbase_pm_get_ready_cores(kbdev,
1042 KBASE_PM_CORE_TILER);
1043 #endif
1044
1045 /*
1046 * kbase_pm_get_ready_cores and kbase_pm_get_trans_cores
1047 * are vulnerable to corruption if gpu is lost
1048 */
1049 if (kbase_is_gpu_removed(kbdev)
1050 #ifdef CONFIG_MALI_ARBITER_SUPPORT
1051 || kbase_pm_is_gpu_lost(kbdev)) {
1052 #else
1053 ) {
1054 #endif
1055 backend->shaders_state =
1056 KBASE_SHADERS_OFF_CORESTACK_OFF;
1057 backend->hwcnt_desired = false;
1058 if (!backend->hwcnt_disabled) {
1059 /* Don't progress until hw counters are disabled
1060 * This may involve waiting for a worker to complete.
1061 * The HW counters backend disable code checks for the
1062 * GPU removed case and will error out without touching
1063 * the hardware. This step is needed to keep the HW
1064 * counters in a consistent state after a GPU lost.
1065 */
1066 backend->l2_state =
1067 KBASE_L2_ON_HWCNT_DISABLE;
1068 kbase_pm_trigger_hwcnt_disable(kbdev);
1069 }
1070
1071 if (backend->hwcnt_disabled) {
1072 backend->l2_state = KBASE_L2_OFF;
1073 dev_dbg(kbdev->dev, "GPU lost has occurred - L2 off\n");
1074 }
1075 break;
1076 }
1077
1078 /* mask off ready from trans in case transitions finished
1079 * between the register reads
1080 */
1081 l2_trans &= ~l2_ready;
1082 #if !MALI_USE_CSF
1083 tiler_trans &= ~tiler_ready;
1084 #endif
1085 prev_state = backend->l2_state;
1086
1087 switch (backend->l2_state) {
1088 case KBASE_L2_OFF:
1089 if (kbase_pm_is_l2_desired(kbdev)) {
1090 /*
1091 * Set the desired config for L2 before
1092 * powering it on
1093 */
1094 kbase_pm_l2_config_override(kbdev);
1095 kbase_pbha_write_settings(kbdev);
1096 #if !MALI_USE_CSF
1097 /* L2 is required, power on. Powering on the
1098 * tiler will also power the first L2 cache.
1099 */
1100 kbase_pm_invoke(kbdev, KBASE_PM_CORE_TILER,
1101 tiler_present, ACTION_PWRON);
1102
1103 /* If we have more than one L2 cache then we
1104 * must power them on explicitly.
1105 */
1106 if (l2_present != 1)
1107 kbase_pm_invoke(kbdev, KBASE_PM_CORE_L2,
1108 l2_present & ~1,
1109 ACTION_PWRON);
1110 /* Clear backend slot submission kctx */
1111 kbase_pm_l2_clear_backend_slot_submit_kctx(kbdev);
1112 #else
1113 /* With CSF firmware, Host driver doesn't need to
1114 * handle power management with both shader and tiler cores.
1115 * The CSF firmware will power up the cores appropriately.
1116 * So only power the l2 cache explicitly.
1117 */
1118 kbase_pm_invoke(kbdev, KBASE_PM_CORE_L2,
1119 l2_present, ACTION_PWRON);
1120 #endif
1121 backend->l2_state = KBASE_L2_PEND_ON;
1122 }
1123 break;
1124
1125 case KBASE_L2_PEND_ON:
1126 #if !MALI_USE_CSF
1127 if (!l2_trans && l2_ready == l2_present && !tiler_trans
1128 && tiler_ready == tiler_present) {
1129 KBASE_KTRACE_ADD(kbdev, PM_CORES_CHANGE_AVAILABLE_TILER, NULL,
1130 tiler_ready);
1131 #else
1132 if (!l2_trans && l2_ready == l2_present) {
1133 KBASE_KTRACE_ADD(kbdev, PM_CORES_CHANGE_AVAILABLE_L2, NULL,
1134 l2_ready);
1135 #endif
1136 /*
1137 * Ensure snoops are enabled after L2 is powered
1138 * up. Note that kbase keeps track of the snoop
1139 * state, so safe to repeatedly call.
1140 */
1141 kbase_pm_cache_snoop_enable(kbdev);
1142
1143 /* With the L2 enabled, we can now enable
1144 * hardware counters.
1145 */
1146 if (kbdev->pm.backend.gpu_clock_slow_down_wa)
1147 backend->l2_state =
1148 KBASE_L2_RESTORE_CLOCKS;
1149 else
1150 backend->l2_state =
1151 KBASE_L2_ON_HWCNT_ENABLE;
1152
1153 /* Now that the L2 is on, the shaders can start
1154 * powering on if they're required. The obvious
1155 * way to do this would be to call
1156 * kbase_pm_shaders_update_state() here.
1157 * However, that would make the two state
1158 * machines mutually recursive, as the opposite
1159 * would be needed for powering down. Instead,
1160 * callers of this function should use the
1161 * kbase_pm_update_state() wrapper, which will
1162 * call the shader state machine immediately
1163 * after the L2 (for power up), or
1164 * automatically re-invoke the L2 state machine
1165 * when the shaders power down.
1166 */
1167 }
1168 break;
1169
1170 case KBASE_L2_RESTORE_CLOCKS:
                        /* We assume that only GPUs affected by
                         * BASE_HW_ISSUE_GPU2017_1336 ever fall into this state.
                         */
1174 WARN_ON_ONCE(!kbdev->pm.backend.gpu_clock_slow_down_wa);
1175
                        /* If the L2 is not needed, we need to make sure that
                         * any previously issued work to restore the GPU clock
                         * is cancelled. To do that, move to the
                         * KBASE_L2_SLOW_DOWN_CLOCKS state.
                         */
1180 if (!kbase_pm_is_l2_desired(kbdev)) {
1181 backend->l2_state = KBASE_L2_SLOW_DOWN_CLOCKS;
1182 break;
1183 }
1184
1185 backend->gpu_clock_slow_down_desired = false;
1186 if (backend->gpu_clock_slowed_down)
1187 kbase_pm_control_gpu_clock(kbdev);
1188 else
1189 backend->l2_state = KBASE_L2_ON_HWCNT_ENABLE;
1190 break;
1191
1192 case KBASE_L2_ON_HWCNT_ENABLE:
1193 #if !MALI_USE_CSF
1194 backend->hwcnt_desired = true;
1195 if (backend->hwcnt_disabled) {
1196 kbase_hwcnt_context_enable(
1197 kbdev->hwcnt_gpu_ctx);
1198 backend->hwcnt_disabled = false;
1199 }
1200 #endif
1201 backend->l2_state = KBASE_L2_ON;
1202 break;
1203
1204 case KBASE_L2_ON:
1205 if (!kbase_pm_is_l2_desired(kbdev)) {
1206 #if !MALI_USE_CSF
1207 /* Do not power off L2 until the shaders and
1208 * core stacks are off.
1209 */
1210 if (backend->shaders_state != KBASE_SHADERS_OFF_CORESTACK_OFF)
1211 break;
1212 #else
1213 /* Do not power off L2 until the MCU has been stopped */
1214 if ((backend->mcu_state != KBASE_MCU_OFF) &&
1215 (backend->mcu_state != KBASE_MCU_IN_SLEEP))
1216 break;
1217 #endif
1218
1219 /* We need to make sure hardware counters are
1220 * disabled before powering down the L2, to
1221 * prevent loss of data.
1222 *
1223 * We waited until after the cores were powered
1224 * down to prevent ping-ponging between hwcnt
1225 * enabled and disabled, which would have
1226 * happened if userspace submitted more work
1227 * while we were trying to power down.
1228 */
1229 backend->l2_state = KBASE_L2_ON_HWCNT_DISABLE;
1230 }
1231 break;
1232
1233 case KBASE_L2_ON_HWCNT_DISABLE:
1234 #if !MALI_USE_CSF
1235 /* If the L2 became desired while we were waiting on the
1236 * worker to do the actual hwcnt disable (which might
1237 * happen if some work was submitted immediately after
1238 * the shaders powered off), then we need to early-out
1239 * of this state and re-enable hwcnt.
1240 *
1241 * If we get lucky, the hwcnt disable might not have
1242 * actually started yet, and the logic in the hwcnt
1243 * enable state will prevent the worker from
1244 * performing the disable entirely, preventing loss of
1245 * any hardware counter data.
1246 *
1247 * If the hwcnt disable has started, then we'll lose
1248 * a tiny amount of hardware counter data between the
1249 * disable and the re-enable occurring.
1250 *
1251 * This loss of data is preferable to the alternative,
1252 * which is to block the shader cores from doing any
1253 * work until we're sure hwcnt has been re-enabled.
1254 */
1255 if (kbase_pm_is_l2_desired(kbdev)) {
1256 backend->l2_state = KBASE_L2_ON_HWCNT_ENABLE;
1257 break;
1258 }
1259
1260 backend->hwcnt_desired = false;
1261 if (!backend->hwcnt_disabled) {
1262 kbase_pm_trigger_hwcnt_disable(kbdev);
1263 }
1264 #endif
1265
1266 if (backend->hwcnt_disabled) {
1267 if (kbdev->pm.backend.gpu_clock_slow_down_wa)
1268 backend->l2_state =
1269 KBASE_L2_SLOW_DOWN_CLOCKS;
1270 else
1271 backend->l2_state = KBASE_L2_POWER_DOWN;
1272 }
1273 break;
1274
1275 case KBASE_L2_SLOW_DOWN_CLOCKS:
                        /* We assume that only GPUs affected by
                         * BASE_HW_ISSUE_GPU2017_1336 ever fall into this state.
                         */
1279 WARN_ON_ONCE(!kbdev->pm.backend.gpu_clock_slow_down_wa);
1280
                        /* The L2 needs to be powered up, and we need to make
                         * sure that any previously issued work to slow down
                         * the GPU clock is cancelled. To do that, we move to
                         * the KBASE_L2_RESTORE_CLOCKS state.
                         */
1286 if (kbase_pm_is_l2_desired(kbdev)) {
1287 backend->l2_state = KBASE_L2_RESTORE_CLOCKS;
1288 break;
1289 }
1290
1291 backend->gpu_clock_slow_down_desired = true;
1292 if (!backend->gpu_clock_slowed_down)
1293 kbase_pm_control_gpu_clock(kbdev);
1294 else
1295 backend->l2_state = KBASE_L2_POWER_DOWN;
1296
1297 break;
1298
1299 case KBASE_L2_POWER_DOWN:
1300 if (!backend->l2_always_on)
1301 /* Powering off the L2 will also power off the
1302 * tiler.
1303 */
1304 kbase_pm_invoke(kbdev, KBASE_PM_CORE_L2,
1305 l2_present,
1306 ACTION_PWROFF);
1307 else
1308 /* If L2 cache is powered then we must flush it
1309 * before we power off the GPU. Normally this
1310 * would have been handled when the L2 was
1311 * powered off.
1312 */
1313 kbase_gpu_start_cache_clean_nolock(
1314 kbdev, GPU_COMMAND_CACHE_CLN_INV_L2);
1315 #if !MALI_USE_CSF
1316 KBASE_KTRACE_ADD(kbdev, PM_CORES_CHANGE_AVAILABLE_TILER, NULL, 0u);
1317 #else
1318 KBASE_KTRACE_ADD(kbdev, PM_CORES_CHANGE_AVAILABLE_L2, NULL, 0u);
1319 #endif
1320 backend->l2_state = KBASE_L2_PEND_OFF;
1321 break;
1322
1323 case KBASE_L2_PEND_OFF:
1324 if (!backend->l2_always_on) {
1325 /* We only need to check the L2 here - if the L2
1326 * is off then the tiler is definitely also off.
1327 */
1328 if (!l2_trans && !l2_ready)
1329 /* L2 is now powered off */
1330 backend->l2_state = KBASE_L2_OFF;
1331 } else {
1332 if (!kbdev->cache_clean_in_progress)
1333 backend->l2_state = KBASE_L2_OFF;
1334 }
1335 break;
1336
1337 case KBASE_L2_RESET_WAIT:
1338 /* Reset complete */
1339 if (!backend->in_reset)
1340 backend->l2_state = KBASE_L2_OFF;
1341 break;
1342
1343 default:
1344 WARN(1, "Invalid state in l2_state: %d",
1345 backend->l2_state);
1346 }
1347
1348 if (backend->l2_state != prev_state)
1349 dev_dbg(kbdev->dev, "L2 state transition: %s to %s\n",
1350 kbase_l2_core_state_to_string(prev_state),
1351 kbase_l2_core_state_to_string(
1352 backend->l2_state));
1353
1354 } while (backend->l2_state != prev_state);
1355
1356 if (kbdev->pm.backend.invoke_poweroff_wait_wq_when_l2_off &&
1357 backend->l2_state == KBASE_L2_OFF) {
1358 kbdev->pm.backend.invoke_poweroff_wait_wq_when_l2_off = false;
1359 queue_work(kbdev->pm.backend.gpu_poweroff_wait_wq,
1360 &kbdev->pm.backend.gpu_poweroff_wait_work);
1361 }
1362
1363 return 0;
1364 }
1365
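/**
 * shader_poweroff_timer_stop_callback - Worker that cancels the shader
 *                                       poweroff tick timer
 *
 * @data: The work item embedded in the shader tick timer state
 *
 * Cancels the hrtimer and, if the GPU is still powered, re-runs the PM state
 * machine so that the shader state machine can observe the cancellation.
 */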
static void shader_poweroff_timer_stop_callback(struct work_struct *data)
1367 {
1368 unsigned long flags;
1369 struct kbasep_pm_tick_timer_state *stt = container_of(data,
1370 struct kbasep_pm_tick_timer_state, work);
1371 struct kbase_device *kbdev = container_of(stt, struct kbase_device,
1372 pm.backend.shader_tick_timer);
1373
1374 hrtimer_cancel(&stt->timer);
1375
1376 spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
1377
1378 stt->cancel_queued = false;
1379 if (kbdev->pm.backend.gpu_powered)
1380 kbase_pm_update_state(kbdev);
1381
1382 spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
1383 }
1384
1385 /**
1386 * shader_poweroff_timer_queue_cancel - cancel the shader poweroff tick timer
1387 * @kbdev: pointer to kbase device
1388 *
1389 * Synchronization between the shader state machine and the timer thread is
1390 * difficult. This is because situations may arise where the state machine
1391 * wants to start the timer, but the callback is already running, and has
1392 * already passed the point at which it checks whether it is required, and so
1393 * cancels itself, even though the state machine may have just tried to call
1394 * hrtimer_start.
1395 *
1396 * This cannot be stopped by holding hwaccess_lock in the timer thread,
1397 * because there are still infinitesimally small sections at the start and end
1398 * of the callback where the lock is not held.
1399 *
1400 * Instead, a new state is added to the shader state machine,
1401 * KBASE_SHADERS_OFF_CORESTACK_OFF_TIMER_PEND_OFF. This is used to guarantee
1402 * that when the shaders are switched off, the timer has definitely been
1403 * cancelled. As a result, when KBASE_SHADERS_ON_CORESTACK_ON is left and the
1404 * timer is started, it is guaranteed that either the timer is already running
1405 * (from an availability change or cancelled timer), or hrtimer_start will
1406 * succeed. It is critical to avoid ending up in
1407 * KBASE_SHADERS_WAIT_OFF_CORESTACK_ON without the timer running, or it could
1408 * hang there forever.
1409 */
1410 static void shader_poweroff_timer_queue_cancel(struct kbase_device *kbdev)
1411 {
1412 struct kbasep_pm_tick_timer_state *stt =
1413 &kbdev->pm.backend.shader_tick_timer;
1414
1415 lockdep_assert_held(&kbdev->hwaccess_lock);
1416
1417 stt->needed = false;
1418
1419 if (hrtimer_active(&stt->timer) && !stt->cancel_queued) {
1420 stt->cancel_queued = true;
1421 queue_work(stt->wq, &stt->work);
1422 }
1423 }
1424
1425 #if !MALI_USE_CSF
1426 static const char *kbase_shader_core_state_to_string(
1427 enum kbase_shader_core_state state)
1428 {
1429 const char *const strings[] = {
1430 #define KBASEP_SHADER_STATE(n) #n,
1431 #include "mali_kbase_pm_shader_states.h"
1432 #undef KBASEP_SHADER_STATE
1433 };
1434 if (WARN_ON((size_t)state >= ARRAY_SIZE(strings)))
1435 return "Bad shader core state";
1436 else
1437 return strings[state];
1438 }
1439
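/**
 * kbase_pm_shaders_update_state - Run the shader core state machine
 *
 * @kbdev: The kbase device structure of the device
 *
 * Steps the shader core (and core stack) state machine until it reaches a
 * stable state. Caller must hold the hwaccess_lock.
 *
 * Return: Always 0.
 */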
static int kbase_pm_shaders_update_state(struct kbase_device *kbdev)
1441 {
1442 struct kbase_pm_backend_data *backend = &kbdev->pm.backend;
1443 struct kbasep_pm_tick_timer_state *stt =
1444 &kbdev->pm.backend.shader_tick_timer;
1445 enum kbase_shader_core_state prev_state;
1446 u64 stacks_avail = 0;
1447
1448 lockdep_assert_held(&kbdev->hwaccess_lock);
1449
1450 if (corestack_driver_control)
1451 /* Always power on all the corestacks. Disabling certain
1452 * corestacks when their respective shaders are not in the
1453 * available bitmap is not currently supported.
1454 */
1455 stacks_avail = kbase_pm_get_present_cores(kbdev, KBASE_PM_CORE_STACK);
1456
1457 do {
1458 u64 shaders_trans = kbase_pm_get_trans_cores(kbdev, KBASE_PM_CORE_SHADER);
1459 u64 shaders_ready = kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_SHADER);
1460 u64 stacks_trans = 0;
1461 u64 stacks_ready = 0;
1462
1463 if (corestack_driver_control) {
1464 stacks_trans = kbase_pm_get_trans_cores(kbdev, KBASE_PM_CORE_STACK);
1465 stacks_ready = kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_STACK);
1466 }
1467
1468 /*
1469 * kbase_pm_get_ready_cores and kbase_pm_get_trans_cores
1470 * are vulnerable to corruption if gpu is lost
1471 */
1472 if (kbase_is_gpu_removed(kbdev)
1473 #ifdef CONFIG_MALI_ARBITER_SUPPORT
1474 || kbase_pm_is_gpu_lost(kbdev)) {
1475 #else
1476 ) {
1477 #endif
1478 backend->shaders_state =
1479 KBASE_SHADERS_OFF_CORESTACK_OFF;
1480 dev_dbg(kbdev->dev, "GPU lost has occurred - shaders off\n");
1481 break;
1482 }
1483
1484 /* mask off ready from trans in case transitions finished
1485 * between the register reads
1486 */
1487 shaders_trans &= ~shaders_ready;
1488 stacks_trans &= ~stacks_ready;
1489
1490 prev_state = backend->shaders_state;
1491
1492 switch (backend->shaders_state) {
1493 case KBASE_SHADERS_OFF_CORESTACK_OFF:
1494 /* Ignore changes to the shader core availability
1495 * except at certain points where we can handle it,
1496 * i.e. off and SHADERS_ON_CORESTACK_ON.
1497 */
1498 backend->shaders_desired_mask =
1499 kbase_pm_ca_get_core_mask(kbdev);
1500 backend->pm_shaders_core_mask = 0;
1501
1502 if (backend->shaders_desired &&
1503 backend->l2_state == KBASE_L2_ON) {
1504 if (backend->hwcnt_desired &&
1505 !backend->hwcnt_disabled) {
1506 /* Trigger a hwcounter dump */
1507 backend->hwcnt_desired = false;
1508 kbase_pm_trigger_hwcnt_disable(kbdev);
1509 }
1510
1511 if (backend->hwcnt_disabled) {
1512 if (corestack_driver_control) {
1513 kbase_pm_invoke(kbdev,
1514 KBASE_PM_CORE_STACK,
1515 stacks_avail,
1516 ACTION_PWRON);
1517 }
1518 backend->shaders_state =
1519 KBASE_SHADERS_OFF_CORESTACK_PEND_ON;
1520 }
1521 }
1522 break;
1523
1524 case KBASE_SHADERS_OFF_CORESTACK_PEND_ON:
1525 if (!stacks_trans && stacks_ready == stacks_avail) {
1526 backend->shaders_avail =
1527 backend->shaders_desired_mask;
1528 kbase_pm_invoke(kbdev, KBASE_PM_CORE_SHADER,
1529 backend->shaders_avail, ACTION_PWRON);
1530
1531 if (backend->pm_current_policy &&
1532 backend->pm_current_policy->handle_event)
1533 backend->pm_current_policy->handle_event(
1534 kbdev,
1535 KBASE_PM_POLICY_EVENT_POWER_ON);
1536
1537 backend->shaders_state = KBASE_SHADERS_PEND_ON_CORESTACK_ON;
1538 }
1539 break;
1540
1541 case KBASE_SHADERS_PEND_ON_CORESTACK_ON:
1542 if (!shaders_trans && shaders_ready == backend->shaders_avail) {
1543 KBASE_KTRACE_ADD(kbdev, PM_CORES_CHANGE_AVAILABLE, NULL, shaders_ready);
1544 backend->pm_shaders_core_mask = shaders_ready;
1545 backend->hwcnt_desired = true;
1546 if (backend->hwcnt_disabled) {
1547 #if MALI_USE_CSF
1548 unsigned long flags;
1549
1550 kbase_csf_scheduler_spin_lock(kbdev,
1551 &flags);
1552 #endif
1553 kbase_hwcnt_context_enable(
1554 kbdev->hwcnt_gpu_ctx);
1555 #if MALI_USE_CSF
1556 kbase_csf_scheduler_spin_unlock(kbdev,
1557 flags);
1558 #endif
1559 backend->hwcnt_disabled = false;
1560 }
1561
1562 backend->shaders_state = KBASE_SHADERS_ON_CORESTACK_ON;
1563 }
1564 break;
1565
1566 case KBASE_SHADERS_ON_CORESTACK_ON:
1567 backend->shaders_desired_mask =
1568 kbase_pm_ca_get_core_mask(kbdev);
1569
1570 /* If shaders to change state, trigger a counter dump */
1571 if (!backend->shaders_desired ||
1572 (backend->shaders_desired_mask != shaders_ready)) {
1573 backend->hwcnt_desired = false;
1574 if (!backend->hwcnt_disabled)
1575 kbase_pm_trigger_hwcnt_disable(kbdev);
1576 backend->shaders_state =
1577 KBASE_SHADERS_ON_CORESTACK_ON_RECHECK;
1578 }
1579 break;
1580
1581 case KBASE_SHADERS_ON_CORESTACK_ON_RECHECK:
1582 backend->shaders_desired_mask =
1583 kbase_pm_ca_get_core_mask(kbdev);
1584
1585 if (!backend->hwcnt_disabled) {
1586 /* Wait for being disabled */
1587 ;
1588 } else if (!backend->shaders_desired) {
1589 if (backend->pm_current_policy &&
1590 backend->pm_current_policy->handle_event)
1591 backend->pm_current_policy->handle_event(
1592 kbdev,
1593 KBASE_PM_POLICY_EVENT_IDLE);
1594
1595 if (kbdev->pm.backend.protected_transition_override ||
1596 #ifdef CONFIG_MALI_ARBITER_SUPPORT
1597 kbase_pm_is_suspending(kbdev) ||
1598 kbase_pm_is_gpu_lost(kbdev) ||
1599 #endif /* CONFIG_MALI_ARBITER_SUPPORT */
1600 !stt->configured_ticks ||
1601 WARN_ON(stt->cancel_queued)) {
1602 backend->shaders_state = KBASE_SHADERS_WAIT_FINISHED_CORESTACK_ON;
1603 } else {
1604 stt->remaining_ticks = stt->configured_ticks;
1605 stt->needed = true;
1606
1607 /* The shader hysteresis timer is not
1608 * done the obvious way, which would be
1609 * to start an hrtimer when the shader
1610 * power off is requested. Instead,
1611 * use a 'tick' timer, and set the
1612 * remaining number of ticks on a power
1613 * off request. This avoids the
1614 * latency of starting, then
1615 * immediately cancelling an hrtimer
1616 * when the shaders are re-requested
1617 * before the timeout expires.
1618 */
1619 if (!hrtimer_active(&stt->timer))
1620 hrtimer_start(&stt->timer,
1621 stt->configured_interval,
1622 HRTIMER_MODE_REL);
1623
1624 backend->shaders_state = KBASE_SHADERS_WAIT_OFF_CORESTACK_ON;
1625 }
1626 } else if (backend->shaders_desired_mask & ~shaders_ready) {
1627 /* set cores ready but not available to
1628 * meet KBASE_SHADERS_PEND_ON_CORESTACK_ON
1629 * check pass
1630 */
1631 backend->shaders_avail =
1632 (backend->shaders_desired_mask | shaders_ready);
1633
1634 kbase_pm_invoke(kbdev, KBASE_PM_CORE_SHADER,
1635 backend->shaders_avail & ~shaders_ready,
1636 ACTION_PWRON);
1637 backend->shaders_state =
1638 KBASE_SHADERS_PEND_ON_CORESTACK_ON;
1639 } else if (shaders_ready & ~backend->shaders_desired_mask) {
1640 backend->shaders_state =
1641 KBASE_SHADERS_WAIT_GPU_IDLE;
1642 } else {
1643 backend->shaders_state =
1644 KBASE_SHADERS_PEND_ON_CORESTACK_ON;
1645 }
1646 break;
1647
1648 case KBASE_SHADERS_WAIT_OFF_CORESTACK_ON:
1649 if (WARN_ON(!hrtimer_active(&stt->timer))) {
1650 stt->remaining_ticks = 0;
1651 backend->shaders_state = KBASE_SHADERS_WAIT_FINISHED_CORESTACK_ON;
1652 }
1653
1654 if (backend->shaders_desired) {
1655 if (backend->pm_current_policy &&
1656 backend->pm_current_policy->handle_event)
1657 backend->pm_current_policy->handle_event(
1658 kbdev,
1659 KBASE_PM_POLICY_EVENT_TIMER_HIT);
1660
1661 stt->remaining_ticks = 0;
1662 backend->shaders_state = KBASE_SHADERS_ON_CORESTACK_ON_RECHECK;
1663 } else if (stt->remaining_ticks == 0) {
1664 if (backend->pm_current_policy &&
1665 backend->pm_current_policy->handle_event)
1666 backend->pm_current_policy->handle_event(
1667 kbdev,
1668 KBASE_PM_POLICY_EVENT_TIMER_MISS);
1669
1670 backend->shaders_state = KBASE_SHADERS_WAIT_FINISHED_CORESTACK_ON;
1671 #ifdef CONFIG_MALI_ARBITER_SUPPORT
1672 } else if (kbase_pm_is_suspending(kbdev) ||
1673 kbase_pm_is_gpu_lost(kbdev)) {
1674 backend->shaders_state = KBASE_SHADERS_WAIT_FINISHED_CORESTACK_ON;
1675 #endif /* CONFIG_MALI_ARBITER_SUPPORT */
1676 }
1677 break;
1678
1679 case KBASE_SHADERS_WAIT_GPU_IDLE:
                        /* For a partial shader core power off, we need to wait
                         * for the currently running job and the one in the
                         * next-job registers to finish, then flush the L2,
                         * otherwise we might hit GPU2017-861.
                         */
1684 if (!kbase_gpu_atoms_submitted_any(kbdev)) {
1685 backend->partial_shaderoff = true;
1686 backend->shaders_state = KBASE_SHADERS_WAIT_FINISHED_CORESTACK_ON;
1687 }
1688 break;
1689
1690 case KBASE_SHADERS_WAIT_FINISHED_CORESTACK_ON:
1691 if (!backend->partial_shaderoff)
1692 shader_poweroff_timer_queue_cancel(kbdev);
1693
1694 if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TTRX_921)) {
1695 kbase_gpu_start_cache_clean_nolock(
1696 kbdev, GPU_COMMAND_CACHE_CLN_INV_L2);
1697 backend->shaders_state =
1698 KBASE_SHADERS_L2_FLUSHING_CORESTACK_ON;
1699 } else {
1700 backend->shaders_state =
1701 KBASE_SHADERS_READY_OFF_CORESTACK_ON;
1702 }
1703 break;
1704
1705 case KBASE_SHADERS_L2_FLUSHING_CORESTACK_ON:
1706 if (!kbdev->cache_clean_in_progress)
1707 backend->shaders_state =
1708 KBASE_SHADERS_READY_OFF_CORESTACK_ON;
1709
1710 break;
1711
1712 case KBASE_SHADERS_READY_OFF_CORESTACK_ON:
1713 if (backend->partial_shaderoff) {
1714 backend->partial_shaderoff = false;
1715 /* remove cores available but not ready to
1716 * meet KBASE_SHADERS_PEND_ON_CORESTACK_ON
1717 * check pass
1718 */
1719
1720 /* shaders_desired_mask shall be a subset of
1721 * shaders_ready
1722 */
1723 WARN_ON(backend->shaders_desired_mask & ~shaders_ready);
1724 WARN_ON(!(backend->shaders_desired_mask & shaders_ready));
1725
1726 backend->shaders_avail =
1727 backend->shaders_desired_mask;
1728 kbase_pm_invoke(kbdev, KBASE_PM_CORE_SHADER,
1729 shaders_ready & ~backend->shaders_avail, ACTION_PWROFF);
1730 backend->shaders_state = KBASE_SHADERS_PEND_ON_CORESTACK_ON;
1731 KBASE_KTRACE_ADD(kbdev, PM_CORES_CHANGE_AVAILABLE, NULL, (shaders_ready & ~backend->shaders_avail));
1732 } else {
1733 kbase_pm_invoke(kbdev, KBASE_PM_CORE_SHADER,
1734 shaders_ready, ACTION_PWROFF);
1735
1736 KBASE_KTRACE_ADD(kbdev, PM_CORES_CHANGE_AVAILABLE, NULL, 0u);
1737
1738 backend->shaders_state = KBASE_SHADERS_PEND_OFF_CORESTACK_ON;
1739 }
1740 break;
1741
1742 case KBASE_SHADERS_PEND_OFF_CORESTACK_ON:
1743 if (!shaders_trans && !shaders_ready) {
1744 if (corestack_driver_control)
1745 kbase_pm_invoke(kbdev, KBASE_PM_CORE_STACK,
1746 stacks_avail, ACTION_PWROFF);
1747
1748 backend->shaders_state = KBASE_SHADERS_OFF_CORESTACK_PEND_OFF;
1749 }
1750 break;
1751
1752 case KBASE_SHADERS_OFF_CORESTACK_PEND_OFF:
1753 if (!stacks_trans && !stacks_ready) {
/* Now that the cores are powered off, re-enable the hwcnt */
1755 backend->pm_shaders_core_mask = 0;
1756 backend->hwcnt_desired = true;
1757 if (backend->hwcnt_disabled) {
1758 #if MALI_USE_CSF
1759 unsigned long flags;
1760
1761 kbase_csf_scheduler_spin_lock(kbdev,
1762 &flags);
1763 #endif
1764 kbase_hwcnt_context_enable(
1765 kbdev->hwcnt_gpu_ctx);
1766 #if MALI_USE_CSF
1767 kbase_csf_scheduler_spin_unlock(kbdev,
1768 flags);
1769 #endif
1770 backend->hwcnt_disabled = false;
1771 }
1772 backend->shaders_state = KBASE_SHADERS_OFF_CORESTACK_OFF_TIMER_PEND_OFF;
1773 }
1774 break;
1775
1776 case KBASE_SHADERS_OFF_CORESTACK_OFF_TIMER_PEND_OFF:
1777 if (!hrtimer_active(&stt->timer) && !stt->cancel_queued)
1778 backend->shaders_state = KBASE_SHADERS_OFF_CORESTACK_OFF;
1779 break;
1780
1781 case KBASE_SHADERS_RESET_WAIT:
1782 /* Reset complete */
1783 if (!backend->in_reset)
1784 backend->shaders_state = KBASE_SHADERS_OFF_CORESTACK_OFF_TIMER_PEND_OFF;
1785 break;
1786 }
1787
1788 if (backend->shaders_state != prev_state)
1789 dev_dbg(kbdev->dev, "Shader state transition: %s to %s\n",
1790 kbase_shader_core_state_to_string(prev_state),
1791 kbase_shader_core_state_to_string(
1792 backend->shaders_state));
1793
1794 } while (backend->shaders_state != prev_state);
1795
1796 return 0;
1797 }
1798 #endif /* !MALI_USE_CSF */
1799
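/**
 * kbase_pm_is_in_desired_state_nolock - Check whether the power state machines
 *                                       have reached the currently desired state
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * The caller must hold kbdev->hwaccess_lock.
 *
 * Return: true if the L2 and shader (or MCU, on CSF GPUs) state machines are in
 *         the state currently desired by the power management policy.
 */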
1800 static bool kbase_pm_is_in_desired_state_nolock(struct kbase_device *kbdev)
1801 {
1802 bool in_desired_state = true;
1803
1804 lockdep_assert_held(&kbdev->hwaccess_lock);
1805
1806 if (kbase_pm_is_l2_desired(kbdev) &&
1807 kbdev->pm.backend.l2_state != KBASE_L2_ON)
1808 in_desired_state = false;
1809 else if (!kbase_pm_is_l2_desired(kbdev) &&
1810 kbdev->pm.backend.l2_state != KBASE_L2_OFF)
1811 in_desired_state = false;
1812
1813 #if !MALI_USE_CSF
1814 if (kbdev->pm.backend.shaders_desired &&
1815 kbdev->pm.backend.shaders_state != KBASE_SHADERS_ON_CORESTACK_ON)
1816 in_desired_state = false;
1817 else if (!kbdev->pm.backend.shaders_desired &&
1818 kbdev->pm.backend.shaders_state != KBASE_SHADERS_OFF_CORESTACK_OFF)
1819 in_desired_state = false;
1820 #else
1821 if (kbase_pm_is_mcu_desired(kbdev) &&
1822 kbdev->pm.backend.mcu_state != KBASE_MCU_ON)
1823 in_desired_state = false;
1824 else if (!kbase_pm_is_mcu_desired(kbdev) &&
1825 (kbdev->pm.backend.mcu_state != KBASE_MCU_OFF) &&
1826 (kbdev->pm.backend.mcu_state != KBASE_MCU_IN_SLEEP))
1827 in_desired_state = false;
1828 #endif
1829
1830 return in_desired_state;
1831 }
1832
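/**
 * kbase_pm_is_in_desired_state - Locked variant of
 *                                kbase_pm_is_in_desired_state_nolock()
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Takes and releases kbdev->hwaccess_lock around the check.
 *
 * Return: true if the power state machines are in the desired state.
 */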
1833 static bool kbase_pm_is_in_desired_state(struct kbase_device *kbdev)
1834 {
1835 bool in_desired_state;
1836 unsigned long flags;
1837
1838 spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
1839 in_desired_state = kbase_pm_is_in_desired_state_nolock(kbdev);
1840 spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
1841
1842 return in_desired_state;
1843 }
1844
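/**
 * kbase_pm_is_in_desired_state_with_l2_powered - Check that the desired power
 *                                                state has been reached and the
 *                                                L2 cache is powered on
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Return: true if the power state machines are in the desired state and the L2
 *         state machine is in the KBASE_L2_ON state.
 */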
1845 static bool kbase_pm_is_in_desired_state_with_l2_powered(
1846 struct kbase_device *kbdev)
1847 {
1848 bool in_desired_state = false;
1849 unsigned long flags;
1850
1851 spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
1852 if (kbase_pm_is_in_desired_state_nolock(kbdev) &&
1853 (kbdev->pm.backend.l2_state == KBASE_L2_ON))
1854 in_desired_state = true;
1855 spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
1856
1857 return in_desired_state;
1858 }
1859
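/**
 * kbase_pm_trace_power_state - Emit timeline events for the current power state
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Reports the set of ready cores for the L2, shader and tiler domains (and the
 * core stack, if the driver controls it) to the timeline stream. The caller
 * must hold kbdev->hwaccess_lock.
 */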
1860 static void kbase_pm_trace_power_state(struct kbase_device *kbdev)
1861 {
1862 lockdep_assert_held(&kbdev->hwaccess_lock);
1863
1864 KBASE_TLSTREAM_AUX_PM_STATE(
1865 kbdev,
1866 KBASE_PM_CORE_L2,
1867 kbase_pm_get_ready_cores(
1868 kbdev, KBASE_PM_CORE_L2));
1869 KBASE_TLSTREAM_AUX_PM_STATE(
1870 kbdev,
1871 KBASE_PM_CORE_SHADER,
1872 kbase_pm_get_ready_cores(
1873 kbdev, KBASE_PM_CORE_SHADER));
1874 KBASE_TLSTREAM_AUX_PM_STATE(
1875 kbdev,
1876 KBASE_PM_CORE_TILER,
1877 kbase_pm_get_ready_cores(
1878 kbdev,
1879 KBASE_PM_CORE_TILER));
1880
1881 if (corestack_driver_control)
1882 KBASE_TLSTREAM_AUX_PM_STATE(
1883 kbdev,
1884 KBASE_PM_CORE_STACK,
1885 kbase_pm_get_ready_cores(
1886 kbdev,
1887 KBASE_PM_CORE_STACK));
1888 }
1889
1890 void kbase_pm_update_state(struct kbase_device *kbdev)
1891 {
1892 #if !MALI_USE_CSF
1893 enum kbase_shader_core_state prev_shaders_state =
1894 kbdev->pm.backend.shaders_state;
1895 #else
1896 enum kbase_mcu_state prev_mcu_state = kbdev->pm.backend.mcu_state;
1897 #endif
1898
1899 lockdep_assert_held(&kbdev->hwaccess_lock);
1900
1901 if (!kbdev->pm.backend.gpu_ready)
1902 return; /* Do nothing if the GPU is not ready */
1903
1904 if (kbase_pm_l2_update_state(kbdev))
1905 return;
1906
1907 #if !MALI_USE_CSF
1908 if (kbase_pm_shaders_update_state(kbdev))
1909 return;
1910
1911 /* If the shaders just turned off, re-invoke the L2 state machine, in
1912 * case it was waiting for the shaders to turn off before powering down
1913 * the L2.
1914 */
1915 if (prev_shaders_state != KBASE_SHADERS_OFF_CORESTACK_OFF &&
1916 kbdev->pm.backend.shaders_state ==
1917 KBASE_SHADERS_OFF_CORESTACK_OFF) {
1918 if (kbase_pm_l2_update_state(kbdev))
1919 return;
1920 }
1921 #else
1922 if (kbase_pm_mcu_update_state(kbdev))
1923 return;
1924
1925 if (!kbase_pm_is_mcu_inactive(kbdev, prev_mcu_state) &&
1926 kbase_pm_is_mcu_inactive(kbdev, kbdev->pm.backend.mcu_state)) {
1927 if (kbase_pm_l2_update_state(kbdev))
1928 return;
1929 }
1930 #endif
1931
1932 if (kbase_pm_is_in_desired_state_nolock(kbdev)) {
1933 KBASE_KTRACE_ADD(kbdev, PM_DESIRED_REACHED, NULL,
1934 kbdev->pm.backend.shaders_avail);
1935
1936 kbase_pm_trace_power_state(kbdev);
1937
1938 KBASE_KTRACE_ADD(kbdev, PM_WAKE_WAITERS, NULL, 0);
1939 wake_up(&kbdev->pm.backend.gpu_in_desired_state_wait);
1940 }
1941 }
1942
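/**
 * shader_tick_timer_callback - Timer callback for the shader poweroff tick timer
 *
 * @timer: The hrtimer embedded in the shader tick timer state
 *
 * Decrements the remaining tick count while the shader cores are waiting to be
 * powered off, and invokes the power management state machine when the count
 * reaches zero with no shaders desired.
 *
 * Return: HRTIMER_RESTART if the timer is still needed, HRTIMER_NORESTART
 *         otherwise.
 */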
1943 static enum hrtimer_restart
1944 shader_tick_timer_callback(struct hrtimer *timer)
1945 {
1946 struct kbasep_pm_tick_timer_state *stt = container_of(timer,
1947 struct kbasep_pm_tick_timer_state, timer);
1948 struct kbase_device *kbdev = container_of(stt, struct kbase_device,
1949 pm.backend.shader_tick_timer);
1950 struct kbase_pm_backend_data *backend = &kbdev->pm.backend;
1951 unsigned long flags;
1952 enum hrtimer_restart restart = HRTIMER_NORESTART;
1953
1954 spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
1955
1956 if (stt->remaining_ticks &&
1957 backend->shaders_state == KBASE_SHADERS_WAIT_OFF_CORESTACK_ON) {
1958 stt->remaining_ticks--;
1959
1960 /* If the remaining ticks just changed from 1 to 0, invoke the
1961 * PM state machine to power off the shader cores.
1962 */
1963 if (!stt->remaining_ticks && !backend->shaders_desired)
1964 kbase_pm_update_state(kbdev);
1965 }
1966
1967 if (stt->needed) {
1968 hrtimer_forward_now(timer, stt->configured_interval);
1969 restart = HRTIMER_RESTART;
1970 }
1971
1972 spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
1973
1974 return restart;
1975 }
1976
1977 int kbase_pm_state_machine_init(struct kbase_device *kbdev)
1978 {
1979 struct kbasep_pm_tick_timer_state *stt = &kbdev->pm.backend.shader_tick_timer;
1980
1981 stt->wq = alloc_workqueue("kbase_pm_shader_poweroff", WQ_HIGHPRI | WQ_UNBOUND, 1);
1982 if (!stt->wq)
1983 return -ENOMEM;
1984
1985 INIT_WORK(&stt->work, shader_poweroff_timer_stop_callback);
1986
1987 stt->needed = false;
1988 hrtimer_init(&stt->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1989 stt->timer.function = shader_tick_timer_callback;
1990 stt->configured_interval = HR_TIMER_DELAY_NSEC(DEFAULT_PM_GPU_POWEROFF_TICK_NS);
1991 stt->default_ticks = DEFAULT_PM_POWEROFF_TICK_SHADER;
1992 stt->configured_ticks = stt->default_ticks;
1993
1994 #if MALI_USE_CSF
1995 kbdev->pm.backend.core_idle_wq = alloc_workqueue("coreoff_wq", WQ_HIGHPRI | WQ_UNBOUND, 1);
1996 if (!kbdev->pm.backend.core_idle_wq) {
1997 destroy_workqueue(stt->wq);
1998 return -ENOMEM;
1999 }
2000
2001 INIT_WORK(&kbdev->pm.backend.core_idle_work, core_idle_worker);
2002 #endif
2003
2004 return 0;
2005 }
2006
2007 void kbase_pm_state_machine_term(struct kbase_device *kbdev)
2008 {
2009 #if MALI_USE_CSF
2010 destroy_workqueue(kbdev->pm.backend.core_idle_wq);
2011 #endif
2012 hrtimer_cancel(&kbdev->pm.backend.shader_tick_timer.timer);
2013 destroy_workqueue(kbdev->pm.backend.shader_tick_timer.wq);
2014 }
2015
2016 void kbase_pm_reset_start_locked(struct kbase_device *kbdev)
2017 {
2018 struct kbase_pm_backend_data *backend = &kbdev->pm.backend;
2019
2020 lockdep_assert_held(&kbdev->hwaccess_lock);
2021
2022 backend->in_reset = true;
2023 backend->l2_state = KBASE_L2_RESET_WAIT;
2024 #if !MALI_USE_CSF
2025 backend->shaders_state = KBASE_SHADERS_RESET_WAIT;
2026 #else
2027 /* MCU state machine is exercised only after the initial load/boot
2028 * of the firmware.
2029 */
2030 if (likely(kbdev->csf.firmware_inited)) {
2031 backend->mcu_state = KBASE_MCU_RESET_WAIT;
2032 #ifdef KBASE_PM_RUNTIME
2033 backend->exit_gpu_sleep_mode = true;
2034 #endif
2035 kbdev->csf.firmware_reload_needed = true;
2036 } else {
2037 WARN_ON(backend->mcu_state != KBASE_MCU_OFF);
2038 }
2039 #endif
2040
2041 /* We're in a reset, so hwcnt will have been synchronously disabled by
2042 * this function's caller as part of the reset process. We therefore
2043 * know that any call to kbase_hwcnt_context_disable_atomic, if
2044 * required to sync the hwcnt refcount with our internal state, is
2045 * guaranteed to succeed.
2046 */
2047 backend->hwcnt_desired = false;
2048 if (!backend->hwcnt_disabled) {
2049 WARN_ON(!kbase_hwcnt_context_disable_atomic(
2050 kbdev->hwcnt_gpu_ctx));
2051 backend->hwcnt_disabled = true;
2052 }
2053
2054 shader_poweroff_timer_queue_cancel(kbdev);
2055 }
2056
2057 void kbase_pm_reset_complete(struct kbase_device *kbdev)
2058 {
2059 struct kbase_pm_backend_data *backend = &kbdev->pm.backend;
2060 unsigned long flags;
2061
2062 WARN_ON(!kbase_reset_gpu_is_active(kbdev));
2063 spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
2064
/* The GPU has just been reset, which implicitly flushes the L2
 * cache, so the pending cache flush operation (if there was any)
 * can safely be marked as complete and the waiter unblocked.
 * No work can be submitted whilst the GPU reset is ongoing.
 */
2070 kbase_gpu_cache_clean_wait_complete(kbdev);
2071 backend->in_reset = false;
2072 #if MALI_USE_CSF && defined(KBASE_PM_RUNTIME)
2073 backend->gpu_wakeup_override = false;
2074 #endif
2075 kbase_pm_update_state(kbdev);
2076
2077 spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
2078 }
2079
2080 /* Timeout for kbase_pm_wait_for_desired_state when wait_event_killable has
2081 * aborted due to a fatal signal. If the time spent waiting has exceeded this
2082 * threshold then there is most likely a hardware issue.
2083 */
2084 #define PM_TIMEOUT_MS (5000) /* 5s */
2085
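/**
 * kbase_pm_timed_out - Handle a timeout on a power state transition
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Dumps the desired and current core (and MCU) states to the kernel log and
 * then triggers a GPU reset, as a timed-out power transition most likely
 * indicates a hardware problem.
 */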
2086 static void kbase_pm_timed_out(struct kbase_device *kbdev)
2087 {
2088 unsigned long flags;
2089
2090 dev_err(kbdev->dev, "Power transition timed out unexpectedly\n");
2091 #if !MALI_USE_CSF
2092 CSTD_UNUSED(flags);
2093 dev_err(kbdev->dev, "Desired state :\n");
2094 dev_err(kbdev->dev, "\tShader=%016llx\n",
2095 kbdev->pm.backend.shaders_desired ? kbdev->pm.backend.shaders_avail : 0);
2096 #else
2097 spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
2098 dev_err(kbdev->dev, "\tMCU desired = %d\n",
2099 kbase_pm_is_mcu_desired(kbdev));
2100 dev_err(kbdev->dev, "\tMCU sw state = %d\n",
2101 kbdev->pm.backend.mcu_state);
2102 spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
2103 #endif
2104 dev_err(kbdev->dev, "Current state :\n");
2105 dev_err(kbdev->dev, "\tShader=%08x%08x\n",
2106 kbase_reg_read(kbdev,
2107 GPU_CONTROL_REG(SHADER_READY_HI)),
2108 kbase_reg_read(kbdev,
2109 GPU_CONTROL_REG(SHADER_READY_LO)));
2110 dev_err(kbdev->dev, "\tTiler =%08x%08x\n",
2111 kbase_reg_read(kbdev,
2112 GPU_CONTROL_REG(TILER_READY_HI)),
2113 kbase_reg_read(kbdev,
2114 GPU_CONTROL_REG(TILER_READY_LO)));
2115 dev_err(kbdev->dev, "\tL2 =%08x%08x\n",
2116 kbase_reg_read(kbdev,
2117 GPU_CONTROL_REG(L2_READY_HI)),
2118 kbase_reg_read(kbdev,
2119 GPU_CONTROL_REG(L2_READY_LO)));
2120 #if MALI_USE_CSF
2121 dev_err(kbdev->dev, "\tMCU status = %d\n",
2122 kbase_reg_read(kbdev, GPU_CONTROL_REG(MCU_STATUS)));
2123 #endif
2124 dev_err(kbdev->dev, "Cores transitioning :\n");
2125 dev_err(kbdev->dev, "\tShader=%08x%08x\n",
2126 kbase_reg_read(kbdev, GPU_CONTROL_REG(
2127 SHADER_PWRTRANS_HI)),
2128 kbase_reg_read(kbdev, GPU_CONTROL_REG(
2129 SHADER_PWRTRANS_LO)));
2130 dev_err(kbdev->dev, "\tTiler =%08x%08x\n",
2131 kbase_reg_read(kbdev, GPU_CONTROL_REG(
2132 TILER_PWRTRANS_HI)),
2133 kbase_reg_read(kbdev, GPU_CONTROL_REG(
2134 TILER_PWRTRANS_LO)));
2135 dev_err(kbdev->dev, "\tL2 =%08x%08x\n",
2136 kbase_reg_read(kbdev, GPU_CONTROL_REG(
2137 L2_PWRTRANS_HI)),
2138 kbase_reg_read(kbdev, GPU_CONTROL_REG(
2139 L2_PWRTRANS_LO)));
2140
2141 dev_err(kbdev->dev, "Sending reset to GPU - all running jobs will be lost\n");
2142 if (kbase_prepare_to_reset_gpu(kbdev,
2143 RESET_FLAGS_HWC_UNRECOVERABLE_ERROR))
2144 kbase_reset_gpu(kbdev);
2145 }
2146
2147 int kbase_pm_wait_for_l2_powered(struct kbase_device *kbdev)
2148 {
2149 unsigned long flags;
2150 unsigned long timeout;
2151 long remaining;
2152 int err = 0;
2153
2154 spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
2155 kbase_pm_update_state(kbdev);
2156 spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
2157
2158 #if MALI_USE_CSF
2159 timeout = kbase_csf_timeout_in_jiffies(PM_TIMEOUT_MS);
2160 #else
2161 timeout = msecs_to_jiffies(PM_TIMEOUT_MS);
2162 #endif
2163
2164 /* Wait for cores */
2165 #if KERNEL_VERSION(4, 13, 1) <= LINUX_VERSION_CODE
2166 remaining = wait_event_killable_timeout(
2167 #else
2168 remaining = wait_event_timeout(
2169 #endif
2170 kbdev->pm.backend.gpu_in_desired_state_wait,
2171 kbase_pm_is_in_desired_state_with_l2_powered(kbdev), timeout);
2172
2173 if (!remaining) {
2174 kbase_pm_timed_out(kbdev);
2175 err = -ETIMEDOUT;
2176 } else if (remaining < 0) {
2177 dev_info(
2178 kbdev->dev,
2179 "Wait for desired PM state with L2 powered got interrupted");
2180 err = (int)remaining;
2181 }
2182
2183 return err;
2184 }
2185
2186 int kbase_pm_wait_for_desired_state(struct kbase_device *kbdev)
2187 {
2188 unsigned long flags;
2189 long remaining;
2190 #if MALI_USE_CSF
2191 long timeout = kbase_csf_timeout_in_jiffies(PM_TIMEOUT_MS);
2192 #else
2193 long timeout = msecs_to_jiffies(PM_TIMEOUT_MS);
2194 #endif
2195 int err = 0;
2196
2197 /* Let the state machine latch the most recent desired state. */
2198 spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
2199 kbase_pm_update_state(kbdev);
2200 spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
2201
2202 /* Wait for cores */
2203 #if KERNEL_VERSION(4, 13, 1) <= LINUX_VERSION_CODE
2204 remaining = wait_event_killable_timeout(
2205 kbdev->pm.backend.gpu_in_desired_state_wait,
2206 kbase_pm_is_in_desired_state(kbdev), timeout);
2207 #else
2208 remaining = wait_event_timeout(
2209 kbdev->pm.backend.gpu_in_desired_state_wait,
2210 kbase_pm_is_in_desired_state(kbdev), timeout);
2211 #endif
2212
2213 if (!remaining) {
2214 kbase_pm_timed_out(kbdev);
2215 err = -ETIMEDOUT;
2216 } else if (remaining < 0) {
2217 dev_info(kbdev->dev,
2218 "Wait for desired PM state got interrupted");
2219 err = (int)remaining;
2220 }
2221
2222 return err;
2223 }
2224 KBASE_EXPORT_TEST_API(kbase_pm_wait_for_desired_state);
2225
2226 void kbase_pm_enable_interrupts(struct kbase_device *kbdev)
2227 {
2228 unsigned long flags;
2229
2230 KBASE_DEBUG_ASSERT(kbdev != NULL);
2231 /*
2232 * Clear all interrupts,
2233 * and unmask them all.
2234 */
2235 spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
2236 kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), GPU_IRQ_REG_ALL);
2237 kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), GPU_IRQ_REG_ALL);
2238 spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
2239
2240 kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_CLEAR), 0xFFFFFFFF);
2241 kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_MASK), 0xFFFFFFFF);
2242
2243 kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), 0xFFFFFFFF);
2244 #if MALI_USE_CSF
/* Enable only the page fault interrupt bits */
2246 kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), 0xFFFF);
2247 #else
2248 kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), 0xFFFFFFFF);
2249 #endif
2250 }
2251
2252 KBASE_EXPORT_TEST_API(kbase_pm_enable_interrupts);
2253
2254 void kbase_pm_disable_interrupts_nolock(struct kbase_device *kbdev)
2255 {
2256 KBASE_DEBUG_ASSERT(kbdev != NULL);
2257 /*
2258 * Mask all interrupts,
2259 * and clear them all.
2260 */
2261 lockdep_assert_held(&kbdev->hwaccess_lock);
2262
2263 kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), 0);
2264 kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), GPU_IRQ_REG_ALL);
2265 kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_MASK), 0);
2266 kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_CLEAR), 0xFFFFFFFF);
2267
2268 kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), 0);
2269 kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), 0xFFFFFFFF);
2270 }
2271
2272 void kbase_pm_disable_interrupts(struct kbase_device *kbdev)
2273 {
2274 unsigned long flags;
2275
2276 spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
2277 kbase_pm_disable_interrupts_nolock(kbdev);
2278 spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
2279 }
2280
2281 KBASE_EXPORT_TEST_API(kbase_pm_disable_interrupts);
2282
2283 #if MALI_USE_CSF
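/**
 * update_user_reg_page_mapping - Unmap the USER register page from all contexts
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Zaps the mapping of the USER register page for every kbase context so that it
 * can be re-established against the appropriate page (dummy page or real
 * register page) after a GPU power transition. The caller must hold
 * kbdev->pm.lock.
 */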
2284 static void update_user_reg_page_mapping(struct kbase_device *kbdev)
2285 {
2286 lockdep_assert_held(&kbdev->pm.lock);
2287
2288 if (kbdev->csf.mali_file_inode) {
/* Zap the PTE corresponding to the mapping of the USER
 * register page for all kbase contexts.
 */
2292 unmap_mapping_range(kbdev->csf.mali_file_inode->i_mapping,
2293 BASEP_MEM_CSF_USER_REG_PAGE_HANDLE,
2294 PAGE_SIZE, 1);
2295 }
2296 }
2297 #endif
2298
2300 /*
2301 * pmu layout:
2302 * 0x0000: PMU TAG (RO) (0xCAFECAFE)
2303 * 0x0004: PMU VERSION ID (RO) (0x00000000)
2304 * 0x0008: CLOCK ENABLE (RW) (31:1 SBZ, 0 CLOCK STATE)
2305 */
2306 void kbase_pm_clock_on(struct kbase_device *kbdev, bool is_resume)
2307 {
2308 struct kbase_pm_backend_data *backend = &kbdev->pm.backend;
2309 bool reset_required = is_resume;
2310 unsigned long flags;
2311
2312 KBASE_DEBUG_ASSERT(kbdev != NULL);
2313 #if !MALI_USE_CSF
2314 lockdep_assert_held(&kbdev->js_data.runpool_mutex);
2315 #endif /* !MALI_USE_CSF */
2316 lockdep_assert_held(&kbdev->pm.lock);
2317
2318 #ifdef CONFIG_MALI_ARBITER_SUPPORT
2319 if (WARN_ON(kbase_pm_is_gpu_lost(kbdev))) {
2320 dev_err(kbdev->dev,
2321 "%s: Cannot power up while GPU lost", __func__);
2322 return;
2323 }
2324 #endif
2325
2326 if (backend->gpu_powered) {
2327 #if MALI_USE_CSF && defined(KBASE_PM_RUNTIME)
2328 if (backend->gpu_idled) {
2329 backend->callback_power_runtime_gpu_active(kbdev);
2330 backend->gpu_idled = false;
2331 }
2332 #endif
2333 /* Already turned on */
2334 if (kbdev->poweroff_pending)
2335 kbase_pm_enable_interrupts(kbdev);
2336 kbdev->poweroff_pending = false;
2337 KBASE_DEBUG_ASSERT(!is_resume);
2338 return;
2339 }
2340
2341 kbdev->poweroff_pending = false;
2342
2343 KBASE_KTRACE_ADD(kbdev, PM_GPU_ON, NULL, 0u);
2344
2345 if (is_resume && backend->callback_power_resume) {
2346 backend->callback_power_resume(kbdev);
2347 return;
2348 } else if (backend->callback_power_on) {
2349 reset_required = backend->callback_power_on(kbdev);
2350 }
2351
2352 spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
2353 backend->gpu_powered = true;
2354 spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
2355
2356 #if MALI_USE_CSF
/* The GPU has been turned on, so switch to the actual register page */
2358 update_user_reg_page_mapping(kbdev);
2359 #endif
2360
2361 if (reset_required) {
2362 /* GPU state was lost, reset GPU to ensure it is in a
2363 * consistent state
2364 */
2365 kbase_pm_init_hw(kbdev, PM_ENABLE_IRQS);
2366 }
2367 #ifdef CONFIG_MALI_ARBITER_SUPPORT
2368 else {
2369 if (kbdev->arb.arb_if) {
2370 struct kbase_arbiter_vm_state *arb_vm_state =
2371 kbdev->pm.arb_vm_state;
2372
2373 /* In the case that the GPU has just been granted by
2374 * the Arbiter, a reset will have already been done.
2375 * However, it is still necessary to initialize the GPU.
2376 */
2377 if (arb_vm_state->vm_arb_starting)
2378 kbase_pm_init_hw(kbdev, PM_ENABLE_IRQS |
2379 PM_NO_RESET);
2380 }
2381 }
/*
 * At this point the GPU has transitioned to ON, so a repartitioning
 * may have occurred. In that case the current config should be read
 * again.
 */
2387 kbase_gpuprops_get_curr_config_props(kbdev,
2388 &kbdev->gpu_props.curr_config);
2389 #endif /* CONFIG_MALI_ARBITER_SUPPORT */
2390
2391 mutex_lock(&kbdev->mmu_hw_mutex);
2392 spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
2393 kbase_ctx_sched_restore_all_as(kbdev);
2394 spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
2395 mutex_unlock(&kbdev->mmu_hw_mutex);
2396
2397 if (kbdev->dummy_job_wa.flags &
2398 KBASE_DUMMY_JOB_WA_FLAG_LOGICAL_SHADER_POWER) {
2399 spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
2400 kbase_dummy_job_wa_execute(kbdev,
2401 kbase_pm_get_present_cores(kbdev,
2402 KBASE_PM_CORE_SHADER));
2403 spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
2404 }
2405
2406 /* Enable the interrupts */
2407 kbase_pm_enable_interrupts(kbdev);
2408
2409 /* Turn on the L2 caches */
2410 spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
2411 backend->gpu_ready = true;
2412 backend->l2_desired = true;
2413 #if MALI_USE_CSF
2414 if (reset_required) {
/* A GPU reset was done after the power on, so send the post
 * reset event instead. This is okay because the GPU power-off
 * event is the same as the pre-GPU-reset event.
 */
2419 kbase_ipa_control_handle_gpu_reset_post(kbdev);
2420 } else {
2421 kbase_ipa_control_handle_gpu_power_on(kbdev);
2422 }
2423 #endif
2424 kbase_pm_update_state(kbdev);
2425 spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
2426
2427 #if MALI_USE_CSF && defined(KBASE_PM_RUNTIME)
/* The GPU is now powered up. Invoke the GPU-active callback, as the
 * GPU-idle callback would have been invoked before the power down.
 */
2431 if (backend->gpu_idled) {
2432 backend->callback_power_runtime_gpu_active(kbdev);
2433 backend->gpu_idled = false;
2434 }
2435 #endif
2436
2437 }
2438
2439 KBASE_EXPORT_TEST_API(kbase_pm_clock_on);
2440
2441 bool kbase_pm_clock_off(struct kbase_device *kbdev)
2442 {
2443 unsigned long flags;
2444
2445 KBASE_DEBUG_ASSERT(kbdev != NULL);
2446 lockdep_assert_held(&kbdev->pm.lock);
2447
2448 /* ASSERT that the cores should now be unavailable. No lock needed. */
2449 WARN_ON(kbdev->pm.backend.shaders_state != KBASE_SHADERS_OFF_CORESTACK_OFF);
2450
2451 kbdev->poweroff_pending = true;
2452
2453 if (!kbdev->pm.backend.gpu_powered) {
2454 /* Already turned off */
2455 return true;
2456 }
2457
2458 KBASE_KTRACE_ADD(kbdev, PM_GPU_OFF, NULL, 0u);
2459
2460 /* Disable interrupts. This also clears any outstanding interrupts */
2461 kbase_pm_disable_interrupts(kbdev);
2462 /* Ensure that any IRQ handlers have finished */
2463 kbase_synchronize_irqs(kbdev);
2464
2465 spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
2466
2467 if (atomic_read(&kbdev->faults_pending)) {
/* Page/bus faults are still being processed. The GPU cannot
 * be powered off until they have completed.
 */
2471 spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
2472 return false;
2473 }
2474
2475 kbase_pm_cache_snoop_disable(kbdev);
2476 #if MALI_USE_CSF
2477 kbase_ipa_control_handle_gpu_power_off(kbdev);
2478 #endif
2479
2480 if (kbase_is_gpu_removed(kbdev)
2481 #ifdef CONFIG_MALI_ARBITER_SUPPORT
2482 || kbase_pm_is_gpu_lost(kbdev)) {
2483 #else
2484 ) {
2485 #endif
2486 /* Ensure we unblock any threads that are stuck waiting
2487 * for the GPU
2488 */
2489 kbase_gpu_cache_clean_wait_complete(kbdev);
2490 }
2491
2492 kbdev->pm.backend.gpu_ready = false;
2493
2494 /* The GPU power may be turned off from this point */
2495 kbdev->pm.backend.gpu_powered = false;
2496
2497 spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
2498
2499 #if MALI_USE_CSF
2500 /* GPU is about to be turned off, switch to dummy page */
2501 update_user_reg_page_mapping(kbdev);
2502 #endif
2503
2504 #ifdef CONFIG_MALI_ARBITER_SUPPORT
2505 kbase_arbiter_pm_vm_event(kbdev, KBASE_VM_GPU_IDLE_EVENT);
2506 #endif /* CONFIG_MALI_ARBITER_SUPPORT */
2507
2508 if (kbdev->pm.backend.callback_power_off)
2509 kbdev->pm.backend.callback_power_off(kbdev);
2510 return true;
2511 }
2512
2513 KBASE_EXPORT_TEST_API(kbase_pm_clock_off);
2514
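/**
 * struct kbasep_reset_timeout_data - State used to track a GPU reset timeout
 *
 * @timer:     Timer used to enforce a timeout on the reset
 * @timed_out: Set to true by the timer callback if the reset timed out
 * @kbdev:     The kbase device structure for the device
 */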
2515 struct kbasep_reset_timeout_data {
2516 struct hrtimer timer;
2517 bool timed_out;
2518 struct kbase_device *kbdev;
2519 };
2520
2521 void kbase_pm_reset_done(struct kbase_device *kbdev)
2522 {
2523 KBASE_DEBUG_ASSERT(kbdev != NULL);
2524 kbdev->pm.backend.reset_done = true;
2525 wake_up(&kbdev->pm.backend.reset_done_wait);
2526 }
2527
2528 /**
2529 * kbase_pm_wait_for_reset - Wait for a reset to happen
2530 *
2531 * @kbdev: Kbase device
2532 *
2533 * Wait for the %RESET_COMPLETED IRQ to occur, then reset the waiting state.
2534 */
2535 static void kbase_pm_wait_for_reset(struct kbase_device *kbdev)
2536 {
2537 lockdep_assert_held(&kbdev->pm.lock);
2538
2539 wait_event(kbdev->pm.backend.reset_done_wait,
2540 (kbdev->pm.backend.reset_done));
2541 kbdev->pm.backend.reset_done = false;
2542 }
2543
2544 KBASE_EXPORT_TEST_API(kbase_pm_reset_done);
2545
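/**
 * kbasep_reset_timeout - Timer callback invoked when a GPU reset times out
 *
 * @timer: The hrtimer embedded in the reset timeout data
 *
 * Marks the reset as timed out and wakes any thread waiting in
 * kbase_pm_wait_for_reset(), even though the reset has not completed.
 *
 * Return: Always HRTIMER_NORESTART.
 */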
2546 static enum hrtimer_restart kbasep_reset_timeout(struct hrtimer *timer)
2547 {
2548 struct kbasep_reset_timeout_data *rtdata =
2549 container_of(timer, struct kbasep_reset_timeout_data, timer);
2550
2551 rtdata->timed_out = true;
2552
/* Wake up the wait queue so that kbase_pm_init_hw stops waiting,
 * even though the reset hasn't completed.
 */
2556 kbase_pm_reset_done(rtdata->kbdev);
2557
2558 return HRTIMER_NORESTART;
2559 }
2560
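/**
 * kbase_set_gpu_quirks - Work out the value for the GPU configuration quirks
 *
 * @kbdev:   The kbase device structure for the device (must be a valid pointer)
 * @prod_id: The GPU product ID
 *
 * Derives kbdev->hw_quirks_gpu from the CSF_CONFIG or JM_CONFIG register,
 * applying the tMIx coherency workaround, the optional "idvs-group-size"
 * Devicetree property and manual core stack power control where applicable.
 *
 * Return: 0 on success, or -EIO if the GPU has been removed.
 */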
2561 static int kbase_set_gpu_quirks(struct kbase_device *kbdev, const u32 prod_id)
2562 {
2563 #if MALI_USE_CSF
2564 kbdev->hw_quirks_gpu =
2565 kbase_reg_read(kbdev, GPU_CONTROL_REG(CSF_CONFIG));
2566 #else
2567 u32 hw_quirks_gpu = kbase_reg_read(kbdev, GPU_CONTROL_REG(JM_CONFIG));
2568
2569 if (GPU_ID2_MODEL_MATCH_VALUE(prod_id) == GPU_ID2_PRODUCT_TMIX) {
2570 /* Only for tMIx */
2571 u32 coherency_features;
2572
2573 coherency_features = kbase_reg_read(kbdev,
2574 GPU_CONTROL_REG(COHERENCY_FEATURES));
2575
/* (COHERENCY_ACE_LITE | COHERENCY_ACE) was incorrectly
 * documented for tMIx, so force the correct value here.
 */
2579 if (coherency_features ==
2580 COHERENCY_FEATURE_BIT(COHERENCY_ACE)) {
2581 hw_quirks_gpu |= (COHERENCY_ACE_LITE | COHERENCY_ACE)
2582 << JM_FORCE_COHERENCY_FEATURES_SHIFT;
2583 }
2584 }
2585
2586 if (kbase_is_gpu_removed(kbdev))
2587 return -EIO;
2588
2589 kbdev->hw_quirks_gpu = hw_quirks_gpu;
2590
2591 #endif /* !MALI_USE_CSF */
2592 if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_IDVS_GROUP_SIZE)) {
2593 int default_idvs_group_size = 0xF;
2594 u32 group_size = 0;
2595
2596 if (of_property_read_u32(kbdev->dev->of_node, "idvs-group-size",
2597 &group_size))
2598 group_size = default_idvs_group_size;
2599
2600 if (group_size > IDVS_GROUP_MAX_SIZE) {
2601 dev_err(kbdev->dev,
2602 "idvs-group-size of %d is too large. Maximum value is %d",
2603 group_size, IDVS_GROUP_MAX_SIZE);
2604 group_size = default_idvs_group_size;
2605 }
2606
2607 kbdev->hw_quirks_gpu |= group_size << IDVS_GROUP_SIZE_SHIFT;
2608 }
2609
2610 #define MANUAL_POWER_CONTROL ((u32)(1 << 8))
2611 if (corestack_driver_control)
2612 kbdev->hw_quirks_gpu |= MANUAL_POWER_CONTROL;
2613
2614 return 0;
2615 }
2616
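/**
 * kbase_set_sc_quirks - Work out the value for the shader core config quirks
 *
 * @kbdev:   The kbase device structure for the device (must be a valid pointer)
 * @prod_id: The GPU product ID
 *
 * Derives kbdev->hw_quirks_sc from the SHADER_CONFIG register, applying
 * product- and issue-specific workarounds.
 *
 * Return: 0 on success, or -EIO if the GPU has been removed.
 */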
2617 static int kbase_set_sc_quirks(struct kbase_device *kbdev, const u32 prod_id)
2618 {
2619 u32 hw_quirks_sc = kbase_reg_read(kbdev,
2620 GPU_CONTROL_REG(SHADER_CONFIG));
2621
2622 if (kbase_is_gpu_removed(kbdev))
2623 return -EIO;
2624
2625 if (prod_id < 0x750 || prod_id == 0x6956) /* T60x, T62x, T72x */
2626 hw_quirks_sc |= SC_LS_ATTR_CHECK_DISABLE;
2627 else if (prod_id >= 0x750 && prod_id <= 0x880) /* T76x, T8xx */
2628 hw_quirks_sc |= SC_LS_ALLOW_ATTR_TYPES;
2629
2630 if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TTRX_2968_TTRX_3162))
2631 hw_quirks_sc |= SC_VAR_ALGORITHM;
2632
2633 if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_TLS_HASHING))
2634 hw_quirks_sc |= SC_TLS_HASH_ENABLE;
2635
2636 kbdev->hw_quirks_sc = hw_quirks_sc;
2637
2638 return 0;
2639 }
2640
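/**
 * kbase_set_tiler_quirks - Work out the value for the tiler config quirks
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Derives kbdev->hw_quirks_tiler from the TILER_CONFIG register, setting the
 * tiler clock gate override where the hardware issue requires it.
 *
 * Return: 0 on success, or -EIO if the GPU has been removed.
 */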
2641 static int kbase_set_tiler_quirks(struct kbase_device *kbdev)
2642 {
2643 u32 hw_quirks_tiler = kbase_reg_read(kbdev,
2644 GPU_CONTROL_REG(TILER_CONFIG));
2645
2646 if (kbase_is_gpu_removed(kbdev))
2647 return -EIO;
2648
2649 /* Set tiler clock gate override if required */
2650 if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_T76X_3953))
2651 hw_quirks_tiler |= TC_CLOCK_GATE_OVERRIDE;
2652
2653 kbdev->hw_quirks_tiler = hw_quirks_tiler;
2654
2655 return 0;
2656 }
2657
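/**
 * kbase_pm_hw_issues_detect - Determine the quirk values for the config registers
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Each quirk value is taken from the Devicetree if the corresponding property
 * (quirks_gpu, quirks_sc, quirks_tiler, quirks_mmu) is present, otherwise it
 * is derived from the hardware.
 *
 * Return: 0 on success, or an error code if reading a register failed.
 */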
2658 static int kbase_pm_hw_issues_detect(struct kbase_device *kbdev)
2659 {
2660 struct device_node *np = kbdev->dev->of_node;
2661 const u32 gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
2662 const u32 prod_id = (gpu_id & GPU_ID_VERSION_PRODUCT_ID) >>
2663 GPU_ID_VERSION_PRODUCT_ID_SHIFT;
2664 int error = 0;
2665
2666 kbdev->hw_quirks_gpu = 0;
2667 kbdev->hw_quirks_sc = 0;
2668 kbdev->hw_quirks_tiler = 0;
2669 kbdev->hw_quirks_mmu = 0;
2670
2671 if (!of_property_read_u32(np, "quirks_gpu", &kbdev->hw_quirks_gpu)) {
2672 dev_info(kbdev->dev,
2673 "Found quirks_gpu = [0x%x] in Devicetree\n",
2674 kbdev->hw_quirks_gpu);
2675 } else {
2676 error = kbase_set_gpu_quirks(kbdev, prod_id);
2677 if (error)
2678 return error;
2679 }
2680
2681 if (!of_property_read_u32(np, "quirks_sc",
2682 &kbdev->hw_quirks_sc)) {
2683 dev_info(kbdev->dev,
2684 "Found quirks_sc = [0x%x] in Devicetree\n",
2685 kbdev->hw_quirks_sc);
2686 } else {
2687 error = kbase_set_sc_quirks(kbdev, prod_id);
2688 if (error)
2689 return error;
2690 }
2691
2692 if (!of_property_read_u32(np, "quirks_tiler",
2693 &kbdev->hw_quirks_tiler)) {
2694 dev_info(kbdev->dev,
2695 "Found quirks_tiler = [0x%x] in Devicetree\n",
2696 kbdev->hw_quirks_tiler);
2697 } else {
2698 error = kbase_set_tiler_quirks(kbdev);
2699 if (error)
2700 return error;
2701 }
2702
2703 if (!of_property_read_u32(np, "quirks_mmu",
2704 &kbdev->hw_quirks_mmu)) {
2705 dev_info(kbdev->dev,
2706 "Found quirks_mmu = [0x%x] in Devicetree\n",
2707 kbdev->hw_quirks_mmu);
2708 } else {
2709 error = kbase_set_mmu_quirks(kbdev);
2710 }
2711
2712 return error;
2713 }
2714
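/**
 * kbase_pm_hw_issues_apply - Write the quirk values to the config registers
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Applies the shader core, tiler, L2/MMU and GPU (JM or CSF) quirk values that
 * were determined by kbase_pm_hw_issues_detect().
 */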
2715 static void kbase_pm_hw_issues_apply(struct kbase_device *kbdev)
2716 {
2717 kbase_reg_write(kbdev, GPU_CONTROL_REG(SHADER_CONFIG),
2718 kbdev->hw_quirks_sc);
2719
2720 kbase_reg_write(kbdev, GPU_CONTROL_REG(TILER_CONFIG),
2721 kbdev->hw_quirks_tiler);
2722
2723 kbase_reg_write(kbdev, GPU_CONTROL_REG(L2_MMU_CONFIG),
2724 kbdev->hw_quirks_mmu);
2725 #if MALI_USE_CSF
2726 kbase_reg_write(kbdev, GPU_CONTROL_REG(CSF_CONFIG),
2727 kbdev->hw_quirks_gpu);
2728 #else
2729 kbase_reg_write(kbdev, GPU_CONTROL_REG(JM_CONFIG),
2730 kbdev->hw_quirks_gpu);
2731 #endif
2732 }
2733
2734 void kbase_pm_cache_snoop_enable(struct kbase_device *kbdev)
2735 {
2736 if ((kbdev->current_gpu_coherency_mode == COHERENCY_ACE) &&
2737 !kbdev->cci_snoop_enabled) {
2738 #if IS_ENABLED(CONFIG_ARM64)
2739 if (kbdev->snoop_enable_smc != 0)
2740 kbase_invoke_smc_fid(kbdev->snoop_enable_smc, 0, 0, 0);
2741 #endif /* CONFIG_ARM64 */
2742 dev_dbg(kbdev->dev, "MALI - CCI Snoops - Enabled\n");
2743 kbdev->cci_snoop_enabled = true;
2744 }
2745 }
2746
2747 void kbase_pm_cache_snoop_disable(struct kbase_device *kbdev)
2748 {
2749 if (kbdev->cci_snoop_enabled) {
2750 #if IS_ENABLED(CONFIG_ARM64)
2751 if (kbdev->snoop_disable_smc != 0) {
2752 mali_cci_flush_l2(kbdev);
2753 kbase_invoke_smc_fid(kbdev->snoop_disable_smc, 0, 0, 0);
2754 }
2755 #endif /* CONFIG_ARM64 */
2756 dev_dbg(kbdev->dev, "MALI - CCI Snoops Disabled\n");
2757 kbdev->cci_snoop_enabled = false;
2758 }
2759 }
2760
2761 #if !MALI_USE_CSF
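/**
 * reenable_protected_mode_hwcnt - Re-enable hardware counters after protected mode
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Re-enables GPU hardware counters if they were disabled for entering
 * protected mode.
 */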
2762 static void reenable_protected_mode_hwcnt(struct kbase_device *kbdev)
2763 {
2764 unsigned long irq_flags;
2765
2766 spin_lock_irqsave(&kbdev->hwaccess_lock, irq_flags);
2767 kbdev->protected_mode_hwcnt_desired = true;
2768 if (kbdev->protected_mode_hwcnt_disabled) {
2769 kbase_hwcnt_context_enable(kbdev->hwcnt_gpu_ctx);
2770 kbdev->protected_mode_hwcnt_disabled = false;
2771 }
2772 spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags);
2773 }
2774 #endif
2775
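/**
 * kbase_pm_do_reset - Soft reset the GPU
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Issues a soft reset (via the platform callback if one is registered) and
 * waits for the RESET_COMPLETED interrupt, guarded by a timeout. If the soft
 * reset does not complete, a hard reset is attempted, except when running
 * under an arbiter.
 *
 * Return: 0 on success, a negative error code on failure.
 */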
2776 static int kbase_pm_do_reset(struct kbase_device *kbdev)
2777 {
2778 struct kbasep_reset_timeout_data rtdata;
2779 int ret;
2780
2781 KBASE_KTRACE_ADD(kbdev, CORE_GPU_SOFT_RESET, NULL, 0);
2782
2783 KBASE_TLSTREAM_JD_GPU_SOFT_RESET(kbdev, kbdev);
2784
2785 if (kbdev->pm.backend.callback_soft_reset) {
2786 ret = kbdev->pm.backend.callback_soft_reset(kbdev);
2787 if (ret < 0)
2788 return ret;
2789 else if (ret > 0)
2790 return 0;
2791 } else {
2792 kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
2793 GPU_COMMAND_SOFT_RESET);
2794 }
2795
2796 /* Unmask the reset complete interrupt only */
2797 kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), RESET_COMPLETED);
2798
2799 /* Initialize a structure for tracking the status of the reset */
2800 rtdata.kbdev = kbdev;
2801 rtdata.timed_out = false;
2802
2803 /* Create a timer to use as a timeout on the reset */
2804 hrtimer_init_on_stack(&rtdata.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2805 rtdata.timer.function = kbasep_reset_timeout;
2806
2807 hrtimer_start(&rtdata.timer, HR_TIMER_DELAY_MSEC(RESET_TIMEOUT),
2808 HRTIMER_MODE_REL);
2809
2810 /* Wait for the RESET_COMPLETED interrupt to be raised */
2811 kbase_pm_wait_for_reset(kbdev);
2812
2813 if (!rtdata.timed_out) {
2814 /* GPU has been reset */
2815 hrtimer_cancel(&rtdata.timer);
2816 destroy_hrtimer_on_stack(&rtdata.timer);
2817 return 0;
2818 }
2819
2820 /* No interrupt has been received - check if the RAWSTAT register says
2821 * the reset has completed
2822 */
2823 if ((kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_RAWSTAT)) &
2824 RESET_COMPLETED)) {
2825 /* The interrupt is set in the RAWSTAT; this suggests that the
2826 * interrupts are not getting to the CPU
2827 */
2828 dev_err(kbdev->dev, "Reset interrupt didn't reach CPU. Check interrupt assignments.\n");
2829 /* If interrupts aren't working we can't continue. */
2830 destroy_hrtimer_on_stack(&rtdata.timer);
2831 return -EINVAL;
2832 }
2833
2834 if (kbase_is_gpu_removed(kbdev)) {
2835 dev_dbg(kbdev->dev, "GPU has been removed, reset no longer needed.\n");
2836 destroy_hrtimer_on_stack(&rtdata.timer);
2837 return -EINVAL;
2838 }
2839
2840 /* The GPU doesn't seem to be responding to the reset so try a hard
2841 * reset, but only when NOT in arbitration mode.
2842 */
2843 #ifdef CONFIG_MALI_ARBITER_SUPPORT
2844 if (!kbdev->arb.arb_if) {
2845 #endif /* CONFIG_MALI_ARBITER_SUPPORT */
2846 dev_err(kbdev->dev, "Failed to soft-reset GPU (timed out after %d ms), now attempting a hard reset\n",
2847 RESET_TIMEOUT);
2848 KBASE_KTRACE_ADD(kbdev, CORE_GPU_HARD_RESET, NULL, 0);
2849 kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
2850 GPU_COMMAND_HARD_RESET);
2851
2852 /* Restart the timer to wait for the hard reset to complete */
2853 rtdata.timed_out = false;
2854
2855 hrtimer_start(&rtdata.timer, HR_TIMER_DELAY_MSEC(RESET_TIMEOUT),
2856 HRTIMER_MODE_REL);
2857
2858 /* Wait for the RESET_COMPLETED interrupt to be raised */
2859 kbase_pm_wait_for_reset(kbdev);
2860
2861 if (!rtdata.timed_out) {
2862 /* GPU has been reset */
2863 hrtimer_cancel(&rtdata.timer);
2864 destroy_hrtimer_on_stack(&rtdata.timer);
2865 return 0;
2866 }
2867
2868 destroy_hrtimer_on_stack(&rtdata.timer);
2869
2870 dev_err(kbdev->dev, "Failed to hard-reset the GPU (timed out after %d ms)\n",
2871 RESET_TIMEOUT);
2872 #ifdef CONFIG_MALI_ARBITER_SUPPORT
2873 }
2874 #endif /* CONFIG_MALI_ARBITER_SUPPORT */
2875
2876 return -EINVAL;
2877 }
2878
2879 int kbase_pm_protected_mode_enable(struct kbase_device *const kbdev)
2880 {
2881 kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
2882 GPU_COMMAND_SET_PROTECTED_MODE);
2883 return 0;
2884 }
2885
2886 int kbase_pm_protected_mode_disable(struct kbase_device *const kbdev)
2887 {
2888 lockdep_assert_held(&kbdev->pm.lock);
2889
2890 return kbase_pm_do_reset(kbdev);
2891 }
2892
2893 int kbase_pm_init_hw(struct kbase_device *kbdev, unsigned int flags)
2894 {
2895 unsigned long irq_flags;
2896 int err = 0;
2897
2898 KBASE_DEBUG_ASSERT(kbdev != NULL);
2899 lockdep_assert_held(&kbdev->pm.lock);
2900
2901 /* Ensure the clock is on before attempting to access the hardware */
2902 if (!kbdev->pm.backend.gpu_powered) {
2903 if (kbdev->pm.backend.callback_power_on)
2904 kbdev->pm.backend.callback_power_on(kbdev);
2905
2906 kbdev->pm.backend.gpu_powered = true;
2907 }
2908
/* Ensure interrupts are off to begin with; this also clears any
 * outstanding interrupts.
 */
2912 kbase_pm_disable_interrupts(kbdev);
2913 /* Ensure cache snoops are disabled before reset. */
2914 kbase_pm_cache_snoop_disable(kbdev);
2915 /* Prepare for the soft-reset */
2916 kbdev->pm.backend.reset_done = false;
2917
2918 /* The cores should be made unavailable due to the reset */
2919 spin_lock_irqsave(&kbdev->hwaccess_lock, irq_flags);
2920 if (kbdev->pm.backend.shaders_state != KBASE_SHADERS_OFF_CORESTACK_OFF)
2921 KBASE_KTRACE_ADD(kbdev, PM_CORES_CHANGE_AVAILABLE, NULL, 0u);
2922 spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags);
2923
2924 /* Soft reset the GPU */
2925 #ifdef CONFIG_MALI_ARBITER_SUPPORT
2926 if (!(flags & PM_NO_RESET))
2927 #endif /* CONFIG_MALI_ARBITER_SUPPORT */
2928 err = kbdev->protected_ops->protected_mode_disable(
2929 kbdev->protected_dev);
2930
2931 spin_lock_irqsave(&kbdev->hwaccess_lock, irq_flags);
2932 #if MALI_USE_CSF
2933 if (kbdev->protected_mode) {
2934 unsigned long flags;
2935
2936 kbase_ipa_control_protm_exited(kbdev);
2937
2938 kbase_csf_scheduler_spin_lock(kbdev, &flags);
2939 kbase_hwcnt_backend_csf_protm_exited(&kbdev->hwcnt_gpu_iface);
2940 kbase_csf_scheduler_spin_unlock(kbdev, flags);
2941 }
2942 #endif
2943 kbdev->protected_mode = false;
2944 spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags);
2945
2946 if (err)
2947 goto exit;
2948
2949 if (flags & PM_HW_ISSUES_DETECT) {
2950 err = kbase_pm_hw_issues_detect(kbdev);
2951 if (err)
2952 goto exit;
2953 }
2954
2955 kbase_pm_hw_issues_apply(kbdev);
2956 kbase_cache_set_coherency_mode(kbdev, kbdev->system_coherency);
2957
2958 /* Sanity check protected mode was left after reset */
2959 WARN_ON(kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_STATUS)) &
2960 GPU_STATUS_PROTECTED_MODE_ACTIVE);
2961
/* If the cycle counter was in use, re-enable it. enable_irqs will
 * only be false when called from kbase_pm_powerup.
 */
2965 if (kbdev->pm.backend.gpu_cycle_counter_requests &&
2966 (flags & PM_ENABLE_IRQS)) {
2967 kbase_pm_enable_interrupts(kbdev);
2968
2969 /* Re-enable the counters if we need to */
2970 spin_lock_irqsave(
2971 &kbdev->pm.backend.gpu_cycle_counter_requests_lock,
2972 irq_flags);
2973 if (kbdev->pm.backend.gpu_cycle_counter_requests)
2974 kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
2975 GPU_COMMAND_CYCLE_COUNT_START);
2976 spin_unlock_irqrestore(
2977 &kbdev->pm.backend.gpu_cycle_counter_requests_lock,
2978 irq_flags);
2979
2980 kbase_pm_disable_interrupts(kbdev);
2981 }
2982
2983 if (flags & PM_ENABLE_IRQS)
2984 kbase_pm_enable_interrupts(kbdev);
2985
2986 exit:
2987 #if !MALI_USE_CSF
2988 if (!kbdev->pm.backend.protected_entry_transition_override) {
2989 /* Re-enable GPU hardware counters if we're resetting from
2990 * protected mode.
2991 */
2992 reenable_protected_mode_hwcnt(kbdev);
2993 }
2994 #endif
2995
2996 return err;
2997 }
2998
2999 /**
3000 * kbase_pm_request_gpu_cycle_counter_do_request - Request cycle counters
3001 * @kbdev: The kbase device structure of the device
3002 *
3003 * Increase the count of cycle counter users and turn the cycle counters on if
3004 * they were previously off
3005 *
3006 * This function is designed to be called by
3007 * kbase_pm_request_gpu_cycle_counter() or
3008 * kbase_pm_request_gpu_cycle_counter_l2_is_on() only
3009 *
* When this function is called the L2 cache must be on - i.e., the GPU must be
* on.
3012 */
3013 static void
3014 kbase_pm_request_gpu_cycle_counter_do_request(struct kbase_device *kbdev)
3015 {
3016 unsigned long flags;
3017
3018 spin_lock_irqsave(&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
3019 flags);
3020 ++kbdev->pm.backend.gpu_cycle_counter_requests;
3021
3022 if (kbdev->pm.backend.gpu_cycle_counter_requests == 1)
3023 kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
3024 GPU_COMMAND_CYCLE_COUNT_START);
3025 else {
/* This can happen after a GPU reset, in which case the cycle
 * counter needs to be kicked to restart it.
 */
3029 #if !IS_ENABLED(CONFIG_MALI_BIFROST_NO_MALI)
3030 if (!(kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_STATUS)) &
3031 GPU_STATUS_CYCLE_COUNT_ACTIVE)) {
3032 kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
3033 GPU_COMMAND_CYCLE_COUNT_START);
3034 }
3035 #endif
3036 }
3037
3038 spin_unlock_irqrestore(
3039 &kbdev->pm.backend.gpu_cycle_counter_requests_lock,
3040 flags);
3041 }
3042
3043 void kbase_pm_request_gpu_cycle_counter(struct kbase_device *kbdev)
3044 {
3045 KBASE_DEBUG_ASSERT(kbdev != NULL);
3046
3047 KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_powered);
3048
3049 KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_cycle_counter_requests <
3050 INT_MAX);
3051
3052 kbase_pm_wait_for_l2_powered(kbdev);
3053
3054 kbase_pm_request_gpu_cycle_counter_do_request(kbdev);
3055 }
3056
3057 KBASE_EXPORT_TEST_API(kbase_pm_request_gpu_cycle_counter);
3058
3059 void kbase_pm_request_gpu_cycle_counter_l2_is_on(struct kbase_device *kbdev)
3060 {
3061 KBASE_DEBUG_ASSERT(kbdev != NULL);
3062
3063 KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_powered);
3064
3065 KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_cycle_counter_requests <
3066 INT_MAX);
3067
3068 kbase_pm_request_gpu_cycle_counter_do_request(kbdev);
3069 }
3070
3071 KBASE_EXPORT_TEST_API(kbase_pm_request_gpu_cycle_counter_l2_is_on);
3072
3073 void kbase_pm_release_gpu_cycle_counter_nolock(struct kbase_device *kbdev)
3074 {
3075 unsigned long flags;
3076
3077 KBASE_DEBUG_ASSERT(kbdev != NULL);
3078
3079 lockdep_assert_held(&kbdev->hwaccess_lock);
3080
3081 spin_lock_irqsave(&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
3082 flags);
3083
3084 KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_cycle_counter_requests > 0);
3085
3086 --kbdev->pm.backend.gpu_cycle_counter_requests;
3087
3088 if (kbdev->pm.backend.gpu_cycle_counter_requests == 0)
3089 kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
3090 GPU_COMMAND_CYCLE_COUNT_STOP);
3091
3092 spin_unlock_irqrestore(
3093 &kbdev->pm.backend.gpu_cycle_counter_requests_lock,
3094 flags);
3095 }
3096
3097 void kbase_pm_release_gpu_cycle_counter(struct kbase_device *kbdev)
3098 {
3099 unsigned long flags;
3100
3101 spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
3102
3103 kbase_pm_release_gpu_cycle_counter_nolock(kbdev);
3104
3105 spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
3106 }
3107
3108 KBASE_EXPORT_TEST_API(kbase_pm_release_gpu_cycle_counter);
3109