// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPU complex suspend & resume functions for Tegra SoCs
 *
 * Copyright (c) 2009-2012, NVIDIA Corporation. All rights reserved.
 */

#include <linux/clk/tegra.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/suspend.h>

#include <linux/firmware/trusted_foundations.h>

#include <soc/tegra/flowctrl.h>
#include <soc/tegra/fuse.h>
#include <soc/tegra/pm.h>
#include <soc/tegra/pmc.h>

#include <asm/cacheflush.h>
#include <asm/firmware.h>
#include <asm/idmap.h>
#include <asm/proc-fns.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>

#include "iomap.h"
#include "pm.h"
#include "reset.h"
#include "sleep.h"

#ifdef CONFIG_PM_SLEEP
static DEFINE_SPINLOCK(tegra_lp2_lock);
static u32 iram_save_size;
static void *iram_save_addr;
struct tegra_lp1_iram tegra_lp1_iram;
void (*tegra_tear_down_cpu)(void);
void (*tegra_sleep_core_finish)(unsigned long v2p);
static int (*tegra_sleep_func)(unsigned long v2p);

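/*
 * tegra_tear_down_cpu_init
 *
 * Selects the chip-specific CPU tear-down handler based on the Tegra
 * chip ID.
 */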
static void tegra_tear_down_cpu_init(void)
{
	switch (tegra_get_chip_id()) {
	case TEGRA20:
		if (IS_ENABLED(CONFIG_ARCH_TEGRA_2x_SOC))
			tegra_tear_down_cpu = tegra20_tear_down_cpu;
		break;
	case TEGRA30:
	case TEGRA114:
	case TEGRA124:
		if (IS_ENABLED(CONFIG_ARCH_TEGRA_3x_SOC) ||
		    IS_ENABLED(CONFIG_ARCH_TEGRA_114_SOC) ||
		    IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC))
			tegra_tear_down_cpu = tegra30_tear_down_cpu;
		break;
	}
}

/*
 * restore_cpu_complex
 *
 * restores cpu clock setting, clears flow controller
 *
 * Always called on CPU 0.
 */
static void restore_cpu_complex(void)
{
	int cpu = smp_processor_id();

	BUG_ON(cpu != 0);

#ifdef CONFIG_SMP
	cpu = cpu_logical_map(cpu);
#endif

	/* Restore the CPU clock settings */
	tegra_cpu_clock_resume();

	flowctrl_cpu_suspend_exit(cpu);
}

/*
 * suspend_cpu_complex
 *
 * saves pll state for use by restart_plls, prepares flow controller for
 * transition to suspend state
 *
 * Must always be called on CPU 0.
 */
static void suspend_cpu_complex(void)
{
	int cpu = smp_processor_id();

	BUG_ON(cpu != 0);

#ifdef CONFIG_SMP
	cpu = cpu_logical_map(cpu);
#endif

	/* Save the CPU clock settings */
	tegra_cpu_clock_suspend();

	flowctrl_cpu_suspend_enter(cpu);
}

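/*
 * tegra_clear_cpu_in_lp2
 *
 * Clears this CPU's bit in tegra_cpu_lp2_mask on exit from the LP2 state.
 * The bit must have been set beforehand by tegra_set_cpu_in_lp2().
 */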
void tegra_clear_cpu_in_lp2(void)
{
	int phy_cpu_id = cpu_logical_map(smp_processor_id());
	u32 *cpu_in_lp2 = tegra_cpu_lp2_mask;

	spin_lock(&tegra_lp2_lock);

	BUG_ON(!(*cpu_in_lp2 & BIT(phy_cpu_id)));
	*cpu_in_lp2 &= ~BIT(phy_cpu_id);

	spin_unlock(&tegra_lp2_lock);
}

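/*
 * tegra_set_cpu_in_lp2
 *
 * Sets this CPU's bit in tegra_cpu_lp2_mask. Returns true when the caller
 * is CPU 0 and every online CPU is now flagged as being in LP2.
 */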
bool tegra_set_cpu_in_lp2(void)
{
	int phy_cpu_id = cpu_logical_map(smp_processor_id());
	bool last_cpu = false;
	cpumask_t *cpu_lp2_mask = tegra_cpu_lp2_mask;
	u32 *cpu_in_lp2 = tegra_cpu_lp2_mask;

	spin_lock(&tegra_lp2_lock);

	BUG_ON((*cpu_in_lp2 & BIT(phy_cpu_id)));
	*cpu_in_lp2 |= BIT(phy_cpu_id);

	if ((phy_cpu_id == 0) && cpumask_equal(cpu_lp2_mask, cpu_online_mask))
		last_cpu = true;
	else if (tegra_get_chip_id() == TEGRA20 && phy_cpu_id == 1)
		tegra20_cpu_set_resettable_soon();

	spin_unlock(&tegra_lp2_lock);
	return last_cpu;
}

int tegra_cpu_do_idle(void)
{
	return cpu_do_idle();
}

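/*
 * tegra_sleep_cpu
 *
 * Finisher for cpu_suspend() on the LP2 (CPU power-gating) path: prepares
 * the firmware and the MMU, then hands over to the low-level
 * tegra_sleep_cpu_finish() code. Does not return to the caller.
 */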
static int tegra_sleep_cpu(unsigned long v2p)
{
	/*
	 * Disabling the L2 cache using the kernel API is only allowed when
	 * all secondary CPUs are offline. The cache has to be disabled with
	 * the MMU on if cache maintenance is done via the Trusted
	 * Foundations firmware. Note that CPUIDLE won't ever enter the
	 * power-gated state on Tegra30 if any secondary CPU is online, and
	 * this LP2-idle code path is used only on Tegra20/30.
	 */
	if (trusted_foundations_registered())
		outer_disable();

	/*
	 * Note that besides setting up the CPU reset vector, this firmware
	 * call may also do the following, depending on the FW version:
	 *  1) Disable the L2 cache. This doesn't matter, since we have
	 *     already disabled it above.
	 *  2) Disable the D-cache. This needs to be taken into account, in
	 *     particular by tegra_disable_clean_inv_dcache(), which must
	 *     avoid disabling it a second time.
	 */
	call_firmware_op(prepare_idle, TF_PM_MODE_LP2);

	setup_mm_for_reboot();
	tegra_sleep_cpu_finish(v2p);

	/* should never get here */
	BUG();

	return 0;
}

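/*
 * tegra_pm_set
 *
 * Programs the flow controller (turning off the CPU rail via the CRAIL
 * bits on chips other than Tegra20/30) and the PMC for the requested
 * suspend mode.
 */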
static void tegra_pm_set(enum tegra_suspend_mode mode)
{
	u32 value;

	switch (tegra_get_chip_id()) {
	case TEGRA20:
	case TEGRA30:
		break;
	default:
		/* Turn off CRAIL */
		value = flowctrl_read_cpu_csr(0);
		value &= ~FLOW_CTRL_CSR_ENABLE_EXT_MASK;
		value |= FLOW_CTRL_CSR_ENABLE_EXT_CRAIL;
		flowctrl_write_cpu_csr(0, value);
		break;
	}

	tegra_pmc_enter_suspend_mode(mode);
}

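/*
 * tegra_idle_lp2_last
 *
 * Idle-path entry into LP2 for the whole CPU complex: programs the PMC and
 * flow controller, suspends through tegra_sleep_cpu() and restores the CPU
 * clock and flow controller state on resume.
 */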
void tegra_idle_lp2_last(void)
{
	tegra_pm_set(TEGRA_SUSPEND_LP2);

	cpu_cluster_pm_enter();
	suspend_cpu_complex();

	cpu_suspend(PHYS_OFFSET - PAGE_OFFSET, &tegra_sleep_cpu);

	/*
	 * Resume L2 cache if it wasn't re-enabled early during resume,
	 * which is the case for Tegra30 that has to re-enable the cache
	 * via firmware call. In other cases cache is already enabled and
	 * hence re-enabling is a no-op. This is always a no-op on Tegra114+.
	 */
	outer_resume();

	restore_cpu_complex();
	cpu_cluster_pm_exit();
}

enum tegra_suspend_mode tegra_pm_validate_suspend_mode(
				enum tegra_suspend_mode mode)
{
	/*
	 * The Tegra devices currently only support suspending to LP1 or a
	 * shallower state, so cap deeper requests (LP0) to LP1.
	 */
	if (mode > TEGRA_SUSPEND_LP1)
		return TEGRA_SUSPEND_LP1;

	return mode;
}

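/*
 * tegra_sleep_core
 *
 * Finisher for cpu_suspend() on the LP1 system-suspend path: prepares the
 * firmware and the MMU, then hands over to the chip-specific
 * tegra_sleep_core_finish() code. Does not return to the caller.
 */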
static int tegra_sleep_core(unsigned long v2p)
{
	/*
	 * The cache has to be disabled with the MMU on if cache maintenance
	 * is done via the Trusted Foundations firmware. This is a no-op on
	 * Tegra114+.
	 */
	if (trusted_foundations_registered())
		outer_disable();

	call_firmware_op(prepare_idle, TF_PM_MODE_LP1);

	setup_mm_for_reboot();
	tegra_sleep_core_finish(v2p);

	/* should never get here */
	BUG();

	return 0;
}

/*
 * tegra_lp1_iram_hook
 *
 * Hooks up the addresses of the LP1 reset vector and the SDRAM self-refresh
 * code, which live in SDRAM. The code is not copied to IRAM in this
 * function; it has to be copied to IRAM before each LP0/LP1 suspend, and
 * the original IRAM contents are restored after resume.
 */
static bool tegra_lp1_iram_hook(void)
{
	switch (tegra_get_chip_id()) {
	case TEGRA20:
		if (IS_ENABLED(CONFIG_ARCH_TEGRA_2x_SOC))
			tegra20_lp1_iram_hook();
		break;
	case TEGRA30:
	case TEGRA114:
	case TEGRA124:
		if (IS_ENABLED(CONFIG_ARCH_TEGRA_3x_SOC) ||
		    IS_ENABLED(CONFIG_ARCH_TEGRA_114_SOC) ||
		    IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC))
			tegra30_lp1_iram_hook();
		break;
	default:
		break;
	}

	if (!tegra_lp1_iram.start_addr || !tegra_lp1_iram.end_addr)
		return false;

	iram_save_size = tegra_lp1_iram.end_addr - tegra_lp1_iram.start_addr;
	iram_save_addr = kmalloc(iram_save_size, GFP_KERNEL);
	if (!iram_save_addr)
		return false;

	return true;
}

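/*
 * tegra_sleep_core_init
 *
 * Lets the chip-specific code install tegra_sleep_core_finish(); returns
 * false if no handler was installed, in which case LP0/LP1 cannot be used.
 */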
static bool tegra_sleep_core_init(void)
{
	switch (tegra_get_chip_id()) {
	case TEGRA20:
		if (IS_ENABLED(CONFIG_ARCH_TEGRA_2x_SOC))
			tegra20_sleep_core_init();
		break;
	case TEGRA30:
	case TEGRA114:
	case TEGRA124:
		if (IS_ENABLED(CONFIG_ARCH_TEGRA_3x_SOC) ||
		    IS_ENABLED(CONFIG_ARCH_TEGRA_114_SOC) ||
		    IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC))
			tegra30_sleep_core_init();
		break;
	default:
		break;
	}

	if (!tegra_sleep_core_finish)
		return false;

	return true;
}

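/*
 * tegra_suspend_enter_lp1
 *
 * Saves the current IRAM resume area, copies the LP1 reset vector and
 * SDRAM self-refresh code into IRAM, then marks LP1 as entered in
 * tegra_cpu_lp1_mask.
 */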
static void tegra_suspend_enter_lp1(void)
{
	/* copy the reset vector & SDRAM shutdown code into IRAM */
	memcpy(iram_save_addr, IO_ADDRESS(TEGRA_IRAM_LPx_RESUME_AREA),
	       iram_save_size);
	memcpy(IO_ADDRESS(TEGRA_IRAM_LPx_RESUME_AREA),
	       tegra_lp1_iram.start_addr, iram_save_size);

	*((u32 *)tegra_cpu_lp1_mask) = 1;
}

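/*
 * tegra_suspend_exit_lp1
 *
 * Restores the saved IRAM contents and clears tegra_cpu_lp1_mask again.
 */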
static void tegra_suspend_exit_lp1(void)
{
	/* restore IRAM */
	memcpy(IO_ADDRESS(TEGRA_IRAM_LPx_RESUME_AREA), iram_save_addr,
	       iram_save_size);

	*(u32 *)tegra_cpu_lp1_mask = 0;
}

static const char *lp_state[TEGRA_MAX_SUSPEND_MODE] = {
	[TEGRA_SUSPEND_NONE] = "none",
	[TEGRA_SUSPEND_LP2] = "LP2",
	[TEGRA_SUSPEND_LP1] = "LP1",
	[TEGRA_SUSPEND_LP0] = "LP0",
};

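/*
 * tegra_suspend_enter
 *
 * platform_suspend_ops .enter callback: enters the suspend mode selected
 * through the PMC (LP1 or LP2) and unwinds the state on resume.
 */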
static int tegra_suspend_enter(suspend_state_t state)
{
	enum tegra_suspend_mode mode = tegra_pmc_get_suspend_mode();

	if (WARN_ON(mode < TEGRA_SUSPEND_NONE ||
		    mode >= TEGRA_MAX_SUSPEND_MODE))
		return -EINVAL;

	pr_info("Entering suspend state %s\n", lp_state[mode]);

	tegra_pm_set(mode);

	local_fiq_disable();

	suspend_cpu_complex();
	switch (mode) {
	case TEGRA_SUSPEND_LP1:
		tegra_suspend_enter_lp1();
		break;
	case TEGRA_SUSPEND_LP2:
		tegra_set_cpu_in_lp2();
		break;
	default:
		break;
	}

	cpu_suspend(PHYS_OFFSET - PAGE_OFFSET, tegra_sleep_func);

	/*
	 * Resume L2 cache if it wasn't re-enabled early during resume,
	 * which is the case for Tegra30 that has to re-enable the cache
	 * via firmware call. In other cases cache is already enabled and
	 * hence re-enabling is a no-op.
	 */
	outer_resume();

	switch (mode) {
	case TEGRA_SUSPEND_LP1:
		tegra_suspend_exit_lp1();
		break;
	case TEGRA_SUSPEND_LP2:
		tegra_clear_cpu_in_lp2();
		break;
	default:
		break;
	}
	restore_cpu_complex();

	local_fiq_enable();

	return 0;
}

static const struct platform_suspend_ops tegra_suspend_ops = {
	.valid = suspend_valid_only_mem,
	.enter = tegra_suspend_enter,
};

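/*
 * tegra_init_suspend
 *
 * Sets up suspend support for the configured mode: installs the CPU
 * tear-down handler, prepares the LP1 IRAM code (falling back to LP2 if
 * that fails), selects the cpu_suspend() finisher and registers the
 * suspend ops.
 */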
void __init tegra_init_suspend(void)
{
	enum tegra_suspend_mode mode = tegra_pmc_get_suspend_mode();

	if (mode == TEGRA_SUSPEND_NONE)
		return;

	tegra_tear_down_cpu_init();

	if (mode >= TEGRA_SUSPEND_LP1) {
		if (!tegra_lp1_iram_hook() || !tegra_sleep_core_init()) {
			pr_err("%s: unable to allocate memory for SDRAM "
			       "self-refresh -- LP0/LP1 unavailable\n",
			       __func__);
			tegra_pmc_set_suspend_mode(TEGRA_SUSPEND_LP2);
			mode = TEGRA_SUSPEND_LP2;
		}
	}

	/* set up the sleep function for cpu_suspend */
	switch (mode) {
	case TEGRA_SUSPEND_LP1:
		tegra_sleep_func = tegra_sleep_core;
		break;
	case TEGRA_SUSPEND_LP2:
		tegra_sleep_func = tegra_sleep_cpu;
		break;
	default:
		break;
	}

	suspend_set_ops(&tegra_suspend_ops);
}
#endif