1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * arch/arm/mach-at91/pm.c
4 * AT91 Power Management
5 *
6 * Copyright (C) 2005 David Brownell
7 */
8
9 #include <linux/genalloc.h>
10 #include <linux/io.h>
11 #include <linux/of_address.h>
12 #include <linux/of.h>
13 #include <linux/of_fdt.h>
14 #include <linux/of_platform.h>
15 #include <linux/platform_device.h>
16 #include <linux/parser.h>
17 #include <linux/suspend.h>
18
19 #include <linux/clk.h>
20 #include <linux/clk/at91_pmc.h>
21 #include <linux/platform_data/atmel.h>
22
23 #include <asm/cacheflush.h>
24 #include <asm/fncpy.h>
25 #include <asm/system_misc.h>
26 #include <asm/suspend.h>
27
28 #include "generic.h"
29 #include "pm.h"
30 #include "sam_secure.h"
31
32 #define BACKUP_DDR_PHY_CALIBRATION (9)
33
34 /**
35 * struct at91_pm_bu - AT91 power management backup unit data structure
36 * @suspended: true if suspended to backup mode
37 * @reserved: reserved
38 * @canary: canary data for memory checking after exit from backup mode
39 * @resume: resume API
40 * @ddr_phy_calibration: DDR PHY calibration data: ZQ0CR0, first 8 words
41 * of the memory
42 */
43 struct at91_pm_bu {
44 int suspended;
45 unsigned long reserved;
46 phys_addr_t canary;
47 phys_addr_t resume;
48 unsigned long ddr_phy_calibration[BACKUP_DDR_PHY_CALIBRATION];
49 };
50
51 /**
52 * struct at91_pm_sfrbu_regs - registers mapping for SFRBU
53 * @pswbu: power switch BU control registers
54 */
55 struct at91_pm_sfrbu_regs {
56 struct {
57 u32 key;
58 u32 ctrl;
59 u32 state;
60 u32 softsw;
61 } pswbu;
62 };
63
64 /**
65 * enum at91_pm_eth_clk - Ethernet clock indexes
66 * @AT91_PM_ETH_PCLK: pclk index
67 * @AT91_PM_ETH_HCLK: hclk index
68 * @AT91_PM_ETH_MAX_CLK: max index
69 */
70 enum at91_pm_eth_clk {
71 AT91_PM_ETH_PCLK,
72 AT91_PM_ETH_HCLK,
73 AT91_PM_ETH_MAX_CLK,
74 };
75
76 /**
77 * enum at91_pm_eth - Ethernet controller indexes
78 * @AT91_PM_G_ETH: gigabit Ethernet controller index
79 * @AT91_PM_E_ETH: megabit Ethernet controller index
80 * @AT91_PM_MAX_ETH: max index
81 */
82 enum at91_pm_eth {
83 AT91_PM_G_ETH,
84 AT91_PM_E_ETH,
85 AT91_PM_MAX_ETH,
86 };
87
/**
 * struct at91_pm_quirk_eth - AT91 PM Ethernet quirks
 * @dev: Ethernet device
 * @np: Ethernet device node
 * @clks: Ethernet clocks
 * @modes: power management modes that this quirk applies to
 * @dns_modes: "do not suspend" modes: suspend is aborted if this Ethernet
 *	interface is the only configured wakeup source, because WoL cannot
 *	work reliably in these modes
 */
98 struct at91_pm_quirk_eth {
99 struct device *dev;
100 struct device_node *np;
101 struct clk_bulk_data clks[AT91_PM_ETH_MAX_CLK];
102 u32 modes;
103 u32 dns_modes;
104 };
105
106 /**
107 * struct at91_pm_quirks - AT91 PM quirks
108 * @eth: Ethernet quirks
109 */
110 struct at91_pm_quirks {
111 struct at91_pm_quirk_eth eth[AT91_PM_MAX_ETH];
112 };
113
/**
 * struct at91_soc_pm - AT91 SoC power management data structure
 * @config_shdwc_ws: wakeup sources configuration function for SHDWC
 * @config_pmc_ws: wakeup sources configuration function for PMC
 * @ws_ids: wakeup sources of_device_id array
 * @bu: backup unit mapped data (for backup mode)
 * @quirks: PM quirks
 * @data: PM data to be used on last phase of suspend
 * @sfrbu_regs: SFRBU registers mapping
 * @memcs: memory chip select
 */
125 struct at91_soc_pm {
126 int (*config_shdwc_ws)(void __iomem *shdwc, u32 *mode, u32 *polarity);
127 int (*config_pmc_ws)(void __iomem *pmc, u32 mode, u32 polarity);
128 const struct of_device_id *ws_ids;
129 struct at91_pm_bu *bu;
130 struct at91_pm_quirks quirks;
131 struct at91_pm_data data;
132 struct at91_pm_sfrbu_regs sfrbu_regs;
133 void *memcs;
134 };
135
136 /**
137 * enum at91_pm_iomaps - IOs that needs to be mapped for different PM modes
138 * @AT91_PM_IOMAP_SHDWC: SHDWC controller
139 * @AT91_PM_IOMAP_SFRBU: SFRBU controller
140 * @AT91_PM_IOMAP_ETHC: Ethernet controller
141 */
142 enum at91_pm_iomaps {
143 AT91_PM_IOMAP_SHDWC,
144 AT91_PM_IOMAP_SFRBU,
145 AT91_PM_IOMAP_ETHC,
146 };
147
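/* Build a bitmask flag from an enum at91_pm_iomaps entry, e.g. AT91_PM_IOMAP(SHDWC). */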
148 #define AT91_PM_IOMAP(name) BIT(AT91_PM_IOMAP_##name)
149
150 static struct at91_soc_pm soc_pm = {
151 .data = {
152 .standby_mode = AT91_PM_STANDBY,
153 .suspend_mode = AT91_PM_ULP0,
154 },
155 };
156
157 static const match_table_t pm_modes __initconst = {
158 { AT91_PM_STANDBY, "standby" },
159 { AT91_PM_ULP0, "ulp0" },
160 { AT91_PM_ULP0_FAST, "ulp0-fast" },
161 { AT91_PM_ULP1, "ulp1" },
162 { AT91_PM_BACKUP, "backup" },
163 { -1, NULL },
164 };
165
166 #define at91_ramc_read(id, field) \
167 __raw_readl(soc_pm.data.ramc[id] + field)
168
169 #define at91_ramc_write(id, field, value) \
170 __raw_writel(value, soc_pm.data.ramc[id] + field)
171
static int at91_pm_valid_state(suspend_state_t state)
173 {
174 switch (state) {
175 case PM_SUSPEND_ON:
176 case PM_SUSPEND_STANDBY:
177 case PM_SUSPEND_MEM:
178 return 1;
179
180 default:
181 return 0;
182 }
183 }
184
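/*
 * Known canary value; its physical address is stored in the backup unit so
 * memory contents can be checked after exit from backup mode.
 */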
185 static int canary = 0xA5A5A5A5;
186
187 struct wakeup_source_info {
188 unsigned int pmc_fsmr_bit;
189 unsigned int shdwc_mr_bit;
190 bool set_polarity;
191 };
192
193 static const struct wakeup_source_info ws_info[] = {
194 { .pmc_fsmr_bit = AT91_PMC_FSTT(10), .set_polarity = true },
195 { .pmc_fsmr_bit = AT91_PMC_RTCAL, .shdwc_mr_bit = BIT(17) },
196 { .pmc_fsmr_bit = AT91_PMC_USBAL },
197 { .pmc_fsmr_bit = AT91_PMC_SDMMC_CD },
198 { .pmc_fsmr_bit = AT91_PMC_RTTAL },
199 { .pmc_fsmr_bit = AT91_PMC_RXLP_MCE },
200 };
201
202 static const struct of_device_id sama5d2_ws_ids[] = {
203 { .compatible = "atmel,sama5d2-gem", .data = &ws_info[0] },
204 { .compatible = "atmel,sama5d2-rtc", .data = &ws_info[1] },
205 { .compatible = "atmel,sama5d3-udc", .data = &ws_info[2] },
206 { .compatible = "atmel,at91rm9200-ohci", .data = &ws_info[2] },
207 { .compatible = "usb-ohci", .data = &ws_info[2] },
208 { .compatible = "atmel,at91sam9g45-ehci", .data = &ws_info[2] },
209 { .compatible = "usb-ehci", .data = &ws_info[2] },
210 { .compatible = "atmel,sama5d2-sdhci", .data = &ws_info[3] },
211 { /* sentinel */ }
212 };
213
214 static const struct of_device_id sam9x60_ws_ids[] = {
215 { .compatible = "microchip,sam9x60-rtc", .data = &ws_info[1] },
216 { .compatible = "atmel,at91rm9200-ohci", .data = &ws_info[2] },
217 { .compatible = "usb-ohci", .data = &ws_info[2] },
218 { .compatible = "atmel,at91sam9g45-ehci", .data = &ws_info[2] },
219 { .compatible = "usb-ehci", .data = &ws_info[2] },
220 { .compatible = "microchip,sam9x60-rtt", .data = &ws_info[4] },
221 { .compatible = "cdns,sam9x60-macb", .data = &ws_info[5] },
222 { /* sentinel */ }
223 };
224
225 static const struct of_device_id sama7g5_ws_ids[] = {
226 { .compatible = "microchip,sama7g5-rtc", .data = &ws_info[1] },
227 { .compatible = "microchip,sama7g5-ohci", .data = &ws_info[2] },
228 { .compatible = "usb-ohci", .data = &ws_info[2] },
229 { .compatible = "atmel,at91sam9g45-ehci", .data = &ws_info[2] },
230 { .compatible = "usb-ehci", .data = &ws_info[2] },
231 { .compatible = "microchip,sama7g5-sdhci", .data = &ws_info[3] },
232 { .compatible = "microchip,sama7g5-rtt", .data = &ws_info[4] },
233 { /* sentinel */ }
234 };
235
236 static const struct of_device_id sam9x7_ws_ids[] = {
237 { .compatible = "microchip,sam9x7-rtc", .data = &ws_info[1] },
238 { .compatible = "microchip,sam9x7-rtt", .data = &ws_info[4] },
239 { .compatible = "microchip,sam9x7-gem", .data = &ws_info[5] },
240 { /* sentinel */ }
241 };
242
static int at91_pm_config_ws(unsigned int pm_mode, bool set)
244 {
245 const struct wakeup_source_info *wsi;
246 const struct of_device_id *match;
247 struct platform_device *pdev;
248 struct device_node *np;
249 unsigned int mode = 0, polarity = 0, val = 0;
250
251 if (pm_mode != AT91_PM_ULP1)
252 return 0;
253
254 if (!soc_pm.data.pmc || !soc_pm.data.shdwc || !soc_pm.ws_ids)
255 return -EPERM;
256
257 if (!set) {
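/* mode is still 0 here, so this write clears all fast startup sources. */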
258 writel(mode, soc_pm.data.pmc + AT91_PMC_FSMR);
259 return 0;
260 }
261
262 if (soc_pm.config_shdwc_ws)
263 soc_pm.config_shdwc_ws(soc_pm.data.shdwc, &mode, &polarity);
264
265 /* SHDWC.MR */
266 val = readl(soc_pm.data.shdwc + 0x04);
267
268 /* Loop through defined wakeup sources. */
269 for_each_matching_node_and_match(np, soc_pm.ws_ids, &match) {
270 pdev = of_find_device_by_node(np);
271 if (!pdev)
272 continue;
273
274 if (device_may_wakeup(&pdev->dev)) {
275 wsi = match->data;
276
277 /* Check if enabled on SHDWC. */
278 if (wsi->shdwc_mr_bit && !(val & wsi->shdwc_mr_bit))
279 goto put_device;
280
281 mode |= wsi->pmc_fsmr_bit;
282 if (wsi->set_polarity)
283 polarity |= wsi->pmc_fsmr_bit;
284 }
285
286 put_device:
287 put_device(&pdev->dev);
288 }
289
290 if (mode) {
291 if (soc_pm.config_pmc_ws)
292 soc_pm.config_pmc_ws(soc_pm.data.pmc, mode, polarity);
293 } else {
294 pr_err("AT91: PM: no ULP1 wakeup sources found!");
295 }
296
297 return mode ? 0 : -EPERM;
298 }
299
static int at91_sama5d2_config_shdwc_ws(void __iomem *shdwc, u32 *mode,
					u32 *polarity)
302 {
303 u32 val;
304
305 /* SHDWC.WUIR */
306 val = readl(shdwc + 0x0c);
307 *mode |= (val & 0x3ff);
308 *polarity |= ((val >> 16) & 0x3ff);
309
310 return 0;
311 }
312
static int at91_sama5d2_config_pmc_ws(void __iomem *pmc, u32 mode, u32 polarity)
314 {
315 writel(mode, pmc + AT91_PMC_FSMR);
316 writel(polarity, pmc + AT91_PMC_FSPR);
317
318 return 0;
319 }
320
static int at91_sam9x60_config_pmc_ws(void __iomem *pmc, u32 mode, u32 polarity)
322 {
323 writel(mode, pmc + AT91_PMC_FSMR);
324
325 return 0;
326 }
327
static bool at91_pm_eth_quirk_is_valid(struct at91_pm_quirk_eth *eth)
329 {
330 struct platform_device *pdev;
331
332 /* Interface NA in DT. */
333 if (!eth->np)
334 return false;
335
336 /* No quirks for this interface and current suspend mode. */
337 if (!(eth->modes & BIT(soc_pm.data.mode)))
338 return false;
339
340 if (!eth->dev) {
341 /* Driver not probed. */
342 pdev = of_find_device_by_node(eth->np);
343 if (!pdev)
344 return false;
345 /* put_device(eth->dev) is called at the end of suspend. */
346 eth->dev = &pdev->dev;
347 }
348
349 /* No quirks if device isn't a wakeup source. */
350 if (!device_may_wakeup(eth->dev))
351 return false;
352
353 return true;
354 }
355
static int at91_pm_config_quirks(bool suspend)
357 {
358 struct at91_pm_quirk_eth *eth;
359 int i, j, ret, tmp;
360
/*
 * Ethernet IPs whose device_node pointers are stored into
 * soc_pm.quirks.eth[].np cannot handle WoL packets while in ULP0, ULP1
 * or both due to a hardware bug. If they receive WoL packets while in
 * ULP0 or ULP1 the IPs could stop working or the whole system could stop
 * working. We cannot handle this scenario in the Ethernet driver itself
 * as the driver is common to multiple vendors and also we only know
 * here, in this file, if we suspend to ULP0 or ULP1 mode. Thus handle
 * these scenarios here, as quirks.
 */
371 for (i = 0; i < AT91_PM_MAX_ETH; i++) {
372 eth = &soc_pm.quirks.eth[i];
373
374 if (!at91_pm_eth_quirk_is_valid(eth))
375 continue;
376
377 /*
378 * For modes in dns_modes mask the system blocks if quirk is not
379 * applied but if applied the interface doesn't act at WoL
380 * events. Thus take care to avoid suspending if this interface
381 * is the only configured wakeup source.
382 */
383 if (suspend && eth->dns_modes & BIT(soc_pm.data.mode)) {
384 int ws_count = 0;
385 #ifdef CONFIG_PM_SLEEP
386 struct wakeup_source *ws;
387
388 for_each_wakeup_source(ws) {
389 if (ws->dev == eth->dev)
390 continue;
391
392 ws_count++;
393 break;
394 }
395 #endif
396
/*
 * Checking !ws_count covers all affected platforms, even when both
 * G_ETH and E_ETH are present, because dns_modes is populated only
 * for the G_ETH interface.
 */
402 if (!ws_count) {
403 pr_err("AT91: PM: Ethernet cannot resume from WoL!");
404 ret = -EPERM;
405 put_device(eth->dev);
406 eth->dev = NULL;
407 /* No need to revert clock settings for this eth. */
408 i--;
409 goto clk_unconfigure;
410 }
411 }
412
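/*
 * Gate the Ethernet clocks while suspended so the buggy IP cannot
 * receive WoL packets; re-enable them on resume.
 */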
413 if (suspend) {
414 clk_bulk_disable_unprepare(AT91_PM_ETH_MAX_CLK, eth->clks);
415 } else {
416 ret = clk_bulk_prepare_enable(AT91_PM_ETH_MAX_CLK,
417 eth->clks);
418 if (ret)
419 goto clk_unconfigure;
420 /*
421 * Release the reference to eth->dev taken in
422 * at91_pm_eth_quirk_is_valid().
423 */
424 put_device(eth->dev);
425 eth->dev = NULL;
426 }
427 }
428
429 return 0;
430
431 clk_unconfigure:
432 /*
433 * In case of resume we reach this point if clk_prepare_enable() failed.
434 * we don't want to revert the previous clk_prepare_enable() for the
435 * other IP.
436 */
437 for (j = i; j >= 0; j--) {
438 eth = &soc_pm.quirks.eth[j];
439 if (suspend) {
440 if (!at91_pm_eth_quirk_is_valid(eth))
441 continue;
442
443 tmp = clk_bulk_prepare_enable(AT91_PM_ETH_MAX_CLK, eth->clks);
444 if (tmp) {
445 pr_err("AT91: PM: failed to enable %s clocks\n",
446 j == AT91_PM_G_ETH ? "geth" : "eth");
447 }
448 }
449
450 /*
451 * Release the reference to eth->dev taken in
452 * at91_pm_eth_quirk_is_valid().
453 */
454 put_device(eth->dev);
455 eth->dev = NULL;
456 }
457
458 return ret;
459 }
460
461 /*
462 * Called after processes are frozen, but before we shutdown devices.
463 */
static int at91_pm_begin(suspend_state_t state)
465 {
466 int ret;
467
468 switch (state) {
469 case PM_SUSPEND_MEM:
470 soc_pm.data.mode = soc_pm.data.suspend_mode;
471 break;
472
473 case PM_SUSPEND_STANDBY:
474 soc_pm.data.mode = soc_pm.data.standby_mode;
475 break;
476
477 default:
478 soc_pm.data.mode = -1;
479 }
480
481 ret = at91_pm_config_ws(soc_pm.data.mode, true);
482 if (ret)
483 return ret;
484
485 if (soc_pm.data.mode == AT91_PM_BACKUP)
486 soc_pm.bu->suspended = 1;
487 else if (soc_pm.bu)
488 soc_pm.bu->suspended = 0;
489
490 return 0;
491 }
492
493 /*
494 * Verify that all the clocks are correct before entering
495 * slow-clock mode.
496 */
static int at91_pm_verify_clocks(void)
498 {
499 unsigned long scsr;
500 int i;
501
502 scsr = readl(soc_pm.data.pmc + AT91_PMC_SCSR);
503
504 /* USB must not be using PLLB */
505 if ((scsr & soc_pm.data.uhp_udp_mask) != 0) {
506 pr_err("AT91: PM - Suspend-to-RAM with USB still active\n");
507 return 0;
508 }
509
510 /* PCK0..PCK3 must be disabled, or configured to use clk32k */
511 for (i = 0; i < 4; i++) {
512 u32 css;
513
514 if ((scsr & (AT91_PMC_PCK0 << i)) == 0)
515 continue;
516 css = readl(soc_pm.data.pmc + AT91_PMC_PCKR(i)) & AT91_PMC_CSS;
517 if (css != AT91_PMC_CSS_SLOW) {
518 pr_err("AT91: PM - Suspend-to-RAM with PCK%d src %d\n", i, css);
519 return 0;
520 }
521 }
522
523 return 1;
524 }
525
526 /*
527 * Call this from platform driver suspend() to see how deeply to suspend.
528 * For example, some controllers (like OHCI) need one of the PLL clocks
529 * in order to act as a wakeup source, and those are not available when
530 * going into slow clock mode.
531 *
532 * REVISIT: generalize as clk_will_be_available(clk)? Other platforms have
533 * the very same problem (but not using at91 main_clk), and it'd be better
534 * to add one generic API rather than lots of platform-specific ones.
535 */
int at91_suspend_entering_slow_clock(void)
537 {
538 return (soc_pm.data.mode >= AT91_PM_ULP0);
539 }
540 EXPORT_SYMBOL(at91_suspend_entering_slow_clock);
541
542 static void (*at91_suspend_sram_fn)(struct at91_pm_data *);
543 extern void at91_pm_suspend_in_sram(struct at91_pm_data *pm_data);
544 extern u32 at91_pm_suspend_in_sram_sz;
545
static int at91_suspend_finish(unsigned long val)
547 {
/* Synopsys workaround: lookup table to fix a bug in the DDR PHY calibration logic */
549 unsigned char modified_fix_code[] = {
550 0x00, 0x01, 0x01, 0x06, 0x07, 0x0c, 0x06, 0x07, 0x0b, 0x18,
551 0x0a, 0x0b, 0x0c, 0x0d, 0x0d, 0x0a, 0x13, 0x13, 0x12, 0x13,
552 0x14, 0x15, 0x15, 0x12, 0x18, 0x19, 0x19, 0x1e, 0x1f, 0x14,
553 0x1e, 0x1f,
554 };
555 unsigned int tmp, index;
556 int i;
557
558 if (soc_pm.data.mode == AT91_PM_BACKUP && soc_pm.data.ramc_phy) {
559 /*
560 * Bootloader will perform DDR recalibration and will try to
561 * restore the ZQ0SR0 with the value saved here. But the
562 * calibration is buggy and restoring some values from ZQ0SR0
563 * is forbidden and risky thus we need to provide processed
564 * values for these.
565 */
566 tmp = readl(soc_pm.data.ramc_phy + DDR3PHY_ZQ0SR0);
567
568 /* Store pull-down output impedance select. */
569 index = (tmp >> DDR3PHY_ZQ0SR0_PDO_OFF) & 0x1f;
570 soc_pm.bu->ddr_phy_calibration[0] = modified_fix_code[index] << DDR3PHY_ZQ0SR0_PDO_OFF;
571
572 /* Store pull-up output impedance select. */
573 index = (tmp >> DDR3PHY_ZQ0SR0_PUO_OFF) & 0x1f;
574 soc_pm.bu->ddr_phy_calibration[0] |= modified_fix_code[index] << DDR3PHY_ZQ0SR0_PUO_OFF;
575
576 /* Store pull-down on-die termination impedance select. */
577 index = (tmp >> DDR3PHY_ZQ0SR0_PDODT_OFF) & 0x1f;
578 soc_pm.bu->ddr_phy_calibration[0] |= modified_fix_code[index] << DDR3PHY_ZQ0SR0_PDODT_OFF;
579
580 /* Store pull-up on-die termination impedance select. */
581 index = (tmp >> DDR3PHY_ZQ0SRO_PUODT_OFF) & 0x1f;
582 soc_pm.bu->ddr_phy_calibration[0] |= modified_fix_code[index] << DDR3PHY_ZQ0SRO_PUODT_OFF;
583
584 /*
585 * The 1st 8 words of memory might get corrupted in the process
586 * of DDR PHY recalibration; it is saved here in securam and it
587 * will be restored later, after recalibration, by bootloader
588 */
589 for (i = 1; i < BACKUP_DDR_PHY_CALIBRATION; i++)
590 soc_pm.bu->ddr_phy_calibration[i] =
591 *((unsigned int *)soc_pm.memcs + (i - 1));
592 }
593
594 flush_cache_all();
595 outer_disable();
596
597 at91_suspend_sram_fn(&soc_pm.data);
598
599 return 0;
600 }
601
602 /**
603 * at91_pm_switch_ba_to_auto() - Configure Backup Unit Power Switch
604 * to automatic/hardware mode.
605 *
606 * The Backup Unit Power Switch can be managed either by software or hardware.
607 * Enabling hardware mode allows the automatic transition of power between
608 * VDDANA (or VDDIN33) and VDDBU (or VBAT, respectively), based on the
609 * availability of these power sources.
610 *
611 * If the Backup Unit Power Switch is already in automatic mode, no action is
612 * required. If it is in software-controlled mode, it is switched to automatic
613 * mode to enhance safety and eliminate the need for toggling between power
614 * sources.
615 */
static void at91_pm_switch_ba_to_auto(void)
617 {
618 unsigned int offset = offsetof(struct at91_pm_sfrbu_regs, pswbu);
619 unsigned int val;
620
621 /* Just for safety. */
622 if (!soc_pm.data.sfrbu)
623 return;
624
625 val = readl(soc_pm.data.sfrbu + offset);
626
627 /* Already on auto/hardware. */
628 if (!(val & soc_pm.sfrbu_regs.pswbu.ctrl))
629 return;
630
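/*
 * Clear the software-control bit and write it back together with the key,
 * handing the backup power switch back to hardware control.
 */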
631 val &= ~soc_pm.sfrbu_regs.pswbu.ctrl;
632 val |= soc_pm.sfrbu_regs.pswbu.key;
633 writel(val, soc_pm.data.sfrbu + offset);
634 }
635
static void at91_pm_suspend(suspend_state_t state)
637 {
638 if (soc_pm.data.mode == AT91_PM_BACKUP) {
639 at91_pm_switch_ba_to_auto();
640
641 cpu_suspend(0, at91_suspend_finish);
642
643 /* The SRAM is lost between suspend cycles */
644 at91_suspend_sram_fn = fncpy(at91_suspend_sram_fn,
645 &at91_pm_suspend_in_sram,
646 at91_pm_suspend_in_sram_sz);
647 } else {
648 at91_suspend_finish(0);
649 }
650
651 outer_resume();
652 }
653
/*
 * STANDBY mode has *all* drivers suspended; it ignores irqs not marked as
 * 'wakeup' event sources and reduces DRAM power. But otherwise it's identical
 * to PM_SUSPEND_ON: cpu idle, and nothing fancy done with main or cpu clocks.
 *
 * AT91_PM_ULP0 is like STANDBY plus slow clock mode: drivers must suspend
 * more deeply, the master clock switches to clk32k and the main oscillator
 * is turned off.
 *
 * AT91_PM_BACKUP turns off the whole SoC after placing the DDR in
 * self-refresh.
 */
static int at91_pm_enter(suspend_state_t state)
666 {
667 int ret;
668
669 ret = at91_pm_config_quirks(true);
670 if (ret)
671 return ret;
672
673 switch (state) {
674 case PM_SUSPEND_MEM:
675 case PM_SUSPEND_STANDBY:
676 /*
677 * Ensure that clocks are in a valid state.
678 */
679 if (soc_pm.data.mode >= AT91_PM_ULP0 &&
680 !at91_pm_verify_clocks())
681 goto error;
682
683 at91_pm_suspend(state);
684
685 break;
686
687 case PM_SUSPEND_ON:
688 cpu_do_idle();
689 break;
690
691 default:
692 pr_debug("AT91: PM - bogus suspend state %d\n", state);
693 goto error;
694 }
695
696 error:
697 at91_pm_config_quirks(false);
698 return 0;
699 }
700
701 /*
702 * Called right prior to thawing processes.
703 */
static void at91_pm_end(void)
705 {
706 at91_pm_config_ws(soc_pm.data.mode, false);
707 }
708
709
710 static const struct platform_suspend_ops at91_pm_ops = {
711 .valid = at91_pm_valid_state,
712 .begin = at91_pm_begin,
713 .enter = at91_pm_enter,
714 .end = at91_pm_end,
715 };
716
717 static struct platform_device at91_cpuidle_device = {
718 .name = "cpuidle-at91",
719 };
720
721 /*
722 * The AT91RM9200 goes into self-refresh mode with this command, and will
723 * terminate self-refresh automatically on the next SDRAM access.
724 *
725 * Self-refresh mode is exited as soon as a memory access is made, but we don't
726 * know for sure when that happens. However, we need to restore the low-power
727 * mode if it was enabled before going idle. Restoring low-power mode while
728 * still in self-refresh is "not recommended", but seems to work.
729 */
static void at91rm9200_standby(void)
731 {
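/*
 * Drain the write buffer, write 1 to the SDRAMC self-refresh register and
 * wait for interrupt, all from one I-cache line (hence the ".align 5") so
 * no SDRAM access happens while self-refresh is being entered.
 */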
732 asm volatile(
733 "b 1f\n\t"
734 ".align 5\n\t"
735 "1: mcr p15, 0, %0, c7, c10, 4\n\t"
736 " str %2, [%1, %3]\n\t"
737 " mcr p15, 0, %0, c7, c0, 4\n\t"
738 :
739 : "r" (0), "r" (soc_pm.data.ramc[0]),
740 "r" (1), "r" (AT91_MC_SDRAMC_SRR));
741 }
742
/*
 * We may manage both DDRAM/SDRAM controllers, so more than one set of
 * register values needs to be remembered.
 */
static void at91_ddr_standby(void)
747 {
748 /* Those two values allow us to delay self-refresh activation
749 * to the maximum. */
750 u32 lpr0, lpr1 = 0;
751 u32 mdr, saved_mdr0, saved_mdr1 = 0;
752 u32 saved_lpr0, saved_lpr1 = 0;
753
754 /* LPDDR1 --> force DDR2 mode during self-refresh */
755 saved_mdr0 = at91_ramc_read(0, AT91_DDRSDRC_MDR);
756 if ((saved_mdr0 & AT91_DDRSDRC_MD) == AT91_DDRSDRC_MD_LOW_POWER_DDR) {
757 mdr = saved_mdr0 & ~AT91_DDRSDRC_MD;
758 mdr |= AT91_DDRSDRC_MD_DDR2;
759 at91_ramc_write(0, AT91_DDRSDRC_MDR, mdr);
760 }
761
762 if (soc_pm.data.ramc[1]) {
763 saved_lpr1 = at91_ramc_read(1, AT91_DDRSDRC_LPR);
764 lpr1 = saved_lpr1 & ~AT91_DDRSDRC_LPCB;
765 lpr1 |= AT91_DDRSDRC_LPCB_SELF_REFRESH;
766 saved_mdr1 = at91_ramc_read(1, AT91_DDRSDRC_MDR);
767 if ((saved_mdr1 & AT91_DDRSDRC_MD) == AT91_DDRSDRC_MD_LOW_POWER_DDR) {
768 mdr = saved_mdr1 & ~AT91_DDRSDRC_MD;
769 mdr |= AT91_DDRSDRC_MD_DDR2;
770 at91_ramc_write(1, AT91_DDRSDRC_MDR, mdr);
771 }
772 }
773
774 saved_lpr0 = at91_ramc_read(0, AT91_DDRSDRC_LPR);
775 lpr0 = saved_lpr0 & ~AT91_DDRSDRC_LPCB;
776 lpr0 |= AT91_DDRSDRC_LPCB_SELF_REFRESH;
777
778 /* self-refresh mode now */
779 at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr0);
780 if (soc_pm.data.ramc[1])
781 at91_ramc_write(1, AT91_DDRSDRC_LPR, lpr1);
782
783 cpu_do_idle();
784
785 at91_ramc_write(0, AT91_DDRSDRC_MDR, saved_mdr0);
786 at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0);
787 if (soc_pm.data.ramc[1]) {
at91_ramc_write(1, AT91_DDRSDRC_MDR, saved_mdr1);
789 at91_ramc_write(1, AT91_DDRSDRC_LPR, saved_lpr1);
790 }
791 }
792
static void sama5d3_ddr_standby(void)
794 {
795 u32 lpr0;
796 u32 saved_lpr0;
797
798 saved_lpr0 = at91_ramc_read(0, AT91_DDRSDRC_LPR);
799 lpr0 = saved_lpr0 & ~AT91_DDRSDRC_LPCB;
800 lpr0 |= AT91_DDRSDRC_LPCB_POWER_DOWN;
801
802 at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr0);
803
804 cpu_do_idle();
805
806 at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0);
807 }
808
/*
 * We may manage both DDRAM/SDRAM controllers, so more than one set of
 * register values needs to be remembered.
 */
static void at91sam9_sdram_standby(void)
813 {
814 u32 lpr0, lpr1 = 0;
815 u32 saved_lpr0, saved_lpr1 = 0;
816
817 if (soc_pm.data.ramc[1]) {
818 saved_lpr1 = at91_ramc_read(1, AT91_SDRAMC_LPR);
819 lpr1 = saved_lpr1 & ~AT91_SDRAMC_LPCB;
820 lpr1 |= AT91_SDRAMC_LPCB_SELF_REFRESH;
821 }
822
823 saved_lpr0 = at91_ramc_read(0, AT91_SDRAMC_LPR);
824 lpr0 = saved_lpr0 & ~AT91_SDRAMC_LPCB;
825 lpr0 |= AT91_SDRAMC_LPCB_SELF_REFRESH;
826
827 /* self-refresh mode now */
828 at91_ramc_write(0, AT91_SDRAMC_LPR, lpr0);
829 if (soc_pm.data.ramc[1])
830 at91_ramc_write(1, AT91_SDRAMC_LPR, lpr1);
831
832 cpu_do_idle();
833
834 at91_ramc_write(0, AT91_SDRAMC_LPR, saved_lpr0);
835 if (soc_pm.data.ramc[1])
836 at91_ramc_write(1, AT91_SDRAMC_LPR, saved_lpr1);
837 }
838
static void sama7g5_standby(void)
840 {
841 int pwrtmg, ratio;
842
843 pwrtmg = readl(soc_pm.data.ramc[0] + UDDRC_PWRCTL);
844 ratio = readl(soc_pm.data.pmc + AT91_PMC_RATIO);
845
846 /*
847 * Place RAM into self-refresh after a maximum idle clocks. The maximum
848 * idle clocks is configured by bootloader in
849 * UDDRC_PWRMGT.SELFREF_TO_X32.
850 */
851 writel(pwrtmg | UDDRC_PWRCTL_SELFREF_EN,
852 soc_pm.data.ramc[0] + UDDRC_PWRCTL);
853 /* Divide CPU clock by 16. */
854 writel(ratio & ~AT91_PMC_RATIO_RATIO, soc_pm.data.pmc + AT91_PMC_RATIO);
855
856 cpu_do_idle();
857
858 /* Restore previous configuration. */
859 writel(ratio, soc_pm.data.pmc + AT91_PMC_RATIO);
860 writel(pwrtmg, soc_pm.data.ramc[0] + UDDRC_PWRCTL);
861 }
862
863 struct ramc_info {
864 void (*idle)(void);
865 unsigned int memctrl;
866 };
867
868 static const struct ramc_info ramc_infos[] __initconst = {
869 { .idle = at91rm9200_standby, .memctrl = AT91_MEMCTRL_MC},
870 { .idle = at91sam9_sdram_standby, .memctrl = AT91_MEMCTRL_SDRAMC},
871 { .idle = at91_ddr_standby, .memctrl = AT91_MEMCTRL_DDRSDR},
872 { .idle = sama5d3_ddr_standby, .memctrl = AT91_MEMCTRL_DDRSDR},
873 { .idle = sama7g5_standby, },
874 };
875
876 static const struct of_device_id ramc_ids[] __initconst = {
877 { .compatible = "atmel,at91rm9200-sdramc", .data = &ramc_infos[0] },
878 { .compatible = "atmel,at91sam9260-sdramc", .data = &ramc_infos[1] },
879 { .compatible = "atmel,at91sam9g45-ddramc", .data = &ramc_infos[2] },
880 { .compatible = "atmel,sama5d3-ddramc", .data = &ramc_infos[3] },
881 { .compatible = "microchip,sama7g5-uddrc", .data = &ramc_infos[4], },
882 { /*sentinel*/ }
883 };
884
885 static const struct of_device_id ramc_phy_ids[] __initconst = {
886 { .compatible = "microchip,sama7g5-ddr3phy", },
887 { /* Sentinel. */ },
888 };
889
static __init int at91_dt_ramc(bool phy_mandatory)
891 {
892 struct device_node *np;
893 const struct of_device_id *of_id;
894 int idx = 0;
895 void *standby = NULL;
896 const struct ramc_info *ramc;
897 int ret;
898
899 for_each_matching_node_and_match(np, ramc_ids, &of_id) {
900 soc_pm.data.ramc[idx] = of_iomap(np, 0);
901 if (!soc_pm.data.ramc[idx]) {
902 pr_err("unable to map ramc[%d] cpu registers\n", idx);
903 ret = -ENOMEM;
904 of_node_put(np);
905 goto unmap_ramc;
906 }
907
908 ramc = of_id->data;
909 if (ramc) {
910 if (!standby)
911 standby = ramc->idle;
912 soc_pm.data.memctrl = ramc->memctrl;
913 }
914
915 idx++;
916 }
917
918 if (!idx) {
919 pr_err("unable to find compatible ram controller node in dtb\n");
920 ret = -ENODEV;
921 goto unmap_ramc;
922 }
923
924 /* Lookup for DDR PHY node, if any. */
925 for_each_matching_node_and_match(np, ramc_phy_ids, &of_id) {
926 soc_pm.data.ramc_phy = of_iomap(np, 0);
927 if (!soc_pm.data.ramc_phy) {
928 pr_err("unable to map ramc phy cpu registers\n");
929 ret = -ENOMEM;
930 of_node_put(np);
931 goto unmap_ramc;
932 }
933 }
934
935 if (phy_mandatory && !soc_pm.data.ramc_phy) {
936 pr_err("DDR PHY is mandatory!\n");
937 ret = -ENODEV;
938 goto unmap_ramc;
939 }
940
941 if (!standby) {
942 pr_warn("ramc no standby function available\n");
943 return 0;
944 }
945
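/* Hand the selected standby callback to the cpuidle-at91 driver as platform data. */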
946 at91_cpuidle_device.dev.platform_data = standby;
947
948 return 0;
949
950 unmap_ramc:
951 while (idx)
952 iounmap(soc_pm.data.ramc[--idx]);
953
954 return ret;
955 }
956
static void at91rm9200_idle(void)
958 {
959 /*
960 * Disable the processor clock. The processor will be automatically
961 * re-enabled by an interrupt or by a reset.
962 */
963 writel(AT91_PMC_PCK, soc_pm.data.pmc + AT91_PMC_SCDR);
964 }
965
static void at91sam9_idle(void)
967 {
968 writel(AT91_PMC_PCK, soc_pm.data.pmc + AT91_PMC_SCDR);
969 cpu_do_idle();
970 }
971
static void __init at91_pm_sram_init(void)
973 {
974 struct gen_pool *sram_pool;
975 phys_addr_t sram_pbase;
976 unsigned long sram_base;
977 struct device_node *node;
978 struct platform_device *pdev = NULL;
979
980 for_each_compatible_node(node, NULL, "mmio-sram") {
981 pdev = of_find_device_by_node(node);
982 if (pdev) {
983 of_node_put(node);
984 break;
985 }
986 }
987
988 if (!pdev) {
989 pr_warn("%s: failed to find sram device!\n", __func__);
990 return;
991 }
992
993 sram_pool = gen_pool_get(&pdev->dev, NULL);
994 if (!sram_pool) {
995 pr_warn("%s: sram pool unavailable!\n", __func__);
996 goto out_put_device;
997 }
998
999 sram_base = gen_pool_alloc(sram_pool, at91_pm_suspend_in_sram_sz);
1000 if (!sram_base) {
1001 pr_warn("%s: unable to alloc sram!\n", __func__);
1002 goto out_put_device;
1003 }
1004
1005 sram_pbase = gen_pool_virt_to_phys(sram_pool, sram_base);
1006 at91_suspend_sram_fn = __arm_ioremap_exec(sram_pbase,
1007 at91_pm_suspend_in_sram_sz, false);
1008 if (!at91_suspend_sram_fn) {
1009 pr_warn("SRAM: Could not map\n");
1010 goto out_put_device;
1011 }
1012
1013 /* Copy the pm suspend handler to SRAM */
1014 at91_suspend_sram_fn = fncpy(at91_suspend_sram_fn,
1015 &at91_pm_suspend_in_sram, at91_pm_suspend_in_sram_sz);
1016 return;
1017
1018 out_put_device:
1019 put_device(&pdev->dev);
1020 return;
1021 }
1022
static bool __init at91_is_pm_mode_active(int pm_mode)
1024 {
1025 return (soc_pm.data.standby_mode == pm_mode ||
1026 soc_pm.data.suspend_mode == pm_mode);
1027 }
1028
static int __init at91_pm_backup_scan_memcs(unsigned long node,
					    const char *uname, int depth,
					    void *data)
1032 {
1033 const char *type;
1034 const __be32 *reg;
1035 int *located = data;
1036 int size;
1037
1038 /* Memory node already located. */
1039 if (*located)
1040 return 0;
1041
1042 type = of_get_flat_dt_prop(node, "device_type", NULL);
1043
1044 /* We are scanning "memory" nodes only. */
1045 if (!type || strcmp(type, "memory"))
1046 return 0;
1047
1048 reg = of_get_flat_dt_prop(node, "reg", &size);
1049 if (reg) {
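/*
 * Remember the virtual address of the first memory bank; its first words
 * are backed up in ddr_phy_calibration[] before entering backup mode.
 */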
1050 soc_pm.memcs = __va((phys_addr_t)be32_to_cpu(*reg));
1051 *located = 1;
1052 }
1053
1054 return 0;
1055 }
1056
static int __init at91_pm_backup_init(void)
1058 {
1059 struct gen_pool *sram_pool;
1060 struct device_node *np;
1061 struct platform_device *pdev;
1062 int ret = -ENODEV, located = 0;
1063
1064 if (!IS_ENABLED(CONFIG_SOC_SAMA5D2) &&
1065 !IS_ENABLED(CONFIG_SOC_SAMA7G5))
1066 return -EPERM;
1067
1068 if (!at91_is_pm_mode_active(AT91_PM_BACKUP))
1069 return 0;
1070
1071 np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-securam");
1072 if (!np)
1073 return ret;
1074
1075 pdev = of_find_device_by_node(np);
1076 of_node_put(np);
1077 if (!pdev) {
1078 pr_warn("%s: failed to find securam device!\n", __func__);
1079 return ret;
1080 }
1081
1082 sram_pool = gen_pool_get(&pdev->dev, NULL);
1083 if (!sram_pool) {
1084 pr_warn("%s: securam pool unavailable!\n", __func__);
1085 goto securam_fail;
1086 }
1087
1088 soc_pm.bu = (void *)gen_pool_alloc(sram_pool, sizeof(struct at91_pm_bu));
1089 if (!soc_pm.bu) {
1090 pr_warn("%s: unable to alloc securam!\n", __func__);
1091 ret = -ENOMEM;
1092 goto securam_fail;
1093 }
1094
1095 soc_pm.bu->suspended = 0;
1096 soc_pm.bu->canary = __pa_symbol(&canary);
1097 soc_pm.bu->resume = __pa_symbol(cpu_resume);
1098 if (soc_pm.data.ramc_phy) {
1099 of_scan_flat_dt(at91_pm_backup_scan_memcs, &located);
1100 if (!located)
1101 goto securam_fail;
1102 }
1103
1104 return 0;
1105
1106 securam_fail:
1107 put_device(&pdev->dev);
1108 return ret;
1109 }
1110
static void __init at91_pm_secure_init(void)
1112 {
1113 int suspend_mode;
1114 struct arm_smccc_res res;
1115
1116 suspend_mode = soc_pm.data.suspend_mode;
1117
1118 res = sam_smccc_call(SAMA5_SMC_SIP_SET_SUSPEND_MODE,
1119 suspend_mode, 0);
1120 if (res.a0 == 0) {
1121 pr_info("AT91: Secure PM: suspend mode set to %s\n",
1122 pm_modes[suspend_mode].pattern);
1123 soc_pm.data.mode = suspend_mode;
1124 return;
1125 }
1126
1127 pr_warn("AT91: Secure PM: %s mode not supported !\n",
1128 pm_modes[suspend_mode].pattern);
1129
1130 res = sam_smccc_call(SAMA5_SMC_SIP_GET_SUSPEND_MODE, 0, 0);
1131 if (res.a0 == 0) {
1132 pr_warn("AT91: Secure PM: failed to get default mode\n");
1133 soc_pm.data.mode = -1;
1134 return;
1135 }
1136
1137 pr_info("AT91: Secure PM: using default suspend mode %s\n",
1138 pm_modes[suspend_mode].pattern);
1139
1140 soc_pm.data.suspend_mode = res.a1;
1141 soc_pm.data.mode = soc_pm.data.suspend_mode;
1142 }

static const struct of_device_id atmel_shdwc_ids[] = {
1144 { .compatible = "atmel,sama5d2-shdwc" },
1145 { .compatible = "microchip,sam9x60-shdwc" },
1146 { .compatible = "microchip,sama7g5-shdwc" },
1147 { /* sentinel. */ }
1148 };
1149
1150 static const struct of_device_id gmac_ids[] __initconst = {
1151 { .compatible = "atmel,sama5d3-gem" },
1152 { .compatible = "atmel,sama5d2-gem" },
1153 { .compatible = "atmel,sama5d29-gem" },
1154 { .compatible = "microchip,sama7g5-gem" },
1155 { },
1156 };
1157
1158 static const struct of_device_id emac_ids[] __initconst = {
1159 { .compatible = "atmel,sama5d3-macb" },
1160 { .compatible = "microchip,sama7g5-emac" },
1161 { },
1162 };
1163
1164 /*
1165 * Replaces _mode_to_replace with a supported mode that doesn't depend
1166 * on controller pointed by _map_bitmask
1167 * @_maps: u32 array containing AT91_PM_IOMAP() flags and indexed by AT91
1168 * PM mode
1169 * @_map_bitmask: AT91_PM_IOMAP() bitmask; if _mode_to_replace depends on
1170 * controller represented by _map_bitmask, _mode_to_replace needs to be
1171 * updated
1172 * @_mode_to_replace: standby_mode or suspend_mode that need to be
1173 * updated
1174 * @_mode_to_check: standby_mode or suspend_mode; this is needed here
1175 * to avoid having standby_mode and suspend_mode set with the same AT91
1176 * PM mode
1177 */
1178 #define AT91_PM_REPLACE_MODE(_maps, _map_bitmask, _mode_to_replace, \
1179 _mode_to_check) \
1180 do { \
1181 if (((_maps)[(_mode_to_replace)]) & (_map_bitmask)) { \
1182 int _mode_to_use, _mode_complementary; \
1183 /* Use ULP0 if it doesn't need _map_bitmask. */ \
1184 if (!((_maps)[AT91_PM_ULP0] & (_map_bitmask))) {\
1185 _mode_to_use = AT91_PM_ULP0; \
1186 _mode_complementary = AT91_PM_STANDBY; \
1187 } else { \
1188 _mode_to_use = AT91_PM_STANDBY; \
1189 _mode_complementary = AT91_PM_STANDBY; \
1190 } \
1191 \
1192 if ((_mode_to_check) != _mode_to_use) \
1193 (_mode_to_replace) = _mode_to_use; \
1194 else \
1195 (_mode_to_replace) = _mode_complementary;\
1196 } \
1197 } while (0)
1198
1199 /*
1200 * Replaces standby and suspend modes with default supported modes:
1201 * ULP0 and STANDBY.
1202 * @_maps: u32 array indexed by AT91 PM mode containing AT91_PM_IOMAP()
1203 * flags
1204 * @_map: controller specific name; standby and suspend mode need to be
1205 * replaced in order to not depend on this controller
1206 */
1207 #define AT91_PM_REPLACE_MODES(_maps, _map) \
1208 do { \
1209 AT91_PM_REPLACE_MODE((_maps), BIT(AT91_PM_IOMAP_##_map),\
1210 (soc_pm.data.standby_mode), \
1211 (soc_pm.data.suspend_mode)); \
1212 AT91_PM_REPLACE_MODE((_maps), BIT(AT91_PM_IOMAP_##_map),\
1213 (soc_pm.data.suspend_mode), \
1214 (soc_pm.data.standby_mode)); \
1215 } while (0)
1216
static int __init at91_pm_get_eth_clks(struct device_node *np,
				       struct clk_bulk_data *clks)
1219 {
1220 clks[AT91_PM_ETH_PCLK].clk = of_clk_get_by_name(np, "pclk");
1221 if (IS_ERR(clks[AT91_PM_ETH_PCLK].clk))
1222 return PTR_ERR(clks[AT91_PM_ETH_PCLK].clk);
1223
1224 clks[AT91_PM_ETH_HCLK].clk = of_clk_get_by_name(np, "hclk");
1225 if (IS_ERR(clks[AT91_PM_ETH_HCLK].clk))
1226 return PTR_ERR(clks[AT91_PM_ETH_HCLK].clk);
1227
1228 return 0;
1229 }
1230
static int __init at91_pm_eth_clks_empty(struct clk_bulk_data *clks)
1232 {
1233 return IS_ERR(clks[AT91_PM_ETH_PCLK].clk) ||
1234 IS_ERR(clks[AT91_PM_ETH_HCLK].clk);
1235 }
1236
static void __init at91_pm_modes_init(const u32 *maps, int len)
1238 {
1239 struct at91_pm_quirk_eth *gmac = &soc_pm.quirks.eth[AT91_PM_G_ETH];
1240 struct at91_pm_quirk_eth *emac = &soc_pm.quirks.eth[AT91_PM_E_ETH];
1241 struct device_node *np;
1242 int ret;
1243
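/* If backup mode cannot be prepared (e.g. no securam), fall back to ULP0. */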
1244 ret = at91_pm_backup_init();
1245 if (ret) {
1246 if (soc_pm.data.standby_mode == AT91_PM_BACKUP)
1247 soc_pm.data.standby_mode = AT91_PM_ULP0;
1248 if (soc_pm.data.suspend_mode == AT91_PM_BACKUP)
1249 soc_pm.data.suspend_mode = AT91_PM_ULP0;
1250 }
1251
1252 if (maps[soc_pm.data.standby_mode] & AT91_PM_IOMAP(SHDWC) ||
1253 maps[soc_pm.data.suspend_mode] & AT91_PM_IOMAP(SHDWC)) {
1254 np = of_find_matching_node(NULL, atmel_shdwc_ids);
1255 if (!np) {
1256 pr_warn("%s: failed to find shdwc!\n", __func__);
1257 AT91_PM_REPLACE_MODES(maps, SHDWC);
1258 } else {
1259 soc_pm.data.shdwc = of_iomap(np, 0);
1260 of_node_put(np);
1261 }
1262 }
1263
1264 if (maps[soc_pm.data.standby_mode] & AT91_PM_IOMAP(SFRBU) ||
1265 maps[soc_pm.data.suspend_mode] & AT91_PM_IOMAP(SFRBU)) {
1266 np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-sfrbu");
1267 if (!np) {
1268 pr_warn("%s: failed to find sfrbu!\n", __func__);
1269 AT91_PM_REPLACE_MODES(maps, SFRBU);
1270 } else {
1271 soc_pm.data.sfrbu = of_iomap(np, 0);
1272 of_node_put(np);
1273 }
1274 }
1275
1276 if ((at91_is_pm_mode_active(AT91_PM_ULP1) ||
1277 at91_is_pm_mode_active(AT91_PM_ULP0) ||
1278 at91_is_pm_mode_active(AT91_PM_ULP0_FAST)) &&
1279 (maps[soc_pm.data.standby_mode] & AT91_PM_IOMAP(ETHC) ||
1280 maps[soc_pm.data.suspend_mode] & AT91_PM_IOMAP(ETHC))) {
1281 np = of_find_matching_node(NULL, gmac_ids);
1282 if (!np) {
1283 np = of_find_matching_node(NULL, emac_ids);
1284 if (np)
1285 goto get_emac_clks;
1286 AT91_PM_REPLACE_MODES(maps, ETHC);
1287 goto unmap_unused_nodes;
1288 } else {
1289 gmac->np = np;
1290 at91_pm_get_eth_clks(np, gmac->clks);
1291 }
1292
1293 np = of_find_matching_node(NULL, emac_ids);
1294 if (!np) {
1295 if (at91_pm_eth_clks_empty(gmac->clks))
1296 AT91_PM_REPLACE_MODES(maps, ETHC);
1297 } else {
1298 get_emac_clks:
1299 emac->np = np;
1300 ret = at91_pm_get_eth_clks(np, emac->clks);
1301 if (ret && at91_pm_eth_clks_empty(gmac->clks)) {
1302 of_node_put(gmac->np);
1303 of_node_put(emac->np);
1304 gmac->np = NULL;
1305 emac->np = NULL;
1306 }
1307 }
1308 }
1309
1310 unmap_unused_nodes:
1311 /* Unmap all unnecessary. */
1312 if (soc_pm.data.shdwc &&
1313 !(maps[soc_pm.data.standby_mode] & AT91_PM_IOMAP(SHDWC) ||
1314 maps[soc_pm.data.suspend_mode] & AT91_PM_IOMAP(SHDWC))) {
1315 iounmap(soc_pm.data.shdwc);
1316 soc_pm.data.shdwc = NULL;
1317 }
1318
1319 if (soc_pm.data.sfrbu &&
1320 !(maps[soc_pm.data.standby_mode] & AT91_PM_IOMAP(SFRBU) ||
1321 maps[soc_pm.data.suspend_mode] & AT91_PM_IOMAP(SFRBU))) {
1322 iounmap(soc_pm.data.sfrbu);
1323 soc_pm.data.sfrbu = NULL;
1324 }
1325
1326 return;
1327 }
1328
1329 struct pmc_info {
1330 unsigned long uhp_udp_mask;
1331 unsigned long mckr;
1332 unsigned long version;
1333 };
1334
1335 static const struct pmc_info pmc_infos[] __initconst = {
1336 {
1337 .uhp_udp_mask = AT91RM9200_PMC_UHP | AT91RM9200_PMC_UDP,
1338 .mckr = 0x30,
1339 .version = AT91_PMC_V1,
1340 },
1341
1342 {
1343 .uhp_udp_mask = AT91SAM926x_PMC_UHP | AT91SAM926x_PMC_UDP,
1344 .mckr = 0x30,
1345 .version = AT91_PMC_V1,
1346 },
1347 {
1348 .uhp_udp_mask = AT91SAM926x_PMC_UHP,
1349 .mckr = 0x30,
1350 .version = AT91_PMC_V1,
1351 },
1352 { .uhp_udp_mask = 0,
1353 .mckr = 0x30,
1354 .version = AT91_PMC_V1,
1355 },
1356 {
1357 .uhp_udp_mask = AT91SAM926x_PMC_UHP | AT91SAM926x_PMC_UDP,
1358 .mckr = 0x28,
1359 .version = AT91_PMC_V2,
1360 },
1361 {
1362 .mckr = 0x28,
1363 .version = AT91_PMC_V2,
1364 },
1365
1366 };
1367
1368 static const struct of_device_id atmel_pmc_ids[] __initconst = {
1369 { .compatible = "atmel,at91rm9200-pmc", .data = &pmc_infos[0] },
1370 { .compatible = "atmel,at91sam9260-pmc", .data = &pmc_infos[1] },
1371 { .compatible = "atmel,at91sam9261-pmc", .data = &pmc_infos[1] },
1372 { .compatible = "atmel,at91sam9263-pmc", .data = &pmc_infos[1] },
1373 { .compatible = "atmel,at91sam9g45-pmc", .data = &pmc_infos[2] },
1374 { .compatible = "atmel,at91sam9n12-pmc", .data = &pmc_infos[1] },
1375 { .compatible = "atmel,at91sam9rl-pmc", .data = &pmc_infos[3] },
1376 { .compatible = "atmel,at91sam9x5-pmc", .data = &pmc_infos[1] },
1377 { .compatible = "atmel,sama5d3-pmc", .data = &pmc_infos[1] },
1378 { .compatible = "atmel,sama5d4-pmc", .data = &pmc_infos[1] },
1379 { .compatible = "atmel,sama5d2-pmc", .data = &pmc_infos[1] },
1380 { .compatible = "microchip,sam9x60-pmc", .data = &pmc_infos[4] },
1381 { .compatible = "microchip,sam9x7-pmc", .data = &pmc_infos[4] },
1382 { .compatible = "microchip,sama7g5-pmc", .data = &pmc_infos[5] },
1383 { /* sentinel */ },
1384 };
1385
static void __init at91_pm_modes_validate(const int *modes, int len)
1387 {
1388 u8 i, standby = 0, suspend = 0;
1389 int mode;
1390
1391 for (i = 0; i < len; i++) {
1392 if (standby && suspend)
1393 break;
1394
1395 if (modes[i] == soc_pm.data.standby_mode && !standby) {
1396 standby = 1;
1397 continue;
1398 }
1399
1400 if (modes[i] == soc_pm.data.suspend_mode && !suspend) {
1401 suspend = 1;
1402 continue;
1403 }
1404 }
1405
1406 if (!standby) {
1407 if (soc_pm.data.suspend_mode == AT91_PM_STANDBY)
1408 mode = AT91_PM_ULP0;
1409 else
1410 mode = AT91_PM_STANDBY;
1411
1412 pr_warn("AT91: PM: %s mode not supported! Using %s.\n",
1413 pm_modes[soc_pm.data.standby_mode].pattern,
1414 pm_modes[mode].pattern);
1415 soc_pm.data.standby_mode = mode;
1416 }
1417
1418 if (!suspend) {
1419 if (soc_pm.data.standby_mode == AT91_PM_ULP0)
1420 mode = AT91_PM_STANDBY;
1421 else
1422 mode = AT91_PM_ULP0;
1423
1424 pr_warn("AT91: PM: %s mode not supported! Using %s.\n",
1425 pm_modes[soc_pm.data.suspend_mode].pattern,
1426 pm_modes[mode].pattern);
1427 soc_pm.data.suspend_mode = mode;
1428 }
1429 }
1430
static void __init at91_pm_init(void (*pm_idle)(void))
1432 {
1433 struct device_node *pmc_np;
1434 const struct of_device_id *of_id;
1435 const struct pmc_info *pmc;
1436
1437 if (at91_cpuidle_device.dev.platform_data)
1438 platform_device_register(&at91_cpuidle_device);
1439
1440 pmc_np = of_find_matching_node_and_match(NULL, atmel_pmc_ids, &of_id);
1441 soc_pm.data.pmc = of_iomap(pmc_np, 0);
1442 of_node_put(pmc_np);
1443 if (!soc_pm.data.pmc) {
1444 pr_err("AT91: PM not supported, PMC not found\n");
1445 return;
1446 }
1447
1448 pmc = of_id->data;
1449 soc_pm.data.uhp_udp_mask = pmc->uhp_udp_mask;
1450 soc_pm.data.pmc_mckr_offset = pmc->mckr;
1451 soc_pm.data.pmc_version = pmc->version;
1452
1453 if (pm_idle)
1454 arm_pm_idle = pm_idle;
1455
1456 at91_pm_sram_init();
1457
1458 if (at91_suspend_sram_fn) {
1459 suspend_set_ops(&at91_pm_ops);
1460 pr_info("AT91: PM: standby: %s, suspend: %s\n",
1461 pm_modes[soc_pm.data.standby_mode].pattern,
1462 pm_modes[soc_pm.data.suspend_mode].pattern);
1463 } else {
1464 pr_info("AT91: PM not supported, due to no SRAM allocated\n");
1465 }
1466 }
1467
void __init at91rm9200_pm_init(void)
1469 {
1470 int ret;
1471
1472 if (!IS_ENABLED(CONFIG_SOC_AT91RM9200))
1473 return;
1474
1475 /*
1476 * Force STANDBY and ULP0 mode to avoid calling
1477 * at91_pm_modes_validate() which may increase booting time.
1478 * Platform supports anyway only STANDBY and ULP0 modes.
1479 */
1480 soc_pm.data.standby_mode = AT91_PM_STANDBY;
1481 soc_pm.data.suspend_mode = AT91_PM_ULP0;
1482
1483 ret = at91_dt_ramc(false);
1484 if (ret)
1485 return;
1486
1487 /*
1488 * AT91RM9200 SDRAM low-power mode cannot be used with self-refresh.
1489 */
1490 at91_ramc_write(0, AT91_MC_SDRAMC_LPR, 0);
1491
1492 at91_pm_init(at91rm9200_idle);
1493 }
1494
void __init sam9x60_pm_init(void)
1496 {
1497 static const int modes[] __initconst = {
1498 AT91_PM_STANDBY, AT91_PM_ULP0, AT91_PM_ULP0_FAST, AT91_PM_ULP1,
1499 };
static const u32 iomaps[] __initconst = {
1501 [AT91_PM_ULP1] = AT91_PM_IOMAP(SHDWC),
1502 };
1503 int ret;
1504
1505 if (!IS_ENABLED(CONFIG_SOC_SAM9X60))
1506 return;
1507
1508 at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
1509 at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps));
1510 ret = at91_dt_ramc(false);
1511 if (ret)
1512 return;
1513
1514 at91_pm_init(NULL);
1515
1516 soc_pm.ws_ids = sam9x60_ws_ids;
1517 soc_pm.config_pmc_ws = at91_sam9x60_config_pmc_ws;
1518 }
1519
void __init sam9x7_pm_init(void)
1521 {
1522 static const int modes[] __initconst = {
1523 AT91_PM_STANDBY, AT91_PM_ULP0,
1524 };
1525 int ret;
1526
1527 if (!IS_ENABLED(CONFIG_SOC_SAM9X7))
1528 return;
1529
1530 at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
1531 ret = at91_dt_ramc(false);
1532 if (ret)
1533 return;
1534
1535 at91_pm_init(NULL);
1536
1537 soc_pm.ws_ids = sam9x7_ws_ids;
1538 soc_pm.config_pmc_ws = at91_sam9x60_config_pmc_ws;
1539 }
1540
void __init at91sam9_pm_init(void)
1542 {
1543 int ret;
1544
1545 if (!IS_ENABLED(CONFIG_SOC_AT91SAM9))
1546 return;
1547
1548 /*
1549 * Force STANDBY and ULP0 mode to avoid calling
1550 * at91_pm_modes_validate() which may increase booting time.
1551 * Platform supports anyway only STANDBY and ULP0 modes.
1552 */
1553 soc_pm.data.standby_mode = AT91_PM_STANDBY;
1554 soc_pm.data.suspend_mode = AT91_PM_ULP0;
1555
1556 ret = at91_dt_ramc(false);
1557 if (ret)
1558 return;
1559
1560 at91_pm_init(at91sam9_idle);
1561 }
1562
void __init sama5_pm_init(void)
1564 {
1565 static const int modes[] __initconst = {
1566 AT91_PM_STANDBY, AT91_PM_ULP0, AT91_PM_ULP0_FAST,
1567 };
1568 static const u32 iomaps[] __initconst = {
1569 [AT91_PM_ULP0] = AT91_PM_IOMAP(ETHC),
1570 [AT91_PM_ULP0_FAST] = AT91_PM_IOMAP(ETHC),
1571 };
1572 int ret;
1573
1574 if (!IS_ENABLED(CONFIG_SOC_SAMA5))
1575 return;
1576
1577 at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
1578 at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps));
1579 ret = at91_dt_ramc(false);
1580 if (ret)
1581 return;
1582
1583 at91_pm_init(NULL);
1584
1585 /* Quirks applies to ULP0, ULP0 fast and ULP1 modes. */
1586 soc_pm.quirks.eth[AT91_PM_G_ETH].modes = BIT(AT91_PM_ULP0) |
1587 BIT(AT91_PM_ULP0_FAST) |
1588 BIT(AT91_PM_ULP1);
1589 /* Do not suspend in ULP0, ULP0 fast if GETH is the only wakeup source. */
1590 soc_pm.quirks.eth[AT91_PM_G_ETH].dns_modes = BIT(AT91_PM_ULP0) |
1591 BIT(AT91_PM_ULP0_FAST);
1592 }
1593
void __init sama5d2_pm_init(void)
1595 {
1596 static const int modes[] __initconst = {
1597 AT91_PM_STANDBY, AT91_PM_ULP0, AT91_PM_ULP0_FAST, AT91_PM_ULP1,
1598 AT91_PM_BACKUP,
1599 };
1600 static const u32 iomaps[] __initconst = {
1601 [AT91_PM_ULP0] = AT91_PM_IOMAP(ETHC),
1602 [AT91_PM_ULP0_FAST] = AT91_PM_IOMAP(ETHC),
1603 [AT91_PM_ULP1] = AT91_PM_IOMAP(SHDWC) |
1604 AT91_PM_IOMAP(ETHC),
1605 [AT91_PM_BACKUP] = AT91_PM_IOMAP(SHDWC) |
1606 AT91_PM_IOMAP(SFRBU),
1607 };
1608 int ret;
1609
1610 if (!IS_ENABLED(CONFIG_SOC_SAMA5D2))
1611 return;
1612
1613 if (IS_ENABLED(CONFIG_ATMEL_SECURE_PM)) {
1614 pr_warn("AT91: Secure PM: ignoring standby mode\n");
1615 at91_pm_secure_init();
1616 return;
1617 }
1618
1619 at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
1620 at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps));
1621 ret = at91_dt_ramc(false);
1622 if (ret)
1623 return;
1624
1625 at91_pm_init(NULL);
1626
1627 soc_pm.ws_ids = sama5d2_ws_ids;
1628 soc_pm.config_shdwc_ws = at91_sama5d2_config_shdwc_ws;
1629 soc_pm.config_pmc_ws = at91_sama5d2_config_pmc_ws;
1630
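/* Key that must accompany writes to the PSWBU register (see at91_pm_switch_ba_to_auto()). */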
1631 soc_pm.sfrbu_regs.pswbu.key = (0x4BD20C << 8);
1632 soc_pm.sfrbu_regs.pswbu.ctrl = BIT(0);
1633 soc_pm.sfrbu_regs.pswbu.softsw = BIT(1);
1634 soc_pm.sfrbu_regs.pswbu.state = BIT(3);
1635
1636 /* Quirk applies to ULP0, ULP0 fast and ULP1 modes. */
1637 soc_pm.quirks.eth[AT91_PM_G_ETH].modes = BIT(AT91_PM_ULP0) |
1638 BIT(AT91_PM_ULP0_FAST) |
1639 BIT(AT91_PM_ULP1);
1640 /*
1641 * Do not suspend in ULP0, ULP0 fast if GETH is the only wakeup
1642 * source.
1643 */
1644 soc_pm.quirks.eth[AT91_PM_G_ETH].dns_modes = BIT(AT91_PM_ULP0) |
1645 BIT(AT91_PM_ULP0_FAST);
1646 }
1647
void __init sama7_pm_init(void)
1649 {
1650 static const int modes[] __initconst = {
1651 AT91_PM_STANDBY, AT91_PM_ULP0, AT91_PM_ULP1, AT91_PM_BACKUP,
1652 };
1653 static const u32 iomaps[] __initconst = {
1654 [AT91_PM_ULP0] = AT91_PM_IOMAP(SFRBU),
1655 [AT91_PM_ULP1] = AT91_PM_IOMAP(SFRBU) |
1656 AT91_PM_IOMAP(SHDWC) |
1657 AT91_PM_IOMAP(ETHC),
1658 [AT91_PM_BACKUP] = AT91_PM_IOMAP(SFRBU) |
1659 AT91_PM_IOMAP(SHDWC),
1660 };
1661 int ret;
1662
1663 if (!IS_ENABLED(CONFIG_SOC_SAMA7))
1664 return;
1665
1666 at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
1667
1668 ret = at91_dt_ramc(true);
1669 if (ret)
1670 return;
1671
1672 at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps));
1673 at91_pm_init(NULL);
1674
1675 soc_pm.ws_ids = sama7g5_ws_ids;
1676 soc_pm.config_pmc_ws = at91_sam9x60_config_pmc_ws;
1677
1678 soc_pm.sfrbu_regs.pswbu.key = (0x4BD20C << 8);
1679 soc_pm.sfrbu_regs.pswbu.ctrl = BIT(0);
1680 soc_pm.sfrbu_regs.pswbu.softsw = BIT(1);
1681 soc_pm.sfrbu_regs.pswbu.state = BIT(2);
1682
1683 /* Quirks applies to ULP1 for both Ethernet interfaces. */
1684 soc_pm.quirks.eth[AT91_PM_E_ETH].modes = BIT(AT91_PM_ULP1);
1685 soc_pm.quirks.eth[AT91_PM_G_ETH].modes = BIT(AT91_PM_ULP1);
1686 }
1687
static int __init at91_pm_modes_select(char *str)
1689 {
1690 char *s;
1691 substring_t args[MAX_OPT_ARGS];
1692 int standby, suspend;
1693
1694 if (!str)
1695 return 0;
1696
1697 s = strsep(&str, ",");
1698 standby = match_token(s, pm_modes, args);
1699 if (standby < 0)
1700 return 0;
1701
1702 suspend = match_token(str, pm_modes, args);
1703 if (suspend < 0)
1704 return 0;
1705
1706 soc_pm.data.standby_mode = standby;
1707 soc_pm.data.suspend_mode = suspend;
1708
1709 return 0;
1710 }
1711 early_param("atmel.pm_modes", at91_pm_modes_select);
1712