// SPDX-License-Identifier: GPL-2.0-only
/*
 * ARM-specific support for Broadcom STB S2/S3/S5 power management
 *
 * S2: clock gate CPUs and as many peripherals as possible
 * S3: power off all of the chip except the Always ON (AON) island; keep DDR in
 *     self-refresh
 * S5: (a.k.a. S3 cold boot) much like S3, except DDR is powered down, so we
 *     treat this mode like a soft power-off, with wakeup allowed from AON
 *
 * Copyright © 2014-2017 Broadcom
 */

#define pr_fmt(fmt) "brcmstb-pm: " fmt

#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/printk.h>
#include <linux/proc_fs.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/suspend.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/soc/brcmstb/brcmstb.h>

#include <asm/fncpy.h>
#include <asm/setup.h>
#include <asm/suspend.h>

#include "pm.h"
#include "aon_defs.h"

#define SHIMPHY_DDR_PAD_CNTRL		0x8c

/* Method #0 */
#define SHIMPHY_PAD_PLL_SEQUENCE	BIT(8)
#define SHIMPHY_PAD_GATE_PLL_S3		BIT(9)

/* Method #1 */
#define PWRDWN_SEQ_NO_SEQUENCING	0
#define PWRDWN_SEQ_HOLD_CHANNEL		1
#define PWRDWN_SEQ_RESET_PLL		2
#define PWRDWN_SEQ_POWERDOWN_PLL	3

#define SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK	0x00f00000
#define SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT	20
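/*
 * The PWRDWN_SEQ field occupies bits 23:20 of SHIMPHY_DDR_PAD_CNTRL, e.g.
 * PWRDWN_SEQ_POWERDOWN_PLL << SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT == 0x00300000.
 */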

#define DDR_FORCE_CKE_RST_N		BIT(3)
#define DDR_PHY_RST_N			BIT(2)
#define DDR_PHY_CKE			BIT(1)

#define DDR_PHY_NO_CHANNEL		0xffffffff

#define MAX_NUM_MEMC			3

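/* Per-MEMC register blocks: DDR PHY, SHIM PHY pad control, and sequencer */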
struct brcmstb_memc {
	void __iomem *ddr_phy_base;
	void __iomem *ddr_shimphy_base;
	void __iomem *ddr_ctrl;
};

struct brcmstb_pm_control {
	void __iomem *aon_ctrl_base;
	void __iomem *aon_sram;
	struct brcmstb_memc memcs[MAX_NUM_MEMC];

	void __iomem *boot_sram;
	size_t boot_sram_len;

	bool support_warm_boot;
	size_t pll_status_offset;
	int num_memc;

	struct brcmstb_s3_params *s3_params;
	dma_addr_t s3_params_pa;
	int s3entry_method;
	u32 warm_boot_offset;
	u32 phy_a_standby_ctrl_offs;
	u32 phy_b_standby_ctrl_offs;
	bool needs_ddr_pad;
	struct platform_device *pdev;
};

enum bsp_initiate_command {
	BSP_CLOCK_STOP		= 0x00,
	BSP_GEN_RANDOM_KEY	= 0x4A,
	BSP_RESTORE_RANDOM_KEY	= 0x55,
	BSP_GEN_FIXED_KEY	= 0x63,
};

#define PM_INITIATE		0x01
#define PM_INITIATE_SUCCESS	0x00
#define PM_INITIATE_FAIL	0xfe
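/*
 * AON_CTRL_PM_INITIATE acts as a simple mailbox to the BSP firmware:
 * do_bsp_initiate_command() writes (cmd << 1) | PM_INITIATE, then polls
 * until the PM_INITIATE bit clears; the low byte then holds the status
 * (PM_INITIATE_SUCCESS or PM_INITIATE_FAIL).
 */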

static struct brcmstb_pm_control ctrl;

static int (*brcmstb_pm_do_s2_sram)(void __iomem *aon_ctrl_base,
				    void __iomem *ddr_phy_pll_status);

static int brcmstb_init_sram(struct device_node *dn)
{
	void __iomem *sram;
	struct resource res;
	int ret;

	ret = of_address_to_resource(dn, 0, &res);
	if (ret)
		return ret;

	/* Uncached, executable remapping of SRAM */
	sram = __arm_ioremap_exec(res.start, resource_size(&res), false);
	if (!sram)
		return -ENOMEM;

	ctrl.boot_sram = sram;
	ctrl.boot_sram_len = resource_size(&res);

	return 0;
}

static const struct of_device_id sram_dt_ids[] = {
	{ .compatible = "mmio-sram" },
	{ /* sentinel */ }
};

static int do_bsp_initiate_command(enum bsp_initiate_command cmd)
{
	void __iomem *base = ctrl.aon_ctrl_base;
	int ret;
	int timeo = 1000 * 1000; /* 1 second */

	writel_relaxed(0, base + AON_CTRL_PM_INITIATE);
	(void)readl_relaxed(base + AON_CTRL_PM_INITIATE);

	/* Go! */
	writel_relaxed((cmd << 1) | PM_INITIATE, base + AON_CTRL_PM_INITIATE);

	/*
	 * If firmware doesn't support the 'ack', then just assume it's done
	 * after 10ms. Note that this only works for command 0, BSP_CLOCK_STOP.
	 */
	if (of_machine_is_compatible("brcm,bcm74371a0")) {
		(void)readl_relaxed(base + AON_CTRL_PM_INITIATE);
		mdelay(10);
		return 0;
	}

	for (;;) {
		ret = readl_relaxed(base + AON_CTRL_PM_INITIATE);
		if (!(ret & PM_INITIATE))
			break;
		if (timeo <= 0) {
			pr_err("error: timeout waiting for BSP (%x)\n", ret);
			break;
		}
		timeo -= 50;
		udelay(50);
	}

	return (ret & 0xff) != PM_INITIATE_SUCCESS;
}

static int brcmstb_pm_handshake(void)
{
	void __iomem *base = ctrl.aon_ctrl_base;
	u32 tmp;
	int ret;

	/* BSP power handshake, v1 */
	tmp = readl_relaxed(base + AON_CTRL_HOST_MISC_CMDS);
	tmp &= ~1UL;
	writel_relaxed(tmp, base + AON_CTRL_HOST_MISC_CMDS);
	(void)readl_relaxed(base + AON_CTRL_HOST_MISC_CMDS);

	ret = do_bsp_initiate_command(BSP_CLOCK_STOP);
	if (ret)
		pr_err("BSP handshake failed\n");

	/*
	 * HACK: BSP may have internal race on the CLOCK_STOP command.
	 * Avoid touching the BSP for a few milliseconds.
	 */
	mdelay(3);

	return ret;
}

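/*
 * Read-modify-write SHIMPHY_DDR_PAD_CNTRL on every SHIM PHY. Note the
 * unusual mask semantics: 'mask' selects the bits to *preserve*, so
 * callers pass ~FIELD_MASK to clear a field before OR-ing in 'value'.
 */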
static inline void shimphy_set(u32 value, u32 mask)
{
	int i;

	if (!ctrl.needs_ddr_pad)
		return;

	for (i = 0; i < ctrl.num_memc; i++) {
		u32 tmp;

		tmp = readl_relaxed(ctrl.memcs[i].ddr_shimphy_base +
				    SHIMPHY_DDR_PAD_CNTRL);
		tmp = value | (tmp & mask);
		writel_relaxed(tmp, ctrl.memcs[i].ddr_shimphy_base +
			       SHIMPHY_DDR_PAD_CNTRL);
	}
	wmb(); /* Complete sequence in order. */
}

static inline void ddr_ctrl_set(bool warmboot)
{
	int i;

	for (i = 0; i < ctrl.num_memc; i++) {
		u32 tmp;

		tmp = readl_relaxed(ctrl.memcs[i].ddr_ctrl +
				    ctrl.warm_boot_offset);
		if (warmboot)
			tmp |= 1;
		else
			tmp &= ~1;	/* Cold boot */
		writel_relaxed(tmp, ctrl.memcs[i].ddr_ctrl +
			       ctrl.warm_boot_offset);
	}
	/* Complete sequence in order */
	wmb();
}

static inline void s3entry_method0(void)
{
	shimphy_set(SHIMPHY_PAD_GATE_PLL_S3 | SHIMPHY_PAD_PLL_SEQUENCE,
		    0xffffffff);
}

static inline void s3entry_method1(void)
{
	/*
	 * S3 Entry Sequence
	 * -----------------
	 * Step 1: SHIMPHY_ADDR_CNTL_0_DDR_PAD_CNTRL [ S3_PWRDWN_SEQ ] = 3
	 * Step 2: MEMC_DDR_0_WARM_BOOT [ WARM_BOOT ] = 1
	 */
	shimphy_set((PWRDWN_SEQ_POWERDOWN_PLL <<
		     SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
		    ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);

	ddr_ctrl_set(true);
}

static inline void s5entry_method1(void)
{
	int i;

	/*
	 * S5 Entry Sequence
	 * -----------------
	 * Step 1: SHIMPHY_ADDR_CNTL_0_DDR_PAD_CNTRL [ S3_PWRDWN_SEQ ] = 3
	 * Step 2: MEMC_DDR_0_WARM_BOOT [ WARM_BOOT ] = 0
	 * Step 3: DDR_PHY_CONTROL_REGS_[AB]_0_STANDBY_CONTROL[ CKE ] = 0
	 *	   DDR_PHY_CONTROL_REGS_[AB]_0_STANDBY_CONTROL[ RST_N ] = 0
	 */
	shimphy_set((PWRDWN_SEQ_POWERDOWN_PLL <<
		     SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
		    ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);

	ddr_ctrl_set(false);

	for (i = 0; i < ctrl.num_memc; i++) {
		u32 tmp;

		/* Step 3: Channel A (RST_N = CKE = 0) */
		tmp = readl_relaxed(ctrl.memcs[i].ddr_phy_base +
				    ctrl.phy_a_standby_ctrl_offs);
		tmp &= ~(DDR_PHY_RST_N | DDR_PHY_CKE);
		writel_relaxed(tmp, ctrl.memcs[i].ddr_phy_base +
			       ctrl.phy_a_standby_ctrl_offs);

		/* Step 3: Channel B, if present (RST_N = CKE = 0) */
		if (ctrl.phy_b_standby_ctrl_offs != DDR_PHY_NO_CHANNEL) {
			tmp = readl_relaxed(ctrl.memcs[i].ddr_phy_base +
					    ctrl.phy_b_standby_ctrl_offs);
			tmp &= ~(DDR_PHY_RST_N | DDR_PHY_CKE);
			writel_relaxed(tmp, ctrl.memcs[i].ddr_phy_base +
				       ctrl.phy_b_standby_ctrl_offs);
		}
	}
	/* Must complete */
	wmb();
}

/*
 * Run a Power Management State Machine (PMSM) shutdown command and put the CPU
 * into a low-power mode. With s3entry method 1 a single write of the command
 * suffices ('onewrite'); otherwise a second write with PM_PWR_DOWN set is
 * needed to start the power-down transition.
 */
static void brcmstb_do_pmsm_power_down(unsigned long base_cmd, bool onewrite)
{
	void __iomem *base = ctrl.aon_ctrl_base;

	if ((ctrl.s3entry_method == 1) && (base_cmd == PM_COLD_CONFIG))
		s5entry_method1();

	/* pm_start_pwrdn transition 0->1 */
	writel_relaxed(base_cmd, base + AON_CTRL_PM_CTRL);

	if (!onewrite) {
		(void)readl_relaxed(base + AON_CTRL_PM_CTRL);

		writel_relaxed(base_cmd | PM_PWR_DOWN, base + AON_CTRL_PM_CTRL);
		(void)readl_relaxed(base + AON_CTRL_PM_CTRL);
	}
	wfi();
}

/* Support S5 cold boot out of "poweroff" */
static void brcmstb_pm_poweroff(void)
{
	brcmstb_pm_handshake();

	/* Clear magic S3 warm-boot value */
	writel_relaxed(0, ctrl.aon_sram + AON_REG_MAGIC_FLAGS);
	(void)readl_relaxed(ctrl.aon_sram + AON_REG_MAGIC_FLAGS);

	/* Skip wait-for-interrupt signal; just use a countdown */
	writel_relaxed(0x10, ctrl.aon_ctrl_base + AON_CTRL_PM_CPU_WAIT_COUNT);
	(void)readl_relaxed(ctrl.aon_ctrl_base + AON_CTRL_PM_CPU_WAIT_COUNT);

	if (ctrl.s3entry_method == 1) {
		shimphy_set((PWRDWN_SEQ_POWERDOWN_PLL <<
			     SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
			    ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);
		ddr_ctrl_set(false);
		brcmstb_do_pmsm_power_down(M1_PM_COLD_CONFIG, true);
		return; /* We should never actually get here */
	}

	brcmstb_do_pmsm_power_down(PM_COLD_CONFIG, false);
}

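/*
 * Copy the standby routine into boot SRAM with fncpy(), which returns a
 * function pointer suitable for calling the copy in place (taking care of
 * the Thumb address bit).
 */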
static void *brcmstb_pm_copy_to_sram(void *fn, size_t len)
{
	unsigned int size = ALIGN(len, FNCPY_ALIGN);

	if (ctrl.boot_sram_len < size) {
		pr_err("standby code will not fit in SRAM\n");
		return NULL;
	}

	return fncpy(ctrl.boot_sram, fn, size);
}

/*
 * S2 suspend/resume picks up where we left off, so we must execute carefully
 * from SRAM, in order to allow DDR to come back up safely before we continue.
 */
static int brcmstb_pm_s2(void)
{
	/* A previous S3 can leave a value that is hazardous to S2, so reset it. */
	if (ctrl.s3entry_method == 1) {
		shimphy_set((PWRDWN_SEQ_NO_SEQUENCING <<
			     SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
			    ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);
		ddr_ctrl_set(false);
	}

	brcmstb_pm_do_s2_sram = brcmstb_pm_copy_to_sram(&brcmstb_pm_do_s2,
							brcmstb_pm_do_s2_sz);
	if (!brcmstb_pm_do_s2_sram)
		return -EINVAL;

	return brcmstb_pm_do_s2_sram(ctrl.aon_ctrl_base,
				     ctrl.memcs[0].ddr_phy_base +
				     ctrl.pll_status_offset);
}

/*
 * This function is called on a new stack, so don't allow inlining (which will
 * generate stack references on the old stack). It cannot be made static
 * because it is referenced by name from the inline asm in brcmstb_pm_do_s3().
 */
noinline int brcmstb_pm_s3_finish(void)
{
	struct brcmstb_s3_params *params = ctrl.s3_params;
	dma_addr_t params_pa = ctrl.s3_params_pa;
	phys_addr_t reentry = virt_to_phys(&cpu_resume_arm);
	enum bsp_initiate_command cmd;
	u32 flags;

	/*
	 * Clear the parameter structure, but not the DTU area, which has
	 * already been filled in. We know the DTU is at the end, so we can
	 * just subtract its size.
	 */
	memset(params, 0, sizeof(*params) - sizeof(params->dtu));

	flags = readl_relaxed(ctrl.aon_sram + AON_REG_MAGIC_FLAGS);

	flags &= S3_BOOTLOADER_RESERVED;
	flags |= S3_FLAG_NO_MEM_VERIFY;
	flags |= S3_FLAG_LOAD_RANDKEY;

	/* Load random / fixed key */
	if (flags & S3_FLAG_LOAD_RANDKEY)
		cmd = BSP_GEN_RANDOM_KEY;
	else
		cmd = BSP_GEN_FIXED_KEY;
	if (do_bsp_initiate_command(cmd)) {
		pr_info("key loading failed\n");
		return -EIO;
	}

	params->magic = BRCMSTB_S3_MAGIC;
	params->reentry = reentry;

	/* No more writes to DRAM */
	flush_cache_all();

	flags |= BRCMSTB_S3_MAGIC_SHORT;

	writel_relaxed(flags, ctrl.aon_sram + AON_REG_MAGIC_FLAGS);
	writel_relaxed(lower_32_bits(params_pa),
		       ctrl.aon_sram + AON_REG_CONTROL_LOW);
	writel_relaxed(upper_32_bits(params_pa),
		       ctrl.aon_sram + AON_REG_CONTROL_HIGH);

	switch (ctrl.s3entry_method) {
	case 0:
		s3entry_method0();
		brcmstb_do_pmsm_power_down(PM_WARM_CONFIG, false);
		break;
	case 1:
		s3entry_method1();
		brcmstb_do_pmsm_power_down(M1_PM_WARM_CONFIG, true);
		break;
	default:
		return -EINVAL;
	}

	/* Must have been interrupted from wfi()? */
	return -EINTR;
}

static int brcmstb_pm_do_s3(unsigned long sp)
{
	unsigned long save_sp;
	int ret;

	/* Call brcmstb_pm_s3_finish() on the SRAM stack passed in 'sp' */
	asm volatile (
		"mov	%[save], sp\n"		/* save the current stack pointer */
		"mov	sp, %[new]\n"		/* switch to the SRAM stack */
		"bl	brcmstb_pm_s3_finish\n"
		"mov	%[ret], r0\n"		/* collect the return value */
		"mov	%[new], sp\n"
		"mov	sp, %[save]\n"		/* restore the original stack */
		: [save] "=&r" (save_sp), [ret] "=&r" (ret)
		: [new] "r" (sp)
	);

	return ret;
}

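/*
 * Enter S3 via cpu_suspend(), running brcmstb_pm_s3_finish() on a stack at
 * the top of boot SRAM (stacks grow down), so that stack writes after the
 * final cache flush land in SRAM rather than DRAM.
 */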
static int brcmstb_pm_s3(void)
{
	void __iomem *sp = ctrl.boot_sram + ctrl.boot_sram_len;

	return cpu_suspend((unsigned long)sp, brcmstb_pm_do_s3);
}

static int brcmstb_pm_standby(bool deep_standby)
{
	int ret;

	if (brcmstb_pm_handshake())
		return -EIO;

	if (deep_standby)
		ret = brcmstb_pm_s3();
	else
		ret = brcmstb_pm_s2();
	if (ret)
		pr_err("%s: standby failed\n", __func__);

	return ret;
}

static int brcmstb_pm_enter(suspend_state_t state)
{
	int ret = -EINVAL;

	switch (state) {
	case PM_SUSPEND_STANDBY:
		ret = brcmstb_pm_standby(false);
		break;
	case PM_SUSPEND_MEM:
		ret = brcmstb_pm_standby(true);
		break;
	}

	return ret;
}

static int brcmstb_pm_valid(suspend_state_t state)
{
	switch (state) {
	case PM_SUSPEND_STANDBY:
		return true;
	case PM_SUSPEND_MEM:
		return ctrl.support_warm_boot;
	default:
		return false;
	}
}

static const struct platform_suspend_ops brcmstb_pm_ops = {
	.enter		= brcmstb_pm_enter,
	.valid		= brcmstb_pm_valid,
};

static const struct of_device_id aon_ctrl_dt_ids[] = {
	{ .compatible = "brcm,brcmstb-aon-ctrl" },
	{}
};

struct ddr_phy_ofdata {
	bool supports_warm_boot;
	size_t pll_status_offset;
	int s3entry_method;
	u32 warm_boot_offset;
	u32 phy_a_standby_ctrl_offs;
	u32 phy_b_standby_ctrl_offs;
};

static struct ddr_phy_ofdata ddr_phy_71_1 = {
	.supports_warm_boot = true,
	.pll_status_offset = 0x0c,
	.s3entry_method = 1,
	.warm_boot_offset = 0x2c,
	.phy_a_standby_ctrl_offs = 0x198,
	.phy_b_standby_ctrl_offs = DDR_PHY_NO_CHANNEL
};

static struct ddr_phy_ofdata ddr_phy_72_0 = {
	.supports_warm_boot = true,
	.pll_status_offset = 0x10,
	.s3entry_method = 1,
	.warm_boot_offset = 0x40,
	.phy_a_standby_ctrl_offs = 0x2a4,
	.phy_b_standby_ctrl_offs = 0x8a4
};

static struct ddr_phy_ofdata ddr_phy_225_1 = {
	.supports_warm_boot = false,
	.pll_status_offset = 0x4,
	.s3entry_method = 0
};

static struct ddr_phy_ofdata ddr_phy_240_1 = {
	.supports_warm_boot = true,
	.pll_status_offset = 0x4,
	.s3entry_method = 0
};

static const struct of_device_id ddr_phy_dt_ids[] = {
	{
		.compatible = "brcm,brcmstb-ddr-phy-v71.1",
		.data = &ddr_phy_71_1,
	},
	{
		.compatible = "brcm,brcmstb-ddr-phy-v72.0",
		.data = &ddr_phy_72_0,
	},
	{
		.compatible = "brcm,brcmstb-ddr-phy-v225.1",
		.data = &ddr_phy_225_1,
	},
	{
		.compatible = "brcm,brcmstb-ddr-phy-v240.1",
		.data = &ddr_phy_240_1,
	},
	{
		/* Same as v240.1, for the registers we care about */
		.compatible = "brcm,brcmstb-ddr-phy-v240.2",
		.data = &ddr_phy_240_1,
	},
	{}
};
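/*
 * Illustrative device-tree fragment matched by the table above; the unit
 * address and register window below are hypothetical:
 *
 *	ddr-phy@f1106000 {
 *		compatible = "brcm,brcmstb-ddr-phy-v71.1";
 *		reg = <0xf1106000 0x200>;
 *	};
 */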

struct ddr_seq_ofdata {
	bool needs_ddr_pad;
	u32 warm_boot_offset;
};

static const struct ddr_seq_ofdata ddr_seq_b22 = {
	.needs_ddr_pad = false,
	.warm_boot_offset = 0x2c,
};

static const struct ddr_seq_ofdata ddr_seq = {
	.needs_ddr_pad = true,
};

static const struct of_device_id ddr_shimphy_dt_ids[] = {
	{ .compatible = "brcm,brcmstb-ddr-shimphy-v1.0" },
	{}
};

static const struct of_device_id brcmstb_memc_of_match[] = {
	{
		.compatible = "brcm,brcmstb-memc-ddr-rev-b.2.1",
		.data = &ddr_seq,
	},
	{
		.compatible = "brcm,brcmstb-memc-ddr-rev-b.2.2",
		.data = &ddr_seq_b22,
	},
	{
		.compatible = "brcm,brcmstb-memc-ddr-rev-b.2.3",
		.data = &ddr_seq_b22,
	},
	{
		.compatible = "brcm,brcmstb-memc-ddr-rev-b.3.0",
		.data = &ddr_seq_b22,
	},
	{
		.compatible = "brcm,brcmstb-memc-ddr-rev-b.3.1",
		.data = &ddr_seq_b22,
	},
	{
		.compatible = "brcm,brcmstb-memc-ddr",
		.data = &ddr_seq,
	},
	{},
};

static void __iomem *brcmstb_ioremap_match(const struct of_device_id *matches,
					   int index, const void **ofdata)
{
	struct device_node *dn;
	const struct of_device_id *match;

	dn = of_find_matching_node_and_match(NULL, matches, &match);
	if (!dn)
		return ERR_PTR(-EINVAL);

	if (ofdata)
		*ofdata = match->data;

	return of_io_request_and_map(dn, index, dn->full_name);
}

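/*
 * On panic, leave a marker in always-on SRAM; firmware can then presumably
 * distinguish a panic reboot from a clean one via AON_REG_PANIC (the probe
 * routine below clears this register at boot).
 */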
static int brcmstb_pm_panic_notify(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	writel_relaxed(BRCMSTB_PANIC_MAGIC, ctrl.aon_sram + AON_REG_PANIC);

	return NOTIFY_DONE;
}

static struct notifier_block brcmstb_pm_panic_nb = {
	.notifier_call = brcmstb_pm_panic_notify,
};

static int brcmstb_pm_probe(struct platform_device *pdev)
{
	const struct ddr_phy_ofdata *ddr_phy_data;
	const struct ddr_seq_ofdata *ddr_seq_data;
	const struct of_device_id *of_id = NULL;
	struct device_node *dn;
	void __iomem *base;
	int ret, i, s;

	/* AON ctrl registers */
	base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 0, NULL);
	if (IS_ERR(base)) {
		pr_err("error mapping AON_CTRL\n");
		ret = PTR_ERR(base);
		goto aon_err;
	}
	ctrl.aon_ctrl_base = base;

	/* AON SRAM registers */
	base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 1, NULL);
	if (IS_ERR(base)) {
		/* Assume standard offset */
		ctrl.aon_sram = ctrl.aon_ctrl_base +
				AON_CTRL_SYSTEM_DATA_RAM_OFS;
		s = 0;
	} else {
		ctrl.aon_sram = base;
		s = 1;
	}

	writel_relaxed(0, ctrl.aon_sram + AON_REG_PANIC);

	/* DDR PHY registers */
	base = brcmstb_ioremap_match(ddr_phy_dt_ids, 0,
				     (const void **)&ddr_phy_data);
	if (IS_ERR(base)) {
		pr_err("error mapping DDR PHY\n");
		ret = PTR_ERR(base);
		goto ddr_phy_err;
	}
	ctrl.support_warm_boot = ddr_phy_data->supports_warm_boot;
	ctrl.pll_status_offset = ddr_phy_data->pll_status_offset;
	/* Only need DDR PHY 0 for now? */
	ctrl.memcs[0].ddr_phy_base = base;
	ctrl.s3entry_method = ddr_phy_data->s3entry_method;
	ctrl.phy_a_standby_ctrl_offs = ddr_phy_data->phy_a_standby_ctrl_offs;
	ctrl.phy_b_standby_ctrl_offs = ddr_phy_data->phy_b_standby_ctrl_offs;
	/*
	 * Slightly gross to use the PHY version to get a MEMC offset, but
	 * it is the only versioned thing we can test for so far.
	 */
	ctrl.warm_boot_offset = ddr_phy_data->warm_boot_offset;

	/* DDR SHIM-PHY registers */
	for_each_matching_node(dn, ddr_shimphy_dt_ids) {
		i = ctrl.num_memc;
		if (i >= MAX_NUM_MEMC) {
			of_node_put(dn);
			pr_warn("too many MEMCs (max %d)\n", MAX_NUM_MEMC);
			break;
		}

		base = of_io_request_and_map(dn, 0, dn->full_name);
		if (IS_ERR(base)) {
			of_node_put(dn);
			if (!ctrl.support_warm_boot)
				break;

			pr_err("error mapping DDR SHIMPHY %d\n", i);
			ret = PTR_ERR(base);
			goto ddr_shimphy_err;
		}
		ctrl.memcs[i].ddr_shimphy_base = base;
		ctrl.num_memc++;
	}

	/* Sequencer DRAM Param and Control Registers */
	i = 0;
	for_each_matching_node(dn, brcmstb_memc_of_match) {
		base = of_iomap(dn, 0);
		if (!base) {
			of_node_put(dn);
			pr_err("error mapping DDR Sequencer %d\n", i);
			ret = -ENOMEM;
			goto brcmstb_memc_err;
		}

		of_id = of_match_node(brcmstb_memc_of_match, dn);
		if (!of_id) {
			iounmap(base);
			of_node_put(dn);
			ret = -EINVAL;
			goto brcmstb_memc_err;
		}

		ddr_seq_data = of_id->data;
		ctrl.needs_ddr_pad = ddr_seq_data->needs_ddr_pad;
		/* Adjust warm boot offset based on the DDR sequencer */
		if (ddr_seq_data->warm_boot_offset)
			ctrl.warm_boot_offset = ddr_seq_data->warm_boot_offset;

		ctrl.memcs[i].ddr_ctrl = base;
		i++;
	}

	pr_debug("PM: supports warm boot:%d, method:%d, wboffs:%x\n",
		 ctrl.support_warm_boot, ctrl.s3entry_method,
		 ctrl.warm_boot_offset);

	dn = of_find_matching_node(NULL, sram_dt_ids);
	if (!dn) {
		pr_err("SRAM not found\n");
		ret = -EINVAL;
		goto brcmstb_memc_err;
	}

	ret = brcmstb_init_sram(dn);
	of_node_put(dn);
	if (ret) {
		pr_err("error setting up SRAM for PM\n");
		goto brcmstb_memc_err;
	}

	ctrl.pdev = pdev;

	ctrl.s3_params = kmalloc(sizeof(*ctrl.s3_params), GFP_KERNEL);
	if (!ctrl.s3_params) {
		ret = -ENOMEM;
		goto s3_params_err;
	}
	ctrl.s3_params_pa = dma_map_single(&pdev->dev, ctrl.s3_params,
					   sizeof(*ctrl.s3_params),
					   DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, ctrl.s3_params_pa)) {
		pr_err("error mapping DMA memory\n");
		ret = -ENOMEM;
		goto out;
	}

	atomic_notifier_chain_register(&panic_notifier_list,
				       &brcmstb_pm_panic_nb);

	pm_power_off = brcmstb_pm_poweroff;
	suspend_set_ops(&brcmstb_pm_ops);

	return 0;

out:
	kfree(ctrl.s3_params);
s3_params_err:
	iounmap(ctrl.boot_sram);
brcmstb_memc_err:
	for (i--; i >= 0; i--)
		iounmap(ctrl.memcs[i].ddr_ctrl);
ddr_shimphy_err:
	for (i = 0; i < ctrl.num_memc; i++)
		iounmap(ctrl.memcs[i].ddr_shimphy_base);

	iounmap(ctrl.memcs[0].ddr_phy_base);
ddr_phy_err:
	iounmap(ctrl.aon_ctrl_base);
	if (s)
		iounmap(ctrl.aon_sram);
aon_err:
	pr_warn("PM: initialization failed with code %d\n", ret);

	return ret;
}

static struct platform_driver brcmstb_pm_driver = {
	.driver = {
		.name	= "brcmstb-pm",
		.of_match_table = aon_ctrl_dt_ids,
	},
};

static int __init brcmstb_pm_init(void)
{
	return platform_driver_probe(&brcmstb_pm_driver,
				     brcmstb_pm_probe);
}
module_init(brcmstb_pm_init);