/*
 * ARM-specific support for Broadcom STB S2/S3/S5 power management
 *
 * S2: clock gate CPUs and as many peripherals as possible
 * S3: power off all of the chip except the Always ON (AON) island; keep DDR in
 *     self-refresh
 * S5: (a.k.a. S3 cold boot) much like S3, except DDR is powered down, so we
 *     treat this mode like a soft power-off, with wakeup allowed from AON
 *
 * Copyright © 2014-2017 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "brcmstb-pm: " fmt

#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/printk.h>
#include <linux/proc_fs.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/suspend.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/soc/brcmstb/brcmstb.h>

#include <asm/fncpy.h>
#include <asm/setup.h>
#include <asm/suspend.h>

#include "pm.h"
#include "aon_defs.h"

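/*
 * Two S3 entry methods exist, selected per DDR PHY version below:
 * method #0 has the SHIMPHY sequence/gate the DDR PLL directly, while
 * method #1 programs a power-down sequence (PWRDWN_SEQ_*) into the pad
 * control register and sets a warm-boot flag in the MEMC.
 */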
#define SHIMPHY_DDR_PAD_CNTRL		0x8c

/* Method #0 */
#define SHIMPHY_PAD_PLL_SEQUENCE	BIT(8)
#define SHIMPHY_PAD_GATE_PLL_S3		BIT(9)

/* Method #1 */
#define PWRDWN_SEQ_NO_SEQUENCING	0
#define PWRDWN_SEQ_HOLD_CHANNEL		1
#define	PWRDWN_SEQ_RESET_PLL		2
#define PWRDWN_SEQ_POWERDOWN_PLL	3

#define SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK	0x00f00000
#define SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT	20

#define	DDR_FORCE_CKE_RST_N		BIT(3)
#define	DDR_PHY_RST_N			BIT(2)
#define	DDR_PHY_CKE			BIT(1)

#define	DDR_PHY_NO_CHANNEL		0xffffffff

#define MAX_NUM_MEMC			3

struct brcmstb_memc {
	void __iomem *ddr_phy_base;
	void __iomem *ddr_shimphy_base;
	void __iomem *ddr_ctrl;
};

struct brcmstb_pm_control {
	void __iomem *aon_ctrl_base;
	void __iomem *aon_sram;
	struct brcmstb_memc memcs[MAX_NUM_MEMC];

	void __iomem *boot_sram;
	size_t boot_sram_len;

	bool support_warm_boot;
	size_t pll_status_offset;
	int num_memc;

	struct brcmstb_s3_params *s3_params;
	dma_addr_t s3_params_pa;
	int s3entry_method;
	u32 warm_boot_offset;
	u32 phy_a_standby_ctrl_offs;
	u32 phy_b_standby_ctrl_offs;
	bool needs_ddr_pad;
	struct platform_device *pdev;
};

enum bsp_initiate_command {
	BSP_CLOCK_STOP		= 0x00,
	BSP_GEN_RANDOM_KEY	= 0x4A,
	BSP_RESTORE_RANDOM_KEY	= 0x55,
	BSP_GEN_FIXED_KEY	= 0x63,
};

#define PM_INITIATE		0x01
#define PM_INITIATE_SUCCESS	0x00
#define PM_INITIATE_FAIL	0xfe

static struct brcmstb_pm_control ctrl;

static int (*brcmstb_pm_do_s2_sram)(void __iomem *aon_ctrl_base,
		void __iomem *ddr_phy_pll_status);

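/*
 * Remap the on-chip boot SRAM uncached and executable; the S2 suspend
 * routine is copied here so it can keep running while DDR is in
 * self-refresh.
 */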
static int brcmstb_init_sram(struct device_node *dn)
{
	void __iomem *sram;
	struct resource res;
	int ret;

	ret = of_address_to_resource(dn, 0, &res);
	if (ret)
		return ret;

	/* Uncached, executable remapping of SRAM */
	sram = __arm_ioremap_exec(res.start, resource_size(&res), false);
	if (!sram)
		return -ENOMEM;

	ctrl.boot_sram = sram;
	ctrl.boot_sram_len = resource_size(&res);

	return 0;
}

static const struct of_device_id sram_dt_ids[] = {
	{ .compatible = "mmio-sram" },
	{ /* sentinel */ }
};

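/*
 * Kick a command to the BSP firmware through the AON_CTRL_PM_INITIATE
 * mailbox register: the command is written shifted left by one with
 * PM_INITIATE as the "go" bit; the BSP clears PM_INITIATE and leaves a
 * status byte (PM_INITIATE_SUCCESS/FAIL) when it is done.
 */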
static int do_bsp_initiate_command(enum bsp_initiate_command cmd)
{
	void __iomem *base = ctrl.aon_ctrl_base;
	int ret;
	int timeo = 1000 * 1000; /* 1 second */

	writel_relaxed(0, base + AON_CTRL_PM_INITIATE);
	(void)readl_relaxed(base + AON_CTRL_PM_INITIATE);

	/* Go! */
	writel_relaxed((cmd << 1) | PM_INITIATE, base + AON_CTRL_PM_INITIATE);

	/*
	 * If firmware doesn't support the 'ack', then just assume it's done
	 * after 10ms. Note that this only works for command 0, BSP_CLOCK_STOP
	 */
	if (of_machine_is_compatible("brcm,bcm74371a0")) {
		(void)readl_relaxed(base + AON_CTRL_PM_INITIATE);
		mdelay(10);
		return 0;
	}

	for (;;) {
		ret = readl_relaxed(base + AON_CTRL_PM_INITIATE);
		if (!(ret & PM_INITIATE))
			break;
		if (timeo <= 0) {
			pr_err("error: timeout waiting for BSP (%x)\n", ret);
			break;
		}
		timeo -= 50;
		udelay(50);
	}

	return (ret & 0xff) != PM_INITIATE_SUCCESS;
}

static int brcmstb_pm_handshake(void)
{
	void __iomem *base = ctrl.aon_ctrl_base;
	u32 tmp;
	int ret;

	/* BSP power handshake, v1 */
	tmp = readl_relaxed(base + AON_CTRL_HOST_MISC_CMDS);
	tmp &= ~1UL;
	writel_relaxed(tmp, base + AON_CTRL_HOST_MISC_CMDS);
	(void)readl_relaxed(base + AON_CTRL_HOST_MISC_CMDS);

	ret = do_bsp_initiate_command(BSP_CLOCK_STOP);
	if (ret)
		pr_err("BSP handshake failed\n");

	/*
	 * HACK: BSP may have internal race on the CLOCK_STOP command.
	 * Avoid touching the BSP for a few milliseconds.
	 */
	mdelay(3);

	return ret;
}

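/*
 * Read-modify-write each MEMC's SHIMPHY DDR pad control register:
 * @mask selects the bits to preserve and @value is OR'd on top. No-op
 * on chips that don't need the DDR pad sequence.
 */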
static inline void shimphy_set(u32 value, u32 mask)
{
	int i;

	if (!ctrl.needs_ddr_pad)
		return;

	for (i = 0; i < ctrl.num_memc; i++) {
		u32 tmp;

		tmp = readl_relaxed(ctrl.memcs[i].ddr_shimphy_base +
			SHIMPHY_DDR_PAD_CNTRL);
		tmp = value | (tmp & mask);
		writel_relaxed(tmp, ctrl.memcs[i].ddr_shimphy_base +
			SHIMPHY_DDR_PAD_CNTRL);
	}
	wmb(); /* Complete sequence in order. */
}

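/*
 * Set or clear the WARM_BOOT flag in each MEMC, presumably so the boot
 * firmware knows whether DDR contents were preserved and can choose
 * between a warm and a cold boot path on the way back up.
 */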
static inline void ddr_ctrl_set(bool warmboot)
{
	int i;

	for (i = 0; i < ctrl.num_memc; i++) {
		u32 tmp;

		tmp = readl_relaxed(ctrl.memcs[i].ddr_ctrl +
				ctrl.warm_boot_offset);
		if (warmboot)
			tmp |= 1;
		else
			tmp &= ~1; /* Cold boot */
		writel_relaxed(tmp, ctrl.memcs[i].ddr_ctrl +
				ctrl.warm_boot_offset);
	}
	/* Complete sequence in order */
	wmb();
}

static inline void s3entry_method0(void)
{
	shimphy_set(SHIMPHY_PAD_GATE_PLL_S3 | SHIMPHY_PAD_PLL_SEQUENCE,
		    0xffffffff);
}

static inline void s3entry_method1(void)
{
	/*
	 * S3 Entry Sequence
	 * -----------------
	 * Step 1: SHIMPHY_ADDR_CNTL_0_DDR_PAD_CNTRL [ S3_PWRDWN_SEQ ] = 3
	 * Step 2: MEMC_DDR_0_WARM_BOOT [ WARM_BOOT ] = 1
	 */
	shimphy_set((PWRDWN_SEQ_POWERDOWN_PLL <<
		    SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
		    ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);

	ddr_ctrl_set(true);
}

static inline void s5entry_method1(void)
{
	int i;

	/*
	 * S5 Entry Sequence
	 * -----------------
	 * Step 1: SHIMPHY_ADDR_CNTL_0_DDR_PAD_CNTRL [ S3_PWRDWN_SEQ ] = 3
	 * Step 2: MEMC_DDR_0_WARM_BOOT [ WARM_BOOT ] = 0
	 * Step 3: DDR_PHY_CONTROL_REGS_[AB]_0_STANDBY_CONTROL[ CKE ] = 0
	 *	   DDR_PHY_CONTROL_REGS_[AB]_0_STANDBY_CONTROL[ RST_N ] = 0
	 */
	shimphy_set((PWRDWN_SEQ_POWERDOWN_PLL <<
		    SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
		    ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);

	ddr_ctrl_set(false);

	for (i = 0; i < ctrl.num_memc; i++) {
		u32 tmp;

		/* Step 3: Channel A (RST_N = CKE = 0) */
		tmp = readl_relaxed(ctrl.memcs[i].ddr_phy_base +
				  ctrl.phy_a_standby_ctrl_offs);
		tmp &= ~(DDR_PHY_RST_N | DDR_PHY_CKE);
		writel_relaxed(tmp, ctrl.memcs[i].ddr_phy_base +
			     ctrl.phy_a_standby_ctrl_offs);

		/* Step 3: Channel B? */
		if (ctrl.phy_b_standby_ctrl_offs != DDR_PHY_NO_CHANNEL) {
			tmp = readl_relaxed(ctrl.memcs[i].ddr_phy_base +
					  ctrl.phy_b_standby_ctrl_offs);
			tmp &= ~(DDR_PHY_RST_N | DDR_PHY_CKE);
			writel_relaxed(tmp, ctrl.memcs[i].ddr_phy_base +
				     ctrl.phy_b_standby_ctrl_offs);
		}
	}
	/* Must complete */
	wmb();
}

/*
 * Run a Power Management State Machine (PMSM) shutdown command and put the CPU
 * into a low-power mode
 */
static void brcmstb_do_pmsm_power_down(unsigned long base_cmd, bool onewrite)
{
	void __iomem *base = ctrl.aon_ctrl_base;

	if ((ctrl.s3entry_method == 1) && (base_cmd == PM_COLD_CONFIG))
		s5entry_method1();

	/* pm_start_pwrdn transition 0->1 */
	writel_relaxed(base_cmd, base + AON_CTRL_PM_CTRL);

	if (!onewrite) {
		(void)readl_relaxed(base + AON_CTRL_PM_CTRL);

		writel_relaxed(base_cmd | PM_PWR_DOWN, base + AON_CTRL_PM_CTRL);
		(void)readl_relaxed(base + AON_CTRL_PM_CTRL);
	}
	wfi();
}

/* Support S5 cold boot out of "poweroff" */
static void brcmstb_pm_poweroff(void)
{
	brcmstb_pm_handshake();

	/* Clear magic S3 warm-boot value */
	writel_relaxed(0, ctrl.aon_sram + AON_REG_MAGIC_FLAGS);
	(void)readl_relaxed(ctrl.aon_sram + AON_REG_MAGIC_FLAGS);

	/* Skip wait-for-interrupt signal; just use a countdown */
	writel_relaxed(0x10, ctrl.aon_ctrl_base + AON_CTRL_PM_CPU_WAIT_COUNT);
	(void)readl_relaxed(ctrl.aon_ctrl_base + AON_CTRL_PM_CPU_WAIT_COUNT);

	if (ctrl.s3entry_method == 1) {
		shimphy_set((PWRDWN_SEQ_POWERDOWN_PLL <<
			     SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
			     ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);
		ddr_ctrl_set(false);
		brcmstb_do_pmsm_power_down(M1_PM_COLD_CONFIG, true);
		return; /* We should never actually get here */
	}

	brcmstb_do_pmsm_power_down(PM_COLD_CONFIG, false);
}

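/*
 * Relocate a function into boot SRAM with fncpy(), which requires both
 * source and destination to be FNCPY_ALIGN-aligned; the returned
 * pointer is what must be called so that the copy executes from SRAM.
 */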
static void *brcmstb_pm_copy_to_sram(void *fn, size_t len)
{
	unsigned int size = ALIGN(len, FNCPY_ALIGN);

	if (ctrl.boot_sram_len < size) {
		pr_err("standby code will not fit in SRAM\n");
		return NULL;
	}

	return fncpy(ctrl.boot_sram, fn, size);
}

/*
 * S2 suspend/resume picks up where we left off, so we must execute carefully
 * from SRAM, in order to allow DDR to come back up safely before we continue.
 */
static int brcmstb_pm_s2(void)
{
	/* A previous S3 can set a value hazardous to S2, so make sure. */
	if (ctrl.s3entry_method == 1) {
		shimphy_set((PWRDWN_SEQ_NO_SEQUENCING <<
			    SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
			    ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);
		ddr_ctrl_set(false);
	}

	brcmstb_pm_do_s2_sram = brcmstb_pm_copy_to_sram(&brcmstb_pm_do_s2,
			brcmstb_pm_do_s2_sz);
	if (!brcmstb_pm_do_s2_sram)
		return -EINVAL;

	return brcmstb_pm_do_s2_sram(ctrl.aon_ctrl_base,
			ctrl.memcs[0].ddr_phy_base +
			ctrl.pll_status_offset);
}

/*
 * This function is called on a new stack, so don't allow inlining (which will
 * generate stack references on the old stack). It cannot be made static because
 * it is referenced from brcmstb_pm_s3()
 */
noinline int brcmstb_pm_s3_finish(void)
{
	struct brcmstb_s3_params *params = ctrl.s3_params;
	dma_addr_t params_pa = ctrl.s3_params_pa;
	phys_addr_t reentry = virt_to_phys(&cpu_resume_arm);
	enum bsp_initiate_command cmd;
	u32 flags;

	/*
	 * Clear parameter structure, but not DTU area, which has already been
	 * filled in. We know DTU is at the end, so we can just subtract its
	 * size.
	 */
	memset(params, 0, sizeof(*params) - sizeof(params->dtu));

	flags = readl_relaxed(ctrl.aon_sram + AON_REG_MAGIC_FLAGS);

	flags &= S3_BOOTLOADER_RESERVED;
	flags |= S3_FLAG_NO_MEM_VERIFY;
	flags |= S3_FLAG_LOAD_RANDKEY;

	/* Load random / fixed key */
	if (flags & S3_FLAG_LOAD_RANDKEY)
		cmd = BSP_GEN_RANDOM_KEY;
	else
		cmd = BSP_GEN_FIXED_KEY;
	if (do_bsp_initiate_command(cmd)) {
		pr_info("key loading failed\n");
		return -EIO;
	}

	params->magic = BRCMSTB_S3_MAGIC;
	params->reentry = reentry;

	/* No more writes to DRAM */
	flush_cache_all();

	flags |= BRCMSTB_S3_MAGIC_SHORT;

	writel_relaxed(flags, ctrl.aon_sram + AON_REG_MAGIC_FLAGS);
	writel_relaxed(lower_32_bits(params_pa),
		       ctrl.aon_sram + AON_REG_CONTROL_LOW);
	writel_relaxed(upper_32_bits(params_pa),
		       ctrl.aon_sram + AON_REG_CONTROL_HIGH);

	switch (ctrl.s3entry_method) {
	case 0:
		s3entry_method0();
		brcmstb_do_pmsm_power_down(PM_WARM_CONFIG, false);
		break;
	case 1:
		s3entry_method1();
		brcmstb_do_pmsm_power_down(M1_PM_WARM_CONFIG, true);
		break;
	default:
		return -EINVAL;
	}

	/* Must have been interrupted from wfi()? */
	return -EINTR;
}

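/*
 * Switch to a private stack (the top of boot SRAM, passed in by
 * brcmstb_pm_s3() below) before calling brcmstb_pm_s3_finish(), since
 * DRAM is about to enter self-refresh and the old stack lives there.
 */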
static int brcmstb_pm_do_s3(unsigned long sp)
{
	unsigned long save_sp;
	int ret;

	asm volatile (
		"mov	%[save], sp\n"
		"mov	sp, %[new]\n"
		"bl	brcmstb_pm_s3_finish\n"
		"mov	%[ret], r0\n"
		"mov	%[new], sp\n"
		"mov	sp, %[save]\n"
		: [save] "=&r" (save_sp), [ret] "=&r" (ret)
		: [new] "r" (sp)
	);

	return ret;
}

static int brcmstb_pm_s3(void)
{
	void __iomem *sp = ctrl.boot_sram + ctrl.boot_sram_len;

	return cpu_suspend((unsigned long)sp, brcmstb_pm_do_s3);
}

static int brcmstb_pm_standby(bool deep_standby)
{
	int ret;

	if (brcmstb_pm_handshake())
		return -EIO;

	if (deep_standby)
		ret = brcmstb_pm_s3();
	else
		ret = brcmstb_pm_s2();
	if (ret)
		pr_err("%s: standby failed\n", __func__);

	return ret;
}

static int brcmstb_pm_enter(suspend_state_t state)
{
	int ret = -EINVAL;

	switch (state) {
	case PM_SUSPEND_STANDBY:
		ret = brcmstb_pm_standby(false);
		break;
	case PM_SUSPEND_MEM:
		ret = brcmstb_pm_standby(true);
		break;
	}

	return ret;
}

static int brcmstb_pm_valid(suspend_state_t state)
{
	switch (state) {
	case PM_SUSPEND_STANDBY:
		return true;
	case PM_SUSPEND_MEM:
		return ctrl.support_warm_boot;
	default:
		return false;
	}
}

static const struct platform_suspend_ops brcmstb_pm_ops = {
	.enter		= brcmstb_pm_enter,
	.valid		= brcmstb_pm_valid,
};

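/*
 * With these ops registered, userspace triggers the modes above through
 * the standard sysfs interface, e.g. (as root):
 *
 *   echo standby > /sys/power/state   # S2
 *   echo mem > /sys/power/state       # S3, if warm boot is supported
 */
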
static const struct of_device_id aon_ctrl_dt_ids[] = {
	{ .compatible = "brcm,brcmstb-aon-ctrl" },
	{}
};

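/* Per-version DDR PHY register layout, selected by compatible string */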
struct ddr_phy_ofdata {
	bool supports_warm_boot;
	size_t pll_status_offset;
	int s3entry_method;
	u32 warm_boot_offset;
	u32 phy_a_standby_ctrl_offs;
	u32 phy_b_standby_ctrl_offs;
};

static struct ddr_phy_ofdata ddr_phy_71_1 = {
	.supports_warm_boot = true,
	.pll_status_offset = 0x0c,
	.s3entry_method = 1,
	.warm_boot_offset = 0x2c,
	.phy_a_standby_ctrl_offs = 0x198,
	.phy_b_standby_ctrl_offs = DDR_PHY_NO_CHANNEL
};

static struct ddr_phy_ofdata ddr_phy_72_0 = {
	.supports_warm_boot = true,
	.pll_status_offset = 0x10,
	.s3entry_method = 1,
	.warm_boot_offset = 0x40,
	.phy_a_standby_ctrl_offs = 0x2a4,
	.phy_b_standby_ctrl_offs = 0x8a4
};

static struct ddr_phy_ofdata ddr_phy_225_1 = {
	.supports_warm_boot = false,
	.pll_status_offset = 0x4,
	.s3entry_method = 0
};

static struct ddr_phy_ofdata ddr_phy_240_1 = {
	.supports_warm_boot = true,
	.pll_status_offset = 0x4,
	.s3entry_method = 0
};

static const struct of_device_id ddr_phy_dt_ids[] = {
	{
		.compatible = "brcm,brcmstb-ddr-phy-v71.1",
		.data = &ddr_phy_71_1,
	},
	{
		.compatible = "brcm,brcmstb-ddr-phy-v72.0",
		.data = &ddr_phy_72_0,
	},
	{
		.compatible = "brcm,brcmstb-ddr-phy-v225.1",
		.data = &ddr_phy_225_1,
	},
	{
		.compatible = "brcm,brcmstb-ddr-phy-v240.1",
		.data = &ddr_phy_240_1,
	},
	{
		/* Same as v240.1, for the registers we care about */
		.compatible = "brcm,brcmstb-ddr-phy-v240.2",
		.data = &ddr_phy_240_1,
	},
	{}
};

struct ddr_seq_ofdata {
	bool needs_ddr_pad;
	u32 warm_boot_offset;
};

static const struct ddr_seq_ofdata ddr_seq_b22 = {
	.needs_ddr_pad = false,
	.warm_boot_offset = 0x2c,
};

static const struct ddr_seq_ofdata ddr_seq = {
	.needs_ddr_pad = true,
};

static const struct of_device_id ddr_shimphy_dt_ids[] = {
	{ .compatible = "brcm,brcmstb-ddr-shimphy-v1.0" },
	{}
};

static const struct of_device_id brcmstb_memc_of_match[] = {
	{
		.compatible = "brcm,brcmstb-memc-ddr-rev-b.2.1",
		.data = &ddr_seq,
	},
	{
		.compatible = "brcm,brcmstb-memc-ddr-rev-b.2.2",
		.data = &ddr_seq_b22,
	},
	{
		.compatible = "brcm,brcmstb-memc-ddr-rev-b.2.3",
		.data = &ddr_seq_b22,
	},
	{
		.compatible = "brcm,brcmstb-memc-ddr-rev-b.3.0",
		.data = &ddr_seq_b22,
	},
	{
		.compatible = "brcm,brcmstb-memc-ddr-rev-b.3.1",
		.data = &ddr_seq_b22,
	},
	{
		.compatible = "brcm,brcmstb-memc-ddr",
		.data = &ddr_seq,
	},
	{},
};

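/*
 * Map the first DT node matching @matches, returning its registers at
 * @index and, optionally, the per-compatible match data via @ofdata.
 */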
static void __iomem *brcmstb_ioremap_match(const struct of_device_id *matches,
					   int index, const void **ofdata)
{
	struct device_node *dn;
	const struct of_device_id *match;

	dn = of_find_matching_node_and_match(NULL, matches, &match);
	if (!dn)
		return ERR_PTR(-EINVAL);

	if (ofdata)
		*ofdata = match->data;

	return of_io_request_and_map(dn, index, dn->full_name);
}

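/*
 * On panic, leave a magic marker in AON SRAM, presumably so the boot
 * firmware can tell a panic-triggered reboot from a clean one.
 */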
static int brcmstb_pm_panic_notify(struct notifier_block *nb,
		unsigned long action, void *data)
{
	writel_relaxed(BRCMSTB_PANIC_MAGIC, ctrl.aon_sram + AON_REG_PANIC);

	return NOTIFY_DONE;
}

static struct notifier_block brcmstb_pm_panic_nb = {
	.notifier_call = brcmstb_pm_panic_notify,
};

static int brcmstb_pm_probe(struct platform_device *pdev)
{
	const struct ddr_phy_ofdata *ddr_phy_data;
	const struct ddr_seq_ofdata *ddr_seq_data;
	const struct of_device_id *of_id = NULL;
	struct device_node *dn;
	void __iomem *base;
	int ret, i;

	/* AON ctrl registers */
	base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 0, NULL);
	if (IS_ERR(base)) {
		pr_err("error mapping AON_CTRL\n");
		return PTR_ERR(base);
	}
	ctrl.aon_ctrl_base = base;

	/* AON SRAM registers */
	base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 1, NULL);
	if (IS_ERR(base)) {
		/* Assume standard offset */
		ctrl.aon_sram = ctrl.aon_ctrl_base +
				     AON_CTRL_SYSTEM_DATA_RAM_OFS;
	} else {
		ctrl.aon_sram = base;
	}

	writel_relaxed(0, ctrl.aon_sram + AON_REG_PANIC);

	/* DDR PHY registers */
	base = brcmstb_ioremap_match(ddr_phy_dt_ids, 0,
				     (const void **)&ddr_phy_data);
	if (IS_ERR(base)) {
		pr_err("error mapping DDR PHY\n");
		return PTR_ERR(base);
	}
	ctrl.support_warm_boot = ddr_phy_data->supports_warm_boot;
	ctrl.pll_status_offset = ddr_phy_data->pll_status_offset;
	/* Only need DDR PHY 0 for now? */
	ctrl.memcs[0].ddr_phy_base = base;
	ctrl.s3entry_method = ddr_phy_data->s3entry_method;
	ctrl.phy_a_standby_ctrl_offs = ddr_phy_data->phy_a_standby_ctrl_offs;
	ctrl.phy_b_standby_ctrl_offs = ddr_phy_data->phy_b_standby_ctrl_offs;
	/*
	 * Slightly gross to use the PHY version to pick a MEMC offset,
	 * but it is the only versioned thing we can test for so far.
	 */
	ctrl.warm_boot_offset = ddr_phy_data->warm_boot_offset;

	/* DDR SHIM-PHY registers */
	for_each_matching_node(dn, ddr_shimphy_dt_ids) {
		i = ctrl.num_memc;
		if (i >= MAX_NUM_MEMC) {
			pr_warn("too many MEMCs (max %d)\n", MAX_NUM_MEMC);
			break;
		}

		base = of_io_request_and_map(dn, 0, dn->full_name);
		if (IS_ERR(base)) {
			if (!ctrl.support_warm_boot)
				break;

			pr_err("error mapping DDR SHIMPHY %d\n", i);
			return PTR_ERR(base);
		}
		ctrl.memcs[i].ddr_shimphy_base = base;
		ctrl.num_memc++;
	}

	/* Sequencer DRAM Param and Control Registers */
	i = 0;
	for_each_matching_node(dn, brcmstb_memc_of_match) {
		base = of_iomap(dn, 0);
		if (!base) {
			pr_err("error mapping DDR Sequencer %d\n", i);
			return -ENOMEM;
		}

		of_id = of_match_node(brcmstb_memc_of_match, dn);
		if (!of_id) {
			iounmap(base);
			return -EINVAL;
		}

		ddr_seq_data = of_id->data;
		ctrl.needs_ddr_pad = ddr_seq_data->needs_ddr_pad;
		/* Adjust warm boot offset based on the DDR sequencer */
		if (ddr_seq_data->warm_boot_offset)
			ctrl.warm_boot_offset = ddr_seq_data->warm_boot_offset;

		ctrl.memcs[i].ddr_ctrl = base;
		i++;
	}

	pr_debug("PM: supports warm boot:%d, method:%d, wboffs:%x\n",
		ctrl.support_warm_boot, ctrl.s3entry_method,
		ctrl.warm_boot_offset);

	dn = of_find_matching_node(NULL, sram_dt_ids);
	if (!dn) {
		pr_err("SRAM not found\n");
		return -EINVAL;
	}

	ret = brcmstb_init_sram(dn);
	if (ret) {
		pr_err("error setting up SRAM for PM\n");
		return ret;
	}

	ctrl.pdev = pdev;

	ctrl.s3_params = kmalloc(sizeof(*ctrl.s3_params), GFP_KERNEL);
	if (!ctrl.s3_params)
		return -ENOMEM;
	ctrl.s3_params_pa = dma_map_single(&pdev->dev, ctrl.s3_params,
					   sizeof(*ctrl.s3_params),
					   DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, ctrl.s3_params_pa)) {
		pr_err("error mapping DMA memory\n");
		ret = -ENOMEM;
		goto out;
	}

	atomic_notifier_chain_register(&panic_notifier_list,
				       &brcmstb_pm_panic_nb);

	pm_power_off = brcmstb_pm_poweroff;
	suspend_set_ops(&brcmstb_pm_ops);

	return 0;

out:
	kfree(ctrl.s3_params);

	pr_warn("PM: initialization failed with code %d\n", ret);

	return ret;
}

static struct platform_driver brcmstb_pm_driver = {
	.driver = {
		.name	= "brcmstb-pm",
		.of_match_table = aon_ctrl_dt_ids,
	},
};

static int __init brcmstb_pm_init(void)
{
	return platform_driver_probe(&brcmstb_pm_driver,
				     brcmstb_pm_probe);
}
module_init(brcmstb_pm_init);