/*
 * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>

#include <platform_def.h>

#include <arch_helpers.h>
#include <common/debug.h>
#include <drivers/delay_timer.h>
#include <lib/mmio.h>
#include <plat/common/platform.h>

#include <ddr_rk3368.h>
#include <plat_private.h>
#include <pmu.h>
#include <pmu_com.h>
#include <rk3368_def.h>
#include <soc.h>

DEFINE_BAKERY_LOCK(rockchip_pd_lock);

static uint32_t cpu_warm_boot_addr;

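/*
 * Flush the L2 cache of the big cluster: assert the PMU soft-control
 * flush request, poll PMU_CORE_PWR_ST until the flush-done bit is set,
 * then release the request.
 */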
void rk3368_flash_l2_b(void)
{
        uint32_t wait_cnt = 0;

        regs_updata_bit_set(PMU_BASE + PMU_SFT_CON, pmu_sft_l2flsh_clst_b);
        dsb();

        while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST)
                 & BIT(clst_b_l2_flsh_done))) {
                wait_cnt++;
                if (!(wait_cnt % MAX_WAIT_CONUT))
                        WARN("%s:reg %x,wait\n", __func__,
                             mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST));
        }

        regs_updata_bit_clr(PMU_BASE + PMU_SFT_CON, pmu_sft_l2flsh_clst_b);
}

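/*
 * Request (idle != 0) or release (idle == 0) the bus idle state for the
 * given interface and wait until PMU_BUS_IDE_ST acknowledges it, warning
 * periodically while waiting.
 */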
static inline int rk3368_pmu_bus_idle(uint32_t req, uint32_t idle)
{
        uint32_t mask = BIT(req);
        uint32_t idle_mask = 0;
        uint32_t idle_target = 0;
        uint32_t val;
        uint32_t wait_cnt = 0;

        switch (req) {
        case bus_ide_req_clst_l:
                idle_mask = BIT(pmu_idle_ack_cluster_l);
                idle_target = (idle << pmu_idle_ack_cluster_l);
                break;

        case bus_ide_req_clst_b:
                idle_mask = BIT(pmu_idle_ack_cluster_b);
                idle_target = (idle << pmu_idle_ack_cluster_b);
                break;

        case bus_ide_req_cxcs:
                idle_mask = BIT(pmu_idle_ack_cxcs);
                idle_target = ((!idle) << pmu_idle_ack_cxcs);
                break;

        case bus_ide_req_cci400:
                idle_mask = BIT(pmu_idle_ack_cci400);
                idle_target = ((!idle) << pmu_idle_ack_cci400);
                break;

        case bus_ide_req_gpu:
                idle_mask = BIT(pmu_idle_ack_gpu) | BIT(pmu_idle_gpu);
                idle_target = (idle << pmu_idle_ack_gpu) |
                              (idle << pmu_idle_gpu);
                break;

        case bus_ide_req_core:
                idle_mask = BIT(pmu_idle_ack_core) | BIT(pmu_idle_core);
                idle_target = (idle << pmu_idle_ack_core) |
                              (idle << pmu_idle_core);
                break;

        case bus_ide_req_bus:
                idle_mask = BIT(pmu_idle_ack_bus) | BIT(pmu_idle_bus);
                idle_target = (idle << pmu_idle_ack_bus) |
                              (idle << pmu_idle_bus);
                break;

        case bus_ide_req_dma:
                idle_mask = BIT(pmu_idle_ack_dma) | BIT(pmu_idle_dma);
                idle_target = (idle << pmu_idle_ack_dma) |
                              (idle << pmu_idle_dma);
                break;

        case bus_ide_req_peri:
                idle_mask = BIT(pmu_idle_ack_peri) | BIT(pmu_idle_peri);
                idle_target = (idle << pmu_idle_ack_peri) |
                              (idle << pmu_idle_peri);
                break;

        case bus_ide_req_video:
                idle_mask = BIT(pmu_idle_ack_video) | BIT(pmu_idle_video);
                idle_target = (idle << pmu_idle_ack_video) |
                              (idle << pmu_idle_video);
                break;

        case bus_ide_req_vio:
                idle_mask = BIT(pmu_idle_ack_vio) | BIT(pmu_idle_vio);
                idle_target = (idle << pmu_idle_ack_vio) |
                              (idle << pmu_idle_vio);
                break;

        case bus_ide_req_alive:
                idle_mask = BIT(pmu_idle_ack_alive) | BIT(pmu_idle_alive);
                idle_target = (idle << pmu_idle_ack_alive) |
                              (idle << pmu_idle_alive);
                break;

        case bus_ide_req_pmu:
                idle_mask = BIT(pmu_idle_ack_pmu) | BIT(pmu_idle_pmu);
                idle_target = (idle << pmu_idle_ack_pmu) |
                              (idle << pmu_idle_pmu);
                break;

        case bus_ide_req_msch:
                idle_mask = BIT(pmu_idle_ack_msch) | BIT(pmu_idle_msch);
                idle_target = (idle << pmu_idle_ack_msch) |
                              (idle << pmu_idle_msch);
                break;

        case bus_ide_req_cci:
                idle_mask = BIT(pmu_idle_ack_cci) | BIT(pmu_idle_cci);
                idle_target = (idle << pmu_idle_ack_cci) |
                              (idle << pmu_idle_cci);
                break;

        default:
                ERROR("%s: Unsupported idle request\n", __func__);
                break;
        }

        val = mmio_read_32(PMU_BASE + PMU_BUS_IDE_REQ);
        if (idle)
                val |= mask;
        else
                val &= ~mask;

        mmio_write_32(PMU_BASE + PMU_BUS_IDE_REQ, val);

        while ((mmio_read_32(PMU_BASE +
                PMU_BUS_IDE_ST) & idle_mask) != idle_target) {
                wait_cnt++;
                if (!(wait_cnt % MAX_WAIT_CONUT))
                        WARN("%s:st=%x(%x)\n", __func__,
                             mmio_read_32(PMU_BASE + PMU_BUS_IDE_ST),
                             idle_mask);
        }

        return 0;
}

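/*
 * Bring the big cluster's SCU interface back up: de-assert ACINACTM and
 * release the bus idle request for cluster B.
 */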
void pmu_scu_b_pwrup(void)
{
        regs_updata_bit_clr(PMU_BASE + PMU_SFT_CON, pmu_sft_acinactm_clst_b);
        rk3368_pmu_bus_idle(bus_ide_req_clst_b, 0);
}

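/*
 * Power down the big cluster's SCU interface. All cluster B cpus must
 * already be off; the L2 is flushed, ACINACTM is asserted and the
 * cluster bus interface is idled once the L2 reports WFI.
 */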
static void pmu_scu_b_pwrdn(void)
{
        uint32_t wait_cnt = 0;

        if ((mmio_read_32(PMU_BASE + PMU_PWRDN_ST) &
             PM_PWRDM_CPUSB_MSK) != PM_PWRDM_CPUSB_MSK) {
                ERROR("%s: not all cpus are off\n", __func__);
                return;
        }

        rk3368_flash_l2_b();

        regs_updata_bit_set(PMU_BASE + PMU_SFT_CON, pmu_sft_acinactm_clst_b);

        while (!(mmio_read_32(PMU_BASE +
                 PMU_CORE_PWR_ST) & BIT(clst_b_l2_wfi))) {
                wait_cnt++;
                if (!(wait_cnt % MAX_WAIT_CONUT))
                        ERROR("%s:wait cluster-b l2(%x)\n", __func__,
                              mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST));
        }

        rk3368_pmu_bus_idle(bus_ide_req_clst_b, 1);
}

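/*
 * Program the PMU power-mode registers for system suspend: power down
 * the cores and the little cluster's SCU with L2 flush/idle, enable the
 * cluster wakeup sources (GPIO wakeup disabled), and set the PLL lock,
 * PLL reset and stabilisation counters in 24 MHz cycles.
 */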
static void pmu_sleep_mode_config(void)
{
        uint32_t pwrmd_core, pwrmd_com;

        pwrmd_core = BIT(pmu_mdcr_cpu0_pd) |
                     BIT(pmu_mdcr_scu_l_pd) |
                     BIT(pmu_mdcr_l2_flush) |
                     BIT(pmu_mdcr_l2_idle) |
                     BIT(pmu_mdcr_clr_clst_l) |
                     BIT(pmu_mdcr_clr_core) |
                     BIT(pmu_mdcr_clr_cci) |
                     BIT(pmu_mdcr_core_pd);

        pwrmd_com = BIT(pmu_mode_en) |
                    BIT(pmu_mode_sref_enter) |
                    BIT(pmu_mode_pwr_off);

        regs_updata_bit_set(PMU_BASE + PMU_WKUP_CFG2, pmu_cluster_l_wkup_en);
        regs_updata_bit_set(PMU_BASE + PMU_WKUP_CFG2, pmu_cluster_b_wkup_en);
        regs_updata_bit_clr(PMU_BASE + PMU_WKUP_CFG2, pmu_gpio_wkup_en);

        mmio_write_32(PMU_BASE + PMU_PLLLOCK_CNT, CYCL_24M_CNT_MS(2));
        mmio_write_32(PMU_BASE + PMU_PLLRST_CNT, CYCL_24M_CNT_US(100));
        mmio_write_32(PMU_BASE + PMU_STABLE_CNT, CYCL_24M_CNT_MS(2));
        mmio_write_32(PMU_BASE + PMU_PWRMD_CORE, pwrmd_core);
        mmio_write_32(PMU_BASE + PMU_PWRMD_COM, pwrmd_com);
        dsb();
}

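/*
 * Enter the sleep configuration: program the PMU power-mode registers,
 * apply the SoC sleep settings, mask global interrupts at the PMU,
 * power down the big cluster's SCU and redirect both clusters' boot
 * addresses to the pmusram resume entry point.
 */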
static void pmu_set_sleep_mode(void)
{
        pmu_sleep_mode_config();
        soc_sleep_config();
        regs_updata_bit_set(PMU_BASE + PMU_PWRMD_CORE, pmu_mdcr_global_int_dis);
        regs_updata_bit_set(PMU_BASE + PMU_SFT_CON, pmu_sft_glbl_int_dis_b);
        pmu_scu_b_pwrdn();
        mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
                      ((uintptr_t)&pmu_cpuson_entrypoint >>
                       CPU_BOOT_ADDR_ALIGN) | CPU_BOOT_ADDR_WMASK);
        mmio_write_32(SGRF_BASE + SGRF_SOC_CON(2),
                      ((uintptr_t)&pmu_cpuson_entrypoint >>
                       CPU_BOOT_ADDR_ALIGN) | CPU_BOOT_ADDR_WMASK);
}

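/*
 * Switch the power domain of one cpu in the given cluster to pd_state.
 * When powering a cpu off, it must first be seen in WFI/WFE; otherwise
 * -EINVAL is returned.
 */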
static int cpus_id_power_domain(uint32_t cluster,
                                uint32_t cpu,
                                uint32_t pd_state,
                                uint32_t wfie_msk)
{
        uint32_t pd;
        uint64_t mpidr;

        if (cluster)
                pd = PD_CPUB0 + cpu;
        else
                pd = PD_CPUL0 + cpu;

        if (pmu_power_domain_st(pd) == pd_state)
                return 0;

        if (pd_state == pmu_pd_off) {
                mpidr = (cluster << MPIDR_AFF1_SHIFT) | cpu;
                if (check_cpu_wfie(mpidr, wfie_msk))
                        return -EINVAL;
        }

        return pmu_power_domain_ctr(pd, pd_state);
}

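/* Power down every cpu in both clusters except the one we are booting on. */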
static void nonboot_cpus_off(void)
{
        uint32_t boot_cpu, boot_cluster, cpu;

        boot_cpu = MPIDR_AFFLVL0_VAL(read_mpidr_el1());
        boot_cluster = MPIDR_AFFLVL1_VAL(read_mpidr_el1());

        /* turn off the non-boot cpus */
        for (cpu = 0; cpu < PLATFORM_CLUSTER0_CORE_COUNT; cpu++) {
                if (!boot_cluster && (cpu == boot_cpu))
                        continue;
                cpus_id_power_domain(0, cpu, pmu_pd_off, CKECK_WFEI_MSK);
        }

        for (cpu = 0; cpu < PLATFORM_CLUSTER1_CORE_COUNT; cpu++) {
                if (boot_cluster && (cpu == boot_cpu))
                        continue;
                cpus_id_power_domain(1, cpu, pmu_pd_off, CKECK_WFEI_MSK);
        }
}

void sram_save(void)
{
        /* TODO: support the sram save for rk3368 SoCs */
}

void sram_restore(void)
{
        /* TODO: support the sram restore for rk3368 SoCs */
}

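/*
 * Power on the cpu identified by mpidr and have it enter the warm-boot
 * path: record the hotplug flag and entry point, temporarily point the
 * cluster's boot address at pmusram, power the cpu domain on, then
 * restore the cold-boot address.
 */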
int rockchip_soc_cores_pwr_dm_on(unsigned long mpidr, uint64_t entrypoint)
{
        uint32_t cpu, cluster;
        uint32_t cpuon_id;

        cpu = MPIDR_AFFLVL0_VAL(mpidr);
        cluster = MPIDR_AFFLVL1_VAL(mpidr);

        /* Make sure the cpu is off before powering it up */
        cpus_id_power_domain(cluster, cpu, pmu_pd_off, CKECK_WFEI_MSK);

        cpuon_id = (cluster * PLATFORM_CLUSTER0_CORE_COUNT) + cpu;
        assert(cpuon_id < PLATFORM_CORE_COUNT);
        assert(cpuson_flags[cpuon_id] == 0);
        cpuson_flags[cpuon_id] = PMU_CPU_HOTPLUG;
        cpuson_entry_point[cpuon_id] = entrypoint;

        /* Switch boot addr to pmusram */
        mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1 + cluster),
                      (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) |
                      CPU_BOOT_ADDR_WMASK);
        dsb();

        cpus_id_power_domain(cluster, cpu, pmu_pd_on, CKECK_WFEI_MSK);

        mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1 + cluster),
                      (COLD_BOOT_BASE >> CPU_BOOT_ADDR_ALIGN) |
                      CPU_BOOT_ADDR_WMASK);

        return 0;
}

int rockchip_soc_cores_pwr_dm_on_finish(void)
{
        return 0;
}

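/*
 * System resume: restore the cold-boot addresses for both clusters,
 * bring the PLLs back up and power up the big cluster's SCU interface.
 */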
int rockchip_soc_sys_pwr_dm_resume(void)
{
        mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
                      (COLD_BOOT_BASE >> CPU_BOOT_ADDR_ALIGN) |
                      CPU_BOOT_ADDR_WMASK);
        mmio_write_32(SGRF_BASE + SGRF_SOC_CON(2),
                      (COLD_BOOT_BASE >> CPU_BOOT_ADDR_ALIGN) |
                      CPU_BOOT_ADDR_WMASK);
        pm_plls_resume();
        pmu_scu_b_pwrup();

        return 0;
}

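/*
 * System suspend: power down all non-boot cpus and program the PMU
 * sleep configuration.
 */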
int rockchip_soc_sys_pwr_dm_suspend(void)
{
        nonboot_cpus_off();
        pmu_set_sleep_mode();

        return 0;
}

void rockchip_plat_mmu_el3(void)
{
        /* TODO: support the el3 for rk3368 SoCs */
}

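/*
 * Platform PMU initialisation: record the warm-boot entry address,
 * clear the per-cpu hotplug flags and power down the non-boot cpus.
 */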
void plat_rockchip_pmu_init(void)
{
        uint32_t cpu;

        /* the boot address register only takes a 32-bit value */
        cpu_warm_boot_addr = (uint64_t)platform_cpu_warmboot;

        for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++)
                cpuson_flags[cpu] = 0;

        nonboot_cpus_off();
        INFO("%s(%d): pd status %x\n", __func__, __LINE__,
             mmio_read_32(PMU_BASE + PMU_PWRDN_ST));
}