/*
 * Copyright (C) 2002 ARM Ltd.
 * All Rights Reserved
 * Copyright (c) 2010, Code Aurora Forum. All rights reserved.
 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/smp.h>
#include <linux/io.h>

#include <asm/smp_plat.h>

#include "scm-boot.h"

#define VDD_SC1_ARRAY_CLAMP_GFS_CTL	0x35a0
#define SCSS_CPU1CORE_RESET		0x2d80
#define SCSS_DBG_STATUS_CORE_PWRDUP	0x2e64

#define APCS_CPU_PWR_CTL	0x04
#define PLL_CLAMP		BIT(8)
#define CORE_PWRD_UP		BIT(7)
#define COREPOR_RST		BIT(5)
#define CORE_RST		BIT(4)
#define L2DT_SLP		BIT(3)
#define CLAMP			BIT(0)

#define APC_PWR_GATE_CTL	0x14
#define BHS_CNT_SHIFT		24
#define LDO_PWR_DWN_SHIFT	16
#define LDO_BYP_SHIFT		8
#define BHS_SEG_SHIFT		1
#define BHS_EN			BIT(0)

#define APCS_SAW2_VCTL		0x14
#define APCS_SAW2_2_VCTL	0x1c

extern void secondary_startup(void);

static DEFINE_SPINLOCK(boot_lock);

#ifdef CONFIG_HOTPLUG_CPU
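/* Park the dying CPU in wait-for-interrupt; it is woken again on the next boot. */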
static void __ref qcom_cpu_die(unsigned int cpu)
{
	wfi();
}
#endif

static void qcom_secondary_init(unsigned int cpu)
{
	/*
	 * Synchronise with the boot thread.
	 */
	spin_lock(&boot_lock);
	spin_unlock(&boot_lock);
}

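/*
 * Release the second core on MSM8660: map the global clock controller,
 * clear the array clamp and the core reset, and flag the core as
 * powered up.
 */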
static int scss_release_secondary(unsigned int cpu)
{
	struct device_node *node;
	void __iomem *base;

	node = of_find_compatible_node(NULL, NULL, "qcom,gcc-msm8660");
	if (!node) {
		pr_err("%s: can't find node\n", __func__);
		return -ENXIO;
	}

	base = of_iomap(node, 0);
	of_node_put(node);
	if (!base)
		return -ENOMEM;

	writel_relaxed(0, base + VDD_SC1_ARRAY_CLAMP_GFS_CTL);
	writel_relaxed(0, base + SCSS_CPU1CORE_RESET);
	writel_relaxed(3, base + SCSS_DBG_STATUS_CORE_PWRDUP);
	mb();
	iounmap(base);

	return 0;
}

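/*
 * Power up a secondary Krait on KPSS v1: raise the CPU rail via the
 * per-CPU SAW, then step through the ACC power/reset sequence to bring
 * the core out of reset.
 */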
static int kpssv1_release_secondary(unsigned int cpu)
{
	int ret = 0;
	void __iomem *reg, *saw_reg;
	struct device_node *cpu_node, *acc_node, *saw_node;
	u32 val;

	cpu_node = of_get_cpu_node(cpu, NULL);
	if (!cpu_node)
		return -ENODEV;

	acc_node = of_parse_phandle(cpu_node, "qcom,acc", 0);
	if (!acc_node) {
		ret = -ENODEV;
		goto out_acc;
	}

	saw_node = of_parse_phandle(cpu_node, "qcom,saw", 0);
	if (!saw_node) {
		ret = -ENODEV;
		goto out_saw;
	}

	reg = of_iomap(acc_node, 0);
	if (!reg) {
		ret = -ENOMEM;
		goto out_acc_map;
	}

	saw_reg = of_iomap(saw_node, 0);
	if (!saw_reg) {
		ret = -ENOMEM;
		goto out_saw_map;
	}

	/* Turn on CPU rail */
	writel_relaxed(0xA4, saw_reg + APCS_SAW2_VCTL);
	mb();
	udelay(512);

	/* Krait bring-up sequence */
	val = PLL_CLAMP | L2DT_SLP | CLAMP;
	writel_relaxed(val, reg + APCS_CPU_PWR_CTL);
	val &= ~L2DT_SLP;
	writel_relaxed(val, reg + APCS_CPU_PWR_CTL);
	mb();
	ndelay(300);

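	/* Assert core power-on reset. */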
	val |= COREPOR_RST;
	writel_relaxed(val, reg + APCS_CPU_PWR_CTL);
	mb();
	udelay(2);

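	/* Remove the power clamp. */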
	val &= ~CLAMP;
	writel_relaxed(val, reg + APCS_CPU_PWR_CTL);
	mb();
	udelay(2);

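	/* Release the core from power-on reset. */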
	val &= ~COREPOR_RST;
	writel_relaxed(val, reg + APCS_CPU_PWR_CTL);
	mb();
	udelay(100);

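	/* Signal that the core is powered up. */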
	val |= CORE_PWRD_UP;
	writel_relaxed(val, reg + APCS_CPU_PWR_CTL);
	mb();

	iounmap(saw_reg);
out_saw_map:
	iounmap(reg);
out_acc_map:
	of_node_put(saw_node);
out_saw:
	of_node_put(acc_node);
out_acc:
	of_node_put(cpu_node);
	return ret;
}

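/*
 * Power up a secondary Krait on KPSS v2: enable the BHS power switch,
 * hand the core's supply over from the LDO to the BHS, program the L2
 * SAW, then release the core from reset.
 */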
static int kpssv2_release_secondary(unsigned int cpu)
{
	void __iomem *reg;
	struct device_node *cpu_node, *l2_node, *acc_node, *saw_node;
	void __iomem *l2_saw_base;
	unsigned reg_val;
	int ret;

	cpu_node = of_get_cpu_node(cpu, NULL);
	if (!cpu_node)
		return -ENODEV;

	acc_node = of_parse_phandle(cpu_node, "qcom,acc", 0);
	if (!acc_node) {
		ret = -ENODEV;
		goto out_acc;
	}

	l2_node = of_parse_phandle(cpu_node, "next-level-cache", 0);
	if (!l2_node) {
		ret = -ENODEV;
		goto out_l2;
	}

	saw_node = of_parse_phandle(l2_node, "qcom,saw", 0);
	if (!saw_node) {
		ret = -ENODEV;
		goto out_saw;
	}

	reg = of_iomap(acc_node, 0);
	if (!reg) {
		ret = -ENOMEM;
		goto out_map;
	}

	l2_saw_base = of_iomap(saw_node, 0);
	if (!l2_saw_base) {
		ret = -ENOMEM;
		goto out_saw_map;
	}

	/* Turn on the BHS, turn off LDO Bypass and power down LDO */
	reg_val = (64 << BHS_CNT_SHIFT) | (0x3f << LDO_PWR_DWN_SHIFT) | BHS_EN;
	writel_relaxed(reg_val, reg + APC_PWR_GATE_CTL);
	mb();
	/* wait for the BHS to settle */
	udelay(1);

	/* Turn on BHS segments */
	reg_val |= 0x3f << BHS_SEG_SHIFT;
	writel_relaxed(reg_val, reg + APC_PWR_GATE_CTL);
	mb();
	/* wait for the BHS to settle */
	udelay(1);

	/* Finally turn on the bypass so that BHS supplies power */
	reg_val |= 0x3f << LDO_BYP_SHIFT;
	writel_relaxed(reg_val, reg + APC_PWR_GATE_CTL);

	/* enable max phases */
	writel_relaxed(0x10003, l2_saw_base + APCS_SAW2_2_VCTL);
	mb();
	udelay(50);

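	/* Assert power-on reset with the power clamp still engaged. */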
	reg_val = COREPOR_RST | CLAMP;
	writel_relaxed(reg_val, reg + APCS_CPU_PWR_CTL);
	mb();
	udelay(2);

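	/* Remove the power clamp. */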
	reg_val &= ~CLAMP;
	writel_relaxed(reg_val, reg + APCS_CPU_PWR_CTL);
	mb();
	udelay(2);

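	/* Release the core from power-on reset. */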
	reg_val &= ~COREPOR_RST;
	writel_relaxed(reg_val, reg + APCS_CPU_PWR_CTL);
	mb();

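	/* Signal that the core is powered up. */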
	reg_val |= CORE_PWRD_UP;
	writel_relaxed(reg_val, reg + APCS_CPU_PWR_CTL);
	mb();

	ret = 0;

	iounmap(l2_saw_base);
out_saw_map:
	iounmap(reg);
out_map:
	of_node_put(saw_node);
out_saw:
	of_node_put(l2_node);
out_l2:
	of_node_put(acc_node);
out_acc:
	of_node_put(cpu_node);

	return ret;
}

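/* Set once a CPU has completed its one-time cold-boot release sequence. */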
static DEFINE_PER_CPU(int, cold_boot_done);

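/*
 * Common boot path: run the SoC-specific release sequence once per CPU,
 * then wake the core with an IPI so it enters the boot address set up in
 * qcom_smp_prepare_cpus().
 */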
static int qcom_boot_secondary(unsigned int cpu, int (*func)(unsigned int))
{
	int ret = 0;

	if (!per_cpu(cold_boot_done, cpu)) {
		ret = func(cpu);
		if (!ret)
			per_cpu(cold_boot_done, cpu) = true;
	}

	/*
	 * Set the synchronisation state between this boot processor
	 * and the secondary one.
	 */
	spin_lock(&boot_lock);

	/*
	 * Send the secondary CPU a soft interrupt, thereby causing
	 * the boot monitor to read the system wide flags register,
	 * and branch to the address found there.
	 */
	arch_send_wakeup_ipi_mask(cpumask_of(cpu));

	/*
	 * Now that the secondary core is starting up, let it run its
	 * calibrations, then wait for it to finish.
	 */
	spin_unlock(&boot_lock);

	return ret;
}

static int msm8660_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	return qcom_boot_secondary(cpu, scss_release_secondary);
}

static int kpssv1_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	return qcom_boot_secondary(cpu, kpssv1_release_secondary);
}

static int kpssv2_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	return qcom_boot_secondary(cpu, kpssv2_release_secondary);
}

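/*
 * Collect the cold-boot flag for every present CPU and pass the secondary
 * entry point to the secure monitor.  If that fails, mark the secondaries
 * as not present so SMP bring-up is skipped.
 */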
static void __init qcom_smp_prepare_cpus(unsigned int max_cpus)
{
	int cpu, map;
	unsigned int flags = 0;
	static const int cold_boot_flags[] = {
		0,
		SCM_FLAG_COLDBOOT_CPU1,
		SCM_FLAG_COLDBOOT_CPU2,
		SCM_FLAG_COLDBOOT_CPU3,
	};

	for_each_present_cpu(cpu) {
		map = cpu_logical_map(cpu);
		if (WARN_ON(map >= ARRAY_SIZE(cold_boot_flags))) {
			set_cpu_present(cpu, false);
			continue;
		}
		flags |= cold_boot_flags[map];
	}

	if (scm_set_boot_addr(virt_to_phys(secondary_startup), flags)) {
		for_each_present_cpu(cpu) {
			if (cpu == smp_processor_id())
				continue;
			set_cpu_present(cpu, false);
		}
		pr_warn("Failed to set CPU boot address, disabling SMP\n");
	}
}

static struct smp_operations smp_msm8660_ops __initdata = {
	.smp_prepare_cpus	= qcom_smp_prepare_cpus,
	.smp_secondary_init	= qcom_secondary_init,
	.smp_boot_secondary	= msm8660_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die		= qcom_cpu_die,
#endif
};
CPU_METHOD_OF_DECLARE(qcom_smp, "qcom,gcc-msm8660", &smp_msm8660_ops);

static struct smp_operations qcom_smp_kpssv1_ops __initdata = {
	.smp_prepare_cpus	= qcom_smp_prepare_cpus,
	.smp_secondary_init	= qcom_secondary_init,
	.smp_boot_secondary	= kpssv1_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die		= qcom_cpu_die,
#endif
};
CPU_METHOD_OF_DECLARE(qcom_smp_kpssv1, "qcom,kpss-acc-v1", &qcom_smp_kpssv1_ops);

static struct smp_operations qcom_smp_kpssv2_ops __initdata = {
	.smp_prepare_cpus	= qcom_smp_prepare_cpus,
	.smp_secondary_init	= qcom_secondary_init,
	.smp_boot_secondary	= kpssv2_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die		= qcom_cpu_die,
#endif
};
CPU_METHOD_OF_DECLARE(qcom_smp_kpssv2, "qcom,kpss-acc-v2", &qcom_smp_kpssv2_ops);