// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2009-2012 Freescale Semiconductor, Inc.
 *
 * This file is derived from arch/powerpc/cpu/mpc85xx/cpu.c and
 * arch/powerpc/cpu/mpc86xx/cpu.c. Basically this file contains
 * cpu specific common code for 85xx/86xx processors.
 */

#include <config.h>
#include <common.h>
#include <command.h>
#include <cpu_func.h>
#include <tsec.h>
#include <fm_eth.h>
#include <netdev.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <vsc9953.h>

DECLARE_GLOBAL_DATA_PTR;

static struct cpu_type cpu_type_list[] = {
#if defined(CONFIG_MPC85xx)
	CPU_TYPE_ENTRY(8533, 8533, 1),
	CPU_TYPE_ENTRY(8535, 8535, 1),
	CPU_TYPE_ENTRY(8536, 8536, 1),
	CPU_TYPE_ENTRY(8540, 8540, 1),
	CPU_TYPE_ENTRY(8541, 8541, 1),
	CPU_TYPE_ENTRY(8543, 8543, 1),
	CPU_TYPE_ENTRY(8544, 8544, 1),
	CPU_TYPE_ENTRY(8545, 8545, 1),
	CPU_TYPE_ENTRY(8547, 8547, 1),
	CPU_TYPE_ENTRY(8548, 8548, 1),
	CPU_TYPE_ENTRY(8555, 8555, 1),
	CPU_TYPE_ENTRY(8560, 8560, 1),
	CPU_TYPE_ENTRY(8567, 8567, 1),
	CPU_TYPE_ENTRY(8568, 8568, 1),
	CPU_TYPE_ENTRY(8569, 8569, 1),
	CPU_TYPE_ENTRY(8572, 8572, 2),
	CPU_TYPE_ENTRY(P1010, P1010, 1),
	CPU_TYPE_ENTRY(P1011, P1011, 1),
	CPU_TYPE_ENTRY(P1012, P1012, 1),
	CPU_TYPE_ENTRY(P1013, P1013, 1),
	CPU_TYPE_ENTRY(P1014, P1014, 1),
	CPU_TYPE_ENTRY(P1017, P1017, 1),
	CPU_TYPE_ENTRY(P1020, P1020, 2),
	CPU_TYPE_ENTRY(P1021, P1021, 2),
	CPU_TYPE_ENTRY(P1022, P1022, 2),
	CPU_TYPE_ENTRY(P1023, P1023, 2),
	CPU_TYPE_ENTRY(P1024, P1024, 2),
	CPU_TYPE_ENTRY(P1025, P1025, 2),
	CPU_TYPE_ENTRY(P2010, P2010, 1),
	CPU_TYPE_ENTRY(P2020, P2020, 2),
	CPU_TYPE_ENTRY(P2040, P2040, 4),
	CPU_TYPE_ENTRY(P2041, P2041, 4),
	CPU_TYPE_ENTRY(P3041, P3041, 4),
	CPU_TYPE_ENTRY(P4040, P4040, 4),
	CPU_TYPE_ENTRY(P4080, P4080, 8),
	CPU_TYPE_ENTRY(P5010, P5010, 1),
	CPU_TYPE_ENTRY(P5020, P5020, 2),
	CPU_TYPE_ENTRY(P5021, P5021, 2),
	CPU_TYPE_ENTRY(P5040, P5040, 4),
	CPU_TYPE_ENTRY(T4240, T4240, 0),
	CPU_TYPE_ENTRY(T4120, T4120, 0),
	CPU_TYPE_ENTRY(T4160, T4160, 0),
	CPU_TYPE_ENTRY(T4080, T4080, 4),
	CPU_TYPE_ENTRY(B4860, B4860, 0),
	CPU_TYPE_ENTRY(G4860, G4860, 0),
	CPU_TYPE_ENTRY(B4440, B4440, 0),
	CPU_TYPE_ENTRY(B4460, B4460, 0),
	CPU_TYPE_ENTRY(G4440, G4440, 0),
	CPU_TYPE_ENTRY(B4420, B4420, 0),
	CPU_TYPE_ENTRY(B4220, B4220, 0),
	CPU_TYPE_ENTRY(T1040, T1040, 0),
	CPU_TYPE_ENTRY(T1041, T1041, 0),
	CPU_TYPE_ENTRY(T1042, T1042, 0),
	CPU_TYPE_ENTRY(T1020, T1020, 0),
	CPU_TYPE_ENTRY(T1021, T1021, 0),
	CPU_TYPE_ENTRY(T1022, T1022, 0),
	CPU_TYPE_ENTRY(T1024, T1024, 0),
	CPU_TYPE_ENTRY(T1023, T1023, 0),
	CPU_TYPE_ENTRY(T1014, T1014, 0),
	CPU_TYPE_ENTRY(T1013, T1013, 0),
	CPU_TYPE_ENTRY(T2080, T2080, 0),
	CPU_TYPE_ENTRY(T2081, T2081, 0),
	CPU_TYPE_ENTRY(BSC9130, 9130, 1),
	CPU_TYPE_ENTRY(BSC9131, 9131, 1),
	CPU_TYPE_ENTRY(BSC9132, 9132, 2),
	CPU_TYPE_ENTRY(BSC9232, 9232, 2),
	CPU_TYPE_ENTRY(C291, C291, 1),
	CPU_TYPE_ENTRY(C292, C292, 1),
	CPU_TYPE_ENTRY(C293, C293, 1),
#elif defined(CONFIG_MPC86xx)
	CPU_TYPE_ENTRY(8610, 8610, 1),
	CPU_TYPE_ENTRY(8641, 8641, 2),
	CPU_TYPE_ENTRY(8641D, 8641D, 2),
#endif
};

#ifdef CONFIG_SYS_FSL_QORIQ_CHASSIS2
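/*
 * Decode one initiator slot of a topology cluster word: look up its entry in
 * the initiator type (tp_ityp) table and return that value if the initiator
 * is marked available (TP_ITYP_AV), or 0 if the slot is empty or unavailable.
 */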
static inline u32 init_type(u32 cluster, int init_id)
{
	ccsr_gur_t *gur = (void __iomem *)(CONFIG_SYS_MPC85xx_GUTS_ADDR);
	u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK;
	u32 type = in_be32(&gur->tp_ityp[idx]);

	if (type & TP_ITYP_AV)
		return type;

	return 0;
}

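/*
 * Walk the topology cluster registers and build a bitmap of the available
 * initiators whose type is PPC, one bit per core in discovery order.
 */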
u32 compute_ppc_cpumask(void)
{
	ccsr_gur_t *gur = (void __iomem *)(CONFIG_SYS_MPC85xx_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;
		cluster = in_be32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = init_type(cluster, j);
			if (type) {
				if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_PPC)
					mask |= 1 << count;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) != TP_CLUSTER_EOC);

	return mask;
}

#ifdef CONFIG_HETROGENOUS_CLUSTERS
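/*
 * Same walk as compute_ppc_cpumask(), but starting at the first DSP cluster
 * and collecting only initiators of the SC (DSP) type.
 */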
u32 compute_dsp_cpumask(void)
{
	ccsr_gur_t *gur = (void __iomem *)(CONFIG_SYS_MPC85xx_GUTS_ADDR);
	int i = CONFIG_DSP_CLUSTER_START, count = 0;
	u32 cluster, type, dsp_mask = 0;

	do {
		int j;
		cluster = in_be32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = init_type(cluster, j);
			if (type) {
				if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_SC)
					dsp_mask |= 1 << count;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) != TP_CLUSTER_EOC);

	return dsp_mask;
}

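/*
 * Map a DSP core number (counted in discovery order) back to the index of
 * the topology cluster that contains it; returns -1 if it cannot be found.
 */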
int fsl_qoriq_dsp_core_to_cluster(unsigned int core)
{
	ccsr_gur_t *gur = (void __iomem *)(CONFIG_SYS_MPC85xx_GUTS_ADDR);
	int count = 0, i = CONFIG_DSP_CLUSTER_START;
	u32 cluster;

	do {
		int j;
		cluster = in_be32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			if (init_type(cluster, j)) {
				if (count == core)
					return i;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) != TP_CLUSTER_EOC);

	return -1; /* cannot identify the cluster */
}
#endif

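/*
 * Map a core number (counted over all available initiators in discovery
 * order) to the index of the topology cluster that contains it; returns -1
 * if it cannot be found.
 */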
int fsl_qoriq_core_to_cluster(unsigned int core)
{
	ccsr_gur_t *gur = (void __iomem *)(CONFIG_SYS_MPC85xx_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster;

	do {
		int j;
		cluster = in_be32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			if (init_type(cluster, j)) {
				if (count == core)
					return i;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) != TP_CLUSTER_EOC);

	return -1; /* cannot identify the cluster */
}

#else /* CONFIG_SYS_FSL_QORIQ_CHASSIS2 */
/*
 * Before chassis generation 2, the cpumask should be hard-coded.
 * If the cpu type is unknown or the cpumask is unset, use 1 as a fail-safe.
 */
#define compute_ppc_cpumask()	1
#define fsl_qoriq_core_to_cluster(x)	x
#endif /* CONFIG_SYS_FSL_QORIQ_CHASSIS2 */

static struct cpu_type cpu_type_unknown = CPU_TYPE_ENTRY(Unknown, Unknown, 0);

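/*
 * Look up the SoC version (derived from the SVR) in cpu_type_list; falls
 * back to cpu_type_unknown if no entry matches.
 */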
struct cpu_type *identify_cpu(u32 ver)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(cpu_type_list); i++) {
		if (cpu_type_list[i].soc_ver == ver)
			return &cpu_type_list[i];
	}
	return &cpu_type_unknown;
}

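/*
 * "Number of CPUs" field in the PIC feature reporting register (FRR), used
 * below as a fallback core count when the SoC is not in cpu_type_list.
 */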
#define MPC8xxx_PICFRR_NCPU_MASK	0x00001f00
#define MPC8xxx_PICFRR_NCPU_SHIFT	8

/*
 * Return a 32-bit mask indicating which cores are present on this SOC.
 */
__weak u32 cpu_mask(void)
{
	ccsr_pic_t __iomem *pic = (void *)CONFIG_SYS_MPC8xxx_PIC_ADDR;
	struct cpu_type *cpu = gd->arch.cpu;

	/* better to query feature reporting register than just assume 1 */
	if (cpu == &cpu_type_unknown)
		return ((in_be32(&pic->frr) & MPC8xxx_PICFRR_NCPU_MASK) >>
			MPC8xxx_PICFRR_NCPU_SHIFT) + 1;

	if (cpu->num_cores == 0)
		return compute_ppc_cpumask();

	return cpu->mask;
}

#ifdef CONFIG_HETROGENOUS_CLUSTERS
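/*
 * Return a 32-bit mask indicating which SC/DSP cores are present on this SOC.
 */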
__weak u32 cpu_dsp_mask(void)
{
	ccsr_pic_t __iomem *pic = (void *)CONFIG_SYS_MPC8xxx_PIC_ADDR;
	struct cpu_type *cpu = gd->arch.cpu;

	/* better to query feature reporting register than just assume 1 */
	if (cpu == &cpu_type_unknown)
		return ((in_be32(&pic->frr) & MPC8xxx_PICFRR_NCPU_MASK) >>
			MPC8xxx_PICFRR_NCPU_SHIFT) + 1;

	if (cpu->dsp_num_cores == 0)
		return compute_dsp_cpumask();

	return cpu->dsp_mask;
}

/*
 * Return the number of SC/DSP cores on this SOC.
 */
__weak int cpu_num_dspcores(void)
{
	struct cpu_type *cpu = gd->arch.cpu;

	/*
	 * Report # of cores in terms of the cpu_mask if we haven't
	 * figured out how many there are yet
	 */
	if (cpu->dsp_num_cores == 0)
		return hweight32(cpu_dsp_mask());

	return cpu->dsp_num_cores;
}
#endif

/*
 * Return the number of PPC cores on this SOC.
 */
__weak int cpu_numcores(void)
{
	struct cpu_type *cpu = gd->arch.cpu;

	/*
	 * Report # of cores in terms of the cpu_mask if we haven't
	 * figured out how many there are yet
	 */
	if (cpu->num_cores == 0)
		return hweight32(cpu_mask());

	return cpu->num_cores;
}

/*
 * Check if the given core ID is valid
 *
 * Returns zero if it isn't, 1 if it is.
 */
int is_core_valid(unsigned int core)
{
	return !!((1 << core) & cpu_mask());
}
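
/*
 * Illustrative sketch (not used by this file): a caller can enumerate the
 * cores reported above by walking the bits of cpu_mask(), e.g.
 *
 *	u32 mask = cpu_mask();
 *	int core;
 *
 *	for (core = 0; mask; core++, mask >>= 1)
 *		if (mask & 1)
 *			printf("core %d present\n", core);
 *
 * is_core_valid(core) performs the same single-bit test for one core.
 */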
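/*
 * Early CPU setup: read the SVR, derive the SoC version and cache the
 * matching cpu_type entry in global data for the queries above.
 */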
int arch_cpu_init(void)
{
	uint svr;
	uint ver;

	svr = get_svr();
	ver = SVR_SOC_VER(svr);

	gd->arch.cpu = identify_cpu(ver);

	return 0;
}

/* Once in memory, compute mask & # cores once and save them off */
int fixup_cpu(void)
{
	struct cpu_type *cpu = gd->arch.cpu;

	if (cpu->num_cores == 0) {
		cpu->mask = cpu_mask();
		cpu->num_cores = cpu_numcores();
	}

#ifdef CONFIG_HETROGENOUS_CLUSTERS
	if (cpu->dsp_num_cores == 0) {
		cpu->dsp_mask = cpu_dsp_mask();
		cpu->dsp_num_cores = cpu_num_dspcores();
	}
#endif
	return 0;
}

/*
 * Initializes on-chip ethernet controllers.
 * To override, implement board_eth_init().
 */
int cpu_eth_init(bd_t *bis)
{
#if defined(CONFIG_ETHER_ON_FCC)
	fec_initialize(bis);
#endif

#if defined(CONFIG_UEC_ETH)
	uec_standard_init(bis);
#endif

#if defined(CONFIG_TSEC_ENET) || defined(CONFIG_MPC85XX_FEC)
	tsec_standard_init(bis);
#endif

#ifdef CONFIG_FMAN_ENET
	fm_standard_init(bis);
#endif

#ifdef CONFIG_VSC9953
	vsc9953_init(bis);
#endif
	return 0;
}