// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2009-2012 Freescale Semiconductor, Inc.
 *
 * This file is derived from arch/powerpc/cpu/mpc85xx/cpu.c and
 * arch/powerpc/cpu/mpc86xx/cpu.c. Basically this file contains
 * cpu specific common code for 85xx/86xx processors.
 */

#include <config.h>
#include <common.h>
#include <command.h>
#include <tsec.h>
#include <fm_eth.h>
#include <netdev.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <vsc9953.h>

DECLARE_GLOBAL_DATA_PTR;

static struct cpu_type cpu_type_list[] = {
#if defined(CONFIG_MPC85xx)
	CPU_TYPE_ENTRY(8533, 8533, 1),
	CPU_TYPE_ENTRY(8535, 8535, 1),
	CPU_TYPE_ENTRY(8536, 8536, 1),
	CPU_TYPE_ENTRY(8540, 8540, 1),
	CPU_TYPE_ENTRY(8541, 8541, 1),
	CPU_TYPE_ENTRY(8543, 8543, 1),
	CPU_TYPE_ENTRY(8544, 8544, 1),
	CPU_TYPE_ENTRY(8545, 8545, 1),
	CPU_TYPE_ENTRY(8547, 8547, 1),
	CPU_TYPE_ENTRY(8548, 8548, 1),
	CPU_TYPE_ENTRY(8555, 8555, 1),
	CPU_TYPE_ENTRY(8560, 8560, 1),
	CPU_TYPE_ENTRY(8567, 8567, 1),
	CPU_TYPE_ENTRY(8568, 8568, 1),
	CPU_TYPE_ENTRY(8569, 8569, 1),
	CPU_TYPE_ENTRY(8572, 8572, 2),
	CPU_TYPE_ENTRY(P1010, P1010, 1),
	CPU_TYPE_ENTRY(P1011, P1011, 1),
	CPU_TYPE_ENTRY(P1012, P1012, 1),
	CPU_TYPE_ENTRY(P1013, P1013, 1),
	CPU_TYPE_ENTRY(P1014, P1014, 1),
	CPU_TYPE_ENTRY(P1017, P1017, 1),
	CPU_TYPE_ENTRY(P1020, P1020, 2),
	CPU_TYPE_ENTRY(P1021, P1021, 2),
	CPU_TYPE_ENTRY(P1022, P1022, 2),
	CPU_TYPE_ENTRY(P1023, P1023, 2),
	CPU_TYPE_ENTRY(P1024, P1024, 2),
	CPU_TYPE_ENTRY(P1025, P1025, 2),
	CPU_TYPE_ENTRY(P2010, P2010, 1),
	CPU_TYPE_ENTRY(P2020, P2020, 2),
	CPU_TYPE_ENTRY(P2040, P2040, 4),
	CPU_TYPE_ENTRY(P2041, P2041, 4),
	CPU_TYPE_ENTRY(P3041, P3041, 4),
	CPU_TYPE_ENTRY(P4040, P4040, 4),
	CPU_TYPE_ENTRY(P4080, P4080, 8),
	CPU_TYPE_ENTRY(P5010, P5010, 1),
	CPU_TYPE_ENTRY(P5020, P5020, 2),
	CPU_TYPE_ENTRY(P5021, P5021, 2),
	CPU_TYPE_ENTRY(P5040, P5040, 4),
	CPU_TYPE_ENTRY(T4240, T4240, 0),
	CPU_TYPE_ENTRY(T4120, T4120, 0),
	CPU_TYPE_ENTRY(T4160, T4160, 0),
	CPU_TYPE_ENTRY(T4080, T4080, 4),
	CPU_TYPE_ENTRY(B4860, B4860, 0),
	CPU_TYPE_ENTRY(G4860, G4860, 0),
	CPU_TYPE_ENTRY(B4440, B4440, 0),
	CPU_TYPE_ENTRY(B4460, B4460, 0),
	CPU_TYPE_ENTRY(G4440, G4440, 0),
	CPU_TYPE_ENTRY(B4420, B4420, 0),
	CPU_TYPE_ENTRY(B4220, B4220, 0),
	CPU_TYPE_ENTRY(T1040, T1040, 0),
	CPU_TYPE_ENTRY(T1041, T1041, 0),
	CPU_TYPE_ENTRY(T1042, T1042, 0),
	CPU_TYPE_ENTRY(T1020, T1020, 0),
	CPU_TYPE_ENTRY(T1021, T1021, 0),
	CPU_TYPE_ENTRY(T1022, T1022, 0),
	CPU_TYPE_ENTRY(T1024, T1024, 0),
	CPU_TYPE_ENTRY(T1023, T1023, 0),
	CPU_TYPE_ENTRY(T1014, T1014, 0),
	CPU_TYPE_ENTRY(T1013, T1013, 0),
	CPU_TYPE_ENTRY(T2080, T2080, 0),
	CPU_TYPE_ENTRY(T2081, T2081, 0),
	CPU_TYPE_ENTRY(BSC9130, 9130, 1),
	CPU_TYPE_ENTRY(BSC9131, 9131, 1),
	CPU_TYPE_ENTRY(BSC9132, 9132, 2),
	CPU_TYPE_ENTRY(BSC9232, 9232, 2),
	CPU_TYPE_ENTRY(C291, C291, 1),
	CPU_TYPE_ENTRY(C292, C292, 1),
	CPU_TYPE_ENTRY(C293, C293, 1),
#elif defined(CONFIG_MPC86xx)
	CPU_TYPE_ENTRY(8610, 8610, 1),
	CPU_TYPE_ENTRY(8641, 8641, 2),
	CPU_TYPE_ENTRY(8641D, 8641D, 2),
#endif
};

#ifdef CONFIG_SYS_FSL_QORIQ_CHASSIS2
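/*
 * Look up the initiator type for the given initiator of a cluster.
 * Returns the tp_ityp value if the initiator is marked available,
 * or 0 otherwise.
 */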
static inline u32 init_type(u32 cluster, int init_id)
{
	ccsr_gur_t *gur = (void __iomem *)(CONFIG_SYS_MPC85xx_GUTS_ADDR);
	u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK;
	u32 type = in_be32(&gur->tp_ityp[idx]);

	if (type & TP_ITYP_AV)
		return type;

	return 0;
}

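/*
 * Walk the topology clusters and build a bitmask with one bit set
 * for every available PPC core found.
 */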
u32 compute_ppc_cpumask(void)
{
	ccsr_gur_t *gur = (void __iomem *)(CONFIG_SYS_MPC85xx_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;
		cluster = in_be32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = init_type(cluster, j);
			if (type) {
				if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_PPC)
					mask |= 1 << count;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) != TP_CLUSTER_EOC);

	return mask;
}

#ifdef CONFIG_HETROGENOUS_CLUSTERS
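/*
 * Walk the DSP clusters, starting at CONFIG_DSP_CLUSTER_START, and build
 * a bitmask with one bit set for every available SC/DSP core found.
 */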
u32 compute_dsp_cpumask(void)
{
	ccsr_gur_t *gur = (void __iomem *)(CONFIG_SYS_MPC85xx_GUTS_ADDR);
	int i = CONFIG_DSP_CLUSTER_START, count = 0;
	u32 cluster, type, dsp_mask = 0;

	do {
		int j;
		cluster = in_be32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = init_type(cluster, j);
			if (type) {
				if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_SC)
					dsp_mask |= 1 << count;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) != TP_CLUSTER_EOC);

	return dsp_mask;
}

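/*
 * Map a DSP core number to the index of the cluster it belongs to.
 * Returns -1 if the core cannot be found.
 */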
int fsl_qoriq_dsp_core_to_cluster(unsigned int core)
{
	ccsr_gur_t *gur = (void __iomem *)(CONFIG_SYS_MPC85xx_GUTS_ADDR);
	int count = 0, i = CONFIG_DSP_CLUSTER_START;
	u32 cluster;

	do {
		int j;
		cluster = in_be32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			if (init_type(cluster, j)) {
				if (count == core)
					return i;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) != TP_CLUSTER_EOC);

	return -1;	/* cannot identify the cluster */
}
#endif

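/*
 * Map a core number to the index of the cluster it belongs to.
 * Returns -1 if the core cannot be found.
 */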
int fsl_qoriq_core_to_cluster(unsigned int core)
{
	ccsr_gur_t *gur = (void __iomem *)(CONFIG_SYS_MPC85xx_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster;

	do {
		int j;
		cluster = in_be32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			if (init_type(cluster, j)) {
				if (count == core)
					return i;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) != TP_CLUSTER_EOC);

	return -1;	/* cannot identify the cluster */
}

#else /* CONFIG_SYS_FSL_QORIQ_CHASSIS2 */
/*
 * Before chassis generation 2, the cpumask has to be hard-coded.
 * If the CPU type is unknown or the cpumask is unset, use 1 as a failsafe.
 */
#define compute_ppc_cpumask()	1
#define fsl_qoriq_core_to_cluster(x)	x
#endif /* CONFIG_SYS_FSL_QORIQ_CHASSIS2 */

static struct cpu_type cpu_type_unknown = CPU_TYPE_ENTRY(Unknown, Unknown, 0);

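/*
 * Look up the SoC version in cpu_type_list; fall back to cpu_type_unknown
 * if no entry matches.
 */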
struct cpu_type *identify_cpu(u32 ver)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(cpu_type_list); i++) {
		if (cpu_type_list[i].soc_ver == ver)
			return &cpu_type_list[i];
	}
	return &cpu_type_unknown;
}

#define MPC8xxx_PICFRR_NCPU_MASK	0x00001f00
#define MPC8xxx_PICFRR_NCPU_SHIFT	8

/*
 * Return a 32-bit mask indicating which cores are present on this SOC.
 */
__weak u32 cpu_mask(void)
{
	ccsr_pic_t __iomem *pic = (void *)CONFIG_SYS_MPC8xxx_PIC_ADDR;
	struct cpu_type *cpu = gd->arch.cpu;

	/* better to query feature reporting register than just assume 1 */
	if (cpu == &cpu_type_unknown)
		return ((in_be32(&pic->frr) & MPC8xxx_PICFRR_NCPU_MASK) >>
			MPC8xxx_PICFRR_NCPU_SHIFT) + 1;

	if (cpu->num_cores == 0)
		return compute_ppc_cpumask();

	return cpu->mask;
}

#ifdef CONFIG_HETROGENOUS_CLUSTERS
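/*
 * Return a 32-bit mask indicating which SC/DSP cores are present on this SOC.
 */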
__weak u32 cpu_dsp_mask(void)
{
	ccsr_pic_t __iomem *pic = (void *)CONFIG_SYS_MPC8xxx_PIC_ADDR;
	struct cpu_type *cpu = gd->arch.cpu;

	/* better to query feature reporting register than just assume 1 */
	if (cpu == &cpu_type_unknown)
		return ((in_be32(&pic->frr) & MPC8xxx_PICFRR_NCPU_MASK) >>
			MPC8xxx_PICFRR_NCPU_SHIFT) + 1;

	if (cpu->dsp_num_cores == 0)
		return compute_dsp_cpumask();

	return cpu->dsp_mask;
}

/*
 * Return the number of SC/DSP cores on this SOC.
 */
__weak int cpu_num_dspcores(void)
{
	struct cpu_type *cpu = gd->arch.cpu;

	/*
	 * Report # of cores in terms of the cpu_mask if we haven't
	 * figured out how many there are yet
	 */
	if (cpu->dsp_num_cores == 0)
		return hweight32(cpu_dsp_mask());

	return cpu->dsp_num_cores;
}
#endif

/*
 * Return the number of PPC cores on this SOC.
 */
__weak int cpu_numcores(void)
{
	struct cpu_type *cpu = gd->arch.cpu;

	/*
	 * Report # of cores in terms of the cpu_mask if we haven't
	 * figured out how many there are yet
	 */
	if (cpu->num_cores == 0)
		return hweight32(cpu_mask());

	return cpu->num_cores;
}

/*
 * Check if the given core ID is valid
 *
 * Returns zero if it isn't, 1 if it is.
 */
int is_core_valid(unsigned int core)
{
	return !!((1 << core) & cpu_mask());
}

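/*
 * Identify the SoC from the SVR and cache the matching cpu_type entry
 * in the global data.
 */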
int arch_cpu_init(void)
{
	uint svr;
	uint ver;

	svr = get_svr();
	ver = SVR_SOC_VER(svr);

	gd->arch.cpu = identify_cpu(ver);

	return 0;
}

/* Once in memory, compute mask & # cores once and save them off */
int fixup_cpu(void)
{
	struct cpu_type *cpu = gd->arch.cpu;

	if (cpu->num_cores == 0) {
		cpu->mask = cpu_mask();
		cpu->num_cores = cpu_numcores();
	}

#ifdef CONFIG_HETROGENOUS_CLUSTERS
	if (cpu->dsp_num_cores == 0) {
		cpu->dsp_mask = cpu_dsp_mask();
		cpu->dsp_num_cores = cpu_num_dspcores();
	}
#endif
	return 0;
}

/*
 * Initializes on-chip ethernet controllers.
 * To override, implement board_eth_init().
 */
int cpu_eth_init(bd_t *bis)
{
#if defined(CONFIG_ETHER_ON_FCC)
	fec_initialize(bis);
#endif

#if defined(CONFIG_UEC_ETH)
	uec_standard_init(bis);
#endif

#if defined(CONFIG_TSEC_ENET) || defined(CONFIG_MPC85XX_FEC)
	tsec_standard_init(bis);
#endif

#ifdef CONFIG_FMAN_ENET
	fm_standard_init(bis);
#endif

#ifdef CONFIG_VSC9953
	vsc9953_init(bis);
#endif
	return 0;
}