1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright 2008, 2010-2016 Freescale Semiconductor, Inc.
4  * Copyright 2017-2018 NXP Semiconductor
5  */
6 
7 #include <common.h>
8 #include <env.h>
9 #include <hwconfig.h>
10 #include <fsl_ddr_sdram.h>
11 
12 #include <fsl_ddr.h>
13 #if defined(CONFIG_FSL_LSCH2) || defined(CONFIG_FSL_LSCH3) || \
14 	defined(CONFIG_ARM)
15 #include <asm/arch/clock.h>
16 #endif
17 
18 /*
19  * Use our own stack-based buffer before relocation so that we can access
20  * longer hwconfig strings that might be in the environment before we've
21  * relocated. This is fairly fragile both in its use of the stack and in
22  * whether the buffer is big enough; env_get_f() will warn us about the latter.
23  */
24 
25 /* Board-specific functions defined in each board's ddr.c */
26 void __weak fsl_ddr_board_options(memctl_options_t *popts,
27 				  dimm_params_t *pdimm,
28 				  unsigned int ctrl_num)
29 {
30 	return;
31 }
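
/*
 * A board's ddr.c can override the weak stub above to tune the options
 * chosen by populate_memctl_options(). A minimal, hypothetical sketch
 * (placeholder values; the fields shown are ones this file also sets):
 *
 *	void fsl_ddr_board_options(memctl_options_t *popts,
 *				   dimm_params_t *pdimm,
 *				   unsigned int ctrl_num)
 *	{
 *		if (ctrl_num != 0)
 *			return;
 *		popts->wrlvl_override = 0;
 *		popts->bstopre = 0;
 *	}
 *
 * Here wrlvl_override = 0 keeps the computed write-leveling start value and
 * bstopre = 0 selects global auto precharge (see the comments further down).
 */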
32 
33 struct dynamic_odt {
34 	unsigned int odt_rd_cfg;
35 	unsigned int odt_wr_cfg;
36 	unsigned int odt_rtt_norm;
37 	unsigned int odt_rtt_wr;
38 };
39 
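/*
 * The tables below hold per-chip-select ODT settings, chosen according to how
 * the DIMM slots are populated: single_S/single_D/single_Q cover one slot
 * holding a single-, dual- or quad-rank DIMM; dual_XY covers two slots, where
 * X and Y are each D (dual-rank), S (single-rank) or 0 (empty slot);
 * odt_unknown is the fallback when the population is not recognized.
 */
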
40 #ifdef CONFIG_SYS_FSL_DDR4
41 /* Quad rank is not verified yet due to limited availability.
42  * Replace 20 OHM with 34 OHM since DDR4 doesn't have a 20 OHM option.
43  */
44 static __maybe_unused const struct dynamic_odt single_Q[4] = {
45 	{	/* cs0 */
46 		FSL_DDR_ODT_NEVER,
47 		FSL_DDR_ODT_CS_AND_OTHER_DIMM,
48 		DDR4_RTT_34_OHM,	/* unverified */
49 		DDR4_RTT_120_OHM
50 	},
51 	{	/* cs1 */
52 		FSL_DDR_ODT_NEVER,
53 		FSL_DDR_ODT_NEVER,
54 		DDR4_RTT_OFF,
55 		DDR4_RTT_120_OHM
56 	},
57 	{	/* cs2 */
58 		FSL_DDR_ODT_NEVER,
59 		FSL_DDR_ODT_CS_AND_OTHER_DIMM,
60 		DDR4_RTT_34_OHM,
61 		DDR4_RTT_120_OHM
62 	},
63 	{	/* cs3 */
64 		FSL_DDR_ODT_NEVER,
65 		FSL_DDR_ODT_NEVER,	/* tied high */
66 		DDR4_RTT_OFF,
67 		DDR4_RTT_120_OHM
68 	}
69 };
70 
71 static __maybe_unused const struct dynamic_odt single_D[4] = {
72 	{	/* cs0 */
73 		FSL_DDR_ODT_NEVER,
74 		FSL_DDR_ODT_ALL,
75 		DDR4_RTT_40_OHM,
76 		DDR4_RTT_OFF
77 	},
78 	{	/* cs1 */
79 		FSL_DDR_ODT_NEVER,
80 		FSL_DDR_ODT_NEVER,
81 		DDR4_RTT_OFF,
82 		DDR4_RTT_OFF
83 	},
84 	{0, 0, 0, 0},
85 	{0, 0, 0, 0}
86 };
87 
88 static __maybe_unused const struct dynamic_odt single_S[4] = {
89 	{	/* cs0 */
90 		FSL_DDR_ODT_NEVER,
91 		FSL_DDR_ODT_ALL,
92 		DDR4_RTT_40_OHM,
93 		DDR4_RTT_OFF
94 	},
95 	{0, 0, 0, 0},
96 	{0, 0, 0, 0},
97 	{0, 0, 0, 0},
98 };
99 
100 static __maybe_unused const struct dynamic_odt dual_DD[4] = {
101 	{	/* cs0 */
102 		FSL_DDR_ODT_NEVER,
103 		FSL_DDR_ODT_SAME_DIMM,
104 		DDR4_RTT_120_OHM,
105 		DDR4_RTT_OFF
106 	},
107 	{	/* cs1 */
108 		FSL_DDR_ODT_OTHER_DIMM,
109 		FSL_DDR_ODT_OTHER_DIMM,
110 		DDR4_RTT_34_OHM,
111 		DDR4_RTT_OFF
112 	},
113 	{	/* cs2 */
114 		FSL_DDR_ODT_NEVER,
115 		FSL_DDR_ODT_SAME_DIMM,
116 		DDR4_RTT_120_OHM,
117 		DDR4_RTT_OFF
118 	},
119 	{	/* cs3 */
120 		FSL_DDR_ODT_OTHER_DIMM,
121 		FSL_DDR_ODT_OTHER_DIMM,
122 		DDR4_RTT_34_OHM,
123 		DDR4_RTT_OFF
124 	}
125 };
126 
127 static __maybe_unused const struct dynamic_odt dual_DS[4] = {
128 	{	/* cs0 */
129 		FSL_DDR_ODT_NEVER,
130 		FSL_DDR_ODT_SAME_DIMM,
131 		DDR4_RTT_120_OHM,
132 		DDR4_RTT_OFF
133 	},
134 	{	/* cs1 */
135 		FSL_DDR_ODT_OTHER_DIMM,
136 		FSL_DDR_ODT_OTHER_DIMM,
137 		DDR4_RTT_34_OHM,
138 		DDR4_RTT_OFF
139 	},
140 	{	/* cs2 */
141 		FSL_DDR_ODT_OTHER_DIMM,
142 		FSL_DDR_ODT_ALL,
143 		DDR4_RTT_34_OHM,
144 		DDR4_RTT_120_OHM
145 	},
146 	{0, 0, 0, 0}
147 };
148 static __maybe_unused const struct dynamic_odt dual_SD[4] = {
149 	{	/* cs0 */
150 		FSL_DDR_ODT_OTHER_DIMM,
151 		FSL_DDR_ODT_ALL,
152 		DDR4_RTT_34_OHM,
153 		DDR4_RTT_120_OHM
154 	},
155 	{0, 0, 0, 0},
156 	{	/* cs2 */
157 		FSL_DDR_ODT_NEVER,
158 		FSL_DDR_ODT_SAME_DIMM,
159 		DDR4_RTT_120_OHM,
160 		DDR4_RTT_OFF
161 	},
162 	{	/* cs3 */
163 		FSL_DDR_ODT_OTHER_DIMM,
164 		FSL_DDR_ODT_OTHER_DIMM,
165 		DDR4_RTT_34_OHM,
166 		DDR4_RTT_OFF
167 	}
168 };
169 
170 static __maybe_unused const struct dynamic_odt dual_SS[4] = {
171 	{	/* cs0 */
172 		FSL_DDR_ODT_OTHER_DIMM,
173 		FSL_DDR_ODT_ALL,
174 		DDR4_RTT_34_OHM,
175 		DDR4_RTT_120_OHM
176 	},
177 	{0, 0, 0, 0},
178 	{	/* cs2 */
179 		FSL_DDR_ODT_OTHER_DIMM,
180 		FSL_DDR_ODT_ALL,
181 		DDR4_RTT_34_OHM,
182 		DDR4_RTT_120_OHM
183 	},
184 	{0, 0, 0, 0}
185 };
186 
187 static __maybe_unused const struct dynamic_odt dual_D0[4] = {
188 	{	/* cs0 */
189 		FSL_DDR_ODT_NEVER,
190 		FSL_DDR_ODT_SAME_DIMM,
191 		DDR4_RTT_40_OHM,
192 		DDR4_RTT_OFF
193 	},
194 	{	/* cs1 */
195 		FSL_DDR_ODT_NEVER,
196 		FSL_DDR_ODT_NEVER,
197 		DDR4_RTT_OFF,
198 		DDR4_RTT_OFF
199 	},
200 	{0, 0, 0, 0},
201 	{0, 0, 0, 0}
202 };
203 
204 static __maybe_unused const struct dynamic_odt dual_0D[4] = {
205 	{0, 0, 0, 0},
206 	{0, 0, 0, 0},
207 	{	/* cs2 */
208 		FSL_DDR_ODT_NEVER,
209 		FSL_DDR_ODT_SAME_DIMM,
210 		DDR4_RTT_40_OHM,
211 		DDR4_RTT_OFF
212 	},
213 	{	/* cs3 */
214 		FSL_DDR_ODT_NEVER,
215 		FSL_DDR_ODT_NEVER,
216 		DDR4_RTT_OFF,
217 		DDR4_RTT_OFF
218 	}
219 };
220 
221 static __maybe_unused const struct dynamic_odt dual_S0[4] = {
222 	{	/* cs0 */
223 		FSL_DDR_ODT_NEVER,
224 		FSL_DDR_ODT_CS,
225 		DDR4_RTT_40_OHM,
226 		DDR4_RTT_OFF
227 	},
228 	{0, 0, 0, 0},
229 	{0, 0, 0, 0},
230 	{0, 0, 0, 0}
231 
232 };
233 
234 static __maybe_unused const struct dynamic_odt dual_0S[4] = {
235 	{0, 0, 0, 0},
236 	{0, 0, 0, 0},
237 	{	/* cs2 */
238 		FSL_DDR_ODT_NEVER,
239 		FSL_DDR_ODT_CS,
240 		DDR4_RTT_40_OHM,
241 		DDR4_RTT_OFF
242 	},
243 	{0, 0, 0, 0}
244 
245 };
246 
247 static __maybe_unused const struct dynamic_odt odt_unknown[4] = {
248 	{	/* cs0 */
249 		FSL_DDR_ODT_NEVER,
250 		FSL_DDR_ODT_CS,
251 		DDR4_RTT_120_OHM,
252 		DDR4_RTT_OFF
253 	},
254 	{	/* cs1 */
255 		FSL_DDR_ODT_NEVER,
256 		FSL_DDR_ODT_CS,
257 		DDR4_RTT_120_OHM,
258 		DDR4_RTT_OFF
259 	},
260 	{	/* cs2 */
261 		FSL_DDR_ODT_NEVER,
262 		FSL_DDR_ODT_CS,
263 		DDR4_RTT_120_OHM,
264 		DDR4_RTT_OFF
265 	},
266 	{	/* cs3 */
267 		FSL_DDR_ODT_NEVER,
268 		FSL_DDR_ODT_CS,
269 		DDR4_RTT_120_OHM,
270 		DDR4_RTT_OFF
271 	}
272 };
273 #elif defined(CONFIG_SYS_FSL_DDR3)
274 static __maybe_unused const struct dynamic_odt single_Q[4] = {
275 	{	/* cs0 */
276 		FSL_DDR_ODT_NEVER,
277 		FSL_DDR_ODT_CS_AND_OTHER_DIMM,
278 		DDR3_RTT_20_OHM,
279 		DDR3_RTT_120_OHM
280 	},
281 	{	/* cs1 */
282 		FSL_DDR_ODT_NEVER,
283 		FSL_DDR_ODT_NEVER,	/* tied high */
284 		DDR3_RTT_OFF,
285 		DDR3_RTT_120_OHM
286 	},
287 	{	/* cs2 */
288 		FSL_DDR_ODT_NEVER,
289 		FSL_DDR_ODT_CS_AND_OTHER_DIMM,
290 		DDR3_RTT_20_OHM,
291 		DDR3_RTT_120_OHM
292 	},
293 	{	/* cs3 */
294 		FSL_DDR_ODT_NEVER,
295 		FSL_DDR_ODT_NEVER,	/* tied high */
296 		DDR3_RTT_OFF,
297 		DDR3_RTT_120_OHM
298 	}
299 };
300 
301 static __maybe_unused const struct dynamic_odt single_D[4] = {
302 	{	/* cs0 */
303 		FSL_DDR_ODT_NEVER,
304 		FSL_DDR_ODT_ALL,
305 		DDR3_RTT_40_OHM,
306 		DDR3_RTT_OFF
307 	},
308 	{	/* cs1 */
309 		FSL_DDR_ODT_NEVER,
310 		FSL_DDR_ODT_NEVER,
311 		DDR3_RTT_OFF,
312 		DDR3_RTT_OFF
313 	},
314 	{0, 0, 0, 0},
315 	{0, 0, 0, 0}
316 };
317 
318 static __maybe_unused const struct dynamic_odt single_S[4] = {
319 	{	/* cs0 */
320 		FSL_DDR_ODT_NEVER,
321 		FSL_DDR_ODT_ALL,
322 		DDR3_RTT_40_OHM,
323 		DDR3_RTT_OFF
324 	},
325 	{0, 0, 0, 0},
326 	{0, 0, 0, 0},
327 	{0, 0, 0, 0},
328 };
329 
330 static __maybe_unused const struct dynamic_odt dual_DD[4] = {
331 	{	/* cs0 */
332 		FSL_DDR_ODT_NEVER,
333 		FSL_DDR_ODT_SAME_DIMM,
334 		DDR3_RTT_120_OHM,
335 		DDR3_RTT_OFF
336 	},
337 	{	/* cs1 */
338 		FSL_DDR_ODT_OTHER_DIMM,
339 		FSL_DDR_ODT_OTHER_DIMM,
340 		DDR3_RTT_30_OHM,
341 		DDR3_RTT_OFF
342 	},
343 	{	/* cs2 */
344 		FSL_DDR_ODT_NEVER,
345 		FSL_DDR_ODT_SAME_DIMM,
346 		DDR3_RTT_120_OHM,
347 		DDR3_RTT_OFF
348 	},
349 	{	/* cs3 */
350 		FSL_DDR_ODT_OTHER_DIMM,
351 		FSL_DDR_ODT_OTHER_DIMM,
352 		DDR3_RTT_30_OHM,
353 		DDR3_RTT_OFF
354 	}
355 };
356 
357 static __maybe_unused const struct dynamic_odt dual_DS[4] = {
358 	{	/* cs0 */
359 		FSL_DDR_ODT_NEVER,
360 		FSL_DDR_ODT_SAME_DIMM,
361 		DDR3_RTT_120_OHM,
362 		DDR3_RTT_OFF
363 	},
364 	{	/* cs1 */
365 		FSL_DDR_ODT_OTHER_DIMM,
366 		FSL_DDR_ODT_OTHER_DIMM,
367 		DDR3_RTT_30_OHM,
368 		DDR3_RTT_OFF
369 	},
370 	{	/* cs2 */
371 		FSL_DDR_ODT_OTHER_DIMM,
372 		FSL_DDR_ODT_ALL,
373 		DDR3_RTT_20_OHM,
374 		DDR3_RTT_120_OHM
375 	},
376 	{0, 0, 0, 0}
377 };
378 static __maybe_unused const struct dynamic_odt dual_SD[4] = {
379 	{	/* cs0 */
380 		FSL_DDR_ODT_OTHER_DIMM,
381 		FSL_DDR_ODT_ALL,
382 		DDR3_RTT_20_OHM,
383 		DDR3_RTT_120_OHM
384 	},
385 	{0, 0, 0, 0},
386 	{	/* cs2 */
387 		FSL_DDR_ODT_NEVER,
388 		FSL_DDR_ODT_SAME_DIMM,
389 		DDR3_RTT_120_OHM,
390 		DDR3_RTT_OFF
391 	},
392 	{	/* cs3 */
393 		FSL_DDR_ODT_OTHER_DIMM,
394 		FSL_DDR_ODT_OTHER_DIMM,
395 		DDR3_RTT_20_OHM,
396 		DDR3_RTT_OFF
397 	}
398 };
399 
400 static __maybe_unused const struct dynamic_odt dual_SS[4] = {
401 	{	/* cs0 */
402 		FSL_DDR_ODT_OTHER_DIMM,
403 		FSL_DDR_ODT_ALL,
404 		DDR3_RTT_30_OHM,
405 		DDR3_RTT_120_OHM
406 	},
407 	{0, 0, 0, 0},
408 	{	/* cs2 */
409 		FSL_DDR_ODT_OTHER_DIMM,
410 		FSL_DDR_ODT_ALL,
411 		DDR3_RTT_30_OHM,
412 		DDR3_RTT_120_OHM
413 	},
414 	{0, 0, 0, 0}
415 };
416 
417 static __maybe_unused const struct dynamic_odt dual_D0[4] = {
418 	{	/* cs0 */
419 		FSL_DDR_ODT_NEVER,
420 		FSL_DDR_ODT_SAME_DIMM,
421 		DDR3_RTT_40_OHM,
422 		DDR3_RTT_OFF
423 	},
424 	{	/* cs1 */
425 		FSL_DDR_ODT_NEVER,
426 		FSL_DDR_ODT_NEVER,
427 		DDR3_RTT_OFF,
428 		DDR3_RTT_OFF
429 	},
430 	{0, 0, 0, 0},
431 	{0, 0, 0, 0}
432 };
433 
434 static __maybe_unused const struct dynamic_odt dual_0D[4] = {
435 	{0, 0, 0, 0},
436 	{0, 0, 0, 0},
437 	{	/* cs2 */
438 		FSL_DDR_ODT_NEVER,
439 		FSL_DDR_ODT_SAME_DIMM,
440 		DDR3_RTT_40_OHM,
441 		DDR3_RTT_OFF
442 	},
443 	{	/* cs3 */
444 		FSL_DDR_ODT_NEVER,
445 		FSL_DDR_ODT_NEVER,
446 		DDR3_RTT_OFF,
447 		DDR3_RTT_OFF
448 	}
449 };
450 
451 static __maybe_unused const struct dynamic_odt dual_S0[4] = {
452 	{	/* cs0 */
453 		FSL_DDR_ODT_NEVER,
454 		FSL_DDR_ODT_CS,
455 		DDR3_RTT_40_OHM,
456 		DDR3_RTT_OFF
457 	},
458 	{0, 0, 0, 0},
459 	{0, 0, 0, 0},
460 	{0, 0, 0, 0}
461 
462 };
463 
464 static __maybe_unused const struct dynamic_odt dual_0S[4] = {
465 	{0, 0, 0, 0},
466 	{0, 0, 0, 0},
467 	{	/* cs2 */
468 		FSL_DDR_ODT_NEVER,
469 		FSL_DDR_ODT_CS,
470 		DDR3_RTT_40_OHM,
471 		DDR3_RTT_OFF
472 	},
473 	{0, 0, 0, 0}
474 
475 };
476 
477 static __maybe_unused const struct dynamic_odt odt_unknown[4] = {
478 	{	/* cs0 */
479 		FSL_DDR_ODT_NEVER,
480 		FSL_DDR_ODT_CS,
481 		DDR3_RTT_120_OHM,
482 		DDR3_RTT_OFF
483 	},
484 	{	/* cs1 */
485 		FSL_DDR_ODT_NEVER,
486 		FSL_DDR_ODT_CS,
487 		DDR3_RTT_120_OHM,
488 		DDR3_RTT_OFF
489 	},
490 	{	/* cs2 */
491 		FSL_DDR_ODT_NEVER,
492 		FSL_DDR_ODT_CS,
493 		DDR3_RTT_120_OHM,
494 		DDR3_RTT_OFF
495 	},
496 	{	/* cs3 */
497 		FSL_DDR_ODT_NEVER,
498 		FSL_DDR_ODT_CS,
499 		DDR3_RTT_120_OHM,
500 		DDR3_RTT_OFF
501 	}
502 };
503 #else	/* CONFIG_SYS_FSL_DDR3 */
504 static __maybe_unused const struct dynamic_odt single_Q[4] = {
505 	{0, 0, 0, 0},
506 	{0, 0, 0, 0},
507 	{0, 0, 0, 0},
508 	{0, 0, 0, 0}
509 };
510 
511 static __maybe_unused const struct dynamic_odt single_D[4] = {
512 	{	/* cs0 */
513 		FSL_DDR_ODT_NEVER,
514 		FSL_DDR_ODT_ALL,
515 		DDR2_RTT_150_OHM,
516 		DDR2_RTT_OFF
517 	},
518 	{	/* cs1 */
519 		FSL_DDR_ODT_NEVER,
520 		FSL_DDR_ODT_NEVER,
521 		DDR2_RTT_OFF,
522 		DDR2_RTT_OFF
523 	},
524 	{0, 0, 0, 0},
525 	{0, 0, 0, 0}
526 };
527 
528 static __maybe_unused const struct dynamic_odt single_S[4] = {
529 	{	/* cs0 */
530 		FSL_DDR_ODT_NEVER,
531 		FSL_DDR_ODT_ALL,
532 		DDR2_RTT_150_OHM,
533 		DDR2_RTT_OFF
534 	},
535 	{0, 0, 0, 0},
536 	{0, 0, 0, 0},
537 	{0, 0, 0, 0},
538 };
539 
540 static __maybe_unused const struct dynamic_odt dual_DD[4] = {
541 	{	/* cs0 */
542 		FSL_DDR_ODT_OTHER_DIMM,
543 		FSL_DDR_ODT_OTHER_DIMM,
544 		DDR2_RTT_75_OHM,
545 		DDR2_RTT_OFF
546 	},
547 	{	/* cs1 */
548 		FSL_DDR_ODT_NEVER,
549 		FSL_DDR_ODT_NEVER,
550 		DDR2_RTT_OFF,
551 		DDR2_RTT_OFF
552 	},
553 	{	/* cs2 */
554 		FSL_DDR_ODT_OTHER_DIMM,
555 		FSL_DDR_ODT_OTHER_DIMM,
556 		DDR2_RTT_75_OHM,
557 		DDR2_RTT_OFF
558 	},
559 	{	/* cs3 */
560 		FSL_DDR_ODT_NEVER,
561 		FSL_DDR_ODT_NEVER,
562 		DDR2_RTT_OFF,
563 		DDR2_RTT_OFF
564 	}
565 };
566 
567 static __maybe_unused const struct dynamic_odt dual_DS[4] = {
568 	{	/* cs0 */
569 		FSL_DDR_ODT_OTHER_DIMM,
570 		FSL_DDR_ODT_OTHER_DIMM,
571 		DDR2_RTT_75_OHM,
572 		DDR2_RTT_OFF
573 	},
574 	{	/* cs1 */
575 		FSL_DDR_ODT_NEVER,
576 		FSL_DDR_ODT_NEVER,
577 		DDR2_RTT_OFF,
578 		DDR2_RTT_OFF
579 	},
580 	{	/* cs2 */
581 		FSL_DDR_ODT_OTHER_DIMM,
582 		FSL_DDR_ODT_OTHER_DIMM,
583 		DDR2_RTT_75_OHM,
584 		DDR2_RTT_OFF
585 	},
586 	{0, 0, 0, 0}
587 };
588 
589 static __maybe_unused const struct dynamic_odt dual_SD[4] = {
590 	{	/* cs0 */
591 		FSL_DDR_ODT_OTHER_DIMM,
592 		FSL_DDR_ODT_OTHER_DIMM,
593 		DDR2_RTT_75_OHM,
594 		DDR2_RTT_OFF
595 	},
596 	{0, 0, 0, 0},
597 	{	/* cs2 */
598 		FSL_DDR_ODT_OTHER_DIMM,
599 		FSL_DDR_ODT_OTHER_DIMM,
600 		DDR2_RTT_75_OHM,
601 		DDR2_RTT_OFF
602 	},
603 	{	/* cs3 */
604 		FSL_DDR_ODT_NEVER,
605 		FSL_DDR_ODT_NEVER,
606 		DDR2_RTT_OFF,
607 		DDR2_RTT_OFF
608 	}
609 };
610 
611 static __maybe_unused const struct dynamic_odt dual_SS[4] = {
612 	{	/* cs0 */
613 		FSL_DDR_ODT_OTHER_DIMM,
614 		FSL_DDR_ODT_OTHER_DIMM,
615 		DDR2_RTT_75_OHM,
616 		DDR2_RTT_OFF
617 	},
618 	{0, 0, 0, 0},
619 	{	/* cs2 */
620 		FSL_DDR_ODT_OTHER_DIMM,
621 		FSL_DDR_ODT_OTHER_DIMM,
622 		DDR2_RTT_75_OHM,
623 		DDR2_RTT_OFF
624 	},
625 	{0, 0, 0, 0}
626 };
627 
628 static __maybe_unused const struct dynamic_odt dual_D0[4] = {
629 	{	/* cs0 */
630 		FSL_DDR_ODT_NEVER,
631 		FSL_DDR_ODT_ALL,
632 		DDR2_RTT_150_OHM,
633 		DDR2_RTT_OFF
634 	},
635 	{	/* cs1 */
636 		FSL_DDR_ODT_NEVER,
637 		FSL_DDR_ODT_NEVER,
638 		DDR2_RTT_OFF,
639 		DDR2_RTT_OFF
640 	},
641 	{0, 0, 0, 0},
642 	{0, 0, 0, 0}
643 };
644 
645 static __maybe_unused const struct dynamic_odt dual_0D[4] = {
646 	{0, 0, 0, 0},
647 	{0, 0, 0, 0},
648 	{	/* cs2 */
649 		FSL_DDR_ODT_NEVER,
650 		FSL_DDR_ODT_ALL,
651 		DDR2_RTT_150_OHM,
652 		DDR2_RTT_OFF
653 	},
654 	{	/* cs3 */
655 		FSL_DDR_ODT_NEVER,
656 		FSL_DDR_ODT_NEVER,
657 		DDR2_RTT_OFF,
658 		DDR2_RTT_OFF
659 	}
660 };
661 
662 static __maybe_unused const struct dynamic_odt dual_S0[4] = {
663 	{	/* cs0 */
664 		FSL_DDR_ODT_NEVER,
665 		FSL_DDR_ODT_CS,
666 		DDR2_RTT_150_OHM,
667 		DDR2_RTT_OFF
668 	},
669 	{0, 0, 0, 0},
670 	{0, 0, 0, 0},
671 	{0, 0, 0, 0}
672 
673 };
674 
675 static __maybe_unused const struct dynamic_odt dual_0S[4] = {
676 	{0, 0, 0, 0},
677 	{0, 0, 0, 0},
678 	{	/* cs2 */
679 		FSL_DDR_ODT_NEVER,
680 		FSL_DDR_ODT_CS,
681 		DDR2_RTT_150_OHM,
682 		DDR2_RTT_OFF
683 	},
684 	{0, 0, 0, 0}
685 
686 };
687 
688 static __maybe_unused const struct dynamic_odt odt_unknown[4] = {
689 	{	/* cs0 */
690 		FSL_DDR_ODT_NEVER,
691 		FSL_DDR_ODT_CS,
692 		DDR2_RTT_75_OHM,
693 		DDR2_RTT_OFF
694 	},
695 	{	/* cs1 */
696 		FSL_DDR_ODT_NEVER,
697 		FSL_DDR_ODT_NEVER,
698 		DDR2_RTT_OFF,
699 		DDR2_RTT_OFF
700 	},
701 	{	/* cs2 */
702 		FSL_DDR_ODT_NEVER,
703 		FSL_DDR_ODT_CS,
704 		DDR2_RTT_75_OHM,
705 		DDR2_RTT_OFF
706 	},
707 	{	/* cs3 */
708 		FSL_DDR_ODT_NEVER,
709 		FSL_DDR_ODT_NEVER,
710 		DDR2_RTT_OFF,
711 		DDR2_RTT_OFF
712 	}
713 };
714 #endif
715 
716 /*
717  * Automatically select the bank interleaving mode based on the DIMMs,
718  * in this order: cs0_cs1_cs2_cs3, cs0_cs1, null.
719  * This function only deals with one or two slots per controller.
720  */
721 static inline unsigned int auto_bank_intlv(dimm_params_t *pdimm)
722 {
723 #if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
724 	if (pdimm[0].n_ranks == 4)
725 		return FSL_DDR_CS0_CS1_CS2_CS3;
726 	else if (pdimm[0].n_ranks == 2)
727 		return FSL_DDR_CS0_CS1;
728 #elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
729 #ifdef CONFIG_FSL_DDR_FIRST_SLOT_QUAD_CAPABLE
730 	if (pdimm[0].n_ranks == 4)
731 		return FSL_DDR_CS0_CS1_CS2_CS3;
732 #endif
733 	if (pdimm[0].n_ranks == 2) {
734 		if (pdimm[1].n_ranks == 2)
735 			return FSL_DDR_CS0_CS1_CS2_CS3;
736 		else
737 			return FSL_DDR_CS0_CS1;
738 	}
739 #endif
740 	return 0;
741 }
742 
743 unsigned int populate_memctl_options(const common_timing_params_t *common_dimm,
744 			memctl_options_t *popts,
745 			dimm_params_t *pdimm,
746 			unsigned int ctrl_num)
747 {
748 	unsigned int i;
749 	char buf[HWCONFIG_BUFFER_SIZE];
750 #if defined(CONFIG_SYS_FSL_DDR3) || \
751 	defined(CONFIG_SYS_FSL_DDR2) || \
752 	defined(CONFIG_SYS_FSL_DDR4)
753 	const struct dynamic_odt *pdodt = odt_unknown;
754 #endif
755 #if (CONFIG_FSL_SDRAM_TYPE != SDRAM_TYPE_DDR4)
756 	ulong ddr_freq;
757 #endif
758 
759 	/*
760 	 * Extract hwconfig from the environment since we have not properly
761 	 * set up the environment yet but need it for the DDR config parameters.
762 	 */
763 	if (env_get_f("hwconfig", buf, sizeof(buf)) < 0)
764 		buf[0] = '\0';
765 
766 #if defined(CONFIG_SYS_FSL_DDR3) || \
767 	defined(CONFIG_SYS_FSL_DDR2) || \
768 	defined(CONFIG_SYS_FSL_DDR4)
769 	/* Chip select options. */
770 #if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
771 	switch (pdimm[0].n_ranks) {
772 	case 1:
773 		pdodt = single_S;
774 		break;
775 	case 2:
776 		pdodt = single_D;
777 		break;
778 	case 4:
779 		pdodt = single_Q;
780 		break;
781 	}
782 #elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
783 	switch (pdimm[0].n_ranks) {
784 #ifdef CONFIG_FSL_DDR_FIRST_SLOT_QUAD_CAPABLE
785 	case 4:
786 		pdodt = single_Q;
787 		if (pdimm[1].n_ranks)
788 			printf("Error: Quad- and Dual-rank DIMMs cannot be used together\n");
789 		break;
790 #endif
791 	case 2:
792 		switch (pdimm[1].n_ranks) {
793 		case 2:
794 			pdodt = dual_DD;
795 			break;
796 		case 1:
797 			pdodt = dual_DS;
798 			break;
799 		case 0:
800 			pdodt = dual_D0;
801 			break;
802 		}
803 		break;
804 	case 1:
805 		switch (pdimm[1].n_ranks) {
806 		case 2:
807 			pdodt = dual_SD;
808 			break;
809 		case 1:
810 			pdodt = dual_SS;
811 			break;
812 		case 0:
813 			pdodt = dual_S0;
814 			break;
815 		}
816 		break;
817 	case 0:
818 		switch (pdimm[1].n_ranks) {
819 		case 2:
820 			pdodt = dual_0D;
821 			break;
822 		case 1:
823 			pdodt = dual_0S;
824 			break;
825 		}
826 		break;
827 	}
828 #endif	/* CONFIG_DIMM_SLOTS_PER_CTLR */
829 #endif	/* CONFIG_SYS_FSL_DDR2, 3, 4 */
830 
831 	/* Pick chip-select local options. */
832 	for (i = 0; i < CONFIG_CHIP_SELECTS_PER_CTRL; i++) {
833 #if defined(CONFIG_SYS_FSL_DDR3) || \
834 	defined(CONFIG_SYS_FSL_DDR2) || \
835 	defined(CONFIG_SYS_FSL_DDR4)
836 		popts->cs_local_opts[i].odt_rd_cfg = pdodt[i].odt_rd_cfg;
837 		popts->cs_local_opts[i].odt_wr_cfg = pdodt[i].odt_wr_cfg;
838 		popts->cs_local_opts[i].odt_rtt_norm = pdodt[i].odt_rtt_norm;
839 		popts->cs_local_opts[i].odt_rtt_wr = pdodt[i].odt_rtt_wr;
840 #else
841 		popts->cs_local_opts[i].odt_rd_cfg = FSL_DDR_ODT_NEVER;
842 		popts->cs_local_opts[i].odt_wr_cfg = FSL_DDR_ODT_CS;
843 #endif
844 		popts->cs_local_opts[i].auto_precharge = 0;
845 	}
846 
847 	/* Pick interleaving mode. */
848 
849 	/*
850 	 * 0 = no interleaving
851 	 * 1 = interleaving between 2 controllers
852 	 */
853 	popts->memctl_interleaving = 0;
854 
855 	/*
856 	 * 0 = cacheline
857 	 * 1 = page
858 	 * 2 = (logical) bank
859 	 * 3 = superbank (only if CS interleaving is enabled)
860 	 */
861 	popts->memctl_interleaving_mode = 0;
862 
863 	/*
864 	 * 0: cacheline: bit 30 of the 36-bit physical addr selects the memctl
865 	 * 1: page:      bit to the left of the column bits selects the memctl
866 	 * 2: bank:      bit to the left of the bank bits selects the memctl
867 	 * 3: superbank: bit to the left of the chip select selects the memctl
868 	 *
869 	 * NOTE: ba_intlv (rank interleaving) is independent of memory
870 	 * controller interleaving; it is only within a memory controller.
871 	 * Must use superbank interleaving if rank interleaving is used and
872 	 * memory controller interleaving is enabled.
873 	 */
874 
875 	/*
876 	 * 0 = no
877 	 * 0x40 = CS0,CS1
878 	 * 0x20 = CS2,CS3
879 	 * 0x60 = CS0,CS1 + CS2,CS3
880 	 * 0x04 = CS0,CS1,CS2,CS3
881 	 */
882 	popts->ba_intlv_ctl = 0;
883 
884 	/* Memory Organization Parameters */
885 	popts->registered_dimm_en = common_dimm->all_dimms_registered;
886 
887 	/* Operational Mode Parameters */
888 
889 	/* Pick ECC modes */
890 	popts->ecc_mode = 0;		  /* 0 = disabled, 1 = enabled */
891 #ifdef CONFIG_DDR_ECC
892 	if (hwconfig_sub_f("fsl_ddr", "ecc", buf)) {
893 		if (hwconfig_subarg_cmp_f("fsl_ddr", "ecc", "on", buf))
894 			popts->ecc_mode = 1;
895 	} else
896 		popts->ecc_mode = 1;
897 #endif
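	/*
	 * For example (illustrative): "fsl_ddr:ecc=on" in hwconfig keeps ECC
	 * enabled, any other value for "ecc" disables it, and with no "ecc"
	 * setting at all ECC defaults to enabled when CONFIG_DDR_ECC is set.
	 */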
898 	/* 1 = use the memory controller to initialize data */
899 	popts->ecc_init_using_memctl = popts->ecc_mode ? 1 : 0;
900 
901 	/*
902 	 * Choose DQS config
903 	 * 0 for DDR1
904 	 * 1 for DDR2
905 	 */
906 #if defined(CONFIG_SYS_FSL_DDR1)
907 	popts->dqs_config = 0;
908 #elif defined(CONFIG_SYS_FSL_DDR2) || defined(CONFIG_SYS_FSL_DDR3)
909 	popts->dqs_config = 1;
910 #endif
911 
912 	/* Choose self-refresh during sleep. */
913 	popts->self_refresh_in_sleep = 1;
914 
915 	/* Choose dynamic power management mode. */
916 	popts->dynamic_power = 0;
917 
918 	/*
919 	 * Check the first DIMM for the primary SDRAM width,
920 	 * presuming all DIMMs are similar.
921 	 * 0 = 64-bit, 1 = 32-bit, 2 = 16-bit
922 	 */
923 #if defined(CONFIG_SYS_FSL_DDR1) || defined(CONFIG_SYS_FSL_DDR2)
924 	if (pdimm[0].n_ranks != 0) {
925 		if ((pdimm[0].data_width >= 64) && \
926 			(pdimm[0].data_width <= 72))
927 			popts->data_bus_width = 0;
928 		else if ((pdimm[0].data_width >= 32) && \
929 			(pdimm[0].data_width <= 40))
930 			popts->data_bus_width = 1;
931 		else {
932 			panic("Error: data width %u is invalid!\n",
933 				pdimm[0].data_width);
934 		}
935 	}
936 #else
937 	if (pdimm[0].n_ranks != 0) {
938 		if (pdimm[0].primary_sdram_width == 64)
939 			popts->data_bus_width = 0;
940 		else if (pdimm[0].primary_sdram_width == 32)
941 			popts->data_bus_width = 1;
942 		else if (pdimm[0].primary_sdram_width == 16)
943 			popts->data_bus_width = 2;
944 		else {
945 			panic("Error: primary sdram width %u is invalid!\n",
946 				pdimm[0].primary_sdram_width);
947 		}
948 	}
949 #endif
950 
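	/* Enable x4 device support if the first DIMM uses x4 SDRAM devices. */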
951 	popts->x4_en = (pdimm[0].device_width == 4) ? 1 : 0;
952 
953 	/* Choose burst length. */
954 #if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
955 #if defined(CONFIG_E500MC)
956 	popts->otf_burst_chop_en = 0;	/* on-the-fly burst chop disable */
957 	popts->burst_length = DDR_BL8;	/* Fixed 8-beat burst len */
958 #else
959 	if ((popts->data_bus_width == 1) || (popts->data_bus_width == 2)) {
960 		/* 32-bit or 16-bit bus */
961 		popts->otf_burst_chop_en = 0;
962 		popts->burst_length = DDR_BL8;
963 	} else {
964 		popts->otf_burst_chop_en = 1;	/* on-the-fly burst chop */
965 		popts->burst_length = DDR_OTF;	/* on-the-fly BC4 and BL8 */
966 	}
967 #endif
968 #else
969 	popts->burst_length = DDR_BL4;	/* has to be 4 for DDR2 */
970 #endif
971 
972 	/* Choose ddr controller address mirror mode */
973 #if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
974 	for (i = 0; i < CONFIG_DIMM_SLOTS_PER_CTLR; i++) {
975 		if (pdimm[i].n_ranks) {
976 			popts->mirrored_dimm = pdimm[i].mirrored_dimm;
977 			break;
978 		}
979 	}
980 #endif
981 
982 	/* Global Timing Parameters. */
983 	debug("mclk_ps = %u ps\n", get_memory_clk_period_ps(ctrl_num));
984 
985 	/* Pick a caslat override. */
986 	popts->cas_latency_override = 0;
987 	popts->cas_latency_override_value = 3;
988 	if (popts->cas_latency_override) {
989 		debug("using caslat override value = %u\n",
990 		       popts->cas_latency_override_value);
991 	}
992 
993 	/* Decide whether to use the computed derated latency */
994 	popts->use_derated_caslat = 0;
995 
996 	/* Choose an additive latency. */
997 	popts->additive_latency_override = 0;
998 	popts->additive_latency_override_value = 3;
999 	if (popts->additive_latency_override) {
1000 		debug("using additive latency override value = %u\n",
1001 		       popts->additive_latency_override_value);
1002 	}
1003 
1004 	/*
1005 	 * 2T_EN setting
1006 	 *
1007 	 * Factors to consider for 2T_EN:
1008 	 *	- number of DIMMs installed
1009 	 *	- number of components, number of active ranks
1010 	 *	- how much time you want to spend playing around
1011 	 */
1012 	popts->twot_en = 0;
1013 	popts->threet_en = 0;
1014 
1015 	/* for RDIMM and DDR4 UDIMM/discrete memory, address parity enable */
1016 	if (popts->registered_dimm_en)
1017 		popts->ap_en = 1; /* 0 = disable,  1 = enable */
1018 	else
1019 		popts->ap_en = 0; /* disabled for DDR4 UDIMM/discrete default */
1020 
1021 	if (hwconfig_sub_f("fsl_ddr", "parity", buf)) {
1022 		if (hwconfig_subarg_cmp_f("fsl_ddr", "parity", "on", buf)) {
1023 			if (popts->registered_dimm_en ||
1024 			    (CONFIG_FSL_SDRAM_TYPE == SDRAM_TYPE_DDR4))
1025 				popts->ap_en = 1;
1026 		}
1027 	}
1028 
1029 	/*
1030 	 * BSTTOPRE precharge interval
1031 	 *
1032 	 * Set this to 0 for global auto precharge.
1033 	 * The value 0x100 has been used for DDR1, DDR2 and DDR3; it is not
1034 	 * wrong, and any value should work. The best setting depends on the
1035 	 * application, so there is no single value that suits everyone. One
1036 	 * reasonable choice is 1/4 of the refresh interval (refint).
1037 	 */
1038 	popts->bstopre = picos_to_mclk(ctrl_num, common_dimm->refresh_rate_ps)
1039 			 >> 2;
1040 
1041 	/*
1042 	 * Window for four activates -- tFAW
1043 	 *
1044 	 * FIXME: per the UM, this applies only to DDR2/DDR3 with eight logical banks
1045 	 * FIXME: varies depending on the number of column addresses or the data
1046 	 * FIXME: width; was considering looking at pdimm->primary_sdram_width
1047 	 */
1048 #if defined(CONFIG_SYS_FSL_DDR1)
1049 	popts->tfaw_window_four_activates_ps = mclk_to_picos(ctrl_num, 1);
1050 
1051 #elif defined(CONFIG_SYS_FSL_DDR2)
1052 	/*
1053 	 * x4/x8;  some datasheets have 35000
1054 	 * x16 wide columns only?  Use 50000?
1055 	 */
1056 	popts->tfaw_window_four_activates_ps = 37500;
1057 
1058 #else
1059 	popts->tfaw_window_four_activates_ps = pdimm[0].tfaw_ps;
1060 #endif
1061 	popts->zq_en = 0;
1062 	popts->wrlvl_en = 0;
1063 #if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
1064 	/*
1065 	 * Because DDR3/DDR4 DIMMs use a fly-by topology, enable write
1066 	 * leveling so that tDQSS can be met under different loading
1067 	 * conditions.
1068 	 */
1069 	popts->wrlvl_en = 1;
1070 	popts->zq_en = 1;
1071 	popts->wrlvl_override = 0;
1072 #endif
1073 
1074 	/*
1075 	 * Check the interleaving configuration from the environment.
1076 	 * Please refer to doc/README.fsl-ddr for details.
1077 	 *
1078 	 * If memory controller interleaving is enabled, then the data
1079 	 * bus widths must be programmed identically for all memory controllers.
1080 	 *
1081 	 * Attempt to set all controllers to the same chip-select
1082 	 * interleaving mode. This is a best effort to get the
1083 	 * requested ranks interleaved together, such that the result
1084 	 * is a subset of the requested configuration.
1085 	 *
1086 	 * If CONFIG_SYS_FSL_DDR_INTLV_256B is defined, 256-byte
1087 	 * interleaving is mandatory.
1088 	 */
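	/*
	 * A typical setting (illustrative; see doc/README.fsl-ddr):
	 *
	 *	setenv hwconfig "fsl_ddr:ctlr_intlv=cacheline,bank_intlv=cs0_cs1"
	 */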
1089 #if (CONFIG_SYS_NUM_DDR_CTLRS > 1)
1090 	if (!hwconfig_sub_f("fsl_ddr", "ctlr_intlv", buf))
1091 #ifdef CONFIG_SYS_FSL_DDR_INTLV_256B
1092 		;
1093 #else
1094 		goto done;
1095 #endif
1096 	if (pdimm[0].n_ranks == 0) {
1097 		printf("There is no rank on CS0 for controller %d.\n", ctrl_num);
1098 		popts->memctl_interleaving = 0;
1099 		goto done;
1100 	}
1101 	popts->memctl_interleaving = 1;
1102 #ifdef CONFIG_SYS_FSL_DDR_INTLV_256B
1103 	popts->memctl_interleaving_mode = FSL_DDR_256B_INTERLEAVING;
1104 	popts->memctl_interleaving = 1;
1105 	debug("256 Byte interleaving\n");
1106 #else
1107 	/*
1108 	 * Test "null" first: if CONFIG_HWCONFIG is not defined,
1109 	 * hwconfig_subarg_cmp_f() returns non-zero.
1110 	 */
1111 	if (hwconfig_subarg_cmp_f("fsl_ddr", "ctlr_intlv",
1112 				    "null", buf)) {
1113 		popts->memctl_interleaving = 0;
1114 		debug("memory controller interleaving disabled.\n");
1115 	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
1116 					"ctlr_intlv",
1117 					"cacheline", buf)) {
1118 		popts->memctl_interleaving_mode =
1119 			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
1120 			0 : FSL_DDR_CACHE_LINE_INTERLEAVING;
1121 		popts->memctl_interleaving =
1122 			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
1123 			0 : 1;
1124 	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
1125 					"ctlr_intlv",
1126 					"page", buf)) {
1127 		popts->memctl_interleaving_mode =
1128 			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
1129 			0 : FSL_DDR_PAGE_INTERLEAVING;
1130 		popts->memctl_interleaving =
1131 			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
1132 			0 : 1;
1133 	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
1134 					"ctlr_intlv",
1135 					"bank", buf)) {
1136 		popts->memctl_interleaving_mode =
1137 			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
1138 			0 : FSL_DDR_BANK_INTERLEAVING;
1139 		popts->memctl_interleaving =
1140 			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
1141 			0 : 1;
1142 	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
1143 					"ctlr_intlv",
1144 					"superbank", buf)) {
1145 		popts->memctl_interleaving_mode =
1146 			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
1147 			0 : FSL_DDR_SUPERBANK_INTERLEAVING;
1148 		popts->memctl_interleaving =
1149 			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
1150 			0 : 1;
1151 #if (CONFIG_SYS_NUM_DDR_CTLRS == 3)
1152 	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
1153 					"ctlr_intlv",
1154 					"3way_1KB", buf)) {
1155 		popts->memctl_interleaving_mode =
1156 			FSL_DDR_3WAY_1KB_INTERLEAVING;
1157 	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
1158 					"ctlr_intlv",
1159 					"3way_4KB", buf)) {
1160 		popts->memctl_interleaving_mode =
1161 			FSL_DDR_3WAY_4KB_INTERLEAVING;
1162 	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
1163 					"ctlr_intlv",
1164 					"3way_8KB", buf)) {
1165 		popts->memctl_interleaving_mode =
1166 			FSL_DDR_3WAY_8KB_INTERLEAVING;
1167 #elif (CONFIG_SYS_NUM_DDR_CTLRS == 4)
1168 	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
1169 					"ctlr_intlv",
1170 					"4way_1KB", buf)) {
1171 		popts->memctl_interleaving_mode =
1172 			FSL_DDR_4WAY_1KB_INTERLEAVING;
1173 	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
1174 					"ctlr_intlv",
1175 					"4way_4KB", buf)) {
1176 		popts->memctl_interleaving_mode =
1177 			FSL_DDR_4WAY_4KB_INTERLEAVING;
1178 	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
1179 					"ctlr_intlv",
1180 					"4way_8KB", buf)) {
1181 		popts->memctl_interleaving_mode =
1182 			FSL_DDR_4WAY_8KB_INTERLEAVING;
1183 #endif
1184 	} else {
1185 		popts->memctl_interleaving = 0;
1186 		printf("hwconfig has unrecognized parameter for ctlr_intlv.\n");
1187 	}
1188 #endif	/* CONFIG_SYS_FSL_DDR_INTLV_256B */
1189 done:
1190 #endif /* CONFIG_SYS_NUM_DDR_CTLRS > 1 */
1191 	if ((hwconfig_sub_f("fsl_ddr", "bank_intlv", buf)) &&
1192 		(CONFIG_CHIP_SELECTS_PER_CTRL > 1)) {
1193 		/* Test "null" first: if CONFIG_HWCONFIG is not defined,
1194 		 * hwconfig_subarg_cmp_f() returns non-zero. */
1195 		if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
1196 					    "null", buf))
1197 			debug("bank interleaving disabled.\n");
1198 		else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
1199 						 "cs0_cs1", buf))
1200 			popts->ba_intlv_ctl = FSL_DDR_CS0_CS1;
1201 		else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
1202 						 "cs2_cs3", buf))
1203 			popts->ba_intlv_ctl = FSL_DDR_CS2_CS3;
1204 		else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
1205 						 "cs0_cs1_and_cs2_cs3", buf))
1206 			popts->ba_intlv_ctl = FSL_DDR_CS0_CS1_AND_CS2_CS3;
1207 		else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
1208 						 "cs0_cs1_cs2_cs3", buf))
1209 			popts->ba_intlv_ctl = FSL_DDR_CS0_CS1_CS2_CS3;
1210 		else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
1211 						"auto", buf))
1212 			popts->ba_intlv_ctl = auto_bank_intlv(pdimm);
1213 		else
1214 			printf("hwconfig has unrecognized parameter for bank_intlv.\n");
1215 		switch (popts->ba_intlv_ctl & FSL_DDR_CS0_CS1_CS2_CS3) {
1216 		case FSL_DDR_CS0_CS1_CS2_CS3:
1217 #if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
1218 			if (pdimm[0].n_ranks < 4) {
1219 				popts->ba_intlv_ctl = 0;
1220 				printf("Not enough bank(chip-select) for "
1221 					"CS0+CS1+CS2+CS3 on controller %d, "
1222 					"interleaving disabled!\n", ctrl_num);
1223 			}
1224 #elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
1225 #ifdef CONFIG_FSL_DDR_FIRST_SLOT_QUAD_CAPABLE
1226 			if (pdimm[0].n_ranks == 4)
1227 				break;
1228 #endif
1229 			if ((pdimm[0].n_ranks < 2) && (pdimm[1].n_ranks < 2)) {
1230 				popts->ba_intlv_ctl = 0;
1231 				printf("Not enough bank(chip-select) for "
1232 					"CS0+CS1+CS2+CS3 on controller %d, "
1233 					"interleaving disabled!\n", ctrl_num);
1234 			}
1235 			if (pdimm[0].capacity != pdimm[1].capacity) {
1236 				popts->ba_intlv_ctl = 0;
1237 				printf("Not identical DIMM size for "
1238 					"CS0+CS1+CS2+CS3 on controller %d, "
1239 					"interleaving disabled!\n", ctrl_num);
1240 			}
1241 #endif
1242 			break;
1243 		case FSL_DDR_CS0_CS1:
1244 			if (pdimm[0].n_ranks < 2) {
1245 				popts->ba_intlv_ctl = 0;
1246 				printf("Not enough bank(chip-select) for "
1247 					"CS0+CS1 on controller %d, "
1248 					"interleaving disabled!\n", ctrl_num);
1249 			}
1250 			break;
1251 		case FSL_DDR_CS2_CS3:
1252 #if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
1253 			if (pdimm[0].n_ranks < 4) {
1254 				popts->ba_intlv_ctl = 0;
1255 				printf("Not enough bank(chip-select) for CS2+CS3 "
1256 					"on controller %d, interleaving disabled!\n", ctrl_num);
1257 			}
1258 #elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
1259 			if (pdimm[1].n_ranks < 2) {
1260 				popts->ba_intlv_ctl = 0;
1261 				printf("Not enough bank(chip-select) for CS2+CS3 "
1262 					"on controller %d, interleaving disabled!\n", ctrl_num);
1263 			}
1264 #endif
1265 			break;
1266 		case FSL_DDR_CS0_CS1_AND_CS2_CS3:
1267 #if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
1268 			if (pdimm[0].n_ranks < 4) {
1269 				popts->ba_intlv_ctl = 0;
1270 				printf("Not enough bank(CS) for CS0+CS1 and "
1271 					"CS2+CS3 on controller %d, "
1272 					"interleaving disabled!\n", ctrl_num);
1273 			}
1274 #elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
1275 			if ((pdimm[0].n_ranks < 2) || (pdimm[1].n_ranks < 2)) {
1276 				popts->ba_intlv_ctl = 0;
1277 				printf("Not enough bank(CS) for CS0+CS1 and "
1278 					"CS2+CS3 on controller %d, "
1279 					"interleaving disabled!\n", ctrl_num);
1280 			}
1281 #endif
1282 			break;
1283 		default:
1284 			popts->ba_intlv_ctl = 0;
1285 			break;
1286 		}
1287 	}
1288 
1289 	if (hwconfig_sub_f("fsl_ddr", "addr_hash", buf)) {
1290 		if (hwconfig_subarg_cmp_f("fsl_ddr", "addr_hash", "null", buf))
1291 			popts->addr_hash = 0;
1292 		else if (hwconfig_subarg_cmp_f("fsl_ddr", "addr_hash",
1293 					       "true", buf))
1294 			popts->addr_hash = 1;
1295 	}
1296 
1297 	if (pdimm[0].n_ranks == 4)
1298 		popts->quad_rank_present = 1;
1299 
1300 	popts->package_3ds = pdimm->package_3ds;
1301 
1302 #if (CONFIG_FSL_SDRAM_TYPE != SDRAM_TYPE_DDR4)
1303 	ddr_freq = get_ddr_freq(ctrl_num) / 1000000;
1304 	if (popts->registered_dimm_en) {
1305 		popts->rcw_override = 1;
1306 		popts->rcw_1 = 0x000a5a00;
1307 		if (ddr_freq <= 800)
1308 			popts->rcw_2 = 0x00000000;
1309 		else if (ddr_freq <= 1066)
1310 			popts->rcw_2 = 0x00100000;
1311 		else if (ddr_freq <= 1333)
1312 			popts->rcw_2 = 0x00200000;
1313 		else
1314 			popts->rcw_2 = 0x00300000;
1315 	}
1316 #endif
1317 
1318 	fsl_ddr_board_options(popts, pdimm, ctrl_num);
1319 
1320 	return 0;
1321 }
1322 
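/*
 * Check that the controllers selected for memory controller interleaving have
 * compatible DIMMs and interleaving modes; if they do not, disable memory
 * controller interleaving on all of them.
 */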
1323 void check_interleaving_options(fsl_ddr_info_t *pinfo)
1324 {
1325 	int i, j, k, check_n_ranks, intlv_invalid = 0;
1326 	unsigned int check_intlv, check_n_row_addr, check_n_col_addr;
1327 	unsigned long long check_rank_density;
1328 	struct dimm_params_s *dimm;
1329 	int first_ctrl = pinfo->first_ctrl;
1330 	int last_ctrl = first_ctrl + pinfo->num_ctrls - 1;
1331 
1332 	/*
1333 	 * Check if all controllers are configured for memory
1334 	 * controller interleaving. Identical DIMMs are recommended; at a
1335 	 * minimum the size and the row and column address widths are checked.
1336 	 */
1337 	j = 0;
1338 	check_n_ranks = pinfo->dimm_params[first_ctrl][0].n_ranks;
1339 	check_rank_density = pinfo->dimm_params[first_ctrl][0].rank_density;
1340 	check_n_row_addr =  pinfo->dimm_params[first_ctrl][0].n_row_addr;
1341 	check_n_col_addr = pinfo->dimm_params[first_ctrl][0].n_col_addr;
1342 	check_intlv = pinfo->memctl_opts[first_ctrl].memctl_interleaving_mode;
1343 	for (i = first_ctrl; i <= last_ctrl; i++) {
1344 		dimm = &pinfo->dimm_params[i][0];
1345 		if (!pinfo->memctl_opts[i].memctl_interleaving) {
1346 			continue;
1347 		} else if (((check_rank_density != dimm->rank_density) ||
1348 		     (check_n_ranks != dimm->n_ranks) ||
1349 		     (check_n_row_addr != dimm->n_row_addr) ||
1350 		     (check_n_col_addr != dimm->n_col_addr) ||
1351 		     (check_intlv !=
1352 			pinfo->memctl_opts[i].memctl_interleaving_mode))){
1353 			intlv_invalid = 1;
1354 			break;
1355 		} else {
1356 			j++;
1357 		}
1358 
1359 	}
1360 	if (intlv_invalid) {
1361 		for (i = first_ctrl; i <= last_ctrl; i++)
1362 			pinfo->memctl_opts[i].memctl_interleaving = 0;
1363 		printf("Not all DIMMs are identical. "
1364 			"Memory controller interleaving disabled.\n");
1365 	} else {
1366 		switch (check_intlv) {
1367 		case FSL_DDR_256B_INTERLEAVING:
1368 		case FSL_DDR_CACHE_LINE_INTERLEAVING:
1369 		case FSL_DDR_PAGE_INTERLEAVING:
1370 		case FSL_DDR_BANK_INTERLEAVING:
1371 		case FSL_DDR_SUPERBANK_INTERLEAVING:
1372 #if (3 == CONFIG_SYS_NUM_DDR_CTLRS)
1373 				k = 2;
1374 #else
1375 				k = CONFIG_SYS_NUM_DDR_CTLRS;
1376 #endif
1377 			break;
1378 		case FSL_DDR_3WAY_1KB_INTERLEAVING:
1379 		case FSL_DDR_3WAY_4KB_INTERLEAVING:
1380 		case FSL_DDR_3WAY_8KB_INTERLEAVING:
1381 		case FSL_DDR_4WAY_1KB_INTERLEAVING:
1382 		case FSL_DDR_4WAY_4KB_INTERLEAVING:
1383 		case FSL_DDR_4WAY_8KB_INTERLEAVING:
1384 		default:
1385 			k = CONFIG_SYS_NUM_DDR_CTLRS;
1386 			break;
1387 		}
1388 		debug("%d of %d controllers are interleaving.\n", j, k);
1389 		if (j && (j != k)) {
1390 			for (i = first_ctrl; i <= last_ctrl; i++)
1391 				pinfo->memctl_opts[i].memctl_interleaving = 0;
1392 			if ((last_ctrl - first_ctrl) > 1)
1393 				puts("Not all controllers have compatible interleaving mode. All disabled.\n");
1394 		}
1395 	}
1396 	debug("Checking interleaving options completed\n");
1397 }
1398 
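/*
 * Whether to use SPD is also selectable via hwconfig, e.g. "fsl_ddr:sdram=spd"
 * or "fsl_ddr:sdram=fixed" (illustrative); with no setting, or an unrecognized
 * value, SPD is used.
 */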
1399 int fsl_use_spd(void)
1400 {
1401 	int use_spd = 0;
1402 
1403 #ifdef CONFIG_DDR_SPD
1404 	char buf[HWCONFIG_BUFFER_SIZE];
1405 
1406 	/*
1407 	 * Extract hwconfig from the environment since we have not properly
1408 	 * set up the environment yet but need it for the DDR config parameters.
1409 	 */
1410 	if (env_get_f("hwconfig", buf, sizeof(buf)) < 0)
1411 		buf[0] = '\0';
1412 
1413 	/* if hwconfig is not enabled, or "sdram" is not defined, use spd */
1414 	if (hwconfig_sub_f("fsl_ddr", "sdram", buf)) {
1415 		if (hwconfig_subarg_cmp_f("fsl_ddr", "sdram", "spd", buf))
1416 			use_spd = 1;
1417 		else if (hwconfig_subarg_cmp_f("fsl_ddr", "sdram",
1418 					       "fixed", buf))
1419 			use_spd = 0;
1420 		else
1421 			use_spd = 1;
1422 	} else
1423 		use_spd = 1;
1424 #endif
1425 
1426 	return use_spd;
1427 }
1428