/* SPDX-License-Identifier: GPL-2.0-or-later */

#include <device/pci_ops.h>
#include <device/smbus_host.h>
#include <cbmem.h>
#include <cf9_reset.h>
#include <console/console.h>
#include <arch/cpu.h>
#include <spd.h>
#include <string.h>
#include <device/dram/ddr2.h>
#include <device/dram/ddr3.h>
#include <mrc_cache.h>
#include <timestamp.h>
#include <types.h>

#include "raminit.h"
#include "x4x.h"

#define MRC_CACHE_VERSION 0

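/*
 * Hash only the SPD bytes that uniquely identify a DDR2 module:
 * bytes 64-72 (manufacturer JEDEC ID and manufacturing location) and
 * 93-98 (manufacturing date and serial number). Comparing this CRC
 * against the cached value detects swapped DIMMs without reading and
 * decoding the whole SPD again.
 */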
static u16 ddr2_get_crc(u8 device, u8 len)
{
	u8 raw_spd[128] = {};
	i2c_eeprom_read(device, 64, 9, &raw_spd[64]);
	i2c_eeprom_read(device, 93, 6, &raw_spd[93]);
	return spd_ddr2_calc_unique_crc(raw_spd, len);
}

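/* For DDR3, bytes 117-127 hold the corresponding unique module data. */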
static u16 ddr3_get_crc(u8 device, u8 len)
{
	u8 raw_spd[256] = {};
	i2c_eeprom_read(device, 117, 11, &raw_spd[117]);
	return spd_ddr3_calc_unique_crc(raw_spd, len);
}

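/*
 * Any change in DIMM population invalidates the cached training data:
 * compare each slot's presence and SPD CRC against the cached
 * controller state.
 */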
static enum cb_err verify_spds(const u8 *spd_map, const struct sysinfo *ctrl_cached)
{
	int i;
	u16 crc;

	for (i = 0; i < TOTAL_DIMMS; i++) {
		if (!(spd_map[i]))
			continue;
		int len = smbus_read_byte(spd_map[i], 0);
		if (len < 0 && ctrl_cached->dimms[i].card_type == RAW_CARD_UNPOPULATED)
			continue;
		if (len > 0 && ctrl_cached->dimms[i].card_type == RAW_CARD_UNPOPULATED)
			return CB_ERR;

		if (ctrl_cached->spd_type == DDR2)
			crc = ddr2_get_crc(spd_map[i], len);
		else
			crc = ddr3_get_crc(spd_map[i], len);

		if (crc != ctrl_cached->dimms[i].spd_crc)
			return CB_ERR;
	}
	return CB_SUCCESS;
}

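/*
 * Worst-case timings over all DIMMs. Absolute times are in the
 * 1/256 ns units used by the TCK_* constants.
 */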
struct abs_timings {
	u32 min_tclk;
	u32 min_tRAS;
	u32 min_tRP;
	u32 min_tRCD;
	u32 min_tWR;
	u32 min_tRFC;
	u32 min_tWTR;
	u32 min_tRRD;
	u32 min_tRTP;
	u32 min_tAA;
	u32 min_tCLK_cas[8];
	u32 cas_supported;
};

#define CTRL_MIN_TCLK_DDR2 TCK_400MHZ

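/*
 * Pick the fastest common clock and the lowest CAS that can sustain it:
 * start at the highest supported CAS and step down while a lower CAS
 * does not force a longer cycle time.
 */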
static void select_cas_dramfreq_ddr2(struct sysinfo *s, const struct abs_timings *saved_timings)
{
	u8 try_cas;
	/* Currently only these CAS latencies are supported */
	u8 cas_mask = SPD_CAS_LATENCY_DDR2_5 | SPD_CAS_LATENCY_DDR2_6;

	cas_mask &= saved_timings->cas_supported;
	try_cas = spd_get_msbs(cas_mask);

	while (cas_mask & (1 << try_cas) && try_cas > 0) {
		s->selected_timings.CAS = try_cas;
		s->selected_timings.tclk = saved_timings->min_tCLK_cas[try_cas];
		if (s->selected_timings.tclk >= CTRL_MIN_TCLK_DDR2 &&
				saved_timings->min_tCLK_cas[try_cas] !=
				saved_timings->min_tCLK_cas[try_cas - 1])
			break;
		try_cas--;
	}

	if ((s->selected_timings.CAS < 3) || (s->selected_timings.tclk == 0))
		die("Could not find common memory frequency and CAS\n");

	switch (s->selected_timings.tclk) {
	case TCK_200MHZ:
	case TCK_266MHZ:
		/* FIXME: this works on vendor BIOS */
		die("Selected DRAM frequency not supported\n");
	case TCK_333MHZ:
		s->selected_timings.mem_clk = MEM_CLOCK_667MHz;
		break;
	case TCK_400MHZ:
		s->selected_timings.mem_clk = MEM_CLOCK_800MHz;
		break;
	}
}

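/*
 * Print capabilities from the CAPID fuses starting at config offset 0xe4.
 * Bit positions are counted from the start of the register, hence the
 * (bit - 64) shifts for the word at 0xe8 and (bit - 32) shifts for the
 * dword at 0xe4.
 */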
static void mchinfo_ddr2(struct sysinfo *s)
{
	const u32 eax = cpuid_ext(0x04, 0).eax;
	printk(BIOS_WARNING, "%d CPU cores\n", ((eax >> 26) & 0x3f) + 1);

	u32 capid = pci_read_config16(HOST_BRIDGE, 0xe8);
	if (!(capid & (1 << (79 - 64))))
		printk(BIOS_WARNING, "iTPM enabled\n");

	capid = pci_read_config32(HOST_BRIDGE, 0xe4);
	if (!(capid & (1 << (57 - 32))))
		printk(BIOS_WARNING, "ME enabled\n");

	if (!(capid & (1 << (56 - 32))))
		printk(BIOS_WARNING, "AMT enabled\n");

	if (!(capid & (1 << (48 - 32))))
		printk(BIOS_WARNING, "VT-d enabled\n");
}

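/*
 * Decode one DDR2 SPD, fold its timings into the running worst-case
 * set and record the per-DIMM geometry. Only x8 and x16 parts are
 * supported.
 */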
static enum cb_err ddr2_save_dimminfo(u8 dimm_idx, u8 *raw_spd,
		struct abs_timings *saved_timings, struct sysinfo *s)
{
	struct dimm_attr_ddr2_st decoded_dimm;
	int i;

	if (spd_decode_ddr2(&decoded_dimm, raw_spd) != SPD_STATUS_OK) {
		printk(BIOS_DEBUG, "Problems decoding SPD\n");
		return CB_ERR;
	}

	if (CONFIG(DEBUG_RAM_SETUP))
		dram_print_spd_ddr2(&decoded_dimm);

	if (!(decoded_dimm.width & (0x08 | 0x10))) {
		printk(BIOS_ERR, "DIMM%d Unsupported width: x%d. Disabling dimm\n",
			dimm_idx, decoded_dimm.width);
		return CB_ERR;
	}
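	/* Encode device width: x8 -> 0, x16 -> 1 */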
	s->dimms[dimm_idx].width = (decoded_dimm.width >> 3) - 1;
	/*
	 * This boils down to:
	 * "Except for the x16 configuration, all DDR2 devices have a
	 * 1KB page size. For the x16 configuration, the page size is 2KB
	 * for all densities except the 256Mb device, which has a 1KB page
	 * size." Micron, 'TN-47-16 Designing for High-Density DDR2 Memory'
	 * The formula is page size in bytes = width * 2^col_bits / 8,
	 * e.g. a x16 device with 10 column address bits: 16 * 2^10 / 8 = 2 KiB.
	 */
	s->dimms[dimm_idx].page_size = decoded_dimm.width * (1 << decoded_dimm.col_bits) / 8;

	switch (decoded_dimm.banks) {
	case 4:
		s->dimms[dimm_idx].n_banks = N_BANKS_4;
		break;
	case 8:
		s->dimms[dimm_idx].n_banks = N_BANKS_8;
		break;
	default:
		printk(BIOS_ERR, "DIMM%d Unsupported #banks: %d. Disabling dimm\n",
			dimm_idx, decoded_dimm.banks);
		return CB_ERR;
	}

	s->dimms[dimm_idx].ranks = decoded_dimm.ranks;
	s->dimms[dimm_idx].rows = decoded_dimm.row_bits;
	s->dimms[dimm_idx].cols = decoded_dimm.col_bits;

	saved_timings->cas_supported &= decoded_dimm.cas_supported;

	saved_timings->min_tRAS = MAX(saved_timings->min_tRAS, decoded_dimm.tRAS);
	saved_timings->min_tRP  = MAX(saved_timings->min_tRP,  decoded_dimm.tRP);
	saved_timings->min_tRCD = MAX(saved_timings->min_tRCD, decoded_dimm.tRCD);
	saved_timings->min_tWR  = MAX(saved_timings->min_tWR,  decoded_dimm.tWR);
	saved_timings->min_tRFC = MAX(saved_timings->min_tRFC, decoded_dimm.tRFC);
	saved_timings->min_tWTR = MAX(saved_timings->min_tWTR, decoded_dimm.tWTR);
	saved_timings->min_tRRD = MAX(saved_timings->min_tRRD, decoded_dimm.tRRD);
	saved_timings->min_tRTP = MAX(saved_timings->min_tRTP, decoded_dimm.tRTP);
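	/* Track, per supported CAS, the slowest cycle time any DIMM requires. */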
	for (i = 0; i < 8; i++) {
		if (!(saved_timings->cas_supported & (1 << i)))
			saved_timings->min_tCLK_cas[i] = 0;
		else
			saved_timings->min_tCLK_cas[i] =
				MAX(saved_timings->min_tCLK_cas[i],
					decoded_dimm.cycle_time[i]);
	}

	s->dimms[dimm_idx].spd_crc = spd_ddr2_calc_unique_crc(raw_spd,
					spd_decode_spd_size_ddr2(raw_spd[0]));
	return CB_SUCCESS;
}

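/*
 * Round a required cycle time up to the nearest supported clock period
 * (666, 533 or 400 MHz memory clock); 0 means no supported clock is
 * slow enough.
 */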
static void normalize_tCLK(u32 *tCLK)
{
	if (*tCLK <= TCK_666MHZ)
		*tCLK = TCK_666MHZ;
	else if (*tCLK <= TCK_533MHZ)
		*tCLK = TCK_533MHZ;
	else if (*tCLK <= TCK_400MHZ)
		*tCLK = TCK_400MHZ;
	else
		*tCLK = 0;
}

static void select_cas_dramfreq_ddr3(struct sysinfo *s, struct abs_timings *saved_timings)
{
	/*
	 * Various constraints must be fulfilled:
	 *  CAS * tCK < 20 ns == 160 MTB
	 *  tCK_max >= tCK >= tCK_min
	 *  CAS >= roundup(tAA_min / tCK)
	 *  CAS supported
	 * Note: Clock(MT/s) = 2000 / tCK(ns); Intel uses MT/s but calls them MHz.
	 */

	u32 min_tCLK;
	u8 try_CAS;
	u16 capid = (pci_read_config16(HOST_BRIDGE, 0xea) >> 4) & 0x3f;

	switch (s->max_fsb) {
	default:
	case FSB_CLOCK_800MHz:
		min_tCLK = TCK_400MHZ;
		break;
	case FSB_CLOCK_1066MHz:
		min_tCLK = TCK_533MHZ;
		break;
	case FSB_CLOCK_1333MHz:
		min_tCLK = TCK_666MHZ;
		break;
	}

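	/* The fused capability field read above also caps the maximum memory clock. */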
	switch (capid >> 3) {
	default: /* Should not happen */
		min_tCLK = TCK_400MHZ;
		break;
	case 1:
		min_tCLK = MAX(min_tCLK, TCK_400MHZ);
		break;
	case 2:
		min_tCLK = MAX(min_tCLK, TCK_533MHZ);
		break;
	case 3: /* Only on P45 */
	case 0:
		min_tCLK = MAX(min_tCLK, TCK_666MHZ);
		break;
	}

	min_tCLK = MAX(min_tCLK, saved_timings->min_tclk);
	if (min_tCLK == 0) {
		printk(BIOS_ERR,
			"DRAM frequency is under lowest supported frequency (400 MHz).\n"
			"Increasing to 400 MHz as last resort.\n");
		min_tCLK = TCK_400MHZ;
	}

	while (1) {
		normalize_tCLK(&min_tCLK);
		if (min_tCLK == 0)
			die("Couldn't find compatible clock / CAS settings.\n");
		try_CAS = DIV_ROUND_UP(saved_timings->min_tAA, min_tCLK);
		printk(BIOS_SPEW, "Trying CAS %u, tCK %u.\n", try_CAS, min_tCLK);
		for (; try_CAS <= DDR3_MAX_CAS; try_CAS++) {
			/*
			 * cas_supported is encoded like the SPD which starts
			 * at CAS=4.
			 */
			if ((saved_timings->cas_supported << 4) & (1 << try_CAS))
				break;
		}
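		/* tCK is in 1/256 ns units, so "20 * 256" is the 20 ns CAS * tCK limit. */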
		if ((try_CAS <= DDR3_MAX_CAS) && (try_CAS * min_tCLK < 20 * 256)) {
			/* Found good CAS. */
			printk(BIOS_SPEW, "Found compatible tCLK / CAS pair: %u / %u.\n",
				min_tCLK, try_CAS);
			break;
		}
		/*
		 * No valid tCLK / CAS pair was found for this tCLK: increase
		 * it so the next normalize_tCLK() call snaps it to the next
		 * slower frequency.
		 */
		min_tCLK++;
	}

	s->selected_timings.tclk = min_tCLK;
	s->selected_timings.CAS = try_CAS;

	switch (s->selected_timings.tclk) {
	case TCK_400MHZ:
		s->selected_timings.mem_clk = MEM_CLOCK_800MHz;
		break;
	case TCK_533MHZ:
		s->selected_timings.mem_clk = MEM_CLOCK_1066MHz;
		break;
	case TCK_666MHZ:
		s->selected_timings.mem_clk = MEM_CLOCK_1333MHz;
		break;
	}
}

/* With DDR3, a 533 MHz memory clock and the internal graphics device enabled,
   the display is not usable in non-stacked mode, so select stacked mode accordingly. */
static void workaround_stacked_mode(struct sysinfo *s)
{
	u32 deven;
	/* Only a problem on DDR3 */
	if (s->spd_type == DDR2)
		return;
	/* Does not matter if only one channel is populated */
	if (!CHANNEL_IS_POPULATED(s->dimms, 0) || !CHANNEL_IS_POPULATED(s->dimms, 1))
		return;
	if (s->selected_timings.mem_clk != MEM_CLOCK_1066MHz)
		return;
	/* IGD0EN gets disabled if not present before this code runs */
	deven = pci_read_config32(HOST_BRIDGE, D0F0_DEVEN);
	if (deven & IGD0EN)
		s->stacked_mode = 1;
}

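/*
 * DDR3 counterpart of ddr2_save_dimminfo(): decode one DDR3 SPD and
 * fold its timings into the running worst-case set.
 */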
static enum cb_err ddr3_save_dimminfo(u8 dimm_idx, u8 *raw_spd,
		struct abs_timings *saved_timings, struct sysinfo *s)
{
	struct dimm_attr_ddr3_st decoded_dimm;

	if (spd_decode_ddr3(&decoded_dimm, raw_spd) != SPD_STATUS_OK)
		return CB_ERR;

	if (CONFIG(DEBUG_RAM_SETUP))
		dram_print_spd_ddr3(&decoded_dimm);

	/* x4 DIMMs are not supported (true for both DDR2 and DDR3) */
	if (!(decoded_dimm.width & (0x8 | 0x10))) {
		printk(BIOS_ERR, "DIMM%d Unsupported width: x%d. Disabling dimm\n",
			dimm_idx, decoded_dimm.width);
		return CB_ERR;
	}
	/* Encode device width: x8 -> 0, x16 -> 1 */
	s->dimms[dimm_idx].width = (decoded_dimm.width >> 3) - 1;
	/*
	 * As for DDR2: "Except for the x16 configuration, all DDR3 devices
	 * have a 1KB page size. For the x16 configuration, the page size is
	 * 2KB for all densities except the 256Mb device, which has a 1KB
	 * page size." Micron, 'TN-47-16 Designing for High-Density DDR2 Memory'
	 */
	s->dimms[dimm_idx].page_size = decoded_dimm.width * (1 << decoded_dimm.col_bits) / 8;

	s->dimms[dimm_idx].n_banks = N_BANKS_8; /* Every DDR3 device has 8 banks */

	s->dimms[dimm_idx].ranks = decoded_dimm.ranks;
	s->dimms[dimm_idx].rows = decoded_dimm.row_bits;
	s->dimms[dimm_idx].cols = decoded_dimm.col_bits;

	saved_timings->min_tRAS = MAX(saved_timings->min_tRAS, decoded_dimm.tRAS);
	saved_timings->min_tRP  = MAX(saved_timings->min_tRP,  decoded_dimm.tRP);
	saved_timings->min_tRCD = MAX(saved_timings->min_tRCD, decoded_dimm.tRCD);
	saved_timings->min_tWR  = MAX(saved_timings->min_tWR,  decoded_dimm.tWR);
	saved_timings->min_tRFC = MAX(saved_timings->min_tRFC, decoded_dimm.tRFC);
	saved_timings->min_tWTR = MAX(saved_timings->min_tWTR, decoded_dimm.tWTR);
	saved_timings->min_tRRD = MAX(saved_timings->min_tRRD, decoded_dimm.tRRD);
	saved_timings->min_tRTP = MAX(saved_timings->min_tRTP, decoded_dimm.tRTP);
	saved_timings->min_tAA  = MAX(saved_timings->min_tAA,  decoded_dimm.tAA);
	saved_timings->cas_supported &= decoded_dimm.cas_supported;

	s->dimms[dimm_idx].spd_crc = spd_ddr3_calc_unique_crc(raw_spd, raw_spd[0]);

	s->dimms[dimm_idx].mirrored = decoded_dimm.flags.pins_mirrored;

	return CB_SUCCESS;
}

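/* Convert the absolute worst-case timings into clock cycles at the selected tCK. */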
static void select_discrete_timings(struct sysinfo *s, const struct abs_timings *timings)
{
	s->selected_timings.tRAS = DIV_ROUND_UP(timings->min_tRAS, s->selected_timings.tclk);
	s->selected_timings.tRP  = DIV_ROUND_UP(timings->min_tRP,  s->selected_timings.tclk);
	s->selected_timings.tRCD = DIV_ROUND_UP(timings->min_tRCD, s->selected_timings.tclk);
	s->selected_timings.tWR  = DIV_ROUND_UP(timings->min_tWR,  s->selected_timings.tclk);
	s->selected_timings.tRFC = DIV_ROUND_UP(timings->min_tRFC, s->selected_timings.tclk);
	s->selected_timings.tWTR = DIV_ROUND_UP(timings->min_tWTR, s->selected_timings.tclk);
	s->selected_timings.tRRD = DIV_ROUND_UP(timings->min_tRRD, s->selected_timings.tclk);
	s->selected_timings.tRTP = DIV_ROUND_UP(timings->min_tRTP, s->selected_timings.tclk);
}

static void print_selected_timings(struct sysinfo *s)
{
	printk(BIOS_DEBUG, "Selected timings:\n");
	printk(BIOS_DEBUG, "\tFSB:  %dMHz\n", fsb_to_mhz(s->selected_timings.fsb_clk));
	printk(BIOS_DEBUG, "\tDDR:  %dMHz\n", ddr_to_mhz(s->selected_timings.mem_clk));

	printk(BIOS_DEBUG, "\tCAS:  %d\n", s->selected_timings.CAS);
	printk(BIOS_DEBUG, "\ttRAS: %d\n", s->selected_timings.tRAS);
	printk(BIOS_DEBUG, "\ttRP:  %d\n", s->selected_timings.tRP);
	printk(BIOS_DEBUG, "\ttRCD: %d\n", s->selected_timings.tRCD);
	printk(BIOS_DEBUG, "\ttWR:  %d\n", s->selected_timings.tWR);
	printk(BIOS_DEBUG, "\ttRFC: %d\n", s->selected_timings.tRFC);
	printk(BIOS_DEBUG, "\ttWTR: %d\n", s->selected_timings.tWTR);
	printk(BIOS_DEBUG, "\ttRRD: %d\n", s->selected_timings.tRRD);
	printk(BIOS_DEBUG, "\ttRTP: %d\n", s->selected_timings.tRTP);
}

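/* Read the strapped FSB frequency from the CLKCFG register in MCHBAR. */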
static void find_fsb_speed(struct sysinfo *s)
{
	switch ((mchbar_read32(CLKCFG_MCHBAR) & CLKCFG_FSBCLK_MASK) >> CLKCFG_FSBCLK_SHIFT) {
	case 0x0:
		s->max_fsb = FSB_CLOCK_1066MHz;
		break;
	case 0x2:
		s->max_fsb = FSB_CLOCK_800MHz;
		break;
	case 0x4:
		s->max_fsb = FSB_CLOCK_1333MHz;
		break;
	default:
		s->max_fsb = FSB_CLOCK_800MHz;
		printk(BIOS_WARNING, "Can't detect FSB, setting 800MHz\n");
		break;
	}
	s->selected_timings.fsb_clk = s->max_fsb;
}

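/*
 * Read and decode the SPD of every DIMM, accumulate the worst-case
 * timings, then select a common clock / CAS pair and the discrete
 * timings derived from them.
 */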
static void decode_spd_select_timings(struct sysinfo *s)
{
	unsigned int device;
	u8 dram_type_mask = (1 << DDR2) | (1 << DDR3);
	u8 dimm_mask = 0;
	u8 raw_spd[256];
	int i, j;
	struct abs_timings saved_timings;
	memset(&saved_timings, 0, sizeof(saved_timings));
	saved_timings.cas_supported = UINT32_MAX;

	FOR_EACH_DIMM(i) {
		s->dimms[i].card_type = RAW_CARD_POPULATED;
		device = s->spd_map[i];
		if (!device) {
			s->dimms[i].card_type = RAW_CARD_UNPOPULATED;
			continue;
		}
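		/* The mask narrows to the one type all DIMMs share; 0 means a mix. */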
		switch (smbus_read_byte(s->spd_map[i], SPD_MEMORY_TYPE)) {
		case DDR2SPD:
			dram_type_mask &= 1 << DDR2;
			s->spd_type = DDR2;
			break;
		case DDR3SPD:
			dram_type_mask &= 1 << DDR3;
			s->spd_type = DDR3;
			break;
		default:
			s->dimms[i].card_type = RAW_CARD_UNPOPULATED;
			continue;
		}
		if (!dram_type_mask)
			die("Mixing DIMM types is not supported!\n");

		printk(BIOS_DEBUG, "Decoding dimm %d\n", i);
		if (i2c_eeprom_read(device, 0, 128, raw_spd) != 128) {
			printk(BIOS_DEBUG,
				"i2c block operation failed, trying smbus byte operation.\n");
			for (j = 0; j < 128; j++)
				raw_spd[j] = smbus_read_byte(device, j);
		}

		if (s->spd_type == DDR2) {
			if (ddr2_save_dimminfo(i, raw_spd, &saved_timings, s)) {
				printk(BIOS_WARNING,
					"Encountered problems with SPD, skipping this DIMM.\n");
				s->dimms[i].card_type = RAW_CARD_UNPOPULATED;
				continue;
			}
		} else { /* DDR3 */
			if (ddr3_save_dimminfo(i, raw_spd, &saved_timings, s)) {
				printk(BIOS_WARNING,
					"Encountered problems with SPD, skipping this DIMM.\n");
				/* Something in the decoded SPD was unsupported */
				s->dimms[i].card_type = RAW_CARD_UNPOPULATED;
				continue;
			}
		}
		dimm_mask |= (1 << i);
	}
	if (!dimm_mask)
		die("No memory installed.\n");

	if (s->spd_type == DDR2)
		select_cas_dramfreq_ddr2(s, &saved_timings);
	else
		select_cas_dramfreq_ddr3(s, &saved_timings);
	select_discrete_timings(s, &saved_timings);
	workaround_stacked_mode(s);
}

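/*
 * Encode the DIMM configuration for each channel, two bits per DIMM:
 * 1 = single-rank x8, 2 = dual-rank x8, 3 = single-rank x16.
 */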
static void find_dimm_config(struct sysinfo *s)
{
	int chan, i;

	FOR_EACH_POPULATED_CHANNEL(s->dimms, chan) {
		FOR_EACH_POPULATED_DIMM_IN_CHANNEL(s->dimms, chan, i) {
			int dimm_config;
			if (s->dimms[i].ranks == 1) {
				if (s->dimms[i].width == 0)	/* x8 */
					dimm_config = 1;
				else				/* x16 */
					dimm_config = 3;
			} else {
				if (s->dimms[i].width == 0)	/* x8 */
					dimm_config = 2;
				else
					die("Dual-rank x16 not supported\n");
			}
			s->dimm_config[chan] |= dimm_config << (i % DIMMS_PER_CHANNEL) * 2;
		}
		printk(BIOS_DEBUG, "  Config[CH%d] : %d\n", chan, s->dimm_config[chan]);
	}
}

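/*
 * Detect an interrupted previous raminit: bit 7 in the power-management
 * register at D31:F0 config offset 0xa2 is set below and only cleared at
 * the end of sdram_initialize(). Finding it already set means the last
 * attempt never completed, so force a full reset to start cleanly.
 */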
static void checkreset_ddr2(int boot_path)
{
	u8 pmcon2;
	u32 pmsts;

	if (boot_path >= 1) {
		pmsts = mchbar_read32(PMSTS_MCHBAR);
		if (!(pmsts & 1))
			printk(BIOS_DEBUG, "Channel 0 possibly not in self refresh\n");
		if (!(pmsts & 2))
			printk(BIOS_DEBUG, "Channel 1 possibly not in self refresh\n");
	}

	pmcon2 = pci_read_config8(PCI_DEV(0, 0x1f, 0), 0xa2);

	if (pmcon2 & 0x80) {
		pmcon2 &= ~0x80;
		pci_write_config8(PCI_DEV(0, 0x1f, 0), 0xa2, pmcon2);

		/* do magic 0xf0 thing. */
		pci_and_config8(HOST_BRIDGE, 0xf0, ~(1 << 2));

		pci_or_config8(HOST_BRIDGE, 0xf0, (1 << 2));

		full_reset();
	}
	pmcon2 |= 0x80;
	pci_write_config8(PCI_DEV(0, 0x1f, 0), 0xa2, pmcon2);
}

/**
 * @param boot_path: 0 = normal, 1 = reset, 2 = resume from s3
 */
void sdram_initialize(int boot_path, const u8 *spd_map)
{
	struct sysinfo s, *ctrl_cached;
	u8 reg8;
	int fast_boot, cbmem_was_inited;
	size_t mrc_size;

	timestamp_add_now(TS_INITRAM_START);
	printk(BIOS_DEBUG, "Setting up RAM controller.\n");

	pci_write_config8(HOST_BRIDGE, 0xdf, 0xff);

	memset(&s, 0, sizeof(struct sysinfo));

	ctrl_cached = mrc_cache_current_mmap_leak(MRC_TRAINING_DATA,
						  MRC_CACHE_VERSION,
						  &mrc_size);

	if (!ctrl_cached || mrc_size < sizeof(s)) {
		if (boot_path == BOOT_PATH_RESUME) {
			/* Failed S3 resume, reset to come up cleanly */
			system_reset();
		} else if (boot_path == BOOT_PATH_WARM_RESET) {
			/* On a warm reset some DRAM calibrations fail,
			   so valid cached settings are required */
			full_reset();
		}
	}

	/* Verify the MRC cache for fast boot */
	if (boot_path != BOOT_PATH_RESUME && ctrl_cached) {
		/* Check the SPD checksums to make sure the DIMMs haven't been replaced */
		fast_boot = verify_spds(spd_map, ctrl_cached) == CB_SUCCESS;
		if (!fast_boot) {
			printk(BIOS_DEBUG,
			       "SPD checksums don't match, DIMMs have been replaced\n");
		} else {
			find_fsb_speed(&s);
			fast_boot = s.max_fsb == ctrl_cached->max_fsb;
			if (!fast_boot)
				printk(BIOS_DEBUG,
				       "CPU FSB speed does not match, CPU has been replaced\n");
		}
	} else {
		fast_boot = boot_path == BOOT_PATH_RESUME;
	}

	if (fast_boot) {
		printk(BIOS_DEBUG, "Using cached raminit settings\n");
		memcpy(&s, ctrl_cached, sizeof(s));
		s.boot_path = boot_path;
		mchinfo_ddr2(&s);
		print_selected_timings(&s);
	} else {
		s.boot_path = boot_path;
		s.spd_map[0] = spd_map[0];
		s.spd_map[1] = spd_map[1];
		s.spd_map[2] = spd_map[2];
		s.spd_map[3] = spd_map[3];
		checkreset_ddr2(s.boot_path);

		/* Detect DIMMs per channel */
		reg8 = pci_read_config8(HOST_BRIDGE, 0xe9);
		printk(BIOS_DEBUG, "Dimms per channel: %d\n", (reg8 & 0x10) ? 1 : 2);

		mchinfo_ddr2(&s);

		find_fsb_speed(&s);
		decode_spd_select_timings(&s);
		print_selected_timings(&s);
		find_dimm_config(&s);
	}

	do_raminit(&s, fast_boot);

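	/* Raminit completed: clear the in-progress flag armed in checkreset_ddr2() */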
	pci_and_config8(PCI_DEV(0, 0x1f, 0), 0xa2, (u8)~0x80);

	pci_or_config8(HOST_BRIDGE, 0xf4, 1);

	timestamp_add_now(TS_INITRAM_END);

	printk(BIOS_DEBUG, "RAM initialization finished.\n");

	int s3resume = boot_path == BOOT_PATH_RESUME;

	cbmem_was_inited = !cbmem_recovery(s3resume);
	if (!fast_boot)
		mrc_cache_stash_data(MRC_TRAINING_DATA, MRC_CACHE_VERSION, &s, sizeof(s));

	if (s3resume && !cbmem_was_inited) {
		/* Failed S3 resume, reset to come up cleanly */
		system_reset();
	}

	printk(BIOS_DEBUG, "Memory initialized\n");
}