1 #include "amd64_edac.h"
2 #include <asm/amd_nb.h>
3
4 static struct edac_pci_ctl_info *pci_ctl;
5
6 static int report_gart_errors;
7 module_param(report_gart_errors, int, 0644);
8
9 /*
10 * Set by command line parameter. If BIOS has enabled the ECC, this override is
11 * cleared to prevent re-enabling the hardware by this driver.
12 */
13 static int ecc_enable_override;
14 module_param(ecc_enable_override, int, 0644);
15
16 static struct msr __percpu *msrs;
17
18 /* Per-node stuff */
19 static struct ecc_settings **ecc_stngs;
20
21 /*
22 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
23 * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
24 * or higher value'.
25 *
26 * FIXME: Produce a better mapping/linearisation.
27 */
28 static const struct scrubrate {
29 u32 scrubval; /* bit pattern for scrub rate */
30 u32 bandwidth; /* bandwidth consumed (bytes/sec) */
31 } scrubrates[] = {
32 { 0x01, 1600000000UL},
33 { 0x02, 800000000UL},
34 { 0x03, 400000000UL},
35 { 0x04, 200000000UL},
36 { 0x05, 100000000UL},
37 { 0x06, 50000000UL},
38 { 0x07, 25000000UL},
39 { 0x08, 12284069UL},
40 { 0x09, 6274509UL},
41 { 0x0A, 3121951UL},
42 { 0x0B, 1560975UL},
43 { 0x0C, 781440UL},
44 { 0x0D, 390720UL},
45 { 0x0E, 195300UL},
46 { 0x0F, 97650UL},
47 { 0x10, 48854UL},
48 { 0x11, 24427UL},
49 { 0x12, 12213UL},
50 { 0x13, 6101UL},
51 { 0x14, 3051UL},
52 { 0x15, 1523UL},
53 { 0x16, 761UL},
54 { 0x00, 0UL}, /* scrubbing off */
55 };
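
/*
 * Worked example (illustrative request, not a BKDG value): a requested
 * rate of 5000000 bytes/sec has no exact match above. The 'set' path
 * walks this table top-down and picks the first entry whose bandwidth
 * does not exceed the request: 0x09 (6274509) is still above it, so
 * scrubval 0x0A (3121951 bytes/sec) is what gets programmed.
 */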
56
57 int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
58 u32 *val, const char *func)
59 {
60 int err = 0;
61
62 err = pci_read_config_dword(pdev, offset, val);
63 if (err)
64 amd64_warn("%s: error reading F%dx%03x.\n",
65 func, PCI_FUNC(pdev->devfn), offset);
66
67 return err;
68 }
69
70 int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
71 u32 val, const char *func)
72 {
73 int err = 0;
74
75 err = pci_write_config_dword(pdev, offset, val);
76 if (err)
77 amd64_warn("%s: error writing to F%dx%03x.\n",
78 func, PCI_FUNC(pdev->devfn), offset);
79
80 return err;
81 }
82
83 /*
84 * Select DCT to which PCI cfg accesses are routed
85 */
86 static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
87 {
88 u32 reg = 0;
89
90 amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
91 reg &= (pvt->model == 0x30) ? ~3 : ~1;
92 reg |= dct;
93 amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
94 }
95
96 /*
97 *
98 * Depending on the family, F2 DCT reads need special handling:
99 *
100 * K8: has a single DCT only and no address offsets >= 0x100
101 *
102 * F10h: each DCT has its own set of regs
103 * DCT0 -> F2x040..
104 * DCT1 -> F2x140..
105 *
106 * F16h: has only 1 DCT
107 *
108 * F15h: we select which DCT we access using F1x10C[DctCfgSel]
109 */
110 static inline int amd64_read_dct_pci_cfg(struct amd64_pvt *pvt, u8 dct,
111 int offset, u32 *val)
112 {
113 switch (pvt->fam) {
114 case 0xf:
115 if (dct || offset >= 0x100)
116 return -EINVAL;
117 break;
118
119 case 0x10:
120 if (dct) {
121 /*
122 * Note: If ganging is enabled, barring the regs
123 * F2x[1,0]98 and F2x[1,0]9C; reads to F2x1xx
124 * return 0. (cf. Section 2.8.1 F10h BKDG)
125 */
126 if (dct_ganging_enabled(pvt))
127 return 0;
128
129 offset += 0x100;
130 }
131 break;
132
133 case 0x15:
134 /*
135 * F15h: F2x1xx addresses do not map explicitly to DCT1.
136 * We should select which DCT we access using F1x10C[DctCfgSel]
137 */
138 dct = (dct && pvt->model == 0x30) ? 3 : dct;
139 f15h_select_dct(pvt, dct);
140 break;
141
142 case 0x16:
143 if (dct)
144 return -EINVAL;
145 break;
146
147 default:
148 break;
149 }
150 return amd64_read_pci_cfg(pvt->F2, offset, val);
151 }
152
153 /*
154 * Memory scrubber control interface. For K8, memory scrubbing is handled by
155 * hardware and can involve L2 cache, dcache as well as the main memory. With
156 * F10, this is extended to L3 cache scrubbing on CPU models sporting that
157 * functionality.
158 *
159 * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
160 * (DRAM) to cache lines. This is nasty, so we will use bandwidth in
161 * bytes/sec for the setting.
162 *
163 * Currently, we only do dram scrubbing. If the scrubbing is done in software on
164 * other archs, we might not have access to the caches directly.
165 */
166
167 static inline void __f17h_set_scrubval(struct amd64_pvt *pvt, u32 scrubval)
168 {
169 /*
170 * Fam17h supports scrub values between 0x5 and 0x14. Also, the values
171 * are shifted down by 0x5, so scrubval 0x5 is written to the register
172 * as 0x0, scrubval 0x6 as 0x1, etc.
173 */
174 if (scrubval >= 0x5 && scrubval <= 0x14) {
175 scrubval -= 0x5;
176 pci_write_bits32(pvt->F6, F17H_SCR_LIMIT_ADDR, scrubval, 0xF);
177 pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 1, 0x1);
178 } else {
179 pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 0, 0x1);
180 }
181 }
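
/*
 * Example of the Fam17h encoding above (illustrative): a scrubval of
 * 0x9 lies in the valid 0x5-0x14 window, so 0x9 - 0x5 = 0x4 is written
 * to the low nibble of F17H_SCR_LIMIT_ADDR and bit 0 of
 * F17H_SCR_BASE_ADDR is set to enable scrubbing; anything outside the
 * window simply clears that enable bit.
 */
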
182 /*
183 * Scan the scrub rate mapping table for a close or matching bandwidth value to
184 * issue. If the requested rate is too big, use the highest rate available.
185 */
186 static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
187 {
188 u32 scrubval;
189 int i;
190
191 /*
192 * map the configured rate (new_bw) to a value specific to the AMD64
193 * memory controller and apply to register. Search for the first
194 * bandwidth entry that does not exceed the requested setting
195 * and program that. If at last entry, turn off DRAM scrubbing.
196 *
197 * If no suitable bandwidth is found, turn off DRAM scrubbing entirely
198 * by falling back to the last element in scrubrates[].
199 */
200 for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
201 /*
202 * skip scrub rates which aren't recommended
203 * (see F10 BKDG, F3x58)
204 */
205 if (scrubrates[i].scrubval < min_rate)
206 continue;
207
208 if (scrubrates[i].bandwidth <= new_bw)
209 break;
210 }
211
212 scrubval = scrubrates[i].scrubval;
213
214 if (pvt->fam == 0x17) {
215 __f17h_set_scrubval(pvt, scrubval);
216 } else if (pvt->fam == 0x15 && pvt->model == 0x60) {
217 f15h_select_dct(pvt, 0);
218 pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
219 f15h_select_dct(pvt, 1);
220 pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
221 } else {
222 pci_write_bits32(pvt->F3, SCRCTRL, scrubval, 0x001F);
223 }
224
225 if (scrubval)
226 return scrubrates[i].bandwidth;
227
228 return 0;
229 }
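
/*
 * Sketch of the lookup above with assumed inputs new_bw = 100000000
 * and min_rate = 0x5: entries 0x01-0x04 are skipped because their
 * scrubval is below min_rate, 0x05 (100000000 bytes/sec) is the first
 * remaining entry whose bandwidth is <= new_bw, so scrubval 0x5 is
 * programmed and 100000000 is returned to the caller.
 */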
230
231 static int set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
232 {
233 struct amd64_pvt *pvt = mci->pvt_info;
234 u32 min_scrubrate = 0x5;
235
236 if (pvt->fam == 0xf)
237 min_scrubrate = 0x0;
238
239 if (pvt->fam == 0x15) {
240 /* Erratum #505 */
241 if (pvt->model < 0x10)
242 f15h_select_dct(pvt, 0);
243
244 if (pvt->model == 0x60)
245 min_scrubrate = 0x6;
246 }
247 return __set_scrub_rate(pvt, bw, min_scrubrate);
248 }
249
250 static int get_scrub_rate(struct mem_ctl_info *mci)
251 {
252 struct amd64_pvt *pvt = mci->pvt_info;
253 int i, retval = -EINVAL;
254 u32 scrubval = 0;
255
256 switch (pvt->fam) {
257 case 0x15:
258 /* Erratum #505 */
259 if (pvt->model < 0x10)
260 f15h_select_dct(pvt, 0);
261
262 if (pvt->model == 0x60)
263 amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
264 else
265 amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
266 break;
267
268 case 0x17:
269 amd64_read_pci_cfg(pvt->F6, F17H_SCR_BASE_ADDR, &scrubval);
270 if (scrubval & BIT(0)) {
271 amd64_read_pci_cfg(pvt->F6, F17H_SCR_LIMIT_ADDR, &scrubval);
272 scrubval &= 0xF;
273 scrubval += 0x5;
274 } else {
275 scrubval = 0;
276 }
277 break;
278
279 default:
280 amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
281 break;
282 }
283
284 scrubval = scrubval & 0x001F;
285
286 for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
287 if (scrubrates[i].scrubval == scrubval) {
288 retval = scrubrates[i].bandwidth;
289 break;
290 }
291 }
292 return retval;
293 }
294
295 /*
296 * returns true if the SysAddr given by sys_addr matches the
297 * DRAM base/limit associated with node_id
298 */
299 static bool base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, u8 nid)
300 {
301 u64 addr;
302
303 /* The K8 treats this as a 40-bit value. However, bits 63-40 will be
304 * all ones if the most significant implemented address bit is 1.
305 * Here we discard bits 63-40. See section 3.4.2 of AMD publication
306 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
307 * Application Programming.
308 */
309 addr = sys_addr & 0x000000ffffffffffull;
310
311 return ((addr >= get_dram_base(pvt, nid)) &&
312 (addr <= get_dram_limit(pvt, nid)));
313 }
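
/*
 * Example of the truncation above (made-up address): a sign-extended
 * SysAddr of 0xffffff8012345678 becomes 0x8012345678 after masking
 * with 0x000000ffffffffff, and that value is what gets compared
 * against the node's DRAM base/limit pair.
 */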
314
315 /*
316 * Attempt to map a SysAddr to a node. On success, return a pointer to the
317 * mem_ctl_info structure for the node that the SysAddr maps to.
318 *
319 * On failure, return NULL.
320 */
321 static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
322 u64 sys_addr)
323 {
324 struct amd64_pvt *pvt;
325 u8 node_id;
326 u32 intlv_en, bits;
327
328 /*
329 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
330 * 3.4.4.2) registers to map the SysAddr to a node ID.
331 */
332 pvt = mci->pvt_info;
333
334 /*
335 * The value of this field should be the same for all DRAM Base
336 * registers. Therefore we arbitrarily choose to read it from the
337 * register for node 0.
338 */
339 intlv_en = dram_intlv_en(pvt, 0);
340
341 if (intlv_en == 0) {
342 for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
343 if (base_limit_match(pvt, sys_addr, node_id))
344 goto found;
345 }
346 goto err_no_match;
347 }
348
349 if (unlikely((intlv_en != 0x01) &&
350 (intlv_en != 0x03) &&
351 (intlv_en != 0x07))) {
352 amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
353 return NULL;
354 }
355
356 bits = (((u32) sys_addr) >> 12) & intlv_en;
357
358 for (node_id = 0; ; ) {
359 if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
360 break; /* intlv_sel field matches */
361
362 if (++node_id >= DRAM_RANGES)
363 goto err_no_match;
364 }
365
366 /* sanity test for sys_addr */
367 if (unlikely(!base_limit_match(pvt, sys_addr, node_id))) {
368 amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address "
369 "range for node %d with node interleaving enabled.\n",
370 __func__, sys_addr, node_id);
371 return NULL;
372 }
373
374 found:
375 return edac_mc_find((int)node_id);
376
377 err_no_match:
378 edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n",
379 (unsigned long)sys_addr);
380
381 return NULL;
382 }
383
384 /*
385 * compute the CS base address of the @csrow on the DRAM controller @dct.
386 * For details see F2x[5C:40] in the processor's BKDG
387 */
388 static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
389 u64 *base, u64 *mask)
390 {
391 u64 csbase, csmask, base_bits, mask_bits;
392 u8 addr_shift;
393
394 if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
395 csbase = pvt->csels[dct].csbases[csrow];
396 csmask = pvt->csels[dct].csmasks[csrow];
397 base_bits = GENMASK_ULL(31, 21) | GENMASK_ULL(15, 9);
398 mask_bits = GENMASK_ULL(29, 21) | GENMASK_ULL(15, 9);
399 addr_shift = 4;
400
401 /*
402 * F16h and F15h, models 30h and later need two addr_shift values:
403 * 8 for high and 6 for low (cf. F16h BKDG).
404 */
405 } else if (pvt->fam == 0x16 ||
406 (pvt->fam == 0x15 && pvt->model >= 0x30)) {
407 csbase = pvt->csels[dct].csbases[csrow];
408 csmask = pvt->csels[dct].csmasks[csrow >> 1];
409
410 *base = (csbase & GENMASK_ULL(15, 5)) << 6;
411 *base |= (csbase & GENMASK_ULL(30, 19)) << 8;
412
413 *mask = ~0ULL;
414 /* poke holes for the csmask */
415 *mask &= ~((GENMASK_ULL(15, 5) << 6) |
416 (GENMASK_ULL(30, 19) << 8));
417
418 *mask |= (csmask & GENMASK_ULL(15, 5)) << 6;
419 *mask |= (csmask & GENMASK_ULL(30, 19)) << 8;
420
421 return;
422 } else {
423 csbase = pvt->csels[dct].csbases[csrow];
424 csmask = pvt->csels[dct].csmasks[csrow >> 1];
425 addr_shift = 8;
426
427 if (pvt->fam == 0x15)
428 base_bits = mask_bits =
429 GENMASK_ULL(30,19) | GENMASK_ULL(13,5);
430 else
431 base_bits = mask_bits =
432 GENMASK_ULL(28,19) | GENMASK_ULL(13,5);
433 }
434
435 *base = (csbase & base_bits) << addr_shift;
436
437 *mask = ~0ULL;
438 /* poke holes for the csmask */
439 *mask &= ~(mask_bits << addr_shift);
440 /* OR them in */
441 *mask |= (csmask & mask_bits) << addr_shift;
442 }
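
/*
 * Conceptual example for the generic path above (values invented): a
 * csbase whose bits [28:19] hold 0x1 yields *base = 1 << 19 << 8 =
 * 0x8000000, and the returned *mask has the shifted mask_bits
 * positions cleared with the csmask bits OR-ed back in, so callers can
 * test "(addr & ~mask) == (base & ~mask)" as input_addr_to_csrow()
 * does below.
 */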
443
444 #define for_each_chip_select(i, dct, pvt) \
445 for (i = 0; i < pvt->csels[dct].b_cnt; i++)
446
447 #define chip_select_base(i, dct, pvt) \
448 pvt->csels[dct].csbases[i]
449
450 #define for_each_chip_select_mask(i, dct, pvt) \
451 for (i = 0; i < pvt->csels[dct].m_cnt; i++)
452
453 /*
454 * @input_addr is an InputAddr associated with the node given by mci. Return the
455 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
456 */
457 static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
458 {
459 struct amd64_pvt *pvt;
460 int csrow;
461 u64 base, mask;
462
463 pvt = mci->pvt_info;
464
465 for_each_chip_select(csrow, 0, pvt) {
466 if (!csrow_enabled(csrow, 0, pvt))
467 continue;
468
469 get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
470
471 mask = ~mask;
472
473 if ((input_addr & mask) == (base & mask)) {
474 edac_dbg(2, "InputAddr 0x%lx matches csrow %d (node %d)\n",
475 (unsigned long)input_addr, csrow,
476 pvt->mc_node_id);
477
478 return csrow;
479 }
480 }
481 edac_dbg(2, "no matching csrow for InputAddr 0x%lx (MC node %d)\n",
482 (unsigned long)input_addr, pvt->mc_node_id);
483
484 return -1;
485 }
486
487 /*
488 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
489 * for the node represented by mci. Info is passed back in *hole_base,
490 * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
491 * info is invalid. Info may be invalid for either of the following reasons:
492 *
493 * - The revision of the node is not E or greater. In this case, the DRAM Hole
494 * Address Register does not exist.
495 *
496 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
497 * indicating that its contents are not valid.
498 *
499 * The values passed back in *hole_base, *hole_offset, and *hole_size are
500 * complete 32-bit values despite the fact that the bitfields in the DHAR
501 * only represent bits 31-24 of the base and offset values.
502 */
503 int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
504 u64 *hole_offset, u64 *hole_size)
505 {
506 struct amd64_pvt *pvt = mci->pvt_info;
507
508 /* only revE and later have the DRAM Hole Address Register */
509 if (pvt->fam == 0xf && pvt->ext_model < K8_REV_E) {
510 edac_dbg(1, " revision %d for node %d does not support DHAR\n",
511 pvt->ext_model, pvt->mc_node_id);
512 return 1;
513 }
514
515 /* valid for Fam10h and above */
516 if (pvt->fam >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
517 edac_dbg(1, " Dram Memory Hoisting is DISABLED on this system\n");
518 return 1;
519 }
520
521 if (!dhar_valid(pvt)) {
522 edac_dbg(1, " Dram Memory Hoisting is DISABLED on this node %d\n",
523 pvt->mc_node_id);
524 return 1;
525 }
526
527 /* This node has Memory Hoisting */
528
529 /* +------------------+--------------------+--------------------+-----
530 * | memory | DRAM hole | relocated |
531 * | [0, (x - 1)] | [x, 0xffffffff] | addresses from |
532 * | | | DRAM hole |
533 * | | | [0x100000000, |
534 * | | | (0x100000000+ |
535 * | | | (0xffffffff-x))] |
536 * +------------------+--------------------+--------------------+-----
537 *
538 * Above is a diagram of physical memory showing the DRAM hole and the
539 * relocated addresses from the DRAM hole. As shown, the DRAM hole
540 * starts at address x (the base address) and extends through address
541 * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the
542 * addresses in the hole so that they start at 0x100000000.
543 */
544
545 *hole_base = dhar_base(pvt);
546 *hole_size = (1ULL << 32) - *hole_base;
547
548 *hole_offset = (pvt->fam > 0xf) ? f10_dhar_offset(pvt)
549 : k8_dhar_offset(pvt);
550
551 edac_dbg(1, " DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
552 pvt->mc_node_id, (unsigned long)*hole_base,
553 (unsigned long)*hole_offset, (unsigned long)*hole_size);
554
555 return 0;
556 }
557 EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
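
/*
 * Illustrative DHAR example (numbers chosen only for the arithmetic):
 * if the register's base field decodes to 0xc0000000, then
 * hole_base = 0xc0000000 and hole_size = 0x100000000 - 0xc0000000 =
 * 0x40000000, i.e. a 1 GB hole reaching up to the 4 GB boundary;
 * hole_offset is whatever the family-specific dhar offset helper
 * returns.
 */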
558
559 /*
560 * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is
561 * assumed that sys_addr maps to the node given by mci.
562 *
563 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
564 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
565 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
566 * then it is also involved in translating a SysAddr to a DramAddr. Sections
567 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
568 * These parts of the documentation are unclear. I interpret them as follows:
569 *
570 * When node n receives a SysAddr, it processes the SysAddr as follows:
571 *
572 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
573 * Limit registers for node n. If the SysAddr is not within the range
574 * specified by the base and limit values, then node n ignores the Sysaddr
575 * (since it does not map to node n). Otherwise continue to step 2 below.
576 *
577 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
578 * disabled so skip to step 3 below. Otherwise see if the SysAddr is within
579 * the range of relocated addresses (starting at 0x100000000) from the DRAM
580 * hole. If not, skip to step 3 below. Else get the value of the
581 * DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
582 * offset defined by this value from the SysAddr.
583 *
584 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
585 * Base register for node n. To obtain the DramAddr, subtract the base
586 * address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
587 */
588 static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
589 {
590 struct amd64_pvt *pvt = mci->pvt_info;
591 u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
592 int ret;
593
594 dram_base = get_dram_base(pvt, pvt->mc_node_id);
595
596 ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
597 &hole_size);
598 if (!ret) {
599 if ((sys_addr >= (1ULL << 32)) &&
600 (sys_addr < ((1ULL << 32) + hole_size))) {
601 /* use DHAR to translate SysAddr to DramAddr */
602 dram_addr = sys_addr - hole_offset;
603
604 edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
605 (unsigned long)sys_addr,
606 (unsigned long)dram_addr);
607
608 return dram_addr;
609 }
610 }
611
612 /*
613 * Translate the SysAddr to a DramAddr as shown near the start of
614 * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8
615 * only deals with 40-bit values. Therefore we discard bits 63-40 of
616 * sys_addr below. If bit 39 of sys_addr is 1 then the bits we
617 * discard are all 1s. Otherwise the bits we discard are all 0s. See
618 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
619 * Programmer's Manual Volume 1 Application Programming.
620 */
621 dram_addr = (sys_addr & GENMASK_ULL(39, 0)) - dram_base;
622
623 edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
624 (unsigned long)sys_addr, (unsigned long)dram_addr);
625 return dram_addr;
626 }
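
/*
 * Worked example for the translation above (assumed values): with a
 * 1 GB hole (hole_size = 0x40000000) and hole_offset = 0x40000000, a
 * SysAddr of 0x100000000 falls in the relocated window
 * [0x100000000, 0x13fffffff] and becomes DramAddr
 * 0x100000000 - 0x40000000 = 0xc0000000; an address below 4 GB takes
 * the DRAM Base path instead, i.e. (sys_addr & 40-bit mask) - dram_base.
 */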
627
628 /*
629 * @intlv_en is the value of the IntlvEn field from a DRAM Base register
630 * (section 3.4.4.1). Return the number of bits from a SysAddr that are used
631 * for node interleaving.
632 */
633 static int num_node_interleave_bits(unsigned intlv_en)
634 {
635 static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
636 int n;
637
638 BUG_ON(intlv_en > 7);
639 n = intlv_shift_table[intlv_en];
640 return n;
641 }
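
/*
 * Example: IntlvEn = 0x3 means SysAddr bits [13:12] select among four
 * interleaved nodes, so this helper returns 2; IntlvEn = 0x7 selects
 * among eight nodes and returns 3, per the table above.
 */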
642
643 /* Translate the DramAddr given by @dram_addr to an InputAddr. */
644 static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
645 {
646 struct amd64_pvt *pvt;
647 int intlv_shift;
648 u64 input_addr;
649
650 pvt = mci->pvt_info;
651
652 /*
653 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
654 * concerning translating a DramAddr to an InputAddr.
655 */
656 intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
657 input_addr = ((dram_addr >> intlv_shift) & GENMASK_ULL(35, 12)) +
658 (dram_addr & 0xfff);
659
660 edac_dbg(2, " Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
661 intlv_shift, (unsigned long)dram_addr,
662 (unsigned long)input_addr);
663
664 return input_addr;
665 }
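
/*
 * Worked example (made-up numbers): with intlv_shift = 1 and
 * DramAddr = 0x12345678, the offset within the 4K page (0x678) is kept
 * and the remaining bits are shifted right by one:
 * InputAddr = ((0x12345678 >> 1) & GENMASK_ULL(35, 12)) + 0x678
 *           = 0x91a2000 + 0x678 = 0x91a2678.
 */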
666
667 /*
668 * Translate the SysAddr represented by @sys_addr to an InputAddr. It is
669 * assumed that @sys_addr maps to the node given by mci.
670 */
671 static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
672 {
673 u64 input_addr;
674
675 input_addr =
676 dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));
677
678 edac_dbg(2, "SysAddr 0x%lx translates to InputAddr 0x%lx\n",
679 (unsigned long)sys_addr, (unsigned long)input_addr);
680
681 return input_addr;
682 }
683
684 /* Map the Error address to a PAGE and PAGE OFFSET. */
685 static inline void error_address_to_page_and_offset(u64 error_address,
686 struct err_info *err)
687 {
688 err->page = (u32) (error_address >> PAGE_SHIFT);
689 err->offset = ((u32) error_address) & ~PAGE_MASK;
690 }
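
/*
 * Example with 4K pages: an error_address of 0x12345678 splits into
 * err->page = 0x12345 and err->offset = 0x678.
 */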
691
692 /*
693 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
694 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
695 * of a node that detected an ECC memory error. mci represents the node that
696 * the error address maps to (possibly different from the node that detected
697 * the error). Return the number of the csrow that sys_addr maps to, or -1 on
698 * error.
699 */
700 static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
701 {
702 int csrow;
703
704 csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));
705
706 if (csrow == -1)
707 amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
708 "address 0x%lx\n", (unsigned long)sys_addr);
709 return csrow;
710 }
711
712 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
713
714 /*
715 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
716 * are ECC capable.
717 */
718 static unsigned long determine_edac_cap(struct amd64_pvt *pvt)
719 {
720 unsigned long edac_cap = EDAC_FLAG_NONE;
721 u8 bit;
722
723 if (pvt->umc) {
724 u8 i, umc_en_mask = 0, dimm_ecc_en_mask = 0;
725
726 for (i = 0; i < NUM_UMCS; i++) {
727 if (!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT))
728 continue;
729
730 umc_en_mask |= BIT(i);
731
732 /* UMC Configuration bit 12 (DimmEccEn) */
733 if (pvt->umc[i].umc_cfg & BIT(12))
734 dimm_ecc_en_mask |= BIT(i);
735 }
736
737 if (umc_en_mask == dimm_ecc_en_mask)
738 edac_cap = EDAC_FLAG_SECDED;
739 } else {
740 bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
741 ? 19
742 : 17;
743
744 if (pvt->dclr0 & BIT(bit))
745 edac_cap = EDAC_FLAG_SECDED;
746 }
747
748 return edac_cap;
749 }
750
751 static void debug_display_dimm_sizes(struct amd64_pvt *, u8);
752
753 static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
754 {
755 edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
756
757 if (pvt->dram_type == MEM_LRDDR3) {
758 u32 dcsm = pvt->csels[chan].csmasks[0];
759 /*
760 * It's assumed all LRDIMMs in a DCT are going to be of the
761 * same 'type' until proven otherwise. So, use a cs
762 * value of '0' here to get dcsm value.
763 */
764 edac_dbg(1, " LRDIMM %dx rank multiply\n", (dcsm & 0x3));
765 }
766
767 edac_dbg(1, "All DIMMs support ECC:%s\n",
768 (dclr & BIT(19)) ? "yes" : "no");
769
770
771 edac_dbg(1, " PAR/ERR parity: %s\n",
772 (dclr & BIT(8)) ? "enabled" : "disabled");
773
774 if (pvt->fam == 0x10)
775 edac_dbg(1, " DCT 128bit mode width: %s\n",
776 (dclr & BIT(11)) ? "128b" : "64b");
777
778 edac_dbg(1, " x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
779 (dclr & BIT(12)) ? "yes" : "no",
780 (dclr & BIT(13)) ? "yes" : "no",
781 (dclr & BIT(14)) ? "yes" : "no",
782 (dclr & BIT(15)) ? "yes" : "no");
783 }
784
785 static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl)
786 {
787 int dimm, size0, size1, cs0, cs1;
788
789 edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl);
790
791 for (dimm = 0; dimm < 4; dimm++) {
792 size0 = 0;
793 cs0 = dimm * 2;
794
795 if (csrow_enabled(cs0, ctrl, pvt))
796 size0 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, cs0);
797
798 size1 = 0;
799 cs1 = dimm * 2 + 1;
800
801 if (csrow_enabled(cs1, ctrl, pvt))
802 size1 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, cs1);
803
804 amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
805 cs0, size0,
806 cs1, size1);
807 }
808 }
809
810 static void __dump_misc_regs_df(struct amd64_pvt *pvt)
811 {
812 struct amd64_umc *umc;
813 u32 i, tmp, umc_base;
814
815 for (i = 0; i < NUM_UMCS; i++) {
816 umc_base = get_umc_base(i);
817 umc = &pvt->umc[i];
818
819 edac_dbg(1, "UMC%d DIMM cfg: 0x%x\n", i, umc->dimm_cfg);
820 edac_dbg(1, "UMC%d UMC cfg: 0x%x\n", i, umc->umc_cfg);
821 edac_dbg(1, "UMC%d SDP ctrl: 0x%x\n", i, umc->sdp_ctrl);
822 edac_dbg(1, "UMC%d ECC ctrl: 0x%x\n", i, umc->ecc_ctrl);
823
824 amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ECC_BAD_SYMBOL, &tmp);
825 edac_dbg(1, "UMC%d ECC bad symbol: 0x%x\n", i, tmp);
826
827 amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_UMC_CAP, &tmp);
828 edac_dbg(1, "UMC%d UMC cap: 0x%x\n", i, tmp);
829 edac_dbg(1, "UMC%d UMC cap high: 0x%x\n", i, umc->umc_cap_hi);
830
831 edac_dbg(1, "UMC%d ECC capable: %s, ChipKill ECC capable: %s\n",
832 i, (umc->umc_cap_hi & BIT(30)) ? "yes" : "no",
833 (umc->umc_cap_hi & BIT(31)) ? "yes" : "no");
834 edac_dbg(1, "UMC%d All DIMMs support ECC: %s\n",
835 i, (umc->umc_cfg & BIT(12)) ? "yes" : "no");
836 edac_dbg(1, "UMC%d x4 DIMMs present: %s\n",
837 i, (umc->dimm_cfg & BIT(6)) ? "yes" : "no");
838 edac_dbg(1, "UMC%d x16 DIMMs present: %s\n",
839 i, (umc->dimm_cfg & BIT(7)) ? "yes" : "no");
840
841 if (pvt->dram_type == MEM_LRDDR4) {
842 amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ADDR_CFG, &tmp);
843 edac_dbg(1, "UMC%d LRDIMM %dx rank multiply\n",
844 i, 1 << ((tmp >> 4) & 0x3));
845 }
846
847 debug_display_dimm_sizes_df(pvt, i);
848 }
849
850 edac_dbg(1, "F0x104 (DRAM Hole Address): 0x%08x, base: 0x%08x\n",
851 pvt->dhar, dhar_base(pvt));
852 }
853
854 /* Display and decode various NB registers for debug purposes. */
855 static void __dump_misc_regs(struct amd64_pvt *pvt)
856 {
857 edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
858
859 edac_dbg(1, " NB two channel DRAM capable: %s\n",
860 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
861
862 edac_dbg(1, " ECC capable: %s, ChipKill ECC capable: %s\n",
863 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
864 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
865
866 debug_dump_dramcfg_low(pvt, pvt->dclr0, 0);
867
868 edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
869
870 edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n",
871 pvt->dhar, dhar_base(pvt),
872 (pvt->fam == 0xf) ? k8_dhar_offset(pvt)
873 : f10_dhar_offset(pvt));
874
875 debug_display_dimm_sizes(pvt, 0);
876
877 /* everything below this point is Fam10h and above */
878 if (pvt->fam == 0xf)
879 return;
880
881 debug_display_dimm_sizes(pvt, 1);
882
883 /* Only if NOT ganged does dclr1 have valid info */
884 if (!dct_ganging_enabled(pvt))
885 debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
886 }
887
888 /* Display and decode various NB registers for debug purposes. */
889 static void dump_misc_regs(struct amd64_pvt *pvt)
890 {
891 if (pvt->umc)
892 __dump_misc_regs_df(pvt);
893 else
894 __dump_misc_regs(pvt);
895
896 edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
897
898 amd64_info("using %s syndromes.\n",
899 ((pvt->ecc_sym_sz == 8) ? "x8" : "x4"));
900 }
901
902 /*
903 * See BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
904 */
905 static void prep_chip_selects(struct amd64_pvt *pvt)
906 {
907 if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
908 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
909 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
910 } else if (pvt->fam == 0x15 && pvt->model == 0x30) {
911 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
912 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
913 } else {
914 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
915 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
916 }
917 }
918
919 /*
920 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
921 */
922 static void read_dct_base_mask(struct amd64_pvt *pvt)
923 {
924 int base_reg0, base_reg1, mask_reg0, mask_reg1, cs;
925
926 prep_chip_selects(pvt);
927
928 if (pvt->umc) {
929 base_reg0 = get_umc_base(0) + UMCCH_BASE_ADDR;
930 base_reg1 = get_umc_base(1) + UMCCH_BASE_ADDR;
931 mask_reg0 = get_umc_base(0) + UMCCH_ADDR_MASK;
932 mask_reg1 = get_umc_base(1) + UMCCH_ADDR_MASK;
933 } else {
934 base_reg0 = DCSB0;
935 base_reg1 = DCSB1;
936 mask_reg0 = DCSM0;
937 mask_reg1 = DCSM1;
938 }
939
940 for_each_chip_select(cs, 0, pvt) {
941 int reg0 = base_reg0 + (cs * 4);
942 int reg1 = base_reg1 + (cs * 4);
943 u32 *base0 = &pvt->csels[0].csbases[cs];
944 u32 *base1 = &pvt->csels[1].csbases[cs];
945
946 if (pvt->umc) {
947 if (!amd_smn_read(pvt->mc_node_id, reg0, base0))
948 edac_dbg(0, " DCSB0[%d]=0x%08x reg: 0x%x\n",
949 cs, *base0, reg0);
950
951 if (!amd_smn_read(pvt->mc_node_id, reg1, base1))
952 edac_dbg(0, " DCSB1[%d]=0x%08x reg: 0x%x\n",
953 cs, *base1, reg1);
954 } else {
955 if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
956 edac_dbg(0, " DCSB0[%d]=0x%08x reg: F2x%x\n",
957 cs, *base0, reg0);
958
959 if (pvt->fam == 0xf)
960 continue;
961
962 if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
963 edac_dbg(0, " DCSB1[%d]=0x%08x reg: F2x%x\n",
964 cs, *base1, (pvt->fam == 0x10) ? reg1
965 : reg0);
966 }
967 }
968
969 for_each_chip_select_mask(cs, 0, pvt) {
970 int reg0 = mask_reg0 + (cs * 4);
971 int reg1 = mask_reg1 + (cs * 4);
972 u32 *mask0 = &pvt->csels[0].csmasks[cs];
973 u32 *mask1 = &pvt->csels[1].csmasks[cs];
974
975 if (pvt->umc) {
976 if (!amd_smn_read(pvt->mc_node_id, reg0, mask0))
977 edac_dbg(0, " DCSM0[%d]=0x%08x reg: 0x%x\n",
978 cs, *mask0, reg0);
979
980 if (!amd_smn_read(pvt->mc_node_id, reg1, mask1))
981 edac_dbg(0, " DCSM1[%d]=0x%08x reg: 0x%x\n",
982 cs, *mask1, reg1);
983 } else {
984 if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
985 edac_dbg(0, " DCSM0[%d]=0x%08x reg: F2x%x\n",
986 cs, *mask0, reg0);
987
988 if (pvt->fam == 0xf)
989 continue;
990
991 if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
992 edac_dbg(0, " DCSM1[%d]=0x%08x reg: F2x%x\n",
993 cs, *mask1, (pvt->fam == 0x10) ? reg1
994 : reg0);
995 }
996 }
997 }
998
999 static void determine_memory_type(struct amd64_pvt *pvt)
1000 {
1001 u32 dram_ctrl, dcsm;
1002
1003 switch (pvt->fam) {
1004 case 0xf:
1005 if (pvt->ext_model >= K8_REV_F)
1006 goto ddr3;
1007
1008 pvt->dram_type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
1009 return;
1010
1011 case 0x10:
1012 if (pvt->dchr0 & DDR3_MODE)
1013 goto ddr3;
1014
1015 pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
1016 return;
1017
1018 case 0x15:
1019 if (pvt->model < 0x60)
1020 goto ddr3;
1021
1022 /*
1023 * Model 0x60h needs special handling:
1024 *
1025 * We use a Chip Select value of '0' to obtain dcsm.
1026 * Theoretically, it is possible to populate LRDIMMs of different
1027 * 'Rank' value on a DCT. But this is not the common case. So,
1028 * it's reasonable to assume all DIMMs are going to be of the same
1029 * 'type' until proven otherwise.
1030 */
1031 amd64_read_dct_pci_cfg(pvt, 0, DRAM_CONTROL, &dram_ctrl);
1032 dcsm = pvt->csels[0].csmasks[0];
1033
1034 if (((dram_ctrl >> 8) & 0x7) == 0x2)
1035 pvt->dram_type = MEM_DDR4;
1036 else if (pvt->dclr0 & BIT(16))
1037 pvt->dram_type = MEM_DDR3;
1038 else if (dcsm & 0x3)
1039 pvt->dram_type = MEM_LRDDR3;
1040 else
1041 pvt->dram_type = MEM_RDDR3;
1042
1043 return;
1044
1045 case 0x16:
1046 goto ddr3;
1047
1048 case 0x17:
1049 if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5))
1050 pvt->dram_type = MEM_LRDDR4;
1051 else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4))
1052 pvt->dram_type = MEM_RDDR4;
1053 else
1054 pvt->dram_type = MEM_DDR4;
1055 return;
1056
1057 default:
1058 WARN(1, KERN_ERR "%s: Family??? 0x%x\n", __func__, pvt->fam);
1059 pvt->dram_type = MEM_EMPTY;
1060 }
1061 return;
1062
1063 ddr3:
1064 pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
1065 }
1066
1067 /* Get the number of DCT channels the memory controller is using. */
1068 static int k8_early_channel_count(struct amd64_pvt *pvt)
1069 {
1070 int flag;
1071
1072 if (pvt->ext_model >= K8_REV_F)
1073 /* RevF (NPT) and later */
1074 flag = pvt->dclr0 & WIDTH_128;
1075 else
1076 /* RevE and earlier */
1077 flag = pvt->dclr0 & REVE_WIDTH_128;
1078
1079 /* not used */
1080 pvt->dclr1 = 0;
1081
1082 return (flag) ? 2 : 1;
1083 }
1084
1085 /* On F10h and later ErrAddr is MC4_ADDR[47:1] */
1086 static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
1087 {
1088 u16 mce_nid = amd_get_nb_id(m->extcpu);
1089 struct mem_ctl_info *mci;
1090 u8 start_bit = 1;
1091 u8 end_bit = 47;
1092 u64 addr;
1093
1094 mci = edac_mc_find(mce_nid);
1095 if (!mci)
1096 return 0;
1097
1098 pvt = mci->pvt_info;
1099
1100 if (pvt->fam == 0xf) {
1101 start_bit = 3;
1102 end_bit = 39;
1103 }
1104
1105 addr = m->addr & GENMASK_ULL(end_bit, start_bit);
1106
1107 /*
1108 * Erratum 637 workaround
1109 */
1110 if (pvt->fam == 0x15) {
1111 u64 cc6_base, tmp_addr;
1112 u32 tmp;
1113 u8 intlv_en;
1114
1115 if ((addr & GENMASK_ULL(47, 24)) >> 24 != 0x00fdf7)
1116 return addr;
1117
1118
1119 amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp);
1120 intlv_en = tmp >> 21 & 0x7;
1121
1122 /* add [47:27] + 3 trailing bits */
1123 cc6_base = (tmp & GENMASK_ULL(20, 0)) << 3;
1124
1125 /* reverse and add DramIntlvEn */
1126 cc6_base |= intlv_en ^ 0x7;
1127
1128 /* pin at [47:24] */
1129 cc6_base <<= 24;
1130
1131 if (!intlv_en)
1132 return cc6_base | (addr & GENMASK_ULL(23, 0));
1133
1134 amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);
1135
1136 /* faster log2 */
1137 tmp_addr = (addr & GENMASK_ULL(23, 12)) << __fls(intlv_en + 1);
1138
1139 /* OR DramIntlvSel into bits [14:12] */
1140 tmp_addr |= (tmp & GENMASK_ULL(23, 21)) >> 9;
1141
1142 /* add remaining [11:0] bits from original MC4_ADDR */
1143 tmp_addr |= addr & GENMASK_ULL(11, 0);
1144
1145 return cc6_base | tmp_addr;
1146 }
1147
1148 return addr;
1149 }
1150
1151 static struct pci_dev *pci_get_related_function(unsigned int vendor,
1152 unsigned int device,
1153 struct pci_dev *related)
1154 {
1155 struct pci_dev *dev = NULL;
1156
1157 while ((dev = pci_get_device(vendor, device, dev))) {
1158 if (pci_domain_nr(dev->bus) == pci_domain_nr(related->bus) &&
1159 (dev->bus->number == related->bus->number) &&
1160 (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
1161 break;
1162 }
1163
1164 return dev;
1165 }
1166
1167 static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
1168 {
1169 struct amd_northbridge *nb;
1170 struct pci_dev *f1 = NULL;
1171 unsigned int pci_func;
1172 int off = range << 3;
1173 u32 llim;
1174
1175 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off, &pvt->ranges[range].base.lo);
1176 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
1177
1178 if (pvt->fam == 0xf)
1179 return;
1180
1181 if (!dram_rw(pvt, range))
1182 return;
1183
1184 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off, &pvt->ranges[range].base.hi);
1185 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
1186
1187 /* F15h: factor in CC6 save area by reading dst node's limit reg */
1188 if (pvt->fam != 0x15)
1189 return;
1190
1191 nb = node_to_amd_nb(dram_dst_node(pvt, range));
1192 if (WARN_ON(!nb))
1193 return;
1194
1195 if (pvt->model == 0x60)
1196 pci_func = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1;
1197 else if (pvt->model == 0x30)
1198 pci_func = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1;
1199 else
1200 pci_func = PCI_DEVICE_ID_AMD_15H_NB_F1;
1201
1202 f1 = pci_get_related_function(nb->misc->vendor, pci_func, nb->misc);
1203 if (WARN_ON(!f1))
1204 return;
1205
1206 amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);
1207
1208 pvt->ranges[range].lim.lo &= GENMASK_ULL(15, 0);
1209
1210 /* {[39:27],111b} */
1211 pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;
1212
1213 pvt->ranges[range].lim.hi &= GENMASK_ULL(7, 0);
1214
1215 /* [47:40] */
1216 pvt->ranges[range].lim.hi |= llim >> 13;
1217
1218 pci_dev_put(f1);
1219 }
1220
1221 static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
1222 struct err_info *err)
1223 {
1224 struct amd64_pvt *pvt = mci->pvt_info;
1225
1226 error_address_to_page_and_offset(sys_addr, err);
1227
1228 /*
1229 * Find out which node the error address belongs to. This may be
1230 * different from the node that detected the error.
1231 */
1232 err->src_mci = find_mc_by_sys_addr(mci, sys_addr);
1233 if (!err->src_mci) {
1234 amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
1235 (unsigned long)sys_addr);
1236 err->err_code = ERR_NODE;
1237 return;
1238 }
1239
1240 /* Now map the sys_addr to a CSROW */
1241 err->csrow = sys_addr_to_csrow(err->src_mci, sys_addr);
1242 if (err->csrow < 0) {
1243 err->err_code = ERR_CSROW;
1244 return;
1245 }
1246
1247 /* CHIPKILL enabled */
1248 if (pvt->nbcfg & NBCFG_CHIPKILL) {
1249 err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
1250 if (err->channel < 0) {
1251 /*
1252 * Syndrome didn't map, so we don't know which of the
1253 * 2 DIMMs is in error. So we need to ID 'both' of them
1254 * as suspect.
1255 */
1256 amd64_mc_warn(err->src_mci, "unknown syndrome 0x%04x - "
1257 "possible error reporting race\n",
1258 err->syndrome);
1259 err->err_code = ERR_CHANNEL;
1260 return;
1261 }
1262 } else {
1263 /*
1264 * non-chipkill ecc mode
1265 *
1266 * The k8 documentation is unclear about how to determine the
1267 * channel number when using non-chipkill memory. This method
1268 * was obtained from email communication with someone at AMD.
1269 * (Wish the email was placed in this comment - norsk)
1270 */
1271 err->channel = ((sys_addr & BIT(3)) != 0);
1272 }
1273 }
1274
1275 static int ddr2_cs_size(unsigned i, bool dct_width)
1276 {
1277 unsigned shift = 0;
1278
1279 if (i <= 2)
1280 shift = i;
1281 else if (!(i & 0x1))
1282 shift = i >> 1;
1283 else
1284 shift = (i + 1) >> 1;
1285
1286 return 128 << (shift + !!dct_width);
1287 }
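
/*
 * Example of the encoding above: i = 5 is odd, so shift = (5 + 1) >> 1
 * = 3; a 64-bit DCT (dct_width clear) reports 128 << 3 = 1024 MB and a
 * 128-bit DCT reports 128 << 4 = 2048 MB.
 */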
1288
1289 static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1290 unsigned cs_mode, int cs_mask_nr)
1291 {
1292 u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
1293
1294 if (pvt->ext_model >= K8_REV_F) {
1295 WARN_ON(cs_mode > 11);
1296 return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
1297 }
1298 else if (pvt->ext_model >= K8_REV_D) {
1299 unsigned diff;
1300 WARN_ON(cs_mode > 10);
1301
1302 /*
1303 * the below calculation, besides trying to win an obfuscated C
1304 * contest, maps cs_mode values to DIMM chip select sizes. The
1305 * mappings are:
1306 *
1307 * cs_mode CS size (mb)
1308 * ======= ============
1309 * 0 32
1310 * 1 64
1311 * 2 128
1312 * 3 128
1313 * 4 256
1314 * 5 512
1315 * 6 256
1316 * 7 512
1317 * 8 1024
1318 * 9 1024
1319 * 10 2048
1320 *
1321 * Basically, it calculates a value with which to shift the
1322 * smallest CS size of 32MB.
1323 *
1324 * ddr[23]_cs_size have a similar purpose.
1325 */
1326 diff = cs_mode/3 + (unsigned)(cs_mode > 5);
1327
1328 return 32 << (cs_mode - diff);
1329 }
1330 else {
1331 WARN_ON(cs_mode > 6);
1332 return 32 << cs_mode;
1333 }
1334 }
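
/*
 * Example of the revD/E mapping above: cs_mode = 7 gives
 * diff = 7/3 + (7 > 5) = 2 + 1 = 3, so the size is 32 << (7 - 3) =
 * 512 MB, matching the table in the comment.
 */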
1335
1336 /*
1337 * Get the number of DCT channels in use.
1338 *
1339 * Return:
1340 * number of Memory Channels in operation
1341 * Pass back:
1342 * contents of the DCL0_LOW register
1343 */
1344 static int f1x_early_channel_count(struct amd64_pvt *pvt)
1345 {
1346 int i, j, channels = 0;
1347
1348 /* On F10h, if we are in 128 bit mode, then we are using 2 channels */
1349 if (pvt->fam == 0x10 && (pvt->dclr0 & WIDTH_128))
1350 return 2;
1351
1352 /*
1353 * Need to check if we are in unganged mode: in that case, there are 2 channels,
1354 * but they are not in 128 bit mode and thus the above 'dclr0' status
1355 * bit will be OFF.
1356 *
1357 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
1358 * their CSEnable bit on. If so, then SINGLE DIMM case.
1359 */
1360 edac_dbg(0, "Data width is not 128 bits - need more decoding\n");
1361
1362 /*
1363 * Check DRAM Bank Address Mapping values for each DIMM to see if there
1364 * is more than just one DIMM present in unganged mode. Need to check
1365 * both controllers since DIMMs can be placed in either one.
1366 */
1367 for (i = 0; i < 2; i++) {
1368 u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);
1369
1370 for (j = 0; j < 4; j++) {
1371 if (DBAM_DIMM(j, dbam) > 0) {
1372 channels++;
1373 break;
1374 }
1375 }
1376 }
1377
1378 if (channels > 2)
1379 channels = 2;
1380
1381 amd64_info("MCT channel count: %d\n", channels);
1382
1383 return channels;
1384 }
1385
1386 static int f17_early_channel_count(struct amd64_pvt *pvt)
1387 {
1388 int i, channels = 0;
1389
1390 /* SDP Control bit 31 (SdpInit) is clear for unused UMC channels */
1391 for (i = 0; i < NUM_UMCS; i++)
1392 channels += !!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT);
1393
1394 amd64_info("MCT channel count: %d\n", channels);
1395
1396 return channels;
1397 }
1398
1399 static int ddr3_cs_size(unsigned i, bool dct_width)
1400 {
1401 unsigned shift = 0;
1402 int cs_size = 0;
1403
1404 if (i == 0 || i == 3 || i == 4)
1405 cs_size = -1;
1406 else if (i <= 2)
1407 shift = i;
1408 else if (i == 12)
1409 shift = 7;
1410 else if (!(i & 0x1))
1411 shift = i >> 1;
1412 else
1413 shift = (i + 1) >> 1;
1414
1415 if (cs_size != -1)
1416 cs_size = (128 * (1 << !!dct_width)) << shift;
1417
1418 return cs_size;
1419 }
1420
1421 static int ddr3_lrdimm_cs_size(unsigned i, unsigned rank_multiply)
1422 {
1423 unsigned shift = 0;
1424 int cs_size = 0;
1425
1426 if (i < 4 || i == 6)
1427 cs_size = -1;
1428 else if (i == 12)
1429 shift = 7;
1430 else if (!(i & 0x1))
1431 shift = i >> 1;
1432 else
1433 shift = (i + 1) >> 1;
1434
1435 if (cs_size != -1)
1436 cs_size = rank_multiply * (128 << shift);
1437
1438 return cs_size;
1439 }
1440
1441 static int ddr4_cs_size(unsigned i)
1442 {
1443 int cs_size = 0;
1444
1445 if (i == 0)
1446 cs_size = -1;
1447 else if (i == 1)
1448 cs_size = 1024;
1449 else
1450 /* Min cs_size = 1G */
1451 cs_size = 1024 * (1 << (i >> 1));
1452
1453 return cs_size;
1454 }
1455
1456 static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1457 unsigned cs_mode, int cs_mask_nr)
1458 {
1459 u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
1460
1461 WARN_ON(cs_mode > 11);
1462
1463 if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
1464 return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
1465 else
1466 return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
1467 }
1468
1469 /*
1470 * F15h supports only 64bit DCT interfaces
1471 */
1472 static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1473 unsigned cs_mode, int cs_mask_nr)
1474 {
1475 WARN_ON(cs_mode > 12);
1476
1477 return ddr3_cs_size(cs_mode, false);
1478 }
1479
1480 /* F15h M60h supports DDR4 mapping as well. */
1481 static int f15_m60h_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1482 unsigned cs_mode, int cs_mask_nr)
1483 {
1484 int cs_size;
1485 u32 dcsm = pvt->csels[dct].csmasks[cs_mask_nr];
1486
1487 WARN_ON(cs_mode > 12);
1488
1489 if (pvt->dram_type == MEM_DDR4) {
1490 if (cs_mode > 9)
1491 return -1;
1492
1493 cs_size = ddr4_cs_size(cs_mode);
1494 } else if (pvt->dram_type == MEM_LRDDR3) {
1495 unsigned rank_multiply = dcsm & 0xf;
1496
1497 if (rank_multiply == 3)
1498 rank_multiply = 4;
1499 cs_size = ddr3_lrdimm_cs_size(cs_mode, rank_multiply);
1500 } else {
1501 /* Minimum cs size is 512MB for F15h M60h */
1502 if (cs_mode == 0x1)
1503 return -1;
1504
1505 cs_size = ddr3_cs_size(cs_mode, false);
1506 }
1507
1508 return cs_size;
1509 }
1510
1511 /*
1512 * F16h and F15h model 30h have only limited cs_modes.
1513 */
1514 static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1515 unsigned cs_mode, int cs_mask_nr)
1516 {
1517 WARN_ON(cs_mode > 12);
1518
1519 if (cs_mode == 6 || cs_mode == 8 ||
1520 cs_mode == 9 || cs_mode == 12)
1521 return -1;
1522 else
1523 return ddr3_cs_size(cs_mode, false);
1524 }
1525
1526 static int f17_base_addr_to_cs_size(struct amd64_pvt *pvt, u8 umc,
1527 unsigned int cs_mode, int csrow_nr)
1528 {
1529 u32 base_addr = pvt->csels[umc].csbases[csrow_nr];
1530
1531 /* Each mask is used for every two base addresses. */
1532 u32 addr_mask = pvt->csels[umc].csmasks[csrow_nr >> 1];
1533
1534 /* Register [31:1] = Address [39:9]. Size is in kBs here. */
1535 u32 size = ((addr_mask >> 1) - (base_addr >> 1) + 1) >> 1;
1536
1537 edac_dbg(1, "BaseAddr: 0x%x, AddrMask: 0x%x\n", base_addr, addr_mask);
1538
1539 /* Return size in MBs. */
1540 return size >> 10;
1541 }
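
/*
 * Worked example (register values invented just for the arithmetic):
 * base_addr = 0x1 and addr_mask = 0x1fffff give
 * size = ((0x1fffff >> 1) - (0x1 >> 1) + 1) >> 1 = 0x80000 kB,
 * which is returned as 0x80000 >> 10 = 512 MB.
 */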
1542
1543 static void read_dram_ctl_register(struct amd64_pvt *pvt)
1544 {
1545
1546 if (pvt->fam == 0xf)
1547 return;
1548
1549 if (!amd64_read_pci_cfg(pvt->F2, DCT_SEL_LO, &pvt->dct_sel_lo)) {
1550 edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
1551 pvt->dct_sel_lo, dct_sel_baseaddr(pvt));
1552
1553 edac_dbg(0, " DCTs operate in %s mode\n",
1554 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));
1555
1556 if (!dct_ganging_enabled(pvt))
1557 edac_dbg(0, " Address range split per DCT: %s\n",
1558 (dct_high_range_enabled(pvt) ? "yes" : "no"));
1559
1560 edac_dbg(0, " data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
1561 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
1562 (dct_memory_cleared(pvt) ? "yes" : "no"));
1563
1564 edac_dbg(0, " channel interleave: %s, "
1565 "interleave bits selector: 0x%x\n",
1566 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
1567 dct_sel_interleave_addr(pvt));
1568 }
1569
1570 amd64_read_pci_cfg(pvt->F2, DCT_SEL_HI, &pvt->dct_sel_hi);
1571 }
1572
1573 /*
1574 * Determine channel (DCT) based on the interleaving mode (see F15h M30h BKDG,
1575 * 2.10.12 Memory Interleaving Modes).
1576 */
1577 static u8 f15_m30h_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1578 u8 intlv_en, int num_dcts_intlv,
1579 u32 dct_sel)
1580 {
1581 u8 channel = 0;
1582 u8 select;
1583
1584 if (!(intlv_en))
1585 return (u8)(dct_sel);
1586
1587 if (num_dcts_intlv == 2) {
1588 select = (sys_addr >> 8) & 0x3;
1589 channel = select ? 0x3 : 0;
1590 } else if (num_dcts_intlv == 4) {
1591 u8 intlv_addr = dct_sel_interleave_addr(pvt);
1592 switch (intlv_addr) {
1593 case 0x4:
1594 channel = (sys_addr >> 8) & 0x3;
1595 break;
1596 case 0x5:
1597 channel = (sys_addr >> 9) & 0x3;
1598 break;
1599 }
1600 }
1601 return channel;
1602 }
1603
1604 /*
1605 * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
1606 * Interleaving Modes.
1607 */
1608 static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1609 bool hi_range_sel, u8 intlv_en)
1610 {
1611 u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;
1612
1613 if (dct_ganging_enabled(pvt))
1614 return 0;
1615
1616 if (hi_range_sel)
1617 return dct_sel_high;
1618
1619 /*
1620 * see F2x110[DctSelIntLvAddr] - channel interleave mode
1621 */
1622 if (dct_interleave_enabled(pvt)) {
1623 u8 intlv_addr = dct_sel_interleave_addr(pvt);
1624
1625 /* return DCT select function: 0=DCT0, 1=DCT1 */
1626 if (!intlv_addr)
1627 return sys_addr >> 6 & 1;
1628
1629 if (intlv_addr & 0x2) {
1630 u8 shift = intlv_addr & 0x1 ? 9 : 6;
1631 u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) & 1;
1632
1633 return ((sys_addr >> shift) & 1) ^ temp;
1634 }
1635
1636 if (intlv_addr & 0x4) {
1637 u8 shift = intlv_addr & 0x1 ? 9 : 8;
1638
1639 return (sys_addr >> shift) & 1;
1640 }
1641
1642 return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
1643 }
1644
1645 if (dct_high_range_enabled(pvt))
1646 return ~dct_sel_high & 1;
1647
1648 return 0;
1649 }
1650
1651 /* Convert the sys_addr to the normalized DCT address */
1652 static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
1653 u64 sys_addr, bool hi_rng,
1654 u32 dct_sel_base_addr)
1655 {
1656 u64 chan_off;
1657 u64 dram_base = get_dram_base(pvt, range);
1658 u64 hole_off = f10_dhar_offset(pvt);
1659 u64 dct_sel_base_off = (u64)(pvt->dct_sel_hi & 0xFFFFFC00) << 16;
1660
1661 if (hi_rng) {
1662 /*
1663 * if
1664 * base address of high range is below 4Gb
1665 * (bits [47:27] at [31:11])
1666 * DRAM address space on this DCT is hoisted above 4Gb &&
1667 * sys_addr > 4Gb
1668 *
1669 * remove hole offset from sys_addr
1670 * else
1671 * remove high range offset from sys_addr
1672 */
1673 if ((!(dct_sel_base_addr >> 16) ||
1674 dct_sel_base_addr < dhar_base(pvt)) &&
1675 dhar_valid(pvt) &&
1676 (sys_addr >= BIT_64(32)))
1677 chan_off = hole_off;
1678 else
1679 chan_off = dct_sel_base_off;
1680 } else {
1681 /*
1682 * if
1683 * we have a valid hole &&
1684 * sys_addr > 4Gb
1685 *
1686 * remove hole
1687 * else
1688 * remove dram base to normalize to DCT address
1689 */
1690 if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
1691 chan_off = hole_off;
1692 else
1693 chan_off = dram_base;
1694 }
1695
1696 return (sys_addr & GENMASK_ULL(47,6)) - (chan_off & GENMASK_ULL(47,23));
1697 }
1698
1699 /*
1700 * Check whether the csrow passed in is marked as SPARED; if so, return the
1701 * new spare row.
1702 */
1703 static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
1704 {
1705 int tmp_cs;
1706
1707 if (online_spare_swap_done(pvt, dct) &&
1708 csrow == online_spare_bad_dramcs(pvt, dct)) {
1709
1710 for_each_chip_select(tmp_cs, dct, pvt) {
1711 if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
1712 csrow = tmp_cs;
1713 break;
1714 }
1715 }
1716 }
1717 return csrow;
1718 }
1719
1720 /*
1721 * Iterate over the DRAM DCT "base" and "mask" registers looking for a
1722 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
1723 *
1724 * Return:
1725 * -EINVAL: NOT FOUND
1726 * 0..csrow = Chip-Select Row
1727 */
1728 static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct)
1729 {
1730 struct mem_ctl_info *mci;
1731 struct amd64_pvt *pvt;
1732 u64 cs_base, cs_mask;
1733 int cs_found = -EINVAL;
1734 int csrow;
1735
1736 mci = edac_mc_find(nid);
1737 if (!mci)
1738 return cs_found;
1739
1740 pvt = mci->pvt_info;
1741
1742 edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr, dct);
1743
1744 for_each_chip_select(csrow, dct, pvt) {
1745 if (!csrow_enabled(csrow, dct, pvt))
1746 continue;
1747
1748 get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);
1749
1750 edac_dbg(1, " CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
1751 csrow, cs_base, cs_mask);
1752
1753 cs_mask = ~cs_mask;
1754
1755 edac_dbg(1, " (InputAddr & ~CSMask)=0x%llx (CSBase & ~CSMask)=0x%llx\n",
1756 (in_addr & cs_mask), (cs_base & cs_mask));
1757
1758 if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
1759 if (pvt->fam == 0x15 && pvt->model >= 0x30) {
1760 cs_found = csrow;
1761 break;
1762 }
1763 cs_found = f10_process_possible_spare(pvt, dct, csrow);
1764
1765 edac_dbg(1, " MATCH csrow=%d\n", cs_found);
1766 break;
1767 }
1768 }
1769 return cs_found;
1770 }
1771
1772 /*
1773 * See F2x10C. Non-interleaved graphics framebuffer memory below 16G is
1774 * swapped with a region located at the bottom of memory so that the GPU can use
1775 * the interleaved region and thus two channels.
1776 */
1777 static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
1778 {
1779 u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;
1780
1781 if (pvt->fam == 0x10) {
1782 /* only revC3 and revE have that feature */
1783 if (pvt->model < 4 || (pvt->model < 0xa && pvt->stepping < 3))
1784 return sys_addr;
1785 }
1786
1787 amd64_read_pci_cfg(pvt->F2, SWAP_INTLV_REG, &swap_reg);
1788
1789 if (!(swap_reg & 0x1))
1790 return sys_addr;
1791
1792 swap_base = (swap_reg >> 3) & 0x7f;
1793 swap_limit = (swap_reg >> 11) & 0x7f;
1794 rgn_size = (swap_reg >> 20) & 0x7f;
1795 tmp_addr = sys_addr >> 27;
1796
1797 if (!(sys_addr >> 34) &&
1798 (((tmp_addr >= swap_base) &&
1799 (tmp_addr <= swap_limit)) ||
1800 (tmp_addr < rgn_size)))
1801 return sys_addr ^ (u64)swap_base << 27;
1802
1803 return sys_addr;
1804 }
1805
1806 /* For a given @dram_range, check if @sys_addr falls within it. */
1807 static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
1808 u64 sys_addr, int *chan_sel)
1809 {
1810 int cs_found = -EINVAL;
1811 u64 chan_addr;
1812 u32 dct_sel_base;
1813 u8 channel;
1814 bool high_range = false;
1815
1816 u8 node_id = dram_dst_node(pvt, range);
1817 u8 intlv_en = dram_intlv_en(pvt, range);
1818 u32 intlv_sel = dram_intlv_sel(pvt, range);
1819
1820 edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
1821 range, sys_addr, get_dram_limit(pvt, range));
1822
1823 if (dhar_valid(pvt) &&
1824 dhar_base(pvt) <= sys_addr &&
1825 sys_addr < BIT_64(32)) {
1826 amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
1827 sys_addr);
1828 return -EINVAL;
1829 }
1830
1831 if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en)))
1832 return -EINVAL;
1833
1834 sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);
1835
1836 dct_sel_base = dct_sel_baseaddr(pvt);
1837
1838 /*
1839 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
1840 * select between DCT0 and DCT1.
1841 */
1842 if (dct_high_range_enabled(pvt) &&
1843 !dct_ganging_enabled(pvt) &&
1844 ((sys_addr >> 27) >= (dct_sel_base >> 11)))
1845 high_range = true;
1846
1847 channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);
1848
1849 chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
1850 high_range, dct_sel_base);
1851
1852 /* Remove node interleaving, see F1x120 */
1853 if (intlv_en)
1854 chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
1855 (chan_addr & 0xfff);
1856
1857 /* remove channel interleave */
1858 if (dct_interleave_enabled(pvt) &&
1859 !dct_high_range_enabled(pvt) &&
1860 !dct_ganging_enabled(pvt)) {
1861
1862 if (dct_sel_interleave_addr(pvt) != 1) {
1863 if (dct_sel_interleave_addr(pvt) == 0x3)
1864 /* hash 9 */
1865 chan_addr = ((chan_addr >> 10) << 9) |
1866 (chan_addr & 0x1ff);
1867 else
1868 /* A[6] or hash 6 */
1869 chan_addr = ((chan_addr >> 7) << 6) |
1870 (chan_addr & 0x3f);
1871 } else
1872 /* A[12] */
1873 chan_addr = ((chan_addr >> 13) << 12) |
1874 (chan_addr & 0xfff);
1875 }
1876
1877 edac_dbg(1, " Normalized DCT addr: 0x%llx\n", chan_addr);
1878
1879 cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);
1880
1881 if (cs_found >= 0)
1882 *chan_sel = channel;
1883
1884 return cs_found;
1885 }
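
/*
 * Editor's sketch of the node de-interleave step above (hypothetical
 * values): with intlv_en = 0x7 the interleave consumes hweight8(0x7) = 3
 * address bits at bit 12, so those bits are squeezed out while the 4KB
 * page offset is preserved.
 */
static u64 __maybe_unused node_deintlv_example(void)
{
	u64 chan_addr = 0x12345678ULL;
	u8 intlv_en = 0x7;			/* 8-node interleave */

	return ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
	       (chan_addr & 0xfff);		/* -> 0x2468678 */
}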
1886
1887 static int f15_m30h_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
1888 u64 sys_addr, int *chan_sel)
1889 {
1890 int cs_found = -EINVAL;
1891 int num_dcts_intlv = 0;
1892 u64 chan_addr, chan_offset;
1893 u64 dct_base, dct_limit;
1894 u32 dct_cont_base_reg, dct_cont_limit_reg, tmp;
1895 u8 channel, alias_channel, leg_mmio_hole, dct_sel, dct_offset_en;
1896
1897 u64 dhar_offset = f10_dhar_offset(pvt);
1898 u8 intlv_addr = dct_sel_interleave_addr(pvt);
1899 u8 node_id = dram_dst_node(pvt, range);
1900 u8 intlv_en = dram_intlv_en(pvt, range);
1901
1902 amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &dct_cont_base_reg);
1903 amd64_read_pci_cfg(pvt->F1, DRAM_CONT_LIMIT, &dct_cont_limit_reg);
1904
1905 dct_offset_en = (u8) ((dct_cont_base_reg >> 3) & BIT(0));
1906 dct_sel = (u8) ((dct_cont_base_reg >> 4) & 0x7);
1907
1908 edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
1909 range, sys_addr, get_dram_limit(pvt, range));
1910
1911 if (!(get_dram_base(pvt, range) <= sys_addr) ||
1912 !(get_dram_limit(pvt, range) >= sys_addr))
1913 return -EINVAL;
1914
1915 if (dhar_valid(pvt) &&
1916 dhar_base(pvt) <= sys_addr &&
1917 sys_addr < BIT_64(32)) {
1918 amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
1919 sys_addr);
1920 return -EINVAL;
1921 }
1922
1923 /* Verify sys_addr is within DCT Range. */
1924 dct_base = (u64) dct_sel_baseaddr(pvt);
1925 dct_limit = (dct_cont_limit_reg >> 11) & 0x1FFF;
1926
1927 if (!(dct_cont_base_reg & BIT(0)) &&
1928 !(dct_base <= (sys_addr >> 27) &&
1929 dct_limit >= (sys_addr >> 27)))
1930 return -EINVAL;
1931
1932 /* Verify the number of DCTs that participate in channel interleaving. */
1933 num_dcts_intlv = (int) hweight8(intlv_en);
1934
1935 if (!(num_dcts_intlv % 2 == 0) || (num_dcts_intlv > 4))
1936 return -EINVAL;
1937
1938 if (pvt->model >= 0x60)
1939 channel = f1x_determine_channel(pvt, sys_addr, false, intlv_en);
1940 else
1941 channel = f15_m30h_determine_channel(pvt, sys_addr, intlv_en,
1942 num_dcts_intlv, dct_sel);
1943
1944 /* Verify we stay within the MAX number of channels allowed */
1945 if (channel > 3)
1946 return -EINVAL;
1947
1948 leg_mmio_hole = (u8) (dct_cont_base_reg >> 1 & BIT(0));
1949
1950 /* Get normalized DCT addr */
1951 if (leg_mmio_hole && (sys_addr >= BIT_64(32)))
1952 chan_offset = dhar_offset;
1953 else
1954 chan_offset = dct_base << 27;
1955
1956 chan_addr = sys_addr - chan_offset;
1957
1958 /* remove channel interleave */
1959 if (num_dcts_intlv == 2) {
1960 if (intlv_addr == 0x4)
1961 chan_addr = ((chan_addr >> 9) << 8) |
1962 (chan_addr & 0xff);
1963 else if (intlv_addr == 0x5)
1964 chan_addr = ((chan_addr >> 10) << 9) |
1965 (chan_addr & 0x1ff);
1966 else
1967 return -EINVAL;
1968
1969 } else if (num_dcts_intlv == 4) {
1970 if (intlv_addr == 0x4)
1971 chan_addr = ((chan_addr >> 10) << 8) |
1972 (chan_addr & 0xff);
1973 else if (intlv_addr == 0x5)
1974 chan_addr = ((chan_addr >> 11) << 9) |
1975 (chan_addr & 0x1ff);
1976 else
1977 return -EINVAL;
1978 }
1979
1980 if (dct_offset_en) {
1981 amd64_read_pci_cfg(pvt->F1,
1982 DRAM_CONT_HIGH_OFF + (int) channel * 4,
1983 &tmp);
1984 chan_addr += (u64) ((tmp >> 11) & 0xfff) << 27;
1985 }
1986
1987 f15h_select_dct(pvt, channel);
1988
1989 edac_dbg(1, " Normalized DCT addr: 0x%llx\n", chan_addr);
1990
1991 /*
1992 * Find Chip select:
1993 * if channel = 3, then alias it to 1. This is because, in F15 M30h,
1994 * there is support for 4 DCTs, but only 2 are currently functional.
1995 * They are DCT0 and DCT3. But we have read all registers of DCT3 into
1996 * pvt->csels[1]. So we need to use '1' here to get correct info.
1997 * Refer F15 M30h BKDG Section 2.10 and 2.10.3 for clarifications.
1998 */
1999 alias_channel = (channel == 3) ? 1 : channel;
2000
2001 cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, alias_channel);
2002
2003 if (cs_found >= 0)
2004 *chan_sel = alias_channel;
2005
2006 return cs_found;
2007 }
2008
2009 static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt,
2010 u64 sys_addr,
2011 int *chan_sel)
2012 {
2013 int cs_found = -EINVAL;
2014 unsigned range;
2015
2016 for (range = 0; range < DRAM_RANGES; range++) {
2017 if (!dram_rw(pvt, range))
2018 continue;
2019
2020 if (pvt->fam == 0x15 && pvt->model >= 0x30)
2021 cs_found = f15_m30h_match_to_this_node(pvt, range,
2022 sys_addr,
2023 chan_sel);
2024
2025 else if ((get_dram_base(pvt, range) <= sys_addr) &&
2026 (get_dram_limit(pvt, range) >= sys_addr)) {
2027 cs_found = f1x_match_to_this_node(pvt, range,
2028 sys_addr, chan_sel);
2029 if (cs_found >= 0)
2030 break;
2031 }
2032 }
2033 return cs_found;
2034 }
2035
2036 /*
2037 * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
2038 * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
2039 *
2040 * The @sys_addr is usually an error address received from the hardware
2041 * (MCX_ADDR).
2042 */
2043 static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
2044 struct err_info *err)
2045 {
2046 struct amd64_pvt *pvt = mci->pvt_info;
2047
2048 error_address_to_page_and_offset(sys_addr, err);
2049
2050 err->csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &err->channel);
2051 if (err->csrow < 0) {
2052 err->err_code = ERR_CSROW;
2053 return;
2054 }
2055
2056 /*
2057 * We need the syndromes for channel detection only when we're
2058 * ganged. Otherwise @chan should already contain the channel at
2059 * this point.
2060 */
2061 if (dct_ganging_enabled(pvt))
2062 err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
2063 }
2064
2065 /*
2066 * debug routine to display the memory sizes of all logical DIMMs and their
2067 * CSROWs
2068 */
2069 static void debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
2070 {
2071 int dimm, size0, size1;
2072 u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
2073 u32 dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
2074
2075 if (pvt->fam == 0xf) {
2076 /* K8 families < revF not supported yet */
2077 if (pvt->ext_model < K8_REV_F)
2078 return;
2079 else
2080 WARN_ON(ctrl != 0);
2081 }
2082
2083 if (pvt->fam == 0x10) {
2084 dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1
2085 : pvt->dbam0;
2086 dcsb = (ctrl && !dct_ganging_enabled(pvt)) ?
2087 pvt->csels[1].csbases :
2088 pvt->csels[0].csbases;
2089 } else if (ctrl) {
2090 dbam = pvt->dbam0;
2091 dcsb = pvt->csels[1].csbases;
2092 }
2093 edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
2094 ctrl, dbam);
2095
2096 edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
2097
2098 /* Dump memory sizes for each DIMM and its CSROWs */
2099 for (dimm = 0; dimm < 4; dimm++) {
2100
2101 size0 = 0;
2102 if (dcsb[dimm*2] & DCSB_CS_ENABLE)
2103 /*
2104 * For F15h M60h, we need a multiplier for the LRDIMM cs_size
2105 * calculation. We pass the dimm value to the dbam_to_cs
2106 * mapper so we can find the multiplier from the
2107 * corresponding DCSM.
2108 */
2109 size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
2110 DBAM_DIMM(dimm, dbam),
2111 dimm);
2112
2113 size1 = 0;
2114 if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
2115 size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
2116 DBAM_DIMM(dimm, dbam),
2117 dimm);
2118
2119 amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
2120 dimm * 2, size0,
2121 dimm * 2 + 1, size1);
2122 }
2123 }
2124
2125 static struct amd64_family_type family_types[] = {
2126 [K8_CPUS] = {
2127 .ctl_name = "K8",
2128 .f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
2129 .f2_id = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
2130 .ops = {
2131 .early_channel_count = k8_early_channel_count,
2132 .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
2133 .dbam_to_cs = k8_dbam_to_chip_select,
2134 }
2135 },
2136 [F10_CPUS] = {
2137 .ctl_name = "F10h",
2138 .f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
2139 .f2_id = PCI_DEVICE_ID_AMD_10H_NB_DRAM,
2140 .ops = {
2141 .early_channel_count = f1x_early_channel_count,
2142 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
2143 .dbam_to_cs = f10_dbam_to_chip_select,
2144 }
2145 },
2146 [F15_CPUS] = {
2147 .ctl_name = "F15h",
2148 .f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
2149 .f2_id = PCI_DEVICE_ID_AMD_15H_NB_F2,
2150 .ops = {
2151 .early_channel_count = f1x_early_channel_count,
2152 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
2153 .dbam_to_cs = f15_dbam_to_chip_select,
2154 }
2155 },
2156 [F15_M30H_CPUS] = {
2157 .ctl_name = "F15h_M30h",
2158 .f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1,
2159 .f2_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2,
2160 .ops = {
2161 .early_channel_count = f1x_early_channel_count,
2162 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
2163 .dbam_to_cs = f16_dbam_to_chip_select,
2164 }
2165 },
2166 [F15_M60H_CPUS] = {
2167 .ctl_name = "F15h_M60h",
2168 .f1_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1,
2169 .f2_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F2,
2170 .ops = {
2171 .early_channel_count = f1x_early_channel_count,
2172 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
2173 .dbam_to_cs = f15_m60h_dbam_to_chip_select,
2174 }
2175 },
2176 [F16_CPUS] = {
2177 .ctl_name = "F16h",
2178 .f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1,
2179 .f2_id = PCI_DEVICE_ID_AMD_16H_NB_F2,
2180 .ops = {
2181 .early_channel_count = f1x_early_channel_count,
2182 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
2183 .dbam_to_cs = f16_dbam_to_chip_select,
2184 }
2185 },
2186 [F16_M30H_CPUS] = {
2187 .ctl_name = "F16h_M30h",
2188 .f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1,
2189 .f2_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2,
2190 .ops = {
2191 .early_channel_count = f1x_early_channel_count,
2192 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
2193 .dbam_to_cs = f16_dbam_to_chip_select,
2194 }
2195 },
2196 [F17_CPUS] = {
2197 .ctl_name = "F17h",
2198 .f0_id = PCI_DEVICE_ID_AMD_17H_DF_F0,
2199 .f6_id = PCI_DEVICE_ID_AMD_17H_DF_F6,
2200 .ops = {
2201 .early_channel_count = f17_early_channel_count,
2202 .dbam_to_cs = f17_base_addr_to_cs_size,
2203 }
2204 },
2205 [F17_M10H_CPUS] = {
2206 .ctl_name = "F17h_M10h",
2207 .f0_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F0,
2208 .f6_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F6,
2209 .ops = {
2210 .early_channel_count = f17_early_channel_count,
2211 .dbam_to_cs = f17_base_addr_to_cs_size,
2212 }
2213 },
2214 [F17_M30H_CPUS] = {
2215 .ctl_name = "F17h_M30h",
2216 .f0_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F0,
2217 .f6_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F6,
2218 .ops = {
2219 .early_channel_count = f17_early_channel_count,
2220 .dbam_to_cs = f17_base_addr_to_cs_size,
2221 }
2222 },
2223 };
2224
2225 /*
2226 * These are tables of eigenvectors (one per line) which can be used for the
2227 * construction of the syndrome tables. The modified syndrome search algorithm
2228 * uses those to find the symbol in error and thus the DIMM.
2229 *
2230 * Algorithm courtesy of Ross LaFetra from AMD.
2231 */
2232 static const u16 x4_vectors[] = {
2233 0x2f57, 0x1afe, 0x66cc, 0xdd88,
2234 0x11eb, 0x3396, 0x7f4c, 0xeac8,
2235 0x0001, 0x0002, 0x0004, 0x0008,
2236 0x1013, 0x3032, 0x4044, 0x8088,
2237 0x106b, 0x30d6, 0x70fc, 0xe0a8,
2238 0x4857, 0xc4fe, 0x13cc, 0x3288,
2239 0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
2240 0x1f39, 0x251e, 0xbd6c, 0x6bd8,
2241 0x15c1, 0x2a42, 0x89ac, 0x4758,
2242 0x2b03, 0x1602, 0x4f0c, 0xca08,
2243 0x1f07, 0x3a0e, 0x6b04, 0xbd08,
2244 0x8ba7, 0x465e, 0x244c, 0x1cc8,
2245 0x2b87, 0x164e, 0x642c, 0xdc18,
2246 0x40b9, 0x80de, 0x1094, 0x20e8,
2247 0x27db, 0x1eb6, 0x9dac, 0x7b58,
2248 0x11c1, 0x2242, 0x84ac, 0x4c58,
2249 0x1be5, 0x2d7a, 0x5e34, 0xa718,
2250 0x4b39, 0x8d1e, 0x14b4, 0x28d8,
2251 0x4c97, 0xc87e, 0x11fc, 0x33a8,
2252 0x8e97, 0x497e, 0x2ffc, 0x1aa8,
2253 0x16b3, 0x3d62, 0x4f34, 0x8518,
2254 0x1e2f, 0x391a, 0x5cac, 0xf858,
2255 0x1d9f, 0x3b7a, 0x572c, 0xfe18,
2256 0x15f5, 0x2a5a, 0x5264, 0xa3b8,
2257 0x1dbb, 0x3b66, 0x715c, 0xe3f8,
2258 0x4397, 0xc27e, 0x17fc, 0x3ea8,
2259 0x1617, 0x3d3e, 0x6464, 0xb8b8,
2260 0x23ff, 0x12aa, 0xab6c, 0x56d8,
2261 0x2dfb, 0x1ba6, 0x913c, 0x7328,
2262 0x185d, 0x2ca6, 0x7914, 0x9e28,
2263 0x171b, 0x3e36, 0x7d7c, 0xebe8,
2264 0x4199, 0x82ee, 0x19f4, 0x2e58,
2265 0x4807, 0xc40e, 0x130c, 0x3208,
2266 0x1905, 0x2e0a, 0x5804, 0xac08,
2267 0x213f, 0x132a, 0xadfc, 0x5ba8,
2268 0x19a9, 0x2efe, 0xb5cc, 0x6f88,
2269 };
2270
2271 static const u16 x8_vectors[] = {
2272 0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
2273 0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
2274 0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
2275 0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
2276 0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
2277 0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
2278 0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
2279 0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
2280 0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
2281 0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
2282 0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
2283 0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
2284 0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
2285 0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
2286 0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
2287 0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
2288 0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
2289 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
2290 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
2291 };
2292
2293 static int decode_syndrome(u16 syndrome, const u16 *vectors, unsigned num_vecs,
2294 unsigned v_dim)
2295 {
2296 unsigned int i, err_sym;
2297
2298 for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
2299 u16 s = syndrome;
2300 unsigned v_idx = err_sym * v_dim;
2301 unsigned v_end = (err_sym + 1) * v_dim;
2302
2303 /* walk over all 16 bits of the syndrome */
2304 for (i = 1; i < (1U << 16); i <<= 1) {
2305
2306 /* if bit is set in that eigenvector... */
2307 if (v_idx < v_end && vectors[v_idx] & i) {
2308 u16 ev_comp = vectors[v_idx++];
2309
2310 /* ... and bit set in the modified syndrome, */
2311 if (s & i) {
2312 /* remove it. */
2313 s ^= ev_comp;
2314
2315 if (!s)
2316 return err_sym;
2317 }
2318
2319 } else if (s & i)
2320 /* can't get to zero, move to next symbol */
2321 break;
2322 }
2323 }
2324
2325 edac_dbg(0, "syndrome(%x) not found\n", syndrome);
2326 return -1;
2327 }
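
/*
 * Editor's illustrative sketch (not wired into the driver): a syndrome that
 * is the XOR of eigenvectors taken from one group decodes back to that
 * group's index, i.e. the error symbol. The third x4 group (err_sym 2) is
 * {0x0001, 0x0002, 0x0004, 0x0008}, so 0x0002 ^ 0x0008 = 0x000a must decode
 * to 2, which map_err_sym_to_channel() below then maps to channel 0 (2 >> 4).
 */
static int __maybe_unused decode_syndrome_example(void)
{
	u16 syndrome = 0x0002 ^ 0x0008;		/* built from group-2 vectors */

	/* expected result: 2 */
	return decode_syndrome(syndrome, x4_vectors, ARRAY_SIZE(x4_vectors), 4);
}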
2328
2329 static int map_err_sym_to_channel(int err_sym, int sym_size)
2330 {
2331 if (sym_size == 4)
2332 switch (err_sym) {
2333 case 0x20:
2334 case 0x21:
2335 return 0;
2336 break;
2337 case 0x22:
2338 case 0x23:
2339 return 1;
2340 break;
2341 default:
2342 return err_sym >> 4;
2343 break;
2344 }
2345 /* x8 symbols */
2346 else
2347 switch (err_sym) {
2348 /* imaginary bits not in a DIMM */
2349 case 0x10:
2350 WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n",
2351 err_sym);
2352 return -1;
2353 break;
2354
2355 case 0x11:
2356 return 0;
2357 break;
2358 case 0x12:
2359 return 1;
2360 break;
2361 default:
2362 return err_sym >> 3;
2363 break;
2364 }
2365 return -1;
2366 }
2367
2368 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
2369 {
2370 struct amd64_pvt *pvt = mci->pvt_info;
2371 int err_sym = -1;
2372
2373 if (pvt->ecc_sym_sz == 8)
2374 err_sym = decode_syndrome(syndrome, x8_vectors,
2375 ARRAY_SIZE(x8_vectors),
2376 pvt->ecc_sym_sz);
2377 else if (pvt->ecc_sym_sz == 4)
2378 err_sym = decode_syndrome(syndrome, x4_vectors,
2379 ARRAY_SIZE(x4_vectors),
2380 pvt->ecc_sym_sz);
2381 else {
2382 amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
2383 return err_sym;
2384 }
2385
2386 return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
2387 }
2388
2389 static void __log_ecc_error(struct mem_ctl_info *mci, struct err_info *err,
2390 u8 ecc_type)
2391 {
2392 enum hw_event_mc_err_type err_type;
2393 const char *string;
2394
2395 if (ecc_type == 2)
2396 err_type = HW_EVENT_ERR_CORRECTED;
2397 else if (ecc_type == 1)
2398 err_type = HW_EVENT_ERR_UNCORRECTED;
2399 else if (ecc_type == 3)
2400 err_type = HW_EVENT_ERR_DEFERRED;
2401 else {
2402 WARN(1, "Something is rotten in the state of Denmark.\n");
2403 return;
2404 }
2405
2406 switch (err->err_code) {
2407 case DECODE_OK:
2408 string = "";
2409 break;
2410 case ERR_NODE:
2411 string = "Failed to map error addr to a node";
2412 break;
2413 case ERR_CSROW:
2414 string = "Failed to map error addr to a csrow";
2415 break;
2416 case ERR_CHANNEL:
2417 string = "Unknown syndrome - possible error reporting race";
2418 break;
2419 case ERR_SYND:
2420 string = "MCA_SYND not valid - unknown syndrome and csrow";
2421 break;
2422 case ERR_NORM_ADDR:
2423 string = "Cannot decode normalized address";
2424 break;
2425 default:
2426 string = "WTF error";
2427 break;
2428 }
2429
2430 edac_mc_handle_error(err_type, mci, 1,
2431 err->page, err->offset, err->syndrome,
2432 err->csrow, err->channel, -1,
2433 string, "");
2434 }
2435
2436 static inline void decode_bus_error(int node_id, struct mce *m)
2437 {
2438 struct mem_ctl_info *mci;
2439 struct amd64_pvt *pvt;
2440 u8 ecc_type = (m->status >> 45) & 0x3;
2441 u8 xec = XEC(m->status, 0x1f);
2442 u16 ec = EC(m->status);
2443 u64 sys_addr;
2444 struct err_info err;
2445
2446 mci = edac_mc_find(node_id);
2447 if (!mci)
2448 return;
2449
2450 pvt = mci->pvt_info;
2451
2452 /* Bail out early if this was an 'observed' error */
2453 if (PP(ec) == NBSL_PP_OBS)
2454 return;
2455
2456 /* Do only ECC errors */
2457 if (xec && xec != F10_NBSL_EXT_ERR_ECC)
2458 return;
2459
2460 memset(&err, 0, sizeof(err));
2461
2462 sys_addr = get_error_address(pvt, m);
2463
2464 if (ecc_type == 2)
2465 err.syndrome = extract_syndrome(m->status);
2466
2467 pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err);
2468
2469 __log_ecc_error(mci, &err, ecc_type);
2470 }
2471
2472 /*
2473 * To find the UMC channel represented by this bank we need to match on its
2474 * instance_id. The instance_id of a bank is held in the lower 32 bits of its
2475 * IPID.
2476 */
2477 static int find_umc_channel(struct amd64_pvt *pvt, struct mce *m)
2478 {
2479 u32 umc_instance_id[] = {0x50f00, 0x150f00};
2480 u32 instance_id = m->ipid & GENMASK(31, 0);
2481 int i, channel = -1;
2482
2483 for (i = 0; i < ARRAY_SIZE(umc_instance_id); i++)
2484 if (umc_instance_id[i] == instance_id)
2485 channel = i;
2486
2487 return channel;
2488 }
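
/*
 * Editor's sketch (made-up IPID, not driver code): with the instance IDs
 * above, an IPID whose low 32 bits equal 0x150f00 selects UMC channel 1;
 * anything else yields -1 and is reported as ERR_CHANNEL by the caller.
 */
static int __maybe_unused find_umc_channel_example(struct amd64_pvt *pvt)
{
	struct mce m = { .ipid = 0xdead0000150f00ULL };	/* upper bits are don't-care */

	return find_umc_channel(pvt, &m);		/* -> 1 */
}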
2489
2490 static void decode_umc_error(int node_id, struct mce *m)
2491 {
2492 u8 ecc_type = (m->status >> 45) & 0x3;
2493 struct mem_ctl_info *mci;
2494 struct amd64_pvt *pvt;
2495 struct err_info err;
2496 u64 sys_addr;
2497
2498 mci = edac_mc_find(node_id);
2499 if (!mci)
2500 return;
2501
2502 pvt = mci->pvt_info;
2503
2504 memset(&err, 0, sizeof(err));
2505
2506 if (m->status & MCI_STATUS_DEFERRED)
2507 ecc_type = 3;
2508
2509 err.channel = find_umc_channel(pvt, m);
2510 if (err.channel < 0) {
2511 err.err_code = ERR_CHANNEL;
2512 goto log_error;
2513 }
2514
2515 if (!(m->status & MCI_STATUS_SYNDV)) {
2516 err.err_code = ERR_SYND;
2517 goto log_error;
2518 }
2519
2520 if (ecc_type == 2) {
2521 u8 length = (m->synd >> 18) & 0x3f;
2522
2523 if (length)
2524 err.syndrome = (m->synd >> 32) & GENMASK(length - 1, 0);
2525 else
2526 err.err_code = ERR_CHANNEL;
2527 }
2528
2529 err.csrow = m->synd & 0x7;
2530
2531 if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
2532 err.err_code = ERR_NORM_ADDR;
2533 goto log_error;
2534 }
2535
2536 error_address_to_page_and_offset(sys_addr, &err);
2537
2538 log_error:
2539 __log_ecc_error(mci, &err, ecc_type);
2540 }
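
/*
 * Editor's sketch of the CECC syndrome extraction above, with a made-up
 * MCA_SYND value: bits [23:18] give the syndrome length in bits and bits
 * [63:32] hold the syndrome itself, masked to that length.
 */
static u16 __maybe_unused umc_synd_example(void)
{
	u64 synd = (0x1234abcdULL << 32) | (16 << 18);	/* length = 16 bits */
	u8 length = (synd >> 18) & 0x3f;

	return (synd >> 32) & GENMASK(length - 1, 0);	/* -> 0xabcd */
}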
2541
2542 /*
2543 * Use pvt->F3 which contains the F3 CPU PCI device to get the related
2544 * F1 (AddrMap) and F2 (Dct) devices. Return negative value on error.
2545 * Reserve F0 and F6 on systems with a UMC.
2546 */
2547 static int
2548 reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
2549 {
2550 if (pvt->umc) {
2551 pvt->F0 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
2552 if (!pvt->F0) {
2553 amd64_err("F0 not found, device 0x%x (broken BIOS?)\n", pci_id1);
2554 return -ENODEV;
2555 }
2556
2557 pvt->F6 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
2558 if (!pvt->F6) {
2559 pci_dev_put(pvt->F0);
2560 pvt->F0 = NULL;
2561
2562 amd64_err("F6 not found: device 0x%x (broken BIOS?)\n", pci_id2);
2563 return -ENODEV;
2564 }
2565
2566 edac_dbg(1, "F0: %s\n", pci_name(pvt->F0));
2567 edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
2568 edac_dbg(1, "F6: %s\n", pci_name(pvt->F6));
2569
2570 return 0;
2571 }
2572
2573 /* Reserve the ADDRESS MAP Device */
2574 pvt->F1 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
2575 if (!pvt->F1) {
2576 amd64_err("F1 not found: device 0x%x (broken BIOS?)\n", pci_id1);
2577 return -ENODEV;
2578 }
2579
2580 /* Reserve the DCT Device */
2581 pvt->F2 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
2582 if (!pvt->F2) {
2583 pci_dev_put(pvt->F1);
2584 pvt->F1 = NULL;
2585
2586 amd64_err("F2 not found: device 0x%x (broken BIOS?)\n", pci_id2);
2587 return -ENODEV;
2588 }
2589
2590 edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
2591 edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
2592 edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
2593
2594 return 0;
2595 }
2596
2597 static void free_mc_sibling_devs(struct amd64_pvt *pvt)
2598 {
2599 if (pvt->umc) {
2600 pci_dev_put(pvt->F0);
2601 pci_dev_put(pvt->F6);
2602 } else {
2603 pci_dev_put(pvt->F1);
2604 pci_dev_put(pvt->F2);
2605 }
2606 }
2607
2608 static void determine_ecc_sym_sz(struct amd64_pvt *pvt)
2609 {
2610 pvt->ecc_sym_sz = 4;
2611
2612 if (pvt->umc) {
2613 u8 i;
2614
2615 for (i = 0; i < NUM_UMCS; i++) {
2616 /* Check enabled channels only: */
2617 if ((pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) &&
2618 (pvt->umc[i].ecc_ctrl & BIT(7))) {
2619 pvt->ecc_sym_sz = 8;
2620 break;
2621 }
2622 }
2623
2624 return;
2625 }
2626
2627 if (pvt->fam >= 0x10) {
2628 u32 tmp;
2629
2630 amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
2631 /* F16h has only DCT0, so no need to read dbam1. */
2632 if (pvt->fam != 0x16)
2633 amd64_read_dct_pci_cfg(pvt, 1, DBAM0, &pvt->dbam1);
2634
2635 /* F10h, revD and later can do x8 ECC too. */
2636 if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25))
2637 pvt->ecc_sym_sz = 8;
2638 }
2639 }
2640
2641 /*
2642 * Retrieve the hardware registers of the memory controller.
2643 */
2644 static void __read_mc_regs_df(struct amd64_pvt *pvt)
2645 {
2646 u8 nid = pvt->mc_node_id;
2647 struct amd64_umc *umc;
2648 u32 i, umc_base;
2649
2650 /* Read registers from each UMC */
2651 for (i = 0; i < NUM_UMCS; i++) {
2652
2653 umc_base = get_umc_base(i);
2654 umc = &pvt->umc[i];
2655
2656 amd_smn_read(nid, umc_base + UMCCH_DIMM_CFG, &umc->dimm_cfg);
2657 amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg);
2658 amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl);
2659 amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl);
2660 amd_smn_read(nid, umc_base + UMCCH_UMC_CAP_HI, &umc->umc_cap_hi);
2661 }
2662 }
2663
2664 /*
2665 * Retrieve the hardware registers of the memory controller (this includes the
2666 * 'Address Map' and 'Misc' device regs)
2667 */
2668 static void read_mc_regs(struct amd64_pvt *pvt)
2669 {
2670 unsigned int range;
2671 u64 msr_val;
2672
2673 /*
2674 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
2675 * those are Read-As-Zero.
2676 */
2677 rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
2678 edac_dbg(0, " TOP_MEM: 0x%016llx\n", pvt->top_mem);
2679
2680 /* Check first whether TOP_MEM2 is enabled: */
2681 rdmsrl(MSR_K8_SYSCFG, msr_val);
2682 if (msr_val & BIT(21)) {
2683 rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
2684 edac_dbg(0, " TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
2685 } else {
2686 edac_dbg(0, " TOP_MEM2 disabled\n");
2687 }
2688
2689 if (pvt->umc) {
2690 __read_mc_regs_df(pvt);
2691 amd64_read_pci_cfg(pvt->F0, DF_DHAR, &pvt->dhar);
2692
2693 goto skip;
2694 }
2695
2696 amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
2697
2698 read_dram_ctl_register(pvt);
2699
2700 for (range = 0; range < DRAM_RANGES; range++) {
2701 u8 rw;
2702
2703 /* read settings for this DRAM range */
2704 read_dram_base_limit_regs(pvt, range);
2705
2706 rw = dram_rw(pvt, range);
2707 if (!rw)
2708 continue;
2709
2710 edac_dbg(1, " DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
2711 range,
2712 get_dram_base(pvt, range),
2713 get_dram_limit(pvt, range));
2714
2715 edac_dbg(1, " IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
2716 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
2717 (rw & 0x1) ? "R" : "-",
2718 (rw & 0x2) ? "W" : "-",
2719 dram_intlv_sel(pvt, range),
2720 dram_dst_node(pvt, range));
2721 }
2722
2723 amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
2724 amd64_read_dct_pci_cfg(pvt, 0, DBAM0, &pvt->dbam0);
2725
2726 amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
2727
2728 amd64_read_dct_pci_cfg(pvt, 0, DCLR0, &pvt->dclr0);
2729 amd64_read_dct_pci_cfg(pvt, 0, DCHR0, &pvt->dchr0);
2730
2731 if (!dct_ganging_enabled(pvt)) {
2732 amd64_read_dct_pci_cfg(pvt, 1, DCLR0, &pvt->dclr1);
2733 amd64_read_dct_pci_cfg(pvt, 1, DCHR0, &pvt->dchr1);
2734 }
2735
2736 skip:
2737 read_dct_base_mask(pvt);
2738
2739 determine_memory_type(pvt);
2740 edac_dbg(1, " DIMM type: %s\n", edac_mem_types[pvt->dram_type]);
2741
2742 determine_ecc_sym_sz(pvt);
2743
2744 dump_misc_regs(pvt);
2745 }
2746
2747 /*
2748 * NOTE: CPU Revision Dependent code
2749 *
2750 * Input:
2751 * @csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
2752 * the k8 private data pointer, which provides -->
2753 * the DRAM Bank Address mapping register,
2754 * the node_id,
2755 * and the DCL register where dual_channel_active is held
2756 *
2757 * The DBAM register consists of 4 sets of 4 bits each; their definitions:
2758 *
2759 * Bits: CSROWs
2760 * 0-3 CSROWs 0 and 1
2761 * 4-7 CSROWs 2 and 3
2762 * 8-11 CSROWs 4 and 5
2763 * 12-15 CSROWs 6 and 7
2764 *
2765 * Values range from: 0 to 15
2766 * The meaning of the values depends on CPU revision and dual-channel state,
2767 * see the relevant BKDG for more info.
2768 *
2769 * The memory controller provides for a total of only 8 CSROWs in its current
2770 * architecture. Each "pair" of CSROWs normally represents just one DIMM in
2771 * single channel mode or two (2) DIMMs in dual channel mode.
2772 *
2773 * The following code logic collapses the various tables for CSROW based on CPU
2774 * revision.
2775 *
2776 * Returns:
2777 * The number of PAGE_SIZE pages that the specified CSROW number
2778 * encompasses.
2779 *
2780 */
2781 static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr_orig)
2782 {
2783 u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
2784 int csrow_nr = csrow_nr_orig;
2785 u32 cs_mode, nr_pages;
2786
2787 if (!pvt->umc)
2788 csrow_nr >>= 1;
2789
2790 cs_mode = DBAM_DIMM(csrow_nr, dbam);
2791
2792 nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, csrow_nr);
2793 nr_pages <<= 20 - PAGE_SHIFT;
2794
2795 edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n",
2796 csrow_nr_orig, dct, cs_mode);
2797 edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);
2798
2799 return nr_pages;
2800 }
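
/*
 * Editor's worked example (hypothetical values): per the DBAM layout
 * described in the comment above, each csrow pair owns one 4-bit field.
 * With DBAM = 0x8421, DBAM_DIMM(2, dbam) picks the field for csrow pair
 * 4/5. If ->dbam_to_cs() were to report 2048MB for that field, the csrow
 * would contribute 2048 << (20 - PAGE_SHIFT) pages per channel.
 */
static u32 __maybe_unused csrow_pages_example(void)
{
	u32 dbam = 0x8421;			/* hypothetical DBAM value */
	u32 cs_mode = DBAM_DIMM(2, dbam);	/* 4-bit field for csrow pair 4/5 */
	u32 size_mb = 2048;			/* pretend dbam_to_cs() said 2GB */

	return cs_mode ? size_mb << (20 - PAGE_SHIFT) : 0;	/* 524288 pages @ 4KB */
}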
2801
2802 /*
2803 * Initialize the array of csrow attribute instances, based on the values
2804 * from pci config hardware registers.
2805 */
2806 static int init_csrows(struct mem_ctl_info *mci)
2807 {
2808 struct amd64_pvt *pvt = mci->pvt_info;
2809 enum edac_type edac_mode = EDAC_NONE;
2810 struct csrow_info *csrow;
2811 struct dimm_info *dimm;
2812 int i, j, empty = 1;
2813 int nr_pages = 0;
2814 u32 val;
2815
2816 if (!pvt->umc) {
2817 amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
2818
2819 pvt->nbcfg = val;
2820
2821 edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
2822 pvt->mc_node_id, val,
2823 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
2824 }
2825
2826 /*
2827 * We iterate over DCT0 here but we look at DCT1 in parallel, if needed.
2828 */
2829 for_each_chip_select(i, 0, pvt) {
2830 bool row_dct0 = !!csrow_enabled(i, 0, pvt);
2831 bool row_dct1 = false;
2832
2833 if (pvt->fam != 0xf)
2834 row_dct1 = !!csrow_enabled(i, 1, pvt);
2835
2836 if (!row_dct0 && !row_dct1)
2837 continue;
2838
2839 csrow = mci->csrows[i];
2840 empty = 0;
2841
2842 edac_dbg(1, "MC node: %d, csrow: %d\n",
2843 pvt->mc_node_id, i);
2844
2845 if (row_dct0) {
2846 nr_pages = get_csrow_nr_pages(pvt, 0, i);
2847 csrow->channels[0]->dimm->nr_pages = nr_pages;
2848 }
2849
2850 /* K8 has only one DCT */
2851 if (pvt->fam != 0xf && row_dct1) {
2852 int row_dct1_pages = get_csrow_nr_pages(pvt, 1, i);
2853
2854 csrow->channels[1]->dimm->nr_pages = row_dct1_pages;
2855 nr_pages += row_dct1_pages;
2856 }
2857
2858 edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);
2859
2860 /* Determine DIMM ECC mode: */
2861 if (pvt->umc) {
2862 if (mci->edac_ctl_cap & EDAC_FLAG_S4ECD4ED)
2863 edac_mode = EDAC_S4ECD4ED;
2864 else if (mci->edac_ctl_cap & EDAC_FLAG_SECDED)
2865 edac_mode = EDAC_SECDED;
2866
2867 } else if (pvt->nbcfg & NBCFG_ECC_ENABLE) {
2868 edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL)
2869 ? EDAC_S4ECD4ED
2870 : EDAC_SECDED;
2871 }
2872
2873 for (j = 0; j < pvt->channel_count; j++) {
2874 dimm = csrow->channels[j]->dimm;
2875 dimm->mtype = pvt->dram_type;
2876 dimm->edac_mode = edac_mode;
2877 dimm->grain = 64;
2878 }
2879 }
2880
2881 return empty;
2882 }
2883
2884 /* get all cores on this DCT */
2885 static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
2886 {
2887 int cpu;
2888
2889 for_each_online_cpu(cpu)
2890 if (amd_get_nb_id(cpu) == nid)
2891 cpumask_set_cpu(cpu, mask);
2892 }
2893
2894 /* check MCG_CTL on all the cpus on this node */
2895 static bool nb_mce_bank_enabled_on_node(u16 nid)
2896 {
2897 cpumask_var_t mask;
2898 int cpu, nbe;
2899 bool ret = false;
2900
2901 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
2902 amd64_warn("%s: Error allocating mask\n", __func__);
2903 return false;
2904 }
2905
2906 get_cpus_on_this_dct_cpumask(mask, nid);
2907
2908 rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
2909
2910 for_each_cpu(cpu, mask) {
2911 struct msr *reg = per_cpu_ptr(msrs, cpu);
2912 nbe = reg->l & MSR_MCGCTL_NBE;
2913
2914 edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
2915 cpu, reg->q,
2916 (nbe ? "enabled" : "disabled"));
2917
2918 if (!nbe)
2919 goto out;
2920 }
2921 ret = true;
2922
2923 out:
2924 free_cpumask_var(mask);
2925 return ret;
2926 }
2927
2928 static int toggle_ecc_err_reporting(struct ecc_settings *s, u16 nid, bool on)
2929 {
2930 cpumask_var_t cmask;
2931 int cpu;
2932
2933 if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
2934 amd64_warn("%s: error allocating mask\n", __func__);
2935 return -ENOMEM;
2936 }
2937
2938 get_cpus_on_this_dct_cpumask(cmask, nid);
2939
2940 rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
2941
2942 for_each_cpu(cpu, cmask) {
2943
2944 struct msr *reg = per_cpu_ptr(msrs, cpu);
2945
2946 if (on) {
2947 if (reg->l & MSR_MCGCTL_NBE)
2948 s->flags.nb_mce_enable = 1;
2949
2950 reg->l |= MSR_MCGCTL_NBE;
2951 } else {
2952 /*
2953 * Turn off NB MCE reporting only when it was off before
2954 */
2955 if (!s->flags.nb_mce_enable)
2956 reg->l &= ~MSR_MCGCTL_NBE;
2957 }
2958 }
2959 wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
2960
2961 free_cpumask_var(cmask);
2962
2963 return 0;
2964 }
2965
2966 static bool enable_ecc_error_reporting(struct ecc_settings *s, u16 nid,
2967 struct pci_dev *F3)
2968 {
2969 bool ret = true;
2970 u32 value, mask = 0x3; /* UECC/CECC enable */
2971
2972 if (toggle_ecc_err_reporting(s, nid, ON)) {
2973 amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
2974 return false;
2975 }
2976
2977 amd64_read_pci_cfg(F3, NBCTL, &value);
2978
2979 s->old_nbctl = value & mask;
2980 s->nbctl_valid = true;
2981
2982 value |= mask;
2983 amd64_write_pci_cfg(F3, NBCTL, value);
2984
2985 amd64_read_pci_cfg(F3, NBCFG, &value);
2986
2987 edac_dbg(0, "1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
2988 nid, value, !!(value & NBCFG_ECC_ENABLE));
2989
2990 if (!(value & NBCFG_ECC_ENABLE)) {
2991 amd64_warn("DRAM ECC disabled on this node, enabling...\n");
2992
2993 s->flags.nb_ecc_prev = 0;
2994
2995 /* Attempt to turn on DRAM ECC Enable */
2996 value |= NBCFG_ECC_ENABLE;
2997 amd64_write_pci_cfg(F3, NBCFG, value);
2998
2999 amd64_read_pci_cfg(F3, NBCFG, &value);
3000
3001 if (!(value & NBCFG_ECC_ENABLE)) {
3002 amd64_warn("Hardware rejected DRAM ECC enable,"
3003 "check memory DIMM configuration.\n");
3004 ret = false;
3005 } else {
3006 amd64_info("Hardware accepted DRAM ECC Enable\n");
3007 }
3008 } else {
3009 s->flags.nb_ecc_prev = 1;
3010 }
3011
3012 edac_dbg(0, "2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
3013 nid, value, !!(value & NBCFG_ECC_ENABLE));
3014
3015 return ret;
3016 }
3017
3018 static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
3019 struct pci_dev *F3)
3020 {
3021 u32 value, mask = 0x3; /* UECC/CECC enable */
3022
3023 if (!s->nbctl_valid)
3024 return;
3025
3026 amd64_read_pci_cfg(F3, NBCTL, &value);
3027 value &= ~mask;
3028 value |= s->old_nbctl;
3029
3030 amd64_write_pci_cfg(F3, NBCTL, value);
3031
3032 /* restore previous BIOS DRAM ECC "off" setting we force-enabled */
3033 if (!s->flags.nb_ecc_prev) {
3034 amd64_read_pci_cfg(F3, NBCFG, &value);
3035 value &= ~NBCFG_ECC_ENABLE;
3036 amd64_write_pci_cfg(F3, NBCFG, value);
3037 }
3038
3039 /* restore the NB Enable MCGCTL bit */
3040 if (toggle_ecc_err_reporting(s, nid, OFF))
3041 amd64_warn("Error restoring NB MCGCTL settings!\n");
3042 }
3043
3044 /*
3045 * EDAC requires that the BIOS have ECC enabled before
3046 * taking over the processing of ECC errors. A command line
3047 * option allows force-enabling hardware ECC later in
3048 * enable_ecc_error_reporting().
3049 */
3050 static const char *ecc_msg =
3051 "ECC disabled in the BIOS or no ECC capability, module will not load.\n"
3052 " Either enable ECC checking or force module loading by setting "
3053 "'ecc_enable_override'.\n"
3054 " (Note that use of the override may cause unknown side effects.)\n";
3055
3056 static bool ecc_enabled(struct pci_dev *F3, u16 nid)
3057 {
3058 bool nb_mce_en = false;
3059 u8 ecc_en = 0, i;
3060 u32 value;
3061
3062 if (boot_cpu_data.x86 >= 0x17) {
3063 u8 umc_en_mask = 0, ecc_en_mask = 0;
3064
3065 for (i = 0; i < NUM_UMCS; i++) {
3066 u32 base = get_umc_base(i);
3067
3068 /* Only check enabled UMCs. */
3069 if (amd_smn_read(nid, base + UMCCH_SDP_CTRL, &value))
3070 continue;
3071
3072 if (!(value & UMC_SDP_INIT))
3073 continue;
3074
3075 umc_en_mask |= BIT(i);
3076
3077 if (amd_smn_read(nid, base + UMCCH_UMC_CAP_HI, &value))
3078 continue;
3079
3080 if (value & UMC_ECC_ENABLED)
3081 ecc_en_mask |= BIT(i);
3082 }
3083
3084 /* Check whether at least one UMC is enabled: */
3085 if (umc_en_mask)
3086 ecc_en = umc_en_mask == ecc_en_mask;
3087 else
3088 edac_dbg(0, "Node %d: No enabled UMCs.\n", nid);
3089
3090 /* Assume UMC MCA banks are enabled. */
3091 nb_mce_en = true;
3092 } else {
3093 amd64_read_pci_cfg(F3, NBCFG, &value);
3094
3095 ecc_en = !!(value & NBCFG_ECC_ENABLE);
3096
3097 nb_mce_en = nb_mce_bank_enabled_on_node(nid);
3098 if (!nb_mce_en)
3099 edac_dbg(0, "NB MCE bank disabled, set MSR 0x%08x[4] on node %d to enable.\n",
3100 MSR_IA32_MCG_CTL, nid);
3101 }
3102
3103 amd64_info("Node %d: DRAM ECC %s.\n",
3104 nid, (ecc_en ? "enabled" : "disabled"));
3105
3106 if (!ecc_en || !nb_mce_en) {
3107 amd64_info("%s", ecc_msg);
3108 return false;
3109 }
3110 return true;
3111 }
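
/*
 * Editor's note on the mask comparison above: DRAM ECC is only reported as
 * enabled when every enabled UMC also has ECC enabled. Sketch with
 * hypothetical masks:
 */
static bool __maybe_unused umc_ecc_policy_example(void)
{
	u8 umc_en_mask = BIT(0) | BIT(1);	/* both UMCs enabled */
	u8 ecc_en_mask = BIT(0);		/* but only UMC0 has ECC on */

	return umc_en_mask && (umc_en_mask == ecc_en_mask);	/* -> false */
}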
3112
3113 static inline void
3114 f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
3115 {
3116 u8 i, ecc_en = 1, cpk_en = 1, dev_x4 = 1, dev_x16 = 1;
3117
3118 for (i = 0; i < NUM_UMCS; i++) {
3119 if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
3120 ecc_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_ENABLED);
3121 cpk_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_CHIPKILL_CAP);
3122
3123 dev_x4 &= !!(pvt->umc[i].dimm_cfg & BIT(6));
3124 dev_x16 &= !!(pvt->umc[i].dimm_cfg & BIT(7));
3125 }
3126 }
3127
3128 /* Set chipkill only if ECC is enabled: */
3129 if (ecc_en) {
3130 mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
3131
3132 if (!cpk_en)
3133 return;
3134
3135 if (dev_x4)
3136 mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
3137 else if (dev_x16)
3138 mci->edac_ctl_cap |= EDAC_FLAG_S16ECD16ED;
3139 else
3140 mci->edac_ctl_cap |= EDAC_FLAG_S8ECD8ED;
3141 }
3142 }
3143
3144 static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
3145 struct amd64_family_type *fam)
3146 {
3147 struct amd64_pvt *pvt = mci->pvt_info;
3148
3149 mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
3150 mci->edac_ctl_cap = EDAC_FLAG_NONE;
3151
3152 if (pvt->umc) {
3153 f17h_determine_edac_ctl_cap(mci, pvt);
3154 } else {
3155 if (pvt->nbcap & NBCAP_SECDED)
3156 mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
3157
3158 if (pvt->nbcap & NBCAP_CHIPKILL)
3159 mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
3160 }
3161
3162 mci->edac_cap = determine_edac_cap(pvt);
3163 mci->mod_name = EDAC_MOD_STR;
3164 mci->ctl_name = fam->ctl_name;
3165 mci->dev_name = pci_name(pvt->F3);
3166 mci->ctl_page_to_phys = NULL;
3167
3168 /* memory scrubber interface */
3169 mci->set_sdram_scrub_rate = set_scrub_rate;
3170 mci->get_sdram_scrub_rate = get_scrub_rate;
3171 }
3172
3173 /*
3174 * returns a pointer to the family descriptor on success, NULL otherwise.
3175 */
3176 static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
3177 {
3178 struct amd64_family_type *fam_type = NULL;
3179
3180 pvt->ext_model = boot_cpu_data.x86_model >> 4;
3181 pvt->stepping = boot_cpu_data.x86_stepping;
3182 pvt->model = boot_cpu_data.x86_model;
3183 pvt->fam = boot_cpu_data.x86;
3184
3185 switch (pvt->fam) {
3186 case 0xf:
3187 fam_type = &family_types[K8_CPUS];
3188 pvt->ops = &family_types[K8_CPUS].ops;
3189 break;
3190
3191 case 0x10:
3192 fam_type = &family_types[F10_CPUS];
3193 pvt->ops = &family_types[F10_CPUS].ops;
3194 break;
3195
3196 case 0x15:
3197 if (pvt->model == 0x30) {
3198 fam_type = &family_types[F15_M30H_CPUS];
3199 pvt->ops = &family_types[F15_M30H_CPUS].ops;
3200 break;
3201 } else if (pvt->model == 0x60) {
3202 fam_type = &family_types[F15_M60H_CPUS];
3203 pvt->ops = &family_types[F15_M60H_CPUS].ops;
3204 break;
3205 }
3206
3207 fam_type = &family_types[F15_CPUS];
3208 pvt->ops = &family_types[F15_CPUS].ops;
3209 break;
3210
3211 case 0x16:
3212 if (pvt->model == 0x30) {
3213 fam_type = &family_types[F16_M30H_CPUS];
3214 pvt->ops = &family_types[F16_M30H_CPUS].ops;
3215 break;
3216 }
3217 fam_type = &family_types[F16_CPUS];
3218 pvt->ops = &family_types[F16_CPUS].ops;
3219 break;
3220
3221 case 0x17:
3222 if (pvt->model >= 0x10 && pvt->model <= 0x2f) {
3223 fam_type = &family_types[F17_M10H_CPUS];
3224 pvt->ops = &family_types[F17_M10H_CPUS].ops;
3225 break;
3226 } else if (pvt->model >= 0x30 && pvt->model <= 0x3f) {
3227 fam_type = &family_types[F17_M30H_CPUS];
3228 pvt->ops = &family_types[F17_M30H_CPUS].ops;
3229 break;
3230 }
3231 fam_type = &family_types[F17_CPUS];
3232 pvt->ops = &family_types[F17_CPUS].ops;
3233 break;
3234
3235 default:
3236 amd64_err("Unsupported family!\n");
3237 return NULL;
3238 }
3239
3240 amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
3241 (pvt->fam == 0xf ?
3242 (pvt->ext_model >= K8_REV_F ? "revF or later "
3243 : "revE or earlier ")
3244 : ""), pvt->mc_node_id);
3245 return fam_type;
3246 }
3247
3248 static const struct attribute_group *amd64_edac_attr_groups[] = {
3249 #ifdef CONFIG_EDAC_DEBUG
3250 &amd64_edac_dbg_group,
3251 #endif
3252 #ifdef CONFIG_EDAC_AMD64_ERROR_INJECTION
3253 &amd64_edac_inj_group,
3254 #endif
3255 NULL
3256 };
3257
3258 static int init_one_instance(unsigned int nid)
3259 {
3260 struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
3261 struct amd64_family_type *fam_type = NULL;
3262 struct mem_ctl_info *mci = NULL;
3263 struct edac_mc_layer layers[2];
3264 struct amd64_pvt *pvt = NULL;
3265 u16 pci_id1, pci_id2;
3266 int err = 0, ret;
3267
3268 ret = -ENOMEM;
3269 pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
3270 if (!pvt)
3271 goto err_ret;
3272
3273 pvt->mc_node_id = nid;
3274 pvt->F3 = F3;
3275
3276 ret = -EINVAL;
3277 fam_type = per_family_init(pvt);
3278 if (!fam_type)
3279 goto err_free;
3280
3281 if (pvt->fam >= 0x17) {
3282 pvt->umc = kcalloc(NUM_UMCS, sizeof(struct amd64_umc), GFP_KERNEL);
3283 if (!pvt->umc) {
3284 ret = -ENOMEM;
3285 goto err_free;
3286 }
3287
3288 pci_id1 = fam_type->f0_id;
3289 pci_id2 = fam_type->f6_id;
3290 } else {
3291 pci_id1 = fam_type->f1_id;
3292 pci_id2 = fam_type->f2_id;
3293 }
3294
3295 err = reserve_mc_sibling_devs(pvt, pci_id1, pci_id2);
3296 if (err)
3297 goto err_post_init;
3298
3299 read_mc_regs(pvt);
3300
3301 /*
3302 * We need to determine how many memory channels there are. Then use
3303 * that information for calculating the size of the dynamic instance
3304 * tables in the 'mci' structure.
3305 */
3306 ret = -EINVAL;
3307 pvt->channel_count = pvt->ops->early_channel_count(pvt);
3308 if (pvt->channel_count < 0)
3309 goto err_siblings;
3310
3311 ret = -ENOMEM;
3312 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
3313 layers[0].size = pvt->csels[0].b_cnt;
3314 layers[0].is_virt_csrow = true;
3315 layers[1].type = EDAC_MC_LAYER_CHANNEL;
3316
3317 /*
3318 * Always allocate two channels since we can have setups with DIMMs on
3319 * only one channel. Also, this simplifies handling later for the price
3320 * of a couple of KBs tops.
3321 */
3322 layers[1].size = 2;
3323 layers[1].is_virt_csrow = false;
3324
3325 mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0);
3326 if (!mci)
3327 goto err_siblings;
3328
3329 mci->pvt_info = pvt;
3330 mci->pdev = &pvt->F3->dev;
3331
3332 setup_mci_misc_attrs(mci, fam_type);
3333
3334 if (init_csrows(mci))
3335 mci->edac_cap = EDAC_FLAG_NONE;
3336
3337 ret = -ENODEV;
3338 if (edac_mc_add_mc_with_groups(mci, amd64_edac_attr_groups)) {
3339 edac_dbg(1, "failed edac_mc_add_mc()\n");
3340 goto err_add_mc;
3341 }
3342
3343 return 0;
3344
3345 err_add_mc:
3346 edac_mc_free(mci);
3347
3348 err_siblings:
3349 free_mc_sibling_devs(pvt);
3350
3351 err_post_init:
3352 if (pvt->fam >= 0x17)
3353 kfree(pvt->umc);
3354
3355 err_free:
3356 kfree(pvt);
3357
3358 err_ret:
3359 return ret;
3360 }
3361
3362 static int probe_one_instance(unsigned int nid)
3363 {
3364 struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
3365 struct ecc_settings *s;
3366 int ret;
3367
3368 ret = -ENOMEM;
3369 s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
3370 if (!s)
3371 goto err_out;
3372
3373 ecc_stngs[nid] = s;
3374
3375 if (!ecc_enabled(F3, nid)) {
3376 ret = 0;
3377
3378 if (!ecc_enable_override)
3379 goto err_enable;
3380
3381 if (boot_cpu_data.x86 >= 0x17) {
3382 amd64_warn("Forcing ECC on is not recommended on newer systems. Please enable ECC in BIOS.");
3383 goto err_enable;
3384 } else
3385 amd64_warn("Forcing ECC on!\n");
3386
3387 if (!enable_ecc_error_reporting(s, nid, F3))
3388 goto err_enable;
3389 }
3390
3391 ret = init_one_instance(nid);
3392 if (ret < 0) {
3393 amd64_err("Error probing instance: %d\n", nid);
3394
3395 if (boot_cpu_data.x86 < 0x17)
3396 restore_ecc_error_reporting(s, nid, F3);
3397
3398 goto err_enable;
3399 }
3400
3401 return ret;
3402
3403 err_enable:
3404 kfree(s);
3405 ecc_stngs[nid] = NULL;
3406
3407 err_out:
3408 return ret;
3409 }
3410
3411 static void remove_one_instance(unsigned int nid)
3412 {
3413 struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
3414 struct ecc_settings *s = ecc_stngs[nid];
3415 struct mem_ctl_info *mci;
3416 struct amd64_pvt *pvt;
3417
3418 mci = find_mci_by_dev(&F3->dev);
3419 WARN_ON(!mci);
3420
3421 /* Remove from EDAC CORE tracking list */
3422 mci = edac_mc_del_mc(&F3->dev);
3423 if (!mci)
3424 return;
3425
3426 pvt = mci->pvt_info;
3427
3428 restore_ecc_error_reporting(s, nid, F3);
3429
3430 free_mc_sibling_devs(pvt);
3431
3432 kfree(ecc_stngs[nid]);
3433 ecc_stngs[nid] = NULL;
3434
3435 /* Free the EDAC CORE resources */
3436 mci->pvt_info = NULL;
3437
3438 kfree(pvt);
3439 edac_mc_free(mci);
3440 }
3441
3442 static void setup_pci_device(void)
3443 {
3444 struct mem_ctl_info *mci;
3445 struct amd64_pvt *pvt;
3446
3447 if (pci_ctl)
3448 return;
3449
3450 mci = edac_mc_find(0);
3451 if (!mci)
3452 return;
3453
3454 pvt = mci->pvt_info;
3455 if (pvt->umc)
3456 pci_ctl = edac_pci_create_generic_ctl(&pvt->F0->dev, EDAC_MOD_STR);
3457 else
3458 pci_ctl = edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
3459 if (!pci_ctl) {
3460 pr_warn("%s(): Unable to create PCI control\n", __func__);
3461 pr_warn("%s(): PCI error report via EDAC not set\n", __func__);
3462 }
3463 }
3464
3465 static const struct x86_cpu_id amd64_cpuids[] = {
3466 { X86_VENDOR_AMD, 0xF, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
3467 { X86_VENDOR_AMD, 0x10, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
3468 { X86_VENDOR_AMD, 0x15, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
3469 { X86_VENDOR_AMD, 0x16, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
3470 { X86_VENDOR_AMD, 0x17, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
3471 { }
3472 };
3473 MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids);
3474
3475 static int __init amd64_edac_init(void)
3476 {
3477 const char *owner;
3478 int err = -ENODEV;
3479 int i;
3480
3481 owner = edac_get_owner();
3482 if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
3483 return -EBUSY;
3484
3485 if (!x86_match_cpu(amd64_cpuids))
3486 return -ENODEV;
3487
3488 if (amd_cache_northbridges() < 0)
3489 return -ENODEV;
3490
3491 opstate_init();
3492
3493 err = -ENOMEM;
3494 ecc_stngs = kcalloc(amd_nb_num(), sizeof(ecc_stngs[0]), GFP_KERNEL);
3495 if (!ecc_stngs)
3496 goto err_free;
3497
3498 msrs = msrs_alloc();
3499 if (!msrs)
3500 goto err_free;
3501
3502 for (i = 0; i < amd_nb_num(); i++) {
3503 err = probe_one_instance(i);
3504 if (err) {
3505 /* unwind properly */
3506 while (--i >= 0)
3507 remove_one_instance(i);
3508
3509 goto err_pci;
3510 }
3511 }
3512
3513 if (!edac_has_mcs()) {
3514 err = -ENODEV;
3515 goto err_pci;
3516 }
3517
3518 /* register stuff with EDAC MCE */
3519 if (report_gart_errors)
3520 amd_report_gart_errors(true);
3521
3522 if (boot_cpu_data.x86 >= 0x17)
3523 amd_register_ecc_decoder(decode_umc_error);
3524 else
3525 amd_register_ecc_decoder(decode_bus_error);
3526
3527 setup_pci_device();
3528
3529 #ifdef CONFIG_X86_32
3530 amd64_err("%s on 32-bit is unsupported. USE AT YOUR OWN RISK!\n", EDAC_MOD_STR);
3531 #endif
3532
3533 printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);
3534
3535 return 0;
3536
3537 err_pci:
3538 msrs_free(msrs);
3539 msrs = NULL;
3540
3541 err_free:
3542 kfree(ecc_stngs);
3543 ecc_stngs = NULL;
3544
3545 return err;
3546 }
3547
3548 static void __exit amd64_edac_exit(void)
3549 {
3550 int i;
3551
3552 if (pci_ctl)
3553 edac_pci_release_generic_ctl(pci_ctl);
3554
3555 /* unregister from EDAC MCE */
3556 amd_report_gart_errors(false);
3557
3558 if (boot_cpu_data.x86 >= 0x17)
3559 amd_unregister_ecc_decoder(decode_umc_error);
3560 else
3561 amd_unregister_ecc_decoder(decode_bus_error);
3562
3563 for (i = 0; i < amd_nb_num(); i++)
3564 remove_one_instance(i);
3565
3566 kfree(ecc_stngs);
3567 ecc_stngs = NULL;
3568
3569 msrs_free(msrs);
3570 msrs = NULL;
3571 }
3572
3573 module_init(amd64_edac_init);
3574 module_exit(amd64_edac_exit);
3575
3576 MODULE_LICENSE("GPL");
3577 MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
3578 "Dave Peterson, Thayne Harbaugh");
3579 MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
3580 EDAC_AMD64_VERSION);
3581
3582 module_param(edac_op_state, int, 0444);
3583 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
3584