/*
 * edac_mc kernel module
 * (C) 2005, 2006 Linux Networx (http://lnxi.com)
 * This file may be distributed under the terms of the
 * GNU General Public License.
 *
 * Written by Thayne Harbaugh
 * Based on work by Dan Hollis <goemon at anime dot net> and others.
 *	http://www.anime.net/~goemon/linux-ecc/
 *
 * Modified by Dave Peterson and Doug Thompson
 *
 */

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/ctype.h>
#include <linux/edac.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <asm/page.h>
#include "edac_mc.h"
#include "edac_module.h"
#include <ras/ras_event.h>

#ifdef CONFIG_EDAC_ATOMIC_SCRUB
#include <asm/edac.h>
#else
#define edac_atomic_scrub(va, size) do { } while (0)
#endif

int edac_op_state = EDAC_OPSTATE_INVAL;
EXPORT_SYMBOL_GPL(edac_op_state);

/* lock to memory controller's control array */
static DEFINE_MUTEX(mem_ctls_mutex);
static LIST_HEAD(mc_devices);

/*
 * Used to lock EDAC MC to just one module, avoiding two drivers (e.g.
 * apei/ghes and i7core_edac) being used at the same time.
 */
static const char *edac_mc_owner;

static struct mem_ctl_info *error_desc_to_mci(struct edac_raw_error_desc *e)
{
	return container_of(e, struct mem_ctl_info, error_desc);
}

unsigned int edac_dimm_info_location(struct dimm_info *dimm, char *buf,
				     unsigned int len)
{
	struct mem_ctl_info *mci = dimm->mci;
	int i, n, count = 0;
	char *p = buf;

	for (i = 0; i < mci->n_layers; i++) {
		n = snprintf(p, len, "%s %d ",
			     edac_layer_name[mci->layers[i].type],
			     dimm->location[i]);
		p += n;
		len -= n;
		count += n;
		if (!len)
			break;
	}

	return count;
}
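
/*
 * Example (illustrative): for a two-layer controller (channel/slot), the
 * location string built above reads e.g. "channel 1 slot 0 " for the DIMM
 * at channel 1, slot 0.
 */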

#ifdef CONFIG_EDAC_DEBUG

static void edac_mc_dump_channel(struct rank_info *chan)
{
	edac_dbg(4, "channel->chan_idx = %d\n", chan->chan_idx);
	edac_dbg(4, "  channel = %p\n", chan);
	edac_dbg(4, "  channel->csrow = %p\n", chan->csrow);
	edac_dbg(4, "  channel->dimm = %p\n", chan->dimm);
}

static void edac_mc_dump_dimm(struct dimm_info *dimm)
{
	char location[80];

	if (!dimm->nr_pages)
		return;

	edac_dimm_info_location(dimm, location, sizeof(location));

	edac_dbg(4, "%s%i: %smapped as virtual row %d, chan %d\n",
		 dimm->mci->csbased ? "rank" : "dimm",
		 dimm->idx, location, dimm->csrow, dimm->cschannel);
	edac_dbg(4, "  dimm = %p\n", dimm);
	edac_dbg(4, "  dimm->label = '%s'\n", dimm->label);
	edac_dbg(4, "  dimm->nr_pages = 0x%x\n", dimm->nr_pages);
	edac_dbg(4, "  dimm->grain = %d\n", dimm->grain);
}

static void edac_mc_dump_csrow(struct csrow_info *csrow)
{
	edac_dbg(4, "csrow->csrow_idx = %d\n", csrow->csrow_idx);
	edac_dbg(4, "  csrow = %p\n", csrow);
	edac_dbg(4, "  csrow->first_page = 0x%lx\n", csrow->first_page);
	edac_dbg(4, "  csrow->last_page = 0x%lx\n", csrow->last_page);
	edac_dbg(4, "  csrow->page_mask = 0x%lx\n", csrow->page_mask);
	edac_dbg(4, "  csrow->nr_channels = %d\n", csrow->nr_channels);
	edac_dbg(4, "  csrow->channels = %p\n", csrow->channels);
	edac_dbg(4, "  csrow->mci = %p\n", csrow->mci);
}

static void edac_mc_dump_mci(struct mem_ctl_info *mci)
{
	edac_dbg(3, "\tmci = %p\n", mci);
	edac_dbg(3, "\tmci->mtype_cap = %lx\n", mci->mtype_cap);
	edac_dbg(3, "\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap);
	edac_dbg(3, "\tmci->edac_cap = %lx\n", mci->edac_cap);
	edac_dbg(4, "\tmci->edac_check = %p\n", mci->edac_check);
	edac_dbg(3, "\tmci->nr_csrows = %d, csrows = %p\n",
		 mci->nr_csrows, mci->csrows);
	edac_dbg(3, "\tmci->nr_dimms = %d, dimms = %p\n",
		 mci->tot_dimms, mci->dimms);
	edac_dbg(3, "\tdev = %p\n", mci->pdev);
	edac_dbg(3, "\tmod_name:ctl_name = %s:%s\n",
		 mci->mod_name, mci->ctl_name);
	edac_dbg(3, "\tpvt_info = %p\n\n", mci->pvt_info);
}

#endif				/* CONFIG_EDAC_DEBUG */

const char * const edac_mem_types[] = {
	[MEM_EMPTY]	= "Empty",
	[MEM_RESERVED]	= "Reserved",
	[MEM_UNKNOWN]	= "Unknown",
	[MEM_FPM]	= "FPM",
	[MEM_EDO]	= "EDO",
	[MEM_BEDO]	= "BEDO",
	[MEM_SDR]	= "Unbuffered-SDR",
	[MEM_RDR]	= "Registered-SDR",
	[MEM_DDR]	= "Unbuffered-DDR",
	[MEM_RDDR]	= "Registered-DDR",
	[MEM_RMBS]	= "RMBS",
	[MEM_DDR2]	= "Unbuffered-DDR2",
	[MEM_FB_DDR2]	= "FullyBuffered-DDR2",
	[MEM_RDDR2]	= "Registered-DDR2",
	[MEM_XDR]	= "XDR",
	[MEM_DDR3]	= "Unbuffered-DDR3",
	[MEM_RDDR3]	= "Registered-DDR3",
	[MEM_LRDDR3]	= "Load-Reduced-DDR3-RAM",
	[MEM_LPDDR3]	= "Low-Power-DDR3-RAM",
	[MEM_DDR4]	= "Unbuffered-DDR4",
	[MEM_RDDR4]	= "Registered-DDR4",
	[MEM_LPDDR4]	= "Low-Power-DDR4-RAM",
	[MEM_LRDDR4]	= "Load-Reduced-DDR4-RAM",
	[MEM_DDR5]	= "Unbuffered-DDR5",
	[MEM_NVDIMM]	= "Non-volatile-RAM",
	[MEM_WIO2]	= "Wide-IO-2",
	[MEM_HBM2]	= "High-bandwidth-memory-Gen2",
};
EXPORT_SYMBOL_GPL(edac_mem_types);

/**
 * edac_align_ptr - Prepares the pointer offsets for a single-shot allocation
 * @p: pointer to a pointer with the memory offset to be used. At
 *	return, this will be incremented to point to the next offset
 * @size: Size of the data structure to be reserved
 * @n_elems: Number of elements that should be reserved
 *
 * If 'size' is a constant, the compiler will optimize this whole function
 * down to either a no-op or the addition of a constant to the value of '*p'.
 *
 * The 'p' pointer is needed to keep advancing to the proper offsets in
 * memory when allocating the struct along with its embedded structs, as
 * edac_mc_alloc() does below, for example.
 *
 * At return, the pointer 'p' will be incremented to be used on a next call
 * to this function.
 */
void *edac_align_ptr(void **p, unsigned int size, int n_elems)
{
	unsigned int align, r;
	void *ptr = *p;

	*p += size * n_elems;

	/*
	 * 'p' can possibly be an unaligned item X such that sizeof(X) is
	 * 'size'.  Adjust 'p' so that its alignment is at least as
	 * stringent as what the compiler would provide for X and return
	 * the aligned result.
	 * Here we assume that the alignment of a "long long" is the most
	 * stringent alignment that the compiler will ever provide by default.
	 * As far as I know, this is a reasonable assumption.
	 */
	if (size > sizeof(long))
		align = sizeof(long long);
	else if (size > sizeof(int))
		align = sizeof(long);
	else if (size > sizeof(short))
		align = sizeof(int);
	else if (size > sizeof(char))
		align = sizeof(short);
	else
		return ptr;

	r = (unsigned long)ptr % align;

	if (r == 0)
		return ptr;

	*p += align - r;

	return (void *)(((unsigned long)ptr) + align - r);
}
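
/*
 * Illustrative sketch of the single-shot allocation pattern this helper
 * supports (struct names here are made up, not from a specific driver):
 *
 *	void *ptr = NULL;
 *	struct foo *foo = edac_align_ptr(&ptr, sizeof(*foo), 1);
 *	struct bar *bar = edac_align_ptr(&ptr, sizeof(*bar), n_bars);
 *
 * After these calls, 'foo' and 'bar' hold properly aligned *offsets*
 * computed from address 0; one allocation of the total size is then made
 * and the offsets are rebased onto the allocated block, exactly as
 * edac_mc_alloc() does below.
 */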

static void _edac_mc_free(struct mem_ctl_info *mci)
{
	put_device(&mci->dev);
}
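
/*
 * Note: the actual freeing happens in mci_release(), the struct device
 * release callback below, so a partially-constructed mci (e.g. from a
 * failed edac_mc_alloc()) is torn down through the same refcounted path.
 */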

static void mci_release(struct device *dev)
{
	struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev);
	struct csrow_info *csr;
	int i, chn, row;

	if (mci->dimms) {
		for (i = 0; i < mci->tot_dimms; i++)
			kfree(mci->dimms[i]);
		kfree(mci->dimms);
	}

	if (mci->csrows) {
		for (row = 0; row < mci->nr_csrows; row++) {
			csr = mci->csrows[row];
			if (!csr)
				continue;

			if (csr->channels) {
				for (chn = 0; chn < mci->num_cschannel; chn++)
					kfree(csr->channels[chn]);
				kfree(csr->channels);
			}
			kfree(csr);
		}
		kfree(mci->csrows);
	}
	kfree(mci);
}

static int edac_mc_alloc_csrows(struct mem_ctl_info *mci)
{
	unsigned int tot_channels = mci->num_cschannel;
	unsigned int tot_csrows = mci->nr_csrows;
	unsigned int row, chn;

	/*
	 * Allocate and fill the csrow/channels structs
	 */
	mci->csrows = kcalloc(tot_csrows, sizeof(*mci->csrows), GFP_KERNEL);
	if (!mci->csrows)
		return -ENOMEM;

	for (row = 0; row < tot_csrows; row++) {
		struct csrow_info *csr;

		csr = kzalloc(sizeof(**mci->csrows), GFP_KERNEL);
		if (!csr)
			return -ENOMEM;

		mci->csrows[row] = csr;
		csr->csrow_idx = row;
		csr->mci = mci;
		csr->nr_channels = tot_channels;
		csr->channels = kcalloc(tot_channels, sizeof(*csr->channels),
					GFP_KERNEL);
		if (!csr->channels)
			return -ENOMEM;

		for (chn = 0; chn < tot_channels; chn++) {
			struct rank_info *chan;

			chan = kzalloc(sizeof(**csr->channels), GFP_KERNEL);
			if (!chan)
				return -ENOMEM;

			csr->channels[chn] = chan;
			chan->chan_idx = chn;
			chan->csrow = csr;
		}
	}

	return 0;
}

static int edac_mc_alloc_dimms(struct mem_ctl_info *mci)
{
	unsigned int pos[EDAC_MAX_LAYERS];
	unsigned int row, chn, idx;
	int layer;
	void *p;

	/*
	 * Allocate and fill the dimm structs
	 */
	mci->dimms = kcalloc(mci->tot_dimms, sizeof(*mci->dimms), GFP_KERNEL);
	if (!mci->dimms)
		return -ENOMEM;

	memset(&pos, 0, sizeof(pos));
	row = 0;
	chn = 0;
	for (idx = 0; idx < mci->tot_dimms; idx++) {
		struct dimm_info *dimm;
		struct rank_info *chan;
		int n, len;

		chan = mci->csrows[row]->channels[chn];

		dimm = kzalloc(sizeof(**mci->dimms), GFP_KERNEL);
		if (!dimm)
			return -ENOMEM;
		mci->dimms[idx] = dimm;
		dimm->mci = mci;
		dimm->idx = idx;

		/*
		 * Copy DIMM location and initialize it.
		 */
		len = sizeof(dimm->label);
		p = dimm->label;
		n = snprintf(p, len, "mc#%u", mci->mc_idx);
		p += n;
		len -= n;
		for (layer = 0; layer < mci->n_layers; layer++) {
			n = snprintf(p, len, "%s#%u",
				     edac_layer_name[mci->layers[layer].type],
				     pos[layer]);
			p += n;
			len -= n;
			dimm->location[layer] = pos[layer];

			if (len <= 0)
				break;
		}

		/* Link it to the csrows old API data */
		chan->dimm = dimm;
		dimm->csrow = row;
		dimm->cschannel = chn;

		/* Increment csrow location */
		if (mci->layers[0].is_virt_csrow) {
			chn++;
			if (chn == mci->num_cschannel) {
				chn = 0;
				row++;
			}
		} else {
			row++;
			if (row == mci->nr_csrows) {
				row = 0;
				chn++;
			}
		}

		/* Increment dimm location */
		for (layer = mci->n_layers - 1; layer >= 0; layer--) {
			pos[layer]++;
			if (pos[layer] < mci->layers[layer].size)
				break;
			pos[layer] = 0;
		}
	}

	return 0;
}

struct mem_ctl_info *edac_mc_alloc(unsigned int mc_num,
				   unsigned int n_layers,
				   struct edac_mc_layer *layers,
				   unsigned int sz_pvt)
{
	struct mem_ctl_info *mci;
	struct edac_mc_layer *layer;
	unsigned int idx, size, tot_dimms = 1;
	unsigned int tot_csrows = 1, tot_channels = 1;
	void *pvt, *ptr = NULL;
	bool per_rank = false;

	if (WARN_ON(n_layers > EDAC_MAX_LAYERS || n_layers == 0))
		return NULL;

	/*
	 * Calculate the total number of DIMMs and of csrows/cschannels used
	 * by the old API emulation mode
	 */
	for (idx = 0; idx < n_layers; idx++) {
		tot_dimms *= layers[idx].size;

		if (layers[idx].is_virt_csrow)
			tot_csrows *= layers[idx].size;
		else
			tot_channels *= layers[idx].size;

		if (layers[idx].type == EDAC_MC_LAYER_CHIP_SELECT)
			per_rank = true;
	}

	/* Figure out the offsets of the various items from the start of an mc
	 * structure.  We want the alignment of each item to be at least as
	 * stringent as what the compiler would provide if we could simply
	 * hardcode everything into a single struct.
	 */
	mci = edac_align_ptr(&ptr, sizeof(*mci), 1);
	layer = edac_align_ptr(&ptr, sizeof(*layer), n_layers);
	pvt = edac_align_ptr(&ptr, sz_pvt, 1);
	size = ((unsigned long)pvt) + sz_pvt;

	edac_dbg(1, "allocating %u bytes for mci data (%d %s, %d csrows/channels)\n",
		 size,
		 tot_dimms,
		 per_rank ? "ranks" : "dimms",
		 tot_csrows * tot_channels);

	mci = kzalloc(size, GFP_KERNEL);
	if (mci == NULL)
		return NULL;

	mci->dev.release = mci_release;
	device_initialize(&mci->dev);

	/* Adjust pointers so they point within the memory we just allocated
	 * rather than an imaginary chunk of memory located at address 0.
	 */
	layer = (struct edac_mc_layer *)(((char *)mci) + ((unsigned long)layer));
	pvt = sz_pvt ? (((char *)mci) + ((unsigned long)pvt)) : NULL;

	/* setup index and various internal pointers */
	mci->mc_idx = mc_num;
	mci->tot_dimms = tot_dimms;
	mci->pvt_info = pvt;
	mci->n_layers = n_layers;
	mci->layers = layer;
	memcpy(mci->layers, layers, sizeof(*layer) * n_layers);
	mci->nr_csrows = tot_csrows;
	mci->num_cschannel = tot_channels;
	mci->csbased = per_rank;

	if (edac_mc_alloc_csrows(mci))
		goto error;

	if (edac_mc_alloc_dimms(mci))
		goto error;

	mci->op_state = OP_ALLOC;

	return mci;

error:
	_edac_mc_free(mci);

	return NULL;
}
EXPORT_SYMBOL_GPL(edac_mc_alloc);
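
/*
 * Example (illustrative; layer sizes and the private struct are made up,
 * not from a specific driver): a controller with two channels and four
 * DIMM slots per channel could be allocated as:
 *
 *	struct edac_mc_layer layers[2];
 *
 *	layers[0].type = EDAC_MC_LAYER_CHANNEL;
 *	layers[0].size = 2;
 *	layers[0].is_virt_csrow = false;
 *	layers[1].type = EDAC_MC_LAYER_SLOT;
 *	layers[1].size = 4;
 *	layers[1].is_virt_csrow = true;
 *	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
 *			    sizeof(struct my_pvt_info));
 *
 * This yields tot_dimms = 8, with four virtual csrows and two channels
 * for the legacy sysfs API.
 */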

void edac_mc_free(struct mem_ctl_info *mci)
{
	edac_dbg(1, "\n");

	_edac_mc_free(mci);
}
EXPORT_SYMBOL_GPL(edac_mc_free);

bool edac_has_mcs(void)
{
	bool ret;

	mutex_lock(&mem_ctls_mutex);

	ret = list_empty(&mc_devices);

	mutex_unlock(&mem_ctls_mutex);

	return !ret;
}
EXPORT_SYMBOL_GPL(edac_has_mcs);

/* Caller must hold mem_ctls_mutex */
static struct mem_ctl_info *__find_mci_by_dev(struct device *dev)
{
	struct mem_ctl_info *mci;
	struct list_head *item;

	edac_dbg(3, "\n");

	list_for_each(item, &mc_devices) {
		mci = list_entry(item, struct mem_ctl_info, link);

		if (mci->pdev == dev)
			return mci;
	}

	return NULL;
}

/**
 * find_mci_by_dev
 *
 *	Scan the list of controllers, looking for the one that manages
 *	the 'dev' device.
 * @dev: pointer to a struct device related to the MCI
 */
struct mem_ctl_info *find_mci_by_dev(struct device *dev)
{
	struct mem_ctl_info *ret;

	mutex_lock(&mem_ctls_mutex);
	ret = __find_mci_by_dev(dev);
	mutex_unlock(&mem_ctls_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(find_mci_by_dev);

/*
 * edac_mc_workq_function
 *	performs the operation scheduled by a workq request
 */
static void edac_mc_workq_function(struct work_struct *work_req)
{
	struct delayed_work *d_work = to_delayed_work(work_req);
	struct mem_ctl_info *mci = to_edac_mem_ctl_work(d_work);

	mutex_lock(&mem_ctls_mutex);

	if (mci->op_state != OP_RUNNING_POLL) {
		mutex_unlock(&mem_ctls_mutex);
		return;
	}

	if (edac_op_state == EDAC_OPSTATE_POLL)
		mci->edac_check(mci);

	mutex_unlock(&mem_ctls_mutex);

	/* Queue ourselves again. */
	edac_queue_work(&mci->work, msecs_to_jiffies(edac_mc_get_poll_msec()));
}

/*
 * edac_mc_reset_delay_period(unsigned long value)
 *
 *	User space has updated our poll period value, so reset the
 *	delay of all pending polling work.
 */
void edac_mc_reset_delay_period(unsigned long value)
{
	struct mem_ctl_info *mci;
	struct list_head *item;

	mutex_lock(&mem_ctls_mutex);

	list_for_each(item, &mc_devices) {
		mci = list_entry(item, struct mem_ctl_info, link);

		if (mci->op_state == OP_RUNNING_POLL)
			edac_mod_work(&mci->work, value);
	}
	mutex_unlock(&mem_ctls_mutex);
}

/* Return 0 on success, 1 on failure.
 * Before calling this function, caller must
 * assign a unique value to mci->mc_idx.
 *
 *	locking model:
 *
 *		called with the mem_ctls_mutex lock held
 */
static int add_mc_to_global_list(struct mem_ctl_info *mci)
{
	struct list_head *item, *insert_before;
	struct mem_ctl_info *p;

	insert_before = &mc_devices;

	p = __find_mci_by_dev(mci->pdev);
	if (unlikely(p != NULL))
		goto fail0;

	list_for_each(item, &mc_devices) {
		p = list_entry(item, struct mem_ctl_info, link);

		if (p->mc_idx >= mci->mc_idx) {
			if (unlikely(p->mc_idx == mci->mc_idx))
				goto fail1;

			insert_before = item;
			break;
		}
	}

	list_add_tail_rcu(&mci->link, insert_before);
	return 0;

fail0:
	edac_printk(KERN_WARNING, EDAC_MC,
		    "%s (%s) %s %s already assigned %d\n", dev_name(p->pdev),
		    edac_dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx);
	return 1;

fail1:
	edac_printk(KERN_WARNING, EDAC_MC,
		    "bug in low-level driver: attempt to assign\n"
		    "    duplicate mc_idx %d in %s()\n", p->mc_idx, __func__);
	return 1;
}

static int del_mc_from_global_list(struct mem_ctl_info *mci)
{
	list_del_rcu(&mci->link);

	/* these are for safe removal of devices from global list while
	 * NMI handlers may be traversing list
	 */
	synchronize_rcu();
	INIT_LIST_HEAD(&mci->link);

	return list_empty(&mc_devices);
}

struct mem_ctl_info *edac_mc_find(int idx)
{
	struct mem_ctl_info *mci;
	struct list_head *item;

	mutex_lock(&mem_ctls_mutex);

	list_for_each(item, &mc_devices) {
		mci = list_entry(item, struct mem_ctl_info, link);
		if (mci->mc_idx == idx)
			goto unlock;
	}

	mci = NULL;
unlock:
	mutex_unlock(&mem_ctls_mutex);
	return mci;
}
EXPORT_SYMBOL(edac_mc_find);

const char *edac_get_owner(void)
{
	return edac_mc_owner;
}
EXPORT_SYMBOL_GPL(edac_get_owner);

/* FIXME - should a warning be printed if no error detection? correction? */
int edac_mc_add_mc_with_groups(struct mem_ctl_info *mci,
			       const struct attribute_group **groups)
{
	int ret = -EINVAL;
	edac_dbg(0, "\n");

#ifdef CONFIG_EDAC_DEBUG
	if (edac_debug_level >= 3)
		edac_mc_dump_mci(mci);

	if (edac_debug_level >= 4) {
		struct dimm_info *dimm;
		int i;

		for (i = 0; i < mci->nr_csrows; i++) {
			struct csrow_info *csrow = mci->csrows[i];
			u32 nr_pages = 0;
			int j;

			for (j = 0; j < csrow->nr_channels; j++)
				nr_pages += csrow->channels[j]->dimm->nr_pages;
			if (!nr_pages)
				continue;
			edac_mc_dump_csrow(csrow);
			for (j = 0; j < csrow->nr_channels; j++)
				if (csrow->channels[j]->dimm->nr_pages)
					edac_mc_dump_channel(csrow->channels[j]);
		}

		mci_for_each_dimm(mci, dimm)
			edac_mc_dump_dimm(dimm);
	}
#endif
	mutex_lock(&mem_ctls_mutex);

	if (edac_mc_owner && edac_mc_owner != mci->mod_name) {
		ret = -EPERM;
		goto fail0;
	}

	if (add_mc_to_global_list(mci))
		goto fail0;

	/* set load time so that error rate can be tracked */
	mci->start_time = jiffies;

	mci->bus = edac_get_sysfs_subsys();

	if (edac_create_sysfs_mci_device(mci, groups)) {
		edac_mc_printk(mci, KERN_WARNING,
			       "failed to create sysfs device\n");
		goto fail1;
	}

	if (mci->edac_check) {
		mci->op_state = OP_RUNNING_POLL;

		INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
		edac_queue_work(&mci->work, msecs_to_jiffies(edac_mc_get_poll_msec()));

	} else {
		mci->op_state = OP_RUNNING_INTERRUPT;
	}

	/* Report action taken */
	edac_mc_printk(mci, KERN_INFO,
		       "Giving out device to module %s controller %s: DEV %s (%s)\n",
		       mci->mod_name, mci->ctl_name, mci->dev_name,
		       edac_op_state_to_string(mci->op_state));

	edac_mc_owner = mci->mod_name;

	mutex_unlock(&mem_ctls_mutex);
	return 0;

fail1:
	del_mc_from_global_list(mci);

fail0:
	mutex_unlock(&mem_ctls_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(edac_mc_add_mc_with_groups);
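
/*
 * Typical driver lifecycle (sketch, assuming the common EDAC driver
 * pattern): at probe time, edac_mc_alloc() the mci, fill in the controller
 * fields and DIMM data, then publish it with edac_mc_add_mc_with_groups()
 * (or the edac_mc_add_mc() wrapper, which passes NULL groups).  At remove
 * time, unpublish with edac_mc_del_mc() and release the structure with
 * edac_mc_free().
 */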

struct mem_ctl_info *edac_mc_del_mc(struct device *dev)
{
	struct mem_ctl_info *mci;

	edac_dbg(0, "\n");

	mutex_lock(&mem_ctls_mutex);

	/* find the requested mci struct in the global list */
	mci = __find_mci_by_dev(dev);
	if (mci == NULL) {
		mutex_unlock(&mem_ctls_mutex);
		return NULL;
	}

	/* mark MCI offline: */
	mci->op_state = OP_OFFLINE;

	if (del_mc_from_global_list(mci))
		edac_mc_owner = NULL;

	mutex_unlock(&mem_ctls_mutex);

	if (mci->edac_check)
		edac_stop_work(&mci->work);

	/* remove from sysfs */
	edac_remove_sysfs_mci_device(mci);

	edac_printk(KERN_INFO, EDAC_MC,
		    "Removed device %d for %s %s: DEV %s\n", mci->mc_idx,
		    mci->mod_name, mci->ctl_name, edac_dev_name(mci));

	return mci;
}
EXPORT_SYMBOL_GPL(edac_mc_del_mc);

static void edac_mc_scrub_block(unsigned long page, unsigned long offset,
				u32 size)
{
	struct page *pg;
	void *virt_addr;
	unsigned long flags = 0;

	edac_dbg(3, "\n");

	/* ECC error page was not in our memory. Ignore it. */
	if (!pfn_valid(page))
		return;

	/* Find the actual page structure then map it and fix */
	pg = pfn_to_page(page);

	if (PageHighMem(pg))
		local_irq_save(flags);

	virt_addr = kmap_atomic(pg);

	/* Perform architecture specific atomic scrub operation */
	edac_atomic_scrub(virt_addr + offset, size);

	/* Unmap and complete */
	kunmap_atomic(virt_addr);

	if (PageHighMem(pg))
		local_irq_restore(flags);
}

/* FIXME - should return -1 */
int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
{
	struct csrow_info **csrows = mci->csrows;
	int row, i, j, n;

	edac_dbg(1, "MC%d: 0x%lx\n", mci->mc_idx, page);
	row = -1;

	for (i = 0; i < mci->nr_csrows; i++) {
		struct csrow_info *csrow = csrows[i];
		n = 0;
		for (j = 0; j < csrow->nr_channels; j++) {
			struct dimm_info *dimm = csrow->channels[j]->dimm;
			n += dimm->nr_pages;
		}
		if (n == 0)
			continue;

		edac_dbg(3, "MC%d: first(0x%lx) page(0x%lx) last(0x%lx) mask(0x%lx)\n",
			 mci->mc_idx,
			 csrow->first_page, page, csrow->last_page,
			 csrow->page_mask);

		if ((page >= csrow->first_page) &&
		    (page <= csrow->last_page) &&
		    ((page & csrow->page_mask) ==
		     (csrow->first_page & csrow->page_mask))) {
			row = i;
			break;
		}
	}

	if (row == -1)
		edac_mc_printk(mci, KERN_ERR,
			       "could not look up page error address %lx\n",
			       (unsigned long)page);

	return row;
}
EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page);

const char *edac_layer_name[] = {
	[EDAC_MC_LAYER_BRANCH] = "branch",
	[EDAC_MC_LAYER_CHANNEL] = "channel",
	[EDAC_MC_LAYER_SLOT] = "slot",
	[EDAC_MC_LAYER_CHIP_SELECT] = "csrow",
	[EDAC_MC_LAYER_ALL_MEM] = "memory",
};
EXPORT_SYMBOL_GPL(edac_layer_name);
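
/*
 * E.g. the e->location string assembled in edac_mc_handle_error() below
 * combines these names as "channel:1 slot:0" for a two-layer controller.
 */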

static void edac_inc_ce_error(struct edac_raw_error_desc *e)
{
	int pos[EDAC_MAX_LAYERS] = { e->top_layer, e->mid_layer, e->low_layer };
	struct mem_ctl_info *mci = error_desc_to_mci(e);
	struct dimm_info *dimm = edac_get_dimm(mci, pos[0], pos[1], pos[2]);

	mci->ce_mc += e->error_count;

	if (dimm)
		dimm->ce_count += e->error_count;
	else
		mci->ce_noinfo_count += e->error_count;
}

static void edac_inc_ue_error(struct edac_raw_error_desc *e)
{
	int pos[EDAC_MAX_LAYERS] = { e->top_layer, e->mid_layer, e->low_layer };
	struct mem_ctl_info *mci = error_desc_to_mci(e);
	struct dimm_info *dimm = edac_get_dimm(mci, pos[0], pos[1], pos[2]);

	mci->ue_mc += e->error_count;

	if (dimm)
		dimm->ue_count += e->error_count;
	else
		mci->ue_noinfo_count += e->error_count;
}

static void edac_ce_error(struct edac_raw_error_desc *e)
{
	struct mem_ctl_info *mci = error_desc_to_mci(e);
	unsigned long remapped_page;

	if (edac_mc_get_log_ce()) {
		edac_mc_printk(mci, KERN_WARNING,
			       "%d CE %s%son %s (%s page:0x%lx offset:0x%lx grain:%ld syndrome:0x%lx%s%s)\n",
			       e->error_count, e->msg,
			       *e->msg ? " " : "",
			       e->label, e->location, e->page_frame_number, e->offset_in_page,
			       e->grain, e->syndrome,
			       *e->other_detail ? " - " : "",
			       e->other_detail);
	}

	edac_inc_ce_error(e);

	if (mci->scrub_mode == SCRUB_SW_SRC) {
		/*
		 * Some memory controllers (called MCs below) can remap
		 * memory so that it is still available at a different
		 * address when PCI devices map into memory.
		 * MCs that can't do this lose the memory where PCI
		 * devices are mapped.  This mapping is MC-dependent
		 * and so we call back into the MC driver for it to
		 * map the MC page to a physical (CPU) page which can
		 * then be mapped to a virtual page - which can then
		 * be scrubbed.
		 */
		remapped_page = mci->ctl_page_to_phys ?
			mci->ctl_page_to_phys(mci, e->page_frame_number) :
			e->page_frame_number;

		edac_mc_scrub_block(remapped_page, e->offset_in_page, e->grain);
	}
}

static void edac_ue_error(struct edac_raw_error_desc *e)
{
	struct mem_ctl_info *mci = error_desc_to_mci(e);

	if (edac_mc_get_log_ue()) {
		edac_mc_printk(mci, KERN_WARNING,
			       "%d UE %s%son %s (%s page:0x%lx offset:0x%lx grain:%ld%s%s)\n",
			       e->error_count, e->msg,
			       *e->msg ? " " : "",
			       e->label, e->location, e->page_frame_number, e->offset_in_page,
			       e->grain,
			       *e->other_detail ? " - " : "",
			       e->other_detail);
	}

	edac_inc_ue_error(e);

	if (edac_mc_get_panic_on_ue()) {
		panic("UE %s%son %s (%s page:0x%lx offset:0x%lx grain:%ld%s%s)\n",
		      e->msg,
		      *e->msg ? " " : "",
		      e->label, e->location, e->page_frame_number, e->offset_in_page,
		      e->grain,
		      *e->other_detail ? " - " : "",
		      e->other_detail);
	}
}

static void edac_inc_csrow(struct edac_raw_error_desc *e, int row, int chan)
{
	struct mem_ctl_info *mci = error_desc_to_mci(e);
	enum hw_event_mc_err_type type = e->type;
	u16 count = e->error_count;

	if (row < 0)
		return;

	edac_dbg(4, "csrow/channel to increment: (%d,%d)\n", row, chan);

	if (type == HW_EVENT_ERR_CORRECTED) {
		mci->csrows[row]->ce_count += count;
		if (chan >= 0)
			mci->csrows[row]->channels[chan]->ce_count += count;
	} else {
		mci->csrows[row]->ue_count += count;
	}
}

void edac_raw_mc_handle_error(struct edac_raw_error_desc *e)
{
	struct mem_ctl_info *mci = error_desc_to_mci(e);
	u8 grain_bits;

	/* Sanity-check driver-supplied grain value. */
	if (WARN_ON_ONCE(!e->grain))
		e->grain = 1;

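	/*
	 * Explanatory note: fls_long(grain - 1) is log2(grain) rounded up,
	 * so a grain of 8 bytes yields grain_bits == 3, and a
	 * non-power-of-two grain of 5 also yields 3.
	 */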
	grain_bits = fls_long(e->grain - 1);

	/* Report the error via the trace interface */
	if (IS_ENABLED(CONFIG_RAS))
		trace_mc_event(e->type, e->msg, e->label, e->error_count,
			       mci->mc_idx, e->top_layer, e->mid_layer,
			       e->low_layer,
			       (e->page_frame_number << PAGE_SHIFT) | e->offset_in_page,
			       grain_bits, e->syndrome, e->other_detail);

	if (e->type == HW_EVENT_ERR_CORRECTED)
		edac_ce_error(e);
	else
		edac_ue_error(e);
}
EXPORT_SYMBOL_GPL(edac_raw_mc_handle_error);

void edac_mc_handle_error(const enum hw_event_mc_err_type type,
			  struct mem_ctl_info *mci,
			  const u16 error_count,
			  const unsigned long page_frame_number,
			  const unsigned long offset_in_page,
			  const unsigned long syndrome,
			  const int top_layer,
			  const int mid_layer,
			  const int low_layer,
			  const char *msg,
			  const char *other_detail)
{
	struct dimm_info *dimm;
	char *p;
	int row = -1, chan = -1;
	int pos[EDAC_MAX_LAYERS] = { top_layer, mid_layer, low_layer };
	int i, n_labels = 0;
	struct edac_raw_error_desc *e = &mci->error_desc;
	bool any_memory = true;

	edac_dbg(3, "MC%d\n", mci->mc_idx);

	/* Fills the error report buffer */
	memset(e, 0, sizeof(*e));
	e->error_count = error_count;
	e->type = type;
	e->top_layer = top_layer;
	e->mid_layer = mid_layer;
	e->low_layer = low_layer;
	e->page_frame_number = page_frame_number;
	e->offset_in_page = offset_in_page;
	e->syndrome = syndrome;
	/* need valid strings here for both: */
	e->msg = msg ?: "";
	e->other_detail = other_detail ?: "";

	/*
	 * Check if the event report is consistent and if the memory location
	 * is known.  If it is, the DIMM(s) label info will be filled and the
	 * per-DIMM error counters will be incremented.
	 */
	for (i = 0; i < mci->n_layers; i++) {
		if (pos[i] >= (int)mci->layers[i].size) {

			edac_mc_printk(mci, KERN_ERR,
				       "INTERNAL ERROR: %s value is out of range (%d >= %d)\n",
				       edac_layer_name[mci->layers[i].type],
				       pos[i], mci->layers[i].size);
			/*
			 * Instead of just returning, let's use what's
			 * known about the error.  The increment routines and
			 * the DIMM filter logic will do the right thing by
			 * pointing at the likely damaged DIMMs.
			 */
			pos[i] = -1;
		}
		if (pos[i] >= 0)
			any_memory = false;
	}

	/*
	 * Get the dimm label/grain that applies to the match criteria.
	 * As the error algorithm may not be able to point to just one memory
	 * stick, the logic here will get all possible labels that could
	 * potentially be affected by the error.
	 * On FB-DIMM memory controllers, for uncorrected errors, it is common
	 * to have only the MC channel and the MC dimm (also called "branch")
	 * but the channel is not known, as the memory is arranged in pairs,
	 * where each DIMM belongs to a separate channel within the same
	 * branch.
	 */
	p = e->label;
	*p = '\0';

	mci_for_each_dimm(mci, dimm) {
		if (top_layer >= 0 && top_layer != dimm->location[0])
			continue;
		if (mid_layer >= 0 && mid_layer != dimm->location[1])
			continue;
		if (low_layer >= 0 && low_layer != dimm->location[2])
			continue;

		/* get the max grain, over the error match range */
		if (dimm->grain > e->grain)
			e->grain = dimm->grain;

		/*
		 * If the error is memory-controller wide, there's no need to
		 * seek for the affected DIMMs because the whole channel/memory
		 * controller/... may be affected. Also, don't show errors for
		 * empty DIMM slots.
		 */
		if (!dimm->nr_pages)
			continue;

		n_labels++;
		if (n_labels > EDAC_MAX_LABELS) {
			p = e->label;
			*p = '\0';
		} else {
			if (p != e->label) {
				strcpy(p, OTHER_LABEL);
				p += strlen(OTHER_LABEL);
			}
			strcpy(p, dimm->label);
			p += strlen(p);
		}

		/*
		 * get csrow/channel of the DIMM, in order to allow
		 * incrementing the compat API counters
		 */
		edac_dbg(4, "%s csrows map: (%d,%d)\n",
			 mci->csbased ? "rank" : "dimm",
			 dimm->csrow, dimm->cschannel);
		if (row == -1)
			row = dimm->csrow;
		else if (row >= 0 && row != dimm->csrow)
			row = -2;

		if (chan == -1)
			chan = dimm->cschannel;
		else if (chan >= 0 && chan != dimm->cschannel)
			chan = -2;
	}

	if (any_memory)
		strcpy(e->label, "any memory");
	else if (!*e->label)
		strcpy(e->label, "unknown memory");

	edac_inc_csrow(e, row, chan);

	/* Fill the RAM location data */
	p = e->location;

	for (i = 0; i < mci->n_layers; i++) {
		if (pos[i] < 0)
			continue;

		p += sprintf(p, "%s:%d ",
			     edac_layer_name[mci->layers[i].type],
			     pos[i]);
	}
	if (p > e->location)
		*(p - 1) = '\0';

	edac_raw_mc_handle_error(e);
}
EXPORT_SYMBOL_GPL(edac_mc_handle_error);
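
/*
 * Example (sketch; 'pfn', 'offset' and 'syndrome' are hypothetical
 * placeholders): a driver that decoded a single corrected error at
 * channel 1, slot 0 could report it as:
 *
 *	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
 *			     pfn, offset, syndrome,
 *			     1, 0, -1, "read error", "");
 *
 * Layers that could not be decoded are passed as -1, in which case the
 * label is filled with every DIMM matching the layers that are known.
 */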