Lines Matching refs:mci
58 struct mem_ctl_info *mci = dimm->mci; in edac_dimm_info_location() local
62 for (i = 0; i < mci->n_layers; i++) { in edac_dimm_info_location()
64 edac_layer_name[mci->layers[i].type], in edac_dimm_info_location()
93 dimm->mci->csbased ? "rank" : "dimm", in edac_mc_dump_dimm()
111 edac_dbg(4, " csrow->mci = %p\n", csrow->mci); in edac_mc_dump_csrow()
114 static void edac_mc_dump_mci(struct mem_ctl_info *mci) in edac_mc_dump_mci() argument
116 edac_dbg(3, "\tmci = %p\n", mci); in edac_mc_dump_mci()
117 edac_dbg(3, "\tmci->mtype_cap = %lx\n", mci->mtype_cap); in edac_mc_dump_mci()
118 edac_dbg(3, "\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap); in edac_mc_dump_mci()
119 edac_dbg(3, "\tmci->edac_cap = %lx\n", mci->edac_cap); in edac_mc_dump_mci()
120 edac_dbg(4, "\tmci->edac_check = %p\n", mci->edac_check); in edac_mc_dump_mci()
122 mci->nr_csrows, mci->csrows); in edac_mc_dump_mci()
124 mci->tot_dimms, mci->dimms); in edac_mc_dump_mci()
125 edac_dbg(3, "\tdev = %p\n", mci->pdev); in edac_mc_dump_mci()
127 mci->mod_name, mci->ctl_name); in edac_mc_dump_mci()
128 edac_dbg(3, "\tpvt_info = %p\n\n", mci->pvt_info); in edac_mc_dump_mci()
212 static void _edac_mc_free(struct mem_ctl_info *mci) in _edac_mc_free() argument
216 const unsigned int tot_dimms = mci->tot_dimms; in _edac_mc_free()
217 const unsigned int tot_channels = mci->num_cschannel; in _edac_mc_free()
218 const unsigned int tot_csrows = mci->nr_csrows; in _edac_mc_free()
220 if (mci->dimms) { in _edac_mc_free()
222 kfree(mci->dimms[i]); in _edac_mc_free()
223 kfree(mci->dimms); in _edac_mc_free()
225 if (mci->csrows) { in _edac_mc_free()
227 csr = mci->csrows[row]; in _edac_mc_free()
237 kfree(mci->csrows); in _edac_mc_free()
239 kfree(mci); in _edac_mc_free()
271 struct mem_ctl_info *mci; in edac_mc_alloc() local
305 mci = edac_align_ptr(&ptr, sizeof(*mci), 1); in edac_mc_alloc()
325 mci = kzalloc(size, GFP_KERNEL); in edac_mc_alloc()
326 if (mci == NULL) in edac_mc_alloc()
332 layer = (struct edac_mc_layer *)(((char *)mci) + ((unsigned long)layer)); in edac_mc_alloc()
334 mci->ce_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ce_per_layer[i])); in edac_mc_alloc()
335 mci->ue_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ue_per_layer[i])); in edac_mc_alloc()
337 pvt = sz_pvt ? (((char *)mci) + ((unsigned long)pvt)) : NULL; in edac_mc_alloc()
340 mci->mc_idx = mc_num; in edac_mc_alloc()
341 mci->tot_dimms = tot_dimms; in edac_mc_alloc()
342 mci->pvt_info = pvt; in edac_mc_alloc()
343 mci->n_layers = n_layers; in edac_mc_alloc()
344 mci->layers = layer; in edac_mc_alloc()
345 memcpy(mci->layers, layers, sizeof(*layer) * n_layers); in edac_mc_alloc()
346 mci->nr_csrows = tot_csrows; in edac_mc_alloc()
347 mci->num_cschannel = tot_channels; in edac_mc_alloc()
348 mci->csbased = per_rank; in edac_mc_alloc()
353 mci->csrows = kcalloc(tot_csrows, sizeof(*mci->csrows), GFP_KERNEL); in edac_mc_alloc()
354 if (!mci->csrows) in edac_mc_alloc()
357 csr = kzalloc(sizeof(**mci->csrows), GFP_KERNEL); in edac_mc_alloc()
360 mci->csrows[row] = csr; in edac_mc_alloc()
362 csr->mci = mci; in edac_mc_alloc()
382 mci->dimms = kcalloc(tot_dimms, sizeof(*mci->dimms), GFP_KERNEL); in edac_mc_alloc()
383 if (!mci->dimms) in edac_mc_alloc()
390 chan = mci->csrows[row]->channels[chn]; in edac_mc_alloc()
393 …edac_mc_printk(mci, KERN_ERR, "EDAC core bug: EDAC_DIMM_OFF is trying to do an illegal data access… in edac_mc_alloc()
397 dimm = kzalloc(sizeof(**mci->dimms), GFP_KERNEL); in edac_mc_alloc()
400 mci->dimms[off] = dimm; in edac_mc_alloc()
401 dimm->mci = mci; in edac_mc_alloc()
452 mci->op_state = OP_ALLOC; in edac_mc_alloc()
454 return mci; in edac_mc_alloc()
457 _edac_mc_free(mci); in edac_mc_alloc()
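
The references above cover edac_mc_alloc(), which lays out the mem_ctl_info, its layers, csrows and dimms in a single allocation pass. For orientation, a minimal sketch of how a driver typically calls it follows; the two-layer geometry, the counts nr_csrows/nr_channels and the private struct my_pvt are placeholders, not taken from this file:

	struct edac_mc_layer layers[2];
	struct mem_ctl_info *mci;

	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;	/* maps onto csrows */
	layers[0].size = nr_csrows;			/* placeholder count */
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = nr_channels;			/* placeholder count */
	layers[1].is_virt_csrow = false;

	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
			    sizeof(struct my_pvt));	/* my_pvt is hypothetical */
	if (!mci)
		return -ENOMEM;
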
468 void edac_mc_free(struct mem_ctl_info *mci) in edac_mc_free() argument
475 if (!device_is_registered(&mci->dev)) { in edac_mc_free()
476 _edac_mc_free(mci); in edac_mc_free()
481 edac_unregister_sysfs(mci); in edac_mc_free()
495 struct mem_ctl_info *mci; in find_mci_by_dev() local
501 mci = list_entry(item, struct mem_ctl_info, link); in find_mci_by_dev()
503 if (mci->pdev == dev) in find_mci_by_dev()
504 return mci; in find_mci_by_dev()
534 struct mem_ctl_info *mci = to_edac_mem_ctl_work(d_work); in edac_mc_workq_function() local
538 if (mci->op_state != OP_RUNNING_POLL) { in edac_mc_workq_function()
544 mci->edac_check(mci); in edac_mc_workq_function()
549 edac_queue_work(&mci->work, msecs_to_jiffies(edac_mc_get_poll_msec())); in edac_mc_workq_function()
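
edac_mc_workq_function() is the poll path: while op_state is OP_RUNNING_POLL it invokes the driver's edac_check() hook and re-queues itself with the current poll period. A minimal sketch of such a hook follows; the name my_edac_check and the register reads are assumptions, not code from this file:

	/* Called by edac_mc_workq_function() every edac_mc_get_poll_msec() ms
	 * while the controller is in OP_RUNNING_POLL. */
	static void my_edac_check(struct mem_ctl_info *mci)
	{
		/* read the controller's error registers here and report any
		 * findings through edac_mc_handle_error() */
	}
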
560 struct mem_ctl_info *mci; in edac_mc_reset_delay_period() local
566 mci = list_entry(item, struct mem_ctl_info, link); in edac_mc_reset_delay_period()
568 if (mci->op_state == OP_RUNNING_POLL) in edac_mc_reset_delay_period()
569 edac_mod_work(&mci->work, value); in edac_mc_reset_delay_period()
584 static int add_mc_to_global_list(struct mem_ctl_info *mci) in add_mc_to_global_list() argument
591 p = find_mci_by_dev(mci->pdev); in add_mc_to_global_list()
598 if (p->mc_idx >= mci->mc_idx) { in add_mc_to_global_list()
599 if (unlikely(p->mc_idx == mci->mc_idx)) in add_mc_to_global_list()
607 list_add_tail_rcu(&mci->link, insert_before); in add_mc_to_global_list()
614 edac_dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx); in add_mc_to_global_list()
624 static int del_mc_from_global_list(struct mem_ctl_info *mci) in del_mc_from_global_list() argument
627 list_del_rcu(&mci->link); in del_mc_from_global_list()
633 INIT_LIST_HEAD(&mci->link); in del_mc_from_global_list()
649 struct mem_ctl_info *mci; in edac_mc_find() local
652 mci = list_entry(item, struct mem_ctl_info, link); in edac_mc_find()
654 if (mci->mc_idx >= idx) { in edac_mc_find()
655 if (mci->mc_idx == idx) in edac_mc_find()
656 return mci; in edac_mc_find()
678 int edac_mc_add_mc_with_groups(struct mem_ctl_info *mci, in edac_mc_add_mc_with_groups() argument
684 if (mci->mc_idx >= EDAC_MAX_MCS) { in edac_mc_add_mc_with_groups()
685 pr_warn_once("Too many memory controllers: %d\n", mci->mc_idx); in edac_mc_add_mc_with_groups()
691 edac_mc_dump_mci(mci); in edac_mc_add_mc_with_groups()
696 for (i = 0; i < mci->nr_csrows; i++) { in edac_mc_add_mc_with_groups()
697 struct csrow_info *csrow = mci->csrows[i]; in edac_mc_add_mc_with_groups()
710 for (i = 0; i < mci->tot_dimms; i++) in edac_mc_add_mc_with_groups()
711 if (mci->dimms[i]->nr_pages) in edac_mc_add_mc_with_groups()
712 edac_mc_dump_dimm(mci->dimms[i], i); in edac_mc_add_mc_with_groups()
717 if (edac_mc_owner && edac_mc_owner != mci->mod_name) { in edac_mc_add_mc_with_groups()
722 if (add_mc_to_global_list(mci)) in edac_mc_add_mc_with_groups()
726 mci->start_time = jiffies; in edac_mc_add_mc_with_groups()
728 mci->bus = &mc_bus[mci->mc_idx]; in edac_mc_add_mc_with_groups()
730 if (edac_create_sysfs_mci_device(mci, groups)) { in edac_mc_add_mc_with_groups()
731 edac_mc_printk(mci, KERN_WARNING, in edac_mc_add_mc_with_groups()
736 if (mci->edac_check) { in edac_mc_add_mc_with_groups()
737 mci->op_state = OP_RUNNING_POLL; in edac_mc_add_mc_with_groups()
739 INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function); in edac_mc_add_mc_with_groups()
740 edac_queue_work(&mci->work, msecs_to_jiffies(edac_mc_get_poll_msec())); in edac_mc_add_mc_with_groups()
743 mci->op_state = OP_RUNNING_INTERRUPT; in edac_mc_add_mc_with_groups()
747 edac_mc_printk(mci, KERN_INFO, in edac_mc_add_mc_with_groups()
749 mci->mod_name, mci->ctl_name, mci->dev_name, in edac_mc_add_mc_with_groups()
750 edac_op_state_to_string(mci->op_state)); in edac_mc_add_mc_with_groups()
752 edac_mc_owner = mci->mod_name; in edac_mc_add_mc_with_groups()
758 del_mc_from_global_list(mci); in edac_mc_add_mc_with_groups()
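
edac_mc_add_mc_with_groups() is the registration entry point (edac_mc_add_mc() is the wrapper that passes no extra attribute groups). A hedged sketch of the usual probe-time sequence follows; the device pointer, the names and the check hook are placeholders:

	mci->pdev = &pdev->dev;			/* device this controller hangs off */
	mci->mod_name = "my_edac";		/* placeholder module name */
	mci->ctl_name = "my memory controller";
	mci->dev_name = dev_name(&pdev->dev);
	mci->edac_check = my_edac_check;	/* omit for interrupt-driven drivers */

	if (edac_mc_add_mc(mci)) {		/* edac_mc_add_mc_with_groups(mci, NULL) */
		edac_mc_free(mci);
		return -ENODEV;
	}
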
775 struct mem_ctl_info *mci; in edac_mc_del_mc() local
782 mci = find_mci_by_dev(dev); in edac_mc_del_mc()
783 if (mci == NULL) { in edac_mc_del_mc()
789 mci->op_state = OP_OFFLINE; in edac_mc_del_mc()
791 if (!del_mc_from_global_list(mci)) in edac_mc_del_mc()
796 if (mci->edac_check) in edac_mc_del_mc()
797 edac_stop_work(&mci->work); in edac_mc_del_mc()
800 edac_remove_sysfs_mci_device(mci); in edac_mc_del_mc()
803 "Removed device %d for %s %s: DEV %s\n", mci->mc_idx, in edac_mc_del_mc()
804 mci->mod_name, mci->ctl_name, edac_dev_name(mci)); in edac_mc_del_mc()
806 return mci; in edac_mc_del_mc()
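
edac_mc_del_mc() unregisters by device and hands the mem_ctl_info back to the caller, which pairs with edac_mc_free() above. A sketch of the usual remove path, assuming the same pdev used at probe time:

	struct mem_ctl_info *mci = edac_mc_del_mc(&pdev->dev);

	if (mci)
		edac_mc_free(mci);	/* tears down sysfs, or falls back to _edac_mc_free() */
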
842 int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page) in edac_mc_find_csrow_by_page() argument
844 struct csrow_info **csrows = mci->csrows; in edac_mc_find_csrow_by_page()
847 edac_dbg(1, "MC%d: 0x%lx\n", mci->mc_idx, page); in edac_mc_find_csrow_by_page()
850 for (i = 0; i < mci->nr_csrows; i++) { in edac_mc_find_csrow_by_page()
861 mci->mc_idx, in edac_mc_find_csrow_by_page()
875 edac_mc_printk(mci, KERN_ERR, in edac_mc_find_csrow_by_page()
892 static void edac_inc_ce_error(struct mem_ctl_info *mci, in edac_inc_ce_error() argument
899 mci->ce_mc += count; in edac_inc_ce_error()
902 mci->ce_noinfo_count += count; in edac_inc_ce_error()
906 for (i = 0; i < mci->n_layers; i++) { in edac_inc_ce_error()
910 mci->ce_per_layer[i][index] += count; in edac_inc_ce_error()
912 if (i < mci->n_layers - 1) in edac_inc_ce_error()
913 index *= mci->layers[i + 1].size; in edac_inc_ce_error()
917 static void edac_inc_ue_error(struct mem_ctl_info *mci, in edac_inc_ue_error() argument
924 mci->ue_mc += count; in edac_inc_ue_error()
927 mci->ue_noinfo_count += count; in edac_inc_ue_error()
931 for (i = 0; i < mci->n_layers; i++) { in edac_inc_ue_error()
935 mci->ue_per_layer[i][index] += count; in edac_inc_ue_error()
937 if (i < mci->n_layers - 1) in edac_inc_ue_error()
938 index *= mci->layers[i + 1].size; in edac_inc_ue_error()
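
edac_inc_ce_error() and edac_inc_ue_error() walk the layers keeping a running flattened index: after layer i it holds ((pos[0] * size1 + pos[1]) * size2 + ...) + pos[i]. A standalone illustration of that arithmetic (not kernel code; sizes[] stands in for mci->layers[i].size):

	/* Flattened index of the entry at pos[0..layer]; mirrors the loop in
	 * edac_inc_ce_error()/edac_inc_ue_error(). */
	static unsigned int layer_index(const unsigned int *sizes, const int *pos,
					unsigned int layer)
	{
		unsigned int i, index = 0;

		for (i = 0; i <= layer; i++)
			index = index * sizes[i] + pos[i];

		return index;
	}

	/* e.g. sizes = {2, 4, 8}, pos = {1, 2, 5}: layer 0 -> 1, layer 1 -> 6, layer 2 -> 53 */
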
942 static void edac_ce_error(struct mem_ctl_info *mci, in edac_ce_error() argument
963 edac_mc_printk(mci, KERN_WARNING, in edac_ce_error()
968 edac_mc_printk(mci, KERN_WARNING, in edac_ce_error()
973 edac_inc_ce_error(mci, enable_per_layer_report, pos, error_count); in edac_ce_error()
975 if (mci->scrub_mode == SCRUB_SW_SRC) { in edac_ce_error()
987 remapped_page = mci->ctl_page_to_phys ? in edac_ce_error()
988 mci->ctl_page_to_phys(mci, page_frame_number) : in edac_ce_error()
996 static void edac_ue_error(struct mem_ctl_info *mci, in edac_ue_error() argument
1013 edac_mc_printk(mci, KERN_WARNING, in edac_ue_error()
1018 edac_mc_printk(mci, KERN_WARNING, in edac_ue_error()
1033 edac_inc_ue_error(mci, enable_per_layer_report, pos, error_count); in edac_ue_error()
1049 struct mem_ctl_info *mci, in edac_raw_mc_handle_error() argument
1061 edac_ce_error(mci, e->error_count, pos, e->msg, e->location, e->label, in edac_raw_mc_handle_error()
1069 edac_ue_error(mci, e->error_count, pos, e->msg, e->location, e->label, in edac_raw_mc_handle_error()
1096 struct mem_ctl_info *mci, in edac_mc_handle_error() argument
1112 struct edac_raw_error_desc *e = &mci->error_desc; in edac_mc_handle_error()
1114 edac_dbg(3, "MC%d\n", mci->mc_idx); in edac_mc_handle_error()
1134 for (i = 0; i < mci->n_layers; i++) { in edac_mc_handle_error()
1135 if (pos[i] >= (int)mci->layers[i].size) { in edac_mc_handle_error()
1137 edac_mc_printk(mci, KERN_ERR, in edac_mc_handle_error()
1139 edac_layer_name[mci->layers[i].type], in edac_mc_handle_error()
1140 pos[i], mci->layers[i].size); in edac_mc_handle_error()
1167 for (i = 0; i < mci->tot_dimms; i++) { in edac_mc_handle_error()
1168 struct dimm_info *dimm = mci->dimms[i]; in edac_mc_handle_error()
1206 mci->csbased ? "rank" : "dimm", in edac_mc_handle_error()
1228 mci->csrows[row]->ce_count += error_count; in edac_mc_handle_error()
1230 mci->csrows[row]->channels[chan]->ce_count += error_count; in edac_mc_handle_error()
1234 mci->csrows[row]->ue_count += error_count; in edac_mc_handle_error()
1240 for (i = 0; i < mci->n_layers; i++) { in edac_mc_handle_error()
1245 edac_layer_name[mci->layers[i].type], in edac_mc_handle_error()
1254 mci->mc_idx, e->top_layer, e->mid_layer, e->low_layer, in edac_mc_handle_error()
1258 edac_raw_mc_handle_error(type, mci, e); in edac_mc_handle_error()
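
edac_mc_handle_error() is the single reporting entry point that validates the layer positions, fills in the error descriptor and ends in edac_raw_mc_handle_error() above. A hedged example of a driver reporting one corrected error; the position variables and the message text are placeholders, typically derived from the controller's error registers:

	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
			     1,				/* error count */
			     page_frame_number,		/* from controller registers */
			     offset_in_page, syndrome,
			     csrow, channel, -1,	/* top/mid/low layer; -1 = unknown */
			     "single-bit ECC error", "");
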