Lines Matching refs:mci
(Every line in the EDAC core, drivers/edac/edac_mc.c, that references the identifier mci: kernel source line number, the matching code, and the enclosing function. A trailing "local" or "argument" marks the line where mci is declared as a local variable or as a parameter of that function.)

64 struct mem_ctl_info *mci = dimm->mci; in edac_dimm_info_location() local
68 for (i = 0; i < mci->n_layers; i++) { in edac_dimm_info_location()
70 edac_layer_name[mci->layers[i].type], in edac_dimm_info_location()
102 dimm->mci->csbased ? "rank" : "dimm", in edac_mc_dump_dimm()
120 edac_dbg(4, " csrow->mci = %p\n", csrow->mci); in edac_mc_dump_csrow()
123 static void edac_mc_dump_mci(struct mem_ctl_info *mci) in edac_mc_dump_mci() argument
125 edac_dbg(3, "\tmci = %p\n", mci); in edac_mc_dump_mci()
126 edac_dbg(3, "\tmci->mtype_cap = %lx\n", mci->mtype_cap); in edac_mc_dump_mci()
127 edac_dbg(3, "\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap); in edac_mc_dump_mci()
128 edac_dbg(3, "\tmci->edac_cap = %lx\n", mci->edac_cap); in edac_mc_dump_mci()
129 edac_dbg(4, "\tmci->edac_check = %p\n", mci->edac_check); in edac_mc_dump_mci()
131 mci->nr_csrows, mci->csrows); in edac_mc_dump_mci()
133 mci->tot_dimms, mci->dimms); in edac_mc_dump_mci()
134 edac_dbg(3, "\tdev = %p\n", mci->pdev); in edac_mc_dump_mci()
136 mci->mod_name, mci->ctl_name); in edac_mc_dump_mci()
137 edac_dbg(3, "\tpvt_info = %p\n\n", mci->pvt_info); in edac_mc_dump_mci()
228 static void _edac_mc_free(struct mem_ctl_info *mci) in _edac_mc_free() argument
230 put_device(&mci->dev); in _edac_mc_free()
235 struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev); in mci_release() local
239 if (mci->dimms) { in mci_release()
240 for (i = 0; i < mci->tot_dimms; i++) in mci_release()
241 kfree(mci->dimms[i]); in mci_release()
242 kfree(mci->dimms); in mci_release()
245 if (mci->csrows) { in mci_release()
246 for (row = 0; row < mci->nr_csrows; row++) { in mci_release()
247 csr = mci->csrows[row]; in mci_release()
252 for (chn = 0; chn < mci->num_cschannel; chn++) in mci_release()
258 kfree(mci->csrows); in mci_release()
260 kfree(mci); in mci_release()
263 static int edac_mc_alloc_csrows(struct mem_ctl_info *mci) in edac_mc_alloc_csrows() argument
265 unsigned int tot_channels = mci->num_cschannel; in edac_mc_alloc_csrows()
266 unsigned int tot_csrows = mci->nr_csrows; in edac_mc_alloc_csrows()
272 mci->csrows = kcalloc(tot_csrows, sizeof(*mci->csrows), GFP_KERNEL); in edac_mc_alloc_csrows()
273 if (!mci->csrows) in edac_mc_alloc_csrows()
279 csr = kzalloc(sizeof(**mci->csrows), GFP_KERNEL); in edac_mc_alloc_csrows()
283 mci->csrows[row] = csr; in edac_mc_alloc_csrows()
285 csr->mci = mci; in edac_mc_alloc_csrows()
308 static int edac_mc_alloc_dimms(struct mem_ctl_info *mci) in edac_mc_alloc_dimms() argument
318 mci->dimms = kcalloc(mci->tot_dimms, sizeof(*mci->dimms), GFP_KERNEL); in edac_mc_alloc_dimms()
319 if (!mci->dimms) in edac_mc_alloc_dimms()
325 for (idx = 0; idx < mci->tot_dimms; idx++) { in edac_mc_alloc_dimms()
330 chan = mci->csrows[row]->channels[chn]; in edac_mc_alloc_dimms()
332 dimm = kzalloc(sizeof(**mci->dimms), GFP_KERNEL); in edac_mc_alloc_dimms()
335 mci->dimms[idx] = dimm; in edac_mc_alloc_dimms()
336 dimm->mci = mci; in edac_mc_alloc_dimms()
344 n = snprintf(p, len, "mc#%u", mci->mc_idx); in edac_mc_alloc_dimms()
347 for (layer = 0; layer < mci->n_layers; layer++) { in edac_mc_alloc_dimms()
349 edac_layer_name[mci->layers[layer].type], in edac_mc_alloc_dimms()
365 if (mci->layers[0].is_virt_csrow) { in edac_mc_alloc_dimms()
367 if (chn == mci->num_cschannel) { in edac_mc_alloc_dimms()
373 if (row == mci->nr_csrows) { in edac_mc_alloc_dimms()
380 for (layer = mci->n_layers - 1; layer >= 0; layer--) { in edac_mc_alloc_dimms()
382 if (pos[layer] < mci->layers[layer].size) in edac_mc_alloc_dimms()
396 struct mem_ctl_info *mci; in edac_mc_alloc() local
427 mci = edac_align_ptr(&ptr, sizeof(*mci), 1); in edac_mc_alloc()
438 mci = kzalloc(size, GFP_KERNEL); in edac_mc_alloc()
439 if (mci == NULL) in edac_mc_alloc()
442 mci->dev.release = mci_release; in edac_mc_alloc()
443 device_initialize(&mci->dev); in edac_mc_alloc()
448 layer = (struct edac_mc_layer *)(((char *)mci) + ((unsigned long)layer)); in edac_mc_alloc()
449 pvt = sz_pvt ? (((char *)mci) + ((unsigned long)pvt)) : NULL; in edac_mc_alloc()
452 mci->mc_idx = mc_num; in edac_mc_alloc()
453 mci->tot_dimms = tot_dimms; in edac_mc_alloc()
454 mci->pvt_info = pvt; in edac_mc_alloc()
455 mci->n_layers = n_layers; in edac_mc_alloc()
456 mci->layers = layer; in edac_mc_alloc()
457 memcpy(mci->layers, layers, sizeof(*layer) * n_layers); in edac_mc_alloc()
458 mci->nr_csrows = tot_csrows; in edac_mc_alloc()
459 mci->num_cschannel = tot_channels; in edac_mc_alloc()
460 mci->csbased = per_rank; in edac_mc_alloc()
462 if (edac_mc_alloc_csrows(mci)) in edac_mc_alloc()
465 if (edac_mc_alloc_dimms(mci)) in edac_mc_alloc()
468 mci->op_state = OP_ALLOC; in edac_mc_alloc()
470 return mci; in edac_mc_alloc()
473 _edac_mc_free(mci); in edac_mc_alloc()
479 void edac_mc_free(struct mem_ctl_info *mci) in edac_mc_free() argument
483 _edac_mc_free(mci); in edac_mc_free()
504 struct mem_ctl_info *mci; in __find_mci_by_dev() local
510 mci = list_entry(item, struct mem_ctl_info, link); in __find_mci_by_dev()
512 if (mci->pdev == dev) in __find_mci_by_dev()
513 return mci; in __find_mci_by_dev()
545 struct mem_ctl_info *mci = to_edac_mem_ctl_work(d_work); in edac_mc_workq_function() local
549 if (mci->op_state != OP_RUNNING_POLL) { in edac_mc_workq_function()
555 mci->edac_check(mci); in edac_mc_workq_function()
560 edac_queue_work(&mci->work, msecs_to_jiffies(edac_mc_get_poll_msec())); in edac_mc_workq_function()
571 struct mem_ctl_info *mci; in edac_mc_reset_delay_period() local
577 mci = list_entry(item, struct mem_ctl_info, link); in edac_mc_reset_delay_period()
579 if (mci->op_state == OP_RUNNING_POLL) in edac_mc_reset_delay_period()
580 edac_mod_work(&mci->work, value); in edac_mc_reset_delay_period()
595 static int add_mc_to_global_list(struct mem_ctl_info *mci) in add_mc_to_global_list() argument
602 p = __find_mci_by_dev(mci->pdev); in add_mc_to_global_list()
609 if (p->mc_idx >= mci->mc_idx) { in add_mc_to_global_list()
610 if (unlikely(p->mc_idx == mci->mc_idx)) in add_mc_to_global_list()
618 list_add_tail_rcu(&mci->link, insert_before); in add_mc_to_global_list()
624 edac_dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx); in add_mc_to_global_list()
634 static int del_mc_from_global_list(struct mem_ctl_info *mci) in del_mc_from_global_list() argument
636 list_del_rcu(&mci->link); in del_mc_from_global_list()
642 INIT_LIST_HEAD(&mci->link); in del_mc_from_global_list()
649 struct mem_ctl_info *mci; in edac_mc_find() local
655 mci = list_entry(item, struct mem_ctl_info, link); in edac_mc_find()
656 if (mci->mc_idx == idx) in edac_mc_find()
660 mci = NULL; in edac_mc_find()
663 return mci; in edac_mc_find()
674 int edac_mc_add_mc_with_groups(struct mem_ctl_info *mci, in edac_mc_add_mc_with_groups() argument
682 edac_mc_dump_mci(mci); in edac_mc_add_mc_with_groups()
688 for (i = 0; i < mci->nr_csrows; i++) { in edac_mc_add_mc_with_groups()
689 struct csrow_info *csrow = mci->csrows[i]; in edac_mc_add_mc_with_groups()
703 mci_for_each_dimm(mci, dimm) in edac_mc_add_mc_with_groups()
709 if (edac_mc_owner && edac_mc_owner != mci->mod_name) { in edac_mc_add_mc_with_groups()
714 if (add_mc_to_global_list(mci)) in edac_mc_add_mc_with_groups()
718 mci->start_time = jiffies; in edac_mc_add_mc_with_groups()
720 mci->bus = edac_get_sysfs_subsys(); in edac_mc_add_mc_with_groups()
722 if (edac_create_sysfs_mci_device(mci, groups)) { in edac_mc_add_mc_with_groups()
723 edac_mc_printk(mci, KERN_WARNING, in edac_mc_add_mc_with_groups()
728 if (mci->edac_check) { in edac_mc_add_mc_with_groups()
729 mci->op_state = OP_RUNNING_POLL; in edac_mc_add_mc_with_groups()
731 INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function); in edac_mc_add_mc_with_groups()
732 edac_queue_work(&mci->work, msecs_to_jiffies(edac_mc_get_poll_msec())); in edac_mc_add_mc_with_groups()
735 mci->op_state = OP_RUNNING_INTERRUPT; in edac_mc_add_mc_with_groups()
739 edac_mc_printk(mci, KERN_INFO, in edac_mc_add_mc_with_groups()
741 mci->mod_name, mci->ctl_name, mci->dev_name, in edac_mc_add_mc_with_groups()
742 edac_op_state_to_string(mci->op_state)); in edac_mc_add_mc_with_groups()
744 edac_mc_owner = mci->mod_name; in edac_mc_add_mc_with_groups()
750 del_mc_from_global_list(mci); in edac_mc_add_mc_with_groups()
760 struct mem_ctl_info *mci; in edac_mc_del_mc() local
767 mci = __find_mci_by_dev(dev); in edac_mc_del_mc()
768 if (mci == NULL) { in edac_mc_del_mc()
774 mci->op_state = OP_OFFLINE; in edac_mc_del_mc()
776 if (del_mc_from_global_list(mci)) in edac_mc_del_mc()
781 if (mci->edac_check) in edac_mc_del_mc()
782 edac_stop_work(&mci->work); in edac_mc_del_mc()
785 edac_remove_sysfs_mci_device(mci); in edac_mc_del_mc()
788 "Removed device %d for %s %s: DEV %s\n", mci->mc_idx, in edac_mc_del_mc()
789 mci->mod_name, mci->ctl_name, edac_dev_name(mci)); in edac_mc_del_mc()
791 return mci; in edac_mc_del_mc()
827 int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page) in edac_mc_find_csrow_by_page() argument
829 struct csrow_info **csrows = mci->csrows; in edac_mc_find_csrow_by_page()
832 edac_dbg(1, "MC%d: 0x%lx\n", mci->mc_idx, page); in edac_mc_find_csrow_by_page()
835 for (i = 0; i < mci->nr_csrows; i++) { in edac_mc_find_csrow_by_page()
846 mci->mc_idx, in edac_mc_find_csrow_by_page()
860 edac_mc_printk(mci, KERN_ERR, in edac_mc_find_csrow_by_page()
880 struct mem_ctl_info *mci = error_desc_to_mci(e); in edac_inc_ce_error() local
881 struct dimm_info *dimm = edac_get_dimm(mci, pos[0], pos[1], pos[2]); in edac_inc_ce_error()
883 mci->ce_mc += e->error_count; in edac_inc_ce_error()
888 mci->ce_noinfo_count += e->error_count; in edac_inc_ce_error()
894 struct mem_ctl_info *mci = error_desc_to_mci(e); in edac_inc_ue_error() local
895 struct dimm_info *dimm = edac_get_dimm(mci, pos[0], pos[1], pos[2]); in edac_inc_ue_error()
897 mci->ue_mc += e->error_count; in edac_inc_ue_error()
902 mci->ue_noinfo_count += e->error_count; in edac_inc_ue_error()
907 struct mem_ctl_info *mci = error_desc_to_mci(e); in edac_ce_error() local
911 edac_mc_printk(mci, KERN_WARNING, in edac_ce_error()
923 if (mci->scrub_mode == SCRUB_SW_SRC) { in edac_ce_error()
935 remapped_page = mci->ctl_page_to_phys ? in edac_ce_error()
936 mci->ctl_page_to_phys(mci, e->page_frame_number) : in edac_ce_error()
945 struct mem_ctl_info *mci = error_desc_to_mci(e); in edac_ue_error() local
948 edac_mc_printk(mci, KERN_WARNING, in edac_ue_error()
973 struct mem_ctl_info *mci = error_desc_to_mci(e); in edac_inc_csrow() local
983 mci->csrows[row]->ce_count += count; in edac_inc_csrow()
985 mci->csrows[row]->channels[chan]->ce_count += count; in edac_inc_csrow()
987 mci->csrows[row]->ue_count += count; in edac_inc_csrow()
993 struct mem_ctl_info *mci = error_desc_to_mci(e); in edac_raw_mc_handle_error() local
1005 mci->mc_idx, e->top_layer, e->mid_layer, in edac_raw_mc_handle_error()
1018 struct mem_ctl_info *mci, in edac_mc_handle_error() argument
1034 struct edac_raw_error_desc *e = &mci->error_desc; in edac_mc_handle_error()
1037 edac_dbg(3, "MC%d\n", mci->mc_idx); in edac_mc_handle_error()
1058 for (i = 0; i < mci->n_layers; i++) { in edac_mc_handle_error()
1059 if (pos[i] >= (int)mci->layers[i].size) { in edac_mc_handle_error()
1061 edac_mc_printk(mci, KERN_ERR, in edac_mc_handle_error()
1063 edac_layer_name[mci->layers[i].type], in edac_mc_handle_error()
1064 pos[i], mci->layers[i].size); in edac_mc_handle_error()
1091 mci_for_each_dimm(mci, dimm) { in edac_mc_handle_error()
1130 mci->csbased ? "rank" : "dimm", in edac_mc_handle_error()
1153 for (i = 0; i < mci->n_layers; i++) { in edac_mc_handle_error()
1158 edac_layer_name[mci->layers[i].type], in edac_mc_handle_error()
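
The functions referenced above make up the EDAC core's registration and error-accounting path. As a rough illustration only, the sketch below shows how a memory-controller driver might drive that path: allocate an mci with edac_mc_alloc(), register it with edac_mc_add_mc() (a wrapper around edac_mc_add_mc_with_groups()), report a corrected error through edac_mc_handle_error(), and tear down via edac_mc_del_mc() and edac_mc_free(). Everything named demo_*, the "demo_edac"/"demo_ctl" strings, and the layer sizes are invented placeholders, and the platform_driver wiring is omitted; the exact structure fields and signatures should be checked against include/linux/edac.h for the kernel version at hand.

/*
 * Rough usage sketch only -- not taken from edac_mc.c.  All demo_*
 * identifiers and the layer sizes are placeholders.
 */
#include <linux/edac.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>

struct demo_pvt {
	int placeholder;			/* hypothetical driver state */
};

static int demo_probe(struct platform_device *pdev)
{
	struct edac_mc_layer layers[2];
	struct mem_ctl_info *mci;
	struct demo_pvt *pvt;
	int ret;

	/* Two layers: chip selects mapped to virtual csrows, then channels. */
	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = 4;			/* placeholder */
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = 2;			/* placeholder */
	layers[1].is_virt_csrow = false;

	/* edac_mc_alloc() builds the mci plus its csrows, channels and dimms. */
	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
	if (!mci)
		return -ENOMEM;

	pvt = mci->pvt_info;			/* private area sized above */
	pvt->placeholder = 0;			/* stand-in for hardware setup */

	mci->pdev = &pdev->dev;
	mci->mtype_cap = MEM_FLAG_DDR4;
	mci->edac_ctl_cap = EDAC_FLAG_SECDED;
	mci->edac_cap = EDAC_FLAG_SECDED;
	mci->mod_name = "demo_edac";
	mci->ctl_name = "demo_ctl";
	mci->dev_name = dev_name(&pdev->dev);
	/* mci->edac_check left NULL: no polling work is queued. */

	/* Registers sysfs nodes and adds the mci to the global list. */
	ret = edac_mc_add_mc(mci);
	if (ret) {
		edac_mc_free(mci);
		return ret;
	}

	platform_set_drvdata(pdev, mci);
	return 0;
}

/* Called from the driver's error interrupt/decoder path. */
static void demo_report_ce(struct mem_ctl_info *mci, unsigned long pfn,
			   int csrow, int chan)
{
	/* Layer positions must match the layers given to edac_mc_alloc(). */
	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
			     pfn, 0, 0,
			     csrow, chan, -1,
			     "demo corrected error", "");
}

static int demo_remove(struct platform_device *pdev)
{
	struct mem_ctl_info *mci = edac_mc_del_mc(&pdev->dev);

	if (mci)
		edac_mc_free(mci);
	return 0;
}

Leaving mci->edac_check NULL keeps the controller in OP_RUNNING_INTERRUPT mode; if the driver supplies a check callback, edac_mc_add_mc_with_groups() instead switches to OP_RUNNING_POLL and queues the delayed work serviced by edac_mc_workq_function() in the listing above.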