Lines Matching refs:mci (drivers/edac/edac_mc.c)

64 struct mem_ctl_info *mci = dimm->mci;
68 for (i = 0; i < mci->n_layers; i++) {
70 edac_layer_name[mci->layers[i].type],
100 dimm->mci->csbased ? "rank" : "dimm",
117 edac_dbg(4, "  csrow->mci = %p\n", csrow->mci);
120 static void edac_mc_dump_mci(struct mem_ctl_info *mci)
122 edac_dbg(3, "\tmci = %p\n", mci);
123 edac_dbg(3, "\tmci->mtype_cap = %lx\n", mci->mtype_cap);
124 edac_dbg(3, "\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap);
125 edac_dbg(3, "\tmci->edac_cap = %lx\n", mci->edac_cap);
126 edac_dbg(4, "\tmci->edac_check = %p\n", mci->edac_check);
128 mci->nr_csrows, mci->csrows);
130 mci->tot_dimms, mci->dimms);
131 edac_dbg(3, "\tdev = %p\n", mci->pdev);
133 mci->mod_name, mci->ctl_name);
134 edac_dbg(3, "\tpvt_info = %p\n\n", mci->pvt_info);
173 static void _edac_mc_free(struct mem_ctl_info *mci)
175 put_device(&mci->dev);
180 struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev);
184 if (mci->dimms) {
185 for (i = 0; i < mci->tot_dimms; i++)
186 kfree(mci->dimms[i]);
187 kfree(mci->dimms);
190 if (mci->csrows) {
191 for (row = 0; row < mci->nr_csrows; row++) {
192 csr = mci->csrows[row];
197 for (chn = 0; chn < mci->num_cschannel; chn++)
203 kfree(mci->csrows);
205 kfree(mci->pvt_info);
206 kfree(mci->layers);
207 kfree(mci);
210 static int edac_mc_alloc_csrows(struct mem_ctl_info *mci)
212 unsigned int tot_channels = mci->num_cschannel;
213 unsigned int tot_csrows = mci->nr_csrows;
219 mci->csrows = kcalloc(tot_csrows, sizeof(*mci->csrows), GFP_KERNEL);
220 if (!mci->csrows)
226 csr = kzalloc(sizeof(**mci->csrows), GFP_KERNEL);
230 mci->csrows[row] = csr;
232 csr->mci = mci;
255 static int edac_mc_alloc_dimms(struct mem_ctl_info *mci)
265 mci->dimms = kcalloc(mci->tot_dimms, sizeof(*mci->dimms), GFP_KERNEL);
266 if (!mci->dimms)
272 for (idx = 0; idx < mci->tot_dimms; idx++) {
277 chan = mci->csrows[row]->channels[chn];
279 dimm = kzalloc(sizeof(**mci->dimms), GFP_KERNEL);
282 mci->dimms[idx] = dimm;
283 dimm->mci = mci;
291 n = scnprintf(p, len, "mc#%u", mci->mc_idx);
294 for (layer = 0; layer < mci->n_layers; layer++) {
296 edac_layer_name[mci->layers[layer].type],
309 if (mci->layers[0].is_virt_csrow) {
311 if (chn == mci->num_cschannel) {
317 if (row == mci->nr_csrows) {
324 for (layer = mci->n_layers - 1; layer >= 0; layer--) {
326 if (pos[layer] < mci->layers[layer].size)
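
The scnprintf() loop above builds each DIMM label from the controller index plus one "<layer-name>#<position>" fragment per layer, with the names taken from edac_layer_name[]. A sketch of the result for a two-layer chip-select-by-channel controller:

	/* mc_idx 0, csrow 2, channel 1 produces: */
	dimm->label == "mc#0csrow#2channel#1"
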
340 struct mem_ctl_info *mci;
365 mci = kzalloc(sizeof(struct mem_ctl_info), GFP_KERNEL);
366 if (!mci)
369 mci->layers = kcalloc(n_layers, sizeof(struct edac_mc_layer), GFP_KERNEL);
370 if (!mci->layers)
373 mci->pvt_info = kzalloc(sz_pvt, GFP_KERNEL);
374 if (!mci->pvt_info)
377 mci->dev.release = mci_release;
378 device_initialize(&mci->dev);
381 mci->mc_idx = mc_num;
382 mci->tot_dimms = tot_dimms;
383 mci->n_layers = n_layers;
384 memcpy(mci->layers, layers, sizeof(*layer) * n_layers);
385 mci->nr_csrows = tot_csrows;
386 mci->num_cschannel = tot_channels;
387 mci->csbased = per_rank;
389 if (edac_mc_alloc_csrows(mci))
392 if (edac_mc_alloc_dimms(mci))
395 mci->op_state = OP_ALLOC;
397 return mci;
400 _edac_mc_free(mci);
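
Typical use of edac_mc_alloc() by a driver, as a minimal sketch assuming a two-layer chip-select-by-channel geometry (nr_csrows, num_chans, and struct my_pvt are placeholder names, not part of edac_mc.c):

	struct edac_mc_layer layers[2];
	struct mem_ctl_info *mci;

	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = nr_csrows;		/* placeholder row count */
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = num_chans;		/* placeholder channel count */
	layers[1].is_virt_csrow = false;

	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
			    sizeof(struct my_pvt));	/* sz_pvt becomes mci->pvt_info */
	if (!mci)
		return -ENOMEM;
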
406 void edac_mc_free(struct mem_ctl_info *mci)
410 _edac_mc_free(mci);
431 struct mem_ctl_info *mci;
437 mci = list_entry(item, struct mem_ctl_info, link);
439 if (mci->pdev == dev)
440 return mci;
472 struct mem_ctl_info *mci = to_edac_mem_ctl_work(d_work);
476 if (mci->op_state != OP_RUNNING_POLL) {
482 mci->edac_check(mci);
487 edac_queue_work(&mci->work, msecs_to_jiffies(edac_mc_get_poll_msec()));
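
The worker above fires only while mci->op_state is OP_RUNNING_POLL, i.e. for controllers that registered an edac_check callback instead of an error interrupt. A sketch of such a callback (my_scan_hw_errors is a hypothetical driver helper):

	static void my_edac_check(struct mem_ctl_info *mci)
	{
		/* hypothetical: read the chip's error registers and
		 * report anything found via edac_mc_handle_error() */
		my_scan_hw_errors(mci);
	}

	mci->edac_check = my_edac_check;	/* set before registering the mci */
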
498 struct mem_ctl_info *mci;
504 mci = list_entry(item, struct mem_ctl_info, link);
506 if (mci->op_state == OP_RUNNING_POLL)
507 edac_mod_work(&mci->work, value);
516 * assign a unique value to mci->mc_idx.
522 static int add_mc_to_global_list(struct mem_ctl_info *mci)
529 p = __find_mci_by_dev(mci->pdev);
536 if (p->mc_idx >= mci->mc_idx) {
537 if (unlikely(p->mc_idx == mci->mc_idx))
545 list_add_tail_rcu(&mci->link, insert_before);
551 edac_dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx);
561 static int del_mc_from_global_list(struct mem_ctl_info *mci)
563 list_del_rcu(&mci->link);
569 INIT_LIST_HEAD(&mci->link);
576 struct mem_ctl_info *mci;
582 mci = list_entry(item, struct mem_ctl_info, link);
583 if (mci->mc_idx == idx)
587 mci = NULL;
590 return mci;
601 int edac_mc_add_mc_with_groups(struct mem_ctl_info *mci,
609 edac_mc_dump_mci(mci);
615 for (i = 0; i < mci->nr_csrows; i++) {
616 struct csrow_info *csrow = mci->csrows[i];
630 mci_for_each_dimm(mci, dimm)
636 if (edac_mc_owner && edac_mc_owner != mci->mod_name) {
641 if (add_mc_to_global_list(mci))
645 mci->start_time = jiffies;
647 mci->bus = edac_get_sysfs_subsys();
649 if (edac_create_sysfs_mci_device(mci, groups)) {
650 edac_mc_printk(mci, KERN_WARNING,
655 if (mci->edac_check) {
656 mci->op_state = OP_RUNNING_POLL;
658 INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
659 edac_queue_work(&mci->work, msecs_to_jiffies(edac_mc_get_poll_msec()));
662 mci->op_state = OP_RUNNING_INTERRUPT;
666 edac_mc_printk(mci, KERN_INFO,
668 mci->mod_name, mci->ctl_name, mci->dev_name,
669 edac_op_state_to_string(mci->op_state));
671 edac_mc_owner = mci->mod_name;
677 del_mc_from_global_list(mci);
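
Registration from a driver probe path, as a minimal sketch; edac_mc_add_mc() is the NULL-groups wrapper around edac_mc_add_mc_with_groups(), and "my_edac"/"my_ctl" are placeholder names:

	mci->pdev = &pdev->dev;		/* key looked up later by edac_mc_del_mc() */
	mci->mod_name = "my_edac";
	mci->ctl_name = "my_ctl";
	mci->edac_check = my_edac_check;	/* omit if interrupt driven */

	if (edac_mc_add_mc(mci)) {
		edac_mc_free(mci);
		return -ENODEV;
	}
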
687 struct mem_ctl_info *mci;
693 /* find the requested mci struct in the global list */
694 mci = __find_mci_by_dev(dev);
695 if (mci == NULL) {
701 mci->op_state = OP_OFFLINE;
703 if (del_mc_from_global_list(mci))
708 if (mci->edac_check)
709 edac_stop_work(&mci->work);
712 edac_remove_sysfs_mci_device(mci);
715 "Removed device %d for %s %s: DEV %s\n", mci->mc_idx,
716 mci->mod_name, mci->ctl_name, edac_dev_name(mci));
718 return mci;
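
The matching teardown in a driver remove path, as a sketch:

	struct mem_ctl_info *mci;

	mci = edac_mc_del_mc(&pdev->dev);	/* stops polling, removes sysfs */
	if (mci)
		edac_mc_free(mci);		/* drops the final device reference */
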
754 int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
756 struct csrow_info **csrows = mci->csrows;
759 edac_dbg(1, "MC%d: 0x%lx\n", mci->mc_idx, page);
762 for (i = 0; i < mci->nr_csrows; i++) {
773 mci->mc_idx,
787 edac_mc_printk(mci, KERN_ERR,
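
A sketch of how a driver maps a decoded physical address to a chip-select row with this helper; phys is an assumed address from the driver's own decode logic, and the function returns -1 when no csrow claims the page:

	int row = edac_mc_find_csrow_by_page(mci, phys >> PAGE_SHIFT);
	if (row < 0)
		return;		/* -1: address not backed by any csrow */
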
807 struct mem_ctl_info *mci = error_desc_to_mci(e);
808 struct dimm_info *dimm = edac_get_dimm(mci, pos[0], pos[1], pos[2]);
810 mci->ce_mc += e->error_count;
815 mci->ce_noinfo_count += e->error_count;
821 struct mem_ctl_info *mci = error_desc_to_mci(e);
822 struct dimm_info *dimm = edac_get_dimm(mci, pos[0], pos[1], pos[2]);
824 mci->ue_mc += e->error_count;
829 mci->ue_noinfo_count += e->error_count;
834 struct mem_ctl_info *mci = error_desc_to_mci(e);
838 edac_mc_printk(mci, KERN_WARNING,
850 if (mci->scrub_mode == SCRUB_SW_SRC) {
862 remapped_page = mci->ctl_page_to_phys ?
863 mci->ctl_page_to_phys(mci, e->page_frame_number) :
872 struct mem_ctl_info *mci = error_desc_to_mci(e);
875 edac_mc_printk(mci, KERN_WARNING,
900 struct mem_ctl_info *mci = error_desc_to_mci(e);
910 mci->csrows[row]->ce_count += count;
912 mci->csrows[row]->channels[chan]->ce_count += count;
914 mci->csrows[row]->ue_count += count;
920 struct mem_ctl_info *mci = error_desc_to_mci(e);
932 mci->mc_idx, e->top_layer, e->mid_layer,
945 struct mem_ctl_info *mci,
961 struct edac_raw_error_desc *e = &mci->error_desc;
965 edac_dbg(3, "MC%d\n", mci->mc_idx);
986 for (i = 0; i < mci->n_layers; i++) {
987 if (pos[i] >= (int)mci->layers[i].size) {
989 edac_mc_printk(mci, KERN_ERR,
991 edac_layer_name[mci->layers[i].type],
992 pos[i], mci->layers[i].size);
1021 mci_for_each_dimm(mci, dimm) {
1056 mci->csbased ? "rank" : "dimm",
1081 for (i = 0; i < mci->n_layers; i++) {
1086 edac_layer_name[mci->layers[i].type], pos[i]);
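
Reporting an error through the path above, as a minimal sketch; pfn, offset, syndrome, csrow, and channel are placeholders for whatever the hardware decoder yields:

	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
			     1,				/* error_count */
			     pfn, offset, syndrome,
			     csrow, channel, -1,	/* layer positions */
			     "single-bit error", "");	/* msg, other_detail */
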