/netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/edac/

Lines Matching refs:pvt

158 	struct amd64_pvt *pvt = mci->pvt_info;
176 return amd64_search_set_scrub_rate(pvt->misc_f3_ctl, bandwidth,
182 struct amd64_pvt *pvt = mci->pvt_info;
186 amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_SCRCTRL, &scrubval);
205 static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow)
207 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F)
214 static u32 amd64_get_dct_base(struct amd64_pvt *pvt, int dct, int csrow)
217 return pvt->dcsb0[csrow];
219 return pvt->dcsb1[csrow];
227 static u32 amd64_get_dct_mask(struct amd64_pvt *pvt, int dct, int csrow)
230 return pvt->dcsm0[amd64_map_to_dcs_mask(pvt, csrow)];
232 return pvt->dcsm1[amd64_map_to_dcs_mask(pvt, csrow)];
244 static void amd64_get_base_and_limit(struct amd64_pvt *pvt, int node_id,
247 *base = pvt->dram_base[node_id];
248 *limit = pvt->dram_limit[node_id];
255 static int amd64_base_limit_match(struct amd64_pvt *pvt,
260 amd64_get_base_and_limit(pvt, node_id, &base, &limit);
282 struct amd64_pvt *pvt;
290 pvt = mci->pvt_info;
297 intlv_en = pvt->dram_IntlvEn[0];
301 if (amd64_base_limit_match(pvt, sys_addr, node_id))
319 if ((pvt->dram_IntlvSel[node_id] & intlv_en) == bits)
327 if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
349 static u64 base_from_dct_base(struct amd64_pvt *pvt, int csrow)
351 return ((u64) (amd64_get_dct_base(pvt, 0, csrow) & pvt->dcsb_base)) <<
352 pvt->dcs_shift;
358 static u64 mask_from_dct_mask(struct amd64_pvt *pvt, int csrow)
364 dcsm_bits = amd64_get_dct_mask(pvt, 0, csrow) & pvt->dcsm_mask;
366 other_bits = pvt->dcsm_mask;
367 other_bits = ~(other_bits << pvt->dcs_shift);
373 mask = (dcsm_bits << pvt->dcs_shift) | other_bits;
384 struct amd64_pvt *pvt;
388 pvt = mci->pvt_info;
395 for (csrow = 0; csrow < pvt->cs_count; csrow++) {
398 if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0)
401 base = base_from_dct_base(pvt, csrow);
402 mask = ~mask_from_dct_mask(pvt, csrow);
407 pvt->mc_node_id);
414 (unsigned long)input_addr, pvt->mc_node_id);
427 struct amd64_pvt *pvt = mci->pvt_info;
429 return pvt->dram_base[pvt->mc_node_id];
451 struct amd64_pvt *pvt = mci->pvt_info;
455 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_E) {
457 pvt->ext_model, pvt->mc_node_id);
463 (pvt->dhar & F10_DRAM_MEM_HOIST_VALID) == 0) {
468 if ((pvt->dhar & DHAR_VALID) == 0) {
470 pvt->mc_node_id);
492 base = dhar_base(pvt->dhar);
498 *hole_offset = f10_dhar_offset(pvt->dhar);
500 *hole_offset = k8_dhar_offset(pvt->dhar);
503 pvt->mc_node_id, (unsigned long)*hole_base,
598 struct amd64_pvt *pvt;
602 pvt = mci->pvt_info;
608 intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]);
643 struct amd64_pvt *pvt;
656 pvt = mci->pvt_info;
657 node_id = pvt->mc_node_id;
660 intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]);
672 intlv_sel = pvt->dram_IntlvSel[node_id] & ((1 << intlv_shift) - 1);
688 struct amd64_pvt *pvt = mci->pvt_info;
707 amd64_get_base_and_limit(pvt, pvt->mc_node_id, &base, &limit);
724 pvt->mc_node_id, (unsigned long)dram_addr,
748 struct amd64_pvt *pvt;
751 pvt = mci->pvt_info;
752 BUG_ON((csrow < 0) || (csrow >= pvt->cs_count));
754 base = base_from_dct_base(pvt, csrow);
755 mask = mask_from_dct_mask(pvt, csrow);
758 *input_addr_max = base | mask | pvt->dcs_mask_notused;
797 static void amd64_cpu_display_info(struct amd64_pvt *pvt)
805 (pvt->ext_model >= K8_REV_F) ?
816 static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt)
821 bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F)
825 if (pvt->dclr0 & BIT(bit))
832 static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt);
856 static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
860 debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
863 (pvt->nbcap & K8_NBCAP_DCT_DUAL) ? "yes" : "no");
866 (pvt->nbcap & K8_NBCAP_SECDED) ? "yes" : "no",
867 (pvt->nbcap & K8_NBCAP_CHIPKILL) ? "yes" : "no");
869 amd64_dump_dramcfg_low(pvt->dclr0, 0);
871 debugf1("F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
875 pvt->dhar,
876 dhar_base(pvt->dhar),
877 (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt->dhar)
878 : f10_dhar_offset(pvt->dhar));
881 (pvt->dhar & DHAR_VALID) ? "yes" : "no");
885 amd64_debug_display_dimm_sizes(0, pvt);
890 ((pvt->syn_type == 8) ? "x8" : "x4"));
893 if (!dct_ganging_enabled(pvt))
894 amd64_dump_dramcfg_low(pvt->dclr1, 1);
900 ganged = dct_ganging_enabled(pvt);
902 amd64_debug_display_dimm_sizes(0, pvt);
905 amd64_debug_display_dimm_sizes(1, pvt);
909 static void amd64_read_dbam_reg(struct amd64_pvt *pvt)
911 amd64_read_pci_cfg(pvt->dram_f2_ctl, DBAM0, &pvt->dbam0);
914 amd64_read_pci_cfg(pvt->dram_f2_ctl, DBAM1, &pvt->dbam1);
946 static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt)
949 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
950 pvt->dcsb_base = REV_E_DCSB_BASE_BITS;
951 pvt->dcsm_mask = REV_E_DCSM_MASK_BITS;
952 pvt->dcs_mask_notused = REV_E_DCS_NOTUSED_BITS;
953 pvt->dcs_shift = REV_E_DCS_SHIFT;
954 pvt->cs_count = 8;
955 pvt->num_dcsm = 8;
957 pvt->dcsb_base = REV_F_F1Xh_DCSB_BASE_BITS;
958 pvt->dcsm_mask = REV_F_F1Xh_DCSM_MASK_BITS;
959 pvt->dcs_mask_notused = REV_F_F1Xh_DCS_NOTUSED_BITS;
960 pvt->dcs_shift = REV_F_F1Xh_DCS_SHIFT;
963 pvt->cs_count = 4;
964 pvt->num_dcsm = 2;
966 pvt->cs_count = 8;
967 pvt->num_dcsm = 4;
975 static void amd64_read_dct_base_mask(struct amd64_pvt *pvt)
979 amd64_set_dct_base_and_mask(pvt);
981 for (cs = 0; cs < pvt->cs_count; cs++) {
983 if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg, &pvt->dcsb0[cs]))
985 cs, pvt->dcsb0[cs], reg);
988 if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
990 if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg,
991 &pvt->dcsb1[cs]))
993 cs, pvt->dcsb1[cs], reg);
995 pvt->dcsb1[cs] = 0;
999 for (cs = 0; cs < pvt->num_dcsm; cs++) {
1001 if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg, &pvt->dcsm0[cs]))
1003 cs, pvt->dcsm0[cs], reg);
1006 if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
1008 if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg,
1009 &pvt->dcsm1[cs]))
1011 cs, pvt->dcsm1[cs], reg);
1013 pvt->dcsm1[cs] = 0;
1018 static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt)
1022 if (boot_cpu_data.x86 >= 0x10 || pvt->ext_model >= K8_REV_F) {
1023 if (pvt->dchr0 & DDR3_MODE)
1024 type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
1026 type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
1028 type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
1045 static int k8_early_channel_count(struct amd64_pvt *pvt)
1049 err = amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
1055 flag = pvt->dclr0 & F10_WIDTH_128;
1058 flag = pvt->dclr0 & REVE_WIDTH_128;
1062 pvt->dclr1 = 0;
1081 static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
1086 amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DRAM_BASE_LOW + off, &low);
1089 pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 8;
1090 pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7;
1091 pvt->dram_rw_en[dram] = (low & 0x3);
1093 amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DRAM_LIMIT_LOW + off, &low);
1099 pvt->dram_limit[dram] = (((u64) low & 0xFFFF0000) << 8) | 0x00FFFFFF;
1100 pvt->dram_IntlvSel[dram] = (low >> 8) & 0x7;
1101 pvt->dram_DstNode[dram] = (low & 0x7);
1166 static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
1170 if (pvt->ext_model >= K8_REV_F)
1172 else if (pvt->ext_model >= K8_REV_D)
1188 static int f10_early_channel_count(struct amd64_pvt *pvt)
1195 if (pvt->dclr0 & F10_WIDTH_128) {
1216 if (amd64_read_pci_cfg(pvt->dram_f2_ctl, dbams[i], &dbam))
1239 static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
1243 if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
1252 static void amd64_setup(struct amd64_pvt *pvt)
1256 amd64_read_pci_cfg(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, &reg);
1258 pvt->flags.cf8_extcfg = !!(reg & F10_NB_CFG_LOW_ENABLE_EXT_CFG);
1260 pci_write_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, reg);
1264 static void amd64_teardown(struct amd64_pvt *pvt)
1268 amd64_read_pci_cfg(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, &reg);
1271 if (pvt->flags.cf8_extcfg)
1273 pci_write_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, reg);
1289 static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
1297 amd64_read_pci_cfg(pvt->addr_f1_ctl, low_offset, &low_base);
1300 amd64_read_pci_cfg(pvt->addr_f1_ctl, high_offset, &high_base);
1303 pvt->dram_rw_en[dram] = (low_base & 0x3);
1305 if (pvt->dram_rw_en[dram] == 0)
1308 pvt->dram_IntlvEn[dram] = (low_base >> 8) & 0x7;
1310 pvt->dram_base[dram] = (((u64)high_base & 0x000000FF) << 40) |
1317 amd64_read_pci_cfg(pvt->addr_f1_ctl, low_offset, &low_limit);
1320 amd64_read_pci_cfg(pvt->addr_f1_ctl, high_offset, &high_limit);
1322 pvt->dram_DstNode[dram] = (low_limit & 0x7);
1323 pvt->dram_IntlvSel[dram] = (low_limit >> 8) & 0x7;
1329 pvt->dram_limit[dram] = (((u64)high_limit & 0x000000FF) << 40) |
1334 static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
1337 if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCTL_SEL_LOW,
1338 &pvt->dram_ctl_select_low)) {
1341 pvt->dram_ctl_select_low,
1342 dct_sel_baseaddr(pvt));
1345 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"),
1346 (dct_dram_enabled(pvt) ? "yes" : "no"));
1348 if (!dct_ganging_enabled(pvt))
1350 (dct_high_range_enabled(pvt) ? "yes" : "no"));
1354 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
1355 (dct_memory_cleared(pvt) ? "yes" : "no"));
1359 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
1360 dct_sel_interleave_addr(pvt));
1363 amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCTL_SEL_HIGH,
1364 &pvt->dram_ctl_select_high);
1371 static u32 f10_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1374 u32 cs, temp, dct_sel_high = (pvt->dram_ctl_select_low >> 1) & 1;
1376 if (dct_ganging_enabled(pvt))
1380 else if (dct_interleave_enabled(pvt)) {
1384 if (dct_sel_interleave_addr(pvt) == 0)
1386 else if ((dct_sel_interleave_addr(pvt) >> 1) & 1) {
1389 if (dct_sel_interleave_addr(pvt) & 1)
1401 } else if (dct_high_range_enabled(pvt) && !dct_ganging_enabled(pvt))
1456 u32 cs, struct amd64_pvt *pvt)
1463 swap_done = F10_ONLINE_SPARE_SWAPDONE1(pvt->online_spare);
1464 bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS1(pvt->online_spare);
1468 swap_done = F10_ONLINE_SPARE_SWAPDONE0(pvt->online_spare);
1469 bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS0(pvt->online_spare);
1487 struct amd64_pvt *pvt;
1496 pvt = mci->pvt_info;
1500 for (csrow = 0; csrow < pvt->cs_count; csrow++) {
1502 cs_base = amd64_get_dct_base(pvt, cs, csrow);
1517 cs_mask = amd64_get_dct_mask(pvt, cs, csrow);
1530 cs_found = f10_process_possible_spare(csrow, cs, pvt);
1540 static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range,
1548 dram_base = pvt->dram_base[dram_range];
1549 intlv_en = pvt->dram_IntlvEn[dram_range];
1551 node_id = pvt->dram_DstNode[dram_range];
1552 intlv_sel = pvt->dram_IntlvSel[dram_range];
1555 dram_range, dram_base, sys_addr, pvt->dram_limit[dram_range]);
1561 hole_off = (pvt->dhar & 0x0000FF80);
1562 hole_valid = (pvt->dhar & 0x1);
1563 dct_sel_base_off = (pvt->dram_ctl_select_high & 0xFFFFFC00) << 16;
1572 dct_sel_base = dct_sel_baseaddr(pvt);
1578 if (dct_high_range_enabled(pvt) &&
1579 !dct_ganging_enabled(pvt) &&
1583 channel = f10_determine_channel(pvt, sys_addr, high_range, intlv_en);
1597 if (dct_interleave_enabled(pvt) &&
1598 !dct_high_range_enabled(pvt) &&
1599 !dct_ganging_enabled(pvt)) {
1600 if (dct_sel_interleave_addr(pvt) != 1)
1621 static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
1629 if (!pvt->dram_rw_en[dram_range])
1632 dram_base = pvt->dram_base[dram_range];
1633 dram_limit = pvt->dram_limit[dram_range];
1637 cs_found = f10_match_to_this_node(pvt, dram_range,
1658 struct amd64_pvt *pvt = mci->pvt_info;
1663 csrow = f10_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
1679 if (dct_ganging_enabled(pvt) && (pvt->nbcfg & K8_NBCFG_CHIPKILL))
1698 static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
1705 if (pvt->dclr0 & F10_WIDTH_128)
1709 if (pvt->ext_model < K8_REV_F)
1716 ctrl, ctrl ? pvt->dbam1 : pvt->dbam0);
1718 dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
1719 dcsb = ctrl ? pvt->dcsb1 : pvt->dcsb0;
1728 size0 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));
1732 size1 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));
1954 struct amd64_pvt *pvt = mci->pvt_info;
1957 if (pvt->syn_type == 8)
1960 pvt->syn_type);
1961 else if (pvt->syn_type == 4)
1964 pvt->syn_type);
1967 __func__, pvt->syn_type);
1971 return map_err_sym_to_channel(err_sym, pvt->syn_type);
1981 struct amd64_pvt *pvt = mci->pvt_info;
1992 sys_addr = pvt->ops->get_error_address(mci, info);
1997 pvt->ops->map_sysaddr_to_csrow(mci, info, sys_addr);
2004 struct amd64_pvt *pvt = mci->pvt_info;
2019 sys_addr = pvt->ops->get_error_address(mci, info);
2082 * 1) struct amd64_pvt which contains pvt->dram_f2_ctl pointer
2088 * struct pvt->addr_f1_ctl
2089 * struct pvt->misc_f3_ctl
2098 static int amd64_reserve_mc_sibling_devices(struct amd64_pvt *pvt, int mc_idx)
2103 pvt->addr_f1_ctl = pci_get_related_function(pvt->dram_f2_ctl->vendor,
2105 pvt->dram_f2_ctl);
2107 if (!pvt->addr_f1_ctl) {
2115 pvt->misc_f3_ctl = pci_get_related_function(pvt->dram_f2_ctl->vendor,
2117 pvt->dram_f2_ctl);
2119 if (!pvt->misc_f3_ctl) {
2120 pci_dev_put(pvt->addr_f1_ctl);
2121 pvt->addr_f1_ctl = NULL;
2130 pci_name(pvt->addr_f1_ctl));
2132 pci_name(pvt->dram_f2_ctl));
2134 pci_name(pvt->misc_f3_ctl));
2139 static void amd64_free_mc_sibling_devices(struct amd64_pvt *pvt)
2141 pci_dev_put(pvt->addr_f1_ctl);
2142 pci_dev_put(pvt->misc_f3_ctl);
2149 static void amd64_read_mc_registers(struct amd64_pvt *pvt)
2159 rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
2160 debugf0(" TOP_MEM: 0x%016llx\n", pvt->top_mem);
2165 rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
2166 debugf0(" TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
2170 amd64_cpu_display_info(pvt);
2172 amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCAP, &pvt->nbcap);
2174 if (pvt->ops->read_dram_ctl_register)
2175 pvt->ops->read_dram_ctl_register(pvt);
2182 pvt->ops->read_dram_base_limit(pvt, dram);
2189 if (pvt->dram_rw_en[dram] != 0) {
2193 pvt->dram_base[dram],
2194 pvt->dram_limit[dram]);
2198 pvt->dram_IntlvEn[dram] ?
2200 (pvt->dram_rw_en[dram] & 0x2) ? "W" : "!W",
2201 (pvt->dram_rw_en[dram] & 0x1) ? "R" : "!R",
2202 pvt->dram_IntlvSel[dram],
2203 pvt->dram_DstNode[dram]);
2207 amd64_read_dct_base_mask(pvt);
2209 amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DHAR, &pvt->dhar);
2210 amd64_read_dbam_reg(pvt);
2212 amd64_read_pci_cfg(pvt->misc_f3_ctl,
2213 F10_ONLINE_SPARE, &pvt->online_spare);
2215 amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
2216 amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCHR_0, &pvt->dchr0);
2219 if (!dct_ganging_enabled(pvt)) {
2220 amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_1, &pvt->dclr1);
2221 amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCHR_1, &pvt->dchr1);
2223 amd64_read_pci_cfg(pvt->misc_f3_ctl, EXT_NB_MCA_CFG, &tmp);
2230 pvt->syn_type = 8;
2232 pvt->syn_type = 4;
2234 amd64_dump_misc_regs(pvt);
2241 * @csrow_nr ChipSelect Row Number (0..pvt->cs_count-1)
2271 static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt)
2282 cs_mode = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF;
2284 nr_pages = pvt->ops->dbam_to_cs(pvt, cs_mode) << (20 - PAGE_SHIFT);
2290 nr_pages <<= (pvt->channel_count - 1);
2294 nr_pages, pvt->channel_count);
2306 struct amd64_pvt *pvt;
2310 pvt = mci->pvt_info;
2312 amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &pvt->nbcfg);
2314 debugf0("NBCFG= 0x%x CHIPKILL= %s DRAM ECC= %s\n", pvt->nbcfg,
2315 (pvt->nbcfg & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
2316 (pvt->nbcfg & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled"
2319 for (i = 0; i < pvt->cs_count; i++) {
2322 if ((pvt->dcsb0[i] & K8_DCSB_CS_ENABLE) == 0) {
2324 pvt->mc_node_id);
2329 i, pvt->mc_node_id);
2332 csrow->nr_pages = amd64_csrow_nr_pages(i, pvt);
2338 csrow->page_mask = ~mask_from_dct_mask(pvt, i);
2341 csrow->mtype = amd64_determine_memory_type(pvt);
2343 debugf1(" for MC node %d csrow %d:\n", pvt->mc_node_id, i);
2357 if (pvt->nbcfg & K8_NBCFG_ECC_ENABLE)
2359 (pvt->nbcfg & K8_NBCFG_CHIPKILL) ?
2413 static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on)
2424 get_cpus_on_this_dct_cpumask(cmask, pvt->mc_node_id);
2434 pvt->flags.nb_mce_enable = 1;
2441 if (!pvt->flags.nb_mce_enable)
2454 struct amd64_pvt *pvt = mci->pvt_info;
2457 amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCTL, &value);
2460 pvt->old_nbctl = value & mask;
2461 pvt->nbctl_mcgctl_saved = 1;
2464 pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
2466 if (amd64_toggle_ecc_err_reporting(pvt, ON))
2470 amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value);
2481 pvt->flags.nb_ecc_prev = 0;
2485 pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCFG, value);
2487 amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value);
2498 pvt->flags.nb_ecc_prev = 1;
2505 pvt->ctl_error_info.nbcfg = value;
2508 static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
2512 if (!pvt->nbctl_mcgctl_saved)
2515 amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCTL, &value);
2517 value |= pvt->old_nbctl;
2519 pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
2522 if (!pvt->flags.nb_ecc_prev) {
2523 amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value);
2525 pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCFG, value);
2529 if (amd64_toggle_ecc_err_reporting(pvt, OFF))
2545 static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
2551 amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value);
2557 K8_NBCFG, pci_name(pvt->misc_f3_ctl));
2561 nb_mce_en = amd64_nb_mce_bank_enabled_on_node(pvt->mc_node_id);
2565 MSR_IA32_MCG_CTL, pvt->mc_node_id);
2602 struct amd64_pvt *pvt = mci->pvt_info;
2607 if (pvt->nbcap & K8_NBCAP_SECDED)
2610 if (pvt->nbcap & K8_NBCAP_CHIPKILL)
2613 mci->edac_cap = amd64_determine_edac_cap(pvt);
2616 mci->ctl_name = get_amd_family_name(pvt->mc_type_index);
2617 mci->dev_name = pci_name(pvt->dram_f2_ctl);
2640 struct amd64_pvt *pvt = NULL;
2644 pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
2645 if (!pvt)
2648 pvt->mc_node_id = get_node_id(dram_f2_ctl);
2650 pvt->dram_f2_ctl = dram_f2_ctl;
2651 pvt->ext_model = boot_cpu_data.x86_model >> 4;
2652 pvt->mc_type_index = mc_type_index;
2653 pvt->ops = family_ops(mc_type_index);
2660 err = amd64_reserve_mc_sibling_devices(pvt, mc_type_index);
2665 err = amd64_check_ecc_enabled(pvt);
2675 amd64_setup(pvt);
2681 pvt_lookup[pvt->mc_node_id] = pvt;
2686 amd64_free_mc_sibling_devices(pvt);
2689 kfree(pvt);
2699 static int amd64_init_2nd_stage(struct amd64_pvt *pvt)
2701 int node_id = pvt->mc_node_id;
2705 amd64_read_mc_registers(pvt);
2712 pvt->channel_count = pvt->ops->early_channel_count(pvt);
2713 if (pvt->channel_count < 0)
2717 mci = edac_mc_alloc(0, pvt->cs_count, pvt->channel_count, node_id);
2721 mci->pvt_info = pvt;
2723 mci->dev = &pvt->dram_f2_ctl->dev;
2755 amd64_restore_ecc_error_reporting(pvt);
2758 amd64_teardown(pvt);
2760 amd64_free_mc_sibling_devices(pvt);
2762 kfree(pvt_lookup[pvt->mc_node_id]);
2792 struct amd64_pvt *pvt;
2799 pvt = mci->pvt_info;
2801 amd64_restore_ecc_error_reporting(pvt);
2804 amd64_teardown(pvt);
2806 amd64_free_mc_sibling_devices(pvt);
2814 mci_lookup[pvt->mc_node_id] = NULL;
2816 kfree(pvt);
2867 struct amd64_pvt *pvt;
2875 pvt = mci->pvt_info;
2877 edac_pci_create_generic_ctl(&pvt->dram_f2_ctl->dev,
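
The matches above share one ascending sequence of source line numbers and amd64_* identifiers, so they evidently come from a single file in this directory, drivers/edac/amd64_edac.c of the bundled linux-2.6.36 tree. Every hit dereferences the driver's per-memory-controller private data, struct amd64_pvt. As a reading aid, here is a partial sketch of that structure reconstructed only from the fields referenced in this listing; it is not the authoritative definition (that lives in drivers/edac/amd64_edac.h), the SKETCH_* array sizes are placeholders, and the member type names low_ops and err_regs are assumed, since the matches show only the pointer/member uses. Kernel types (u32, struct pci_dev, ...) are taken as given.

/*
 * Partial reconstruction of struct amd64_pvt, inferred from the field
 * references in the matches above. Placeholder sizes stand in for the
 * real constants defined in amd64_edac.h.
 */
#define SKETCH_CS_COUNT   8     /* placeholder: max chip-select rows */
#define SKETCH_DRAM_REGS  8     /* placeholder: DRAM base/limit register pairs */

struct amd64_pvt {
	/* Northbridge PCI functions this node is driven through */
	struct pci_dev *addr_f1_ctl;    /* F1: DRAM address map (base/limit) */
	struct pci_dev *dram_f2_ctl;    /* F2: DRAM controller (DCT) */
	struct pci_dev *misc_f3_ctl;    /* F3: misc control, scrubber, MCA */

	int mc_node_id;                 /* node/MC index served by this instance */
	int ext_model;                  /* extended model; gates K8 rev D/E/F paths */
	u32 mc_type_index;              /* family table index (K8 vs F10h) */
	struct low_ops *ops;            /* per-family ops: dbam_to_cs(), ... (type name assumed) */
	int channel_count;

	/* Raw register snapshots taken in amd64_read_mc_registers() */
	u32 dclr0, dclr1;               /* DRAM Configuration Low, DCT0/DCT1 */
	u32 dchr0, dchr1;               /* DRAM Configuration High, DCT0/DCT1 */
	u32 nbcap;                      /* F3xE8 NB Capabilities */
	u32 nbcfg;                      /* NB Configuration */
	u32 dhar;                       /* DRAM Hole Address Register */
	u32 dbam0, dbam1;               /* DRAM Base Address Mapping, DCT0/DCT1 */
	u32 online_spare;               /* F3xB0 On-Line Spare */
	u32 dram_ctl_select_low;        /* DCT Select Low */
	u32 dram_ctl_select_high;       /* DCT Select High */

	/* Chip-select base/mask registers, one set per DCT */
	u32 dcsb0[SKETCH_CS_COUNT], dcsb1[SKETCH_CS_COUNT];
	u32 dcsm0[SKETCH_CS_COUNT], dcsm1[SKETCH_CS_COUNT];

	/* Decoded DRAM base/limit ranges, one entry per node */
	u64 dram_base[SKETCH_DRAM_REGS];
	u64 dram_limit[SKETCH_DRAM_REGS];
	u8  dram_IntlvSel[SKETCH_DRAM_REGS];
	u8  dram_IntlvEn[SKETCH_DRAM_REGS];
	u8  dram_DstNode[SKETCH_DRAM_REGS];
	u8  dram_rw_en[SKETCH_DRAM_REGS];

	/* Revision-dependent DCS geometry, set in amd64_set_dct_base_and_mask() */
	u32 dcsb_base, dcsm_mask, dcs_mask_notused, dcs_shift;
	int cs_count;                   /* chip-select rows: 4 or 8 */
	u32 num_dcsm;                   /* DCSM registers: 2, 4, or 8 */

	u64 top_mem, top_mem2;          /* MSR_K8_TOP_MEM1 / MSR_K8_TOP_MEM2 */
	u8  syn_type;                   /* x4 (4) or x8 (8) ECC syndromes */

	struct err_regs ctl_error_info; /* saved NBCFG (type name assumed) */
	u32 old_nbctl;                  /* NBCTL bits saved before enabling ECC */
	u32 nbctl_mcgctl_saved;         /* nonzero once old_nbctl is valid */

	struct {
		unsigned long cf8_extcfg:1;     /* extended PCI cfg was toggled */
		unsigned long nb_mce_enable:1;  /* NB MCE reporting was enabled */
		unsigned long nb_ecc_prev:1;    /* ECC already on in NBCFG */
	} flags;
};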