/*
 * Intel 5100 Memory Controllers kernel module
 *
 * This file may be distributed under the terms of the
 * GNU General Public License.
 *
 * This module is based on the following document:
 *
 *   Intel 5100X Chipset Memory Controller Hub (MCH) - Datasheet
 *   http://download.intel.com/design/chipsets/datashts/318378.pdf
 *
 * The Intel 5100 has two independent channels. The EDAC core currently
 * cannot reflect this configuration, so the chip-select rows for the two
 * channels are instead laid out one after another: the first half belongs
 * to channel 0, the second half to channel 1.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/edac.h>
#include <linux/delay.h>
#include <linux/mmzone.h>

#include "edac_core.h"

/* register addresses */

/* device 16, func 1 */
#define I5100_MC                0x40    /* Memory Control Register */
#define         I5100_MC_SCRBEN_MASK    (1 << 7)
#define         I5100_MC_SCRBDONE_MASK  (1 << 4)
#define I5100_MS                0x44    /* Memory Status Register */
#define I5100_SPDDATA           0x48    /* Serial Presence Detect Status Reg */
#define I5100_SPDCMD            0x4c    /* Serial Presence Detect Command Reg */
#define I5100_TOLM              0x6c    /* Top of Low Memory */
#define I5100_MIR0              0x80    /* Memory Interleave Range 0 */
#define I5100_MIR1              0x84    /* Memory Interleave Range 1 */
#define I5100_AMIR_0            0x8c    /* Adjusted Memory Interleave Range 0 */
#define I5100_AMIR_1            0x90    /* Adjusted Memory Interleave Range 1 */
#define I5100_FERR_NF_MEM       0xa0    /* MC First Non Fatal Errors */
#define         I5100_FERR_NF_MEM_M16ERR_MASK   (1 << 16)
#define         I5100_FERR_NF_MEM_M15ERR_MASK   (1 << 15)
#define         I5100_FERR_NF_MEM_M14ERR_MASK   (1 << 14)
#define         I5100_FERR_NF_MEM_M12ERR_MASK   (1 << 12)
#define         I5100_FERR_NF_MEM_M11ERR_MASK   (1 << 11)
#define         I5100_FERR_NF_MEM_M10ERR_MASK   (1 << 10)
#define         I5100_FERR_NF_MEM_M6ERR_MASK    (1 << 6)
#define         I5100_FERR_NF_MEM_M5ERR_MASK    (1 << 5)
#define         I5100_FERR_NF_MEM_M4ERR_MASK    (1 << 4)
#define         I5100_FERR_NF_MEM_M1ERR_MASK    (1 << 1) /* MnERR lives at bit n */
#define         I5100_FERR_NF_MEM_ANY_MASK      \
                        (I5100_FERR_NF_MEM_M16ERR_MASK | \
                         I5100_FERR_NF_MEM_M15ERR_MASK | \
                         I5100_FERR_NF_MEM_M14ERR_MASK | \
                         I5100_FERR_NF_MEM_M12ERR_MASK | \
                         I5100_FERR_NF_MEM_M11ERR_MASK | \
                         I5100_FERR_NF_MEM_M10ERR_MASK | \
                         I5100_FERR_NF_MEM_M6ERR_MASK | \
                         I5100_FERR_NF_MEM_M5ERR_MASK | \
                         I5100_FERR_NF_MEM_M4ERR_MASK | \
                         I5100_FERR_NF_MEM_M1ERR_MASK)
#define I5100_NERR_NF_MEM       0xa4    /* MC Next Non-Fatal Errors */
#define I5100_EMASK_MEM         0xa8    /* MC Error Mask Register */
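
/*
 * Each MnERR bit above maps directly to index n of the merrs[] message
 * table decoded by i5100_err_msg() below; FERR_NF_MEM latches the first
 * non-fatal error observed, while NERR_NF_MEM records any subsequent ones.
 */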

/* device 21 and 22, func 0 */
#define I5100_MTR_0     0x154   /* Memory Technology Registers 0-3 */
#define I5100_DMIR      0x15c   /* DIMM Interleave Range */
#define I5100_VALIDLOG  0x18c   /* Valid Log Markers */
#define I5100_NRECMEMA  0x190   /* Non-Recoverable Memory Error Log Reg A */
#define I5100_NRECMEMB  0x194   /* Non-Recoverable Memory Error Log Reg B */
#define I5100_REDMEMA   0x198   /* Recoverable Memory Data Error Log Reg A */
#define I5100_REDMEMB   0x19c   /* Recoverable Memory Data Error Log Reg B */
#define I5100_RECMEMA   0x1a0   /* Recoverable Memory Error Log Reg A */
#define I5100_RECMEMB   0x1a4   /* Recoverable Memory Error Log Reg B */
#define I5100_MTR_4     0x1b0   /* Memory Technology Registers 4,5 */

/* bit field accessors */

static inline u32 i5100_mc_scrben(u32 mc)
{
        return mc >> 7 & 1;
}

static inline u32 i5100_mc_errdeten(u32 mc)
{
        return mc >> 5 & 1;
}

static inline u32 i5100_mc_scrbdone(u32 mc)
{
        return mc >> 4 & 1;
}

static inline u16 i5100_spddata_rdo(u16 a)
{
        return a >> 15 & 1;
}

static inline u16 i5100_spddata_sbe(u16 a)
{
        return a >> 13 & 1;
}

static inline u16 i5100_spddata_busy(u16 a)
{
        return a >> 12 & 1;
}

static inline u16 i5100_spddata_data(u16 a)
{
        return a & ((1 << 8) - 1);
}

static inline u32 i5100_spdcmd_create(u32 dti, u32 ckovrd, u32 sa, u32 ba,
                                      u32 data, u32 cmd)
{
        return ((dti & ((1 << 4) - 1)) << 28) |
               ((ckovrd & 1) << 27) |
               ((sa & ((1 << 3) - 1)) << 24) |
               ((ba & ((1 << 8) - 1)) << 16) |
               ((data & ((1 << 8) - 1)) << 8) |
               (cmd & 1);
}

static inline u16 i5100_tolm_tolm(u16 a)
{
        return a >> 12 & ((1 << 4) - 1);
}

static inline u16 i5100_mir_limit(u16 a)
{
        return a >> 4 & ((1 << 12) - 1);
}

static inline u16 i5100_mir_way1(u16 a)
{
        return a >> 1 & 1;
}

static inline u16 i5100_mir_way0(u16 a)
{
        return a & 1;
}

static inline u32 i5100_ferr_nf_mem_chan_indx(u32 a)
{
        return a >> 28 & 1;
}

static inline u32 i5100_ferr_nf_mem_any(u32 a)
{
        return a & I5100_FERR_NF_MEM_ANY_MASK;
}

static inline u32 i5100_nerr_nf_mem_any(u32 a)
{
        return i5100_ferr_nf_mem_any(a);
}

static inline u32 i5100_dmir_limit(u32 a)
{
        return a >> 16 & ((1 << 11) - 1);
}

static inline u32 i5100_dmir_rank(u32 a, u32 i)
{
        return a >> (4 * i) & ((1 << 2) - 1);
}

static inline u16 i5100_mtr_present(u16 a)
{
        return a >> 10 & 1;
}

static inline u16 i5100_mtr_ethrottle(u16 a)
{
        return a >> 9 & 1;
}

static inline u16 i5100_mtr_width(u16 a)
{
        return a >> 8 & 1;
}

static inline u16 i5100_mtr_numbank(u16 a)
{
        return a >> 6 & 1;
}

static inline u16 i5100_mtr_numrow(u16 a)
{
        return a >> 2 & ((1 << 2) - 1);
}

static inline u16 i5100_mtr_numcol(u16 a)
{
        return a & ((1 << 2) - 1);
}

static inline u32 i5100_validlog_redmemvalid(u32 a)
{
        return a >> 2 & 1;
}

static inline u32 i5100_validlog_recmemvalid(u32 a)
{
        return a >> 1 & 1;
}

static inline u32 i5100_validlog_nrecmemvalid(u32 a)
{
        return a & 1;
}

static inline u32 i5100_nrecmema_merr(u32 a)
{
        return a >> 15 & ((1 << 5) - 1);
}

static inline u32 i5100_nrecmema_bank(u32 a)
{
        return a >> 12 & ((1 << 3) - 1);
}

static inline u32 i5100_nrecmema_rank(u32 a)
{
        return a >> 8 & ((1 << 3) - 1);
}

static inline u32 i5100_nrecmema_dm_buf_id(u32 a)
{
        return a & ((1 << 8) - 1);
}

static inline u32 i5100_nrecmemb_cas(u32 a)
{
        return a >> 16 & ((1 << 13) - 1);
}

static inline u32 i5100_nrecmemb_ras(u32 a)
{
        return a & ((1 << 16) - 1);
}

static inline u32 i5100_redmemb_ecc_locator(u32 a)
{
        return a & ((1 << 18) - 1);
}
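
/*
 * The recoverable-error log registers (RECMEMA/B) share the field layout
 * of their non-recoverable counterparts, so the accessors below simply
 * alias the NRECMEMA/B decoders.
 */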

static inline u32 i5100_recmema_merr(u32 a)
{
        return i5100_nrecmema_merr(a);
}

static inline u32 i5100_recmema_bank(u32 a)
{
        return i5100_nrecmema_bank(a);
}

static inline u32 i5100_recmema_rank(u32 a)
{
        return i5100_nrecmema_rank(a);
}

static inline u32 i5100_recmema_dm_buf_id(u32 a)
{
        return i5100_nrecmema_dm_buf_id(a);
}

static inline u32 i5100_recmemb_cas(u32 a)
{
        return i5100_nrecmemb_cas(a);
}

static inline u32 i5100_recmemb_ras(u32 a)
{
        return i5100_nrecmemb_ras(a);
}

/* some generic limits */
#define I5100_MAX_RANKS_PER_CHAN        6
#define I5100_CHANNELS                  2
#define I5100_MAX_RANKS_PER_DIMM        4
#define I5100_DIMM_ADDR_LINES           (6 - 3) /* 64-bit bus = 8 bytes/access -> 3 addr lines */
#define I5100_MAX_DIMM_SLOTS_PER_CHAN   4
#define I5100_MAX_RANK_INTERLEAVE       4
#define I5100_MAX_DMIRS                 5
#define I5100_SCRUB_REFRESH_RATE        (5 * 60 * HZ)

struct i5100_priv {
        /* ranks on each dimm -- 0 maps to not present -- obtained via SPD */
        int dimm_numrank[I5100_CHANNELS][I5100_MAX_DIMM_SLOTS_PER_CHAN];

        /*
         * mainboard chip select map -- maps i5100 chip selects to
         * DIMM slot chip selects.  In the case of only 4 ranks per
         * channel, the mapping is fairly obvious but not unique.
         * We map -1 -> NC and assume both channels use the same map.
         */
        int dimm_csmap[I5100_MAX_DIMM_SLOTS_PER_CHAN][I5100_MAX_RANKS_PER_DIMM];

        /* memory interleave range */
        struct {
                u64 limit;
                unsigned way[2];
        } mir[I5100_CHANNELS];

        /* adjusted memory interleave range register */
        unsigned amir[I5100_CHANNELS];

        /* dimm interleave range */
        struct {
                unsigned rank[I5100_MAX_RANK_INTERLEAVE];
                u64 limit;
        } dmir[I5100_CHANNELS][I5100_MAX_DMIRS];

        /* memory technology registers... */
        struct {
                unsigned present;       /* 0 or 1 */
                unsigned ethrottle;     /* 0 or 1 */
                unsigned width;         /* 4 or 8 bits */
                unsigned numbank;       /* 2 or 3 lines */
                unsigned numrow;        /* 13 .. 16 lines */
                unsigned numcol;        /* 10 .. 12 lines */
        } mtr[I5100_CHANNELS][I5100_MAX_RANKS_PER_CHAN];

        u64 tolm;               /* top of low memory in bytes */
        unsigned ranksperchan;  /* number of ranks per channel */

        struct pci_dev *mc;     /* device 16 func 1 */
        struct pci_dev *ch0mm;  /* device 21 func 0 */
        struct pci_dev *ch1mm;  /* device 22 func 0 */

        struct delayed_work i5100_scrubbing;
        int scrub_enable;
};

/* map a rank/chan to a slot number on the mainboard */
static int i5100_rank_to_slot(const struct mem_ctl_info *mci,
                              int chan, int rank)
{
        const struct i5100_priv *priv = mci->pvt_info;
        int i;

        for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CHAN; i++) {
                int j;
                const int numrank = priv->dimm_numrank[chan][i];

                for (j = 0; j < numrank; j++)
                        if (priv->dimm_csmap[i][j] == rank)
                                return i * 2 + chan;
        }

        return -1;
}

static const char *i5100_err_msg(unsigned err)
{
        static const char *merrs[] = {
                "unknown",  /* 0 */
                "uncorrectable data ECC on replay",  /* 1 */
                "unknown",  /* 2 */
                "unknown",  /* 3 */
                "aliased uncorrectable demand data ECC",  /* 4 */
                "aliased uncorrectable spare-copy data ECC",  /* 5 */
                "aliased uncorrectable patrol data ECC",  /* 6 */
                "unknown",  /* 7 */
                "unknown",  /* 8 */
                "unknown",  /* 9 */
                "non-aliased uncorrectable demand data ECC",  /* 10 */
                "non-aliased uncorrectable spare-copy data ECC",  /* 11 */
                "non-aliased uncorrectable patrol data ECC",  /* 12 */
                "unknown",  /* 13 */
                "correctable demand data ECC",  /* 14 */
                "correctable spare-copy data ECC",  /* 15 */
                "correctable patrol data ECC",  /* 16 */
                "unknown",  /* 17 */
                "SPD protocol error",  /* 18 */
                "unknown",  /* 19 */
                "spare copy initiated",  /* 20 */
                "spare copy completed",  /* 21 */
        };
        unsigned i;

        for (i = 0; i < ARRAY_SIZE(merrs); i++)
                if (1 << i & err)
                        return merrs[i];

        return "none";
}
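
/*
 * csrow layout example (illustrative): with 6 ranks per channel, csrows
 * 0..5 are channel 0 ranks 0..5 and csrows 6..11 are channel 1 ranks
 * 0..5, i.e. csrow = chan * ranksperchan + rank, which is exactly what
 * the converters below implement.
 */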

/* convert csrow index into a rank (per channel -- 0..5) */
static int i5100_csrow_to_rank(const struct mem_ctl_info *mci, int csrow)
{
        const struct i5100_priv *priv = mci->pvt_info;

        return csrow % priv->ranksperchan;
}

/* convert csrow index into a channel (0..1) */
static int i5100_csrow_to_chan(const struct mem_ctl_info *mci, int csrow)
{
        const struct i5100_priv *priv = mci->pvt_info;

        return csrow / priv->ranksperchan;
}

static unsigned i5100_rank_to_csrow(const struct mem_ctl_info *mci,
                                    int chan, int rank)
{
        const struct i5100_priv *priv = mci->pvt_info;

        return chan * priv->ranksperchan + rank;
}

static void i5100_handle_ce(struct mem_ctl_info *mci,
                            int chan,
                            unsigned bank,
                            unsigned rank,
                            unsigned long syndrome,
                            unsigned cas,
                            unsigned ras,
                            const char *msg)
{
        const int csrow = i5100_rank_to_csrow(mci, chan, rank);

        printk(KERN_ERR
               "CE chan %d, bank %u, rank %u, syndrome 0x%lx, "
               "cas %u, ras %u, csrow %u, label \"%s\": %s\n",
               chan, bank, rank, syndrome, cas, ras,
               csrow, mci->csrows[csrow].channels[0].label, msg);

        mci->ce_count++;
        mci->csrows[csrow].ce_count++;
        mci->csrows[csrow].channels[0].ce_count++;
}

static void i5100_handle_ue(struct mem_ctl_info *mci,
                            int chan,
                            unsigned bank,
                            unsigned rank,
                            unsigned long syndrome,
                            unsigned cas,
                            unsigned ras,
                            const char *msg)
{
        const int csrow = i5100_rank_to_csrow(mci, chan, rank);

        printk(KERN_ERR
               "UE chan %d, bank %u, rank %u, syndrome 0x%lx, "
               "cas %u, ras %u, csrow %u, label \"%s\": %s\n",
               chan, bank, rank, syndrome, cas, ras,
               csrow, mci->csrows[csrow].channels[0].label, msg);

        mci->ue_count++;
        mci->csrows[csrow].ue_count++;
}
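
/*
 * Error-log read-out flow: VALIDLOG says which of the three per-channel
 * logs hold fresh data (REDMEM = ECC syndrome/locator, RECMEM =
 * correctable error, NRECMEM = uncorrectable error). i5100_read_log()
 * decodes whichever logs are valid, then writes the same value back to
 * VALIDLOG to acknowledge the entries.
 */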

static void i5100_read_log(struct mem_ctl_info *mci, int chan,
                           u32 ferr, u32 nerr)
{
        struct i5100_priv *priv = mci->pvt_info;
        struct pci_dev *pdev = chan ? priv->ch1mm : priv->ch0mm;
        u32 dw;
        u32 dw2;
        unsigned syndrome = 0;
        unsigned ecc_loc = 0;
        unsigned merr;
        unsigned bank;
        unsigned rank;
        unsigned cas;
        unsigned ras;

        pci_read_config_dword(pdev, I5100_VALIDLOG, &dw);

        if (i5100_validlog_redmemvalid(dw)) {
                pci_read_config_dword(pdev, I5100_REDMEMA, &dw2);
                syndrome = dw2;
                pci_read_config_dword(pdev, I5100_REDMEMB, &dw2);
                ecc_loc = i5100_redmemb_ecc_locator(dw2);
        }

        if (i5100_validlog_recmemvalid(dw)) {
                const char *msg;

                pci_read_config_dword(pdev, I5100_RECMEMA, &dw2);
                merr = i5100_recmema_merr(dw2);
                bank = i5100_recmema_bank(dw2);
                rank = i5100_recmema_rank(dw2);

                pci_read_config_dword(pdev, I5100_RECMEMB, &dw2);
                cas = i5100_recmemb_cas(dw2);
                ras = i5100_recmemb_ras(dw2);

                if (!merr)
                        msg = i5100_err_msg(ferr);
                else
                        msg = i5100_err_msg(nerr);

                i5100_handle_ce(mci, chan, bank, rank, syndrome, cas, ras, msg);
        }

        if (i5100_validlog_nrecmemvalid(dw)) {
                const char *msg;

                pci_read_config_dword(pdev, I5100_NRECMEMA, &dw2);
                merr = i5100_nrecmema_merr(dw2);
                bank = i5100_nrecmema_bank(dw2);
                rank = i5100_nrecmema_rank(dw2);

                pci_read_config_dword(pdev, I5100_NRECMEMB, &dw2);
                cas = i5100_nrecmemb_cas(dw2);
                ras = i5100_nrecmemb_ras(dw2);

                if (!merr)
                        msg = i5100_err_msg(ferr);
                else
                        msg = i5100_err_msg(nerr);

                i5100_handle_ue(mci, chan, bank, rank, syndrome, cas, ras, msg);
        }

        pci_write_config_dword(pdev, I5100_VALIDLOG, dw);
}

static void i5100_check_error(struct mem_ctl_info *mci)
{
        struct i5100_priv *priv = mci->pvt_info;
        u32 dw;

        pci_read_config_dword(priv->mc, I5100_FERR_NF_MEM, &dw);
        if (i5100_ferr_nf_mem_any(dw)) {
                u32 dw2;

                pci_read_config_dword(priv->mc, I5100_NERR_NF_MEM, &dw2);
                if (dw2)
                        pci_write_config_dword(priv->mc, I5100_NERR_NF_MEM,
                                               dw2);
                pci_write_config_dword(priv->mc, I5100_FERR_NF_MEM, dw);

                i5100_read_log(mci, i5100_ferr_nf_mem_chan_indx(dw),
                               i5100_ferr_nf_mem_any(dw),
                               i5100_nerr_nf_mem_any(dw2));
        }
}

/*
 * The i5100 chipset will scrub the entire memory once, then set a done
 * bit. Continuous scrubbing is achieved by enqueueing delayed work to a
 * workqueue, checking every few minutes whether the scrub pass has
 * completed and, if so, restarting it.
 */
static void i5100_refresh_scrubbing(struct work_struct *work)
{
        struct delayed_work *i5100_scrubbing = container_of(work,
                                                            struct delayed_work,
                                                            work);
        struct i5100_priv *priv = container_of(i5100_scrubbing,
                                               struct i5100_priv,
                                               i5100_scrubbing);
        u32 dw;

        if (priv->scrub_enable) {

                pci_read_config_dword(priv->mc, I5100_MC, &dw);

                if (i5100_mc_scrbdone(dw)) {
                        dw |= I5100_MC_SCRBEN_MASK;
                        pci_write_config_dword(priv->mc, I5100_MC, dw);
                        pci_read_config_dword(priv->mc, I5100_MC, &dw);
                }

                schedule_delayed_work(&(priv->i5100_scrubbing),
                                      I5100_SCRUB_REFRESH_RATE);
        }
}
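
/*
 * The scrub-rate callbacks below back the EDAC core's sdram_scrub_rate
 * sysfs attribute: writing a non-zero bandwidth enables hardware
 * scrubbing plus the refresh work above; writing zero disables both.
 */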

/*
 * The bandwidth is based on experimentation; feel free to refine it.
 */
static int i5100_set_scrub_rate(struct mem_ctl_info *mci, u32 bandwidth)
{
        struct i5100_priv *priv = mci->pvt_info;
        u32 dw;

        pci_read_config_dword(priv->mc, I5100_MC, &dw);
        if (bandwidth) {
                priv->scrub_enable = 1;
                dw |= I5100_MC_SCRBEN_MASK;
                schedule_delayed_work(&(priv->i5100_scrubbing),
                                      I5100_SCRUB_REFRESH_RATE);
        } else {
                priv->scrub_enable = 0;
                dw &= ~I5100_MC_SCRBEN_MASK;
                cancel_delayed_work(&(priv->i5100_scrubbing));
        }
        pci_write_config_dword(priv->mc, I5100_MC, dw);

        pci_read_config_dword(priv->mc, I5100_MC, &dw);

        bandwidth = 5900000 * i5100_mc_scrben(dw);

        return 0;
}

static int i5100_get_scrub_rate(struct mem_ctl_info *mci,
                                u32 *bandwidth)
{
        struct i5100_priv *priv = mci->pvt_info;
        u32 dw;

        pci_read_config_dword(priv->mc, I5100_MC, &dw);

        *bandwidth = 5900000 * i5100_mc_scrben(dw);

        return 0;
}

static struct pci_dev *pci_get_device_func(unsigned vendor,
                                           unsigned device,
                                           unsigned func)
{
        struct pci_dev *ret = NULL;

        while (1) {
                ret = pci_get_device(vendor, device, ret);

                if (!ret)
                        break;

                if (PCI_FUNC(ret->devfn) == func)
                        break;
        }

        return ret;
}
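
/*
 * Worked example for the rank-size computation below: a rank with
 * numcol = 10, numrow = 13 and numbank = 2 decodes to
 * 3 + 10 + 13 + 2 = 28 address lines, i.e. 2^28 bytes = 256 MiB,
 * or 65536 pages with a 4 KiB PAGE_SIZE.
 */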

static unsigned long __devinit i5100_npages(struct mem_ctl_info *mci,
                                            int csrow)
{
        struct i5100_priv *priv = mci->pvt_info;
        const unsigned chan_rank = i5100_csrow_to_rank(mci, csrow);
        const unsigned chan = i5100_csrow_to_chan(mci, csrow);
        unsigned addr_lines;

        /* dimm present? */
        if (!priv->mtr[chan][chan_rank].present)
                return 0ULL;

        addr_lines =
                I5100_DIMM_ADDR_LINES +
                priv->mtr[chan][chan_rank].numcol +
                priv->mtr[chan][chan_rank].numrow +
                priv->mtr[chan][chan_rank].numbank;

        return (unsigned long)
                ((unsigned long long) (1ULL << addr_lines) / PAGE_SIZE);
}

static void __devinit i5100_init_mtr(struct mem_ctl_info *mci)
{
        struct i5100_priv *priv = mci->pvt_info;
        struct pci_dev *mms[2] = { priv->ch0mm, priv->ch1mm };
        int i;

        for (i = 0; i < I5100_CHANNELS; i++) {
                int j;
                struct pci_dev *pdev = mms[i];

                for (j = 0; j < I5100_MAX_RANKS_PER_CHAN; j++) {
                        const unsigned addr =
                                (j < 4) ? I5100_MTR_0 + j * 2 :
                                          I5100_MTR_4 + (j - 4) * 2;
                        u16 w;

                        pci_read_config_word(pdev, addr, &w);

                        priv->mtr[i][j].present = i5100_mtr_present(w);
                        priv->mtr[i][j].ethrottle = i5100_mtr_ethrottle(w);
                        priv->mtr[i][j].width = 4 + 4 * i5100_mtr_width(w);
                        priv->mtr[i][j].numbank = 2 + i5100_mtr_numbank(w);
                        priv->mtr[i][j].numrow = 13 + i5100_mtr_numrow(w);
                        priv->mtr[i][j].numcol = 10 + i5100_mtr_numcol(w);
                }
        }
}

static int i5100_read_spd_byte(const struct mem_ctl_info *mci,
                               u8 ch, u8 slot, u8 addr, u8 *byte)
{
        struct i5100_priv *priv = mci->pvt_info;
        u16 w;
        unsigned long et;

        pci_read_config_word(priv->mc, I5100_SPDDATA, &w);
        if (i5100_spddata_busy(w))
                return -1;

        pci_write_config_dword(priv->mc, I5100_SPDCMD,
                               i5100_spdcmd_create(0xa, 1, ch * 4 + slot, addr,
                                                   0, 0));

        /* wait up to 100ms */
        et = jiffies + HZ / 10;
        udelay(100);
        while (1) {
                pci_read_config_word(priv->mc, I5100_SPDDATA, &w);
                if (!i5100_spddata_busy(w))
                        break;
                if (time_after(jiffies, et))    /* enforce the 100ms bound */
                        return -1;
                udelay(100);
        }

        if (!i5100_spddata_rdo(w) || i5100_spddata_sbe(w))
                return -1;

        *byte = i5100_spddata_data(w);

        return 0;
}

static void __devinit i5100_init_dimm_csmap(struct mem_ctl_info *mci)
{
        struct i5100_priv *priv = mci->pvt_info;
        int i;

        for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CHAN; i++) {
                int j;

                for (j = 0; j < I5100_MAX_RANKS_PER_DIMM; j++)
                        priv->dimm_csmap[i][j] = -1; /* default NC */
        }

        /* only 2 chip selects per slot... */
        if (priv->ranksperchan == 4) {
                priv->dimm_csmap[0][0] = 0;
                priv->dimm_csmap[0][1] = 3;
                priv->dimm_csmap[1][0] = 1;
                priv->dimm_csmap[1][1] = 2;
                priv->dimm_csmap[2][0] = 2;
                priv->dimm_csmap[3][0] = 3;
        } else {
                priv->dimm_csmap[0][0] = 0;
                priv->dimm_csmap[0][1] = 1;
                priv->dimm_csmap[1][0] = 2;
                priv->dimm_csmap[1][1] = 3;
                priv->dimm_csmap[2][0] = 4;
                priv->dimm_csmap[2][1] = 5;
        }
}

static void __devinit i5100_init_dimm_layout(struct pci_dev *pdev,
                                             struct mem_ctl_info *mci)
{
        struct i5100_priv *priv = mci->pvt_info;
        int i;

        for (i = 0; i < I5100_CHANNELS; i++) {
                int j;

                for (j = 0; j < I5100_MAX_DIMM_SLOTS_PER_CHAN; j++) {
                        u8 rank;

                        /* SPD byte 5 encodes the rank count (stored as count - 1) */
                        if (i5100_read_spd_byte(mci, i, j, 5, &rank) < 0)
                                priv->dimm_numrank[i][j] = 0;
                        else
                                priv->dimm_numrank[i][j] = (rank & 3) + 1;
                }
        }

        i5100_init_dimm_csmap(mci);
}
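
/*
 * Interleave-range decode notes: TOLM is a 4-bit field in 256 MiB units,
 * and the MIR/DMIR limit fields are likewise shifted up by 28 bits when
 * read below, so every range limit carries 256 MiB granularity.
 */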

static void __devinit i5100_init_interleaving(struct pci_dev *pdev,
                                              struct mem_ctl_info *mci)
{
        u16 w;
        u32 dw;
        struct i5100_priv *priv = mci->pvt_info;
        struct pci_dev *mms[2] = { priv->ch0mm, priv->ch1mm };
        int i;

        pci_read_config_word(pdev, I5100_TOLM, &w);
        priv->tolm = (u64) i5100_tolm_tolm(w) * 256 * 1024 * 1024;

        pci_read_config_word(pdev, I5100_MIR0, &w);
        priv->mir[0].limit = (u64) i5100_mir_limit(w) << 28;
        priv->mir[0].way[1] = i5100_mir_way1(w);
        priv->mir[0].way[0] = i5100_mir_way0(w);

        pci_read_config_word(pdev, I5100_MIR1, &w);
        priv->mir[1].limit = (u64) i5100_mir_limit(w) << 28;
        priv->mir[1].way[1] = i5100_mir_way1(w);
        priv->mir[1].way[0] = i5100_mir_way0(w);

        pci_read_config_word(pdev, I5100_AMIR_0, &w);
        priv->amir[0] = w;
        pci_read_config_word(pdev, I5100_AMIR_1, &w);
        priv->amir[1] = w;

        for (i = 0; i < I5100_CHANNELS; i++) {
                int j;

                for (j = 0; j < I5100_MAX_DMIRS; j++) {
                        int k;

                        pci_read_config_dword(mms[i], I5100_DMIR + j * 4, &dw);

                        priv->dmir[i][j].limit =
                                (u64) i5100_dmir_limit(dw) << 28;
                        for (k = 0; k < I5100_MAX_RANKS_PER_DIMM; k++)
                                priv->dmir[i][j].rank[k] =
                                        i5100_dmir_rank(dw, k);
                }
        }

        i5100_init_mtr(mci);
}

static void __devinit i5100_init_csrows(struct mem_ctl_info *mci)
{
        int i;
        unsigned long total_pages = 0UL;
        struct i5100_priv *priv = mci->pvt_info;

        for (i = 0; i < mci->nr_csrows; i++) {
                const unsigned long npages = i5100_npages(mci, i);
                const unsigned chan = i5100_csrow_to_chan(mci, i);
                const unsigned rank = i5100_csrow_to_rank(mci, i);

                if (!npages)
                        continue;

                mci->csrows[i].first_page = total_pages;
                mci->csrows[i].last_page = total_pages + npages - 1;
                mci->csrows[i].page_mask = 0UL;

                mci->csrows[i].nr_pages = npages;
                mci->csrows[i].grain = 32;
                mci->csrows[i].csrow_idx = i;
                mci->csrows[i].dtype =
                        (priv->mtr[chan][rank].width == 4) ? DEV_X4 : DEV_X8;
                mci->csrows[i].ue_count = 0;
                mci->csrows[i].ce_count = 0;
                mci->csrows[i].mtype = MEM_RDDR2;
                mci->csrows[i].edac_mode = EDAC_SECDED;
                mci->csrows[i].mci = mci;
                mci->csrows[i].nr_channels = 1;
                mci->csrows[i].channels[0].chan_idx = 0;
                mci->csrows[i].channels[0].ce_count = 0;
                mci->csrows[i].channels[0].csrow = mci->csrows + i;
                snprintf(mci->csrows[i].channels[0].label,
                         sizeof(mci->csrows[i].channels[0].label),
                         "DIMM%u", i5100_rank_to_slot(mci, chan, rank));

                total_pages += npages;
        }
}
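
/*
 * Probe-time device topology, as used by i5100_init_one() below:
 * device 16 func 1 carries the global memory-control and error
 * registers, while devices 21 and 22 func 0 carry the per-channel
 * memory map and error-log registers for channels 0 and 1 respectively.
 */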

static int __devinit i5100_init_one(struct pci_dev *pdev,
                                    const struct pci_device_id *id)
{
        int rc;
        struct mem_ctl_info *mci;
        struct i5100_priv *priv;
        struct pci_dev *ch0mm, *ch1mm;
        int ret = 0;
        u32 dw;
        int ranksperch;

        if (PCI_FUNC(pdev->devfn) != 1)
                return -ENODEV;

        rc = pci_enable_device(pdev);
        if (rc < 0) {
                ret = rc;
                goto bail;
        }

        /* ECC enabled? */
        pci_read_config_dword(pdev, I5100_MC, &dw);
        if (!i5100_mc_errdeten(dw)) {
                printk(KERN_INFO "i5100_edac: ECC not enabled.\n");
                ret = -ENODEV;
                goto bail_pdev;
        }

        /* figure out how many ranks, from strapped state of 48GB_Mode input */
        pci_read_config_dword(pdev, I5100_MS, &dw);
        ranksperch = !!(dw & (1 << 8)) * 2 + 4;

        /* enable error reporting... */
        pci_read_config_dword(pdev, I5100_EMASK_MEM, &dw);
        dw &= ~I5100_FERR_NF_MEM_ANY_MASK;
        pci_write_config_dword(pdev, I5100_EMASK_MEM, dw);

        /* device 21, func 0, Channel 0 Memory Map, Error Flag/Mask, etc... */
        ch0mm = pci_get_device_func(PCI_VENDOR_ID_INTEL,
                                    PCI_DEVICE_ID_INTEL_5100_21, 0);
        if (!ch0mm) {
                ret = -ENODEV;
                goto bail_pdev;
        }

        rc = pci_enable_device(ch0mm);
        if (rc < 0) {
                ret = rc;
                goto bail_ch0;
        }

        /* device 22, func 0, Channel 1 Memory Map, Error Flag/Mask, etc... */
        ch1mm = pci_get_device_func(PCI_VENDOR_ID_INTEL,
                                    PCI_DEVICE_ID_INTEL_5100_22, 0);
        if (!ch1mm) {
                ret = -ENODEV;
                goto bail_disable_ch0;
        }

        rc = pci_enable_device(ch1mm);
        if (rc < 0) {
                ret = rc;
                goto bail_ch1;
        }

        mci = edac_mc_alloc(sizeof(*priv), ranksperch * 2, 1, 0);
        if (!mci) {
                ret = -ENOMEM;
                goto bail_disable_ch1;
        }

        mci->dev = &pdev->dev;

        priv = mci->pvt_info;
        priv->ranksperchan = ranksperch;
        priv->mc = pdev;
        priv->ch0mm = ch0mm;
        priv->ch1mm = ch1mm;

        INIT_DELAYED_WORK(&(priv->i5100_scrubbing), i5100_refresh_scrubbing);

        /* If scrubbing was already enabled by the BIOS, start maintaining it */
        pci_read_config_dword(pdev, I5100_MC, &dw);
        if (i5100_mc_scrben(dw)) {
                priv->scrub_enable = 1;
                schedule_delayed_work(&(priv->i5100_scrubbing),
                                      I5100_SCRUB_REFRESH_RATE);
        }

        i5100_init_dimm_layout(pdev, mci);
        i5100_init_interleaving(pdev, mci);

        mci->mtype_cap = MEM_FLAG_FB_DDR2;
        mci->edac_ctl_cap = EDAC_FLAG_SECDED;
        mci->edac_cap = EDAC_FLAG_SECDED;
        mci->mod_name = "i5100_edac.c";
        mci->mod_ver = "not versioned";
        mci->ctl_name = "i5100";
        mci->dev_name = pci_name(pdev);
        mci->ctl_page_to_phys = NULL;

        mci->edac_check = i5100_check_error;
        mci->set_sdram_scrub_rate = i5100_set_scrub_rate;
        mci->get_sdram_scrub_rate = i5100_get_scrub_rate;

        i5100_init_csrows(mci);

        /* make sure edac_op_state holds a supported mode; default to polling */
        switch (edac_op_state) {
        case EDAC_OPSTATE_POLL:
        case EDAC_OPSTATE_NMI:
                break;
        default:
                edac_op_state = EDAC_OPSTATE_POLL;
                break;
        }

        if (edac_mc_add_mc(mci)) {
                ret = -ENODEV;
                goto bail_scrub;
        }

        return ret;

bail_scrub:
        priv->scrub_enable = 0;
        cancel_delayed_work_sync(&(priv->i5100_scrubbing));
        edac_mc_free(mci);

bail_disable_ch1:
        pci_disable_device(ch1mm);

bail_ch1:
        pci_dev_put(ch1mm);

bail_disable_ch0:
        pci_disable_device(ch0mm);

bail_ch0:
        pci_dev_put(ch0mm);

bail_pdev:
        pci_disable_device(pdev);

bail:
        return ret;
}

static void __devexit i5100_remove_one(struct pci_dev *pdev)
{
        struct mem_ctl_info *mci;
        struct i5100_priv *priv;

        mci = edac_mc_del_mc(&pdev->dev);

        if (!mci)
                return;

        priv = mci->pvt_info;

        priv->scrub_enable = 0;
        cancel_delayed_work_sync(&(priv->i5100_scrubbing));

        pci_disable_device(pdev);
        pci_disable_device(priv->ch0mm);
        pci_disable_device(priv->ch1mm);
        pci_dev_put(priv->ch0mm);
        pci_dev_put(priv->ch1mm);

        edac_mc_free(mci);
}

static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
        /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
        { 0, }
};
MODULE_DEVICE_TABLE(pci, i5100_pci_tbl);

static struct pci_driver i5100_driver = {
        .name = KBUILD_BASENAME,
        .probe = i5100_init_one,
        .remove = __devexit_p(i5100_remove_one),
        .id_table = i5100_pci_tbl,
};

static int __init i5100_init(void)
{
        int pci_rc;

        pci_rc = pci_register_driver(&i5100_driver);

        return (pci_rc < 0) ? pci_rc : 0;
}

static void __exit i5100_exit(void)
{
        pci_unregister_driver(&i5100_driver);
}

module_init(i5100_init);
module_exit(i5100_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arthur Jones <ajones@riverbed.com>");
MODULE_DESCRIPTION("MC Driver for Intel I5100 memory controllers");