mca.c revision 233793
/*-
 * Copyright (c) 2009 Advanced Computing Technologies LLC
 * Written by: John H. Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Support for x86 machine check architecture.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/x86/x86/mca.c 233793 2012-04-02 17:26:21Z jhb $");

#ifdef __amd64__
#define	DEV_APIC
#else
#include "opt_apic.h"
#endif

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>
#include <machine/intr_machdep.h>
#include <machine/apicvar.h>
#include <machine/cputypes.h>
#include <x86/mca.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

/* Modes for mca_scan() */
enum scan_mode {
	POLLED,
	MCE,
	CMCI,
};

#ifdef DEV_APIC
/*
 * State maintained for each monitored MCx bank to control the
 * corrected machine check interrupt threshold.
 */
struct cmc_state {
	int	max_threshold;
	int	last_intr;
};
#endif

struct mca_internal {
	struct mca_record rec;
	int		logged;
	STAILQ_ENTRY(mca_internal) link;
};

static MALLOC_DEFINE(M_MCA, "MCA", "Machine Check Architecture");

static int mca_count;		/* Number of records stored. */
static int mca_banks;		/* Number of per-CPU register banks. */

static SYSCTL_NODE(_hw, OID_AUTO, mca, CTLFLAG_RD, NULL,
    "Machine Check Architecture");

static int mca_enabled = 1;
TUNABLE_INT("hw.mca.enabled", &mca_enabled);
SYSCTL_INT(_hw_mca, OID_AUTO, enabled, CTLFLAG_RDTUN, &mca_enabled, 0,
    "Administrative toggle for machine check support");

static int amd10h_L1TP = 1;
TUNABLE_INT("hw.mca.amd10h_L1TP", &amd10h_L1TP);
SYSCTL_INT(_hw_mca, OID_AUTO, amd10h_L1TP, CTLFLAG_RDTUN, &amd10h_L1TP, 0,
    "Administrative toggle for logging of level one TLB parity (L1TP) errors");

int workaround_erratum383;
SYSCTL_INT(_hw_mca, OID_AUTO, erratum383, CTLFLAG_RD, &workaround_erratum383, 0,
    "Is the workaround for Erratum 383 on AMD Family 10h processors enabled?");

static STAILQ_HEAD(, mca_internal) mca_freelist;
static int mca_freecount;
static STAILQ_HEAD(, mca_internal) mca_records;
static struct callout mca_timer;
static int mca_ticks = 3600;	/* Check hourly by default. */
static struct taskqueue *mca_tq;
static struct task mca_refill_task, mca_scan_task;
static struct mtx mca_lock;

#ifdef DEV_APIC
static struct cmc_state **cmc_state;	/* Indexed by cpuid, bank */
static int cmc_throttle = 60;	/* Time in seconds to throttle CMCI. */
#endif

static int
sysctl_positive_int(SYSCTL_HANDLER_ARGS)
{
	int error, value;

	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error || req->newptr == NULL)
		return (error);
	if (value <= 0)
		return (EINVAL);
	*(int *)arg1 = value;
	return (0);
}

static int
sysctl_mca_records(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1;
	u_int namelen = arg2;
	struct mca_record record;
	struct mca_internal *rec;
	int i;

	if (namelen != 1)
		return (EINVAL);

	if (name[0] < 0 || name[0] >= mca_count)
		return (EINVAL);

	mtx_lock_spin(&mca_lock);
	if (name[0] >= mca_count) {
		mtx_unlock_spin(&mca_lock);
		return (EINVAL);
	}
	i = 0;
	STAILQ_FOREACH(rec, &mca_records, link) {
		if (i == name[0]) {
			record = rec->rec;
			break;
		}
		i++;
	}
	mtx_unlock_spin(&mca_lock);
	return (SYSCTL_OUT(req, &record, sizeof(record)));
}

static const char *
mca_error_ttype(uint16_t mca_error)
{

	switch ((mca_error & 0x000c) >> 2) {
	case 0:
		return ("I");
	case 1:
		return ("D");
	case 2:
		return ("G");
	}
	return ("?");
}

static const char *
mca_error_level(uint16_t mca_error)
{

	switch (mca_error & 0x0003) {
	case 0:
		return ("L0");
	case 1:
		return ("L1");
	case 2:
		return ("L2");
	case 3:
		return ("LG");
	}
	return ("L?");
}

static const char *
mca_error_request(uint16_t mca_error)
{

	switch ((mca_error & 0x00f0) >> 4) {
	case 0x0:
		return ("ERR");
	case 0x1:
		return ("RD");
	case 0x2:
		return ("WR");
	case 0x3:
		return ("DRD");
	case 0x4:
		return ("DWR");
	case 0x5:
		return ("IRD");
	case 0x6:
		return ("PREFETCH");
	case 0x7:
		return ("EVICT");
	case 0x8:
		return ("SNOOP");
	}
	return ("???");
}

static const char *
mca_error_mmtype(uint16_t mca_error)
{

	switch ((mca_error & 0x70) >> 4) {
	case 0x0:
		return ("GEN");
	case 0x1:
		return ("RD");
	case 0x2:
		return ("WR");
	case 0x3:
		return ("AC");
	case 0x4:
		return ("MS");
	}
	return ("???");
}

/* Dump details about a single machine check. */
static void __nonnull(1)
mca_log(const struct mca_record *rec)
{
	uint16_t mca_error;

	printf("MCA: Bank %d, Status 0x%016llx\n", rec->mr_bank,
	    (long long)rec->mr_status);
	printf("MCA: Global Cap 0x%016llx, Status 0x%016llx\n",
	    (long long)rec->mr_mcg_cap, (long long)rec->mr_mcg_status);
	printf("MCA: Vendor \"%s\", ID 0x%x, APIC ID %d\n", cpu_vendor,
	    rec->mr_cpu_id, rec->mr_apic_id);
	printf("MCA: CPU %d ", rec->mr_cpu);
	if (rec->mr_status & MC_STATUS_UC)
		printf("UNCOR ");
	else {
		printf("COR ");
		if (rec->mr_mcg_cap & MCG_CAP_CMCI_P)
			printf("(%lld) ", ((long long)rec->mr_status &
			    MC_STATUS_COR_COUNT) >> 38);
	}
	if (rec->mr_status & MC_STATUS_PCC)
		printf("PCC ");
	if (rec->mr_status & MC_STATUS_OVER)
		printf("OVER ");
	mca_error = rec->mr_status & MC_STATUS_MCA_ERROR;
	switch (mca_error) {
		/* Simple error codes. */
	case 0x0000:
		printf("no error");
		break;
	case 0x0001:
		printf("unclassified error");
		break;
	case 0x0002:
		printf("ucode ROM parity error");
		break;
	case 0x0003:
		printf("external error");
		break;
	case 0x0004:
		printf("FRC error");
		break;
	case 0x0005:
		printf("internal parity error");
		break;
	case 0x0400:
		printf("internal timer error");
		break;
	default:
		if ((mca_error & 0xfc00) == 0x0400) {
			printf("internal error %x", mca_error & 0x03ff);
			break;
		}

		/* Compound error codes. */

		/* Memory hierarchy error. */
		if ((mca_error & 0xeffc) == 0x000c) {
			printf("%s memory error", mca_error_level(mca_error));
			break;
		}

		/* TLB error. */
		if ((mca_error & 0xeff0) == 0x0010) {
			printf("%sTLB %s error", mca_error_ttype(mca_error),
			    mca_error_level(mca_error));
			break;
		}

		/* Memory controller error. */
		if ((mca_error & 0xef80) == 0x0080) {
			printf("%s channel ", mca_error_mmtype(mca_error));
			if ((mca_error & 0x000f) != 0x000f)
				printf("%d", mca_error & 0x000f);
			else
				printf("??");
			printf(" memory error");
			break;
		}

		/* Cache error. */
		if ((mca_error & 0xef00) == 0x0100) {
			printf("%sCACHE %s %s error",
			    mca_error_ttype(mca_error),
			    mca_error_level(mca_error),
			    mca_error_request(mca_error));
			break;
		}

		/* Bus and/or Interconnect error. */
		if ((mca_error & 0xe800) == 0x0800) {
			printf("BUS%s ", mca_error_level(mca_error));
			switch ((mca_error & 0x0600) >> 9) {
			case 0:
				printf("Source");
				break;
			case 1:
				printf("Responder");
				break;
			case 2:
				printf("Observer");
				break;
			default:
				printf("???");
				break;
			}
			printf(" %s ", mca_error_request(mca_error));
			switch ((mca_error & 0x000c) >> 2) {
			case 0:
				printf("Memory");
				break;
			case 2:
				printf("I/O");
				break;
			case 3:
				printf("Other");
				break;
			default:
				printf("???");
				break;
			}
			if (mca_error & 0x0100)
				printf(" timed out");
			break;
		}

		printf("unknown error %x", mca_error);
		break;
	}
	printf("\n");
	if (rec->mr_status & MC_STATUS_ADDRV)
		printf("MCA: Address 0x%llx\n", (long long)rec->mr_addr);
	if (rec->mr_status & MC_STATUS_MISCV)
		printf("MCA: Misc 0x%llx\n", (long long)rec->mr_misc);
}

static int __nonnull(2)
mca_check_status(int bank, struct mca_record *rec)
{
	uint64_t status;
	u_int p[4];

	status = rdmsr(MSR_MC_STATUS(bank));
	if (!(status & MC_STATUS_VAL))
		return (0);

	/* Save exception information. */
	rec->mr_status = status;
	rec->mr_bank = bank;
	rec->mr_addr = 0;
	if (status & MC_STATUS_ADDRV)
		rec->mr_addr = rdmsr(MSR_MC_ADDR(bank));
	rec->mr_misc = 0;
	if (status & MC_STATUS_MISCV)
		rec->mr_misc = rdmsr(MSR_MC_MISC(bank));
	rec->mr_tsc = rdtsc();
	rec->mr_apic_id = PCPU_GET(apic_id);
	rec->mr_mcg_cap = rdmsr(MSR_MCG_CAP);
	rec->mr_mcg_status = rdmsr(MSR_MCG_STATUS);
	rec->mr_cpu_id = cpu_id;
	rec->mr_cpu_vendor_id = cpu_vendor_id;
	rec->mr_cpu = PCPU_GET(cpuid);

	/*
	 * Clear machine check.  Don't do this for uncorrectable
	 * errors so that the BIOS can see them.
	 */
	if (!(rec->mr_status & (MC_STATUS_PCC | MC_STATUS_UC))) {
		wrmsr(MSR_MC_STATUS(bank), 0);
		do_cpuid(0, p);
	}
	return (1);
}

static void
mca_fill_freelist(void)
{
	struct mca_internal *rec;
	int desired;

	/*
	 * Ensure we have at least one record for each bank and one
	 * record per CPU.
	 */
	desired = imax(mp_ncpus, mca_banks);
	mtx_lock_spin(&mca_lock);
	while (mca_freecount < desired) {
		mtx_unlock_spin(&mca_lock);
		rec = malloc(sizeof(*rec), M_MCA, M_WAITOK);
		mtx_lock_spin(&mca_lock);
		STAILQ_INSERT_TAIL(&mca_freelist, rec, link);
		mca_freecount++;
	}
	mtx_unlock_spin(&mca_lock);
}

static void
mca_refill(void *context, int pending)
{

	mca_fill_freelist();
}

static void __nonnull(2)
mca_record_entry(enum scan_mode mode, const struct mca_record *record)
{
	struct mca_internal *rec;

	if (mode == POLLED) {
		rec = malloc(sizeof(*rec), M_MCA, M_WAITOK);
		mtx_lock_spin(&mca_lock);
	} else {
		mtx_lock_spin(&mca_lock);
		rec = STAILQ_FIRST(&mca_freelist);
		if (rec == NULL) {
			printf("MCA: Unable to allocate space for an event.\n");
			mca_log(record);
			mtx_unlock_spin(&mca_lock);
			return;
		}
		STAILQ_REMOVE_HEAD(&mca_freelist, link);
		mca_freecount--;
	}

	rec->rec = *record;
	rec->logged = 0;
	STAILQ_INSERT_TAIL(&mca_records, rec, link);
	mca_count++;
	mtx_unlock_spin(&mca_lock);
	if (mode == CMCI)
		taskqueue_enqueue_fast(mca_tq, &mca_refill_task);
}

#ifdef DEV_APIC
/*
 * Update the interrupt threshold for a CMCI.  The strategy is to use
 * a low trigger that interrupts as soon as the first event occurs.
 * However, if a steady stream of events arrive, the threshold is
 * increased until the interrupts are throttled to once every
 * cmc_throttle seconds or the periodic scan.  If a periodic scan
 * finds that the threshold is too high, it is lowered.
 */
static void
cmci_update(enum scan_mode mode, int bank, int valid, struct mca_record *rec)
{
	struct cmc_state *cc;
	uint64_t ctl;
	u_int delta;
	int count, limit;

	/* Fetch the current limit for this bank. */
	cc = &cmc_state[PCPU_GET(cpuid)][bank];
	ctl = rdmsr(MSR_MC_CTL2(bank));
	count = (rec->mr_status & MC_STATUS_COR_COUNT) >> 38;
	delta = (u_int)(ticks - cc->last_intr);

	/*
	 * If an interrupt was received less than cmc_throttle seconds
	 * since the previous interrupt and the count from the current
	 * event is greater than or equal to the current threshold,
	 * double the threshold up to the max.
	 */
	if (mode == CMCI && valid) {
		limit = ctl & MC_CTL2_THRESHOLD;
		if (delta < cmc_throttle && count >= limit &&
		    limit < cc->max_threshold) {
			limit = min(limit << 1, cc->max_threshold);
			ctl &= ~MC_CTL2_THRESHOLD;
			ctl |= limit;
			wrmsr(MSR_MC_CTL2(bank), ctl);
		}
		cc->last_intr = ticks;
		return;
	}

	/*
	 * When the banks are polled, check to see if the threshold
	 * should be lowered.
	 */
	if (mode != POLLED)
		return;

	/* If a CMCI occurred recently, do nothing for now. */
	if (delta < cmc_throttle)
		return;

	/*
	 * Compute a new limit based on the average rate of events per
	 * cmc_throttle seconds since the last interrupt.
	 */
	if (valid) {
		count = (rec->mr_status & MC_STATUS_COR_COUNT) >> 38;
		limit = count * cmc_throttle / delta;
		if (limit <= 0)
			limit = 1;
		else if (limit > cc->max_threshold)
			limit = cc->max_threshold;
	} else
		limit = 1;
	if ((ctl & MC_CTL2_THRESHOLD) != limit) {
		ctl &= ~MC_CTL2_THRESHOLD;
		ctl |= limit;
		wrmsr(MSR_MC_CTL2(bank), ctl);
	}
}
#endif

/*
 * This scans all the machine check banks of the current CPU to see if
 * there are any machine checks.  Any non-recoverable errors are
 * reported immediately via mca_log().  The current thread must be
 * pinned when this is called.  The 'mode' parameter indicates if we
 * are being called from the MC exception handler, the CMCI handler,
 * or the periodic poller.  In the MC exception case this function
 * returns true if the system is restartable.  Otherwise, it returns a
 * count of the number of valid MC records found.
 */
static int
mca_scan(enum scan_mode mode)
{
	struct mca_record rec;
	uint64_t mcg_cap, ucmask;
	int count, i, recoverable, valid;

	count = 0;
	recoverable = 1;
	ucmask = MC_STATUS_UC | MC_STATUS_PCC;

	/* When handling a MCE#, treat the OVER flag as non-restartable. */
	if (mode == MCE)
		ucmask |= MC_STATUS_OVER;
	mcg_cap = rdmsr(MSR_MCG_CAP);
	for (i = 0; i < (mcg_cap & MCG_CAP_COUNT); i++) {
#ifdef DEV_APIC
		/*
		 * For a CMCI, only check banks this CPU is
		 * responsible for.
		 */
		if (mode == CMCI && !(PCPU_GET(cmci_mask) & 1 << i))
			continue;
#endif

		valid = mca_check_status(i, &rec);
		if (valid) {
			count++;
			if (rec.mr_status & ucmask) {
				recoverable = 0;
				mtx_lock_spin(&mca_lock);
				mca_log(&rec);
				mtx_unlock_spin(&mca_lock);
			}
			mca_record_entry(mode, &rec);
		}

#ifdef DEV_APIC
		/*
		 * If this is a bank this CPU monitors via CMCI,
		 * update the threshold.
		 */
		if (PCPU_GET(cmci_mask) & 1 << i)
			cmci_update(mode, i, valid, &rec);
#endif
	}
	if (mode == POLLED)
		mca_fill_freelist();
	return (mode == MCE ? recoverable : count);
}

/*
 * Scan the machine check banks on all CPUs by binding to each CPU in
 * turn.  If any of the CPUs contained new machine check records, log
 * them to the console.
 */
static void
mca_scan_cpus(void *context, int pending)
{
	struct mca_internal *mca;
	struct thread *td;
	int count, cpu;

	mca_fill_freelist();
	td = curthread;
	count = 0;
	thread_lock(td);
	CPU_FOREACH(cpu) {
		sched_bind(td, cpu);
		thread_unlock(td);
		count += mca_scan(POLLED);
		thread_lock(td);
		sched_unbind(td);
	}
	thread_unlock(td);
	if (count != 0) {
		mtx_lock_spin(&mca_lock);
		STAILQ_FOREACH(mca, &mca_records, link) {
			if (!mca->logged) {
				mca->logged = 1;
				mca_log(&mca->rec);
			}
		}
		mtx_unlock_spin(&mca_lock);
	}
}

static void
mca_periodic_scan(void *arg)
{

	taskqueue_enqueue_fast(mca_tq, &mca_scan_task);
	callout_reset(&mca_timer, mca_ticks * hz, mca_periodic_scan, NULL);
}

static int
sysctl_mca_scan(SYSCTL_HANDLER_ARGS)
{
	int error, i;

	i = 0;
	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error)
		return (error);
	if (i)
		taskqueue_enqueue_fast(mca_tq, &mca_scan_task);
	return (0);
}

static void
mca_createtq(void *dummy)
{
	if (mca_banks <= 0)
		return;

	mca_tq = taskqueue_create_fast("mca", M_WAITOK,
	    taskqueue_thread_enqueue, &mca_tq);
	taskqueue_start_threads(&mca_tq, 1, PI_SWI(SWI_TQ), "mca taskq");
}
SYSINIT(mca_createtq, SI_SUB_CONFIGURE, SI_ORDER_ANY, mca_createtq, NULL);

static void
mca_startup(void *dummy)
{

	if (mca_banks <= 0)
		return;

	callout_reset(&mca_timer, mca_ticks * hz, mca_periodic_scan, NULL);
}
SYSINIT(mca_startup, SI_SUB_SMP, SI_ORDER_ANY, mca_startup, NULL);

#ifdef DEV_APIC
static void
cmci_setup(void)
{
	int i;

	cmc_state = malloc((mp_maxid + 1) * sizeof(struct cmc_state **),
	    M_MCA, M_WAITOK);
	for (i = 0; i <= mp_maxid; i++)
		cmc_state[i] = malloc(sizeof(struct cmc_state) * mca_banks,
		    M_MCA, M_WAITOK | M_ZERO);
	SYSCTL_ADD_PROC(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO,
	    "cmc_throttle", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
	    &cmc_throttle, 0, sysctl_positive_int, "I",
	    "Interval in seconds to throttle corrected MC interrupts");
}
#endif

static void
mca_setup(uint64_t mcg_cap)
{

	/*
	 * On AMD Family 10h processors, unless logging of level one TLB
	 * parity (L1TP) errors is disabled, enable the recommended workaround
	 * for Erratum 383.
	 */
	if (cpu_vendor_id == CPU_VENDOR_AMD &&
	    CPUID_TO_FAMILY(cpu_id) == 0x10 && amd10h_L1TP)
		workaround_erratum383 = 1;

	mca_banks = mcg_cap & MCG_CAP_COUNT;
	mtx_init(&mca_lock, "mca", NULL, MTX_SPIN);
	STAILQ_INIT(&mca_records);
	TASK_INIT(&mca_scan_task, 0, mca_scan_cpus, NULL);
	callout_init(&mca_timer, CALLOUT_MPSAFE);
	STAILQ_INIT(&mca_freelist);
	TASK_INIT(&mca_refill_task, 0, mca_refill, NULL);
	mca_fill_freelist();
	SYSCTL_ADD_INT(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO,
	    "count", CTLFLAG_RD, &mca_count, 0, "Record count");
	SYSCTL_ADD_PROC(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO,
	    "interval", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, &mca_ticks,
	    0, sysctl_positive_int, "I",
	    "Periodic interval in seconds to scan for machine checks");
	SYSCTL_ADD_NODE(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO,
	    "records", CTLFLAG_RD, sysctl_mca_records, "Machine check records");
	SYSCTL_ADD_PROC(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO,
	    "force_scan", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0,
	    sysctl_mca_scan, "I", "Force an immediate scan for machine checks");
#ifdef DEV_APIC
	if (mcg_cap & MCG_CAP_CMCI_P)
		cmci_setup();
#endif
}

#ifdef DEV_APIC
/*
 * See if we should monitor CMCI for this bank.  If CMCI_EN is already
 * set in MC_CTL2, then another CPU is responsible for this bank, so
 * ignore it.  If CMCI_EN returns zero after being set, then this bank
 * does not support CMCI_EN.  If this CPU sets CMCI_EN, then it should
 * now monitor this bank.
 */
static void
cmci_monitor(int i)
{
	struct cmc_state *cc;
	uint64_t ctl;

	KASSERT(i < mca_banks, ("CPU %d has more MC banks", PCPU_GET(cpuid)));

	ctl = rdmsr(MSR_MC_CTL2(i));
	if (ctl & MC_CTL2_CMCI_EN)
		/* Already monitored by another CPU. */
		return;

	/* Set the threshold to one event for now. */
	ctl &= ~MC_CTL2_THRESHOLD;
	ctl |= MC_CTL2_CMCI_EN | 1;
	wrmsr(MSR_MC_CTL2(i), ctl);
	ctl = rdmsr(MSR_MC_CTL2(i));
	if (!(ctl & MC_CTL2_CMCI_EN))
		/* This bank does not support CMCI. */
		return;

	cc = &cmc_state[PCPU_GET(cpuid)][i];

	/* Determine maximum threshold. */
	ctl &= ~MC_CTL2_THRESHOLD;
	ctl |= 0x7fff;
	wrmsr(MSR_MC_CTL2(i), ctl);
	ctl = rdmsr(MSR_MC_CTL2(i));
	cc->max_threshold = ctl & MC_CTL2_THRESHOLD;

	/* Start off with a threshold of 1. */
	ctl &= ~MC_CTL2_THRESHOLD;
	ctl |= 1;
	wrmsr(MSR_MC_CTL2(i), ctl);

	/* Mark this bank as monitored. */
	PCPU_SET(cmci_mask, PCPU_GET(cmci_mask) | 1 << i);
}

/*
 * For resume, reset the threshold for any banks we monitor back to
 * one and throw away the timestamp of the last interrupt.
 */
static void
cmci_resume(int i)
{
	struct cmc_state *cc;
	uint64_t ctl;

	KASSERT(i < mca_banks, ("CPU %d has more MC banks", PCPU_GET(cpuid)));

	/* Ignore banks not monitored by this CPU. */
	if (!(PCPU_GET(cmci_mask) & 1 << i))
		return;

	cc = &cmc_state[PCPU_GET(cpuid)][i];
	cc->last_intr = -ticks;
	ctl = rdmsr(MSR_MC_CTL2(i));
	ctl &= ~MC_CTL2_THRESHOLD;
	ctl |= MC_CTL2_CMCI_EN | 1;
	wrmsr(MSR_MC_CTL2(i), ctl);
}
#endif

/*
 * Initializes per-CPU machine check registers and enables corrected
 * machine check interrupts.
 */
static void
_mca_init(int boot)
{
	uint64_t mcg_cap;
	uint64_t ctl, mask;
	int i, skip;

	/* MCE is required. */
	if (!mca_enabled || !(cpu_feature & CPUID_MCE))
		return;

	if (cpu_feature & CPUID_MCA) {
		if (boot)
			PCPU_SET(cmci_mask, 0);

		mcg_cap = rdmsr(MSR_MCG_CAP);
		if (mcg_cap & MCG_CAP_CTL_P)
			/* Enable MCA features. */
			wrmsr(MSR_MCG_CTL, MCG_CTL_ENABLE);
		if (PCPU_GET(cpuid) == 0 && boot)
			mca_setup(mcg_cap);

		/*
		 * Disable logging of level one TLB parity (L1TP) errors by
		 * the data cache as an alternative workaround for AMD Family
		 * 10h Erratum 383.  Unlike the recommended workaround, there
		 * is no performance penalty to this workaround.  However,
		 * L1TP errors will go unreported.
		 */
		if (cpu_vendor_id == CPU_VENDOR_AMD &&
		    CPUID_TO_FAMILY(cpu_id) == 0x10 && !amd10h_L1TP) {
			mask = rdmsr(MSR_MC0_CTL_MASK);
			if ((mask & (1UL << 5)) == 0)
				wrmsr(MSR_MC0_CTL_MASK, mask | (1UL << 5));
		}
		for (i = 0; i < (mcg_cap & MCG_CAP_COUNT); i++) {
			/* By default enable logging of all errors. */
			ctl = 0xffffffffffffffffUL;
			skip = 0;

			if (cpu_vendor_id == CPU_VENDOR_INTEL) {
				/*
				 * For P6 models before Nehalem MC0_CTL is
				 * always enabled and reserved.
				 */
				if (i == 0 && CPUID_TO_FAMILY(cpu_id) == 0x6
				    && CPUID_TO_MODEL(cpu_id) < 0x1a)
					skip = 1;
			} else if (cpu_vendor_id == CPU_VENDOR_AMD) {
				/* BKDG for Family 10h: unset GartTblWkEn. */
				if (i == 4 && CPUID_TO_FAMILY(cpu_id) >= 0xf)
					ctl &= ~(1UL << 10);
			}

			if (!skip)
				wrmsr(MSR_MC_CTL(i), ctl);

#ifdef DEV_APIC
			if (mcg_cap & MCG_CAP_CMCI_P) {
				if (boot)
					cmci_monitor(i);
				else
					cmci_resume(i);
			}
#endif

			/* Clear all errors. */
			wrmsr(MSR_MC_STATUS(i), 0);
		}

#ifdef DEV_APIC
		if (PCPU_GET(cmci_mask) != 0 && boot)
			lapic_enable_cmc();
#endif
	}

	load_cr4(rcr4() | CR4_MCE);
}

/* Must be executed on each CPU during boot. */
void
mca_init(void)
{

	_mca_init(1);
}

/* Must be executed on each CPU during resume. */
void
mca_resume(void)
{

	_mca_init(0);
}

/*
 * The machine check registers for the BSP cannot be initialized until
 * the local APIC is initialized.  This happens at SI_SUB_CPU,
 * SI_ORDER_SECOND.
 */
static void
mca_init_bsp(void *arg __unused)
{

	mca_init();
}
SYSINIT(mca_init_bsp, SI_SUB_CPU, SI_ORDER_ANY, mca_init_bsp, NULL);

/* Called when a machine check exception fires. */
void
mca_intr(void)
{
	uint64_t mcg_status;
	int recoverable;

	if (!(cpu_feature & CPUID_MCA)) {
		/*
		 * Just print the values of the old Pentium registers
		 * and panic.
		 */
		printf("MC Type: 0x%jx  Address: 0x%jx\n",
		    (uintmax_t)rdmsr(MSR_P5_MC_TYPE),
		    (uintmax_t)rdmsr(MSR_P5_MC_ADDR));
		panic("Machine check");
	}

	/* Scan the banks and check for any non-recoverable errors. */
	recoverable = mca_scan(MCE);
	mcg_status = rdmsr(MSR_MCG_STATUS);
	if (!(mcg_status & MCG_STATUS_RIPV))
		recoverable = 0;

	/* Clear MCIP. */
	wrmsr(MSR_MCG_STATUS, mcg_status & ~MCG_STATUS_MCIP);
	if (!recoverable)
		panic("Unrecoverable machine check exception");
}

#ifdef DEV_APIC
/* Called for a CMCI (correctable machine check interrupt). */
void
cmc_intr(void)
{
	struct mca_internal *mca;
	int count;

	/*
	 * Serialize MCA bank scanning to prevent collisions from
	 * sibling threads.
	 */
	count = mca_scan(CMCI);

	/* If we found anything, log them to the console. */
	if (count != 0) {
		mtx_lock_spin(&mca_lock);
		STAILQ_FOREACH(mca, &mca_records, link) {
			if (!mca->logged) {
				mca->logged = 1;
				mca_log(&mca->rec);
			}
		}
		mtx_unlock_spin(&mca_lock);
	}
}
#endif
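
/*
 * A minimal userland sketch of how the sysctls exported above can be
 * consumed: hw.mca.count reports the number of stored records, and the
 * hw.mca.records node returns one struct mca_record per appended index
 * via sysctl_mca_records().  It assumes a FreeBSD host where struct
 * mca_record is visible to userland through <machine/mca.h>.  Kept
 * under "#if 0" so this translation unit is unaffected.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <machine/mca.h>

#include <err.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	struct mca_record rec;
	size_t len, miblen;
	int count, i, mib[CTL_MAXNAME];

	/* hw.mca.count: number of records collected so far. */
	len = sizeof(count);
	if (sysctlbyname("hw.mca.count", &count, &len, NULL, 0) != 0)
		err(1, "hw.mca.count");

	/* Resolve the hw.mca.records node, leaving room for the index. */
	miblen = CTL_MAXNAME - 1;
	if (sysctlnametomib("hw.mca.records", mib, &miblen) != 0)
		err(1, "hw.mca.records");

	for (i = 0; i < count; i++) {
		mib[miblen] = i;	/* name[0] seen by sysctl_mca_records() */
		len = sizeof(rec);
		if (sysctl(mib, miblen + 1, &rec, &len, NULL, 0) != 0)
			err(1, "hw.mca.records.%d", i);
		printf("bank %d status 0x%016jx\n", rec.mr_bank,
		    (uintmax_t)rec.mr_status);
	}
	return (0);
}
#endif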