/*-
 * Copyright (c) 2009 Advanced Computing Technologies LLC
 * Written by: John H. Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Support for x86 machine check architecture.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/x86/x86/mca.c 281751 2015-04-19 20:15:57Z marius $");

#ifdef __amd64__
#define	DEV_APIC
#else
#include "opt_apic.h"
#endif

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>
#include <machine/intr_machdep.h>
#include <x86/apicvar.h>
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <x86/mca.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

/* Modes for mca_scan() */
enum scan_mode {
	POLLED,			/* Periodic scan via the timer/taskqueue. */
	MCE,			/* Machine check exception (MC#) handler. */
	CMCI,			/* Corrected machine check interrupt handler. */
};

#ifdef DEV_APIC
/*
 * State maintained for each monitored MCx bank to control the
 * corrected machine check interrupt threshold.
 */
struct cmc_state {
	int	max_threshold;
	int	last_intr;
};
#endif

struct mca_internal {
	struct mca_record rec;
	int		logged;
	STAILQ_ENTRY(mca_internal) link;
};

static MALLOC_DEFINE(M_MCA, "MCA", "Machine Check Architecture");

static volatile int mca_count;	/* Number of records stored. */
static int mca_banks;		/* Number of per-CPU register banks. */

static SYSCTL_NODE(_hw, OID_AUTO, mca, CTLFLAG_RD, NULL,
    "Machine Check Architecture");

static int mca_enabled = 1;
SYSCTL_INT(_hw_mca, OID_AUTO, enabled, CTLFLAG_RDTUN, &mca_enabled, 0,
    "Administrative toggle for machine check support");

static int amd10h_L1TP = 1;
SYSCTL_INT(_hw_mca, OID_AUTO, amd10h_L1TP, CTLFLAG_RDTUN, &amd10h_L1TP, 0,
    "Administrative toggle for logging of level one TLB parity (L1TP) errors");

static int intel6h_HSD131;
SYSCTL_INT(_hw_mca, OID_AUTO, intel6h_HSD131, CTLFLAG_RDTUN, &intel6h_HSD131, 0,
    "Administrative toggle for logging of spurious corrected errors");

int workaround_erratum383;
SYSCTL_INT(_hw_mca, OID_AUTO, erratum383, CTLFLAG_RD, &workaround_erratum383, 0,
    "Is the workaround for Erratum 383 on AMD Family 10h processors enabled?");

static STAILQ_HEAD(, mca_internal) mca_freelist;
static int mca_freecount;
static STAILQ_HEAD(, mca_internal) mca_records;
static struct callout mca_timer;
static int mca_ticks = 3600;	/* Check hourly by default. */
static struct taskqueue *mca_tq;
static struct task mca_refill_task, mca_scan_task;
static struct mtx mca_lock;	/* Spin lock; also taken from MC#/CMCI context. */

#ifdef DEV_APIC
static struct cmc_state **cmc_state;	/* Indexed by cpuid, bank */
static int cmc_throttle = 60;	/* Time in seconds to throttle CMCI. */
#endif

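/* Sysctl handler that accepts only positive integer values. */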
static int
sysctl_positive_int(SYSCTL_HANDLER_ARGS)
{
	int error, value;

	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error || req->newptr == NULL)
		return (error);
	if (value <= 0)
		return (EINVAL);
	*(int *)arg1 = value;
	return (0);
}

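/*
 * Sysctl handler that copies out the Nth stored machine check record.
 * The index is rechecked under the lock since records may be appended
 * concurrently.
 */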
static int
sysctl_mca_records(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1;
	u_int namelen = arg2;
	struct mca_record record;
	struct mca_internal *rec;
	int i;

	if (namelen != 1)
		return (EINVAL);

	if (name[0] < 0 || name[0] >= mca_count)
		return (EINVAL);

	mtx_lock_spin(&mca_lock);
	if (name[0] >= mca_count) {
		mtx_unlock_spin(&mca_lock);
		return (EINVAL);
	}
	i = 0;
	STAILQ_FOREACH(rec, &mca_records, link) {
		if (i == name[0]) {
			record = rec->rec;
			break;
		}
		i++;
	}
	mtx_unlock_spin(&mca_lock);
	return (SYSCTL_OUT(req, &record, sizeof(record)));
}

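/*
 * Decode helpers for the compound MCA error-code fields: bits 1:0
 * encode the cache/memory level (LL), bits 3:2 the transaction type
 * (TT), bits 7:4 the request type (RRRR), and, for memory controller
 * errors, bits 6:4 the memory transaction type.
 */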
static const char *
mca_error_ttype(uint16_t mca_error)
{

	switch ((mca_error & 0x000c) >> 2) {
	case 0:
		return ("I");
	case 1:
		return ("D");
	case 2:
		return ("G");
	}
	return ("?");
}

static const char *
mca_error_level(uint16_t mca_error)
{

	switch (mca_error & 0x0003) {
	case 0:
		return ("L0");
	case 1:
		return ("L1");
	case 2:
		return ("L2");
	case 3:
		return ("LG");
	}
	return ("L?");
}

static const char *
mca_error_request(uint16_t mca_error)
{

	switch ((mca_error & 0x00f0) >> 4) {
	case 0x0:
		return ("ERR");
	case 0x1:
		return ("RD");
	case 0x2:
		return ("WR");
	case 0x3:
		return ("DRD");
	case 0x4:
		return ("DWR");
	case 0x5:
		return ("IRD");
	case 0x6:
		return ("PREFETCH");
	case 0x7:
		return ("EVICT");
	case 0x8:
		return ("SNOOP");
	}
	return ("???");
}

static const char *
mca_error_mmtype(uint16_t mca_error)
{

	switch ((mca_error & 0x70) >> 4) {
	case 0x0:
		return ("GEN");
	case 0x1:
		return ("RD");
	case 0x2:
		return ("WR");
	case 0x3:
		return ("AC");
	case 0x4:
		return ("MS");
	}
	return ("???");
}

static int __nonnull(1)
mca_mute(const struct mca_record *rec)
{

	/*
	 * Skip spurious corrected parity errors generated by Intel Haswell-
	 * and Broadwell-based CPUs (see the HSD131, HSM142, HSW131 and BDM48
	 * errata, respectively), unless reporting is enabled.
	 * Note that these errors also have been observed with the D0-stepping
	 * of Haswell, while at least initially the CPU specification updates
	 * suggested only the C0-stepping to be affected.  Similarly, Celeron
	 * 2955U with a CPU ID of 0x45 apparently is also affected by the
	 * same problem, with HSM142 only referring to 0x3c and 0x46.
	 */
	if (cpu_vendor_id == CPU_VENDOR_INTEL &&
	    CPUID_TO_FAMILY(cpu_id) == 0x6 &&
	    (CPUID_TO_MODEL(cpu_id) == 0x3c ||	/* HSD131, HSM142, HSW131 */
	    CPUID_TO_MODEL(cpu_id) == 0x3d ||	/* BDM48 */
	    CPUID_TO_MODEL(cpu_id) == 0x45 ||
	    CPUID_TO_MODEL(cpu_id) == 0x46) &&	/* HSM142 */
	    rec->mr_bank == 0 &&
	    (rec->mr_status & 0xa0000000ffffffff) == 0x80000000000f0005 &&
	    !intel6h_HSD131)
		return (1);

	return (0);
}

/* Dump details about a single machine check. */
static void __nonnull(1)
mca_log(const struct mca_record *rec)
{
	uint16_t mca_error;

	if (mca_mute(rec))
		return;

	printf("MCA: Bank %d, Status 0x%016llx\n", rec->mr_bank,
	    (long long)rec->mr_status);
	printf("MCA: Global Cap 0x%016llx, Status 0x%016llx\n",
	    (long long)rec->mr_mcg_cap, (long long)rec->mr_mcg_status);
	printf("MCA: Vendor \"%s\", ID 0x%x, APIC ID %d\n", cpu_vendor,
	    rec->mr_cpu_id, rec->mr_apic_id);
	printf("MCA: CPU %d ", rec->mr_cpu);
	if (rec->mr_status & MC_STATUS_UC)
		printf("UNCOR ");
	else {
		printf("COR ");
		if (rec->mr_mcg_cap & MCG_CAP_CMCI_P)
			printf("(%lld) ", ((long long)rec->mr_status &
			    MC_STATUS_COR_COUNT) >> 38);
	}
	if (rec->mr_status & MC_STATUS_PCC)
		printf("PCC ");
	if (rec->mr_status & MC_STATUS_OVER)
		printf("OVER ");
	mca_error = rec->mr_status & MC_STATUS_MCA_ERROR;
	switch (mca_error) {
		/* Simple error codes. */
	case 0x0000:
		printf("no error");
		break;
	case 0x0001:
		printf("unclassified error");
		break;
	case 0x0002:
		printf("ucode ROM parity error");
		break;
	case 0x0003:
		printf("external error");
		break;
	case 0x0004:
		printf("FRC error");
		break;
	case 0x0005:
		printf("internal parity error");
		break;
	case 0x0400:
		printf("internal timer error");
		break;
	default:
		if ((mca_error & 0xfc00) == 0x0400) {
			printf("internal error %x", mca_error & 0x03ff);
			break;
		}

		/* Compound error codes. */

		/* Memory hierarchy error. */
		if ((mca_error & 0xeffc) == 0x000c) {
			printf("%s memory error", mca_error_level(mca_error));
			break;
		}

		/* TLB error. */
		if ((mca_error & 0xeff0) == 0x0010) {
			printf("%sTLB %s error", mca_error_ttype(mca_error),
			    mca_error_level(mca_error));
			break;
		}

		/* Memory controller error. */
		if ((mca_error & 0xef80) == 0x0080) {
			printf("%s channel ", mca_error_mmtype(mca_error));
			if ((mca_error & 0x000f) != 0x000f)
				printf("%d", mca_error & 0x000f);
			else
				printf("??");
			printf(" memory error");
			break;
		}

		/* Cache error. */
		if ((mca_error & 0xef00) == 0x0100) {
			printf("%sCACHE %s %s error",
			    mca_error_ttype(mca_error),
			    mca_error_level(mca_error),
			    mca_error_request(mca_error));
			break;
		}

		/* Bus and/or Interconnect error. */
		if ((mca_error & 0xe800) == 0x0800) {
			printf("BUS%s ", mca_error_level(mca_error));
			switch ((mca_error & 0x0600) >> 9) {
			case 0:
				printf("Source");
				break;
			case 1:
				printf("Responder");
				break;
			case 2:
				printf("Observer");
				break;
			default:
				printf("???");
				break;
			}
			printf(" %s ", mca_error_request(mca_error));
			switch ((mca_error & 0x000c) >> 2) {
			case 0:
				printf("Memory");
				break;
			case 2:
				printf("I/O");
				break;
			case 3:
				printf("Other");
				break;
			default:
				printf("???");
				break;
			}
			if (mca_error & 0x0100)
				printf(" timed out");
			break;
		}

		printf("unknown error %x", mca_error);
		break;
	}
	printf("\n");
	if (rec->mr_status & MC_STATUS_ADDRV)
		printf("MCA: Address 0x%llx\n", (long long)rec->mr_addr);
	if (rec->mr_status & MC_STATUS_MISCV)
		printf("MCA: Misc 0x%llx\n", (long long)rec->mr_misc);
}

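/*
 * Check a single machine check bank for a valid event.  Returns 1 and
 * fills in *rec when one is latched, otherwise returns 0.
 */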
static int __nonnull(2)
mca_check_status(int bank, struct mca_record *rec)
{
	uint64_t status;
	u_int p[4];

	status = rdmsr(MSR_MC_STATUS(bank));
	if (!(status & MC_STATUS_VAL))
		return (0);

	/* Save exception information. */
	rec->mr_status = status;
	rec->mr_bank = bank;
	rec->mr_addr = 0;
	if (status & MC_STATUS_ADDRV)
		rec->mr_addr = rdmsr(MSR_MC_ADDR(bank));
	rec->mr_misc = 0;
	if (status & MC_STATUS_MISCV)
		rec->mr_misc = rdmsr(MSR_MC_MISC(bank));
	rec->mr_tsc = rdtsc();
	rec->mr_apic_id = PCPU_GET(apic_id);
	rec->mr_mcg_cap = rdmsr(MSR_MCG_CAP);
	rec->mr_mcg_status = rdmsr(MSR_MCG_STATUS);
	rec->mr_cpu_id = cpu_id;
	rec->mr_cpu_vendor_id = cpu_vendor_id;
	rec->mr_cpu = PCPU_GET(cpuid);

	/*
	 * Clear machine check.  Don't do this for uncorrectable
	 * errors so that the BIOS can see them.
	 */
	if (!(rec->mr_status & (MC_STATUS_PCC | MC_STATUS_UC))) {
		wrmsr(MSR_MC_STATUS(bank), 0);
		do_cpuid(0, p);		/* cpuid serializes the wrmsr(). */
	}
	return (1);
}

static void
mca_fill_freelist(void)
{
	struct mca_internal *rec;
	int desired;

	/*
	 * Ensure we have at least one record for each bank and one
	 * record per CPU.
	 */
	desired = imax(mp_ncpus, mca_banks);
	mtx_lock_spin(&mca_lock);
	while (mca_freecount < desired) {
		mtx_unlock_spin(&mca_lock);
		rec = malloc(sizeof(*rec), M_MCA, M_WAITOK);
		mtx_lock_spin(&mca_lock);
		STAILQ_INSERT_TAIL(&mca_freelist, rec, link);
		mca_freecount++;
	}
	mtx_unlock_spin(&mca_lock);
}

static void
mca_refill(void *context, int pending)
{

	mca_fill_freelist();
}

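/*
 * Append a machine check record to the global list.  In POLLED mode
 * this runs in a thread context that may sleep in malloc(9); the MC#
 * and CMCI handlers must not sleep and instead consume pre-allocated
 * entries from the freelist.
 */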
static void __nonnull(2)
mca_record_entry(enum scan_mode mode, const struct mca_record *record)
{
	struct mca_internal *rec;

	if (mode == POLLED) {
		rec = malloc(sizeof(*rec), M_MCA, M_WAITOK);
		mtx_lock_spin(&mca_lock);
	} else {
		mtx_lock_spin(&mca_lock);
		rec = STAILQ_FIRST(&mca_freelist);
		if (rec == NULL) {
			printf("MCA: Unable to allocate space for an event.\n");
			mca_log(record);
			mtx_unlock_spin(&mca_lock);
			return;
		}
		STAILQ_REMOVE_HEAD(&mca_freelist, link);
		mca_freecount--;
	}

	rec->rec = *record;
	rec->logged = 0;
	STAILQ_INSERT_TAIL(&mca_records, rec, link);
	mca_count++;
	mtx_unlock_spin(&mca_lock);
	if (mode == CMCI)
		taskqueue_enqueue_fast(mca_tq, &mca_refill_task);
}

#ifdef DEV_APIC
/*
 * Update the interrupt threshold for a CMCI.  The strategy is to use
 * a low trigger that interrupts as soon as the first event occurs.
 * However, if a steady stream of events arrives, the threshold is
 * increased until the interrupts are throttled to once every
 * cmc_throttle seconds or the periodic scan.  If a periodic scan
 * finds that the threshold is too high, it is lowered.
 */
static void
cmci_update(enum scan_mode mode, int bank, int valid, struct mca_record *rec)
{
	struct cmc_state *cc;
	uint64_t ctl;
	u_int delta;
	int count, limit;

	/* Fetch the current limit for this bank. */
	cc = &cmc_state[PCPU_GET(cpuid)][bank];
	ctl = rdmsr(MSR_MC_CTL2(bank));
	count = (rec->mr_status & MC_STATUS_COR_COUNT) >> 38;
	delta = (u_int)(ticks - cc->last_intr);

	/*
	 * If an interrupt was received less than cmc_throttle seconds
	 * since the previous interrupt and the count from the current
	 * event is greater than or equal to the current threshold,
	 * double the threshold up to the max.
	 */
	if (mode == CMCI && valid) {
		limit = ctl & MC_CTL2_THRESHOLD;
		if (delta < cmc_throttle && count >= limit &&
		    limit < cc->max_threshold) {
			limit = min(limit << 1, cc->max_threshold);
			ctl &= ~MC_CTL2_THRESHOLD;
			ctl |= limit;
			/* Write back ctl to preserve MC_CTL2_CMCI_EN. */
			wrmsr(MSR_MC_CTL2(bank), ctl);
		}
		cc->last_intr = ticks;
		return;
	}

	/*
	 * When the banks are polled, check to see if the threshold
	 * should be lowered.
	 */
	if (mode != POLLED)
		return;

	/* If a CMCI occurred recently, do nothing for now. */
	if (delta < cmc_throttle)
		return;

	/*
	 * Compute a new limit based on the average rate of events per
	 * cmc_throttle seconds since the last interrupt.
	 */
	if (valid) {
		count = (rec->mr_status & MC_STATUS_COR_COUNT) >> 38;
		limit = count * cmc_throttle / delta;
		if (limit <= 0)
			limit = 1;
		else if (limit > cc->max_threshold)
			limit = cc->max_threshold;
	} else
		limit = 1;
	if ((ctl & MC_CTL2_THRESHOLD) != limit) {
		ctl &= ~MC_CTL2_THRESHOLD;
		ctl |= limit;
		wrmsr(MSR_MC_CTL2(bank), ctl);
	}
}
#endif

/*
 * This scans all the machine check banks of the current CPU to see if
 * there are any machine checks.  Any non-recoverable errors are
 * reported immediately via mca_log().  The current thread must be
 * pinned when this is called.  The 'mode' parameter indicates if we
 * are being called from the MC exception handler, the CMCI handler,
 * or the periodic poller.  In the MC exception case this function
 * returns true if the system is restartable.  Otherwise, it returns a
 * count of the number of valid MC records found.
 */
static int
mca_scan(enum scan_mode mode)
{
	struct mca_record rec;
	uint64_t mcg_cap, ucmask;
	int count, i, recoverable, valid;

	count = 0;
	recoverable = 1;
	ucmask = MC_STATUS_UC | MC_STATUS_PCC;

	/* When handling an MCE#, treat the OVER flag as non-restartable. */
	if (mode == MCE)
		ucmask |= MC_STATUS_OVER;
	mcg_cap = rdmsr(MSR_MCG_CAP);
	for (i = 0; i < (mcg_cap & MCG_CAP_COUNT); i++) {
#ifdef DEV_APIC
		/*
		 * For a CMCI, only check banks this CPU is
		 * responsible for.
		 */
		if (mode == CMCI && !(PCPU_GET(cmci_mask) & 1 << i))
			continue;
#endif

		valid = mca_check_status(i, &rec);
		if (valid) {
			count++;
			if (rec.mr_status & ucmask) {
				recoverable = 0;
				mtx_lock_spin(&mca_lock);
				mca_log(&rec);
				mtx_unlock_spin(&mca_lock);
			}
			mca_record_entry(mode, &rec);
		}

#ifdef DEV_APIC
		/*
		 * If this is a bank this CPU monitors via CMCI,
		 * update the threshold.
		 */
		if (PCPU_GET(cmci_mask) & 1 << i)
			cmci_update(mode, i, valid, &rec);
#endif
	}
	if (mode == POLLED)
		mca_fill_freelist();
	return (mode == MCE ? recoverable : count);
}

/*
 * Scan the machine check banks on all CPUs by binding to each CPU in
 * turn.  If any of the CPUs contained new machine check records, log
 * them to the console.
 */
static void
mca_scan_cpus(void *context, int pending)
{
	struct mca_internal *mca;
	struct thread *td;
	int count, cpu;

	mca_fill_freelist();
	td = curthread;
	count = 0;
	thread_lock(td);
	CPU_FOREACH(cpu) {
		sched_bind(td, cpu);
		thread_unlock(td);
		count += mca_scan(POLLED);
		thread_lock(td);
		sched_unbind(td);
	}
	thread_unlock(td);
	if (count != 0) {
		mtx_lock_spin(&mca_lock);
		STAILQ_FOREACH(mca, &mca_records, link) {
			if (!mca->logged) {
				mca->logged = 1;
				mca_log(&mca->rec);
			}
		}
		mtx_unlock_spin(&mca_lock);
	}
}

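/*
 * Timer callback: queue a scan of all CPUs and rearm the callout for
 * the next interval.
 */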
static void
mca_periodic_scan(void *arg)
{

	taskqueue_enqueue_fast(mca_tq, &mca_scan_task);
	callout_reset(&mca_timer, mca_ticks * hz, mca_periodic_scan, NULL);
}

static int
sysctl_mca_scan(SYSCTL_HANDLER_ARGS)
{
	int error, i;

	i = 0;
	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error)
		return (error);
	if (i)
		taskqueue_enqueue_fast(mca_tq, &mca_scan_task);
	return (0);
}

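/* Create the fast taskqueue that runs the scan and refill tasks. */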
static void
mca_createtq(void *dummy)
{
	if (mca_banks <= 0)
		return;

	mca_tq = taskqueue_create_fast("mca", M_WAITOK,
	    taskqueue_thread_enqueue, &mca_tq);
	taskqueue_start_threads(&mca_tq, 1, PI_SWI(SWI_TQ), "mca taskq");
}
SYSINIT(mca_createtq, SI_SUB_CONFIGURE, SI_ORDER_ANY, mca_createtq, NULL);

static void
mca_startup(void *dummy)
{

	if (mca_banks <= 0)
		return;

	callout_reset(&mca_timer, mca_ticks * hz, mca_periodic_scan, NULL);
}
SYSINIT(mca_startup, SI_SUB_SMP, SI_ORDER_ANY, mca_startup, NULL);

#ifdef DEV_APIC
static void
cmci_setup(void)
{
	int i;

	cmc_state = malloc((mp_maxid + 1) * sizeof(struct cmc_state *), M_MCA,
	    M_WAITOK);
	for (i = 0; i <= mp_maxid; i++)
		cmc_state[i] = malloc(sizeof(struct cmc_state) * mca_banks,
		    M_MCA, M_WAITOK | M_ZERO);
	SYSCTL_ADD_PROC(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO,
	    "cmc_throttle", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
	    &cmc_throttle, 0, sysctl_positive_int, "I",
	    "Interval in seconds to throttle corrected MC interrupts");
}
#endif

static void
mca_setup(uint64_t mcg_cap)
{

	/*
	 * On AMD Family 10h processors, unless logging of level one TLB
	 * parity (L1TP) errors is disabled, enable the recommended workaround
	 * for Erratum 383.
	 */
	if (cpu_vendor_id == CPU_VENDOR_AMD &&
	    CPUID_TO_FAMILY(cpu_id) == 0x10 && amd10h_L1TP)
		workaround_erratum383 = 1;

	mca_banks = mcg_cap & MCG_CAP_COUNT;
	mtx_init(&mca_lock, "mca", NULL, MTX_SPIN);
	STAILQ_INIT(&mca_records);
	TASK_INIT(&mca_scan_task, 0, mca_scan_cpus, NULL);
	callout_init(&mca_timer, CALLOUT_MPSAFE);
	STAILQ_INIT(&mca_freelist);
	TASK_INIT(&mca_refill_task, 0, mca_refill, NULL);
	mca_fill_freelist();
	SYSCTL_ADD_INT(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO,
	    "count", CTLFLAG_RD, (int *)(uintptr_t)&mca_count, 0,
	    "Record count");
	SYSCTL_ADD_PROC(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO,
	    "interval", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, &mca_ticks,
	    0, sysctl_positive_int, "I",
	    "Periodic interval in seconds to scan for machine checks");
	SYSCTL_ADD_NODE(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO,
	    "records", CTLFLAG_RD, sysctl_mca_records, "Machine check records");
	SYSCTL_ADD_PROC(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO,
	    "force_scan", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0,
	    sysctl_mca_scan, "I", "Force an immediate scan for machine checks");
#ifdef DEV_APIC
	if (mcg_cap & MCG_CAP_CMCI_P)
		cmci_setup();
#endif
}
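
/*
 * The sysctl nodes registered above are reachable from userland, e.g.:
 *
 *	sysctl hw.mca.count		# number of records captured
 *	sysctl hw.mca.interval=600	# poll every ten minutes instead
 *	sysctl hw.mca.force_scan=1	# trigger an immediate scan
 *
 * hw.mca.records.<N> exports the raw struct mca_record for record N.
 */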

#ifdef DEV_APIC
/*
 * See if we should monitor CMCI for this bank.  If CMCI_EN is already
 * set in MC_CTL2, then another CPU is responsible for this bank, so
 * ignore it.  If CMCI_EN returns zero after being set, then this bank
 * does not support CMCI_EN.  If this CPU sets CMCI_EN, then it should
 * now monitor this bank.
 */
static void
cmci_monitor(int i)
{
	struct cmc_state *cc;
	uint64_t ctl;

	KASSERT(i < mca_banks, ("CPU %d has more MC banks", PCPU_GET(cpuid)));

	ctl = rdmsr(MSR_MC_CTL2(i));
	if (ctl & MC_CTL2_CMCI_EN)
		/* Already monitored by another CPU. */
		return;

	/* Set the threshold to one event for now. */
	ctl &= ~MC_CTL2_THRESHOLD;
	ctl |= MC_CTL2_CMCI_EN | 1;
	wrmsr(MSR_MC_CTL2(i), ctl);
	ctl = rdmsr(MSR_MC_CTL2(i));
	if (!(ctl & MC_CTL2_CMCI_EN))
		/* This bank does not support CMCI. */
		return;

	cc = &cmc_state[PCPU_GET(cpuid)][i];

	/* Determine maximum threshold. */
	ctl &= ~MC_CTL2_THRESHOLD;
	ctl |= 0x7fff;
	wrmsr(MSR_MC_CTL2(i), ctl);
	ctl = rdmsr(MSR_MC_CTL2(i));
	cc->max_threshold = ctl & MC_CTL2_THRESHOLD;

	/* Start off with a threshold of 1. */
	ctl &= ~MC_CTL2_THRESHOLD;
	ctl |= 1;
	wrmsr(MSR_MC_CTL2(i), ctl);

	/* Mark this bank as monitored. */
	PCPU_SET(cmci_mask, PCPU_GET(cmci_mask) | 1 << i);
}

/*
 * For resume, reset the threshold for any banks we monitor back to
 * one and throw away the timestamp of the last interrupt.
 */
static void
cmci_resume(int i)
{
	struct cmc_state *cc;
	uint64_t ctl;

	KASSERT(i < mca_banks, ("CPU %d has more MC banks", PCPU_GET(cpuid)));

	/* Ignore banks not monitored by this CPU. */
	if (!(PCPU_GET(cmci_mask) & 1 << i))
		return;

	cc = &cmc_state[PCPU_GET(cpuid)][i];
	cc->last_intr = -ticks;
	ctl = rdmsr(MSR_MC_CTL2(i));
	ctl &= ~MC_CTL2_THRESHOLD;
	ctl |= MC_CTL2_CMCI_EN | 1;
	wrmsr(MSR_MC_CTL2(i), ctl);
}
#endif

/*
 * Initializes per-CPU machine check registers and enables corrected
 * machine check interrupts.
 */
static void
_mca_init(int boot)
{
	uint64_t mcg_cap;
	uint64_t ctl, mask;
	int i, skip;

	/* MCE is required. */
	if (!mca_enabled || !(cpu_feature & CPUID_MCE))
		return;

	if (cpu_feature & CPUID_MCA) {
		if (boot)
			PCPU_SET(cmci_mask, 0);

		mcg_cap = rdmsr(MSR_MCG_CAP);
		if (mcg_cap & MCG_CAP_CTL_P)
			/* Enable MCA features. */
			wrmsr(MSR_MCG_CTL, MCG_CTL_ENABLE);
		if (PCPU_GET(cpuid) == 0 && boot)
			mca_setup(mcg_cap);

		/*
		 * Disable logging of level one TLB parity (L1TP) errors by
		 * the data cache as an alternative workaround for AMD Family
		 * 10h Erratum 383.  Unlike the recommended workaround, there
		 * is no performance penalty to this workaround.  However,
		 * L1TP errors will go unreported.
		 */
		if (cpu_vendor_id == CPU_VENDOR_AMD &&
		    CPUID_TO_FAMILY(cpu_id) == 0x10 && !amd10h_L1TP) {
			mask = rdmsr(MSR_MC0_CTL_MASK);
			if ((mask & (1UL << 5)) == 0)
				wrmsr(MSR_MC0_CTL_MASK, mask | (1UL << 5));
		}
		for (i = 0; i < (mcg_cap & MCG_CAP_COUNT); i++) {
			/* By default enable logging of all errors. */
			ctl = 0xffffffffffffffffUL;
			skip = 0;

			if (cpu_vendor_id == CPU_VENDOR_INTEL) {
				/*
				 * For P6 models before Nehalem MC0_CTL is
				 * always enabled and reserved.
				 */
				if (i == 0 && CPUID_TO_FAMILY(cpu_id) == 0x6
				    && CPUID_TO_MODEL(cpu_id) < 0x1a)
					skip = 1;
			} else if (cpu_vendor_id == CPU_VENDOR_AMD) {
				/* BKDG for Family 10h: unset GartTblWkEn. */
				if (i == 4 && CPUID_TO_FAMILY(cpu_id) >= 0xf)
					ctl &= ~(1UL << 10);
			}

			if (!skip)
				wrmsr(MSR_MC_CTL(i), ctl);

#ifdef DEV_APIC
			if (mcg_cap & MCG_CAP_CMCI_P) {
				if (boot)
					cmci_monitor(i);
				else
					cmci_resume(i);
			}
#endif

			/* Clear all errors. */
			wrmsr(MSR_MC_STATUS(i), 0);
		}

#ifdef DEV_APIC
		if (PCPU_GET(cmci_mask) != 0 && boot)
			lapic_enable_cmc();
#endif
	}

	/* Enable machine check exceptions (CR4.MCE). */
	load_cr4(rcr4() | CR4_MCE);
}

/* Must be executed on each CPU during boot. */
void
mca_init(void)
{

	_mca_init(1);
}

/* Must be executed on each CPU during resume. */
void
mca_resume(void)
{

	_mca_init(0);
}

/*
 * The machine check registers for the BSP cannot be initialized until
 * the local APIC is initialized.  This happens at SI_SUB_CPU,
 * SI_ORDER_SECOND.
 */
static void
mca_init_bsp(void *arg __unused)
{

	mca_init();
}
SYSINIT(mca_init_bsp, SI_SUB_CPU, SI_ORDER_ANY, mca_init_bsp, NULL);

/* Called when a machine check exception fires. */
void
mca_intr(void)
{
	uint64_t mcg_status;
	int old_count, recoverable;

	if (!(cpu_feature & CPUID_MCA)) {
		/*
		 * Just print the values of the old Pentium registers
		 * and panic.
		 */
		printf("MC Type: 0x%jx  Address: 0x%jx\n",
		    (uintmax_t)rdmsr(MSR_P5_MC_TYPE),
		    (uintmax_t)rdmsr(MSR_P5_MC_ADDR));
		panic("Machine check");
	}

	/* Scan the banks and check for any non-recoverable errors. */
	old_count = mca_count;
	recoverable = mca_scan(MCE);
	mcg_status = rdmsr(MSR_MCG_STATUS);
	if (!(mcg_status & MCG_STATUS_RIPV))
		recoverable = 0;

	if (!recoverable) {
		/*
		 * Wait for at least one error to be logged before
		 * panic'ing.  Some errors will assert a machine check
		 * on all CPUs, but only certain CPUs will find a valid
		 * bank to log.
		 */
		while (mca_count == old_count)
			cpu_spinwait();

		panic("Unrecoverable machine check exception");
	}

	/* Clear MCIP. */
	wrmsr(MSR_MCG_STATUS, mcg_status & ~MCG_STATUS_MCIP);
}

#ifdef DEV_APIC
/* Called for a CMCI (correctable machine check interrupt). */
void
cmc_intr(void)
{
	struct mca_internal *mca;
	int count;

	/*
	 * Serialize MCA bank scanning to prevent collisions from
	 * sibling threads.
	 */
	count = mca_scan(CMCI);

	/* If we found anything, log the new records to the console. */
	if (count != 0) {
		mtx_lock_spin(&mca_lock);
		STAILQ_FOREACH(mca, &mca_records, link) {
			if (!mca->logged) {
				mca->logged = 1;
				mca_log(&mca->rec);
			}
		}
		mtx_unlock_spin(&mca_lock);
	}
}
#endif